Dataset columns:
  query            string    (lengths 9 to 3.4k)
  document         string    (lengths 9 to 87.4k)
  metadata         dict
  negatives        sequence  (lengths 4 to 101)
  negative_scores  sequence  (lengths 4 to 101)
  document_score   string    (lengths 3 to 10)
  document_rank    string    (102 distinct values)
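Each row pairs a natural-language query with a positive code document, a list of negative code snippets, and their similarity scores; the objective block in metadata (visible in the rows below) marks the intended use as (query, document, negatives) triplets. Below is a minimal sketch of expanding one row into such triplets, assuming rows are available as plain Python dicts keyed by the column names above; the helper name, the max_negatives cap, and the toy example row are illustrative and not part of the dataset.

```python
def row_to_triplets(row, max_negatives=4):
    """Expand one dataset row into (anchor, positive, negative) triplets.

    Assumes the row follows the schema above: 'query' and 'document' are
    strings, 'negatives' is a list of code strings, and the parallel
    'negative_scores' list gives each negative's similarity to the query.
    """
    anchor = row["query"]
    positive = row["document"]
    negatives = row["negatives"][:max_negatives]  # keep only the first few negatives
    return [(anchor, positive, negative) for negative in negatives]


# Toy usage with a row shaped like the records in this dump.
example_row = {
    "query": "Zoom in on a specific station on a map",
    "document": "def station_on_map(request, station_number): ...",
    "negatives": ["def zoom_to(self): ...", "def zoomIn(self, x, y): ..."],
    "negative_scores": ["0.62788576", "0.6039453"],
}
for anchor, positive, negative in row_to_triplets(example_row):
    print(anchor, "->", positive[:30], "| negative:", negative[:20])
```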
query: Zoom in on a specific station on a map
document:
def station_on_map(request, station_number):

    data_stations = stations_with_data()
    station_number = int(station_number)
    down, problem, up = status_lists()

    station = get_object_or_404(Station, number=station_number)
    center = station.latest_location()
    if center['latitude'] is None and center['longitude'] is None:
        raise Http404

    subclusters = []
    for subcluster in Cluster.objects.all():
        stations = []
        for station in (Station.objects.select_related('cluster__parent',
                                                       'cluster__country')
                                       .filter(cluster=subcluster,
                                               pc__is_test=False)):
            link = station in data_stations
            status = get_station_status(station.number, down, problem, up)
            location = station.latest_location()
            station_data = {'number': station.number,
                            'name': station.name,
                            'cluster': station.cluster,
                            'link': link,
                            'status': status}
            station_data.update(location)
            stations.append(station_data)
        subclusters.append({'name': subcluster.name,
                            'stations': stations})

    return render(request, 'map.html',
                  {'subclusters': subclusters,
                   'center': center})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def zoom_to(self):\n # Will seek user feedback. QGIS will\n # Pan to first layer loaded", "def zoomIn(self, x, y):\n\n # set view state\n (map_x, map_y) = self.getMapCoordsFromView((x,y))\n self.view_offset_x = map_x*2 - self.view_width/2\n self.view_offset_y = map_y*2 - self.view_height/2\n\n self.onResize(None)", "def zoom_in_out():\n # Locating the map\n map = My.search_presence_webelement(driver, By.ID, \"ypgmap\")\n assert map\n\n # Locating the zoom-in button\n zoom_in_button = My.search_clickable_webelement(map, By.XPATH, \"//*[@id='ypgmap']/div[2]/div[1]/div[1]/a[1]\")\n assert zoom_in_button\n zoom_in_button.click()\n\n # Locating the zoom-out button\n zoom_out_button = My.search_clickable_webelement(map, By.XPATH, \"//*[@id='ypgmap']/div[2]/div[1]/div[1]/a[2]\")\n assert zoom_out_button\n zoom_out_button.click()", "def station_viewer():\r\n name = request.args[\"address\"]\r\n stations = get_zipcode_stations(name)\r\n\r\n if len(stations) > 0:\r\n stations['coordinate'] = 'end_point='+stations['name'].astype(str)+'&'+'end_lng=' + stations['lon'].astype(str)+'&'+'end_lat='+stations['lat'].astype(str)\r\n\r\n #genetrate folium map\r\n station_coordinates = stations[[\"lat\", \"lon\"]].values.tolist()\r\n\r\n map=make_folium_map(station_coordinates)\r\n\r\n\r\n # generate interactive map\r\n\r\n return render_template(\r\n \"page3.html\",\r\n num_stations=get_num_stations(name),\r\n address=name,\r\n stations=stations[[\"name\", \"address\", \"available_bikes\", 'coordinate']].values,\r\n map=map._repr_html_()\r\n )\r\n\r\n else:\r\n lng=get_address(name)[1]\r\n lat=get_address(name)[0]\r\n near_bike = find_5near_stations(lng, lat)\r\n near_bike['coordinate'] = 'end_point='+near_bike['name'].astype(str)+'&'+'end_lng=' + near_bike['lon'].astype(str)+'&'+'end_lat='+near_bike['lat'].astype(str)\r\n\r\n return render_template(\r\n \"page3_1b_nobike.html\",\r\n address=name,\r\n near_bike_table=near_bike[[\"name\", \"address\", \"available_bikes\", \"coordinate\", \"distance\"]].values)", "def ui_select_station(stations,\n zoom=3,\n center=(-24, 138),\n **kw):\n import ipywidgets as W\n from IPython.display import display\n import matplotlib.pyplot as plt\n import ipyleaflet as L\n from odc.ui import ui_poll\n\n dbg_display = W.Output()\n fig_display = W.Output()\n btn_done = W.Button(description='Done')\n scroll_wheel_zoom = kw.pop('scroll_wheel_zoom', True)\n map_widget = L.Map(zoom=zoom,\n center=center,\n scroll_wheel_zoom=scroll_wheel_zoom,\n **kw)\n\n state = SimpleNamespace(pos=None,\n gauge_data=None,\n finished=False,\n station=None)\n\n plt_interactive_state = plt.isinteractive()\n plt.interactive(False)\n\n with fig_display:\n fig, ax = plt.subplots(1, figsize=(14,4))\n ax.set_visible(False)\n display(fig)\n\n def _on_select(station):\n if state.finished:\n print('Please re-run the cell')\n return\n\n state.station = station\n state.pos = station.pos\n state.gauge_data = None\n\n print('Fetching data for: {}'.format(station.name))\n try:\n xx = get_station_data(station).dropna()\n except Exception:\n print('Failed to read data')\n return\n print('Got {} observations'.format(xx.shape[0]))\n\n state.gauge_data = xx\n\n with fig_display:\n ax.clear()\n ax.set_visible(True)\n xx.plot(ax=ax)\n ax.set_xlabel(\"Date\")\n ax.set_ylabel(\"Cubic meters per second\")\n ax.legend([station.name])\n\n fig_display.clear_output(wait=True)\n with fig_display:\n display(fig)\n\n def on_select(station):\n with dbg_display:\n _on_select(station)\n\n def on_done(btn):\n if state.finished:\n 
with dbg_display:\n print('Please re-run the cell')\n return\n\n state.finished = True\n n_obs = 0 if state.gauge_data is None else state.gauge_data.shape[0]\n\n with dbg_display:\n print('''Finished\nStation: {}\nNumber of Observations: {}'''.format(state.station.name, n_obs))\n\n def on_poll():\n with dbg_display:\n if state.finished:\n return state.gauge_data, state.station\n return None\n\n mk_station_selector(on_select,\n stations=stations,\n dst_map=map_widget)\n\n ## UI:\n ##\n ## MMMMMMMMMMMMM BBBBB\n ## MMMMMMMMMMMMM .....\n ## MMMMMMMMMMMMM .....\n ## MMMMMMMMMMMMM .....\n ## MMMMMMMMMMMMM .....\n ## FFFFFFFFFFFFFFFFFFF\n ## FFFFFFFFFFFFFFFFFFF\n\n # M - Map F - Figure\n # B - Button . - Debug output\n\n btn_done.on_click(on_done)\n r_panel = W.VBox([btn_done, dbg_display],\n layout=W.Layout(width='30%'))\n\n ui = W.VBox([W.HBox([map_widget, r_panel]),\n fig_display])\n\n display(ui)\n\n result = ui_poll(on_poll, 1/20) # this will block until done is pressed\n\n #restore interactive state\n fig_display.clear_output(wait=True)\n with fig_display:\n plt.interactive(plt_interactive_state)\n plt.show()\n\n return result", "def apply_zoom(self):\n self.maparea.setTransform(self.zoom_levels[self.cur_zoom][1])\n self.scene.draw_visible_area()", "def zoom(self, zoom):\n\n self.container['zoom'] = zoom", "def __zoomIn(self):\n if QApplication.focusWidget() == e5App().getObject(\"Shell\"):\n e5App().getObject(\"Shell\").zoomIn()\n else:\n aw = self.activeWindow()\n if aw:\n aw.zoomIn()\n self.sbZoom.setValue(aw.getZoom())", "def center_screen(self):\n avg_lat, avg_lon = self.midpoint_euclidean(lat1, lon1, lat2, lon2)\n self.ids.mapview.center_on(avg_lat,avg_lon) # Set map center\n\n # Zoom levels dependent on distance\n if (distance < 5000) & (distance > 2500):\n self.ids.mapview.zoom = 3\n elif (distance < 2500) & (distance > 1500):\n self.ids.mapview.zoom = 4\n elif (distance < 1500) & (distance > 1000):\n self.ids.mapview.zoom = 5\n elif (distance < 1000) & (distance > 500):\n self.ids.mapview.zoom = 6\n elif (distance < 500) & (distance > 250):\n self.ids.mapview.zoom = 7\n elif (distance < 250) & (distance > 80):\n self.ids.mapview.zoom = 8\n elif (distance < 80) & (distance > 30):\n self.ids.mapview.zoom = 9\n elif (distance < 30) & (distance > 10):\n self.ids.mapview.zoom = 10\n elif (distance < 10) & (distance > 5):\n self.ids.mapview.zoom = 11\n elif (distance < 5) & (distance > 0):\n self.ids.mapview.zoom = 12\n else:\n self.ids.mapview.zoom = 2\n\n self.marker() # Put markers on map\n self.line_layer() # Draw line", "def zoom(self, parameter):\n dx, px, dy, py = parameter\n if self.parent.constrain_ratio:\n if (dx >= 0) and (dy >= 0):\n dx, dy = (max(dx, dy),) * 2\n elif (dx <= 0) and (dy <= 0):\n dx, dy = (min(dx, dy),) * 2\n else:\n dx = dy = 0\n self.sx *= np.exp(dx)\n self.sy *= np.exp(dy)\n \n # constrain scaling\n if self.constrain_navigation:\n self.sx = np.clip(self.sx, self.sxmin, self.sxmax)\n self.sy = np.clip(self.sy, self.symin, self.symax)\n \n self.tx += -px * (1./self.sxl - 1./self.sx)\n self.ty += -py * (1./self.syl - 1./self.sy)\n self.sxl = self.sx\n self.syl = self.sy", "def onZoomIn(self, event):\n try:\n print('%s click: button=%d, x=%d, y=%d, xdata=%f, ydata=%f' %\n ('double' if event.dblclick else 'single', event.button,\n event.x, event.y, event.xdata, event.ydata))\n except:\n return\n\n\n self.plotter.zoomIn(event)", "def restrict_lat( mv, latmin, latmax ):\n if latmin==-90: latmin = -91 # just to make sure\n if latmax==90: latmax = 91\n\n # axes\n 
latax,idx = latAxis2(mv)\n if latax is None: return None\n imin = min( [i for i in range(len(latax)) if latax[i]>=latmin and latax[i]<=latmax ] )\n imax = max( [i for i in range(len(latax)) if latax[i]>=latmin and latax[i]<=latmax ] )\n newlatax = latax.subaxis( imin, imax+1 )\n # TO DO: use latax.bounds (if present) for newlatax.bounds\n # At the moment, I'm working with data for which latax.bounds doesn't exist.\n # At the moment, we don't need bounds. This would get us through if necessary:\n # newlatax.bounds = newlatax.genGenericBounds()\n newaxes = list( allAxes(mv) ) # shallow copy\n newaxes[idx] = newlatax\n\n # shrink the data to match the shrunk lat axis\n newmv_shape = list( mv.shape )\n newmv_shape[idx] = imax+1 - imin\n if imin>0:\n nd = numpy.delete( mv.data, slice(0,imin), idx ) # doesn't change mv\n else:\n nd = mv\n lenidx = nd.shape[idx]\n if lenidx > newmv_shape[idx]:\n newdata = numpy.delete( nd.data, slice(imax+1-imin,lenidx), idx )\n else:\n newdata = nd\n\n # new variable\n newmv = cdms2.createVariable( newdata, copy=True, axes=newaxes, id=mv.id )\n newmv.units = mv.units\n return newmv", "def setCoordinateResolution(*args):", "def setCoordinateResolution(*args):", "def setCoordinateResolution(*args):", "def setCoordinateResolution(*args):", "def zoom(self, xmin, xmax, xlen, ymin, ymax, ylen):\n self.xmax = xmax\n self.xmin = xmin\n self.xmax = xmax\n self.xlen = xlen\n self.ymin = ymin\n self.ymax = ymax\n self.ylen = ylen\n self.refresh()", "def station_map(data, station_counter):\n r = 6373.0\n\n phi = np.cos(55.943894)\n\n stations = list(set(data['start_station_name'].tolist() + data['end_station_name'].tolist()))\n dict = {name: () for name in stations}\n for index, row in data.iterrows():\n dict[row['start_station_name']] = (row['start_station_latitude'], row['start_station_longitude'])\n dict[row['end_station_name']] = (row['end_station_latitude'], row['end_station_longitude'])\n # number of stations\n n = len(dict.keys())\n\n # Convert to XY coordinates\n dict_xy = {ids: (station_counter[ids], (r * dict[ids][1] * phi, r * dict[ids][0])) for ids in dict.keys()}\n\n journeys, loc = zip(*list(dict_xy.values()))\n x, y = zip(*list(loc))\n journeys = [i for i in journeys]\n\n fig, ax = plt.subplots(figsize=(13, 8))\n plt.scatter(x, y, s=journeys)\n\n for ii in dict_xy.keys():\n ax.annotate(ii, dict_xy[ii][1], dict_xy[ii][1])\n\n plt.show()", "def zoom(self, zoomIn):\n zoomFactor = 0.05\n maxZoomIn = 2\n maxZoomOut = 0.1\n if zoomIn:\n s = self.viewNP.getScale()\n if s.getX()-zoomFactor < maxZoomIn and s.getY()-zoomFactor < maxZoomIn and s.getZ()-zoomFactor < maxZoomIn:\n self.viewNP.setScale(s.getX()+zoomFactor,s.getY()+zoomFactor,s.getZ()+zoomFactor)\n else:\n s = self.viewNP.getScale()\n if s.getX()-zoomFactor > maxZoomOut and s.getY()-zoomFactor > maxZoomOut and s.getZ()-zoomFactor > maxZoomOut:\n self.viewNP.setScale(s.getX()-zoomFactor,s.getY()-zoomFactor,s.getZ()-zoomFactor)\n self.nodeMgr.updateConnections()", "def zoomMap(self, scale, x=0, y=0):\n if self.zoomed:\n self.delete(self.zoomed)\n self.zoomed = self.im.zoom(scale, scale)\n zoomed_id = self.create_image(x, y, image=self.zoomed, anchor=NW)\n self.delete(self.original)\n self.scale = scale", "def lat_lons(self):", "def _autozoom(self):\n bounds = self._autobounds()\n attrs = {}\n\n midpoint = lambda a, b: (a + b)/2\n attrs['location'] = (\n midpoint(bounds['min_lat'], bounds['max_lat']),\n midpoint(bounds['min_lon'], bounds['max_lon'])\n )\n\n # remove the following with new Folium release\n # 
rough approximation, assuming max_zoom is 18\n import math\n try:\n lat_diff = bounds['max_lat'] - bounds['min_lat']\n lon_diff = bounds['max_lon'] - bounds['min_lon']\n area, max_area = lat_diff*lon_diff, 180*360\n if area:\n factor = 1 + max(0, 1 - self._width/1000)/2 + max(0, 1-area**0.5)/2\n zoom = math.log(area/max_area)/-factor\n else:\n zoom = self._default_zoom\n zoom = max(1, min(18, round(zoom)))\n attrs['zoom_start'] = zoom\n except ValueError as e:\n raise Exception('Check that your locations are lat-lon pairs', e)\n\n return attrs", "def _set_folium_map(self):", "def test_map_settings_custom():\n m = view(nybb, zoom_control=False, width=200, height=200, tiles=\"CartoDB positron\")\n assert m.location == [\n pytest.approx(40.70582377450201, rel=1e-6),\n pytest.approx(-73.9778006856748, rel=1e-6),\n ]\n assert m.options[\"zoom\"] == 10\n assert m.options[\"zoomControl\"] == False\n assert m.height == (200.0, \"px\")\n assert m.width == (200.0, \"px\")\n assert \"cartodbpositron\" in m.to_dict()[\"children\"].keys()\n\n # custom XYZ tiles\n m = view(\n nybb,\n zoom_control=False,\n width=200,\n height=200,\n tiles=\"https://mt1.google.com/vt/lyrs=m&x={x}&y={y}&z={z}\",\n attr=\"Google\",\n )\n\n out_str = _fetch_map_string(m)\n assert (\n 'tileLayer(\"https://mt1.google.com/vt/lyrs=m\\\\u0026x={x}\\\\u0026y={y}\\\\u0026z={z}\",{\"attribution\":\"Google\"'\n in out_str\n )\n\n m = view(nybb, location=(40, 5))\n assert m.location == [40, 5]\n assert m.options[\"zoom\"] == 10\n\n m = view(nybb, zoom_start=8)\n assert m.location == [\n pytest.approx(40.70582377450201, rel=1e-6),\n pytest.approx(-73.9778006856748, rel=1e-6),\n ]\n assert m.options[\"zoom\"] == 8\n\n m = view(nybb, location=(40, 5), zoom_start=8)\n assert m.location == [40, 5]\n assert m.options[\"zoom\"] == 8", "def set_coordinate(self):\n airqual_dictionary = self.realtime_data['stations'][0] #get the very first(recent) data/result\n self.latitude = airqual_dictionary['lat']\n self.longitude = airqual_dictionary['lng']", "def zoom_in(self, time=0.25):\n\n command = [0x00, 0x20, 0x00, 0x00]\n self.send_command(command)\n\n if time:\n sleep(time)\n self.stop()", "def small_map(self):\n self.map_url = \"https://maps.googleapis.com/maps/api/staticmap?center={},{}&zoom=12&size=350x350&key={}\".format(self.lat, self.lng, api_key) \n return (self.map_url)", "def get_map(self, center, zoom, raster_for_footprint=None, polygon_for_footprint=None):\n\n # Add map with rough footprint for mosaic\n preview_map = Map(basemap=basemaps.OpenStreetMap.Mapnik, center=center, zoom=zoom, scroll_wheel_zoom=True)\n \n if polygon_for_footprint is not None: \n preview_map.add_layer(Polygon(\n locations=polygon_for_footprint,\n color=\"green\",\n fill_color=\"green\"\n ))\n\n # Disable every draw control but rectangle\n draw_control = DrawControl()\n draw_control.polyline = {}\n draw_control.polygon = {}\n draw_control.circlemarker = {}\n draw_control.rectangle = {\n \"shapeOptions\": {\n \"fillColor\": \"#fca45d\",\n \"color\": \"#fca45d\",\n \"fillOpacity\": 0.2\n }\n }\n\n def handle_draw(draw_control_self, action, geo_json):\n # Take only the most recent recangle\n self.mosaic_selection['features'] = (geo_json)\n\n draw_control.on_draw(handle_draw)\n\n # Add rectangle controls\n preview_map.add_control(draw_control)\n\n # Enable full screen\n preview_map.add_control(FullScreenControl())\n\n # Display map\n return preview_map", "def update():\n\n # ensure parameters are present\n if not request.args.get(\"sw\"):\n raise 
RuntimeError(\"missing sw\")\n if not request.args.get(\"ne\"):\n raise RuntimeError(\"missing ne\")\n\n # ensure parameters are in lat,lng format\n if not re.search(\"^-?\\d+(?:\\.\\d+)?,-?\\d+(?:\\.\\d+)?$\", request.args.get(\"sw\")):\n raise RuntimeError(\"invalid sw\")\n if not re.search(\"^-?\\d+(?:\\.\\d+)?,-?\\d+(?:\\.\\d+)?$\", request.args.get(\"ne\")):\n raise RuntimeError(\"invalid ne\")\n\n # explode southwest corner into two variables\n (sw_lat, sw_lng) = [float(s) for s in request.args.get(\"sw\").split(\",\")]\n\n # explode northeast corner into two variables\n (ne_lat, ne_lng) = [float(s) for s in request.args.get(\"ne\").split(\",\")]\n\n # find stations within view\n if (sw_lng <= ne_lng):\n # doesn't cross the antimeridian\n\n stations = Station.query.join(Place).\\\n filter(db.and_(\n sw_lat <= Place.lat, Place.lat <= ne_lat,(db.and_(\n sw_lng <= Place.lng, Place.lng <= ne_lng)))).all()\n\n else:\n # crosses the antimeridian\n\n stations = Station.query.join(Place).\\\n filter(db.and_(\n sw_lat <= Place.lat, Place.lat <= ne_lat,(db.or_(\n sw_lng <= Place.lng, Place.lng <= ne_lng)))).all()\n\n result = geo_stations.dump(stations)\n\n return jsonify(result.data)", "def update_zoom_region(self):\n self.linear_region.setRegion(self.plot_zoom.getViewBox().viewRange()[0])", "def zoom_world_map(world_map, country_name):\n location = geolocator.geocode(country_name)\n latitude, longitude = location.latitude, location.longitude\n geo = dict(\n projection_scale=2, # this is kind of like zoom\n center=dict(lat=latitude, lon=longitude), # this will center on the point\n )\n world_map.update_layout(geo=geo)\n return world_map", "def zoom(self,event):\r\n if self.measurements == None:\r\n return\r\n if self.controlLock.locked():\r\n return\r\n self.controlLock.acquire()\r\n if len(self.dataPlayers) > 0:\r\n dp = self.dataPlayers[0]\r\n dp.zoom(event.delta*2/120)\r\n scalex,scaley = dp.getScale()\r\n # Do functions together\r\n for dp in self.dataPlayers:\r\n dp.setScaleX(scalex[0],scalex[1])\r\n for dp in self.dataPlayers:\r\n dp.draw()\r\n for dp in self.dataPlayers:# Update canvas together\r\n dp.redraw()\r\n self.controlLock.release()", "def zoom(self, dr):\n d = self.getDistance()\n vn = self.getViewNormal()\n vn *= dr*d\n GL.glTranslate(vn[0], vn[1], vn[2])", "def action_zoom_in(self):\n if self.cur_zoom < len(self.zoom_levels) - 1:\n self.cur_zoom += 1\n self.zoom_widget.setValue(self.cur_zoom)\n self.apply_zoom()", "def lonToTile(lon, zoom):\n n = 2.0 ** zoom\n return ((lon + 180.0) / 360.0) * n", "def setZoom(self, zoom):\r\n self._viewZoom = zoom", "def handle_click_map_view(subway: subway_system.Subway, buttons: pygame_buttons.Buttons,\n event: pygame.event.Event, shortest_path: list[str]) -> None:\n # Check if user left-clicked, if the MAP VIEW button is allowed to be pressed\n # and if the user actually pressed the MAP VIEW button\n if event.button == 1 and buttons.get_button_colour('map view') == 'blue' and \\\n buttons.was_pressed('map view', event.pos):\n # Get the locations of the stations in the shortest path\n locations = subway.get_locations(shortest_path)\n # Plot the shortest path on a map in plotly\n plotly_visualization.plot_shortest_path(shortest_path, locations)", "def updatePosition(self, scene):\n\n pos = scene.posFromLonLat(self._lon, self._lat)\n self.setPos(pos)\n if self._min_zoom is not None:\n self.setVisible(scene._zoom >= self._min_zoom)", "def map_ll_to_seviri(lon, lat):\n # new method\n # project lat/lon input to meteosat view, mask out of bounds 
data\n geos = pyproj.Proj(proj='geos', h=35785831.0,lon_0=0,lat_0=0,x_0=0,y_0=0,units='m')\n x,y = geos(lon,lat)\n x = ma.masked_equal(x,1e30)\n y = ma.masked_equal(y,1e30)\n # Convert to index. ~3000.5m per pixel, centre pixel index is [1855,1855]\n x = x/-3000.5+1855\n y = y/3000.5+1855\n return x,y\n # old method\n \"\"\"\n # Define Earth radius and geostationary orbit height in km and calucalte max\n # viewer angle\n r_sat = 42164.\n r_earth = 6378.\n zenith_max = np.arcsin(r_earth/r_sat)\n # convert lat/lon to cartesian coordinates\n x = np.cos(np.radians(lat)) * np.sin(np.radians(lon))\n y = np.sin(np.radians(lat))\n z = np.cos(np.radians(lat)) * np.cos(np.radians(lon))\n # x,y vector magnitude\n d = np.sqrt(x**2 + y**2)\n # Calculate footprint SEVIRI effective zenith angle and mask for > pi/2\n # values\n zenith = np.arctan2(d, z) + np.arctan2(r_earth*d, r_sat-r_earth*z)\n zenith_mask = np.abs(zenith) >= (0.5 * np.pi)\n # Calculate x and y viewer angles\n theta_x = np.arctan2(r_earth*x, r_sat-r_earth*z)\n theta_y = np.arctan2(r_earth*y, r_sat-r_earth*z)\n # Define SEVIRI global index range and offset\n # These should be the same on all files, but may need to check\n x_irange = 3623\n x_ioffset = 44\n y_irange = 3611\n y_ioffset = 51\n # Remap viewer angles to indexes using max viewer angle, index range and\n # offset. Note -ve theta_y as SEVIRI indexes the x-axis right to left(E-W)\n x_out = (1 - theta_x / zenith_max) * 0.5 * x_irange + x_ioffset\n y_out = (1 + theta_y / zenith_max) * 0.5 * y_irange + y_ioffset\n # Return masked arrays using the zenith angle mask\n return ma.array(x_out, mask=zenith_mask), ma.array(y_out, mask=zenith_mask)\n \"\"\"", "def latToTile(lat, zoom):\n n = 2.0 ** zoom\n lat_rad = math.radians(lat)\n return (1.0 - math.log(math.tan(lat_rad) + (1 / math.cos(lat_rad))) / math.pi) / 2.0 * n", "def mk_station_selector(on_select,\n stations=None,\n dst_map=None,\n **kw):\n import ipyleaflet as L\n\n if stations is None:\n stations = get_stations()\n\n stations = [st for st in stations if st.pos is not None]\n pos2st = {st.pos: st for st in stations}\n\n def on_click(event='', type='', coordinates=None):\n pos = tuple(coordinates)\n st = pos2st.get(pos)\n if st is None:\n # should probably log warning here\n print(\"Can't map click to station\")\n return\n\n on_select(st)\n\n markers = [L.Marker(location=st.pos,\n draggable=False,\n title=st.name)\n for st in stations]\n\n cluster = L.MarkerCluster(markers=markers)\n\n if dst_map is None:\n dst_map = L.Map(**kw)\n\n dst_map.add_layer(cluster)\n cluster.on_click(on_click)\n\n return dst_map, cluster", "def __init__(self, island_map):\n self.island_map = island_map\n self.landscape_dict = {'M': Mountain,\n 'O': Ocean,\n 'J': Jungle,\n 'S': Savannah,\n 'D': Desert}", "def ts_map(self, vmin=0, vmax=25, pixelsize=0.25):\n self.tsmap.plot(ait_kw=dict(pixelsize=pixelsize), vmin=vmin, vmax=vmax)\n fig=plt.gcf()\n fig.set_facecolor('white')\n return fig", "def make_map(data,LatLonBox):\n\n proj = ccrs.LambertConformal(central_longitude=data.StationLongitude,\n central_latitude=data.StationLatitude)\n\n fig = plt.figure(figsize=(17,11))\n ax = plt.subplot(111,projection=proj)\n \n ax.coastlines('50m', 'black', linewidth=2, zorder=2)\n\n reader = shpreader.Reader('/Users/chowdahead/Documents/shapefiles/countyl010g_shp_nt00964/countyl010g.shp')\n counties = list(reader.geometries())\n COUNTIES = cfeature.ShapelyFeature(counties,ccrs.PlateCarree())\n ax.add_feature(COUNTIES, facecolor='none',edgecolor='w')\n # Grab state 
borders\n state_borders = cfeature.NaturalEarthFeature(\n category='cultural', name='admin_1_states_provinces_lines',\n scale='50m', facecolor='none')\n ax.add_feature(state_borders, edgecolor='w', linewidth=1, zorder=3)\n \n ocean = cfeature.NaturalEarthFeature('physical', 'ocean', scale='50m',\n edgecolor='face',\n facecolor=cfeature.COLORS['water'])\n land = cfeature.NaturalEarthFeature('physical', 'land', scale='50m',\n edgecolor='face',\n facecolor=\"k\")\n\n ax.add_feature(ocean, zorder=-1)\n ax.add_feature(land, zorder=-1)\n ax.set_facecolor('black')\n \n ax.set_extent(LatLonBox,ccrs.PlateCarree())\n \n return fig,ax,proj", "def stations_on_map(request, country=None, cluster=None, subcluster=None):\n\n data_stations = stations_with_data()\n down, problem, up = status_lists()\n\n if country:\n get_object_or_404(Country, name=country)\n if cluster:\n get_object_or_404(Cluster, name=cluster, parent=None,\n country__name=country)\n if subcluster:\n if cluster == subcluster:\n get_object_or_404(Cluster, name=subcluster, parent=None)\n else:\n get_object_or_404(Cluster, name=subcluster,\n parent__name=cluster)\n focus = (Cluster.objects.filter(name=subcluster)\n .values_list('name', flat=True))\n else:\n focus = [Cluster.objects.get(name=cluster, parent=None).name]\n focus.extend(Cluster.objects.filter(parent__name=cluster)\n .values_list('name', flat=True))\n else:\n focus = (Cluster.objects.filter(country__name=country)\n .values_list('name', flat=True))\n else:\n focus = Cluster.objects.all().values_list('name', flat=True)\n\n subclusters = []\n for subcluster in Cluster.objects.all():\n stations = []\n for station in (Station.objects.select_related('cluster__parent',\n 'cluster__country')\n .filter(cluster=subcluster,\n pc__is_test=False)):\n link = station in data_stations\n status = get_station_status(station.number, down, problem, up)\n location = station.latest_location()\n station_data = {'number': station.number,\n 'name': station.name,\n 'cluster': station.cluster,\n 'link': link,\n 'status': status}\n station_data.update(location)\n stations.append(station_data)\n subclusters.append({'name': subcluster.name,\n 'stations': stations})\n\n return render(request, 'map.html',\n {'subclusters': subclusters,\n 'focus': focus})", "def on_zoom_change(self, event) -> None:\r\n\r\n zoom_level = int(self.zoom_scale.get())\r\n self.painter.zoom = zoom_level\r\n self.painter.draw_board()", "def home(request):\n # view_options = MVView(\n # projection='EPSG:4326',\n # center=[-70.6,18.75],\n # zoom=8,\n # maxZoom=18,\n # minZoom=2\n # )\n #\n # # Define GeoServer Layer\n # ozama_layer = MVLayer(source='ImageWMS',\n # options={'url': 'http://tethys.byu.edu:8181/geoserver/wms',\n # 'params': {'LAYERS': 'spt-30935191ace55f90bd1e61456f1ef016:dominican_republic-ozama-drainage_line'},\n # 'serverType': 'geoserver'},\n # legend_title='Ozama'\n # )\n # haina_layer = MVLayer(source='ImageWMS',\n # options={'url': 'http://tethys.byu.edu:8181/geoserver/wms',\n # 'params': {\n # 'LAYERS': 'spt-d7a8ccd9e71e5d7f9e8ecc2985206c8b:dominican_republic-haina-drainage_line'},\n # 'serverType': 'geoserver'},\n # legend_title='Haina'\n # )\n # dr_layer = MVLayer(source='ImageWMS',\n # options={'url': 'http://tethys.byu.edu:8181/geoserver/wms',\n # 'params': {\n # 'LAYERS': 'catalog:DominicanRepublic'},\n # 'serverType': 'geoserver'},\n # legend_title='DR'\n # )\n # # Define map view options\n # map_view_options = MapView(\n # height='600px',\n # width='100%',\n # controls=['ZoomSlider'],\n # 
layers=[ozama_layer,haina_layer,dr_layer],\n # view=view_options,\n # basemap='Bing'\n # )\n\n context = {}\n\n return render(request, 'glass/home.html', context)", "def enableZoomIn(self):\n self.zoomInID = self.canvas.mpl_connect('button_press_event', self.onZoomIn)\n self.master.config(cursor = \"cross\")", "def zoom(self, dr):\n d = self.getDistance()\n vn = self.getViewNormal()\n vn *= dr*d\n GL.glTranslatef(vn[0], vn[1], vn[2])", "def plot_map(locations, sel_street):\n\n kortteli = get_gml_data('Data/Paikkatieto/akaava-kortteli.gml')\n kiinteistot = get_gml_data('Data/Paikkatieto/kanta-kiinteisto.gml')\n\n base = kortteli.plot()\n kiinteistot.plot(ax=base, color='lightblue')\n locations.plot(ax=base, color='red', markersize=20)\n\n # selected location with a bigger and more red\n loc_df = pd.DataFrame()\n loc_df = loc_df.append(locations.iloc[int(sel_street)-1])\n loc_df = make_geodf(loc_df, lat_col_name='lat', lon_col_name='long')\n loc_df.plot(ax=base, color='darkred', markersize=100)\n\n plt.show()", "def toggle_airports(self):\n if self.locations_map.show_airports:\n self.locations_map.show_airports = False\n else:\n if self.locations_map.zoom > 5:\n self.locations_map.show_airports = True\n self.locations_map.start_getting_locations_in_fov()\n else:\n self.btn_toggle_airports.state = 'normal'\n show_message_popup(\"Zoom level must be greater than 5.\")", "def zoom(self, amount):\n pass", "def zoom(self):\n res = np.max(self.metadata[\"resolution\"])\n\n if self.atlas_name == \"allen_human_500um\":\n logger.debug(\n \"ATLAS: setting zoom manually for human atlas, atlas needs fixing\"\n )\n return 350\n else:\n return 40 / res", "def draw_centermap(lat, lon, size=(3,3), resolution='i', area_thresh=2000):\n return Basemap(projection='cyl', resolution=resolution, area_thresh=area_thresh,\n llcrnrlon=lon-size[1], llcrnrlat=lat-size[0],\n urcrnrlon=lon+size[1], urcrnrlat=lat+size[0])", "def wgs84_to_mercator(df, lon, lat):\n k = 6378137\n df[\"x\"] = df[lon] * (k * np.pi/180.0)\n df[\"y\"] = np.log(np.tan((90 + df[lat]) * np.pi/360.0)) * k\n return df", "def set_current_tool_to_zoom_in(self):\n\n self.variables.current_shape_id = self.variables.zoom_rect_id\n self.variables.active_tool = TOOLS.ZOOM_IN_TOOL\n self.variables.current_tool = TOOLS.ZOOM_IN_TOOL", "def map(self, color='#64D1B8', weight=6):\n centroid = [self.ring.centroid.xy[1][0], self.ring.centroid.xy[0][0]]\n result_map = folium.Map(location=centroid, tiles='OpenStreetMap')\n geojson_str = get_geojson_string(self.bbox['geometry'])\n folium.GeoJson(\n data=geojson_str,\n style_function=lambda x: {\n 'fillOpacity': 0,\n 'weight': weight,\n 'color': color\n }\n ).add_to(result_map)\n w,s,e,n = list(self.ring.bounds)\n result_map.fit_bounds([[s, w], [n, e]])\n if self.np_array is not None:\n blist = [[self.np_array_bounds[1], self.np_array_bounds[0]],[self.np_array_bounds[3], self.np_array_bounds[2]]]\n image_overlay = folium.raster_layers.ImageOverlay(self.np_array, blist)\n result_map.add_child(image_overlay)\n return result_map\n elif not self.tile_url:\n self.tile_url = self.get_image_url()\n if self.tile_url:\n result_map.add_tile_layer(tiles=self.tile_url, attr=f\"{self.instrument} image\")\n return result_map", "def zoom(self) -> Optional[int]:\n return self.get(\"/Zoom\", None)", "def map_viewing_client():\n\n # Read configuration settings\n config = gis.get_config()\n if config.opt_gis_layout == 1:\n window = True\n else:\n window = False\n\n # @ToDo Make Configurable\n toolbar = True\n\n map = 
define_map(window=window, toolbar=toolbar, config=config)\n\n response.title = T(\"Map Viewing Client\")\n return dict(map=map)", "def zoom(self,xmin,xmax,xlen,ymin,ymax,ylen):\n\n self.xmin = xmin\n self.xmax = xmax\n self.xlen = xlen\n self.ymin = ymin\n self.ymax = ymax\n self.ylen = ylen\n r = np.linspace(self.xmin, self.xmax,self.xlen)\n q = np.linspace(self.ymin, self.ymax, self.ylen)\n x,y = np.meshgrid(r,q)\n self.plane = x+y*1j\n print(self.plane)\n for i in range(len(self.fs)):\n self.apply(self.fs[i]) #Applies all functions in order.\n return self.plane", "def generateStationPlot(dir_path, traj_list, color_scheme='light'):\n\n\n # Choose the color scheme\n cs = MapColorScheme()\n \n if color_scheme == 'light':\n cs.light()\n\n else:\n cs.dark()\n\n\n plt.figure(figsize=(19.2, 10.8))\n\n # Init the map\n m = Basemap(projection='cyl', resolution='i')\n\n # Draw the coast boundary and fill the oceans with the given color\n m.drawmapboundary(fill_color=cs.map_background)\n\n # Fill continents, set lake color same as ocean color\n m.fillcontinents(color=cs.continents, lake_color=cs.lakes, zorder=1)\n\n # Draw country borders\n m.drawcountries(color=cs.countries)\n m.drawstates(color=cs.states, linestyle='--')\n\n\n\n ### PLOT WORLD MAP ###\n\n # Group stations into countries\n country_dict = {}\n for traj in traj_list:\n\n for obs in traj.observations:\n\n # Extract country code\n country_code = obs.station_id[:2]\n\n if country_code not in country_dict:\n country_dict[country_code] = {}\n \n\n if obs.station_id not in country_dict[country_code]:\n country_dict[country_code][obs.station_id] = [obs.lat, obs.lon]\n\n\n\n # Plot stations in all countries\n for country_code in country_dict:\n\n station_dict = country_dict[country_code]\n\n # Extract lat/lon\n lat = np.degrees([station_dict[station_id][0] for station_id in station_dict])\n lon = np.degrees([station_dict[station_id][1] for station_id in station_dict])\n\n # Convert lat/lon to x/y\n x, y = m(lon, lat)\n\n plt.scatter(x, y, s=0.75, zorder=5, label=\"{:s}: {:d}\".format(country_code, len(lat)))\n\n\n plt.legend(loc='lower left')\n\n plt.tight_layout()\n\n plt.savefig(os.path.join(dir_path, \"world_map.png\"), dpi=100)\n\n plt.close()\n\n ### ###", "def zoom(self):\n return self['zoom']", "def increment_zoom(self):\n if self._diving:\n self.mpl_mandelbrot.increment_zoom_anchored(self._zoom_frac_per_frame)", "def __zoomTo(self, value):\n if QApplication.focusWidget() == e5App().getObject(\"Shell\"):\n aw = e5App().getObject(\"Shell\")\n else:\n aw = self.activeWindow()\n if aw:\n aw.zoomTo(value)\n self.sbZoom.setValue(aw.getZoom())", "def zoom_units(self, units, center=None):\n # calculate pixels per unit etc\n unitscm = units\n cmsunit = 1 / float(unitscm)\n pixscm = 28.346457\n pixsunit = pixscm * cmsunit\n unitswidth = self.width / float(pixsunit) # use as the width of the bbox\n unitsheight = self.height / float(pixsunit) # use as the height of the bbox\n # zoom it\n newbbox = bboxhelper.resize_dimensions(self.coordspace_bbox,\n newwidth=unitswidth,\n newheight=unitsheight)\n # center it\n if center:\n newbbox = bboxhelper.center(newbbox, center)\n self.custom_space(*newbbox, lock_ratio=True)", "def run(self):\n self.iface.mapCanvas().setMapTool(self.tool)", "def zoomtoRec(self, xyMin, xyMax , crs=None):\n if crs is None:\n crs = self.getGetMapCrs()\n \n maxpoint = QgsPoint(xyMax[0], xyMax[1])\n minpoint = QgsPoint(xyMin[0], xyMin[1])\n \n pmaxpoint = self.prjPtToMapCrs(maxpoint, crs)\n pminpoint = 
self.prjPtToMapCrs(minpoint, crs)\n \n # Create a rectangle to cover the new extent\n rect = QgsRectangle( pmaxpoint, pminpoint )\n \n # Set the extent to our new rectangle\n self.iface.mapCanvas().setExtent(rect)\n # Refresh the map\n self.iface.mapCanvas().refresh()", "def show_map(self):\n self.m1.display()", "def __init__(\n self,\n title: str = \"OpenStreetMap\",\n zoom_start: float = 2.5,\n tiles=None,\n width: str = \"100%\",\n height: str = \"100%\",\n location: list = None,\n ):\n if not location:\n location = [47.67, -122.13]\n\n self.folium_map = folium.Map(\n zoom_start=zoom_start,\n tiles=tiles,\n width=width,\n height=height,\n location=location,\n control_scale=True,\n )\n folium.TileLayer(name=title).add_to(self.folium_map)\n self.locations: List[Tuple[float, float]] = []", "def displayMapTest(self):\n \n #To run the test do:\n #coastalDistanceMap = CoastalDistanceMap()\n #coastalDistanceMap.loadMap(os.path.join(os.getcwd(), 'GNSSR_Python', 'landDistGrid_0.10LLRes_hGSHHSres.nc'))\n #coastalDistanceMap.DisplayMapTest()\n \n mapPlotter = MapPlotter(200e3) #Map grid in km (at equator)\n\n coastDistance = np.zeros((mapPlotter.sizeLat, mapPlotter.sizeLon))\n lons = np.zeros((mapPlotter.sizeLat, mapPlotter.sizeLon))\n lats = np.zeros((mapPlotter.sizeLat, mapPlotter.sizeLon))\n\n for indexes, x in np.ndenumerate(coastDistance):\n lon = np.array(mapPlotter.scaleLon[indexes[1]])\n lat = np.array(mapPlotter.scaleLat[indexes[0]])\n\n # Fill in output table\n coastDistance[indexes[0]][indexes[1]] = self.getDistanceToCoast(lon, lat)\n \n #Reshape to 2D map\n np.reshape(coastDistance, (mapPlotter.sizeLon, mapPlotter.sizeLat))\n #Plot\n mapPlotter.plotMapStatic(coastDistance)", "def set_zoom_on_edit(self, should_zoom):\n self._should_zoom = should_zoom", "def zoom_markers(self, zoom=0.0, marker=-1, **kwargs):\n self._p('*{:.6f} {}'.format(zoom, marker),\n *[kwargs.get(k, -1) for k in 'abcdefghijklmnopqrstuvwxyz'])", "def process_zoom(self, status):\n log.debug(\"Zoom tool clicked %s\", status)\n if status == \"True\":\n self.auto_scale = False", "def tileToLat(tile, zoom):\n n = 2.0 ** zoom\n lat_rad = math.atan(math.sinh(math.pi * (1.0 - 2.0 * tile / n)))\n return math.degrees(lat_rad)", "def draw_map(data, title, output):\n import cartopy.crs as ccrs\n\n coords = get_lat_lon(data).values()\n\n lat = [coord[0] for coord in coords]\n lon = [coord[1] for coord in coords]\n\n ax = plt.axes(projection=ccrs.PlateCarree())\n ax.stock_img()\n ax.coastlines()\n ax.scatter(lon, lat, marker='o', s=50, alpha=0.8)\n ax.set_extent([-75, -20, -35, 5], crs=ccrs.PlateCarree())\n ax.set_title(title)\n plt.savefig(output)", "def map_event(self, widget, event):\n #self.configure_window(width, height)\n return self.make_callback('map')", "def onMapToolActivated(self, e):\n pass", "def test_update_zoom_rooms_location_structure(self):\n pass", "def main():\n # Constants\n groundstation_name = 'Wallops Antenna'\n groundstation_address = 'Radar Road, Temperanceville, VA 23442'\n satnum = 25544 # ISS = 25544\n saturl=\"http://www.celestrak.com/NORAD/elements/stations.txt\"\n gs_minimum_elevation_angle = 10.0\n\n # Alternate constants\n gs_alt_lat = 37.854886 # Only needed if address not found\n gs_alt_lon = -75.512936 # Ditto\n gs_alt_el_meters = 3.8 # Ditto\n gs_alt_tz_offset_seconds = -18000.0 # Ditto\n gs_tzname = 'US/Eastern'\n\n # Construct the ground station info\n try:\n # Try to use the address...\n gs = GroundStation.from_address(groundstation_address, \\\n groundstation_name, \\\n 
gs_minimum_elevation_angle)\n except:\n # Otherwise, use explicit location data...\n gs = GroundStation.from_location(gs_alt_lat, gs_alt_lon, \\\n gs_alt_el_meters, \\\n gs_tzname, \\\n groundstation_name, \\\n gs_minimum_elevation_angle)\n\n # Times we need\n now = datetime.now()\n gs_today = gs.get_tz().localize(datetime(now.year, now.month, now.day))\n gs_today_start = gs.get_tz().localize(datetime(now.year, now.month, now.day, \\\n 0, 0, 0)) \n gs_today_end = gs.get_tz().localize(datetime(now.year, now.month, now.day, \\\n 23, 59, 59))\n\n # Get the InviewCalculator and compute the inviews\n st = SatelliteTle(satnum, tle_url=saturl)\n ic = InviewCalculator(gs, st)\n inviews = ic.compute_inviews(gs_today_start, gs_today_end)\n\n # Print the results\n print_satellite_header(st)\n print_inview_header(gs.get_minimum_elevation_angle(), gs_today, gs)\n print_inviews(gs, inviews)\n print_azeltables(inviews, ic)", "def draw_on(self, folium_map):", "def zoom(self):\n return self.container['zoom']", "def zoom(cls, img, zoom):\n w, h = img.size\n x = h / 2\n y = w / 2\n zoom2 = zoom * 2\n img = img.crop((x - w / zoom2, y - h / zoom2,\n x + w / zoom2, y + h / zoom2))\n return img.resize((w, h), Image.LANCZOS)", "def toggle_airplanes(self):\n if self.locations_map.show_airplanes:\n self.locations_map.show_airplanes = False\n else:\n if self.locations_map.zoom > 5:\n self.locations_map.show_airplanes = True\n self.locations_map.start_getting_locations_in_fov()\n else:\n self.btn_toggle_airplanes.state = 'normal'\n show_message_popup(\"Zoom level must be greater than 5.\")", "def basic_map(proj):\n fig = plt.figure(figsize=(15, 10))\n add_metpy_logo(fig, 0, 80, size='large')\n view = fig.add_axes([0, 0, 1, 1], projection=proj)\n view.set_extent([-120, -70, 20, 50])\n view.add_feature(cfeature.STATES.with_scale('50m'))\n view.add_feature(cfeature.OCEAN)\n view.add_feature(cfeature.COASTLINE)\n view.add_feature(cfeature.BORDERS, linestyle=':')\n return fig, view", "def setStation(self, isStation: bool) -> None:", "def set_maprange(xmin, ymin, xmax, ymax, epsg_in='epsg:4326'):\n outProj = pyproj.Proj(init='epsg:3857')\n inProj = pyproj.Proj(init=epsg_in)\n xmin,ymin = 75, -55\n xmax,ymax = 175, -5\n x1,y1 = pyproj.transform(inProj,outProj,xmin,ymin)\n x2,y2 = pyproj.transform(inProj,outProj,xmax,ymax)\n return x1, y1, x2, y2", "def zoomToMark(self, mark, zoomLevel):\n\n # get the center of the mark\n point = mark.mapToScene(mark.pos())\n\n # and zoom to it\n self.zoomToPoint(point, zoomLevel)", "def on_station_member_station_updated(\n self, func,\n ):\n self._set_event_handler(\"stations\")\n self._events.on_station_member_station_updated(func)", "def make_folium_map(station_coord):\r\n map = folium.Map(location=station_coord[0], tiles='Cartodb Positron', zoom_start=13)\r\n for point in range(0, len(station_coord)):\r\n folium.Marker(station_coord[point]).add_to(map)\r\n\r\n return map", "def maplot_subs(va500, cmax=1, colormin=-999, precip='no'):\n from mpl_toolkits.basemap import Basemap, shiftgrid, addcyclic\n import numpy as np\n import matplotlib as mpl\n import matplotlib.pyplot as plt\n from netcdfread import ncread\n fig = plt.figure(facecolor='w', edgecolor='k', linewidth=2)\n\n def plotter(pdata, colormax=1, colormin=-999, title=''):\n lon = ncread('/network/aopp/hera/mad/bakerh/HAPPI/HadAM3P-N96/All-Hist/mon/tas/item3236_monthly_mean_a011_2006-01_2016-12.nc', 'longitude0')\n lat = 
ncread('/network/aopp/hera/mad/bakerh/HAPPI/HadAM3P-N96/All-Hist/mon/tas/item3236_monthly_mean_a011_2006-01_2016-12.nc', 'latitude0')\n #lat = ncread('/network/aopp/hera/mad/bakerh/HAPPI/HadAM3P-N96/Plus15-Future_LCO2/day/ua/item15201_daily_mean_a00b_2090-01_2100-12.nc', 'latitude1') \n if colormin == -999:\n colormin = -colormax\n pdata, lon = shiftgrid(180., pdata, lon, start=False)\n pdata, lon = addcyclic(pdata, lon)\n meshlon, meshlat = np.meshgrid(lon, lat)\n\n m = Basemap(projection='cyl', llcrnrlat=-90, urcrnrlat=90,\n llcrnrlon=-180, urcrnrlon=180, resolution='c')\n m.drawcoastlines()\n m.drawmapboundary()\n x, y = m(meshlon, meshlat)\n mycmap2 = plt.cm.YlOrRd(np.arange(256))\n mycmap1 = plt.cm.Blues_r(np.arange(256))\n my_cmap = np.concatenate((mycmap1, mycmap2), axis=0)\n my_cmap[230:282, :] = 1\n if precip == 'yes':\n my_cmap = my_cmap[::-1]\n newcmap = mpl.colors.LinearSegmentedColormap.from_list(\"newjet\", my_cmap)\n ctrs = np.linspace(colormin, colormax, 17)\n plot = m.contourf(x, y, pdata, ctrs,\n cmap=newcmap, vmin=np.min(ctrs), vmax=np.max(ctrs),\n extend='both')\n\n plt.title(title, y=1)\n plt.show()\n return plot\n\n ax1 = fig.add_subplot(3, 3, 1)\n plotter(np.mean(va500['All-Hist'][1],axis=0)-273.15,colormax=cmax*40,title='All-Hist (multiply scale by 40)')\n ax2 = fig.add_subplot(3, 3, 2)\n plotter(np.mean(va500['All-Hist'][1], axis=0)-np.mean(va500['All-Nat'][1],axis=0),colormax=cmax,title='All-Hist - All-Nat')\n ax3 = fig.add_subplot(3, 3, 3)\n plotter(np.mean(va500['Plus15-Future'][1], axis=0)-np.mean(va500['All-Hist'][1],axis=0),colormax=cmax,title='Plus15-Future - All-Hist')\n ax4 = fig.add_subplot(3, 3, 4)\n plotter(np.mean(va500['All-Hist'][1], axis=0)-np.mean(va500['GHG-Nat'][1],axis=0),colormax=cmax,title='All-Hist - GHG-Nat')\n ax5 = fig.add_subplot(3, 3, 5)\n plotter(np.mean(va500['SST-Nat'][1], axis=0)-np.mean(va500['All-Nat'][1],axis=0),colormax=cmax,title='SST-Nat - All-Nat')\n ax6 = fig.add_subplot(3, 3, 6)\n plotter(np.mean(va500['Plus15-Future_HCO2'][1], axis=0)-np.mean(va500['Plus15-Future_LCO2'][1],axis=0),colormax=cmax,title='Plus15-Future_HCO2 - Plus15-Future_LCO2')\n ax7 = fig.add_subplot(3, 3, 7)\n plotter(np.mean(va500['All-Hist'][1], axis=0)-np.mean(va500['SST-Nat'][1],axis=0),colormax=cmax,title='All-Hist - SST-Nat')\n ax8 = fig.add_subplot(3, 3, 9)\n plotter(np.mean(va500['Plus15-Future_LCO2'][1], axis=0)-np.mean(va500['All-Hist'][1],axis=0),colormax=cmax,title='Plus15-Future_LCO2 - All-Hist')\n ax9 = fig.add_subplot(3, 3, 8)\n plot = plotter(np.mean(va500['GHG-Nat'][1], axis=0)-np.mean(va500['All-Nat'][1],axis=0),colormax=cmax,title='GHG-Nat - All-Nat')\n\n cbar_ax = fig.add_axes([0.2, 0.1, 0.6, 0.015])\n b = fig.colorbar(plot, cax=cbar_ax, spacing='proportional',\n orientation='horizontal', extend='max')\n \n b.set_label(label='t200 difference ($^\\circ$C)', size=20, fontsize=20, fontname='Arial')\n plt.subplots_adjust(hspace=0, wspace=0.05, top=.97, bottom=0.15, left=.05,\n right=.95)", "def map_property(self, linc):\n sleep(1)\n linc = '{}'.format(linc).zfill(10)\n self.driver.switch_to_frame('fOpts')\n select_box = Select(self.driver.find_element_by_id('Finds_lstFindTypes'))\n select_box.select_by_visible_text('Linc Number')\n linc_box = self.driver.find_element_by_id('Finds_ctlLincNumber_txtLincNumber')\n linc_box.clear()\n linc_box.send_keys(linc)\n self.driver.find_element_by_id('Finds_cmdSubmit').click()\n if self.spatial_count == 0:\n sleep(8)\n sleep(4)\n self.driver.switch_to_default_content()\n if self.spatial_count == 0:\n 
try:\n e = WebDriverWait(self.driver, 10).until(\n EC.presence_of_element_located((By.ID, 'map'))\n )\n except:\n self.driver.refresh()\n self.driver.switch_to_default_content()\n e = WebDriverWait(self.driver, 60).until(\n EC.presence_of_element_located((By.ID, 'map'))\n )\n hover_target = self.driver.find_element_by_id('map')\n if self.spatial_count == 0:\n sleep(5)\n map_location = hover_target.location\n map_size = hover_target.size\n filename = 'data/sites/{}.png'.format(linc)\n self.driver.save_screenshot(filename)\n x = map_location['x'] + 50\n y = map_location['y']\n width = map_location['x'] + map_size['width'] - 50\n height = map_location['y'] + map_size['height']\n im = Image.open(filename)\n im = im.crop((int(x), int(y), int(width), int(height)))\n im.save(filename)\n if self.spatial_count == 0:\n sleep(5)\n ActionChains(self.driver).move_to_element(hover_target).drag_and_drop_by_offset(hover_target, 1, 1).perform()\n if self.spatial_count == 0:\n sleep(5)\n nad83_raw = self.driver.find_element_by_id('coordinateOutput').text\n nad83 = tuple(re.findall(r\"[0-9\\.\\-]+\", nad83_raw))\n gps = Geography().nad83(nad83, reverse=True)\n gpsr = (gps[1], gps[0])\n self.spatial_count += 1\n return gpsr", "def update_map(year: int, sea_level: float) -> any:\n ctx = dash.callback_context\n\n # run on start-up or if year_slider is used\n if ctx.triggered[0]['prop_id'] == '.' or ctx.triggered[0]['prop_id'] == 'year_slider.value':\n\n # get the predicted mean sea level at a given year in mm and convert to m\n predictions = pd.read_csv('data_predictions.csv')\n row_id = predictions.index[predictions['year'] == year].tolist()\n val = predictions.loc[row_id[0]]['mean_sea_level']\n sea_level = val / 1000\n\n # run_file() function will call other computation functions in canada_dsm.py\n run_file('elevation_data.asc', sea_level)\n\n # run if sea_level_slider is used\n else:\n run_file('elevation_data.asc', sea_level)\n\n return display_map()", "def recalc_view_lonlat_limits(self):\n\n self.view_llon = self.map_llon + self.view_offset_x / self.ppd_x\n self.view_rlon = self.view_llon + self.view_width / self.ppd_x\n\n self.view_tlat = self.map_tlat - self.view_offset_y / self.ppd_y\n self.view_blat = self.view_tlat - self.view_height / self.ppd_y", "def change_zoom(self, b):\n\n x_mid = int(self.ff[0].info['xres'] / 2)\n y_mid = int(self.ff[0].info['yres'] / 2)\n\n x = x_mid - self.x_crop_slider.value\n\n if self.y_crop.value is True:\n y = y_mid - self.y_crop_slider.value\n else:\n y = y_mid - self.x_crop_slider.value\n\n x0 = x_mid - x\n x1 = x_mid + x\n y0 = y_mid - y\n y1 = y_mid + y\n\n self.x_range = [x0, x1]\n self.y_range = [y0, y1]\n\n self.ax.set_xlim([x0, x1])\n self.ax.set_ylim([y0, y1])", "def onResize(self, event=None):\n\n # get new size of the view\n (self.view_width, self.view_height) = self.GetClientSizeTuple()\n\n # if map > view in X axis\n if self.map_width > self.view_width:\n self.max_x_offset = self.map_width - self.view_width\n # do nothing unless background is showing\n # if map left edge right of view edge\n if self.view_offset_x < 0:\n # move view to hide background at left\n self.view_offset_x = 0\n elif self.view_offset_x + self.view_width > self.map_width:\n # move view to hide background at right\n self.view_offset_x = self.map_width - self.view_width\n else:\n # else view >= map - centre map in X direction\n self.max_x_offset = self.map_width - self.view_width\n self.view_offset_x = self.max_x_offset / 2\n\n # if map > view in Y axis\n if self.map_height > 
self.view_height:\n self.max_y_offset = self.map_height - self.view_height\n # do nothing unless background is showing\n # if map top edge below view edge\n if self.view_offset_y < 0:\n # move view to hide background at top\n self.view_offset_y = 0\n elif self.view_offset_y + self.view_height > self.map_height:\n # move view to hide background at bottom\n self.view_offset_y = self.map_height - self.view_height\n else:\n # else view >= map - centre map in Y direction\n self.max_y_offset = self.map_height - self.view_height\n self.view_offset_y = self.max_y_offset / 2\n\n # set the left/right/top/bottom lon/lat extents\n self.recalc_view_lonlat_limits()\n\n # redraw tiles & layers\n self.drawTilesLayers(clear=True)", "def covid_map(geojson, add):\r\n #make the map centered at Philly center\r\n m = folium.Map(location=[39.99, -75.13],tiles='Cartodb Positron',zoom_start=11)\r\n\r\n folium.GeoJson(\r\n geojson,\r\n style_function=get_style,\r\n highlight_function=get_highlighted_style,\r\n tooltip=folium.GeoJsonTooltip(['zip_code', 'covid_cases'])\r\n ).add_to(m)\r\n\r\n #Add the point which indicates the user's location\r\n point_cor =get_address(add)\r\n\r\n folium.Marker(location=point_cor,\r\n icon=folium.Icon(color='black')).add_to(m)\r\n\r\n return m", "def __zoom(self):\n if QApplication.focusWidget() == e5App().getObject(\"Shell\"):\n aw = e5App().getObject(\"Shell\")\n else:\n aw = self.activeWindow()\n if aw:\n from QScintilla.ZoomDialog import ZoomDialog\n dlg = ZoomDialog(aw.getZoom(), self.ui, None, True)\n if dlg.exec_() == QDialog.Accepted:\n value = dlg.getZoomSize()\n self.__zoomTo(value)", "def on_mouse_wheel(self, event):\n delta = event.delta[1]\n if delta > 0: # Zoom in\n factor = 0.9\n elif delta < 0: # Zoom out\n factor = 1 / 0.9\n for _ in range(int(abs(delta))):\n self.zoom(factor, event.pos)", "def _onbuttonReqMapClicked(self):\n\n day1=str(self.combo_wms_time_first_d.currentText())\n hour1=str(self.combo_wms_time_first_h.currentText())\n date_val=day1+hour1\n depth=str(self.combo_wms_layer_depth.currentText())\n variable=str(self.combo_variable_list.currentText())\n product=str(self.combo_product_list.currentText())\n dataset=str(self.combo_dataset_list.currentText())\n xmin=int(float(self.WMS_westBound.text()))\n xmax=int(float(self.WMS_eastBound.text()))\n ymin=int(float(self.WMS_southBound.text()))\n ymax=int(float(self.WMS_northBound.text()))\n dir_out=self.tmp\n rastermin=self.minscale_value.text()\n rastermax=self.maxscale_value.text()\n nb_colors=self.nbcolors_value.text()\n xpixels=float(self.Xpixels_value.text())\n xparallels=int(self.Xparallels_value.text())\n ymeridians=int(self.Ymedians_value.text())\n dpi=300\n colorbar=str(self.combo_colorbar.currentText())\n input_srs=str(self.combo_proj.currentText())\n epsg_val=input_srs.split(':')[1]\n ll_polar=False\n##\tif self.checkBox_2.isChecked() == True :\n##\t print \"Projection arctic\"\n## #m = Basemap(llcrnrlon=xmin, urcrnrlat=ymax,\n## # urcrnrlon=xmax, llcrnrlat=ymin,resolution='l',epsg=epsg_val) \n## ##m = Basemap(projection='npstere',boundinglat=ymin,lon_0=0,round=True,resolution='l') \n## m = Basemap(projection='npstere',boundinglat=ymin,lon_0=0,round=True,resolution='l') \n## #Proj4js.defs[\"EPSG:3408\"] = \"+proj=laea +lat_0=90 +lon_0=0 +x_0=0 +y_0=0 +a=6371228 +b=6371228 +units=m +no_defs\";\n## #\n## ll_polar=True\n##\telif self.checkBox_3.isChecked() == True :\n##\t print \"Projection antarctic\"\n## m = Basemap(projection='spstere',boundinglat=ymax,lon_0=180,round=True,resolution='l') \n## 
ll_polar=True\n##\telse : \n m = Basemap(llcrnrlon=xmin, urcrnrlat=ymax,\n urcrnrlon=xmax, llcrnrlat=ymin,resolution='l',epsg=epsg_val) \n print \"cylindric projection\"\n\n # ypixels not given, find by scaling xpixels by the map aspect ratio.\n ypixels = int(m.aspect*xpixels)\n style='boxfill/'+colorbar\n print input_srs\n print epsg_val\n p = pyproj.Proj(init=\"epsg:%s\" % epsg_val, preserve_units=True)\n xmin,ymin = p(m.llcrnrlon,m.llcrnrlat)\n xmax,ymax = p(m.urcrnrlon,m.urcrnrlat)\n if epsg_val == '4326' :\n xmin = (180./np.pi)*xmin; xmax = (180./np.pi)*xmax\n ymin = (180./np.pi)*ymin; ymax = (180./np.pi)*ymax\n print \"Cylindric projection\"\n print xmin,xmax,ymin,ymax\n print style\n img = self.wms.getmap(layers=[variable],service='wms',bbox=(xmin,ymin,xmax,ymax),\n size=(int(xpixels),ypixels),\n format='image/png',\n elevation=depth,\n srs=input_srs,\n time=date_val,\n colorscalerange=rastermin+','+rastermax,numcolorbands=nb_colors,logscale=False,\n styles=[style])\n image=imread(io.BytesIO(img.read()),format='png')\n if variable == \"sea_water_velocity\" :\n ylabel=\"magnitude\"\n else :\n ylabel=self.wms[variable].abstract\n\n long_name=self.wms[variable].title\n title=product+\" - \"+long_name+\" \"+\" - \"+date_val\n file_pal='./palettes/thredds/'+colorbar+'.pal'\n my_cmap=compute_cmap(file_pal,colorbar)\n cm.register_cmap(name=colorbar, cmap=my_cmap)\n font=10\n norm = mpl.colors.Normalize(vmin=float(rastermin), vmax=float(rastermax), clip=False) \n parallels=np.round(np.arange(ymin,ymax+xparallels/2,xparallels))\n meridians = np.round(np.arange(xmin,xmax+ymeridians/2,ymeridians))\n # Plot figure \n plt.figure(figsize=(20,12))\n if epsg_val == '4326' :\n m.drawcoastlines(color='lightgrey',linewidth=0.25)\n m.fillcontinents(color='lightgrey')\n m.drawmeridians(meridians,labels=[0,0,0,1],fontsize=10,linewidth=0.2,dashes=[1, 5])\n m.drawparallels(parallels,labels=[1,0,0,0],fontsize=10,linewidth=0.2)\n\n elif ll_polar == True : \n #m.drawcoastlines(linewidth=0.5)\n m.drawparallels(parallels,labels=[1,0,0,0],fontsize=10,linewidth=0.2)\n m.drawmeridians(meridians[:-1],labels=[1,1,1,1],fontsize=10,linewidth=0.2,dashes=[1, 5])\n ## Plot the image\n cs=m.imshow(image,origin='upper',alpha=1,cmap=(cm.get_cmap(colorbar,int(nb_colors))),norm=norm)\n ## Add colorbar\n cb=plt.colorbar(cs,orientation='vertical',format='%4.2f',shrink=0.7)\n cb.ax.set_ylabel(ylabel, fontsize=int(font)+4)\n cl=plt.getp(cb.ax, 'ymajorticklabels')\n plt.setp(cl, fontsize=font)\n\n plt.title(title,fontsize=font+4,y=1.05)\n plt.savefig('images/'+product+\"_\"+long_name+\"_\"+date_val+\"_basemap.png\",dpi=300,bbox_inches='tight')\n plt.show()", "def zoom(\n self,\n departement=[\"75\", \"92\", \"93\", \"94\"],\n center=(-133583.39, 5971815.98),\n radius=650000,\n startAngle=math.pi * (1 - 2.5 * 1 / 9),\n factor=2,\n):\n\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=ShapelyDeprecationWarning)\n df = self\n if all([x in df.columns for x in [\"insee_dep\", \"geometry\"]]):\n\n zoomDep = df[df[\"insee_dep\"].isin(departement)].reset_index(drop=True)\n\n if len(zoomDep.index) > 0:\n\n zoomDep = _rescale_geom(df=zoomDep, factor=factor)\n end = Point(center[0] + radius, center[1])\n line = LineString([center, end])\n\n line = rotate(line, startAngle, origin=center, use_radians=True)\n endPoint = Point(line.coords[1])\n center = _get_center(zoomDep)\n\n xoff = endPoint.coords.xy[0][0] - center[0]\n yoff = endPoint.coords.xy[1][0] - center[1]\n\n zoomDep[\"geometry\"] = 
zoomDep[\"geometry\"].apply(\n lambda x: trs(x, xoff=xoff, yoff=yoff)\n )\n df = pd.concat([self, zoomDep]).reset_index(drop=True)\n\n return df" ]
[ "0.62788576", "0.6039453", "0.58890414", "0.58117414", "0.5672202", "0.5654756", "0.56279266", "0.55998397", "0.55568296", "0.55527616", "0.5512002", "0.54508984", "0.5449918", "0.5449918", "0.5449918", "0.5449918", "0.54433745", "0.54276776", "0.5427263", "0.53882915", "0.5378679", "0.5368002", "0.5357868", "0.5346166", "0.5326071", "0.5302074", "0.5294098", "0.52754194", "0.52568877", "0.5242984", "0.523453", "0.52254075", "0.5224901", "0.52167207", "0.52120984", "0.52117413", "0.5199956", "0.51969206", "0.5192506", "0.51767266", "0.51662886", "0.51536846", "0.5152247", "0.51425225", "0.513149", "0.51255274", "0.5124447", "0.5118338", "0.511796", "0.511009", "0.51082665", "0.5108154", "0.51044583", "0.5104349", "0.50942594", "0.5083305", "0.50689834", "0.50521994", "0.5041218", "0.5038935", "0.5038392", "0.5023874", "0.5021162", "0.50142634", "0.5000989", "0.49988356", "0.49945936", "0.49907908", "0.4981258", "0.49803954", "0.4977128", "0.49757555", "0.49642187", "0.49631804", "0.49437046", "0.49410427", "0.49360225", "0.49349055", "0.4929658", "0.49206534", "0.49127647", "0.49037772", "0.4900195", "0.48931366", "0.48881036", "0.48848993", "0.48834056", "0.487665", "0.48741102", "0.48709202", "0.48678178", "0.48636517", "0.48597288", "0.48533487", "0.48497665", "0.48435035", "0.48428515", "0.48387343", "0.48361805", "0.4830924" ]
document_score: 0.6076124
document_rank: 1
query: Show all stations from a subcluster on a map
document:
def stations_on_map(request, country=None, cluster=None, subcluster=None):

    data_stations = stations_with_data()
    down, problem, up = status_lists()

    if country:
        get_object_or_404(Country, name=country)
        if cluster:
            get_object_or_404(Cluster, name=cluster, parent=None,
                              country__name=country)
            if subcluster:
                if cluster == subcluster:
                    get_object_or_404(Cluster, name=subcluster, parent=None)
                else:
                    get_object_or_404(Cluster, name=subcluster,
                                      parent__name=cluster)
                focus = (Cluster.objects.filter(name=subcluster)
                                        .values_list('name', flat=True))
            else:
                focus = [Cluster.objects.get(name=cluster, parent=None).name]
                focus.extend(Cluster.objects.filter(parent__name=cluster)
                                            .values_list('name', flat=True))
        else:
            focus = (Cluster.objects.filter(country__name=country)
                                    .values_list('name', flat=True))
    else:
        focus = Cluster.objects.all().values_list('name', flat=True)

    subclusters = []
    for subcluster in Cluster.objects.all():
        stations = []
        for station in (Station.objects.select_related('cluster__parent',
                                                       'cluster__country')
                                       .filter(cluster=subcluster,
                                               pc__is_test=False)):
            link = station in data_stations
            status = get_station_status(station.number, down, problem, up)
            location = station.latest_location()
            station_data = {'number': station.number,
                            'name': station.name,
                            'cluster': station.cluster,
                            'link': link,
                            'status': status}
            station_data.update(location)
            stations.append(station_data)
        subclusters.append({'name': subcluster.name,
                            'stations': stations})

    return render(request, 'map.html',
                  {'subclusters': subclusters,
                   'focus': focus})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def station_on_map(request, station_number):\n\n data_stations = stations_with_data()\n station_number = int(station_number)\n down, problem, up = status_lists()\n\n station = get_object_or_404(Station, number=station_number)\n center = station.latest_location()\n if center['latitude'] is None and center['longitude'] is None:\n raise Http404\n\n subclusters = []\n for subcluster in Cluster.objects.all():\n stations = []\n for station in (Station.objects.select_related('cluster__parent',\n 'cluster__country')\n .filter(cluster=subcluster,\n pc__is_test=False)):\n link = station in data_stations\n status = get_station_status(station.number, down, problem, up)\n location = station.latest_location()\n station_data = {'number': station.number,\n 'name': station.name,\n 'cluster': station.cluster,\n 'link': link,\n 'status': status}\n station_data.update(location)\n stations.append(station_data)\n subclusters.append({'name': subcluster.name,\n 'stations': stations})\n\n return render(request, 'map.html',\n {'subclusters': subclusters,\n 'center': center})", "def stations():\n\n return station_list", "def show_clusters(self):\n cluster_ids = [\n self.controller.cluster and self.controller.cluster['id']\n ]\n self.print_list(\n ('id', 'name', 'status'), self.controller.get_clusters(),\n lambda x: cluster_ids.index(x['id'])\n )", "def atlas_clusters():\n pass", "def printStations(self):\n print(\"Bus numero \" + str(self._num) + \" :\")\n for i in range(len(self._stations)) :\n print(self._stations[i])\n print('\\n')", "def cluster(self):\n\n result_nominatim = self.nominatim()\n try:\n coord = [(float( i['lat'] ), float( i['lon'] )) for i in result_nominatim]\n except:\n return None\n #print( \"coord\", coord )\n kms_per_radian = 6371.0088\n # Augmenter cette valeur augmente le nombre d'éléments dans un cluster et change les résultats\n epsilon = 2 / kms_per_radian\n # Adapter le nombre de clusters (min_sample) au nombre d'entités dans array ?\n db = DBSCAN( eps=epsilon, min_samples=1, algorithm='ball_tree',\n metric='haversine' ).fit( np.radians( coord ) )\n cluster_labels = db.labels_\n #print( \"cluster\", cluster_labels )\n num_clusters = len( set( cluster_labels ) )\n #print( \"num clusters\", num_clusters )\n counts = np.bincount( cluster_labels )\n #print( \"count\", counts )\n maxi = np.argmax( counts )\n #print( \"maxi\", maxi )\n itemindex = np.where( cluster_labels == maxi )[0]\n #print( \"itemindex\", itemindex )\n\n lat: List[float] = [float( result_nominatim[index]['lat'] ) for index in itemindex]\n lon: List[float] = [float( result_nominatim[index]['lon'] ) for index in itemindex]\n\n # on récupère la moyenne des coordonnées du plus gros cluster. 
Cette moyenne équivaut au centroide :\n # https://gis.stackexchange.com/questions/12120/calculate-midpoint-from-a-series-of-latitude-and-longitude-coordinates\n\n average = {\"lat\": sum( lat ) / len( lat ), \"lon\": sum( lon ) / len( lon )}\n\n #print( list( zip( cluster_labels, [x['display_name'] for x in results] ) ) )\n #print( \"plus proche de moyenne\", closest( results, average ) )\n return closest( result_nominatim, average )", "def get_stations(self):\n return self.__request('stations')['stations']", "def station_list(request):\n center=request.session.__getitem__('center')\n ctrs = connection.Station.find({'cn': center.__unicode__()})\n return render(request, 'list_station.html',\n {'ctrs': ctrs}, content_type=\"text/html\")", "def station_viewer():\r\n name = request.args[\"address\"]\r\n stations = get_zipcode_stations(name)\r\n\r\n if len(stations) > 0:\r\n stations['coordinate'] = 'end_point='+stations['name'].astype(str)+'&'+'end_lng=' + stations['lon'].astype(str)+'&'+'end_lat='+stations['lat'].astype(str)\r\n\r\n #genetrate folium map\r\n station_coordinates = stations[[\"lat\", \"lon\"]].values.tolist()\r\n\r\n map=make_folium_map(station_coordinates)\r\n\r\n\r\n # generate interactive map\r\n\r\n return render_template(\r\n \"page3.html\",\r\n num_stations=get_num_stations(name),\r\n address=name,\r\n stations=stations[[\"name\", \"address\", \"available_bikes\", 'coordinate']].values,\r\n map=map._repr_html_()\r\n )\r\n\r\n else:\r\n lng=get_address(name)[1]\r\n lat=get_address(name)[0]\r\n near_bike = find_5near_stations(lng, lat)\r\n near_bike['coordinate'] = 'end_point='+near_bike['name'].astype(str)+'&'+'end_lng=' + near_bike['lon'].astype(str)+'&'+'end_lat='+near_bike['lat'].astype(str)\r\n\r\n return render_template(\r\n \"page3_1b_nobike.html\",\r\n address=name,\r\n near_bike_table=near_bike[[\"name\", \"address\", \"available_bikes\", \"coordinate\", \"distance\"]].values)", "def stations():\n results = session.query(Measurement.station).\\\n group_by(Measurement.station).all()\n\n return jsonify(results)", "def stations():\n # Query all station names from dataset\n station_list = session.query(Measurement.station).distinct().all()\n all_stations = list(np.ravel(station_list))\n\n return jsonify(all_stations)", "def _get_cluster_list(self):\n return self.__cluster_list", "def cluster_stations(stations, empty='empty'):\n if empty == 'empty':\n tocluster = [i for i in stations if (i[3] - i[2])/float(i[3]) < .2]\n else:\n tocluster = [i for i in stations if (i[2])/float(i[3]) < .2]\n cl = KMeansClustering([(i[4], i[5]) for i in tocluster])\n clusters = cl.getclusters(4)\n\n # Note that this returns a list of lists of lat/long tuples. 
We're\n # going to have to re-associate them back to the rest of the stations\n\n clustered = []\n for ix, i in enumerate(clusters):\n for j in i:\n for k in tocluster:\n if (j[0], j[1]) == (k[4], k[5]):\n clustered.append([k[0], k[1], k[2],\n k[3], k[4], k[5], ix+1])\n\n return clustered", "def stations():\n \n session = Session(engine)\n # Query to bring all stations\n results = pd.DataFrame(session.query(S.id.label('ID'),S.station.label('Station'),S.name.label('Name'),\\\n S.latitude.label('Latitude'),S.longitude.label('Longitude'), \\\n S.elevation.label('Elevation')).all())\n \n session.close()\n \n # Create a dictionary from the row data of the dataframe and return it as a JSON\n return jsonify(results.to_dict(orient = 'records'))", "def add_stations_to_map(gdf_stations: gpd.GeoDataFrame, basemap):\n # make a feature group for stations\n fg_stations = folium.FeatureGroup(name='Railway Stations', show=False)\n for j, rowj in gdf_stations.iterrows():\n folium.CircleMarker(\n location=rowj[\"folium_geom\"],\n radius=5,\n tooltip=f\"{rowj['name']}\",\n popup=f\"{rowj['name']}\",\n color=\"ffff00\",\n fill=True,\n fill_color=\"black\"\n ).add_to(fg_stations)\n basemap.add_child(fg_stations)\n \n return None", "def displayMapTest(self):\n \n #To run the test do:\n #coastalDistanceMap = CoastalDistanceMap()\n #coastalDistanceMap.loadMap(os.path.join(os.getcwd(), 'GNSSR_Python', 'landDistGrid_0.10LLRes_hGSHHSres.nc'))\n #coastalDistanceMap.DisplayMapTest()\n \n mapPlotter = MapPlotter(200e3) #Map grid in km (at equator)\n\n coastDistance = np.zeros((mapPlotter.sizeLat, mapPlotter.sizeLon))\n lons = np.zeros((mapPlotter.sizeLat, mapPlotter.sizeLon))\n lats = np.zeros((mapPlotter.sizeLat, mapPlotter.sizeLon))\n\n for indexes, x in np.ndenumerate(coastDistance):\n lon = np.array(mapPlotter.scaleLon[indexes[1]])\n lat = np.array(mapPlotter.scaleLat[indexes[0]])\n\n # Fill in output table\n coastDistance[indexes[0]][indexes[1]] = self.getDistanceToCoast(lon, lat)\n \n #Reshape to 2D map\n np.reshape(coastDistance, (mapPlotter.sizeLon, mapPlotter.sizeLat))\n #Plot\n mapPlotter.plotMapStatic(coastDistance)", "def get_all_locations(self):", "def stations():\n results = session.query(Station.station,Station.name).all()\n key=[results[i][0] for i in range(len(results))]\n values=[results[i][1] for i in range(len(results))]\n results=dict(zip(key,values))\n print(f\"Route /api/v1.0/stations is being visited\")\n return jsonify(results)", "def show_clusters() -> Dict[str, Cluster]:\n environment = EnvironmentProvider().environment\n return {key: value for key, value in environment.clusters.items()}", "def stations(self):\n for stat in sorted(self.station_records):\n yield self.station_records[stat]", "def stations():\n # Query \n results = session.query(Station.station).all()\n \n list = []\n for result in results:\n list.append(result)\n return jsonify(list)", "def map_service_catalogue():\n if deployment_settings.get_security_map() and not s3_has_role(\"MapAdmin\"):\n unauthorised()\n\n subtitle = T(\"List Layers\")\n # Start building the Return with the common items\n output = dict(subtitle=subtitle)\n\n # Hack: We control all perms from this 1 table\n table = db.gis_layer_openstreetmap\n authorised = s3_has_permission(\"update\", table)\n item_list = []\n even = True\n if authorised:\n # List View with checkboxes to Enable/Disable layers\n for type in gis_layer_types:\n table = db[\"gis_layer_%s\" % type]\n query = table.id > 0\n sqlrows = db(query).select()\n for row in sqlrows:\n if even:\n 
theclass = \"even\"\n even = False\n else:\n theclass = \"odd\"\n even = True\n if row.description:\n description = row.description\n else:\n description = \"\"\n label = \"%s_%s\" % (type, str(row.id))\n if row.enabled:\n enabled = INPUT(_type=\"checkbox\", value=True, _name=label)\n else:\n enabled = INPUT(_type=\"checkbox\", _name=label)\n item_list.append(TR(TD(A(row.name,\n _href=URL(r=request, f=\"layer_%s\" % type,\n args=row.id))),\n TD(description),\n TD(enabled),\n _class=theclass))\n # Feature Layers\n type = \"feature\"\n for row in db(db.gis_layer_feature.id > 0).select():\n if even:\n theclass = \"even\"\n even = False\n else:\n theclass = \"odd\"\n even = True\n if row.comments:\n description = row.comments\n else:\n description = \"\"\n label = \"%s_%s\" % (type, str(row.id))\n if row.enabled:\n enabled = INPUT(_type=\"checkbox\", value=True, _name=label)\n else:\n enabled = INPUT(_type=\"checkbox\", _name=label)\n item_list.append(TR(TD(A(row.name,\n _href=URL(r=request, f=\"layer_feature\",\n args=row.id))),\n TD(description),\n TD(enabled),\n _class=theclass))\n\n table_header = THEAD(TR(TH(\"Layer\"),\n TH(\"Description\"),\n TH(\"Enabled?\")))\n table_footer = TFOOT(TR(TD(INPUT(_id=\"submit_button\",\n _type=\"submit\",\n _value=T(\"Update\")),\n _colspan=3)),\n _align=\"right\")\n items = DIV(FORM(TABLE(table_header,\n TBODY(item_list),\n table_footer,\n _id=\"table-container\"),\n _name=\"custom\",\n _method=\"post\",\n _enctype=\"multipart/form-data\",\n _action=URL(r=request, f=\"layers_enable\")))\n\n else:\n # Simple List View\n for type in gis_layer_types:\n table = db[\"gis_layer_%s\" % type]\n query = table.id > 0\n sqlrows = db(query).select()\n for row in sqlrows:\n if even:\n theclass = \"even\"\n even = False\n else:\n theclass = \"odd\"\n even = True\n if row.description:\n description = row.description\n else:\n description = \"\"\n if row.enabled:\n enabled = INPUT(_type=\"checkbox\",\n value=\"on\",\n _disabled=\"disabled\")\n else:\n enabled = INPUT(_type=\"checkbox\",\n _disabled=\"disabled\")\n item_list.append(TR(TD(A(row.name,\n _href=URL(r=request, f=\"layer_%s\" % type,\n args=row.id))),\n TD(description),\n TD(enabled),\n _class=theclass))\n # Feature Layers\n type = \"feature\"\n table = db[\"gis_layer_%s\" % type]\n query = table.id > 0\n sqlrows = db(query).select()\n for row in sqlrows:\n if even:\n theclass = \"even\"\n even = False\n else:\n theclass = \"odd\"\n even = True\n if row.comments:\n description = row.comments\n else:\n description = \"\"\n if row.enabled:\n enabled = INPUT(_type=\"checkbox\",\n value=\"on\",\n _disabled=\"disabled\")\n else:\n enabled = INPUT(_type=\"checkbox\", _disabled=\"disabled\")\n item_list.append(TR(TD(A(row.name,\n _href=URL(r=request, f=\"layer_feature\",\n args=row.id))),\n TD(description),\n TD(enabled),\n _class=theclass))\n\n table_header = THEAD(TR(TH(\"Layer\"), TH(\"Description\"), TH(\"Enabled?\")))\n items = DIV(TABLE(table_header, TBODY(item_list), _id=\"table-container\"))\n\n output.update(dict(items=items))\n return output", "def stations ():\n # Query all passengers\n Stns= session.query(Measurement.station, func.count(Measurement.station)).group_by(Measurement.station).all()\n\n allStationns = list(np.ravel(Stns))\n\n return jsonify(allStations)", "def _get_ogd_stations():\n return {r[\"Station\"] for r in ZamgData.current_observations()}", "def stations():\r\n # Query all passengers\r\n results = session.query(Station.station, \r\n Station.name, \r\n Station.latitude,\r\n 
Station.longitude,\r\n Station.elevation).all()\r\n\r\n return jsonify(results)", "def station_list() -> List[Dict]:\n return STATIONS", "def stations():\n # Query all stations\n results = session.query(Measurement.station).group_by(Measurement.station).all()\n all_sessions = list(np.ravel(results))\n return jsonify(all_sessions)", "def getStations(self) :\n return self._stations", "def get_all_stations(session: Session) -> List[Row]:\n return session.query(PlanningWeatherStation.station_code).all()", "def station_map(data, station_counter):\n r = 6373.0\n\n phi = np.cos(55.943894)\n\n stations = list(set(data['start_station_name'].tolist() + data['end_station_name'].tolist()))\n dict = {name: () for name in stations}\n for index, row in data.iterrows():\n dict[row['start_station_name']] = (row['start_station_latitude'], row['start_station_longitude'])\n dict[row['end_station_name']] = (row['end_station_latitude'], row['end_station_longitude'])\n # number of stations\n n = len(dict.keys())\n\n # Convert to XY coordinates\n dict_xy = {ids: (station_counter[ids], (r * dict[ids][1] * phi, r * dict[ids][0])) for ids in dict.keys()}\n\n journeys, loc = zip(*list(dict_xy.values()))\n x, y = zip(*list(loc))\n journeys = [i for i in journeys]\n\n fig, ax = plt.subplots(figsize=(13, 8))\n plt.scatter(x, y, s=journeys)\n\n for ii in dict_xy.keys():\n ax.annotate(ii, dict_xy[ii][1], dict_xy[ii][1])\n\n plt.show()", "def DisplayCentroids(Centroids,outputs,ax,N=1,sections=1):\r\n\r\n SliceValues = np.linspace(float(min(Centroids[:,0])),float(max(Centroids[:,0])),sections+1) # Create boundaries in x for each slice.\r\n idx1 = np.asarray((Centroids[:,0]>=SliceValues[N-1]))*np.asarray((Centroids[:,0]<=SliceValues[N]))\r\n\r\n idx1 = idx1.flatten() \r\n\r\n CentroidSlice = Centroids[idx1,:]\r\n \r\n outputSlice = outputs[idx1,:]\r\n\r\n # Plot Data-------------------------------------------------------------------------------------------------------\r\n ax.scatter(CentroidSlice[:,0],CentroidSlice[:,1],CentroidSlice[:,2],c = [float(N) for N in outputSlice],cmap = 'bwr')\r\n ax.set_zlabel('z')\r\n ax.set_ylabel('y')\r\n ax.set_xlabel('x')", "def stations():\n\n # Query all Stations\n station_results = session.query(Station.station).all()\n\n # Convert list of tuples into normal list\n all_station_names = list(np.ravel(station_results))\n\n return jsonify(all_station_names)", "async def getAll():\n return [cluster.export() for cluster in clusters.get_all()]", "def stations():\n results = session.query(Station.station).all()\n stations = list(np.revel(results))\n return jsonify(stations)", "def strech_list(sector, subgraphs_):\n\n strechs=[]\n subs=conv_sub(subgraphs_)\n for j in range(len(subs)):\n si=len(set(sector)&set(subs[j]))-subgraphs_[j].NLoopSub()\n strechs+=[1000+j]*si\n return list(set(strechs))", "def icos_stations(*args):\n\n if len(args) != 4:\n filterstr = \" \"\n else:\n filterstr = \"\"\"\n filter(\n ?lat >= %s && ?lat <= %s &&\n ?lon >= %s && ?lon <= %s).\"\"\" % (args)\n\n\n query = \"\"\"\n PREFIX cpst: <http://meta.icos-cp.eu/ontologies/stationentry/>\n SELECT\n (IF(bound(?lat), str(?lat), \"?\") AS ?latstr)\n (IF(bound(?lon), str(?lon), \"?\") AS ?lonstr)\n (REPLACE(str(?class),\"http://meta.icos-cp.eu/ontologies/stationentry/\", \"\") AS ?themeShort)\n (str(?country) AS ?Country)\n (str(?sName) AS ?Short_name)\n (str(?lName) AS ?Long_name)\n (GROUP_CONCAT(?piLname; separator=\";\") AS ?PI_names)\n (str(?siteType) AS ?Site_type)\n FROM <http://meta.icos-cp.eu/resources/stationentry/>\n 
WHERE {\n ?s cpst:hasCountry ?country .\n ?s cpst:hasShortName ?sName .\n ?s cpst:hasLongName ?lName .\n ?s cpst:hasSiteType ?siteType .\n ?s cpst:hasPi ?pi .\n ?pi cpst:hasLastName ?piLname .\n ?s a ?class .\n OPTIONAL{?s cpst:hasLat ?lat } .\n OPTIONAL{?s cpst:hasLon ?lon } .\n OPTIONAL{?s cpst:hasSpatialReference ?spatRef } .\n OPTIONAL{?pi cpst:hasFirstName ?piFname } .\n %s\n }\n GROUP BY ?lat ?lon ?class ?country ?sName ?lName ?siteType\n ORDER BY ?themeShort ?sName\n \"\"\" %filterstr\n\n return query", "def clusters(self,project_id=os.environ.get(\"ATLAS_PROJECT\")):\n project_id = project_id if project_id != '' else self.__project_id\n return self.get('{}/groups/{}/clusters'.format(ApiVersion.A1.value,project_id))", "def stations():\n # Create a link to the session\n session = Session(engine)\n \n # Query all station records\n results = session.query(Stations.station, Stations.name).all()\n \n session.close()\n\n # Create a dictionary from the query results\n all_stations = []\n for station, name in results:\n station_dict = {}\n station_dict[\"station\"] = station\n station_dict[\"name\"] = name\n all_stations.append(station_dict)\n \n return jsonify(all_stations)", "def clusters(self):\n raise NotImplementedError", "def show_subgraph(dfs_codes, nsupport, mapper):\n\tglobal __subgraph_count\n\n\tg = build_graph(dfs_codes)\n\tg.id = __subgraph_count\n\t__subgraph_count += 1\n\tg.gprint(nsupport, mapper)", "def stations():\n\t\n\n\tstationquery = session.query(Station.station).all()\n\n\tstationlist = list(np.ravel(stationquery))\n\t\n\treturn jsonify(stationlist)", "def stations():\n list_of_stations = session.query(Station.station, Station.name)\n all_stations = []\n for s, n in list_of_stations:\n station_dict = {}\n station_dict[\"station\"] = s\n station_dict[\"name\"] = n\n all_stations.append(station_dict)\n return jsonify(all_stations)", "def stations_dict(self):\n return self.__stations_dict", "def cluster_list():\n request_debug(r, logger)\n json_body = r.get_json(force=True, silent=True) or {}\n result = cluster_handler.list(filter_data=json_body)\n response_ok[\"data\"] = result\n return jsonify(response_ok), CODE_OK", "def stations():\n Stationlist = session.query(Station.name).all()\n session.close()\n # Unravel results into a 1D array and convert to a list\n allstations = list(np.ravel(Stationlist))\n return jsonify(allstations)", "def get_all_stations(engine): \n # Query db\n sql = (\"SELECT DISTINCT a.station_id, \"\n \" a.station_code, \"\n \" a.station_name, \"\n \" c.station_type, \"\n \" d.latitude, \"\n \" d.longitude \"\n \"FROM nivadatabase.projects_stations a, \"\n \" nivadatabase.stations b, \"\n \" nivadatabase.station_types c, \"\n \" niva_geometry.sample_points d \"\n \"WHERE a.station_id = b.station_id \"\n \"AND b.station_type_id = c.station_type_id \"\n \"AND b.geom_ref_id = d.sample_point_id \"\n \"ORDER BY a.station_id\")\n df = pd.read_sql(sql, engine)\n\n return df", "def show_cluster(self):\n if self.controller.cluster:\n self.print_object(\n 'cluster', ('id', 'name', 'status'), self.controller.cluster\n )\n else:\n print(\"There is no cluster.\")", "def get_clusters(self):\n fields = ['name', ]\n return self.get_data(\"clusters\", fields)", "def stations():\n stats_all=session.query(stations.station).group_by(stations.station).all()\n station_df=pd.DataFrame(stats_all)\n station_dict= station_df.to_dict()\n return jsonify(station_dict)", "def get_stations():\n response = requests.get('https://api.hh.ru/metro/160')\n todos = 
json.loads(response.text)\n colors = {'CD0505': 'red'}\n all_stations_one_line = []\n\n for i in todos['lines']:\n all_stations_one_line = []\n\n for j in i['stations']:\n one_station = station.station()\n one_station.set_name(j['name'])\n one_station.set_color(colors.get(i['hex_color']))\n one_station.set_lat(j['lat'])\n one_station.set_lng(j['lng'])\n all_stations_one_line.append(one_station)\n return all_stations_one_line", "def stations(): \n # creating the Docstring\n session = Session(engine)\n\n # creat the Query stations\n\n stations_qu = session.query(measurement.station).group_by(measurement.station).all()\n\n # Converting the list of tuples into a normal list\n stations_qu_dict = list(np.ravel(stations_qu))\n session.close()\n\n return jsonify(stations_qu_dict)", "def stations():\n # Query all stations\n\n stations = session.query(Station.station).all()\n all_stations = list(np.ravel(stations))\n\n return jsonify(all_stations)", "def get_all_servers_locations():\n servers = get_servers()\n locations = {}\n\n for server in servers:\n if not server.location.country_code:\n continue\n\n if server.location.continent_code not in locations:\n locations[server.location.continent_code] = {\n 'name': server.location.continent_name,\n 'countries': {}\n }\n\n if server.location.country_code not in locations[server.location.continent_code]['countries']:\n locations[server.location.continent_code]['countries'][server.location.country_code] = server.location.country_name\n\n ret = []\n\n for continent_code, continent in locations.items():\n group = {\n 'type': 'group',\n 'value': 'continent:' + continent_code,\n 'label': continent['name'],\n 'entries': []\n }\n\n for country_code, country_name in continent['countries'].items():\n group['entries'].append({\n 'value': 'country:' + country_code,\n 'label': country_name\n })\n\n group['entries'] = sorted(group['entries'], key=lambda k: k['label'])\n\n ret.append(group)\n\n ret = sorted(ret, key=lambda k: k['label'])\n\n # Extra location filters\n ret.append({\n 'value': 'continent:eu+continent:na',\n 'label': 'Europe + North America'\n })\n\n return ret", "def generateStationPlot(dir_path, traj_list, color_scheme='light'):\n\n\n # Choose the color scheme\n cs = MapColorScheme()\n \n if color_scheme == 'light':\n cs.light()\n\n else:\n cs.dark()\n\n\n plt.figure(figsize=(19.2, 10.8))\n\n # Init the map\n m = Basemap(projection='cyl', resolution='i')\n\n # Draw the coast boundary and fill the oceans with the given color\n m.drawmapboundary(fill_color=cs.map_background)\n\n # Fill continents, set lake color same as ocean color\n m.fillcontinents(color=cs.continents, lake_color=cs.lakes, zorder=1)\n\n # Draw country borders\n m.drawcountries(color=cs.countries)\n m.drawstates(color=cs.states, linestyle='--')\n\n\n\n ### PLOT WORLD MAP ###\n\n # Group stations into countries\n country_dict = {}\n for traj in traj_list:\n\n for obs in traj.observations:\n\n # Extract country code\n country_code = obs.station_id[:2]\n\n if country_code not in country_dict:\n country_dict[country_code] = {}\n \n\n if obs.station_id not in country_dict[country_code]:\n country_dict[country_code][obs.station_id] = [obs.lat, obs.lon]\n\n\n\n # Plot stations in all countries\n for country_code in country_dict:\n\n station_dict = country_dict[country_code]\n\n # Extract lat/lon\n lat = np.degrees([station_dict[station_id][0] for station_id in station_dict])\n lon = np.degrees([station_dict[station_id][1] for station_id in station_dict])\n\n # Convert lat/lon to x/y\n x, y = 
m(lon, lat)\n\n plt.scatter(x, y, s=0.75, zorder=5, label=\"{:s}: {:d}\".format(country_code, len(lat)))\n\n\n plt.legend(loc='lower left')\n\n plt.tight_layout()\n\n plt.savefig(os.path.join(dir_path, \"world_map.png\"), dpi=100)\n\n plt.close()\n\n ### ###", "def get_stations(base_url, hts, mtype):\n stns1 = ws.site_list(base_url, hts, location='LatLong') # There's a problem with Hilltop that requires running the site list without a measurement first...\n stns1 = ws.site_list(base_url, hts, location='LatLong', measurement=mtype)\n stns2 = stns1[(stns1.lat > -47.5) & (stns1.lat < -34) & (stns1.lon > 166) & (stns1.lon < 179)].dropna().copy()\n stns2.rename(columns={'SiteName': 'ref'}, inplace=True)\n\n return stns2", "def stations():\n session = Session(engine)\n # Query all Stations\n stations = session.query(Station.station).all()\n\n # Convert list of tuples into normal list\n all_stations = list(np.ravel(stations))\n\n return jsonify(all_stations)", "def plot_clusters(self):\n pass", "def display_map(map):\n for row in map:\n line = \"\"\n for point in row:\n line += point.display_point()\n print(line)", "def Clusters(self):\n return", "def listNatura2000Locations(self):\n vocab = (\n ('inside', 'location_inside'),\n ('near', 'location_near'),\n )\n return DisplayList(vocab)", "def stations_call():\n # Query all stations\n stations_call = session.query(Station.station).all()\n all_stations = list(np.ravel(stations_call))\n \n return jsonify(all_stations)", "def train_stations(self) -> List[str]:\n return sorted([train_info['HE'] for train_info in train_api.stations_info.values()])", "def plot_clusters(data_table, cluster_list, draw_centers = False):\n\tfips_to_line = {}\n\tfor line_idx in range(len(data_table)):\n\t\tfips_to_line[data_table[line_idx][0]] = line_idx\n\n\t# Load map image\n\tmap_file = urllib2.urlopen(MAP_URL)\n\tmap_img = plt.imread(map_file)\n\n\t# Scale plot to get size similar to CodeSkulptor version\n\typixels, xpixels, bands = map_img.shape\n\tDPI = 60.0 # adjust this constant to resize your plot\n\txinch = xpixels / DPI\n\tyinch = ypixels / DPI\n\tplt.figure(figsize=(xinch,yinch))\n\timplot = plt.imshow(map_img)\n\n\t# draw the counties colored by cluster on the map\n\tif not draw_centers:\n\t\tfor cluster_idx in range(len(cluster_list)):\n\t\t\tcluster = cluster_list[cluster_idx]\n\t\t\tcluster_color = COLORS[cluster_idx % len(COLORS)]\n\t\t\tfor fips_code in cluster.fips_codes():\n\t\t\t\tline = data_table[fips_to_line[fips_code]]\n\t\t\t\tplt.scatter(x = [line[1]], y = [line[2]], s = circle_area(line[3]), lw = 1,\n\t\t\t\t\t\t\tfacecolors = cluster_color, edgecolors = cluster_color)\n\n\t# add cluster centers and lines from center to counties\n\telse:\n\t\tfor cluster_idx in range(len(cluster_list)):\n\t\t\tcluster = cluster_list[cluster_idx]\n\t\t\tcluster_color = COLORS[cluster_idx % len(COLORS)]\n\t\t\tfor fips_code in cluster.fips_codes():\n\t\t\t\tline = data_table[fips_to_line[fips_code]]\n\t\t\t\tplt.scatter(x = [line[1]], y = [line[2]], s = circle_area(line[3]), lw = 1,\n\t\t\t\t\t\t\tfacecolors = cluster_color, edgecolors = cluster_color, zorder = 1)\n\t\tfor cluster_idx in range(len(cluster_list)):\n\t\t\tcluster = cluster_list[cluster_idx]\n\t\t\tcluster_color = COLORS[cluster_idx % len(COLORS)]\n\t\t\tcluster_center = (cluster.horiz_center(), cluster.vert_center())\n\t\t\tfor fips_code in cluster.fips_codes():\n\t\t\t\tline = data_table[fips_to_line[fips_code]]\n\t\t\t\tplt.plot( [cluster_center[0], line[1]],[cluster_center[1], line[2]], 
cluster_color, lw=1, zorder = 2)\n\t\tfor cluster_idx in range(len(cluster_list)):\n\t\t\tcluster = cluster_list[cluster_idx]\n\t\t\tcluster_color = COLORS[cluster_idx % len(COLORS)]\n\t\t\tcluster_center = (cluster.horiz_center(), cluster.vert_center())\n\t\t\tcluster_pop = cluster.total_population()\n\t\t\tplt.scatter(x = [cluster_center[0]], y = [cluster_center[1]], s = circle_area(cluster_pop), lw = 2,\n\t\t\t\t\t\tfacecolors = \"none\", edgecolors = \"black\", zorder = 3)\n\n\tplt.show()", "def stations():\n\n station_results = session.query(Stations.station, Stations.name).all()\n\n station_data = []\n for row in station_results:\n station_dict = {}\n station_dict[\"station\"] = row.station\n station_dict[\"name\"] = row.name\n station_data.append(station_dict)\n\n return jsonify(station_data)", "def show_overview(self) -> None:\n print(f\"\\n\\nCluster overview:\")\n all_clusters = self.get_all_clusters()\n print(f\" - Total of {len(all_clusters)} clusters\")\n if all_clusters:\n cluster_lengths = [len(v) for v in all_clusters.values()]\n print(f\" - Average number of cluster-labels: {round(sum(cluster_lengths) / len(cluster_lengths), 2)}\")", "def get_stations(self, limit=250):\n\n endpoint = \"/station/getStations\"\n response = self._send(endpoint, \"POST\", {\"pageSize\": limit})\n stations = response.json()[\"stations\"]\n return stations", "def stations():\n \n # Query all the stations\n results = session.query(Station).all()\n\n # Create a dictionary to append the station data\n stations_info = []\n for stations in results:\n stations_dict = {}\n stations_dict[\"Station\"] = stations.station\n stations_dict[\"Station Name\"] = stations.name\n stations_dict[\"Latitude\"] = stations.latitude\n stations_dict[\"Longitude\"] = stations.longitude\n stations_dict[\"Elevation\"] = stations.elevation\n all_stations.append(stations_dict)\n \n return jsonify(stations_info)", "def stations():\n \n station_result = session.query(Station.station).all()\n stations = []\n # Convert list of tuples into normal list\n stations = list(np.ravel(station_result))\n return jsonify(stations)", "def stations():\n # Return a JSON list of stations from the dataset\n session = Session(engine)\n stations = session.query(Station.name).all()\n\n # Convert list of tuples into normal list\n station_names = list(np.ravel(stations))\n\n return jsonify(station_names)", "def add_ska1_v5(self, r_min=None, r_max=None):\n # Load the station coordinates.\n path = os.path.dirname(os.path.abspath(__file__))\n coords = np.loadtxt(join(path, 'data', 'v5_enu.txt'))\n x, y, z = coords[:, 0], coords[:, 1], coords[:, 2]\n r = (x**2 + y**2)**0.5\n\n cluster_radius = 90 # This just seems to work (not confirmed)\n\n if r_min and r_max:\n idx = np.where(np.logical_and(r >= r_min, r <= r_max))\n x, y, z = x[idx], y[idx], z[idx]\n elif r_min:\n idx = np.where(r >= r_min)\n x, y, z = x[idx], y[idx], z[idx]\n elif r_max:\n idx = np.where(r <= r_max)\n x, y, z = x[idx], y[idx], z[idx]\n\n # Get the cluster centres within the given range.\n cluster_x, cluster_y, _ = \\\n TelescopeLayout.cluster_centres_ska_v5(r_min, r_max)\n\n # Loop over clusters and extract stations within a 90 m radius.\n for cx, cy in zip(cluster_x, cluster_y):\n dr = ((x - cx)**2 + (y - cy)**2)**0.5\n idx = np.where(dr <= cluster_radius)\n tx, ty, tz = x[idx], y[idx], z[idx]\n # num_clusters += tx.size\n # r_ = (tx**2 + ty**2)**0.5\n # r_min = min(r_min, r_.min())\n # r_max = max(r_max, r_.max())\n if tx.size > 0:\n cluster_count = 0\n for name in self.layouts:\n 
if name.startswith('ska1_v5_cluster'):\n cluster_count += 1\n self.layouts['ska1_v5_cluster_%03i' % cluster_count] = {\n 'x': tx, 'y': ty, 'z': tz, 'cx': cx, 'cy': cy,\n 'cr': cluster_radius, 'r_min': r_min, 'r_max': r_max}\n x = np.delete(x, idx)\n y = np.delete(y, idx)\n z = np.delete(z, idx)\n if x.size > 0:\n # Add any remaining stations that were not assigned to a cluster.\n count = 0\n for name in self.layouts:\n if name.startswith('ska1_v5') and not '_cluster' in name:\n count += 1\n key_ = 'ska1_v5_%03i' % count\n self.layouts[key_] = dict(x=x, y=y, z=z, r_min=r_min, r_max=r_max)", "def show(self):\n if self.nodes_ is None:\n logging.debug(\"Segment - Nothing to show. Skipping.\")\n return\n\n if len(self.polygons_) != 0:\n logging.debug(\"Segment - Showing 3D Segments using `vedo`.\")\n logging.warning(\"Segment - Showing 3D Segments can be slow!.\")\n\n import vedo\n\n points = vedo.Points(self.nodes)\n lines = []\n for p in self.polygons:\n p = np.asarray(p).astype(np.int32)\n lines.append(vedo.Line(self.nodes[p]))\n\n vedo.show([points, *lines]).show().close()\n\n else:\n logging.debug(\"Segment - Showing 2D Segments using `matplotlib`.\")\n\n import matplotlib.pyplot as plt\n\n plt.scatter(\n self.nodes_[:, 0],\n self.nodes_[:, 1],\n c=\"pink\",\n zorder=1000,\n )\n\n for c in self.connectivity_:\n plt.plot(\n self.nodes_[c][:,0],\n self.nodes_[c][:,1],\n c=\"grey\",\n lw=2,\n zorder=10,\n )\n\n plt.show()", "def stations():\n # Query all stations before a given date 2017\n results = session.query(Measurement.date, Measurement.tobs).filter(func.strftime(\"%Y\", Measurement.date) >= \"2017\").all()\n all_results = list(np.ravel(results))\n \n return jsonify(all_results)", "def stations():\n # Create link from Python to db\n session = Session(engine)\n\n # Query stations.\n stations = session.query(Station.station, Station.name, Station.latitude, Station.longitude, Station.elevation).all()\n\n session.close()\n\n # Convert to a dictionary.\n all_stations = []\n for station, name, latitude, longitude, elevation in stations:\n station_dict = {}\n station_dict[\"station\"] = station\n station_dict[\"name\"] = name\n station_dict[\"latitude\"] = latitude\n station_dict[\"longitude\"] = longitude\n station_dict[\"elevation\"] = elevation\n all_stations.append(station_dict)\n\n # Return JSON\n return jsonify(all_stations)", "def stations():\n print(\"server received request for stations data...\")\n return jsonify(stations_data)", "def list_cluster(self, ip, x_api_session):\n log.log_debug(\"cluster object list is started\")\n list_object = ListModule.ListModule()\n object_list = list_object.listing(\"uom\", ip,\n self.root, self.content_type,\n \"Cluster\", x_api_session)\n log.log_debug(\"cluster object list is returned\")\n return object_list", "def stations():\n results = session.query(Station.name).all()\n station_names = list(np.ravel(results))\n\n return jsonify(station_names)", "def list_clusters(ctx, project_name):\n project = ctx.obj.groups.byName[project_name].get().data\n clusters = ctx.obj.groups[project.id].clusters.get()\n pprint(clusters.data)", "def list_vsan_clusters(self, detail=False, params=None, return_body=False):\n url = 'clusters'\n if detail:\n url += '/detail'\n if params:\n url += '?%s' % self._prepare_params(params)\n\n key = None if return_body else 'clusters'\n return self._ext_get(url, key)", "def query(self, page) -> [str, dict]:\n params = {'size': self.max_page_size,\n 'sort': 'displayLabel', 'page': page}\n url = 
'{base_url}/v1/stations'.format(base_url=self.base_url)\n return [url, params]", "def get_zipcode_stations(add):\r\n name=get_zipcode_names(add)\r\n engine = get_sql_engine()\r\n neighborhood_stations = text(\r\n \"\"\"\r\n SELECT\r\n \"name\" as name,\r\n \"addressStreet\" as address,\r\n \"bikesAvailable\" as available_bikes,\r\n v.geom as geom,\r\n ST_X(v.geom) as lon, ST_Y(v.geom)as lat\r\n FROM indego_rt1130 as v\r\n JOIN philly_zipcode as n\r\n ON ST_Intersects(v.geom, n.geom)\r\n WHERE n.code = :name\r\n \"\"\"\r\n )\r\n stations = gpd.read_postgis(neighborhood_stations, con=engine, params={\"name\": name})\r\n return stations", "def processStationInfo(obs_loc_df, source, st_list=None):\n if not st_list:\n st_list = dict()\n st_data = obs_loc_df['station_id']\n lat_data = obs_loc_df['latitude (degree)']\n lon_data = obs_loc_df['longitude (degree)']\n\n for k, station_name in enumerate(st_data):\n if station_name in st_list:\n pass\n else:\n st_list[station_name] = dict()\n st_list[station_name][\"lat\"] = lat_data[k]\n st_list[station_name][\"source\"] = source\n st_list[station_name][\"lon\"] = lon_data[k]\n print(station_name)\n\n print(\"Number of stations in bbox {}\".format(len(st_list.keys())))\n return st_list", "def stations():\n \n # Create our session (link) from Python to the DB\n session = Session(engine)\n\n results = session.query(Station.station, Station.name, Station.latitude, Station.longitude, Station.elevation).all()\n\n session.close()\n\n stations = []\n for result in results:\n station_dict = {}\n station_dict[\"station\"] = result.station\n station_dict[\"name\"] = result.name\n station_dict[\"latitude\"] = result.latitude\n station_dict[\"longitude\"] = result.longitude\n station_dict[\"elevation\"] = result.elevation\n stations.append(station_dict)\n \n return jsonify(stations)", "def stations(self):\n stations = []\n f = self._fetch(Citibike.STATION_URL)\n data = json.load(f)\n if 'stationBeanList' not in data or len(data['stationBeanList']) == 0:\n raise BadResponse('Station Fetch Failed', data)\n for station in data['stationBeanList']:\n stations.append(Station._from_json(station))\n logging.debug(\"Retrieved %d stations\" % len(stations))\n return stations", "def get_clusters(self):\r\n\r\n return self.__clusters", "def get_stations(nordic_file_names, output_level=0):\n stations = []\n for file in nordic_file_names:\n new_stations = get_event_stations(file, output_level)\n\n if new_stations == -1:\n continue\n\n for x in new_stations:\n if x not in stations:\n stations.append(x)\n\n return sorted(stations)", "def station_analysis(data):\n unique_stations = list(set(data['start_station_name'].tolist() + data['end_station_name'].tolist()))\n\n station_counter = {station : 0 for station in unique_stations}\n for index, row in data.iterrows():\n station_counter[row['start_station_name']] += 1\n\n print('List of all stations:')\n print(unique_stations)\n\n keys = list(station_counter.keys())\n vals = list(station_counter.values())\n indexArr = np.argsort(list(station_counter.values()))\n popularStations = []\n for i in reversed(indexArr):\n popularStations.append((keys[i], vals[i]))\n\n stations1, journeys = zip(*popularStations[0:10])\n plt.bar(stations1, journeys, 0.1)\n\n plt.xticks(stations1, rotation='vertical')\n plt.title('Popular stations')\n plt.xlabel('Station names')\n plt.ylabel('Journeys')\n\n plt.show()\n return station_counter", "def list(refresh):\n # This works too, but is much slower:\n # ogrinfo 
WFS:http://openmaps.gov.bc.ca/geo/ows?VERSION=1.1.0\n for table in bcdata.list_tables(refresh):\n click.echo(table)", "def print_cluster(self):\n print('Cluster', self.number)\n for pattern in self.patterns:\n pattern.print_pattern()", "def run():\n\n # Build list of tuples of station names and distance \n stations = build_station_list()\n p = (52.2053, 0.1218)\n by_distance = stations_by_distance(stations, p)\n for n in range(10):\n print(by_distance[n])\n for n in range(10):\n i = len(by_distance) - 10 + n\n print(by_distance[i])", "def show_map(self):\n for line in self.map:\n print(\"\".join(line))", "def closest_stations(lat: float, lon: float, limit: int = 1) -> List[Dict]:\n dist_sorted = sorted(\n STATIONS, key=lambda s: distance((lat, lon), (s[\"lat\"], s[\"lon\"]))\n )\n return dist_sorted[:limit]", "def singleton_list(data_table):\n\tsingleton_list = []\n\tfor line in data_table:\n\t\tsingleton_list.append(Cluster(set([line[0]]), line[1], line[2], line[3], line[4]))\n\t\t\n\treturn singleton_list", "def Map(data):\n lon = [loc[1] for loc in data[\"geo\"]]\n lat = [loc[0] for loc in data[\"geo\"]]\n\n return dcc.Graph(id=\"MapGraph\", figure=dict(\n data=[dict(\n type='scattergeo',\n # mode='markers',\n lon=lon,\n lat=lat,\n text=data[\"names\"],\n hoverinfo='text',\n marker=dict(\n symbol='circle',\n color=\"#B22234\",\n opacity=0.8,\n size=data['frequencies'],\n sizemode='area',\n sizeref=max(data['frequencies']) / (5.**3),\n sizemin=1,\n line=dict(width=0)\n )\n )],\n layout=dict(\n title='<b>Most common Places</b>',\n font=dict(family='Soria, Times New Roman, Times, serif', color='#B22234', size=19),\n dragmode=\"pan\",\n geo=dict(\n showocean=True,\n oceancolor=\"rgba(0, 44, 119, 0.7)\",\n showland=True,\n landcolor=\"#ededed\", # c4c4c4, #0ba340\n lonaxis=dict(range=[min(lon) - 10, max(lon) + 10]),\n lataxis=dict(range=[min(lat) - 10, max(lat) + 10]),\n showcountries=True,\n countrywidth=0.5,\n subunitwidth=0.5,\n projection=dict(type=\"equirectangular\")\n ),\n margin=dict(l=0, r=0, t=50, b=30),\n hovermode=\"closest\",\n paper_bgcolor='rgba(0,0,0,0)',\n plot_bgcolor='rgba(0,0,0,0)',\n autosize=True,\n )\n ))", "def get_station_boroughs(self):\\", "def all_stations(self, provider: ID) -> List[StationInfo]:\n srv_key = self.__stations_key(provider=provider)\n value = self.get(name=srv_key)\n if value is None:\n return []\n js = utf8_decode(data=value)\n array = json_decode(string=js)\n return StationInfo.convert(array=array)", "def _load_cluster(self):", "def stations():\n\n active_stations = session.query(Station.station).all()\n\n # Convert list of tuples into normal list \n stations = list(np.ravel(active_stations))\n\n return jsonify(stations)", "def select_all_topologies(conn):\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM topologies_topology\")\n \n rows = cur.fetchall()\n \n for row in rows:\n print(row)", "def data_fetch(self, curs, splat_table, mcl_table, crs_no=0, output_fname=None):\n\t\tgene_no2gene_id = get_gene_no2gene_id(curs)\t#08-31-05\n\t\toutf = open(output_fname, 'w')\t#08-31-05\n\t\toutf.write(\"r:=[\")\t#08-31-05\n\t\t\n\t\tmcl_id2cluster_dstructure = {}\n\t\tno_of_total_genes = get_no_of_total_genes(curs)\n\t\tsys.stderr.write(\"Getting the basic information for all clusters...\\n\")\n\t\tcurs.execute(\"DECLARE crs%s CURSOR FOR select m.mcl_id, m.vertex_set, m.connectivity, 0,\\\n\t\t\tm.recurrence_array, s.edge_set, s.connectivity, m.cooccurrent_cluster_id from %s m, %s s where \\\n\t\t\tm.splat_id=s.splat_id\"\\\n\t\t\t%(crs_no, mcl_table, 
splat_table))\t#06-20-05\tconnectivity_original faked to be 0\n\t\tcurs.execute(\"fetch 5000 from crs%s\"%crs_no)\n\t\trows = curs.fetchall()\n\t\twhile rows:\n\t\t\tfor row in rows:\n\t\t\t\tunit = cluster_dstructure()\n\t\t\t\tunit.cluster_id = row[0]\n\t\t\t\tvertex_set = row[1][1:-1].split(',')\n\t\t\t\tunit.vertex_set = map(int, vertex_set)\n\t\t\t\tunit.connectivity = row[2]\n\t\t\t\tunit.connectivity_original = row[3]\n\t\t\t\trecurrence_array = row[4][1:-1].split(',')\n\t\t\t\tunit.recurrence_array = map(float, recurrence_array)\n\t\t\t\tunit.edge_set = parse_splat_table_edge_set(row[5])\n\t\t\t\tunit.splat_connectivity = row[6]\n\t\t\t\tunit.cooccurrent_cluster_id = row[7]\n\t\t\t\tunit.go_no2association_genes = self.get_go_functions_of_this_gene_set(curs, unit.vertex_set)\n\t\t\t\tunit.go_no2information = self.get_information_of_go_functions(curs, \\\n\t\t\t\t\tunit.go_no2association_genes, len(unit.vertex_set), no_of_total_genes, p_value_cut_off=0.05)\t#jasmine wants to cut some go-nos.\n\t\t\t\tunit.edge_cor_2d_list, unit.edge_sig_2d_list = self.get_cor_sig_2d_list(curs, unit.edge_set)\n\t\t\t\t\n\t\t\t\tstr_tmp = self.return_string_form_of_cluster_dstructure(unit, gene_no2gene_id)\t#08-31-05\n\t\t\t\toutf.write(\"%s,\"%str_tmp)\n\t\t\t\t#mcl_id2cluster_dstructure[unit.cluster_id] = unit\n\t\t\t\t\"\"\"\n\t\t\t\torder_1st_id, order_2nd_id = map(int, unit.cooccurrent_cluster_id.split('.'))\n\t\t\t\tif order_1st_id not in self.order_1st_id2all_clusters:\n\t\t\t\t\tself.order_1st_id2all_clusters[order_1st_id] = {}\n\t\t\t\tif order_2nd_id not in self.order_1st_id2all_clusters[order_1st_id]:\n\t\t\t\t\tself.order_1st_id2all_clusters[order_1st_id][order_2nd_id] = []\n\t\t\t\tself.order_1st_id2all_clusters[order_1st_id][order_2nd_id].append(unit)\n\t\t\t\t\"\"\"\n\t\t\tcurs.execute(\"fetch 5000 from crs%s\"%crs_no)\n\t\t\trows = curs.fetchall()\n\t\toutf.write(\"[]]:\")\t#08-31-05, 09-01-05 add the last blank []\n\t\tdel outf\n\t\tsys.stderr.write(\"Done.\\n\")\n\t\treturn mcl_id2cluster_dstructure", "def listClusters():\n return [c['name'] for c in pymongo.Connection().clovr.clusters.find()]" ]
[ "0.67217124", "0.6456021", "0.6094351", "0.59413135", "0.57239014", "0.56707394", "0.5667912", "0.5660193", "0.56526005", "0.56276786", "0.5618511", "0.56184906", "0.5581086", "0.55698586", "0.5552247", "0.5542267", "0.5477948", "0.5476224", "0.54496205", "0.5442602", "0.5442067", "0.5436707", "0.5433754", "0.5433157", "0.54323006", "0.54277486", "0.5421539", "0.54213524", "0.5408177", "0.54060924", "0.5392749", "0.53870463", "0.5369274", "0.53586936", "0.5346998", "0.5335561", "0.53346294", "0.53319776", "0.5329238", "0.5327878", "0.5326603", "0.5322817", "0.53139895", "0.5309173", "0.530896", "0.5295192", "0.52913564", "0.5289617", "0.5284172", "0.5280895", "0.5277366", "0.5272983", "0.526849", "0.5263235", "0.5256736", "0.52475685", "0.5236576", "0.5203381", "0.5200592", "0.5197065", "0.51877165", "0.5187117", "0.5185226", "0.51823294", "0.5166037", "0.5164883", "0.51626676", "0.5139568", "0.5138217", "0.5136508", "0.5136009", "0.51260567", "0.5120431", "0.51179785", "0.5117141", "0.5115056", "0.5109084", "0.5104729", "0.5100047", "0.5087217", "0.5084103", "0.5064863", "0.50637925", "0.5061", "0.5053046", "0.5050366", "0.5046347", "0.50446266", "0.5044309", "0.50301474", "0.50225055", "0.50188017", "0.50182444", "0.5015219", "0.50149274", "0.501037", "0.50054413", "0.5005039", "0.4996542", "0.49953347" ]
0.7505736
0
Helper shortcut for creating subcommands. Accepts arguments for `add_subparsers`, creating a new subparser and returning a partial function wrapping `add_subcommand` for the new subparser. If the `dest` argument isn't specified, it defaults to `'subcmd'`. Example cmd_foo = CommandParser('foo', 'Does foo stuff') foo_adder = cmd_foo.make_adder(metavar='OPERATION', required=True) bar_subcmd = foo_adder('bar', description='Does bar stuff to foo')
def make_adder(self, *args, **kwargs): kwargs.setdefault("dest", "subcmd") subp = self.add_subparsers(*args, **kwargs) return partial(self.add_subcommand, subp)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def subcommand(self, func=None, **subcommand_options):\n\n def decorator(subcommand_func):\n subcommand_sig = inspect.signature(subcommand_func)\n\n @functools.wraps(subcommand_func)\n def wrapped(args):\n\n final_args = []\n final_kwargs = {}\n\n if \"args\" in subcommand_sig.parameters:\n final_kwargs[\"args\"] = args\n\n return subcommand_func(*final_args, **final_kwargs)\n\n subcommand_name = subcommand_options.pop(\"name\", subcommand_func.__name__)\n subcommand_args_def = subcommand_options.pop(\"args\", None) or ()\n subcommand_doc = subcommand_options.pop(\"help\", None) or subcommand_options.pop(\"description\", None)\n if subcommand_doc is None:\n subcommand_doc = subcommand_func.__doc__\n subcommand_aliases = subcommand_options.pop(\"aliases\", None) or []\n if subcommand_options:\n raise ValueError(f\"Unexpected kwarg(s): {', '.join(str(k) for k in subcommand_options.keys())}\")\n\n parser = self.subparsers.add_parser(\n name=subcommand_name,\n help=subcommand_doc,\n description=subcommand_doc,\n aliases=subcommand_aliases,\n )\n parser.set_defaults(func=wrapped)\n\n for arg in subcommand_args_def:\n if isinstance(arg, str):\n parser.add_argument(arg)\n elif isinstance(arg, (list, tuple)):\n if isinstance(arg[-1], dict):\n arg_args = arg[:-1]\n arg_kwargs = arg[-1]\n else:\n arg_args = arg[:]\n arg_kwargs = {}\n parser.add_argument(*arg_args, **arg_kwargs)\n else:\n raise TypeError(r\"Expected a string or list, got {arg!r}\")\n\n return parser\n\n if func is None:\n return decorator\n else:\n return decorator(func)", "def setup_subparser(name, description, commands):\n subparser = SUBPARSER.add_parser(\n name,\n help=description\n )\n subparser.add_argument(\n 'sub_command',\n metavar='sub_command',\n type=str,\n nargs='+',\n help='Which command to run. 
Options: %s' % ', '.join(commands),\n choices=commands\n )\n\n return subparser", "def add_subcommand(\n subp: _SubParsersAction, name: str, description: Optional[str] = None, **kwargs\n ) -> \"CommandParser\":\n desc_help = {\"description\": description, \"help\": description}\n return subp.add_parser(name, **desc_help, **kwargs)", "def build_subcommands_parser(parser, module):\n mdefs = module.__dict__\n keys = list(mdefs.keys())\n keys.sort()\n subparsers = parser.add_subparsers(help='sub-command help')\n for command in keys:\n if command.startswith('pub_'):\n func = module.__dict__[command]\n parser = subparsers.add_parser(command[4:], help=func.__doc__)\n parser.set_defaults(func=func)\n argspec = inspect.signature(func)\n positionals = []\n short_opts = set([])\n for arg in argspec.parameters.values():\n if arg.default == inspect.Parameter.empty:\n positionals += [arg]\n else:\n param_name = arg.name.replace('_', '-')\n short_opt = param_name[0]\n if not (param_name.startswith('no') or\n (short_opt in short_opts)):\n opts = ['-%s' % short_opt, '--%s' % param_name]\n else:\n opts = ['--%s' % param_name]\n short_opts |= set([short_opt])\n if isinstance(arg.default, list):\n parser.add_argument(*opts, action='append')\n elif isinstance(arg.default, dict):\n parser.add_argument(*opts, type=json.loads)\n elif arg.default is False:\n parser.add_argument(*opts, action='store_true')\n elif arg.default is not None:\n parser.add_argument(*opts, default=arg.default)\n else:\n parser.add_argument(*opts)\n if positionals:\n for arg in positionals[:-1]:\n parser.add_argument(arg.name)\n parser.add_argument(positionals[-1].name, nargs='*')", "def add(cls, subparsers):\n subparser = subparsers.add_parser(\n name=cls.__tool_name__(),\n description=cls.__get_description__())\n\n cls.__add_arguments__(subparser)\n subparser.set_defaults(func=cls.from_args)\n return subparser", "def add_subparser(sp, name, **kwargs):\n kwargs[\"add_help\"] = False\n kwargs['formatter_class'] = ap.ArgumentDefaultsHelpFormatter\n sparser = sp.add_parser(name, **kwargs)\n\n sparser.add_argument(\"-h\", \"--help\", action=custom_help(),\n help=\"print the short or long help\")\n\n return sparser", "def subcommand(args=None, parent=subparsers):\n def decorator(func):\n parser = parent.add_parser(func.__name__, description=func.__doc__)\n for arg in args:\n parser.add_argument(*arg[0], **arg[1])\n parser.set_defaults(func=func)\n\n if args is None:\n args = []\n return decorator", "def add_args_to_subparser(the_parser, subcommand_name):\n\n the_parser.add_argument(CmdArgs.verbose_optional, help=CmdArgs.verbose_help,\n action='store_true',\n )\n\n if subcommand_name in DCA_VISUALIZATION_SUBCOMMANDS:\n the_parser.add_argument(CmdArgs.biomolecule, help=CmdArgs.biomolecule_help)\n the_parser.add_argument(CmdArgs.pdb_chain_id, help=CmdArgs.pdb_chain_id_help)\n the_parser.add_argument(CmdArgs.pdb_file, help=CmdArgs.pdb_file_help)\n the_parser.add_argument(CmdArgs.refseq_file, help=CmdArgs.refseq_file_help)\n the_parser.add_argument(CmdArgs.dca_file, help=CmdArgs.dca_file_help)\n the_parser.add_argument(CmdArgs.rna_secstruct_file_optional,\n help=CmdArgs.rna_secstruct_file_help,\n )\n the_parser.add_argument(CmdArgs.linear_dist_optional,\n help=CmdArgs.linear_dist_help, type = int,\n )\n the_parser.add_argument(CmdArgs.contact_dist_optional,\n help=CmdArgs.contact_dist_help, type = float,\n )\n the_parser.add_argument(CmdArgs.num_dca_contacts_optional,\n help = CmdArgs.num_dca_contacts_help, type = int,\n )\n 
the_parser.add_argument(CmdArgs.wc_neighbor_dist_optional, type= int,\n help = CmdArgs.wc_neighbor_dist_help,\n )\n the_parser.add_argument(CmdArgs.pdb_id_optional, help = CmdArgs.pdb_id_help)\n\n if subcommand_name in FILE_CONTENT_SUBCOMMANDS:\n if subcommand_name == 'pdb_content':\n the_parser.add_argument(CmdArgs.pdb_file, help = CmdArgs.pdb_file_help)\n if subcommand_name in MSA_TRIMMING_SUBCOMMANDS:\n the_parser.add_argument(CmdArgs.max_gap_optional,\n type = float, help = CmdArgs.max_gap_help,\n )\n if subcommand_name == 'trim_by_refseq':\n the_parser.add_argument(CmdArgs.biomolecule, help=CmdArgs.biomolecule_help)\n the_parser.add_argument(CmdArgs.msa_file, help=CmdArgs.msa_file_help)\n the_parser.add_argument(CmdArgs.refseq_file, help=CmdArgs.refseq_file_help)\n the_parser.add_argument(CmdArgs.remove_all_gaps_optional,\n help= CmdArgs.remove_all_gaps_help, action='store_true',\n )\n if subcommand_name == 'trim_by_gap_size':\n the_parser.add_argument(CmdArgs.msa_file, help=CmdArgs.msa_file_help)\n return None", "def addSubParser( parentParser, cmdName ) :\n parser = parentParser.add_parser( cmdName, help='Generate a new UBOS package scaffold.' )\n parser.add_argument( '--directory', required=True, help='Directory where to create the package scaffold')\n parser.add_argument( '--template', required=True, help='Name of the template to use' )\n parser.add_argument( '--json', required=False, help='Settings file' )", "def add_subparsers(cls, parser, name=\"\", prefixes=[], delim=\"_\", title=\"commands\", description=\"available commands\", required=True):\n\t\tcommand = f\"command_{name}\"\n\t\tif command in cls.COMMANDS:\n\t\t\traise CommandParserNameDuplicated(f\"Command parser with name {name} already registered.\")\n\t\t\n\t\tcls.COMMANDS[command] = {}\n\t\t\n\t\tsub = parser.add_subparsers(title=title, dest=command, description=description)\n\t\tsub.required = True\n\t\tfor pf in prefixes:\n\t\t\tfor c, method in cls.get_commands(prefix=pf, delim=delim):\n\t\t\t\tcls.set_subparser_for(c, method, sub)\n\t\t\t\tcls.COMMANDS[command][c] = method", "def _make_parser(self, **kwargs):\n\n kwargs.setdefault('help', self.help)\n kwargs.setdefault('formatter_class',argparse.RawDescriptionHelpFormatter)\n kwargs.setdefault('description', self.description)\n kwargs.setdefault('name', self.name)\n names = (kwargs.get('name') or self.name).split('.')\n \n def _get_subparser(a):\n if a._subparsers:\n for action in a._subparsers._actions:\n if isinstance(action, argparse._SubParsersAction):\n return action\n raise RuntimeError('could not find adequate subparser')\n return a.add_subparsers(dest='command',\n title='commands',\n metavar='COMMAND')\n def _get_parser(node, idx, names):\n name = names[idx]\n if name in node.choices:\n return node.choices[name]\n args = {\n 'name' : name,\n 'help' : 'a group of sub-commands',\n }\n return node.add_parser(**args)\n \n parser = ACMD_PARSER\n node = _get_subparser(parser)\n\n for i,n in enumerate(names[:-1]):\n node = _get_subparser(parser)\n parser = _get_parser(node, i, names)\n \n node = _get_subparser(parser)\n kwargs['name'] = names[-1]\n parser = node.add_parser(**kwargs)\n return parser", "def register_argument_parser(add_parser, action):\n sub_command = str(action)\n return add_parser(sub_command,\n help=f'{sub_command} token',\n description=f'{sub_command.capitalize()} a Waiter token. '\n 'In addition to the optional arguments '\n 'explicitly listed below, '\n 'you can optionally provide any Waiter '\n 'token parameter as a flag. 
For example, '\n 'to specify 10 seconds for the '\n 'grace-period-secs parameter, '\n 'you can pass --grace-period-secs 10. '\n 'You can also provide nested fields separated by a period. For example, '\n 'to specify an environment variable FOO as \\\"bar\\\", you can pass --env.FOO \\\"bar\\\".')", "def add_subcommands(cls, parser: argparse.ArgumentParser) -> None:\n if cls.SUBCOMMANDS:\n subparsers = parser.add_subparsers(title=\"subcommands\", metavar=\"\", dest='cmd')\n for subcmd_class in cls.SUBCOMMANDS:\n parsers = subcmd_class.get_args()\n subcmd_class.parser = parsers[-1]\n\n subparser = subparsers.add_parser(\n subcmd_class.NAMES[0],\n aliases=subcmd_class.NAMES[1:],\n parents=parsers,\n help=subcmd_class.HELP,\n epilog=subcmd_class.EPILOG)\n subparser.set_defaults(command_class=subcmd_class)\n subcmd_class.customize_subparser(subparser)", "def set_subparser_for(cls, command, method, subparser):\n\n\t\tdef add_pos_argument(sub, label, arg):\n\t\t\tif arg[\"type\"] == bool:\n\t\t\t\traise CommandTypeError(\"bool type not supported as positional argument\")\n\t\t\tif \"value\" in arg:\n\t\t\t\tif arg[\"type\"] in [str, int, float]:\n\t\t\t\t\tsub.add_argument(label, nargs='?', default=arg[\"value\"], type=arg[\"type\"], help=arg[\"help_line\"])\n\t\t\telif \"values\" in arg:\n\t\t\t\tif arg[\"type\"] in [str, int, float]:\n\t\t\t\t\tsub.add_argument(label, nargs='?', default=arg[\"values\"][0], choices=arg[\"values\"], type=arg[\"type\"], help=arg[\"help_line\"])\n\t\t\t\telif arg[\"type\"] == list:\n\t\t\t\t\tsub.add_argument(label, nargs='+', default=arg[\"values\"][0], choices=arg[\"values\"], help=arg[\"help_line\"])\n\t\t\telse:\n\t\t\t\tsub.add_argument(label, type=arg[\"type\"], help=arg[\"help_line\"])\n\n\t\tdef add_opt_argument(sub, label, arg, add_alias=True):\n\t\t\tif arg[\"type\"] == bool:\n\t\t\t\tif add_alias:\n\t\t\t\t\tsub.add_argument(arg[\"alias\"], arg[\"name\"], action=\"store_true\", default=False, help=arg[\"help_line\"])\n\t\t\t\telse:\n\t\t\t\t\tsub.add_argument(arg[\"name\"], action=\"store_true\", default=False, help=arg[\"help_line\"])\n\n\t\t\telif arg[\"type\"] in [str, int, float] and \"value\" in arg:\n\t\t\t\tif add_alias:\n\t\t\t\t\tsub.add_argument(arg[\"alias\"], arg[\"name\"], type=arg[\"type\"], default=arg[\"value\"], help=arg[\"help_line\"])\n\t\t\t\telse:\n\t\t\t\t\tsub.add_argument(arg[\"name\"], type=arg[\"type\"], default=arg[\"value\"], help=arg[\"help_line\"])\n\t\t\telif arg[\"type\"] == list and \"values\" not in arg:\n\t\t\t\tsub.add_argument(label, nargs=\"*\", help=arg[\"help_line\"])\n\t\t\telif \"values\" in arg:\n\t\t\t\tif arg[\"type\"] == list:\n\t\t\t\t\tsub.add_argument(label, choices=arg[\"values\"], default=arg[\"values\"][0], nargs=\"*\", help=arg[\"help_line\"])\n\t\t\t\telse:\n\t\t\t\t\tsub.add_argument(label, type=arg[\"type\"], choices=arg[\"values\"], default=arg[\"values\"][0], nargs=\"?\", help=arg[\"help_line\"])\n\t\t\telse:\n\t\t\t\tif add_alias:\n\t\t\t\t\tsub.add_argument(arg[\"alias\"], arg[\"name\"], type=arg[\"type\"], help=arg[\"help_line\"])\n\t\t\t\telse:\n\t\t\t\t\tsub.add_argument(arg[\"name\"], type=arg[\"type\"], help=arg[\"help_line\"])\n\t\t\n\t\tfunc = getattr(cls, method)\n\n\t\targs_info = cls.__parse_docstring(func.__doc__)\n\t\tif args_info == {}:\n\t\t\treturn\n\n\t\tc = subparser.add_parser(command, help=args_info[\"help_line\"])\n\n\t\tif \"arguments\" in args_info:\n\t\t\tfor label, arg in args_info[\"arguments\"].items():\n\t\t\t\tif arg[\"pos\"]:\n\t\t\t\t\tadd_pos_argument(c, label, 
arg)\n\t\t\t\telse:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tadd_opt_argument(c, label, arg, add_alias=True)\n\t\t\t\t\texcept ArgumentError as e:\n\t\t\t\t\t\tadd_opt_argument(c, label, arg, add_alias=False)", "def define_command(subparsers, command, callback, cmd_mapper):\n desc = callback.__doc__ or ''\n daemon_help = desc.strip().split('\\n')[0]\n arguments = getattr(callback, 'arguments', [])\n\n subparser = subparsers.add_parser(command, help=daemon_help,\n description=desc,\n add_help=False,\n formatter_class=HelpFormatter)\n subparser.add_argument('-h', '--help', action='help',\n help=argparse.SUPPRESS)\n cmd_mapper[command] = subparser\n for (args, kwargs) in arguments:\n subparser.add_argument(*args, **kwargs)\n subparser.set_defaults(func=callback)", "def add_subparsers(dct, **kwargs):\n def _add_subparsers(parser):\n factory = parser.add_subparsers(**kwargs)\n # hack: bypass bug in python3 argparse\n # http://stackoverflow.com/questions/22990977/why-does-this-argparse-code-behave-differently-between-python-2-and-3\n factory.required = True\n for name in sorted(dct.keys()):\n funcs = dct[name]\n _subparser = factory.add_parser(name)\n build_arg_parser(funcs, _subparser)\n return _add_subparsers", "def sub_command(self, name: str=None, description: str=None, options: list=None, connectors: dict=None, **kwargs):\r\n def decorator(func):\r\n new_func = SubCommand(\r\n func,\r\n name=name,\r\n description=description,\r\n options=options,\r\n connectors=connectors,\r\n **kwargs\r\n )\r\n self.children[new_func.name] = new_func\r\n self.option.options.append(new_func.option)\r\n return new_func\r\n return decorator", "def add_command(subparsers):\n\n parser = subparsers.add_parser('create', help=create.__doc__)\n\n parser.add_argument('-r', '--recreate', action='store_true', help='If set, I\\'ll first erase the current database')\n parser.add_argument('-v', '--verbose', action='count', help='Increase verbosity?')\n parser.add_argument('-d', '--image-dir', default='/idiap/project/hface/databases/polimetric_thermal_database/Registered/', help=\"Change the relative path to the directory containing the images of the Polarimetric database.\")\n\n parser.set_defaults(func=create) #action", "def add_generate_token_subcommand(\n subparsers: Any,\n) -> None:\n generate_token_sp = subparsers.add_parser(\n \"generate-token\",\n formatter_class=Formatter,\n description=dedent( # noqa: WPS462 -- docs\n \"\"\"\\\n Generate token.\n\n Token is required to consume the protected endpoints.\n\n Example:\n ```shell\n # Generate a rsa key pair\n openssl genpkey -algorithm RSA -out private_key.pem \\\\\n -pkeyopt rsa_keygen_bits:2048\n openssl rsa -pubout -in private_key.pem -out public_key.pem\n # Generate token\n bartender generate-token --format header > token.txt\n # Use token\n curl -X 'GET' \\\\\n 'http://127.0.0.1:8000/api/whoami' \\\\\n -H 'accept: application/json' \\\\\n -H @token.txt | jq .\n ```\n \"\"\",\n ),\n help=\"Generate token.\",\n )\n generate_token_sp.add_argument(\n \"--private-key\",\n default=Path(\"private_key.pem\"),\n type=Path,\n help=\"Path to RSA private key file\",\n )\n generate_token_sp.add_argument(\n \"--username\",\n default=\"someone\",\n help=\"Username to use in token\",\n )\n generate_token_sp.add_argument(\n \"--roles\",\n nargs=\"+\",\n default=[\"expert\", \"guru\"],\n help=\"Roles to use in token\",\n )\n onehour_in_minutes = 60\n generate_token_sp.add_argument(\n \"--lifetime\",\n default=onehour_in_minutes,\n type=int,\n help=\"Lifetime of token in minutes\",\n )\n 
generate_token_sp.add_argument(\n \"--issuer\",\n default=\"bartendercli\",\n help=\"Issuer of token\",\n )\n generate_token_sp.add_argument(\n \"--oformat\",\n default=\"plain\",\n choices=[\"header\", \"plain\"],\n help=\"Format of output\",\n )\n generate_token_sp.set_defaults(func=generate_token_subcommand)", "def _add_to_cli(self, parser, group=None):\n dest = self.dest\n if group is not None:\n dest = group.name + '_' + dest\n\n subparsers = parser.add_subparsers(dest=dest,\n title=self.title,\n description=self.description,\n help=self.help)\n # NOTE(jd) Set explicitly to True for Python 3\n # See http://bugs.python.org/issue9253 for context\n subparsers.required = True\n\n if self.handler is not None:\n self.handler(subparsers)", "def construct_subcommand(\n parser,\n hooks=None,\n arg_filter=None,\n is_root=True\n):\n subcommands = []\n options = []\n args = []\n subcommand = {}\n hooks = {} if hooks is None else hooks\n subcommand_hook = hooks.get(\"subcommand\")\n\n if is_root:\n subcommand[\"name\"] = parser.prog\n\n for arg in parser._actions:\n if arg_filter is not None and arg_filter(arg):\n continue\n if arg.nargs == argparse.PARSER:\n subcommand.update(get_base_suggestion(arg))\n help_map = {a.dest: a.help for a in arg._choices_actions}\n\n nested_subcommands = {}\n for name, nested_parser in arg.choices.items():\n if nested_parser in nested_subcommands:\n nested_subcommands[nested_parser][\"name\"].append(name)\n else:\n nested_subcommands[nested_parser] = {\n \"name\": [name],\n **construct_subcommand(\n nested_parser,\n hooks=hooks,\n arg_filter=arg_filter,\n is_root=False\n ),\n }\n if name in help_map and help_map[name] != argparse.SUPPRESS:\n nested_subcommands[nested_parser][\"description\"] = str(help_map[name])\n for p, nested_subcommand in nested_subcommands.items():\n if len(nested_subcommand[\"name\"]) == 1:\n nested_subcommand[\"name\"] = nested_subcommand[\"name\"][0]\n if subcommand_hook:\n subcommand_hook(nested_subcommand, p)\n subcommands.append(nested_subcommand)\n elif arg.option_strings:\n options.append(construct_option(arg, hooks, parser))\n else:\n args.extend(construct_args(arg, hooks, parser))\n\n if subcommands:\n subcommand[\"subcommands\"] = subcommands\n if options:\n subcommand[\"options\"] = options\n if args:\n subcommand[\"args\"] = args\n\n if is_root and subcommand_hook:\n subcommand_hook(subcommand, parser)\n\n return subcommand", "def sub_command(self, name: str=None, description: str=None, options: list=None, connectors: dict=None, **kwargs):\r\n def decorator(func):\r\n if self.child_type is None:\r\n if len(self.registerable.options) > 0:\r\n self.registerable.options = []\r\n self.child_type = Type.SUB_COMMAND\r\n \r\n new_func = SubCommand(\r\n func,\r\n name=name,\r\n description=description,\r\n options=options,\r\n connectors=connectors,\r\n **kwargs\r\n )\r\n self.children[new_func.name] = new_func\r\n self.registerable.options.append(new_func.option)\r\n return new_func\r\n return decorator", "def add_arg_parser(subparsers):\n # add\n add_p = subparsers.add_parser('add', description='Create a bundle from a .csv, .tsv, or a directory of files.')\n add_p.add_argument('-t', '--tag', nargs=1, type=str, action='append',\n help=\"Set one or more tags: 'dsdt add -t authoritative:True -t version:0.7.1'\")\n add_p.add_argument('bundle', type=str, help='The destination bundle in the current context')\n add_p.add_argument('path_name', type=str, help='File or directory of files to add to the bundle', action='store')\n 
add_p.set_defaults(func=lambda args: _add(args))", "def create_parser(self, prog_name, subcommand):\n parser = CommandParser(\n self, prog=\"%s %s\" % (os.path.basename(prog_name), subcommand),\n description=self.help or None,\n )\n parser.add_argument(\n '--pythonpath',\n help='A directory to add to the Python path',\n )\n self.add_arguments(parser)\n return parser", "def get_parser(subparsers, parent=None):\n parser = subparsers.add_parser(\n \"flow\",\n description=\"Invoke ML on MCU flow\",\n parents=[parent] if parent else [],\n add_help=(parent is None),\n )\n parser.set_defaults(func=handle)\n add_common_options(parser)\n add_context_options(parser)\n add_flow_options(parser)\n subparsers = parser.add_subparsers(dest=\"subcommand2\") # this line changed\n load.get_parser(subparsers)\n tune.get_parser(subparsers)\n build.get_parser(subparsers)\n compile_.get_parser(subparsers)\n run.get_parser(subparsers)", "def add_argparse_subparser(subparsers):\n\n new_sub_parser = subparsers.add_parser(\n PluginManager.argparse_subparser_name(), help=\"plugin commands\"\n )\n PluginManager.__argparse_subparser = new_sub_parser\n plugin_subparsers = new_sub_parser.add_subparsers(\n dest=PluginManager.__root_subparser_name\n )\n\n sub_sub_parser = plugin_subparsers.add_parser(\n \"list\", help=\"list the available plugins\"\n )\n sub_sub_parser.add_argument(\n \"--all\",\n dest=\"show_all\",\n action=\"store_true\",\n default=False,\n help=\"show all loaded plugins (default is False)\",\n )\n sub_sub_parser.add_argument(\n dest=\"list_filter\",\n default=None,\n help=\"filter\",\n nargs=\"?\",\n type=PluginManager.__list_filter_type,\n )\n sub_sub_parser = plugin_subparsers.add_parser(\n \"info\", help=\"information on a specific plugin\"\n )\n sub_sub_parser.add_argument(\n dest=\"info_filter\",\n default=None,\n type=PluginManager.__info_filter_type,\n help=\"an id\",\n )", "def _init_cfg_subcmd(subparsers):\n cfg_related = subparsers.add_parser(\"cfg\")\n cfg_subparsers = cfg_related.add_subparsers(dest=\"cfg_subcommand\")\n\n cfg_write_parser = cfg_subparsers.add_parser(\"write\")\n cfg_write_parser.add_argument(\n \"--level\",\n choices=[\"user\", \"cwd\"],\n default=None,\n help=\"Specify if this config is for user or just the working directory.\",\n )\n cfg_write_parser.add_argument(\n \"--open\", action=\"store_const\", const=True, default=False\n )\n cfg_subparsers.add_parser(\"show\")\n\n cfg_export_parser = cfg_subparsers.add_parser(\"export\")\n cfg_export_parser.add_argument(\"--dir\", default=os.getcwd())\n\n return cfg_related", "def _init_cfg_subcmd(subparsers):\n cfg_related = subparsers.add_parser(\"cfg\",)\n cfg_subparsers = cfg_related.add_subparsers(dest=\"cfg_subcommand\")\n\n cfg_write_parser = cfg_subparsers.add_parser(\"write\")\n cfg_write_parser.add_argument(\n \"--level\",\n choices=[\"user\", \"cwd\"],\n default=None,\n help=\"Specify if this config is for user or just the working directory.\",\n )\n cfg_write_parser.add_argument(\n \"--open\", action=\"store_const\", const=True, default=False\n )\n cfg_subparsers.add_parser(\"show\")\n\n cfg_export_parser = cfg_subparsers.add_parser(\"export\")\n cfg_export_parser.add_argument(\"--dir\", default=os.getcwd())\n\n return cfg_related", "def create_parser():\n parser = argparse.ArgumentParser(\n description='CLI for SMS',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n )\n\n # Add subcommands\n subparsers = parser.add_subparsers(title='subcommands', dest='cmd')\n\n # Downlink Unitdata\n downlink_unitdata_parser 
= subparsers.add_parser(\n 'DU', help=\"Send downlink unitdata to SMSOrc8rGW service\",\n )\n downlink_unitdata_parser.add_argument('imsi', help='e.g. 001010000090122 (no prefix required)')\n downlink_unitdata_parser.add_argument('data', help='Data as a hex string e.g. 1fc13a00')\n downlink_unitdata_parser.set_defaults(func=send_downlink_unitdata)\n\n return parser", "def setup_subcommands(argparser):\n\n subparsers = argparser.add_subparsers()\n\n parser_info = subparsers.add_parser('info', help = 'Provide the information about the user')\n parser_info.add_argument('user', help = 'The user to inspect')\n\n parser_ownerships = subparsers.add_parser('ownerships', help = 'Show items which this user owns')\n parser_ownerships.add_argument('user', help = 'The name of the user to show information about')\n parser_ownerships.add_argument('-r', '--recursive', action = 'store_true', help = 'Show items which this user own through being in lists')\n \n parser_info.set_defaults(handler = show_info)\n parser_ownerships.set_defaults(handler = show_ownerships)", "def extend_cli(self, subparser):", "def customize_subparser(cls, subparser: argparse.ArgumentParser) -> None:\n pass", "def subparser( parser, subparsers ):", "def parse_args():\n parser = ArgumentParser()\n subparsers = parser.add_subparsers(help=\"Sub-command help\", dest=\"subcommand\")\n\n parser_collect_csv = subparsers.add_parser(\"collect-csv\", help=\"Collect csv files and place them in their right directories\")\n parser_collect_csv.add_argument(\"--dest-dir\", help=\"Output dir to store the results\", default=\"output_dir\")\n parser_collect_csv.add_argument(\"--search-paths\", nargs='*', default=[\".\"], help=\"List of paths to recursively search for files\")\n\n parser_backup_tables = subparsers.add_parser(\"backup-tables\", help=\"Make a pg_dump of the selected tables\")\n parser_backup_tables.add_argument(\"database\", help=\"Name of the database to make the backup\")\n parser_backup_tables.add_argument(\"tables\", nargs='*', help=\"List of tables to dump\", default=\"-\")\n parser_backup_tables.add_argument(\"--outfile\", help=\"Filename for the backup\", default=\"backup.sql\")\n\n return parser.parse_args()", "def add_cli(self, subparser):\n new_parser = subparser.add_parser('create', help='create new scratch file')\n new_parser.add_argument('name', nargs='?', default=None, help=\"Optional Name to be given to the file, \"\n \"default name is an increment of 'scratch##'\")\n new_parser.set_defaults(func=self.action)\n return subparser", "def load_into(subparser, as_cmd=None):\n p = subparser\n p.description = description\n\n if not as_cmd:\n as_cmd = default_name\n out = cli.CommandSuite(as_cmd, p)\n out.load_subcommand(topics)\n return out", "def set_parser(*, collected, parser=None):\n if parser is None:\n parser = argparse.ArgumentParser()\n subparsers = parser.add_subparsers()\n commands = unique(collected)\n for name, details in commands.items():\n original = details.original\n args = details.extra\n a_subparser = subparsers.add_parser(name)\n a_subparser.set_defaults(\n __gather_name__=name,\n __gather_command__=original,\n )\n for arg_details in args:\n a_subparser.add_argument(*arg_details.args, **dict(arg_details.kwargs))\n return parser", "def parser(subparsers, _):\n desc = 'Synchronize branches, tags, references and other meta data from another repository.\\n' \\\n 'For more information on this advanced command refer to the manual page for git-fetch.'\n fetch_parser = subparsers.add_parser(\n 'fetch', help=desc, 
description=desc.capitalize(), aliases=['ft'])\n fetch_parser.set_defaults(func=main)\n fetch_parser.add_argument(\n 'fetch_args', nargs=\"*\", help='Additional arguments to pass to `git fetch`')", "def register_subcommand(nest_ops_subparsers):\n parser = nest_ops_subparsers.add_parser('compile', \\\n help=COMPILE_CMD_HELP, \\\n description=COMPILE_CMD_DESCRIPTION, \\\n formatter_class=argparse.RawTextHelpFormatter )\n\n parser.add_argument('--code_type', \\\n choices=VALID_CODE_TYPES, \\\n help=CODE_TYPE_ARG_HELP, \\\n default='all' \\\n )\n\n parser.add_argument('--project', \\\n help=\"\"\"Which project to build. Only affects the web_assets:dist\n code_type, where it determines which project's index.html\n will be the main entry point index.html in the static files.\"\"\", \\\n choices=nest_envs.VALID_PROJECT_NAMES, \\\n default=nest_envs.DEFAULT_PROJECT_NAME, \\\n )\n \n parser.add_argument('--runlevel', \\\n help='Determines the run level for logging, error checking, etc.',\n choices=nest_envs.VALID_RUNLEVEL_NAMES,\n default=nest_envs.DEFAULT_RUNLEVEL_NAME, \\\n )\n\n #super ugly callback mechanism from argparse\n parser.set_defaults(func=_run_compile_cmd)\n return", "def add_subparser(subparsers):\n parser = subparsers.add_parser(\"utils/update\",\n description=\"Update apt and the groot_ansible ecosystem\", # this shows in the help for this command\n help=\"update your ansible/apt environment\", # this shows in the parent parser\n formatter_class=argparse.ArgumentDefaultsHelpFormatter\n )\n common.add_ansible_arguments(parser)\n parser.set_defaults(func=parse_args)", "def build_parser(self, argspec, arguments):\n fname = argspec.func.__name__\n if fname.startswith('do_'):\n fname = fname[3:]\n\n parser_args = arguments.pop('parser_args', {})\n parser_args.setdefault('prog', '%s %s' % (sys.argv[0], fname))\n if argspec.func.__doc__ is not None:\n parser_args.setdefault('description', argspec.func.__doc__)\n return argparse.ArgumentParser(**parser_args)", "def refine_cli(\n subparsers: SubParsersAction,\n parent_parsers: List[argparse.ArgumentParser],\n) -> None:", "def subdivide_parser(subparser):\n parser = subparser.add_parser('subdivide',\\\n help='will subdivide a mesh by one iteration '\\\n '(splitting all triangles in four others)'\\\n ' deprecated use converter.py refine instead')\n parser = add_arg(parser, 'replace')\n parser.add_argument(\"args\", metavar='SELAFIN file', nargs=\"+\")\n\n return subparser", "def fill_subparsers(subparsers):\n\n\t\tfor cls in BaseCommand.__subclasses__():\n\t\t\tcls.add_subparser(subparsers)", "def get_argument_parser(self):\n parser = argparse.ArgumentParser()\n subparsers = parser.add_subparsers(dest='command')\n fetch_parser = subparsers.add_parser('fetch', help='fetches and displays a release from discogs')\n fetch_parser.add_argument('discogs_id', help='the ID of the release')\n rip_parser = subparsers.add_parser('rip', help='rips the current CD to WAV')\n rip_parser.add_argument('--destination', help='optional destination for the CD rip')\n search_parser = subparsers.add_parser(\n 'search',\n prog='search',\n help='performs a very simple search on discogs')\n search_parser.add_argument('term', help='the term to search for')\n encode_parser = subparsers.add_parser(\n 'encode', help='Encodes a CD or a set of WAV files to mp3.')\n encode_parser.add_argument(\n 'encoding_from', choices=['cd', 'wav'], help='The source to encode from.')\n encode_parser.add_argument(\n 'encoding_to', choices=['mp3', 'flac'], help='The destination to encode 
to.')\n encode_parser.add_argument(\n '--source', help='The destination of the source wav file. This can be a file or directory.')\n encode_parser.add_argument(\n '--destination', help='The destination of the resulting mp3 or flac. This can be a file or directory.')\n encode_parser.add_argument(\n '--keep-source', action='store_true', help='If encoding from wav, use this to keep the original wav being removed.')\n encode_parser.add_argument(\n '--collapse-index-tracks', action='store_true', help='If set this will collapse any subtracks to a single track.')\n encode_parser.add_argument(\n '--discogs-id', help='The discogs ID for the release. When this is used metadata from the discogs release will be applied to the encoded files.')\n decode_parser = subparsers.add_parser('decode', help='Decodes a set of FLAC or MP3 files to WAV.')\n decode_parser.add_argument(\n 'decode_from', choices=['flac', 'mp3'], help='The source to decode from.')\n decode_parser.add_argument(\n '--source', help='The destination of the source file. This can be a file or directory.')\n decode_parser.add_argument(\n '--destination', help='The destination of the resulting wav. This can be a file or directory.')\n tag_parser = subparsers.add_parser('tag', help='Tags an audio file')\n tag_parser.add_argument(\n 'action', choices=['add', 'remove'], help='The tagging action to be performed. A tag can be added or removed.')\n tag_parser.add_argument(\n 'format', choices=['mp3', 'flac'], help='The file format of the audio file being tagged.')\n tag_parser.add_argument(\n '--collapse-index-tracks', action='store_true', help='If set this will collapse any subtracks to a single track.')\n tag_parser.add_argument(\n '--source',\n help='The source audio files to tag. This can be a file or a directory. If the source is omitted, the files in the current working directory will be used.')\n tag_parser.add_argument('--discogs-id', help='The discogs ID for the release. When this is used metadata from the discogs release will be applied to the tagged files.')\n tag_parser.add_argument('--artist', help='The artist to use for the tag.')\n tag_parser.add_argument('--album-artist', help='The album artist to use for the tag.')\n tag_parser.add_argument('--album', help='The album to use for the tag.')\n tag_parser.add_argument('--title', help='The title to use for the tag.')\n tag_parser.add_argument('--year', help='The year to use for the tag.')\n tag_parser.add_argument('--genre', help='The year to use for the tag.')\n tag_parser.add_argument('--track-number', help='The track number to use for the tag.')\n tag_parser.add_argument('--track-total', help='The track total to use for the tag.')\n tag_parser.add_argument('--disc-number', help='The disc number to use for the tag.')\n tag_parser.add_argument('--disc-total', help='The disc total to use for the tag.')\n tag_parser.add_argument('--comment', help='The comment for the tag.')\n artwork_parser = subparsers.add_parser('artwork', help='adds or removes artwork from a file')\n artwork_parser.add_argument(\n 'action', choices=['add', 'remove'], help='The artwork action to be performed. The artwork can be added or removed.')\n artwork_parser.add_argument(\n 'type', choices=['mp3', 'flac'], help='The type of file to apply the artwork to.')\n artwork_parser.add_argument(\n '--source', help='The destination file or directory to apply the artwork to. 
If there is no source then any artwork in the current directory will be used.')\n artwork_parser.add_argument(\n '--destination', help='The destination file or directory to apply the artwork to. If there is no destination then the current directory will be used.')\n mix_parser = subparsers.add_parser('mix', help='adds a mix')\n mix_parser.add_argument('source', help='the source of the mix')\n mix_parser.add_argument('--artist', help='The artist to use for the tag.')\n mix_parser.add_argument('--album', help='The album to use for the mix.')\n mix_parser.add_argument('--title', help='The title to use for the mix.')\n mix_parser.add_argument('--year', help='The year to use for the mix.')\n mix_parser.add_argument('--comment', help='The comment for the mix.')\n return parser", "def _add_create_command(subparser: _SubParsersAction):\r\n parser = subparser.add_parser('create', help='Create a new folder.') \r\n parser.add_argument(\r\n '--project',\r\n required=True,\r\n help='Project key of the project that the folder will be created under.'\r\n )\r\n parser.add_argument(\r\n '--name',\r\n required=False,\r\n help='Name of the folder.'\r\n )\r\n parser.add_argument(\r\n '--type',\r\n required=False,\r\n choices=['plan', 'case', 'cycle'],\r\n help='Type of folder to create.',\r\n )\r\n parser.set_defaults(cmd=CreateFolderCommand(parser))", "def add_subcommand(self, command):\n\n if self.subcommand_parser is None:\n self.subcommand_parser = self.parser.add_subparsers(\n dest='command', help='Please select one command mode below',\n title='Command modes'\n )\n self.subcommands = {}\n\n if not isinstance(command, ScriptCommand):\n raise ScriptError('Subcommand must be a ScriptCommand instance')\n\n parser = self.subcommand_parser.add_parser(\n command.name,\n help=command.short_description,\n description=command.description,\n epilog=command.epilog,\n formatter_class=argparse.RawTextHelpFormatter,\n )\n self.subcommands[command.name] = command\n command.script = self\n\n if callable(getattr(command, '__register_arguments__', None)):\n command.__register_arguments__(parser)\n\n return parser", "def add_new_subparser(subparsers, formatter_class=RawTextHelpFormatter):\n # TODO: add info on no args to description or help\n # Adds custom --help argument\n generic_parent_parser = cmd.argparse.get_generic_parent_parser()\n new_description = 'Create a new test module or page object'\n new_help = new_description\n new_parser = subparsers.add_parser(\n 'new', description=new_description, help=new_help,\n parents=[generic_parent_parser],\n formatter_class=formatter_class,\n add_help=False, epilog=cmd.argparse.ARGPARSE_EPILOG\n )\n # New <type> subparsers\n new_type_desc = 'Run \\'{} <type> --help\\' for details'.format(new_parser.prog)\n new_subparsers = new_parser.add_subparsers(\n title='File Types', description=new_type_desc, dest='type', metavar='<type>'\n )\n # New test parser\n new_test_parent_parser = get_new_parent_parser(\n parents=[generic_parent_parser], class_name_metavar='<TestCaseClass>',\n class_name_help='Name to use for the initial test case class'\n )\n new_test_description = 'Create a new test module'\n new_test_help = new_test_description\n new_subparsers.add_parser(\n 'test', description=new_test_description, help=new_test_help,\n parents=[new_test_parent_parser],\n add_help=False, epilog=cmd.argparse.ARGPARSE_EPILOG\n )\n # New page object parser\n new_page_parent_parser = get_new_parent_parser(\n parents=[generic_parent_parser], class_name_metavar='<PageObjectClass>',\n 
class_name_help='Name to use for the initial page object class'\n )\n new_page_description = 'Create a new page object module'\n new_page_help = new_page_description\n new_page_parser = new_subparsers.add_parser(\n 'page', description=new_page_description, help=new_page_help,\n parents=[new_page_parent_parser],\n add_help=False, epilog=cmd.argparse.ARGPARSE_EPILOG\n )\n # TODO: add optional --prototype arg with a list of valid page object prototype classes\n return new_parser", "def create_parser(self, prog_name, subcommand):\n parser = CommandParser(\n self, prog=\"%s %s\" % (os.path.basename(prog_name), subcommand),\n description=self.help or None,\n )\n parser.add_argument(\n '--version', action='version', version=self.get_version())\n\n self.add_arguments(parser)\n return parser", "def make_parser():\n parser = argparse.ArgumentParser(prog=__file__.replace(\".py\", \"\"),\n description='simple $PATH tool')\n parser.add_argument('-n', '--nocolor', dest=\"color\",\n action=\"store_false\", default=True,\n help='Turn off ANSI color codes.')\n parser.add_argument('-w', '--nowarn', action=\"store_true\",\n help='Turn off path warnings.')\n subs = parser.add_subparsers(title='subcommands',\n description='The subcommands')\n\n sub = subs.add_parser('replace', description=\"Search & Replace $PATH\")\n sub.set_defaults(cmd='path_replace')\n sub.add_argument('terms', nargs='+',\n help='Format: search:replace, search:replace, ...')\n\n sub = subs.add_parser('show', description=\"Show $PATH compoents\")\n sub.set_defaults(cmd='path_show')\n sub.add_argument('-n', '--nocolor', dest=\"color\",\n action=\"store_false\", default=True,\n help='Turn off ANSI color codes.')\n sub.add_argument('-w', '--nowarn', action=\"store_true\",\n help='Turn off path warnings.')\n\n sub = subs.add_parser('which', description=\"Platform agnostic `which -a`\")\n sub.set_defaults(cmd='path_which')\n sub.add_argument('look', help='Look for this executable')\n sub.add_argument('-n', '--nocolor', dest=\"color\",\n action=\"store_false\", default=True,\n help='Turn off ANSI color codes.')\n sub.add_argument('-v', '--version', action=\"store_true\",\n help='Show version of exact matches.')\n\n return parser", "def test_add_common_arguments_subparser():\n parser = argparse.ArgumentParser()\n subparsers = parser.add_subparsers(dest='action')\n sub = subparsers.add_parser('sub')\n add_common_arguments(sub)\n\n options = parser.parse_args(['sub'])\n assert hasattr(options, 'config')\n assert hasattr(options, 'configdir')\n assert options.config == 'default'\n assert options.configdir == config.DEFAULT_HOMEDIR\n\n options = parser.parse_args(['sub', '-c', 'test-short'])\n assert options.config == 'test-short'\n\n options = parser.parse_args(['sub', '--config', 'test-long'])\n assert options.config == 'test-long'\n\n options = parser.parse_args(['sub', '--config-dir', 'test-long'])\n assert options.configdir == 'test-long'\n\n options = parser.parse_args(\n ['sub', '-c', 'test-short', '--config-dir', 'test-long-dir'])\n assert options.config == 'test-short'\n assert options.configdir == 'test-long-dir'\n\n options = parser.parse_args(\n ['sub', '--config', 'test-long', '--config-dir', 'test-long-dir'])\n assert options.config == 'test-long'\n assert options.configdir == 'test-long-dir'", "def add_command(self, name, desc, func=None):\n assert type(name) == str\n assert type(desc) == str\n if func is not None:\n assert callable(func)\n\n def wrap_argparse(parser, args, func):\n \"\"\"Convenience function calls argparse with list of 
args and calls func with them\"\"\"\n pargs = parser.parse_args(args)\n return func(**vars(pargs))\n\n assert name not in self.cmd2func, \"Command with same name already defined on this level!\"\n\n self.cmd_list.append((name, desc))\n if func is None:\n m = necapy(name=name, desc=desc)\n self.cmd2func[name] = m.parse\n return m\n else:\n ap = argparse.ArgumentParser(description=desc)\n self.cmd2func[name] = lambda args: wrap_argparse(ap, args, func)\n return ap", "def add_subcommands(self, name='subcmd', arg_kws=None, optional=False):\n if self._subcmds is not None:\n raise RuntimeError(\"This config already has subcommands.\")\n if name in self.ReservedVariables or name[0] == '_':\n raise ValueError(\"Config variable name '%s' is reserved.\" % name)\n if name in self.confvariable:\n raise ValueError(\"Config variable '%s' is already defined.\" % name)\n if arg_kws is None:\n arg_kws = dict(title=\"subcommands\")\n else:\n arg_kws = dict(arg_kws)\n arg_kws['dest'] = name\n subparsers = self.argparser.add_subparsers(**arg_kws)\n var = ConfigSubCmds(name, optional, self, subparsers)\n self.confvariable[name] = var\n self.confvariables.append(var)\n self._subcmds = var\n return var", "def create_parser(self, prog_name, subcommand):\r\n return OptionParser(prog=prog_name,\r\n usage=self.usage(subcommand),\r\n version='',\r\n add_help_option = False,\r\n option_list=self.option_list)", "def add_cmd(self, name: str, help_str: str, cmd_fn: typing.Callable, arg: str = None, arg_help: str = None):\n self.cmd_names.append(name)\n cmd = self.cli_subparsers.add_parser(name, help=help_str)\n cmd.set_defaults(func=cmd_fn)\n if arg is not None:\n cmd.add_argument(arg, help=arg_help)", "def add_rm_parser(subparsers):\n rm_parser = subparsers.add_parser(\"rm\")\n rm_parser.set_defaults(func=rm_cli.main)\n rm_parser.add_argument('--scenario', '-s',\n dest=\"scenario\",\n help='Predefined scenario to use for exection')\n rm_parser.add_argument('--platform', dest=\"platform\",\n help=\"The platform to use \\\n(podman, docker, terraform, shell, python)\")\n rm_parser.add_argument('--vars', dest=\"vars\",\n default=\"\",\n help=\"extra variables\")\n rm_parser.add_argument('--debug', dest=\"debug\",\n action=\"store_true\",\n help=\"Enable debug level logging\")", "def register_command(subparser):\n update_parser = subparser.add_parser('update', help='Run the log files through an updater. 
Used to update '\n 'between versions of autology')\n update_parser.set_defaults(func=_main)\n\n # Arguments\n update_parser.add_argument('-f', '--files', help='Update the files that are currently defined in the log '\n 'directories', action='store_true')\n update_parser.add_argument('-t', '--templates', help='Install a new output template', action='store_true')\n update_parser.add_argument('-T', '--template-definition', help='Define a template definition to install',\n default=template_utilities.DEFAULT_TEMPLATES_URL)", "def add_subparser(\n subparsers: SubParsersAction, parents: List[argparse.ArgumentParser]\n) -> None:\n run_parser = subparsers.add_parser(\n \"run\",\n parents=parents,\n conflict_handler=\"resolve\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n help=\"Starts a Rasa server with your trained model.\",\n )\n run_parser.set_defaults(func=run)\n\n run_subparsers = run_parser.add_subparsers()\n sdk_subparser = run_subparsers.add_parser(\n \"actions\",\n parents=parents,\n conflict_handler=\"resolve\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n help=\"Runs the action server.\",\n )\n sdk_subparser.set_defaults(func=run_actions)\n\n arguments.set_run_arguments(run_parser)\n arguments.set_run_action_arguments(sdk_subparser)", "def create_parser():\n parser = argparse.ArgumentParser()\n subparsers = parser.add_subparsers(title=\"Commands\", dest=\"subparser_name\")\n subparsers.add_parser(\"generate-settings\", help=\"Generate settings.json to install \"\n \"Gluu Cloud Native Edition non-interactively\")\n subparsers.add_parser(\"install\", help=\"Install Gluu Cloud Native Edition using Kustomize. Depreciated > 4.3.\")\n subparsers.add_parser(\"install-no-wait\", help=\"Install Gluu Cloud Native Edition using Kustomize. \"\n \"Depreciated > 4.3. \"\n \"There will be no wait time between installing services. \"\n \"Pods may look like they are restarting but they will \"\n \"be waiting for hierarchy \"\n \"pods to be running\")\n subparsers.add_parser(\"install-ldap-backup\", help=\"Install ldap backup cronjob only.\")\n subparsers.add_parser(\"restore\", help=\"Install Gluu Cloud Native Edition with a \"\n \"running database and previous configuration using Kustomize.\"\n \"Depreciated > 4.3\")\n subparsers.add_parser(\"uninstall\", help=\"Uninstall Gluu that was installed using Kustomize. \"\n \"Depreciated > 4.3\")\n subparsers.add_parser(\"upgrade\", help=\"Upgrade Gluu Cloud Native Edition using Kustomize. Depreciated > 4.3\")\n subparsers.add_parser(\"upgrade-values-yaml\", help=\"Upgrade Gluu Cloud Native Edition\")\n subparsers.add_parser(\"install-couchbase\", help=\"Install Couchbase only. Used with installation of Gluu with Helm\")\n subparsers.add_parser(\"install-couchbase-backup\", help=\"Install Couchbase backup only.\")\n subparsers.add_parser(\"uninstall-couchbase\", help=\"Uninstall Couchbase only.\")\n subparsers.add_parser(\"helm-install\", help=\"Install Gluu Cloud Native Edition using helm. \"\n \"This also installs the nginx-ingress chart\")\n subparsers.add_parser(\"helm-uninstall\", help=\"Uninstall Gluu Cloud Native Edition using helm. \"\n \"This also uninstalls the nginx-ingress chart\")\n\n subparsers.add_parser(\"helm-install-gluu\", help=\"Install Gluu Cloud Native Edition using helm. \"\n \"This assumes nginx-ingress is installed\")\n subparsers.add_parser(\"helm-uninstall-gluu\", help=\"Uninstall Gluu Cloud Native Edition using helm. 
\"\n \"This only uninstalls Gluu\")\n subparsers.add_parser(\"version\", help=\"Outputs version of pygluu installer.\")\n return parser", "def sub_command_group(self, name=None, **kwargs):\r\n def decorator(func):\r\n if self.child_type is None:\r\n if len(self.registerable.options) > 0:\r\n self.registerable.options = []\r\n self.child_type = Type.SUB_COMMAND_GROUP\r\n \r\n new_func = SubCommandGroup(func, name=name, **kwargs)\r\n self.children[new_func.name] = new_func\r\n self.registerable.options.append(new_func.option)\r\n return new_func\r\n return decorator", "def calcs_parser(subparser, name, help_msg):\n parser = subparser.add_parser(name,\\\n help=help_msg)\n parser = add_arg(parser, 'parallel')\n parser = add_arg(parser, 'xvars')\n parser = add_arg(parser, 'time')\n parser = add_arg(parser, 'modif_time')\n parser = add_arg(parser, 'modif_coord')\n parser = add_arg(parser, 'modif_var')\n parser = add_arg(parser, 'eswitch')\n parser = add_arg(parser, 'fswitch')\n parser.add_argument(\"args\", metavar='SELAFIN file', nargs=\"+\")\n\n return subparser", "def cog_subcommand(\n *,\n base,\n subcommand_group=None,\n name=None,\n description: str = None,\n base_description: str = None,\n base_desc: str = None,\n subcommand_group_description: str = None,\n sub_group_desc: str = None,\n guild_ids: typing.List[int] = None,\n options: typing.List[dict] = None,\n connector: dict = None\n):\n base_description = base_description or base_desc\n subcommand_group_description = subcommand_group_description or sub_group_desc\n\n def wrapper(cmd):\n desc = description or inspect.getdoc(cmd)\n if options is None:\n opts = manage_commands.generate_options(cmd, desc, connector)\n else:\n opts = options\n\n _sub = {\n \"func\": cmd,\n \"name\": name or cmd.__name__,\n \"description\": desc,\n \"base_desc\": base_description,\n \"sub_group_desc\": subcommand_group_description,\n \"guild_ids\": guild_ids,\n \"api_options\": opts,\n \"connector\": connector,\n }\n return CogSubcommandObject(_sub, base, name or cmd.__name__, subcommand_group)\n\n return wrapper", "def create_parser(self, prog_name, subcommand):\n return OptionParser(prog=prog_name,\n usage=self.usage(subcommand),\n version=self.get_version(),\n option_list=self.option_list)", "def make_parser():\n description = 'Interact with Twitter from command line interface'\n parser = argparse.ArgumentParser(description=description)\n\n subparsers = parser.add_subparsers(help='Available commands')\n\n # Subparsers for the \"update status / tweet\" command\n put_parser = subparsers.add_parser('tweet', help='Update status / post a tweet')\n put_parser.add_argument('message', help='The message to post, must be no longer than 140 characters')\n put_parser.set_defaults(command=\"tweet\")\n\n # Subparsers for the \"see homepage tweets\" command\n put_parser = subparsers.add_parser('home', help='See timeline of tweets on your Twitter homepage')\n put_parser.set_defaults(command='home')\n\n # Subparsers for the \"get trending posts\" command\n put_parser = subparsers.add_parser('trends', help='See trending topics globally or in your location')\n put_parser.add_argument('location', default='world', nargs='?',\n help='The name of a location of interest')\n put_parser.set_defaults(command='trends')\n\n return parser", "def chop_parser(subparser):\n parser = subparser.add_parser('chop',\\\n help='will chop a SELAFIN given a new set of time range and step (but'\\\n ' alter is better)')\n parser = add_arg(parser, 'xvars')\n parser = add_arg(parser, 'time')\n parser 
= add_arg(parser, 'eswitch')\n parser = add_arg(parser, 'fswitch')\n parser = add_arg(parser, 'replace')\n parser = add_arg(parser, 'parallel')\n parser.add_argument(\"args\", metavar='SELAFIN file', nargs=\"+\")\n\n return subparser", "def make_parser():\n\tlogging.info(\"Constructing parser\")\n\tdescription = \"Store and retrieve snippets of text\"\n\tparser = argparse.ArgumentParser(description = description)\n\n\tsubparsers = parser.add_subparsers(dest=\"command\", help=\"Available commands\")\n\n\t# Subparser for the put command\n\tlogging.debug(\"Constructing put subparser\")\n\tput_parser = subparsers.add_parser(\"put\", help = \"Store a snippet\")\n\tput_parser.add_argument(\"name\", help=\"The name of the snippet\")\n\tput_parser.add_argument(\"snippet\", help=\"The snippet\")\n\tput_parser.add_argument(\"filename\", default=\"snippets.csv\", nargs=\"?\", help=\"The snippet filename\")\n\t\n\t# Subparser for the get command\n\tlogging.debug(\"Constructing get subparser\")\n\tget_parser = subparsers.add_parser(\"get\", help=\"Get a snippet\")\n\tget_parser.add_argument(\"name\", help=\"The name of the snippet\")\n\tget_parser.add_argument(\"filename\", default=\"snippets.csv\", nargs=\"?\", \n\t\thelp=\"The Snippet filename\")\n\n\t# Subparser for the search command\n\tlogging.debug(\"Constructing search subparser\")\n\tsearch_parser = subparsers.add_parser(\"search\", help=\"Search for a snippet\")\n\tsearch_parser.add_argument(\"snippet_portion\", help=\"The snippet you're searching for\")\n\tsearch_parser.add_argument(\"filename\", default=\"snippets.csv\", nargs=\"?\", \n\t\thelp=\"The Snippet filename\")\n\n\t# Subparser for the update command\n\tlogging.debug(\"Constructing update subparser\")\n\tupdate_parser = subparsers.add_parser(\"update\", help=\"Search for a snippet\")\n\tupdate_parser.add_argument(\"snippet_original\", help=\"The snippet you're searching for\")\n\tupdate_parser.add_argument(\"filename\", default=\"snippets.csv\", nargs=\"?\", \n\t\thelp=\"The Snippet filename\")\n\n\t# Subparser for the update2 command\n\tlogging.debug(\"Constructing update2 subparser\")\n\tupdate2_parser = subparsers.add_parser(\"update2\", help=\"Search for a snippet\")\n\tupdate2_parser.add_argument(\"snippet_original\", help=\"The snippet you're searching for\")\n\tupdate2_parser.add_argument(\"filename\", default=\"snippets.csv\", nargs=\"?\", \n\t\thelp=\"The Snippet filename\")\n\tupdate2_parser.add_argument(\"change\", help=\"The snippet you want to change it to\")\n\n\treturn parser", "def create_parser(self, prog_name, subcommand):\r\n self.prog_name = \"{} {}\".format(prog_name, subcommand)\r\n return super(TrackedCommand, self).create_parser(prog_name, subcommand)", "def subcommand(wrapped):\n def callback(scanner, name, ob):\n scanner.subcommands[ob.name] = ob\n venusian.attach(wrapped, callback, category='subcommands')\n return wrapped", "def _parser(s, remainder):\n parser = _ArgumentParser()\n def run():\n return s.parse(parser, remainder)\n parser.run = run\n return parser", "def add_arguments(self, parser):\n\n cmd = self # make sure we can use sub parser in django. 
via stack_overflow\n\n class SubParser(CommandParser):\n \"\"\"Use to avoid the error when using sub parser in django's add_arguments method.\"\"\"\n def __init__(self, **kwargs):\n super(SubParser, self).__init__(cmd, **kwargs)\n\n # add custom sub commands.\n\n subparsers = parser.add_subparsers(\n title=\"sub commands\",\n parser_class=SubParser,\n dest='sub_command',\n help='Sub commands you can use.'\n )\n\n # actions to start or stop socket server.\n\n server = subparsers.add_parser('server', help=\"Server Commands\")\n server.add_argument(\n 'action',\n metavar='ACTION',\n choices=self.socket_server_actions,\n help='Actions is: <%s>' % '|'.join(self.socket_server_actions),\n )\n\n # actions of targets when calling server is running.\n\n proxy = subparsers.add_parser('proxy', help=\"Proxy Commands\")\n proxy.add_argument(\n '-a', '--action',\n metavar='ACTION',\n required=True,\n choices=self.proxy_job_actions,\n help='Actions is: <%s>' % '|'.join(self.proxy_job_actions)\n )\n proxy.add_argument(\n '-t', '--targets',\n metavar='TARGET',\n nargs='*',\n help='Targets can be empty which means ALL, you can list targets by <./manage.py mirrordata proxy -a ping>.'\n )", "def get_commands(bot):\n new_commands = []\n\n new_commands.append(Command(\n 'mycommand', subcommands=[\n SubCommand(\n Opt('myoption'),\n doc='This is a simple command with a single required option.'),\n SubCommand(\n Opt('custom', optional=True),\n Opt('attached', optional=True, attached='attached argument'),\n doc='This has two different optional options, one without an attached '\n 'parameter, and the other requiring an attached parameter.'),\n SubCommand(\n Opt('trailing'),\n Arg('arg 1'),\n Arg('arg 2'),\n Arg('arg 3', argtype=ArgTypes.SPLIT, additional='more args'),\n doc='This command requires a lot of trailing arguments.'),\n SubCommand(\n Opt('grouped'),\n Arg('grouped arguments', argtype=ArgTypes.MERGED),\n doc='This will group all given arguments as a single string.'),\n SubCommand(\n Opt('complex', attached='attached'),\n Opt('other', optional=True, attached='also required'),\n Arg('arg 1'),\n Arg('arg 2', argtype=ArgTypes.SPLIT_OPTIONAL, additional='more args'),\n doc='The complex option has a required attached parameter, and the '\n '\\'other\\' option also has a required attached parameter if '\n '\\'other\\' is included. Additionally, there will be a requirement '\n 'of at least 1 trailing argument.'),\n SubCommand(\n Opt('marquee'),\n Arg('text', argtype=ArgTypes.MERGED,\n check=lambda b, m, v, *a: len(v) <= 100,\n check_error=\"Marquee message must be less than 100 characters long.\"),\n doc='Creates a marquee that loops 3 times.')],\n shortcuts=[\n Shortcut(\n 'complex', 'complex {attached} other {other} {arg 1} {arg 2}',\n Arg('attached'), Arg('other'), Arg('arg 1'),\n Arg('arg 2', argtype=ArgTypes.SPLIT_OPTIONAL)),\n Shortcut(\n 'marquee', 'marquee {text}', Arg('text', argtype=ArgTypes.MERGED))],\n description='Your command description here.',\n other='This text is optional - it just shows up after everything '\n 'else. Quick note, all of the commands here can only be used by '\n 'bot moderators or above, as indicated by elevated_level. 
A '\n 'level of 2 would mean only server owners or above can use the '\n 'command, and a level of 3 would restrict the command to only '\n 'the bot owners.',\n elevated_level=1, category='demo'))\n\n new_commands.append(Command(\n 'myothercommand', subcommands=[\n SubCommand(\n Arg('text', argtype=ArgTypes.MERGED_OPTIONAL),\n doc='This traps all further commands from being executed.'),\n SubCommand(\n Opt('order'), Opt('matters'),\n doc='It is impossible to access this command because the first '\n 'subcommand will always be satisfied first. Order of the '\n 'subcommand matters!'),\n SubCommand(\n Opt('sample'), Opt('foo'), Opt('bar'),\n doc='Also impossible to access. This subcommand just adds some '\n 'keywords to the command.')],\n description='Only bot owners can see this text!',\n other='Note that no shortcuts were defined. They, too, are optional. '\n 'Also, this command is hidden, which means that only the bot '\n 'owners can see this command listed from the help command. '\n 'However, unless the command is configured with an elevated '\n 'permissions level, any user can still execute the command. '\n 'Users still will not be able to see the specific help for this '\n 'command, though. Lastly, this command is disabled in DMs.',\n hidden=True, allow_direct=False, category='demo'))\n\n new_commands.append(Command(\n 'notify', subcommands=[\n SubCommand(\n Arg('text', argtype=ArgTypes.MERGED),\n doc='Notify the owners with some text!')],\n other='This command uses a custom function. It is called with the '\n 'same arguments as get_response. The command will show up to '\n 'all users in the help command, but can only be used by server '\n 'owners, as it is disallowed in direct messages.',\n elevated_level=2, allow_direct=False, function=custom_notify,\n category='demo'))\n\n new_commands.append(Command(\n 'wait', other='Use this command to demo the wait_for functionality', category='demo'))\n\n return new_commands", "def add_command_parsers(parser, logparser):\n subparsers = parser.add_subparsers(metavar='Command')\n help_text = 'ONE OF THE FOLLOWING:\\n'\n available_commands = find_commands(\n os.path.join(os.path.abspath(os.path.dirname(__file__)), os.path.pardir, 'functions'))\n max_length = max([len(a) for a in available_commands])\n for command in available_commands:\n child_parser = subparsers.add_parser(command, parents=[logparser])\n call = importlib.import_module('functions.%s'% command)\n if hasattr(call, 'set_argparser'):\n call.set_argparser(child_parser)\n else:\n child_parser.description = 'Description is missing'\n help_text += command + \": \" + \" \"*(max_length-len(command)) + ('\\n'+' '*(max_length+2)\n ).join(textwrap.wrap(child_parser.description,60)) + '\\n'\n child_parser.set_defaults(func=call.main)\n subparsers.help = help_text + '\\nType \"Command --help\" for more information about given command'", "def fill_subparser(subparser):\n return convert_dogs_vs_cats", "def build_arg_parser():\n\n main = ArgumentParser(description='AMFinder command-line arguments.',\n allow_abbrev=False,\n formatter_class=RawTextHelpFormatter)\n\n subparsers = main.add_subparsers(dest='run_mode', required=True,\n help='action to be performed.')\n\n _ = training_subparser(subparsers)\n _ = prediction_subparser(subparsers)\n _ = diagnostic_subparser(subparsers)\n\n return main", "def setup_args() -> argparse.ArgumentParser:\n main_parser = argparse.ArgumentParser(prog=\"gh\")\n subparsers = main_parser.add_subparsers(dest=\"subparser\")\n command_parser = 
subparsers.add_parser(\"commands\", help=\"Runs a command\")\n command_parser.add_argument(\n \"choice\",\n help=\"The chosen command to run\",\n choices=gh.commands.OPTIONS.keys(),\n )\n analytics_parser = subparsers.add_parser(\"analytics\", help=\"Runs an analysis\")\n analytics_parser.add_argument(\n \"choice\",\n help=\"The chosen analysis to run\",\n choices=gh.analytics.OPTIONS.keys(),\n )\n return main_parser", "def create_subparser(\n subparsers: _SubParsersAction,\n fn: Callable[..., None],\n *,\n with_jobs: bool = False,\n with_no_lksp: bool = False,\n with_gargs: bool = False,\n with_build_dir: bool = False,\n with_libs: bool = False,\n with_no_mypy: bool = False,\n accept_unknown_args: bool = False,\n) -> ArgumentParser:\n subparser = subparsers.add_parser(\n name=fn.__name__.replace('_', '-'),\n help=fn.__doc__,\n add_help=not accept_unknown_args\n )\n\n subparser.add_argument(\n \"--build-mode\", \"-b\", choices=(\"dev\", \"prod\"), default=\"dev\",\n help=\"Select a preset for build options.\"\n )\n LibraryType.add_argument(subparser)\n\n if with_jobs:\n subparser.add_argument(\n \"--jobs\", \"-j\", type=int, default=get_cpu_count(),\n help=\"Number of parallel jobs to spawn in parallel (default: your\"\n \" number of cpu).\"\n )\n if with_no_lksp:\n subparser.add_argument(\n \"--no-langkit-support\", action=\"store_true\",\n help=\"Assume that Langkit_Support is already built and installed.\"\n \" We rebuild it by default, for the convenience of\"\n \" developers.\"\n )\n if with_gargs:\n subparser.add_argument(\n '--gargs', action='append',\n help='Options appended to GPRbuild invocations.'\n )\n if with_build_dir:\n subparser.add_argument(\n '--build-dir',\n help='Use a non-default build directory. This allows out-of-tree'\n ' builds.'\n )\n if with_libs:\n subparser.add_argument(\n \"--lib\", \"-l\", choices=(\"python\", \"lkt\"), action=\"append\",\n help=\"Select which libraries on which to operate. By default, work\"\n \" on all libraries.\"\n )\n if with_no_mypy:\n subparser.add_argument(\n \"--no-mypy\", action=\"store_true\",\n help=\"Whether to disable type-checking with mypy.\"\n )\n\n def wrapper(args: Namespace, rest: List[str]):\n if len(rest) > 0:\n print(\"ERROR - unhandled command line arguments: {}\".format(\n \", \".join(rest)\n ))\n sys.exit(1)\n fn(args)\n\n subparser.set_defaults(func=fn if accept_unknown_args else wrapper)\n\n return subparser", "def create_parser():\n helpdict = create_parser.helpdict\n # Customized usage, for more verbosity concerning these subparsers options.\n usage = \"\"\"%(prog)s [-h] [--version] {run,info} ... 
\"\"\"\n usage += tw.dedent(\"\"\"\\n\n From more help on each of the subcommands, type:\n %(prog)s run -h\n %(prog)s info -h\\n\\n\"\"\")\n\n # parser = ap.ArgumentParser(\n #parser = MpArgumentParser(\n #formatter_class=ap.ArgumentDefaultsHelpFormatter,\n #description='Monte Python, a Monte Carlo code in Python',\n #usage=usage)\n parser = initialise_parser(\n description='Monte Python, a Monte Carlo code in Python', usage=usage)\n\n # -- add the subparsers\n subparser = parser.add_subparsers(dest='subparser_name')\n\n ###############\n # run the MCMC\n runparser = add_subparser(subparser, 'run', help=\"run the MCMC chains\")\n\n # -- number of steps (OPTIONAL)\n runparser.add_argument('-N', help=helpdict['N'], type=positive_int,\n dest='N')\n # -- output folder (OBLIGATORY)\n runparser.add_argument('-o', '--output', help=helpdict['o'], type=str,\n dest='folder')\n # -- parameter file (OBLIGATORY)\n runparser.add_argument('-p', '--param', help=helpdict['p'],\n type=existing_file, dest='param')\n # -- covariance matrix (OPTIONAL)\n runparser.add_argument('-c', '--covmat', help=helpdict['c'],\n type=existing_file, dest='cov')\n # -- jumping method (OPTIONAL)\n runparser.add_argument('-j', '--jumping', help=helpdict['j'],\n dest='jumping', default='fast',\n choices=['global', 'sequential', 'fast'])\n # -- sampling method (OPTIONAL)\n runparser.add_argument('-m', '--method', help=helpdict['m'],\n dest='method', default='MH',\n choices=['MH', 'NS', 'PC', 'CH', 'IS', 'Der', 'Fisher'])\n # -- update Metropolis Hastings (OPTIONAL)\n runparser.add_argument('--update', help=helpdict['update'], type=int,\n dest='update', default=50)\n # -- update Metropolis Hastings with an adaptive jumping factor (OPTIONAL)\n runparser.add_argument('--superupdate', help=helpdict['superupdate'], type=int,\n dest='superupdate', default=0)\n # -- superupdate acceptance rate argument (OPTIONAL)\n runparser.add_argument('--superupdate-ar', help=helpdict['superupdate-ar'], type=float,\n dest='superupdate_ar', default=0.26)\n # -- superupdate acceptance rate tolerance argument (OPTIONAL)\n runparser.add_argument('--superupdate-ar-tol', help=helpdict['superupdate-ar-tol'], type=float,\n dest='superupdate_ar_tol', default=0.01)\n # -- adaptive jumping factor Metropolis Hastings (OPTIONAL)\n runparser.add_argument('--adaptive', help=helpdict['adaptive'], type=int,\n dest='adaptive', default=0)\n # -- adaptive ts argument (OPTIONAL)\n runparser.add_argument('--adaptive-ts', help=helpdict['adaptive-ts'], type=int,\n dest='adaptive_ts', default=1000)\n\n # -- jumping factor (OPTIONAL)\n runparser.add_argument('-f', help=helpdict['f'], type=float,\n dest='jumping_factor', default=2.4)\n # -- temperature (OPTIONAL)\n runparser.add_argument('-T', help=helpdict['T'], type=float,\n dest='temperature', default=1.0)\n # -- minimize (OPTIONAL)\n runparser.add_argument('--minimize', help=helpdict['minimize'],\n action='store_true')\n # -- minimize argument, minimization tolerance (OPTIONAL)\n runparser.add_argument('--minimize-tol', help=helpdict['minimize-tol'], type=float,\n dest='minimize_tol', default=0.00001)\n # -- fisher (OPTIONAL)\n runparser.add_argument('--fisher', help=helpdict['fisher'],\n action='store_true')\n # -- fisher argument (OPTIONAL)\n runparser.add_argument('--fisher-asymmetric', help=helpdict['fisher-asymmetric'],\n dest='fisher_asymmetric',action='store_true')\n # -- fisher step iteration (OPTIONAL)\n runparser.add_argument('--fisher-step-it', help=helpdict['fisher-step-it'],\n dest='fisher_step_it', 
default=10)\n # -- fisher step iteration argument, -deltaloglkl target (OPTIONAL)\n runparser.add_argument('--fisher-delta', help=helpdict['fisher-delta'], type=float,\n dest='fisher_delta', default=0.1)\n # -- fisher step iteration argument, -deltaloglkl tolerance (OPTIONAL)\n runparser.add_argument('--fisher-tol', help=helpdict['fisher-tol'], type=float,\n dest='fisher_tol', default=0.05)\n # -- fisher symmetric likelihood assumption threshold (OPTIONAL)\n runparser.add_argument('--fisher-sym-lkl', help=helpdict['fisher-sym-lkl'], type=float,\n dest='fisher_sym_lkl', default=0.1)\n # -- configuration file (OPTIONAL)\n runparser.add_argument('--conf', help=helpdict['conf'],\n type=str, dest='config_file',\n default='default.conf')\n # -- arbitrary numbering of an output chain (OPTIONAL)\n runparser.add_argument('--chain-number', help=helpdict['chain-number'])\n # -- stop run after first successful update using --update (EXPERIMENTAL)\n runparser.add_argument('--stop-after-update', help=helpdict['stop-after-update'],\n dest='stop_after_update', action='store_true')\n # display option\n runparser.add_argument('--display-each-chi2', help=helpdict['display-each-chi2'],\n dest='display_each_chi2', action='store_true')\n # -- parallel chains without MPI (OPTIONAL)\n runparser.add_argument('--parallel-chains', help=helpdict['parallel-chains'],\n action='store_true')\n\n ###############\n # MCMC restart from chain or best fit file\n runparser.add_argument('-r', '--restart', help=helpdict['r'],\n type=existing_file, dest='restart')\n runparser.add_argument('-b', '--bestfit', dest='bf', help=helpdict['b'],\n type=existing_file)\n\n ###############\n # Silence the output (no print on the console)\n runparser.add_argument('--silent', help=helpdict['silent'],\n action='store_true')\n ###############\n # Adding new derived parameters to a run\n runparser.add_argument(\n '--Der-target-folder', dest=\"Der_target_folder\",\n help=helpdict['Der-target-folder'], type=str, default='')\n runparser.add_argument(\n '--Der-param-list', dest='derived_parameters',\n help=helpdict['Der-param-list'], type=str, default='', nargs='+')\n\n ###############\n # Importance Sampling Arguments\n runparser.add_argument(\n '--IS-starting-folder', dest='IS_starting_folder',\n help=helpdict['IS-starting-folder'], type=str, default='', nargs='+')\n\n ###############\n # We need the following so the run does not crash if one of the external\n # samplers is not correctly installed despite not being used\n from contextlib import contextmanager\n import sys, os\n\n @contextmanager\n def suppress_stdout():\n with open(os.devnull, \"w\") as devnull:\n old_stdout = sys.stdout\n sys.stdout = devnull\n try:\n yield\n finally:\n sys.stdout = old_stdout\n\n ###############\n # MultiNest arguments (all OPTIONAL and ignored if not \"-m=NS\")\n # The default values of -1 mean to take the PyMultiNest default values\n try:\n with suppress_stdout():\n from MultiNest import NS_prefix, NS_user_arguments\n NSparser = runparser.add_argument_group(\n title=\"MultiNest\",\n description=\"Run the MCMC chains using MultiNest\"\n )\n for arg in NS_user_arguments:\n NSparser.add_argument('--'+NS_prefix+arg,\n default=-1,\n **NS_user_arguments[arg])\n except ImportError:\n # Not defined if not installed\n pass\n except:\n warnings.warn('PyMultiNest detected but MultiNest likely not installed correctly. 
'\n 'You can safely ignore this if not running with option -m NS')\n\n ###############\n # PolyChord arguments (all OPTIONAL and ignored if not \"-m=PC\")\n # The default values of -1 mean to take the PyPolyChord default values\n try:\n with suppress_stdout():\n from PolyChord import PC_prefix, PC_user_arguments\n PCparser = runparser.add_argument_group(\n title=\"PolyChord\",\n description=\"Run the MCMC chains using PolyChord\"\n )\n for arg in PC_user_arguments:\n PCparser.add_argument('--'+PC_prefix+arg,\n default=-1,\n **PC_user_arguments[arg])\n except ImportError:\n # Not defined if not installed\n pass\n except:\n warnings.warn('PyPolyChord detected but PolyChord likely not installed correctly. '\n 'You can safely ignore this if not running with option -m PC')\n\n ###############\n # CosmoHammer arguments (all OPTIONAL and ignored if not \"-m=CH\")\n # The default values of -1 mean to take the CosmoHammer default values\n try:\n with suppress_stdout():\n from cosmo_hammer import CH_prefix, CH_user_arguments\n CHparser = runparser.add_argument_group(\n title=\"CosmoHammer\",\n description=\"Run the MCMC chains using the CosmoHammer framework\")\n for arg in CH_user_arguments:\n CHparser.add_argument('--'+CH_prefix+arg,\n default=-1,\n **CH_user_arguments[arg])\n except ImportError:\n # Not defined if not installed\n pass\n except:\n warnings.warn('CosmoHammer detected but emcee likely not installed correctly. '\n 'You can safely ignore this if not running with option -m CH')\n\n ###############\n # Information\n infoparser = add_subparser(subparser, 'info',\n help=\"analyze the MCMC chains\")\n\n # -- folder to analyze\n infoparser.add_argument('files', help=helpdict['files'],\n nargs='+')\n # Silence the output (no print on the console)\n infoparser.add_argument('--silent', help=helpdict['silent'],\n action='store_true')\n # -- to only write the covmat and bestfit, without computing the posterior\n infoparser.add_argument('--minimal', help=helpdict['minimal'],\n action='store_true')\n # -- number of bins (defaulting to 20)\n infoparser.add_argument('--bins', help=helpdict['bins'],\n type=int, default=20)\n # -- temperature (OPTIONAL)\n infoparser.add_argument('-T', help=helpdict['T'], type=float,\n dest='temperature', default=1.0)\n # -- deprecated: remove the mean-likelihood line\n infoparser.add_argument('--no-mean', help=helpdict['no-mean'],\n dest='mean_likelihood_old', action='store_false')\n # -- plot the mean-likelihood line\n infoparser.add_argument('--plot-mean', help=helpdict['plot-mean'],\n dest='mean_likelihood', action='store_true')\n # -- to remove the mean and 68% limits on top of each 1D plot\n infoparser.add_argument('--short-title-1d', help=helpdict['short-title-1d'],\n dest='short_title_1d', action='store_true')\n # -- possible plot file describing custom commands\n infoparser.add_argument('--extra', help=helpdict['extra'],\n dest='optional_plot_file', default='')\n # -- if you just want the covariance matrix, use this option\n infoparser.add_argument('--noplot', help=helpdict['noplot'],\n dest='plot', action='store_false')\n # -- if you just want to output 1d posterior distributions (faster)\n infoparser.add_argument('--noplot-2d', help=helpdict['noplot-2d'],\n dest='plot_2d', action='store_false')\n # -- if you just want to output triangle with 2d contours\n infoparser.add_argument('--noplot-2d-diag', help=helpdict['noplot-2d-diag'],\n dest='plot_diag', action='store_false')\n # -- when plotting 2d posterior distribution, use contours and not contours\n # 
filled (might be useful when comparing several folders)\n infoparser.add_argument('--contours-only', help=helpdict['contours-only'],\n dest='contours_only', action='store_true')\n # -- if you want to output every single subplots\n infoparser.add_argument('--all', help=helpdict['all'], dest='subplot',\n action='store_true')\n # -- to change the extension used to output files (pdf is the default one,\n # but takes long, valid options are png and eps)\n infoparser.add_argument('--ext', help=helpdict['ext'],\n type=str, dest='extension', default='pdf')\n # -- to set manually the number of plots per hoorizontal raw in 1d plot\n infoparser.add_argument('--num-columns-1d', help=helpdict['num-columns-1d'],\n type=int, dest='num_columns_1d')\n # -- also analyze the non-markovian part of the chains\n infoparser.add_argument('--keep-non-markovian', help=helpdict['keep-non-markovian'],\n dest='markovian', action='store_false')\n # -- force only analyzing the markovian part of the chains\n infoparser.add_argument('--keep-only-markovian', help=helpdict['keep-only-markovian'],\n dest='only_markovian', action='store_true')\n # -- fraction of chains to be analyzed after burn-in removal (defaulting to 1.0)\n infoparser.add_argument('--keep-fraction', help=helpdict['keep-fraction'],\n type=float, dest='keep_fraction', default=1.0)\n # -- calculate the covariant matrix when analyzing the chains\n infoparser.add_argument('--want-covmat', help=helpdict['want-covmat'],\n dest='want_covmat', action='store_true')\n # -------------------------------------\n # Further customization\n # -- fontsize of plots (defaulting to 16)\n infoparser.add_argument('--fontsize', help=helpdict['fontsize'],\n type=int, default=16)\n # -- ticksize of plots (defaulting to 14)\n infoparser.add_argument('--ticksize', help=helpdict['ticksize'],\n type=int, default=14)\n # -- linewidth of 1d plots (defaulting to 4, 2 being a bare minimum for\n # legible graphs\n infoparser.add_argument('--line-width', help=helpdict['line-width'],\n type=int, default=4)\n # -- number of decimal places that appear on the tick legend. If you want\n # to increase the number of ticks, you should reduce this number\n infoparser.add_argument('--decimal', help=helpdict['decimal'], type=int,\n default=3)\n # -- number of ticks that appear on the graph.\n infoparser.add_argument('--ticknumber', help=helpdict['ticknumber'],\n type=int, default=3)\n # -- legend type, to choose between top (previous style) to sides (new\n # style). 
It modifies the place where the name of the variable appear.\n infoparser.add_argument('--legend-style', help=helpdict['legend-style'],\n type=str, choices=['sides', 'top'],\n default='sides')\n # -- width of gaussian smoothing for plotting posteriors,\n # in units of bin size, increase for smoother data.\n infoparser.add_argument('--gaussian-smoothing', help=helpdict['gaussian-smoothing'],\n type=float, default=0.5)\n # interpolation factor for plotting posteriors, 1 means no interpolation,\n # increase for smoother curves (it means that extra bins are created\n # and interpolated between computed bins)\n infoparser.add_argument('--interpolation-smoothing', help=helpdict['interpolation-smoothing'],\n type=int, default=4)\n # -- plot Fisher ellipses\n infoparser.add_argument('--plot-fisher', help=helpdict['plot-fisher'],\n dest='plot_fisher',action='store_true')\n infoparser.add_argument('--center-fisher', help=helpdict['center-fisher'],\n dest='center_fisher',action='store_true')\n\n infoparser.add_argument('--posterior-smoothing', help=helpdict['posterior-smoothing'],\n type=int, default=5)\n\n return parser", "def command(\n self,\n handler: Handler = None,\n *,\n name: str = None,\n aliases: Sequence[str] = (),\n help_text: str = None,\n ) -> CommandProxy:\n\n def inner(func: Handler) -> CommandProxy:\n kwargs = {\"aliases\": aliases}\n\n help_text_ = help_text or func.__doc__\n if help_text_:\n kwargs[\"help\"] = help_text_.strip()\n\n name_ = name or func.__name__\n if asyncio.iscoroutinefunction(func):\n proxy = AsyncCommandProxy(\n func, self._sub_parsers.add_parser(name_, **kwargs)\n )\n else:\n proxy = CommandProxy(\n func, self._sub_parsers.add_parser(name_, **kwargs)\n )\n\n self._add_handler(proxy, name_, aliases)\n\n return proxy\n\n return inner(handler) if handler else inner", "def _make_argument_parser() -> argparse.ArgumentParser:\n parser = argparse.ArgumentParser(prog=\"pyrasaeco-render\", description=__doc__)\n subparsers = parser.add_subparsers(help=\"Commands\", dest=\"command\")\n subparsers.required = True\n\n once = subparsers.add_parser(\n \"once\", help=\"Render once the scenarios and the scenario ontology\"\n )\n\n continuously = subparsers.add_parser(\n \"continuously\",\n help=\"Re-render continuously the scenarios and the scenario ontology\",\n )\n\n continuously.add_argument(\n \"-p\",\n \"--port\",\n help=\"Port on which the demo server should listen to.\\n\\n\"\n \"If not specified, the demo server will not be started.\",\n type=int,\n )\n\n for command in [once, continuously]:\n command.add_argument(\n \"-s\",\n \"--scenarios_dir\",\n help=\"Directory where scenarios reside\\n\\n\"\n \"The rendering artefacts will be produced in-place in this directory.\",\n required=True,\n )\n\n return parser", "def setup_arguments(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:\n parser.set_defaults(command=lambda x: parser.print_usage())\n\n subparsers = parser.add_subparsers(\n title=f\"{COMMAND} commands\",\n description=\"sub commands for managing configs\"\n )\n\n apply_parser = subparsers.add_parser(\n 'apply',\n help='download and apply a config from github or bitbucket'\n )\n\n apply_parser.set_defaults(command=main)\n\n apply_parser.add_argument(\n '-u',\n '--user',\n required=True,\n help='username to pull from'\n )\n apply_parser.add_argument(\n '-r',\n '--repo',\n default='config',\n help='repo name to pull, defaults to config'\n )\n apply_parser.add_argument(\n '--no-download',\n action='store_true',\n help='Use already downloaded copy'\n )\n 
apply_parser.add_argument(\n '-b',\n '--bitbucket',\n action='store_const',\n dest='host',\n default=Hosts.GITHUB,\n const=Hosts.BITBUCKET,\n help='pull from bitbucket'\n )\n apply_parser.add_argument(\n '-g',\n '--github',\n action='store_const',\n dest='host',\n default=Hosts.GITHUB,\n const=Hosts.GITHUB,\n help='pull from bitbucket'\n )\n apply_parser.add_argument(\n '--no-apply',\n action=\"store_true\",\n dest=\"no_apply\",\n help=\"Don't actually run\"\n )\n\n list_parser = subparsers.add_parser('list', help=\"list downloaded configs\")\n\n list_parser.set_defaults(command=list_configs)\n\n list_parser.add_argument(\n '-u',\n '--user',\n help='username to pull from'\n )\n\n undo_parser = subparsers.add_parser(\n 'undo',\n help='Restore the snapshot taken when config was last applied'\n )\n\n undo_parser.set_defaults(command=undo_config)\n\n undo_parser.add_argument(\n '-u',\n '--user',\n required=True,\n help='username to pull from'\n )\n undo_parser.add_argument(\n '-r',\n '--repo',\n default='config',\n help='repo name to pull, defaults to config'\n )\n\n return parser", "def add_subcommand(self, cmd):\n self.subcommands.append(cmd)", "def parse_commandline_arguments(subparsers, config):\n add_parser = subparsers.add_parser(\"policy-add\")\n del_parser = subparsers.add_parser(\"policy-del\")\n list_parser = subparsers.add_parser(\"policy-list\")\n read_parser = subparsers.add_parser(\"policy-read\")\n export_parser = subparsers.add_parser(\"policy-export\")\n import_parser = subparsers.add_parser(\"policy-import\")\n\n add_parser.set_defaults(func=add)\n del_parser.set_defaults(func=delete)\n list_parser.set_defaults(func=list_policys)\n read_parser.set_defaults(func=read)\n export_parser.set_defaults(func=export)\n import_parser.set_defaults(func=policy_import)\n\n for parser in [add_parser, del_parser, read_parser]:\n parser.add_argument(\"policy_name\", help=\"name of the policy\")\n\n add_parser.add_argument(\"datafile\", help=\"filename containing policy data\")\n for parser in [import_parser, export_parser]:\n parser.add_argument(\"dir\", help=\"directory for the policies\")", "def cog_subcommand(\n *,\n base,\n subcommand_group=None,\n name=None,\n description: str = None,\n base_description: str = None,\n base_desc: str = None,\n base_default_permission: bool = True,\n base_permissions: typing.Dict[int, list] = None,\n subcommand_group_description: str = None,\n sub_group_desc: str = None,\n guild_ids: typing.List[int] = None,\n options: typing.List[dict] = None,\n connector: dict = None,\n):\n base_description = base_description or base_desc\n subcommand_group_description = subcommand_group_description or sub_group_desc\n guild_ids = guild_ids if guild_ids else []\n if not base_permissions:\n base_permissions = {}\n\n def wrapper(cmd):\n decorator_permissions = getattr(cmd, \"__permissions__\", None)\n if decorator_permissions:\n base_permissions.update(decorator_permissions)\n\n desc = description or inspect.getdoc(cmd)\n if options is None:\n opts = manage_commands.generate_options(cmd, desc, connector)\n else:\n opts = options\n\n if guild_ids and not all(isinstance(item, int) for item in guild_ids):\n raise IncorrectGuildIDType(\n f\"The snowflake IDs {guild_ids} given are not a list of integers. Because of discord.py convention, please use integer IDs instead. 
Furthermore, the command '{name or cmd.__name__}' will be deactivated and broken until fixed.\"\n )\n\n _cmd = {\n \"func\": None,\n \"description\": base_description,\n \"guild_ids\": guild_ids.copy(),\n \"api_options\": [],\n \"default_permission\": base_default_permission,\n \"api_permissions\": base_permissions,\n \"connector\": {},\n \"has_subcommands\": True,\n }\n\n _sub = {\n \"func\": cmd,\n \"name\": name or cmd.__name__,\n \"description\": desc,\n \"base_desc\": base_description,\n \"sub_group_desc\": subcommand_group_description,\n \"guild_ids\": guild_ids,\n \"api_options\": opts,\n \"connector\": connector,\n }\n return CogSubcommandObject(base, _cmd, subcommand_group, name or cmd.__name__, _sub)\n\n return wrapper", "def test_create_subparser_noop(self):\n self.command.create_subparser()", "def generate_parser(renamer):\n parser = ShlexArgumentParser(\n formatter_class=RawDescriptionHelpFormatter,\n prog=\"brp\",\n usage=\"cmd [args ...]\",\n add_help=False,\n exit_on_error=False,\n )\n subparsers = parser.add_subparsers(\n title=\"commands\",\n description=\"actions to take on the filenames\",\n )\n\n _help = [\n _help_parser((\"help\", \"h\", \"?\"), subparsers, renamer),\n _save_parser((\"save\", \"s\"), subparsers, renamer),\n _quit_parser((\"quit\", \"q\", \"exit\"), subparsers, renamer),\n _save_quit_parser((\"write\", \"w\"), subparsers, renamer),\n _print_parser((\"list\", \"ls\", \"l\"), subparsers, renamer),\n _history_parser((\"history\", \"hist\", \"past\"), subparsers, renamer),\n _undo_parser((\"undo\", \"u\"), subparsers, renamer),\n _reset_parser((\"reset\", \"over\", \"o\"), subparsers, renamer),\n _automate_parser((\"automate\", \"a\", \"auto\"), subparsers, renamer),\n _find_replace_parser((\"replace\", \"r\", \"re\", \"reg\", \"regex\"), subparsers, renamer),\n _append_parser((\"append\", \"ap\"), subparsers, renamer),\n _prepend_parser((\"prepend\", \"p\", \"pre\"), subparsers, renamer),\n _insert_parser((\"insert\", \"i\", \"in\"), subparsers, renamer),\n _case_parser((\"case\", \"c\"), subparsers, renamer),\n _extension_parser((\"extension\", \"x\", \"ext\"), subparsers, renamer),\n ]\n\n return parser, _help", "def add_parser(subparsers) -> None:\n contest_parser = subparsers.add_parser(\n 'contest', help='build contest files')\n mut_ex_group = contest_parser.add_mutually_exclusive_group()\n mut_ex_group.add_argument(\n '-p', '--pdf', action='store_true', default=False, help='generate contest PDFs')\n mut_ex_group.add_argument('-i', '--io', action='store_true',\n default=False, help='generate contest input/output files')\n contest_parser.add_argument(\n 'problem_dir', help='path to problem(s)', nargs='+')\n contest_parser.add_argument(\n 'contest_dir', help='directory which the contest will be saved')\n contest_parser.set_defaults(function=lambda options: process_contest(\n options.problem_dir, options.contest_dir, options.pdf, options.io))", "def build_parser():\n parser = argparse.ArgumentParser()\n subparsers = parser.add_subparsers(help='Blogstrap commands')\n init_parser = subparsers.add_parser(\n 'init',\n help='Initialize the Blogstrap directory')\n init_parser.set_defaults(func=init)\n init_parser.add_argument('-t', '--target',\n dest='target',\n type=str,\n default='.',\n help='Target folder to generate files in')\n init_parser.add_argument('--no-homepage',\n action='store_true',\n default=False,\n help='if specified, no homepage will be created')\n run_parser = subparsers.add_parser(\n 'run', help=\"Run the Flask development server\")\n 
run_parser.set_defaults(func=run)\n run_parser.add_argument('-c', '--config',\n dest='config',\n type=str,\n default=None,\n help='path to a config file')\n\n return parser", "def add_commands(subparsers, path):\n log.debug('importing %s', path)\n try:\n del sys.modules[path]\n except KeyError:\n pass\n try:\n package = importlib.import_module(path)\n except Exception as e:\n log.warning('failed to import commands package %s',\n path, exc_info=True)\n return\n log.debug('commands package: %s', path)\n for (finder, name, ispkg) in pkgutil.iter_modules(package.__path__):\n if ispkg:\n continue\n try:\n command = importlib.import_module('.' + name, path)\n except Exception as e:\n log.warning('failed to import %s command: %s',\n path, name, exc_info=True)\n continue\n if not getattr(command, 'run', None):\n log.warning('skipping command module without run function: %s',\n name)\n continue\n log.debug('command: %s'%(name))\n name = command.__name__.split('.')[-1]\n parser_help = getattr(command, 'parser_help', None)\n if parser_help is None:\n log.warning('command %s missing help text'%(command.__name__))\n parser = subparsers.add_parser(name, help=parser_help)\n command.add_arguments(parser)\n parser.set_defaults(run=command.run)", "def add_subparser(subparsers):\n parser = subparsers.add_parser('run', help=\"run artifact\")\n parser.add_argument('run_config', default='cps.ini',\n help=\"run configuration file\")\n parser.add_argument('-p', '--persist', action=\"store_true\", default=False,\n help=\"to persist data, dont delete sandbox after use\")\n\n parser.set_defaults(func=main)", "def _parse_args_cfg_subcmd(args):\n parser = argparse.ArgumentParser(\n description=\"Animation engine for explanatory math videos\",\n prog=\"manim cfg\",\n epilog=\"Made with <3 by the manim community devs\",\n )\n subparsers = parser.add_subparsers(help=\"subcommand\", dest=\"subcmd\")\n\n cfg_subparsers = {\n subcmd: subparsers.add_parser(subcmd) for subcmd in [\"write\", \"show\", \"export\"]\n }\n\n # Arguments for the write subcmd\n cfg_subparsers[\"write\"].add_argument(\n \"--level\",\n choices=[\"user\", \"cwd\"],\n default=\"cwd\",\n help=\"Specify if this config is for user or the working directory.\",\n )\n cfg_subparsers[\"write\"].add_argument(\n \"--open\", action=\"store_const\", const=True, default=False\n )\n\n # Arguments for the export subcmd\n cfg_subparsers[\"export\"].add_argument(\"--dir\", default=os.getcwd())\n\n # Arguments for the show subcmd: currently no arguments\n\n # Recall the argument list looks like 'manim cfg <subcmd> <args>' so we\n # only need to parse the remaining args\n parsed = parser.parse_args(args[2:])\n parsed.cmd = \"cfg\"\n parsed.cfg_subcommand = parsed.subcmd\n\n return parsed", "def emit_manpage_sub(tool, sub):\n\n required = []\n optional = []\n\n for arg in sub[\"args\"]:\n arg_txt = \" %s %s\\n\\n\" % (arg[\"arg\"], arg[\"descr\"])\n if arg[\"arg\"][0] in [\"<\", \"-\"]:\n required.append(arg_txt)\n elif arg[\"arg\"][0] in [\"[\"]:\n optional.append(arg_txt)\n else:\n return None\n\n txtpage = (\n MANPAGE_SUB.replace(\"${name}\", \"-\".join([tool[\"name\"], sub[\"name\"]]))\n .replace(\"${descr}\", sub[\"descr\"] if sub[\"descr\"] else \"None provided\")\n .replace(\"${descr_long}\", sub[\"descr\"] if sub[\"descr\"] else \"None provided\")\n .replace(\"${usage}\", sub[\"usage\"])\n .replace(\"${required}\", \"REQUIRED\\n\" + \"\".join(required) if required else \"\")\n .replace(\"${optional}\", \"OPTIONAL\\n\" + \"\".join(optional) if optional else \"\")\n 
.replace(\"${author_name}\", \"Simon A. F. Lund\")\n .replace(\n \"${author_email}\",\n \"[email protected]\",\n )\n .replace(\"${sponsor}\", \"Samsung\")\n )\n\n manpage, err, rcode = run(\n [\n \"txt2man\",\n \"-t\",\n \"-\".join([tool[\"name\"].upper(), sub[\"name\"].upper()]),\n \"-v\",\n \"xNVMe\",\n \"-s\",\n \"1\",\n \"-r\",\n \"xNVMe\",\n ],\n txtpage,\n )\n if rcode:\n logging.error(\"FAILED: txt2man; %s, %d\", err, rcode)\n return None\n\n return manpage", "def __init__(self, name, dest=None, handler=None,\n title=None, description=None, help=None):\n super(SubCommandOpt, self).__init__(name, type=types.String(),\n dest=dest, help=help)\n self.handler = handler\n self.title = title\n self.description = description", "def get_parser():\n parser = ArgumentParser(description=__doc__,\n formatter_class=ArgumentDefaultsHelpFormatter,\n prog='pv2')\n subparsers = parser.add_subparsers(dest='cmd')\n # subparsers.add_parser('selfcheck',\n # add_help=False,\n # help=\"Self-check of the sst toolkit.\")\n # parser.add_argument('--version',\n # action='version',\n # version=('sst %s' % str(sst.__version__)))\n subparsers.add_parser('eval',\n add_help=False,\n parents=[evaluate.get_parser()],\n formatter_class=ArgumentDefaultsHelpFormatter,\n help=(\"Evaluate a single image\"))\n subparsers.add_parser('train',\n add_help=False,\n parents=[train.get_parser()],\n formatter_class=ArgumentDefaultsHelpFormatter,\n help=(\"Train a new model.\"))\n subparsers.add_parser('plot',\n add_help=False,\n parents=[plot.get_parser()],\n formatter_class=ArgumentDefaultsHelpFormatter,\n help=(\"Plot summary information.\"))\n return parser", "def build_parser() -> ArgumentParser:\n parser = ArgumentParser(prog=\"bartender\")\n parser.add_argument(\"--version\", action=\"version\", version=version(\"bartender\"))\n subparsers = parser.add_subparsers(dest=\"subcommand\")\n\n serve_sp = subparsers.add_parser(\"serve\", help=\"Serve web service\")\n serve_sp.set_defaults(func=serve)\n\n perform_sp = subparsers.add_parser(\"perform\", help=\"Async Redis queue job worker\")\n perform_sp.add_argument(\n \"--config\",\n default=Path(\"config.yaml\"),\n type=Path,\n help=\"Configuration with schedulers that need arq workers\",\n )\n perform_sp.add_argument(\n \"--destination\",\n nargs=\"+\",\n help=\"\"\"Name of destinations to run workers for.\n Each destination must have `scheduler.type:arq`.\n By default runs workers for all destinations with `scheduler.type:arq`.\"\"\",\n dest=\"destination_names\",\n )\n perform_sp.set_defaults(func=perform)\n\n add_generate_token_subcommand(subparsers)\n\n return parser", "def create_subparser(subparsers):\n parser = subparsers.add_parser(\n 'symbols',\n add_help=False,\n help='Search recursively in any object files or archives for symbols '\n 'with names matching regex.')\n parser.add_argument(\n '-u', '--undefined',\n dest='undefined', action='store_const', const=True, default=False,\n help='Also print undefined symbols (i.e. 
in objects which reference '\n 'but don\\'t define the symbol).')\n return parser", "def command(*args, **kwargs):\n def deco(fct):\n return Command(fct, **kwargs)\n if args:\n return deco(*args)\n return deco", "def sample_parser(subparser):\n parser = subparser.add_parser('sample',\\\n help='???')\n parser = add_arg(parser, 'parallel')\n parser = add_arg(parser, 'xvars')\n parser = add_arg(parser, 'time')\n parser.add_argument(\"args\", metavar='SELAFIN file', nargs=\"+\")\n\n return subparser", "def register(\n self,\n root_parser: argparse.ArgumentParser,\n subparsers: argparse._SubParsersAction,\n ) -> None:\n self.root_parser = root_parser\n parser = subparsers.add_parser(\n self.name,\n aliases=self.aliases,\n help=self.help,\n description=self.help,\n add_help=self.add_help,\n )\n parser.set_defaults(command=self)\n self.configure(parser)", "def command(func):\n classname = inspect.getouterframes(inspect.currentframe())[1][3]\n name = func.__name__\n help_name = name.replace(\"do_\", \"help_\")\n doc = textwrap.dedent(func.__doc__)\n\n def new(instance, args):\n # instance.new.__doc__ = doc\n try:\n argv = shlex.split(args)\n arguments = docopt(doc, help=True, argv=argv)\n func(instance, args, arguments)\n except SystemExit as e:\n if args not in ('-h', '--help'):\n print(\"Could not execute the command.\")\n print(e)\n print(doc)\n\n new.__doc__ = doc\n return new", "def build_parser():\n \n parser = argparse.ArgumentParser(\n description='Interfaces with the Synapse repository.')\n parser.add_argument(\n '--version',\n action='version',\n version='Synapse Client %s' % synapseclient.__version__)\n parser.add_argument(\n '-u', '--username',\n dest='synapseUser',\n help='Username used to connect to Synapse')\n parser.add_argument(\n '-p', '--password',\n dest='synapsePassword',\n help='Password used to connect to Synapse')\n parser.add_argument(\n '--debug',\n dest='debug',\n action='store_true')\n parser.add_argument(\n '-s', '--skip-checks',\n dest='skip_checks',\n action='store_true',\n help='suppress checking for version upgrade messages and endpoint redirection')\n\n\n subparsers = parser.add_subparsers(\n title='commands',\n description='The following commands are available:',\n help='For additional help: \"synapse <COMMAND> -h\"')\n\n \n parser_get = subparsers.add_parser(\n 'get',\n help='downloads a dataset from Synapse')\n parser_get.add_argument(\n 'id',\n metavar='syn123', type=str,\n help='Synapse ID of form syn123 of desired data object')\n parser_get.set_defaults(func=get)\n\n \n parser_store = subparsers.add_parser(\n 'store',\n help='depending on the arguments supplied, '\n 'store will either create, add, or update')\n group = parser_store.add_mutually_exclusive_group()\n group.add_argument(\n '--id',\n metavar='syn123', type=str,\n help='Synapse ID of form syn123 of the Synapse object to update')\n group.add_argument(\n '--parentid',\n metavar='syn123', type=str,\n help='Synapse ID of project or folder where to upload new data.')\n parser_store.add_argument(\n '--name',\n type=str, nargs=\"+\",\n help='Name of data object in Synapse')\n parser_store.add_argument(\n '--description',\n type=str, nargs=\"+\",\n help='Description of data object in Synapse.')\n parser_store.add_argument(\n '--type',\n type=str, default='File',\n help='Type of object, such as \"File\", \"Folder\", or '\n '\"Project\", to create in Synapse. 
Defaults to \"File\"')\n parser_store.add_argument(\n '--used',\n metavar='TargetID', type=str, nargs='*',\n help='ID of a target data entity from which the specified entity is derived')\n parser_store.add_argument(\n '--executed',\n metavar='TargetID', type=str, nargs='*',\n help='ID of a code entity from which the specified entity is derived')\n parser_store.add_argument(\n '--file',\n type=str,\n help='file to be added to synapse.')\n parser_store.set_defaults(func=store)\n\n\n parser_delete = subparsers.add_parser(\n 'delete',\n help='removes a dataset from Synapse')\n parser_delete.add_argument(\n 'id',\n metavar='syn123', type=str,\n help='Synapse ID of form syn123 of desired data object')\n parser_delete.set_defaults(func=delete)\n\n\n parser_query = subparsers.add_parser(\n 'query',\n help='Performs SQL like queries on Synapse')\n parser_query.add_argument(\n 'queryString',\n metavar='string',\n type=str, nargs='*',\n help='A query string, see https://sagebionetworks.jira.com/wiki/'\n 'display/PLFM/Repository+Service+API#'\n 'RepositoryServiceAPI-QueryAPI for more information')\n parser_query.set_defaults(func=query)\n \n \n parser_submit = subparsers.add_parser(\n 'submit',\n help='submit an entity or a file for evaluation')\n parser_submit.add_argument(\n '--evaluationID', '--evalID',\n type=str,\n help='Evaluation ID where the entity/file will be submitted')\n parser_submit.add_argument(\n '--evaluationName', '--evalN',\n type=str,\n help='Evaluation Name where the entity/file will be submitted')\n parser_submit.add_argument(\n '--evaluation',\n type=str,\n help=argparse.SUPPRESS) #mainly to maintain the backward compatibility\n parser_submit.add_argument(\n '--entity', '--eid',\n type=str,\n help='Synapse ID of the entity to be submitted')\n parser_submit.add_argument(\n '--file', '-f',\n type=str,\n help='File to be submitted to the challenge')\n parser_submit.add_argument(\n '--parentId', '--pid',\n type=str,\n help='Synapse ID of project or folder where to upload data')\n parser_submit.add_argument(\n '--name',\n type=str,\n help='Name of the submission')\n parser_submit.add_argument(\n '--teamName', '--team',\n type=str,\n help='Publicly displayed name of team for the submission[defaults to username]')\n parser_submit.add_argument(\n '--used',\n metavar='TargetID', type=str, nargs='*',\n help='ID of a target data entity from which the specified entity is derived')\n parser_submit.add_argument(\n '--executed',\n metavar='TargetID', type=str, nargs='*',\n help='ID of a code entity from which the specified entity is derived')\n parser_submit.set_defaults(func=submit)\n\n \n parser_get = subparsers.add_parser(\n 'show',\n help='show metadata for an entity')\n parser_get.add_argument(\n 'id',\n metavar='syn123', type=str,\n help='Synapse ID of form syn123 of desired synapse object')\n parser_get.set_defaults(func=show)\n\n \n parser_cat = subparsers.add_parser(\n 'cat',\n help='prints a dataset from Synapse')\n parser_cat.add_argument(\n 'id',\n metavar='syn123', type=str,\n help='Synapse ID of form syn123 of desired data object')\n parser_cat.set_defaults(func=cat)\n\n\n parser_set_provenance = subparsers.add_parser(\n 'set-provenance',\n help='create provenance records')\n parser_set_provenance.add_argument(\n '-id',\n metavar='syn123', type=str, required=True,\n help='Synapse ID of entity whose provenance we are accessing.')\n parser_set_provenance.add_argument(\n '-name',\n metavar='NAME', type=str, required=False,\n help='Name of the activity that generated the entity')\n 
parser_set_provenance.add_argument(\n '-description',\n metavar='DESCRIPTION', type=str, required=False,\n help='Description of the activity that generated the entity')\n parser_set_provenance.add_argument(\n '-o', '-output',\n metavar='OUTPUT_FILE', dest='output',\n const='STDOUT', nargs='?', type=str,\n help='Output the provenance record in JSON format')\n parser_set_provenance.add_argument(\n '-used',\n metavar='TargetID', type=str, nargs='*',\n help='ID of a target data entity from which the specified entity is derived')\n parser_set_provenance.add_argument(\n '-executed',\n metavar='TargetID', type=str, nargs='*',\n help='ID of a code entity from which the specified entity is derived')\n parser_set_provenance.set_defaults(func=setProvenance)\n\n\n parser_get_provenance = subparsers.add_parser(\n 'get-provenance',\n help='show provenance records')\n parser_get_provenance.add_argument(\n '-id',\n metavar='syn123', type=str, required=True,\n help='Synapse ID of entity whose provenance we are accessing.')\n parser_get_provenance.add_argument(\n '-o', '-output',\n metavar='OUTPUT_FILE', dest='output',\n const='STDOUT', nargs='?', type=str,\n help='Output the provenance record in JSON format')\n parser_get_provenance.set_defaults(func=getProvenance)\n\n parser_add = subparsers.add_parser(\n 'add',\n help='uploads and adds a dataset to Synapse')\n parser_add.add_argument(\n '-parentid', '-parentId',\n metavar='syn123', type=str, required=True,\n help='Synapse ID of project or folder where to upload data.')\n parser_add.add_argument(\n '-name',\n metavar='NAME', type=str, required=False,\n help='Name of data object in Synapse')\n parser_add.add_argument(\n '-description',\n metavar='DESCRIPTION', type=str,\n help='Description of data object in Synapse.')\n parser_add.add_argument(\n '-type',\n type=str, default='File',\n help='Type of object to create in synapse. 
Defaults to \"File\".')\n parser_add.add_argument(\n '-used',\n metavar='TargetID', type=str, nargs='*',\n help='ID of a target data entity from which the specified entity is derived')\n parser_add.add_argument(\n '-executed',\n metavar='TargetID', type=str, nargs='*',\n help='ID of a code entity from which the specified entity is derived')\n parser_add.add_argument(\n 'file',\n type=str,\n help='file to be added to synapse.')\n parser_add.set_defaults(func=add)\n\n\n parser_create = subparsers.add_parser(\n 'create',\n help='Creates folders or projects on Synapse')\n parser_create.add_argument(\n '-parentid', '-parentId',\n metavar='syn123', type=str, required=False,\n help='Synapse ID of project or folder where to place folder [not used with project]')\n parser_create.add_argument(\n '-name',\n metavar='NAME', type=str, required=True,\n help='Name of folder/project.')\n parser_create.add_argument(\n '-description',\n metavar='DESCRIPTION', type=str,\n help='Description of project/folder')\n parser_create.add_argument(\n 'type',\n type=str,\n help='Type of object to create in synapse one of {Project, Folder}')\n parser_create.set_defaults(func=create)\n\n\n parser_update = subparsers.add_parser(\n 'update',\n help='uploads a new file to an existing Synapse Entity')\n parser_update.add_argument(\n '-id',\n metavar='syn123', type=str, required=True,\n help='Synapse ID of entity to be updated')\n parser_update.add_argument(\n 'file',\n type=str,\n help='file to be added to synapse.')\n parser_update.set_defaults(func=update)\n\n\n parser_onweb = subparsers.add_parser(\n 'onweb',\n help='opens Synapse website for Entity')\n parser_onweb.add_argument(\n 'id',\n type=str,\n help='Synapse id')\n parser_onweb.set_defaults(func=onweb)\n\n\n ## the purpose of the login command (as opposed to just using the -u and -p args) is\n ## to allow the command line user to cache credentials\n parser_login = subparsers.add_parser(\n 'login',\n help='login to Synapse and (optionally) cache credentials')\n parser_login.add_argument(\n '-u', '--username',\n dest='synapseUser',\n help='Username used to connect to Synapse')\n parser_login.add_argument(\n '-p', '--password',\n dest='synapsePassword',\n help='Password used to connect to Synapse')\n parser_login.add_argument(\n '--rememberMe', '--remember-me',\n dest='rememberMe',\n action='store_true',\n default=False,\n help='Cache credentials for automatic authentication on future interactions with Synapse')\n parser_login.set_defaults(func=login)\n\n\n return parser" ]
[ "0.7137559", "0.6991422", "0.69432056", "0.6767474", "0.66648597", "0.6645159", "0.6587919", "0.6517023", "0.6504958", "0.6392979", "0.6344873", "0.63118166", "0.623115", "0.62270325", "0.62064266", "0.6119958", "0.61081535", "0.60894066", "0.60719246", "0.6052931", "0.6003018", "0.59982985", "0.59937394", "0.59826934", "0.5937738", "0.58915204", "0.58833075", "0.58806044", "0.584286", "0.5840654", "0.5831472", "0.5819951", "0.580079", "0.5775416", "0.57682973", "0.57089305", "0.5706053", "0.5705647", "0.5693742", "0.5672658", "0.5672565", "0.5654731", "0.5648333", "0.564074", "0.56296057", "0.5627811", "0.5613365", "0.5595345", "0.5584048", "0.55667645", "0.5566741", "0.555785", "0.55457646", "0.5537151", "0.55358475", "0.5508137", "0.54768956", "0.546803", "0.5466575", "0.54324454", "0.54315215", "0.5424996", "0.54202706", "0.5418838", "0.5415309", "0.54151994", "0.541409", "0.54102504", "0.5408514", "0.5405165", "0.54039127", "0.54001564", "0.5399215", "0.53975135", "0.5392384", "0.53893346", "0.5378696", "0.53324014", "0.53324", "0.5328786", "0.5322507", "0.53150094", "0.53103805", "0.53005093", "0.5299249", "0.5293431", "0.5291682", "0.52807033", "0.5276046", "0.5261256", "0.5255738", "0.5254569", "0.5242469", "0.52416205", "0.5236815", "0.5233597", "0.52269316", "0.52251136", "0.52185667", "0.52171856" ]
0.84089714
0
Helper method for adding subcommands. Wrapper around `add_parser` that simplifies adding subcommands to ZeroBot commands. The same string is used for both the `description` and `help` parameters of `add_parser`.
def add_subcommand( subp: _SubParsersAction, name: str, description: Optional[str] = None, **kwargs ) -> "CommandParser": desc_help = {"description": description, "help": description} return subp.add_parser(name, **desc_help, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_subcommands(cls, parser: argparse.ArgumentParser) -> None:\n if cls.SUBCOMMANDS:\n subparsers = parser.add_subparsers(title=\"subcommands\", metavar=\"\", dest='cmd')\n for subcmd_class in cls.SUBCOMMANDS:\n parsers = subcmd_class.get_args()\n subcmd_class.parser = parsers[-1]\n\n subparser = subparsers.add_parser(\n subcmd_class.NAMES[0],\n aliases=subcmd_class.NAMES[1:],\n parents=parsers,\n help=subcmd_class.HELP,\n epilog=subcmd_class.EPILOG)\n subparser.set_defaults(command_class=subcmd_class)\n subcmd_class.customize_subparser(subparser)", "def setup_subparser(name, description, commands):\n subparser = SUBPARSER.add_parser(\n name,\n help=description\n )\n subparser.add_argument(\n 'sub_command',\n metavar='sub_command',\n type=str,\n nargs='+',\n help='Which command to run. Options: %s' % ', '.join(commands),\n choices=commands\n )\n\n return subparser", "def add_subparsers(cls, parser, name=\"\", prefixes=[], delim=\"_\", title=\"commands\", description=\"available commands\", required=True):\n\t\tcommand = f\"command_{name}\"\n\t\tif command in cls.COMMANDS:\n\t\t\traise CommandParserNameDuplicated(f\"Command parser with name {name} already registered.\")\n\t\t\n\t\tcls.COMMANDS[command] = {}\n\t\t\n\t\tsub = parser.add_subparsers(title=title, dest=command, description=description)\n\t\tsub.required = True\n\t\tfor pf in prefixes:\n\t\t\tfor c, method in cls.get_commands(prefix=pf, delim=delim):\n\t\t\t\tcls.set_subparser_for(c, method, sub)\n\t\t\t\tcls.COMMANDS[command][c] = method", "def extend_cli(self, subparser):", "def add_subparser(sp, name, **kwargs):\n kwargs[\"add_help\"] = False\n kwargs['formatter_class'] = ap.ArgumentDefaultsHelpFormatter\n sparser = sp.add_parser(name, **kwargs)\n\n sparser.add_argument(\"-h\", \"--help\", action=custom_help(),\n help=\"print the short or long help\")\n\n return sparser", "def build_subcommands_parser(parser, module):\n mdefs = module.__dict__\n keys = list(mdefs.keys())\n keys.sort()\n subparsers = parser.add_subparsers(help='sub-command help')\n for command in keys:\n if command.startswith('pub_'):\n func = module.__dict__[command]\n parser = subparsers.add_parser(command[4:], help=func.__doc__)\n parser.set_defaults(func=func)\n argspec = inspect.signature(func)\n positionals = []\n short_opts = set([])\n for arg in argspec.parameters.values():\n if arg.default == inspect.Parameter.empty:\n positionals += [arg]\n else:\n param_name = arg.name.replace('_', '-')\n short_opt = param_name[0]\n if not (param_name.startswith('no') or\n (short_opt in short_opts)):\n opts = ['-%s' % short_opt, '--%s' % param_name]\n else:\n opts = ['--%s' % param_name]\n short_opts |= set([short_opt])\n if isinstance(arg.default, list):\n parser.add_argument(*opts, action='append')\n elif isinstance(arg.default, dict):\n parser.add_argument(*opts, type=json.loads)\n elif arg.default is False:\n parser.add_argument(*opts, action='store_true')\n elif arg.default is not None:\n parser.add_argument(*opts, default=arg.default)\n else:\n parser.add_argument(*opts)\n if positionals:\n for arg in positionals[:-1]:\n parser.add_argument(arg.name)\n parser.add_argument(positionals[-1].name, nargs='*')", "def addSubParser( parentParser, cmdName ) :\n parser = parentParser.add_parser( cmdName, help='Generate a new UBOS package scaffold.' 
)\n parser.add_argument( '--directory', required=True, help='Directory where to create the package scaffold')\n parser.add_argument( '--template', required=True, help='Name of the template to use' )\n parser.add_argument( '--json', required=False, help='Settings file' )", "def get_commands(bot):\n new_commands = []\n\n new_commands.append(Command(\n 'mycommand', subcommands=[\n SubCommand(\n Opt('myoption'),\n doc='This is a simple command with a single required option.'),\n SubCommand(\n Opt('custom', optional=True),\n Opt('attached', optional=True, attached='attached argument'),\n doc='This has two different optional options, one without an attached '\n 'parameter, and the other requiring an attached parameter.'),\n SubCommand(\n Opt('trailing'),\n Arg('arg 1'),\n Arg('arg 2'),\n Arg('arg 3', argtype=ArgTypes.SPLIT, additional='more args'),\n doc='This command requires a lot of trailing arguments.'),\n SubCommand(\n Opt('grouped'),\n Arg('grouped arguments', argtype=ArgTypes.MERGED),\n doc='This will group all given arguments as a single string.'),\n SubCommand(\n Opt('complex', attached='attached'),\n Opt('other', optional=True, attached='also required'),\n Arg('arg 1'),\n Arg('arg 2', argtype=ArgTypes.SPLIT_OPTIONAL, additional='more args'),\n doc='The complex option has a required attached parameter, and the '\n '\\'other\\' option also has a required attached parameter if '\n '\\'other\\' is included. Additionally, there will be a requirement '\n 'of at least 1 trailing argument.'),\n SubCommand(\n Opt('marquee'),\n Arg('text', argtype=ArgTypes.MERGED,\n check=lambda b, m, v, *a: len(v) <= 100,\n check_error=\"Marquee message must be less than 100 characters long.\"),\n doc='Creates a marquee that loops 3 times.')],\n shortcuts=[\n Shortcut(\n 'complex', 'complex {attached} other {other} {arg 1} {arg 2}',\n Arg('attached'), Arg('other'), Arg('arg 1'),\n Arg('arg 2', argtype=ArgTypes.SPLIT_OPTIONAL)),\n Shortcut(\n 'marquee', 'marquee {text}', Arg('text', argtype=ArgTypes.MERGED))],\n description='Your command description here.',\n other='This text is optional - it just shows up after everything '\n 'else. Quick note, all of the commands here can only be used by '\n 'bot moderators or above, as indicated by elevated_level. A '\n 'level of 2 would mean only server owners or above can use the '\n 'command, and a level of 3 would restrict the command to only '\n 'the bot owners.',\n elevated_level=1, category='demo'))\n\n new_commands.append(Command(\n 'myothercommand', subcommands=[\n SubCommand(\n Arg('text', argtype=ArgTypes.MERGED_OPTIONAL),\n doc='This traps all further commands from being executed.'),\n SubCommand(\n Opt('order'), Opt('matters'),\n doc='It is impossible to access this command because the first '\n 'subcommand will always be satisfied first. Order of the '\n 'subcommand matters!'),\n SubCommand(\n Opt('sample'), Opt('foo'), Opt('bar'),\n doc='Also impossible to access. This subcommand just adds some '\n 'keywords to the command.')],\n description='Only bot owners can see this text!',\n other='Note that no shortcuts were defined. They, too, are optional. '\n 'Also, this command is hidden, which means that only the bot '\n 'owners can see this command listed from the help command. '\n 'However, unless the command is configured with an elevated '\n 'permissions level, any user can still execute the command. '\n 'Users still will not be able to see the specific help for this '\n 'command, though. 
Lastly, this command is disabled in DMs.',\n hidden=True, allow_direct=False, category='demo'))\n\n new_commands.append(Command(\n 'notify', subcommands=[\n SubCommand(\n Arg('text', argtype=ArgTypes.MERGED),\n doc='Notify the owners with some text!')],\n other='This command uses a custom function. It is called with the '\n 'same arguments as get_response. The command will show up to '\n 'all users in the help command, but can only be used by server '\n 'owners, as it is disallowed in direct messages.',\n elevated_level=2, allow_direct=False, function=custom_notify,\n category='demo'))\n\n new_commands.append(Command(\n 'wait', other='Use this command to demo the wait_for functionality', category='demo'))\n\n return new_commands", "def add_subcommand(self, cmd):\n self.subcommands.append(cmd)", "def sub_command(self, name: str=None, description: str=None, options: list=None, connectors: dict=None, **kwargs):\r\n def decorator(func):\r\n if self.child_type is None:\r\n if len(self.registerable.options) > 0:\r\n self.registerable.options = []\r\n self.child_type = Type.SUB_COMMAND\r\n \r\n new_func = SubCommand(\r\n func,\r\n name=name,\r\n description=description,\r\n options=options,\r\n connectors=connectors,\r\n **kwargs\r\n )\r\n self.children[new_func.name] = new_func\r\n self.registerable.options.append(new_func.option)\r\n return new_func\r\n return decorator", "def sub_command(self, name: str=None, description: str=None, options: list=None, connectors: dict=None, **kwargs):\r\n def decorator(func):\r\n new_func = SubCommand(\r\n func,\r\n name=name,\r\n description=description,\r\n options=options,\r\n connectors=connectors,\r\n **kwargs\r\n )\r\n self.children[new_func.name] = new_func\r\n self.option.options.append(new_func.option)\r\n return new_func\r\n return decorator", "def add_argparse_subparser(subparsers):\n\n new_sub_parser = subparsers.add_parser(\n PluginManager.argparse_subparser_name(), help=\"plugin commands\"\n )\n PluginManager.__argparse_subparser = new_sub_parser\n plugin_subparsers = new_sub_parser.add_subparsers(\n dest=PluginManager.__root_subparser_name\n )\n\n sub_sub_parser = plugin_subparsers.add_parser(\n \"list\", help=\"list the available plugins\"\n )\n sub_sub_parser.add_argument(\n \"--all\",\n dest=\"show_all\",\n action=\"store_true\",\n default=False,\n help=\"show all loaded plugins (default is False)\",\n )\n sub_sub_parser.add_argument(\n dest=\"list_filter\",\n default=None,\n help=\"filter\",\n nargs=\"?\",\n type=PluginManager.__list_filter_type,\n )\n sub_sub_parser = plugin_subparsers.add_parser(\n \"info\", help=\"information on a specific plugin\"\n )\n sub_sub_parser.add_argument(\n dest=\"info_filter\",\n default=None,\n type=PluginManager.__info_filter_type,\n help=\"an id\",\n )", "def set_subparser_for(cls, command, method, subparser):\n\n\t\tdef add_pos_argument(sub, label, arg):\n\t\t\tif arg[\"type\"] == bool:\n\t\t\t\traise CommandTypeError(\"bool type not supported as positional argument\")\n\t\t\tif \"value\" in arg:\n\t\t\t\tif arg[\"type\"] in [str, int, float]:\n\t\t\t\t\tsub.add_argument(label, nargs='?', default=arg[\"value\"], type=arg[\"type\"], help=arg[\"help_line\"])\n\t\t\telif \"values\" in arg:\n\t\t\t\tif arg[\"type\"] in [str, int, float]:\n\t\t\t\t\tsub.add_argument(label, nargs='?', default=arg[\"values\"][0], choices=arg[\"values\"], type=arg[\"type\"], help=arg[\"help_line\"])\n\t\t\t\telif arg[\"type\"] == list:\n\t\t\t\t\tsub.add_argument(label, nargs='+', default=arg[\"values\"][0], 
choices=arg[\"values\"], help=arg[\"help_line\"])\n\t\t\telse:\n\t\t\t\tsub.add_argument(label, type=arg[\"type\"], help=arg[\"help_line\"])\n\n\t\tdef add_opt_argument(sub, label, arg, add_alias=True):\n\t\t\tif arg[\"type\"] == bool:\n\t\t\t\tif add_alias:\n\t\t\t\t\tsub.add_argument(arg[\"alias\"], arg[\"name\"], action=\"store_true\", default=False, help=arg[\"help_line\"])\n\t\t\t\telse:\n\t\t\t\t\tsub.add_argument(arg[\"name\"], action=\"store_true\", default=False, help=arg[\"help_line\"])\n\n\t\t\telif arg[\"type\"] in [str, int, float] and \"value\" in arg:\n\t\t\t\tif add_alias:\n\t\t\t\t\tsub.add_argument(arg[\"alias\"], arg[\"name\"], type=arg[\"type\"], default=arg[\"value\"], help=arg[\"help_line\"])\n\t\t\t\telse:\n\t\t\t\t\tsub.add_argument(arg[\"name\"], type=arg[\"type\"], default=arg[\"value\"], help=arg[\"help_line\"])\n\t\t\telif arg[\"type\"] == list and \"values\" not in arg:\n\t\t\t\tsub.add_argument(label, nargs=\"*\", help=arg[\"help_line\"])\n\t\t\telif \"values\" in arg:\n\t\t\t\tif arg[\"type\"] == list:\n\t\t\t\t\tsub.add_argument(label, choices=arg[\"values\"], default=arg[\"values\"][0], nargs=\"*\", help=arg[\"help_line\"])\n\t\t\t\telse:\n\t\t\t\t\tsub.add_argument(label, type=arg[\"type\"], choices=arg[\"values\"], default=arg[\"values\"][0], nargs=\"?\", help=arg[\"help_line\"])\n\t\t\telse:\n\t\t\t\tif add_alias:\n\t\t\t\t\tsub.add_argument(arg[\"alias\"], arg[\"name\"], type=arg[\"type\"], help=arg[\"help_line\"])\n\t\t\t\telse:\n\t\t\t\t\tsub.add_argument(arg[\"name\"], type=arg[\"type\"], help=arg[\"help_line\"])\n\t\t\n\t\tfunc = getattr(cls, method)\n\n\t\targs_info = cls.__parse_docstring(func.__doc__)\n\t\tif args_info == {}:\n\t\t\treturn\n\n\t\tc = subparser.add_parser(command, help=args_info[\"help_line\"])\n\n\t\tif \"arguments\" in args_info:\n\t\t\tfor label, arg in args_info[\"arguments\"].items():\n\t\t\t\tif arg[\"pos\"]:\n\t\t\t\t\tadd_pos_argument(c, label, arg)\n\t\t\t\telse:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tadd_opt_argument(c, label, arg, add_alias=True)\n\t\t\t\t\texcept ArgumentError as e:\n\t\t\t\t\t\tadd_opt_argument(c, label, arg, add_alias=False)", "def subcommand(self, func=None, **subcommand_options):\n\n def decorator(subcommand_func):\n subcommand_sig = inspect.signature(subcommand_func)\n\n @functools.wraps(subcommand_func)\n def wrapped(args):\n\n final_args = []\n final_kwargs = {}\n\n if \"args\" in subcommand_sig.parameters:\n final_kwargs[\"args\"] = args\n\n return subcommand_func(*final_args, **final_kwargs)\n\n subcommand_name = subcommand_options.pop(\"name\", subcommand_func.__name__)\n subcommand_args_def = subcommand_options.pop(\"args\", None) or ()\n subcommand_doc = subcommand_options.pop(\"help\", None) or subcommand_options.pop(\"description\", None)\n if subcommand_doc is None:\n subcommand_doc = subcommand_func.__doc__\n subcommand_aliases = subcommand_options.pop(\"aliases\", None) or []\n if subcommand_options:\n raise ValueError(f\"Unexpected kwarg(s): {', '.join(str(k) for k in subcommand_options.keys())}\")\n\n parser = self.subparsers.add_parser(\n name=subcommand_name,\n help=subcommand_doc,\n description=subcommand_doc,\n aliases=subcommand_aliases,\n )\n parser.set_defaults(func=wrapped)\n\n for arg in subcommand_args_def:\n if isinstance(arg, str):\n parser.add_argument(arg)\n elif isinstance(arg, (list, tuple)):\n if isinstance(arg[-1], dict):\n arg_args = arg[:-1]\n arg_kwargs = arg[-1]\n else:\n arg_args = arg[:]\n arg_kwargs = {}\n parser.add_argument(*arg_args, **arg_kwargs)\n else:\n raise 
TypeError(r\"Expected a string or list, got {arg!r}\")\n\n return parser\n\n if func is None:\n return decorator\n else:\n return decorator(func)", "def add_command_parsers(parser, logparser):\n subparsers = parser.add_subparsers(metavar='Command')\n help_text = 'ONE OF THE FOLLOWING:\\n'\n available_commands = find_commands(\n os.path.join(os.path.abspath(os.path.dirname(__file__)), os.path.pardir, 'functions'))\n max_length = max([len(a) for a in available_commands])\n for command in available_commands:\n child_parser = subparsers.add_parser(command, parents=[logparser])\n call = importlib.import_module('functions.%s'% command)\n if hasattr(call, 'set_argparser'):\n call.set_argparser(child_parser)\n else:\n child_parser.description = 'Description is missing'\n help_text += command + \": \" + \" \"*(max_length-len(command)) + ('\\n'+' '*(max_length+2)\n ).join(textwrap.wrap(child_parser.description,60)) + '\\n'\n child_parser.set_defaults(func=call.main)\n subparsers.help = help_text + '\\nType \"Command --help\" for more information about given command'", "def add(cls, subparsers):\n subparser = subparsers.add_parser(\n name=cls.__tool_name__(),\n description=cls.__get_description__())\n\n cls.__add_arguments__(subparser)\n subparser.set_defaults(func=cls.from_args)\n return subparser", "def add_args_to_subparser(the_parser, subcommand_name):\n\n the_parser.add_argument(CmdArgs.verbose_optional, help=CmdArgs.verbose_help,\n action='store_true',\n )\n\n if subcommand_name in DCA_VISUALIZATION_SUBCOMMANDS:\n the_parser.add_argument(CmdArgs.biomolecule, help=CmdArgs.biomolecule_help)\n the_parser.add_argument(CmdArgs.pdb_chain_id, help=CmdArgs.pdb_chain_id_help)\n the_parser.add_argument(CmdArgs.pdb_file, help=CmdArgs.pdb_file_help)\n the_parser.add_argument(CmdArgs.refseq_file, help=CmdArgs.refseq_file_help)\n the_parser.add_argument(CmdArgs.dca_file, help=CmdArgs.dca_file_help)\n the_parser.add_argument(CmdArgs.rna_secstruct_file_optional,\n help=CmdArgs.rna_secstruct_file_help,\n )\n the_parser.add_argument(CmdArgs.linear_dist_optional,\n help=CmdArgs.linear_dist_help, type = int,\n )\n the_parser.add_argument(CmdArgs.contact_dist_optional,\n help=CmdArgs.contact_dist_help, type = float,\n )\n the_parser.add_argument(CmdArgs.num_dca_contacts_optional,\n help = CmdArgs.num_dca_contacts_help, type = int,\n )\n the_parser.add_argument(CmdArgs.wc_neighbor_dist_optional, type= int,\n help = CmdArgs.wc_neighbor_dist_help,\n )\n the_parser.add_argument(CmdArgs.pdb_id_optional, help = CmdArgs.pdb_id_help)\n\n if subcommand_name in FILE_CONTENT_SUBCOMMANDS:\n if subcommand_name == 'pdb_content':\n the_parser.add_argument(CmdArgs.pdb_file, help = CmdArgs.pdb_file_help)\n if subcommand_name in MSA_TRIMMING_SUBCOMMANDS:\n the_parser.add_argument(CmdArgs.max_gap_optional,\n type = float, help = CmdArgs.max_gap_help,\n )\n if subcommand_name == 'trim_by_refseq':\n the_parser.add_argument(CmdArgs.biomolecule, help=CmdArgs.biomolecule_help)\n the_parser.add_argument(CmdArgs.msa_file, help=CmdArgs.msa_file_help)\n the_parser.add_argument(CmdArgs.refseq_file, help=CmdArgs.refseq_file_help)\n the_parser.add_argument(CmdArgs.remove_all_gaps_optional,\n help= CmdArgs.remove_all_gaps_help, action='store_true',\n )\n if subcommand_name == 'trim_by_gap_size':\n the_parser.add_argument(CmdArgs.msa_file, help=CmdArgs.msa_file_help)\n return None", "def _add_to_cli(self, parser, group=None):\n dest = self.dest\n if group is not None:\n dest = group.name + '_' + dest\n\n subparsers = 
parser.add_subparsers(dest=dest,\n title=self.title,\n description=self.description,\n help=self.help)\n # NOTE(jd) Set explicitly to True for Python 3\n # See http://bugs.python.org/issue9253 for context\n subparsers.required = True\n\n if self.handler is not None:\n self.handler(subparsers)", "def setup_subcommands(argparser):\n\n subparsers = argparser.add_subparsers()\n\n parser_info = subparsers.add_parser('info', help = 'Provide the information about the user')\n parser_info.add_argument('user', help = 'The user to inspect')\n\n parser_ownerships = subparsers.add_parser('ownerships', help = 'Show items which this user owns')\n parser_ownerships.add_argument('user', help = 'The name of the user to show information about')\n parser_ownerships.add_argument('-r', '--recursive', action = 'store_true', help = 'Show items which this user own through being in lists')\n \n parser_info.set_defaults(handler = show_info)\n parser_ownerships.set_defaults(handler = show_ownerships)", "def add_cmd(self, name: str, help_str: str, cmd_fn: typing.Callable, arg: str = None, arg_help: str = None):\n self.cmd_names.append(name)\n cmd = self.cli_subparsers.add_parser(name, help=help_str)\n cmd.set_defaults(func=cmd_fn)\n if arg is not None:\n cmd.add_argument(arg, help=arg_help)", "def _make_parser(self, **kwargs):\n\n kwargs.setdefault('help', self.help)\n kwargs.setdefault('formatter_class',argparse.RawDescriptionHelpFormatter)\n kwargs.setdefault('description', self.description)\n kwargs.setdefault('name', self.name)\n names = (kwargs.get('name') or self.name).split('.')\n \n def _get_subparser(a):\n if a._subparsers:\n for action in a._subparsers._actions:\n if isinstance(action, argparse._SubParsersAction):\n return action\n raise RuntimeError('could not find adequate subparser')\n return a.add_subparsers(dest='command',\n title='commands',\n metavar='COMMAND')\n def _get_parser(node, idx, names):\n name = names[idx]\n if name in node.choices:\n return node.choices[name]\n args = {\n 'name' : name,\n 'help' : 'a group of sub-commands',\n }\n return node.add_parser(**args)\n \n parser = ACMD_PARSER\n node = _get_subparser(parser)\n\n for i,n in enumerate(names[:-1]):\n node = _get_subparser(parser)\n parser = _get_parser(node, i, names)\n \n node = _get_subparser(parser)\n kwargs['name'] = names[-1]\n parser = node.add_parser(**kwargs)\n return parser", "def create_parser():\n parser = argparse.ArgumentParser()\n subparsers = parser.add_subparsers(title=\"Commands\", dest=\"subparser_name\")\n subparsers.add_parser(\"generate-settings\", help=\"Generate settings.json to install \"\n \"Gluu Cloud Native Edition non-interactively\")\n subparsers.add_parser(\"install\", help=\"Install Gluu Cloud Native Edition using Kustomize. Depreciated > 4.3.\")\n subparsers.add_parser(\"install-no-wait\", help=\"Install Gluu Cloud Native Edition using Kustomize. \"\n \"Depreciated > 4.3. \"\n \"There will be no wait time between installing services. \"\n \"Pods may look like they are restarting but they will \"\n \"be waiting for hierarchy \"\n \"pods to be running\")\n subparsers.add_parser(\"install-ldap-backup\", help=\"Install ldap backup cronjob only.\")\n subparsers.add_parser(\"restore\", help=\"Install Gluu Cloud Native Edition with a \"\n \"running database and previous configuration using Kustomize.\"\n \"Depreciated > 4.3\")\n subparsers.add_parser(\"uninstall\", help=\"Uninstall Gluu that was installed using Kustomize. 
\"\n \"Depreciated > 4.3\")\n subparsers.add_parser(\"upgrade\", help=\"Upgrade Gluu Cloud Native Edition using Kustomize. Depreciated > 4.3\")\n subparsers.add_parser(\"upgrade-values-yaml\", help=\"Upgrade Gluu Cloud Native Edition\")\n subparsers.add_parser(\"install-couchbase\", help=\"Install Couchbase only. Used with installation of Gluu with Helm\")\n subparsers.add_parser(\"install-couchbase-backup\", help=\"Install Couchbase backup only.\")\n subparsers.add_parser(\"uninstall-couchbase\", help=\"Uninstall Couchbase only.\")\n subparsers.add_parser(\"helm-install\", help=\"Install Gluu Cloud Native Edition using helm. \"\n \"This also installs the nginx-ingress chart\")\n subparsers.add_parser(\"helm-uninstall\", help=\"Uninstall Gluu Cloud Native Edition using helm. \"\n \"This also uninstalls the nginx-ingress chart\")\n\n subparsers.add_parser(\"helm-install-gluu\", help=\"Install Gluu Cloud Native Edition using helm. \"\n \"This assumes nginx-ingress is installed\")\n subparsers.add_parser(\"helm-uninstall-gluu\", help=\"Uninstall Gluu Cloud Native Edition using helm. \"\n \"This only uninstalls Gluu\")\n subparsers.add_parser(\"version\", help=\"Outputs version of pygluu installer.\")\n return parser", "def add_arguments(self, parser):\n\n cmd = self # make sure we can use sub parser in django. via stack_overflow\n\n class SubParser(CommandParser):\n \"\"\"Use to avoid the error when using sub parser in django's add_arguments method.\"\"\"\n def __init__(self, **kwargs):\n super(SubParser, self).__init__(cmd, **kwargs)\n\n # add custom sub commands.\n\n subparsers = parser.add_subparsers(\n title=\"sub commands\",\n parser_class=SubParser,\n dest='sub_command',\n help='Sub commands you can use.'\n )\n\n # actions to start or stop socket server.\n\n server = subparsers.add_parser('server', help=\"Server Commands\")\n server.add_argument(\n 'action',\n metavar='ACTION',\n choices=self.socket_server_actions,\n help='Actions is: <%s>' % '|'.join(self.socket_server_actions),\n )\n\n # actions of targets when calling server is running.\n\n proxy = subparsers.add_parser('proxy', help=\"Proxy Commands\")\n proxy.add_argument(\n '-a', '--action',\n metavar='ACTION',\n required=True,\n choices=self.proxy_job_actions,\n help='Actions is: <%s>' % '|'.join(self.proxy_job_actions)\n )\n proxy.add_argument(\n '-t', '--targets',\n metavar='TARGET',\n nargs='*',\n help='Targets can be empty which means ALL, you can list targets by <./manage.py mirrordata proxy -a ping>.'\n )", "def cli():\n pass # do nothing here, it just defines the name for other subcommands", "def add_subcommand(self, command):\n\n if self.subcommand_parser is None:\n self.subcommand_parser = self.parser.add_subparsers(\n dest='command', help='Please select one command mode below',\n title='Command modes'\n )\n self.subcommands = {}\n\n if not isinstance(command, ScriptCommand):\n raise ScriptError('Subcommand must be a ScriptCommand instance')\n\n parser = self.subcommand_parser.add_parser(\n command.name,\n help=command.short_description,\n description=command.description,\n epilog=command.epilog,\n formatter_class=argparse.RawTextHelpFormatter,\n )\n self.subcommands[command.name] = command\n command.script = self\n\n if callable(getattr(command, '__register_arguments__', None)):\n command.__register_arguments__(parser)\n\n return parser", "def _register_commands(self):\n cmds = []\n cmd_help = CommandParser(\"help\", \"Show help for a command.\")\n cmd_help.add_argument(\n \"command\",\n nargs=\"*\",\n help=\"The 
command to get help for. Specify multiple names to get help for subcommands.\",\n )\n cmd_help.add_argument(\"-m\", \"--module\", help=\"List all commands from the given module\")\n cmd_help.add_argument(\n \"-f\",\n \"--full\",\n action=\"store_true\",\n help='Include descriptions in the \"all\" help output.',\n )\n cmds.append(cmd_help)\n\n target_mod = CommandParser()\n target_mod.add_argument(\"module\", nargs=\"+\", help=\"Target module(s)\")\n target_mod.add_argument(\n \"-p\",\n \"--protocol\",\n action=\"store_const\",\n const=\"protocol\",\n default=\"feature\",\n dest=\"mtype\",\n help=\"Target is a protocol module\",\n )\n cmd_module = CommandParser(\"module\", \"Manage and query ZeroBot modules\")\n add_subcmd = cmd_module.make_adder(metavar=\"OPERATION\", dest=\"subcmd\", required=True)\n add_subcmd(\"load\", description=\"Load a module\", parents=[target_mod])\n add_subcmd(\"reload\", description=\"Reload a module\", parents=[target_mod])\n subcmd_list = add_subcmd(\"list\", description=\"List available modules\")\n subcmd_list.add_argument(\"-l\", \"--loaded\", action=\"store_true\", help=\"Only loaded modules\")\n list_group = subcmd_list.add_mutually_exclusive_group()\n default_categories = [\"protocol\", \"feature\"]\n list_group.add_argument(\n \"-f\",\n \"--feature\",\n action=\"store_const\",\n const=[\"feature\"],\n dest=\"category\",\n default=default_categories,\n help=\"Only feature modules\",\n )\n list_group.add_argument(\n \"-p\",\n \"--protocol\",\n action=\"store_const\",\n const=[\"protocol\"],\n dest=\"category\",\n default=default_categories,\n help=\"Only protocol modules\",\n )\n add_subcmd(\"info\", description=\"Show module information\", parents=[target_mod])\n cmds.append(cmd_module)\n\n save_reload_args = CommandParser()\n save_reload_args.add_argument(\n \"config_file\",\n nargs=\"*\",\n help=\"Name of config file (without .toml extension). Omit to affect all loaded config files.\",\n )\n set_reset_args = CommandParser()\n set_reset_args.add_argument(\"config_file\", help=\"Name of config file (without .toml extension)\")\n cmd_config = CommandParser(\"config\", \"Manage configuration\")\n add_subcmd = cmd_config.make_adder(metavar=\"OPERATION\", dest=\"subcmd\", required=True)\n add_subcmd(\"save\", description=\"Save config files to disk\", parents=[save_reload_args])\n subcmd_savenew = add_subcmd(\"savenew\", description=\"Save config file to a new path\")\n subcmd_savenew.add_argument(\"config_file\", help=\"Name of config file (without .toml extension)\")\n subcmd_savenew.add_argument(\"new_path\", help=\"The path to save the config file to\")\n add_subcmd(\n \"reload\",\n description=\"Reload config files from disk\",\n parents=[save_reload_args],\n )\n subcmd_set = add_subcmd(\"set\", description=\"Modify config settings\", parents=[set_reset_args])\n subcmd_set.add_argument(\n \"key_path\",\n help=\"The config key to set. Subkeys are separated by dots, e.g. 'Core.Backup.Filename'\",\n )\n subcmd_set.add_argument(\"value\", nargs=\"?\", help=\"The new value. Omit to show the current value.\")\n subcmd_reset = add_subcmd(\n \"reset\",\n description=\"Reset config settings to last loaded value\",\n parents=[set_reset_args],\n )\n subcmd_reset.add_argument(\n \"key_path\",\n nargs=\"?\",\n help=(\n \"The config key to set. Subkeys are separated by dots, \"\n \"e.g. 'Core.Backup.Filename'. 
If omitted, the entire \"\n \"config will be reset.\"\n ),\n )\n subcmd_reset.add_argument(\n \"-d\",\n \"--default\",\n action=\"store_true\",\n help=\"Set the key to its default value instead. Effectively unsets a config key.\",\n )\n cmds.append(cmd_config)\n\n cmd_version = CommandParser(\"version\", \"Show version information\")\n cmds.append(cmd_version)\n\n cmd_restart = CommandParser(\"restart\", \"Restart ZeroBot.\")\n cmd_restart.add_argument(\"msg\", nargs=\"*\", help=\"Message sent to protocol modules as a reason\")\n cmds.append(cmd_restart)\n\n cmd_quit = CommandParser(\"quit\", \"Shut down ZeroBot.\")\n cmd_quit.add_argument(\"msg\", nargs=\"*\", help=\"Message sent to protocol modules as a reason\")\n cmds.append(cmd_quit)\n\n cmd_wait = CommandParser(\"wait\", \"Execute a command after a delay\")\n cmd_wait.add_argument(\n \"delay\",\n help=\"Amount of time to delay. Accepts the following modifier suffixes: 'ms', 's' (default), 'm', 'h'.\",\n )\n cmd_wait.add_argument(\"command\", help=\"Command to delay\")\n cmd_wait.add_argument(\"args\", nargs=argparse.REMAINDER, help=\"Command arguments\")\n cmds.append(cmd_wait)\n\n cmd_cancel = CommandParser(\"cancel\", \"Cancel a waiting command\")\n cancel_group = cmd_cancel.add_mutually_exclusive_group()\n cancel_group.add_argument(\"id\", type=int, nargs=\"?\", help=\"The ID of a waiting command\")\n cancel_group.add_argument(\"-l\", \"--list\", action=\"store_true\", help=\"List currently waiting commands\")\n cmds.append(cmd_cancel)\n\n cmd_backup = CommandParser(\"backup\", \"Create a database backup\")\n cmd_backup.add_argument(\"name\", type=Path, help=\"Backup filename\")\n cmds.append(cmd_backup)\n\n self.command_register(\"core\", *cmds)", "def cog_subcommand(\n *,\n base,\n subcommand_group=None,\n name=None,\n description: str = None,\n base_description: str = None,\n base_desc: str = None,\n subcommand_group_description: str = None,\n sub_group_desc: str = None,\n guild_ids: typing.List[int] = None,\n options: typing.List[dict] = None,\n connector: dict = None\n):\n base_description = base_description or base_desc\n subcommand_group_description = subcommand_group_description or sub_group_desc\n\n def wrapper(cmd):\n desc = description or inspect.getdoc(cmd)\n if options is None:\n opts = manage_commands.generate_options(cmd, desc, connector)\n else:\n opts = options\n\n _sub = {\n \"func\": cmd,\n \"name\": name or cmd.__name__,\n \"description\": desc,\n \"base_desc\": base_description,\n \"sub_group_desc\": subcommand_group_description,\n \"guild_ids\": guild_ids,\n \"api_options\": opts,\n \"connector\": connector,\n }\n return CogSubcommandObject(_sub, base, name or cmd.__name__, subcommand_group)\n\n return wrapper", "def add_subcommands(self, name='subcmd', arg_kws=None, optional=False):\n if self._subcmds is not None:\n raise RuntimeError(\"This config already has subcommands.\")\n if name in self.ReservedVariables or name[0] == '_':\n raise ValueError(\"Config variable name '%s' is reserved.\" % name)\n if name in self.confvariable:\n raise ValueError(\"Config variable '%s' is already defined.\" % name)\n if arg_kws is None:\n arg_kws = dict(title=\"subcommands\")\n else:\n arg_kws = dict(arg_kws)\n arg_kws['dest'] = name\n subparsers = self.argparser.add_subparsers(**arg_kws)\n var = ConfigSubCmds(name, optional, self, subparsers)\n self.confvariable[name] = var\n self.confvariables.append(var)\n self._subcmds = var\n return var", "def create_parser(self, prog_name, subcommand):\r\n self.prog_name = \"{} 
{}\".format(prog_name, subcommand)\r\n return super(TrackedCommand, self).create_parser(prog_name, subcommand)", "async def module_command_help(self, ctx, parsed):\n\n def _create_commandhelp(request):\n usage, desc = request.format_help().split(\"\\n\\n\")[:2]\n usage = usage.partition(\" \")[2]\n desc = desc.rstrip()\n args, opts, subcmds, aliases = {}, {}, {}, []\n prev_arg = ()\n for arg in request._get_positional_actions():\n name = arg.metavar or arg.dest\n if isinstance(arg, _SubParsersAction):\n args[name] = (arg.help, True)\n prev_sub = ()\n for subname, subparser in arg.choices.items():\n # Aliases follow the canonical name\n if prev_sub and subparser is prev_sub[1]:\n subcmds[prev_sub[0]].aliases.append(subname)\n else:\n subcmds[subname] = _create_commandhelp(subparser)\n # Don't include parent command in subcommand name\n subcmds[subname].name = subname\n prev_sub = (subname, subparser)\n else:\n # Aliases follow the canonical name\n if prev_arg and arg is prev_arg[1]:\n args[prev_arg[0]].aliases.append(name)\n else:\n args[name] = (arg.help, False)\n prev_arg = (name, arg)\n for opt in request._get_optional_actions():\n names = tuple(opt.option_strings)\n if opt.nargs == 0 or opt.const:\n # Don't make it seem like flag options take a value\n metavar = None\n else:\n metavar = opt.metavar or opt.dest\n opts[names] = (metavar, opt.help)\n return CommandHelp(\n HelpType.CMD,\n request.name,\n desc,\n usage,\n aliases=aliases,\n args=args,\n opts=opts,\n subcmds=subcmds,\n )\n\n if parsed.args[\"command\"]:\n help_args = parsed.args[\"command\"]\n if len(help_args) > 1 and help_args[0:2] == [\"help\"] * 2:\n await ctx.reply_command_result(parsed, \"I'm afraid that you're far beyond any help...\")\n return\n try:\n request = self._commands[help_args[0]]\n except KeyError:\n cmd_help = CommandHelp(HelpType.NO_SUCH_CMD, help_args[0])\n else:\n cmd_help = _create_commandhelp(request)\n help_args.pop(0)\n subcmd = cmd_help\n for sub_request in help_args:\n try:\n parent = subcmd\n subcmd = cmd_help.get_subcmd(sub_request)\n except KeyError:\n cmd_help = CommandHelp(HelpType.NO_SUCH_SUBCMD, sub_request, parent=parent)\n break\n else:\n cmd_help = subcmd\n elif parsed.args[\"module\"]:\n mod_id = parsed.args[\"module\"]\n if mod_id not in self._features and mod_id != \"core\":\n cmd_help = CommandHelp(HelpType.NO_SUCH_MOD, mod_id)\n else:\n try:\n parsers = [parser for parser in self._commands.iter_by_module(mod_id)]\n except KeyError:\n parsers = []\n desc = parsers[0].module.description\n cmds = {}\n for parser in parsers:\n mod = cmds.setdefault(mod_id, {})\n mod[parser.name] = parser.description\n cmd_help = CommandHelp(HelpType.MOD, mod_id, desc, cmds=cmds)\n else:\n cmds = {}\n for mod_id, parsers in self._commands.pairs():\n for parser in parsers:\n mod = cmds.setdefault(mod_id, {})\n mod[parser.name] = parser.description\n cmd_help = CommandHelp(HelpType.ALL, cmds=cmds)\n await ctx.core_command_help(parsed, cmd_help)", "def cog_subcommand(\n *,\n base,\n subcommand_group=None,\n name=None,\n description: str = None,\n base_description: str = None,\n base_desc: str = None,\n base_default_permission: bool = True,\n base_permissions: typing.Dict[int, list] = None,\n subcommand_group_description: str = None,\n sub_group_desc: str = None,\n guild_ids: typing.List[int] = None,\n options: typing.List[dict] = None,\n connector: dict = None,\n):\n base_description = base_description or base_desc\n subcommand_group_description = subcommand_group_description or sub_group_desc\n guild_ids = 
guild_ids if guild_ids else []\n if not base_permissions:\n base_permissions = {}\n\n def wrapper(cmd):\n decorator_permissions = getattr(cmd, \"__permissions__\", None)\n if decorator_permissions:\n base_permissions.update(decorator_permissions)\n\n desc = description or inspect.getdoc(cmd)\n if options is None:\n opts = manage_commands.generate_options(cmd, desc, connector)\n else:\n opts = options\n\n if guild_ids and not all(isinstance(item, int) for item in guild_ids):\n raise IncorrectGuildIDType(\n f\"The snowflake IDs {guild_ids} given are not a list of integers. Because of discord.py convention, please use integer IDs instead. Furthermore, the command '{name or cmd.__name__}' will be deactivated and broken until fixed.\"\n )\n\n _cmd = {\n \"func\": None,\n \"description\": base_description,\n \"guild_ids\": guild_ids.copy(),\n \"api_options\": [],\n \"default_permission\": base_default_permission,\n \"api_permissions\": base_permissions,\n \"connector\": {},\n \"has_subcommands\": True,\n }\n\n _sub = {\n \"func\": cmd,\n \"name\": name or cmd.__name__,\n \"description\": desc,\n \"base_desc\": base_description,\n \"sub_group_desc\": subcommand_group_description,\n \"guild_ids\": guild_ids,\n \"api_options\": opts,\n \"connector\": connector,\n }\n return CogSubcommandObject(base, _cmd, subcommand_group, name or cmd.__name__, _sub)\n\n return wrapper", "def refine_cli(\n subparsers: SubParsersAction,\n parent_parsers: List[argparse.ArgumentParser],\n) -> None:", "def extend_cli(self, root_subparsers):\n\n user_dict = {}\n if self.add_base_groups:\n user_dict = dict(shared_groups=SHARED_GROUPS)\n\n self.specification = SpecParser.from_plugin(\n subparser=root_subparsers,\n plugin=self.plugin,\n base_groups=user_dict)", "def make_adder(self, *args, **kwargs):\n kwargs.setdefault(\"dest\", \"subcmd\")\n subp = self.add_subparsers(*args, **kwargs)\n return partial(self.add_subcommand, subp)", "def add_command(self, name, desc, func=None):\n assert type(name) == str\n assert type(desc) == str\n if func is not None:\n assert callable(func)\n\n def wrap_argparse(parser, args, func):\n \"\"\"Convenience function calls argparse with list of args and calls func with them\"\"\"\n pargs = parser.parse_args(args)\n return func(**vars(pargs))\n\n assert name not in self.cmd2func, \"Command with same name already defined on this level!\"\n\n self.cmd_list.append((name, desc))\n if func is None:\n m = necapy(name=name, desc=desc)\n self.cmd2func[name] = m.parse\n return m\n else:\n ap = argparse.ArgumentParser(description=desc)\n self.cmd2func[name] = lambda args: wrap_argparse(ap, args, func)\n return ap", "def subcommand(wrapped):\n def callback(scanner, name, ob):\n scanner.subcommands[ob.name] = ob\n venusian.attach(wrapped, callback, category='subcommands')\n return wrapped", "def construct_subcommand(\n parser,\n hooks=None,\n arg_filter=None,\n is_root=True\n):\n subcommands = []\n options = []\n args = []\n subcommand = {}\n hooks = {} if hooks is None else hooks\n subcommand_hook = hooks.get(\"subcommand\")\n\n if is_root:\n subcommand[\"name\"] = parser.prog\n\n for arg in parser._actions:\n if arg_filter is not None and arg_filter(arg):\n continue\n if arg.nargs == argparse.PARSER:\n subcommand.update(get_base_suggestion(arg))\n help_map = {a.dest: a.help for a in arg._choices_actions}\n\n nested_subcommands = {}\n for name, nested_parser in arg.choices.items():\n if nested_parser in nested_subcommands:\n nested_subcommands[nested_parser][\"name\"].append(name)\n else:\n 
nested_subcommands[nested_parser] = {\n \"name\": [name],\n **construct_subcommand(\n nested_parser,\n hooks=hooks,\n arg_filter=arg_filter,\n is_root=False\n ),\n }\n if name in help_map and help_map[name] != argparse.SUPPRESS:\n nested_subcommands[nested_parser][\"description\"] = str(help_map[name])\n for p, nested_subcommand in nested_subcommands.items():\n if len(nested_subcommand[\"name\"]) == 1:\n nested_subcommand[\"name\"] = nested_subcommand[\"name\"][0]\n if subcommand_hook:\n subcommand_hook(nested_subcommand, p)\n subcommands.append(nested_subcommand)\n elif arg.option_strings:\n options.append(construct_option(arg, hooks, parser))\n else:\n args.extend(construct_args(arg, hooks, parser))\n\n if subcommands:\n subcommand[\"subcommands\"] = subcommands\n if options:\n subcommand[\"options\"] = options\n if args:\n subcommand[\"args\"] = args\n\n if is_root and subcommand_hook:\n subcommand_hook(subcommand, parser)\n\n return subcommand", "def _add_subcommands():\n geocube.add_command(cmd_modules.make_geocube.make_geocube)", "def __init__(self, *args, **kwargs):\n # The subcommand array, with the help command included.\n self.subcommands = list(kwargs.pop('subcommands', []))\n self.subcommands.append(self._HelpSubcommand)\n\n # A more helpful default usage.\n if 'usage' not in kwargs:\n kwargs['usage'] = \"\"\"\n %prog [global options] COMMAND [ARGS...]\n %prog help COMMAND\"\"\"\n\n # Super constructor.\n optparse.OptionParser.__init__(self, *args, **kwargs)\n\n # Adjust the help-visible name of each subcommand.\n for subcommand in self.subcommands:\n subcommand.parser.prog = '%s %s' % \\\n (self.get_prog_name(), subcommand.name)\n\n # Our root parser needs to stop on the first unrecognized argument.\n self.disable_interspersed_args()", "def create_parser(self, prog_name, subcommand):\r\n # Hack __main__ so --help in dev_appserver_main works OK.\r\n sys.modules['__main__'] = dev_appserver_main\r\n return super(Command, self).create_parser(prog_name, subcommand)", "def register_argument_parser(add_parser, action):\n sub_command = str(action)\n return add_parser(sub_command,\n help=f'{sub_command} token',\n description=f'{sub_command.capitalize()} a Waiter token. '\n 'In addition to the optional arguments '\n 'explicitly listed below, '\n 'you can optionally provide any Waiter '\n 'token parameter as a flag. For example, '\n 'to specify 10 seconds for the '\n 'grace-period-secs parameter, '\n 'you can pass --grace-period-secs 10. '\n 'You can also provide nested fields separated by a period. 
For example, '\n 'to specify an environment variable FOO as \\\"bar\\\", you can pass --env.FOO \\\"bar\\\".')", "def test_create_subparser_noop(self):\n self.command.create_subparser()", "def add_cli(self, subparser):\n new_parser = subparser.add_parser('create', help='create new scratch file')\n new_parser.add_argument('name', nargs='?', default=None, help=\"Optional Name to be given to the file, \"\n \"default name is an increment of 'scratch##'\")\n new_parser.set_defaults(func=self.action)\n return subparser", "def subparser( parser, subparsers ):", "def AddSubCommand(self, command_info):\n name = command_info[2]\n self._commands_to_load[name] = command_info", "def create_parser():\n parser = argparse.ArgumentParser(\n description='CLI for SMS',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n )\n\n # Add subcommands\n subparsers = parser.add_subparsers(title='subcommands', dest='cmd')\n\n # Downlink Unitdata\n downlink_unitdata_parser = subparsers.add_parser(\n 'DU', help=\"Send downlink unitdata to SMSOrc8rGW service\",\n )\n downlink_unitdata_parser.add_argument('imsi', help='e.g. 001010000090122 (no prefix required)')\n downlink_unitdata_parser.add_argument('data', help='Data as a hex string e.g. 1fc13a00')\n downlink_unitdata_parser.set_defaults(func=send_downlink_unitdata)\n\n return parser", "def fill_subparsers(subparsers):\n\n\t\tfor cls in BaseCommand.__subclasses__():\n\t\t\tcls.add_subparser(subparsers)", "def add_subparser(subparsers):\n parser = subparsers.add_parser(\"utils/update\",\n description=\"Update apt and the groot_ansible ecosystem\", # this shows in the help for this command\n help=\"update your ansible/apt environment\", # this shows in the parent parser\n formatter_class=argparse.ArgumentDefaultsHelpFormatter\n )\n common.add_ansible_arguments(parser)\n parser.set_defaults(func=parse_args)", "def add_commands(subparsers, path):\n log.debug('importing %s', path)\n try:\n del sys.modules[path]\n except KeyError:\n pass\n try:\n package = importlib.import_module(path)\n except Exception as e:\n log.warning('failed to import commands package %s',\n path, exc_info=True)\n return\n log.debug('commands package: %s', path)\n for (finder, name, ispkg) in pkgutil.iter_modules(package.__path__):\n if ispkg:\n continue\n try:\n command = importlib.import_module('.' 
+ name, path)\n except Exception as e:\n log.warning('failed to import %s command: %s',\n path, name, exc_info=True)\n continue\n if not getattr(command, 'run', None):\n log.warning('skipping command module without run function: %s',\n name)\n continue\n log.debug('command: %s'%(name))\n name = command.__name__.split('.')[-1]\n parser_help = getattr(command, 'parser_help', None)\n if parser_help is None:\n log.warning('command %s missing help text'%(command.__name__))\n parser = subparsers.add_parser(name, help=parser_help)\n command.add_arguments(parser)\n parser.set_defaults(run=command.run)", "def create_parser(self, prog_name, subcommand):\n parser = CommandParser(\n self, prog=\"%s %s\" % (os.path.basename(prog_name), subcommand),\n description=self.help or None,\n )\n parser.add_argument(\n '--pythonpath',\n help='A directory to add to the Python path',\n )\n self.add_arguments(parser)\n return parser", "def subcommand(args=None, parent=subparsers):\n def decorator(func):\n parser = parent.add_parser(func.__name__, description=func.__doc__)\n for arg in args:\n parser.add_argument(*arg[0], **arg[1])\n parser.set_defaults(func=func)\n\n if args is None:\n args = []\n return decorator", "def subcommand(self, base_name, name, description=MISSING, options=MISSING, guild_ids=MISSING, default_permission=True, guild_permissions=MISSING):\n def wrapper(callback):\n \"\"\"The wrapper for the callback function. The function's parameters have to have the same name as the parameters specified in the slash command.\n\n `ctx` is of type :class:`~SlashedCommand` and is used for responding to the interaction and more\n\n Examples\n --------\n - no parameter:\n `async def command(ctx): ...`\n - required parameter \"number\":\n `async def command(ctx, number): ...`\n - optional parameter \"user\":\n `async def command(ctx, user=default_value)`\n - multiple optional parameters \"user\", \"number\":\n `async def command(ctx, user=default_value, number=default_value)`\n - one required and one optional parameter \"user\", \"text\":\n `async def command(ctx, user, text=default_value)`\n\n Note: Replace `default_value` with a value you want to be used if the parameter is not specified in discord, if you don't want a default value, just set it to `None`\n \"\"\"\n if self.subcommands.get(base_name) is None:\n self.subcommands[base_name] = {}\n\n self.subcommands[base_name][name] = SubSlashCommand(callback, base_name, name, description, options=options, guild_ids=guild_ids, default_permission=default_permission, guild_permissions=guild_permissions)\n return wrapper", "def add_subparser(\n subparsers: SubParsersAction, parents: List[argparse.ArgumentParser]\n) -> None:\n run_parser = subparsers.add_parser(\n \"run\",\n parents=parents,\n conflict_handler=\"resolve\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n help=\"Starts a Rasa server with your trained model.\",\n )\n run_parser.set_defaults(func=run)\n\n run_subparsers = run_parser.add_subparsers()\n sdk_subparser = run_subparsers.add_parser(\n \"actions\",\n parents=parents,\n conflict_handler=\"resolve\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n help=\"Runs the action server.\",\n )\n sdk_subparser.set_defaults(func=run_actions)\n\n arguments.set_run_arguments(run_parser)\n arguments.set_run_action_arguments(sdk_subparser)", "def add_commands(parser, subparsers):\n subparser = subparsers.add_parser('libraries', help='search for LogicBlox libraries')\n subparser.set_defaults(func=execute_libraries)\n\n 
subparser.add_argument('libraries',\n nargs='*',\n help=\"libraries to locate\")\n\n subparser.add_argument('--libpath',\n help=\"library path to search\")\n \n subparser.add_argument('--dependencies', '-d',\n default=False,\n action='store_true',\n help=\"print the libraries upon which a library depends\")\n\n subparser.add_argument('--quiet', '-q',\n default=False,\n action='store_true',\n help=\"do not display any information. Used when simply querying the exit code\")\n subparser", "def _add_to_cli(self, parser, group=None):\n container = self._get_argparse_container(parser, group)\n kwargs = self._get_argparse_kwargs(group)\n prefix = self._get_argparse_prefix('', group.name if group else None)\n deprecated_names = []\n for opt in self.deprecated_opts:\n deprecated_name = self._get_deprecated_cli_name(opt.name,\n opt.group)\n if deprecated_name is not None:\n deprecated_names.append(deprecated_name)\n self._add_to_argparse(parser, container, self.name, self.short,\n kwargs, prefix,\n self.positional, deprecated_names)", "def add_new_subparser(subparsers, formatter_class=RawTextHelpFormatter):\n # TODO: add info on no args to description or help\n # Adds custom --help argument\n generic_parent_parser = cmd.argparse.get_generic_parent_parser()\n new_description = 'Create a new test module or page object'\n new_help = new_description\n new_parser = subparsers.add_parser(\n 'new', description=new_description, help=new_help,\n parents=[generic_parent_parser],\n formatter_class=formatter_class,\n add_help=False, epilog=cmd.argparse.ARGPARSE_EPILOG\n )\n # New <type> subparsers\n new_type_desc = 'Run \\'{} <type> --help\\' for details'.format(new_parser.prog)\n new_subparsers = new_parser.add_subparsers(\n title='File Types', description=new_type_desc, dest='type', metavar='<type>'\n )\n # New test parser\n new_test_parent_parser = get_new_parent_parser(\n parents=[generic_parent_parser], class_name_metavar='<TestCaseClass>',\n class_name_help='Name to use for the initial test case class'\n )\n new_test_description = 'Create a new test module'\n new_test_help = new_test_description\n new_subparsers.add_parser(\n 'test', description=new_test_description, help=new_test_help,\n parents=[new_test_parent_parser],\n add_help=False, epilog=cmd.argparse.ARGPARSE_EPILOG\n )\n # New page object parser\n new_page_parent_parser = get_new_parent_parser(\n parents=[generic_parent_parser], class_name_metavar='<PageObjectClass>',\n class_name_help='Name to use for the initial page object class'\n )\n new_page_description = 'Create a new page object module'\n new_page_help = new_page_description\n new_page_parser = new_subparsers.add_parser(\n 'page', description=new_page_description, help=new_page_help,\n parents=[new_page_parent_parser],\n add_help=False, epilog=cmd.argparse.ARGPARSE_EPILOG\n )\n # TODO: add optional --prototype arg with a list of valid page object prototype classes\n return new_parser", "def add_command(subparsers):\n\n parser = subparsers.add_parser('create', help=create.__doc__)\n\n parser.add_argument('-r', '--recreate', action='store_true', help='If set, I\\'ll first erase the current database')\n parser.add_argument('-v', '--verbose', action='count', help='Increase verbosity?')\n parser.add_argument('-d', '--image-dir', default='/idiap/project/hface/databases/polimetric_thermal_database/Registered/', help=\"Change the relative path to the directory containing the images of the Polarimetric database.\")\n\n parser.set_defaults(func=create) #action", "def _resolve_commands(parser):\n 
from .plugin import list as list_plugins\n\n # create a subparser\n subparsers = parser.add_subparsers(dest='cmd')\n\n default_command = None\n\n for command in list_plugins('command'):\n _log.info('add command ' + command.id)\n if hasattr(command, 'isDefault') and command.isDefault:\n default_command = command.id\n\n # create a argument parser for the command\n cmdparser = subparsers.add_parser(command.id)\n _log.info('loading and initializing the command: ' + command.id)\n\n # use the phovea extension point loading mechanism.\n # pass the parser as argument to the factory method so that the extension point (i.e., command)\n # can add further arguments to the parser (e.g., the address or port of the server).\n # the factory must return a launcher function, which gets the previously defined parser arguments as parameter.\n instance = command.load().factory(cmdparser)\n\n # register the instance as argument `launcher` and the command as `launcherid` to the command parser\n _log.info('add command instance to parser')\n cmdparser.set_defaults(launcher=instance, launcherid=command.id)\n\n return default_command", "def test_add_common_arguments_subparser():\n parser = argparse.ArgumentParser()\n subparsers = parser.add_subparsers(dest='action')\n sub = subparsers.add_parser('sub')\n add_common_arguments(sub)\n\n options = parser.parse_args(['sub'])\n assert hasattr(options, 'config')\n assert hasattr(options, 'configdir')\n assert options.config == 'default'\n assert options.configdir == config.DEFAULT_HOMEDIR\n\n options = parser.parse_args(['sub', '-c', 'test-short'])\n assert options.config == 'test-short'\n\n options = parser.parse_args(['sub', '--config', 'test-long'])\n assert options.config == 'test-long'\n\n options = parser.parse_args(['sub', '--config-dir', 'test-long'])\n assert options.configdir == 'test-long'\n\n options = parser.parse_args(\n ['sub', '-c', 'test-short', '--config-dir', 'test-long-dir'])\n assert options.config == 'test-short'\n assert options.configdir == 'test-long-dir'\n\n options = parser.parse_args(\n ['sub', '--config', 'test-long', '--config-dir', 'test-long-dir'])\n assert options.config == 'test-long'\n assert options.configdir == 'test-long-dir'", "def test_handle_subcommand_help(self):\r\n subcommands = list(self.testcommand.subparser.choices.keys())\r\n for subcommand in subcommands:\r\n command = f\"project {subcommand} --help\"\r\n ret, code = self.testcommand.handle(command, user)\r\n self.assertEqual(1, ret.count(\"usage\"))\r\n self.assertEqual(code, 200)\r\n\r\n command = f\"project {subcommand} -h\"\r\n ret, code = self.testcommand.handle(command, user)\r\n self.assertEqual(1, ret.count(\"usage\"))\r\n self.assertEqual(code, 200)\r\n\r\n command = f\"project {subcommand} --invalid argument\"\r\n ret, code = self.testcommand.handle(command, user)\r\n self.assertEqual(1, ret.count(\"usage\"))\r\n self.assertEqual(code, 200)", "def __init__(self, name, dest=None, handler=None,\n title=None, description=None, help=None):\n super(SubCommandOpt, self).__init__(name, type=types.String(),\n dest=dest, help=help)\n self.handler = handler\n self.title = title\n self.description = description", "def register_cli(cls):\n for cmd in cls.SUB_GROUP_COMMANDS:\n getattr(cls, cls.SUB_GROUP_NAME).add_command(getattr(cls, cmd))", "def test_handle_subcommand_help(self):\n subcommands = list(self.testcommand.subparser.choices.keys())\n for subcommand in subcommands:\n command = f\"team {subcommand} --help\"\n ret, code = self.testcommand.handle(command, user)\n 
self.assertEqual(1, ret.count(\"usage\"))\n self.assertEqual(code, 200)\n\n command = f\"team {subcommand} -h\"\n ret, code = self.testcommand.handle(command, user)\n self.assertEqual(1, ret.count(\"usage\"))\n self.assertEqual(code, 200)\n\n command = f\"team {subcommand} --invalid argument\"\n ret, code = self.testcommand.handle(command, user)\n self.assertEqual(1, ret.count(\"usage\"))\n self.assertEqual(code, 200)", "def register(\n self,\n root_parser: argparse.ArgumentParser,\n subparsers: argparse._SubParsersAction,\n ) -> None:\n self.root_parser = root_parser\n parser = subparsers.add_parser(\n self.name,\n aliases=self.aliases,\n help=self.help,\n description=self.help,\n add_help=self.add_help,\n )\n parser.set_defaults(command=self)\n self.configure(parser)", "def register_subcommand(nest_ops_subparsers):\n parser = nest_ops_subparsers.add_parser('compile', \\\n help=COMPILE_CMD_HELP, \\\n description=COMPILE_CMD_DESCRIPTION, \\\n formatter_class=argparse.RawTextHelpFormatter )\n\n parser.add_argument('--code_type', \\\n choices=VALID_CODE_TYPES, \\\n help=CODE_TYPE_ARG_HELP, \\\n default='all' \\\n )\n\n parser.add_argument('--project', \\\n help=\"\"\"Which project to build. Only affects the web_assets:dist\n code_type, where it determines which project's index.html\n will be the main entry point index.html in the static files.\"\"\", \\\n choices=nest_envs.VALID_PROJECT_NAMES, \\\n default=nest_envs.DEFAULT_PROJECT_NAME, \\\n )\n \n parser.add_argument('--runlevel', \\\n help='Determines the run level for logging, error checking, etc.',\n choices=nest_envs.VALID_RUNLEVEL_NAMES,\n default=nest_envs.DEFAULT_RUNLEVEL_NAME, \\\n )\n\n #super ugly callback mechanism from argparse\n parser.set_defaults(func=_run_compile_cmd)\n return", "def customize_subparser(cls, subparser: argparse.ArgumentParser) -> None:\n pass", "def add_generate_token_subcommand(\n subparsers: Any,\n) -> None:\n generate_token_sp = subparsers.add_parser(\n \"generate-token\",\n formatter_class=Formatter,\n description=dedent( # noqa: WPS462 -- docs\n \"\"\"\\\n Generate token.\n\n Token is required to consume the protected endpoints.\n\n Example:\n ```shell\n # Generate a rsa key pair\n openssl genpkey -algorithm RSA -out private_key.pem \\\\\n -pkeyopt rsa_keygen_bits:2048\n openssl rsa -pubout -in private_key.pem -out public_key.pem\n # Generate token\n bartender generate-token --format header > token.txt\n # Use token\n curl -X 'GET' \\\\\n 'http://127.0.0.1:8000/api/whoami' \\\\\n -H 'accept: application/json' \\\\\n -H @token.txt | jq .\n ```\n \"\"\",\n ),\n help=\"Generate token.\",\n )\n generate_token_sp.add_argument(\n \"--private-key\",\n default=Path(\"private_key.pem\"),\n type=Path,\n help=\"Path to RSA private key file\",\n )\n generate_token_sp.add_argument(\n \"--username\",\n default=\"someone\",\n help=\"Username to use in token\",\n )\n generate_token_sp.add_argument(\n \"--roles\",\n nargs=\"+\",\n default=[\"expert\", \"guru\"],\n help=\"Roles to use in token\",\n )\n onehour_in_minutes = 60\n generate_token_sp.add_argument(\n \"--lifetime\",\n default=onehour_in_minutes,\n type=int,\n help=\"Lifetime of token in minutes\",\n )\n generate_token_sp.add_argument(\n \"--issuer\",\n default=\"bartendercli\",\n help=\"Issuer of token\",\n )\n generate_token_sp.add_argument(\n \"--oformat\",\n default=\"plain\",\n choices=[\"header\", \"plain\"],\n help=\"Format of output\",\n )\n generate_token_sp.set_defaults(func=generate_token_subcommand)", "def do_help(self, args): \n if args.command:\n 
if len(args.command) > 1:\n command = args.command[0] +\"-\" + args.command[1]\n else:\n command = args.command[0]\n if command in self.subcommands:\n self.subcommands[command].print_help()\n else:\n print((\"'%s' is not a valid subcommand\") %\n args.command)\n else:\n self.parser.print_help()", "def add_commands(parser, functions, namespace=None, title=None,\n description=None, help=None):\n subparsers = get_subparsers(parser, create=True)\n\n if namespace:\n # make a namespace placeholder and register the commands within it\n assert isinstance(namespace, string_types)\n subsubparser = subparsers.add_parser(namespace, help=title)\n subparsers = subsubparser.add_subparsers(title=title,\n description=description,\n help=help)\n else:\n assert not any([title, description, help]), (\n 'Arguments \"title\", \"description\" or \"extra_help\" only make sense '\n 'if provided along with a namespace.')\n\n for func in functions:\n # XXX we could add multiple aliases here but it's a bit of a hack\n cmd_name = getattr(func, ATTR_ALIAS, func.__name__.replace('_','-'))\n cmd_help = func.__doc__\n command_parser = subparsers.add_parser(cmd_name, help=cmd_help)\n for a_args, a_kwargs in getattr(func, ATTR_ARGS, []):\n command_parser.add_argument(*a_args, **a_kwargs)\n command_parser.set_defaults(function=func)", "def add_usage(self, usage, actions, groups, prefix=''):\n #if prefix is None:\n # prefix = ''\n return super(SubcommandHelpFormatter, self).add_usage(usage, actions, groups, prefix='')", "def init_parser(subparsers):\n parser = subparsers.add_parser(COMMAND, help=\"Add a new task to the task list\")\n parser.add_argument(\"title\", type=str, help=\"The title of the new task\")\n parser.add_argument(\"description\", type=str, help=\"The description of the new task\")\n doto.cli.cmd.task.init_task_flags(parser)", "def register_command(subparser):\n update_parser = subparser.add_parser('update', help='Run the log files through an updater. 
Used to update '\n 'between versions of autology')\n update_parser.set_defaults(func=_main)\n\n # Arguments\n update_parser.add_argument('-f', '--files', help='Update the files that are currently defined in the log '\n 'directories', action='store_true')\n update_parser.add_argument('-t', '--templates', help='Install a new output template', action='store_true')\n update_parser.add_argument('-T', '--template-definition', help='Define a template definition to install',\n default=template_utilities.DEFAULT_TEMPLATES_URL)", "def make_parser():\n parser = argparse.ArgumentParser(prog=__file__.replace(\".py\", \"\"),\n description='simple $PATH tool')\n parser.add_argument('-n', '--nocolor', dest=\"color\",\n action=\"store_false\", default=True,\n help='Turn off ANSI color codes.')\n parser.add_argument('-w', '--nowarn', action=\"store_true\",\n help='Turn off path warnings.')\n subs = parser.add_subparsers(title='subcommands',\n description='The subcommands')\n\n sub = subs.add_parser('replace', description=\"Search & Replace $PATH\")\n sub.set_defaults(cmd='path_replace')\n sub.add_argument('terms', nargs='+',\n help='Format: search:replace, search:replace, ...')\n\n sub = subs.add_parser('show', description=\"Show $PATH compoents\")\n sub.set_defaults(cmd='path_show')\n sub.add_argument('-n', '--nocolor', dest=\"color\",\n action=\"store_false\", default=True,\n help='Turn off ANSI color codes.')\n sub.add_argument('-w', '--nowarn', action=\"store_true\",\n help='Turn off path warnings.')\n\n sub = subs.add_parser('which', description=\"Platform agnostic `which -a`\")\n sub.set_defaults(cmd='path_which')\n sub.add_argument('look', help='Look for this executable')\n sub.add_argument('-n', '--nocolor', dest=\"color\",\n action=\"store_false\", default=True,\n help='Turn off ANSI color codes.')\n sub.add_argument('-v', '--version', action=\"store_true\",\n help='Show version of exact matches.')\n\n return parser", "async def command_proc(self, message):\n parser = DiscordArgumentParser(description=\"A Test Command\", prog=\">stats\")\n parser.set_defaults(message=message)\n sp = parser.add_subparsers()\n\n sub_parser = sp.add_parser('user',\n description='test something')\n sub_parser.add_argument(\n \"user_id\",\n action=ValidUserAction,\n help=\"Mention of the user in question\",\n metavar=\"@user\",\n nargs=\"?\",\n )\n sub_parser.set_defaults(cmd=self._cmd_user)\n\n sub_parser = sp.add_parser('global',\n description='test something')\n sub_parser.set_defaults(cmd=self._cmd_global)\n\n try:\n self.log.info(\"Parse Arguments\")\n results = parser.parse_args(shlex.split(message.content)[1:])\n self.log.info(results)\n if type(results) == str:\n self.log.info(\"Got normal return, printing and returning\")\n self.log.info(type(results))\n await self.client.send_message(message.channel, results)\n return\n elif hasattr(results, 'cmd'):\n await results.cmd(results)\n return\n else:\n msg = parser.format_help()\n await self.client.send_message(message.channel, msg)\n return\n except NoValidCommands as e:\n # We didn't get a subcommand, let someone else deal with this mess!\n self.log.error(\"???\")\n pass\n except HelpNeeded as e:\n self.log.info(\"TypeError Return\")\n self.log.info(e)\n msg = f\"{e}. 
You can add `-h` or `--help` to any command to get help!\"\n await self.client.send_message(message.channel, msg)\n return\n pass\n\n return", "def format_sub_commands(self, formatter: DocsCommandHelpTextFormatter):\n with formatter.indented_section(name=\"Commands\", extra_indents=1):\n formatter.write_rd(\n [\n RowDefinition(self.docs_command.base_command + \" \" + command)\n for command in self.docs_command.all_commands\n ],\n col_max=50,\n )", "def SubParser(self):\n if not self._sub_parser:\n # pylint: disable=protected-access\n self._sub_parser = self._parser.add_subparsers(\n action=parser_extensions.CloudSDKSubParsersAction,\n calliope_command=self)\n return self._sub_parser", "def get_parser(subparsers, parent=None):\n parser = subparsers.add_parser(\n \"flow\",\n description=\"Invoke ML on MCU flow\",\n parents=[parent] if parent else [],\n add_help=(parent is None),\n )\n parser.set_defaults(func=handle)\n add_common_options(parser)\n add_context_options(parser)\n add_flow_options(parser)\n subparsers = parser.add_subparsers(dest=\"subcommand2\") # this line changed\n load.get_parser(subparsers)\n tune.get_parser(subparsers)\n build.get_parser(subparsers)\n compile_.get_parser(subparsers)\n run.get_parser(subparsers)", "def _add_create_command(subparser: _SubParsersAction):\r\n parser = subparser.add_parser('create', help='Create a new folder.') \r\n parser.add_argument(\r\n '--project',\r\n required=True,\r\n help='Project key of the project that the folder will be created under.'\r\n )\r\n parser.add_argument(\r\n '--name',\r\n required=False,\r\n help='Name of the folder.'\r\n )\r\n parser.add_argument(\r\n '--type',\r\n required=False,\r\n choices=['plan', 'case', 'cycle'],\r\n help='Type of folder to create.',\r\n )\r\n parser.set_defaults(cmd=CreateFolderCommand(parser))", "def _add_update_command(subparser: _SubParsersAction) -> None:\r\n parser = subparser.add_parser('update', help='Update an existing folder.')\r\n parser.add_argument(\r\n '--id',\r\n required=True,\r\n help='Unique id of the folder to be updated.'\r\n )\r\n parser.add_argument(\r\n '--name',\r\n required=True,\r\n help='Name to assign to the folder.'\r\n )\r\n parser.set_defaults(cmd=UpdateFolderCommand(parser))", "def load_into(subparser, as_cmd=None):\n p = subparser\n p.description = description\n\n if not as_cmd:\n as_cmd = default_name\n out = cli.CommandSuite(as_cmd, p)\n out.load_subcommand(topics)\n return out", "def add_arguments(self, sub_parser):\n sp = sub_parser", "def add_arg_parser(subparsers):\n # add\n add_p = subparsers.add_parser('add', description='Create a bundle from a .csv, .tsv, or a directory of files.')\n add_p.add_argument('-t', '--tag', nargs=1, type=str, action='append',\n help=\"Set one or more tags: 'dsdt add -t authoritative:True -t version:0.7.1'\")\n add_p.add_argument('bundle', type=str, help='The destination bundle in the current context')\n add_p.add_argument('path_name', type=str, help='File or directory of files to add to the bundle', action='store')\n add_p.set_defaults(func=lambda args: _add(args))", "def create_parser(self, prog_name, subcommand):\n parser = CommandParser(\n self, prog=\"%s %s\" % (os.path.basename(prog_name), subcommand),\n description=self.help or None,\n )\n parser.add_argument(\n '--version', action='version', version=self.get_version())\n\n self.add_arguments(parser)\n return parser", "def setup_parser_talk(subparsers):\r\n parser = subparsers.add_parser('talk', help='Freeseer talk database functions')\r\n parser.add_argument(\"action\", 
choices=['add', 'remove', 'clear', 'list'], nargs='?')\r\n parser.add_argument(\"-t\", \"--title\", type=unicode, help=\"Title\")\r\n parser.add_argument(\"-s\", \"--speaker\", type=unicode, help=\"Speaker\")\r\n parser.add_argument(\"-r\", \"--room\", type=unicode, help=\"Room\")\r\n parser.add_argument(\"-e\", \"--event\", type=unicode, help=\"Event\")\r\n parser.add_argument(\"-i\", \"--talk-id\", type=int, help=\"Talk ID\")", "def _init_cfg_subcmd(subparsers):\n cfg_related = subparsers.add_parser(\"cfg\")\n cfg_subparsers = cfg_related.add_subparsers(dest=\"cfg_subcommand\")\n\n cfg_write_parser = cfg_subparsers.add_parser(\"write\")\n cfg_write_parser.add_argument(\n \"--level\",\n choices=[\"user\", \"cwd\"],\n default=None,\n help=\"Specify if this config is for user or just the working directory.\",\n )\n cfg_write_parser.add_argument(\n \"--open\", action=\"store_const\", const=True, default=False\n )\n cfg_subparsers.add_parser(\"show\")\n\n cfg_export_parser = cfg_subparsers.add_parser(\"export\")\n cfg_export_parser.add_argument(\"--dir\", default=os.getcwd())\n\n return cfg_related", "def create_parser(general_defaults={}, constants={}, subcommand=MAIN):\n\n defaults = general_defaults['BigMLer']\n\n version = pkg_resources.require(\"BigMLer\")[0].version\n version_text = \"\"\"\\\nBigMLer %s - A Higher Level API to BigML's API\nCopyright 2012-2015 BigML\n\nLicensed under the Apache License, Version 2.0 (the \\\"License\\\"); you may\nnot use this file except in compliance with the License. You may obtain\na copy of the License at http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \\\"AS IS\\\" BASIS, WITHOUT\nWARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\nLicense for the specific language governing permissions and limitations\nunder the License.\"\"\" % version\n constants['version_text'] = version_text\n main_parser = argparse.ArgumentParser(\n description=\"A higher level API to BigML's API.\",\n epilog=\"Happy predictive modeling!\",\n version=version_text,\n formatter_class=argparse.RawTextHelpFormatter)\n subparsers = main_parser.add_subparsers()\n\n # list of options\n common_options = get_common_options(defaults=defaults, constants=constants)\n delete_options = get_delete_options(defaults=defaults)\n source_options = get_source_options(defaults=defaults)\n dataset_options = get_dataset_options(defaults=defaults)\n test_options = get_test_options(defaults=defaults)\n multi_label_options = get_multi_label_options(defaults=defaults)\n\n # subcommand options\n subcommand_options = {}\n # specific options\n subcommand_options[\"main\"] = get_main_options(defaults=defaults,\n constants=constants)\n # general options\n subcommand_options[\"main\"].update(common_options)\n subcommand_options[\"main\"].update(source_options)\n subcommand_options[\"main\"].update(dataset_options)\n subcommand_options[\"main\"].update(multi_label_options)\n subcommand_options[\"main\"].update(test_options)\n subcommand_options[\"main\"].update({\n '--source-tag': delete_options['--source-tag'],\n '--dataset-tag': delete_options['--dataset-tag'],\n '--model-tag': delete_options['--model-tag'],\n '--ensemble-tag': delete_options['--ensemble-tag'],\n '--prediction-tag': delete_options['--prediction-tag'],\n '--batch-prediction-tag': delete_options['--batch-prediction-tag']})\n\n main_options = subcommand_options[\"main\"]\n\n defaults = general_defaults[\"BigMLer analyze\"]\n subcommand_options[\"analyze\"] = get_analyze_options(defaults=defaults)\n subcommand_options[\"analyze\"].update(common_options)\n # we add the options that should be transmitted to bigmler main subcommands\n # in analyze\n subcommand_options[\"analyze\"].update({\n '--objective': main_options['--objective'],\n '--max-parallel-models': main_options['--max-parallel-models'],\n '--max-parallel-evaluations': main_options[\n '--max-parallel-evaluations'],\n '--model-fields': main_options['--model-fields'],\n '--balance': main_options['--balance'],\n '--no-balance': main_options['--no-balance'],\n '--number-of-models': main_options['--number-of-models'],\n '--sample-rate': main_options['--sample-rate'],\n '--replacement': main_options['--replacement'],\n '--missing-splits': main_options['--missing-splits'],\n '--pruning': main_options['--pruning'],\n '--weight-field': main_options['--weight-field'],\n '--replacement': main_options['--replacement'],\n '--objective-weights': main_options['--objective-weights'],\n '--replacement': main_options['--replacement'],\n '--model-attributes': main_options['--model-attributes'],\n '--ensemble-attributes': main_options['--ensemble-attributes'],\n '--tlp': main_options['--tlp'],\n '--randomize': main_options['--randomize']})\n\n defaults = general_defaults[\"BigMLer cluster\"]\n subcommand_options[\"cluster\"] = get_cluster_options(defaults=defaults)\n # general options\n subcommand_options[\"cluster\"].update(common_options)\n subcommand_options[\"cluster\"].update(source_options)\n subcommand_options[\"cluster\"].update(dataset_options)\n subcommand_options[\"cluster\"].update(test_options)\n subcommand_options[\"cluster\"].update({\n '--cpp': main_options['--cpp'],\n '--fields-map': main_options['--fields-map'],\n '--source-tag': 
delete_options['--source-tag'],\n '--dataset-tag': delete_options['--dataset-tag'],\n '--cluster-tag': delete_options['--cluster-tag'],\n '--centroid-tag': delete_options['--centroid-tag'],\n '--batch-centroid-tag': delete_options['--batch-centroid-tag'],\n '--prediction-info': main_options['--prediction-info'],\n '--prediction-header': main_options['--prediction-header'],\n '--prediction-fields': main_options['--prediction-fields'],\n '--reports': main_options['--reports'],\n '--remote': main_options['--remote'],\n '--no-batch': main_options['--no-batch']})\n\n defaults = general_defaults[\"BigMLer anomaly\"]\n subcommand_options[\"anomaly\"] = get_anomaly_options(defaults=defaults)\n # general options\n subcommand_options[\"anomaly\"].update(common_options)\n subcommand_options[\"anomaly\"].update(source_options)\n subcommand_options[\"anomaly\"].update(dataset_options)\n subcommand_options[\"anomaly\"].update(test_options)\n subcommand_options[\"anomaly\"].update({\n '--cpp': main_options['--cpp'],\n '--fields-map': main_options['--fields-map'],\n '--source-tag': delete_options['--source-tag'],\n '--dataset-tag': delete_options['--dataset-tag'],\n '--anomaly-tag': delete_options['--anomaly-tag'],\n '--anomaly-score-tag': delete_options['--anomaly-score-tag'],\n '--batch-anomaly-score-tag': delete_options['--batch-anomaly-score-tag'],\n '--prediction-info': main_options['--prediction-info'],\n '--prediction-header': main_options['--prediction-header'],\n '--prediction-fields': main_options['--prediction-fields'],\n '--reports': main_options['--reports'],\n '--remote': main_options['--remote'],\n '--no-batch': main_options['--no-batch']})\n\n\n subcommand_options[\"delete\"] = delete_options\n subcommand_options[\"delete\"].update(common_options)\n\n for subcommand in SUBCOMMANDS:\n subparser = subparsers.add_parser(subcommand)\n parser_add_options(subparser, subcommand_options[subcommand])\n\n # options to be transmitted from analyze to main\n chained_options = [\n \"--debug\", \"--dev\", \"--username\", \"--api-key\", \"--resources-log\",\n \"--store\", \"--clear-logs\", \"--max-parallel-models\",\n \"--max-parallel-evaluations\", \"--objective\", \"--tag\",\n \"--no-tag\", \"--no-debug\", \"--no-dev\", \"--model-fields\", \"--balance\",\n \"--verbosity\", \"--resume\", \"--stack_level\", \"--no-balance\",\n \"--args-separator\", \"--name\"]\n\n return main_parser, chained_options", "def _init_cfg_subcmd(subparsers):\n cfg_related = subparsers.add_parser(\"cfg\",)\n cfg_subparsers = cfg_related.add_subparsers(dest=\"cfg_subcommand\")\n\n cfg_write_parser = cfg_subparsers.add_parser(\"write\")\n cfg_write_parser.add_argument(\n \"--level\",\n choices=[\"user\", \"cwd\"],\n default=None,\n help=\"Specify if this config is for user or just the working directory.\",\n )\n cfg_write_parser.add_argument(\n \"--open\", action=\"store_const\", const=True, default=False\n )\n cfg_subparsers.add_parser(\"show\")\n\n cfg_export_parser = cfg_subparsers.add_parser(\"export\")\n cfg_export_parser.add_argument(\"--dir\", default=os.getcwd())\n\n return cfg_related", "def subcmd_help(word, word_eol):\n\tif len(word) > 1:\n\t\ttopic = word[1]\n\t\tif topic in subcommands:\n\t\t\tdoprint('help', subcommands[topic].__doc__)\n\t\telse:\n\t\t\tdoprint('help', 'Unknown subcommand \"%s\". Try \"/mt_irc help\".' 
% topic)\n\telse:\n\t\tfor subcmd in subcommands:\n\t\t\tdoprint('help', subcommands[subcmd].__doc__)", "def add_rm_parser(subparsers):\n rm_parser = subparsers.add_parser(\"rm\")\n rm_parser.set_defaults(func=rm_cli.main)\n rm_parser.add_argument('--scenario', '-s',\n dest=\"scenario\",\n help='Predefined scenario to use for exection')\n rm_parser.add_argument('--platform', dest=\"platform\",\n help=\"The platform to use \\\n(podman, docker, terraform, shell, python)\")\n rm_parser.add_argument('--vars', dest=\"vars\",\n default=\"\",\n help=\"extra variables\")\n rm_parser.add_argument('--debug', dest=\"debug\",\n action=\"store_true\",\n help=\"Enable debug level logging\")", "def set_parser(*, collected, parser=None):\n if parser is None:\n parser = argparse.ArgumentParser()\n subparsers = parser.add_subparsers()\n commands = unique(collected)\n for name, details in commands.items():\n original = details.original\n args = details.extra\n a_subparser = subparsers.add_parser(name)\n a_subparser.set_defaults(\n __gather_name__=name,\n __gather_command__=original,\n )\n for arg_details in args:\n a_subparser.add_argument(*arg_details.args, **dict(arg_details.kwargs))\n return parser", "def define_command(subparsers, command, callback, cmd_mapper):\n desc = callback.__doc__ or ''\n daemon_help = desc.strip().split('\\n')[0]\n arguments = getattr(callback, 'arguments', [])\n\n subparser = subparsers.add_parser(command, help=daemon_help,\n description=desc,\n add_help=False,\n formatter_class=HelpFormatter)\n subparser.add_argument('-h', '--help', action='help',\n help=argparse.SUPPRESS)\n cmd_mapper[command] = subparser\n for (args, kwargs) in arguments:\n subparser.add_argument(*args, **kwargs)\n subparser.set_defaults(func=callback)", "def main(args=None):\n\n from . import ArgumentParserSC\n\n scs, ers = get_subcommands()\n\n if ers:\n lines = ('- %s: %s' % (k, v) for k, v in ers.items())\n epilog = '\\n'.join(lines)\n epilog = 'Could not load the following subcommands:\\n' + epilog\n else:\n epilog = None\n\n parser = ArgumentParserSC('subcommand', scs, description='Lantz', epilog=epilog)\n\n parser.dispatch(args)", "def wrapper(callback):\n if self.subcommands.get(base_name) is None:\n self.subcommands[base_name] = {}\n\n self.subcommands[base_name][name] = SubSlashCommand(callback, base_name, name, description, options=options, guild_ids=guild_ids, default_permission=default_permission, guild_permissions=guild_permissions)", "def subcommand_group(self, base_names, name, description=MISSING, options=MISSING, guild_ids=MISSING, default_permission=True, guild_permission=MISSING):\n def wrapper(callback):\n \"\"\"The wrapper for the callback function. 
The function's parameters have to have the same name as the parameters specified in the slash command.\n\n `ctx` is of type :class:`~SlashedCommand` and is used for responding to the interaction and more\n\n Examples\n --------\n - no parameter:\n `async def command(ctx): ...`\n - required parameter \"number\":\n `async def command(ctx, number): ...`\n - optional parameter \"user\":\n `async def command(ctx, user=default_value)`\n - multiple optional parameters \"user\", \"number\":\n `async def command(ctx, user=default_value, number=default_value)`\n - one required and one optional parameter \"user\", \"text\":\n `async def command(ctx, user, text=default_value)`\n\n Note: Replace `default_value` with a value you want to be used if the parameter is not specified in discord, if you don't want a default value, just set it to `None`\n \"\"\"\n if self.subcommand_groups.get(base_names[0]) is None:\n self.subcommand_groups[base_names[0]] = {}\n self.subcommand_groups[base_names[0]][name] = SubSlashCommandGroup(callback, base_names, name, description, options=options, guild_ids=guild_ids, default_permission=default_permission, guild_permissions=guild_permission)\n\n return wrapper", "def add_parser(subparsers):\n parser = subparsers.add_parser('upload', help='upload build to Koji')\n\n parser.add_argument('--scm-url', required=True,\n help='SCM URL for this build, eg. git://...')\n parser.add_argument('--owner', required=True,\n help='koji user name that owns this build')\n parser.add_argument('--tag',\n help='tag this build, eg. ceph-3.2-xenial-candidate')\n parser.add_argument('--dryrun', action='store_true',\n help=\"Show what would happen, but don't do it\")\n parser.add_argument('--skip-log', action='store_true',\n help=\"Do not upload a .build log file\")\n parser.add_argument('directory', help=\"parent directory of a .dsc file\")\n parser.set_defaults(func=main)", "def make_parser():\n description = 'Interact with Twitter from command line interface'\n parser = argparse.ArgumentParser(description=description)\n\n subparsers = parser.add_subparsers(help='Available commands')\n\n # Subparsers for the \"update status / tweet\" command\n put_parser = subparsers.add_parser('tweet', help='Update status / post a tweet')\n put_parser.add_argument('message', help='The message to post, must be no longer than 140 characters')\n put_parser.set_defaults(command=\"tweet\")\n\n # Subparsers for the \"see homepage tweets\" command\n put_parser = subparsers.add_parser('home', help='See timeline of tweets on your Twitter homepage')\n put_parser.set_defaults(command='home')\n\n # Subparsers for the \"get trending posts\" command\n put_parser = subparsers.add_parser('trends', help='See trending topics globally or in your location')\n put_parser.add_argument('location', default='world', nargs='?',\n help='The name of a location of interest')\n put_parser.set_defaults(command='trends')\n\n return parser", "def addCommand(function, command, description, usage = None, minArgs = 0, maxArgs = 0, showUsage = True):\n None", "def configure_cli(subparsers) -> None: # type: ignore\n parser = subparsers.add_parser(\n name='items',\n description='Retrieve new/updated items from Aleph and send to CaiaSoft'\n )\n parser.set_defaults(cmd_name='items')", "def test_subCommandInTwoPlaces(self):\n class SubOpt(usage.Options):\n pass\n class OptFoo(usage.Options):\n subCommands = [\n ('foo', 'f', SubOpt, 'quux'),\n ]\n class OptBar(usage.Options):\n subCommands = [\n ('bar', 'b', SubOpt, 'quux'),\n ]\n oFoo = OptFoo()\n 
oFoo.parseOptions(['foo'])\n oBar=OptBar()\n oBar.parseOptions(['bar'])\n self.failUnless(hasattr(oFoo.subOptions, 'parent'))\n self.failUnless(hasattr(oBar.subOptions, 'parent'))\n self.failUnlessIdentical(oFoo.subOptions.parent, oFoo)\n self.failUnlessIdentical(oBar.subOptions.parent, oBar)", "def test_get_subcommand_help(self):\r\n subcommands = list(self.testcommand.subparser.choices.keys())\r\n for subcommand in subcommands:\r\n help_message = self.testcommand.get_help(subcommand=subcommand)\r\n self.assertEqual(1, help_message.count(\"usage\"))" ]
[ "0.76693493", "0.7625279", "0.761014", "0.74660397", "0.7304024", "0.7300243", "0.6937958", "0.687252", "0.6824965", "0.68249416", "0.6797736", "0.6796231", "0.67945975", "0.6792037", "0.6770885", "0.67248243", "0.6711912", "0.66614723", "0.6639564", "0.66390103", "0.6635527", "0.66086316", "0.65702987", "0.6568068", "0.65141344", "0.64924943", "0.6456027", "0.645294", "0.64483696", "0.64303404", "0.64245313", "0.64185804", "0.64176804", "0.63889796", "0.63760126", "0.6367608", "0.63653195", "0.63632596", "0.6342922", "0.63186246", "0.63121873", "0.6298771", "0.6283067", "0.62721384", "0.62459654", "0.6241569", "0.6238358", "0.6227204", "0.62247044", "0.62223", "0.6206398", "0.6181864", "0.613991", "0.61388427", "0.61294454", "0.6125365", "0.6124919", "0.6114709", "0.6100961", "0.6082422", "0.607949", "0.6074992", "0.6066729", "0.6055169", "0.60525906", "0.60500956", "0.60472494", "0.6043296", "0.6030393", "0.60293794", "0.6016882", "0.6009802", "0.5986705", "0.5986002", "0.59856987", "0.5950264", "0.5934062", "0.59294486", "0.5928653", "0.5928083", "0.5925111", "0.5917197", "0.5913542", "0.58937806", "0.58933777", "0.5889948", "0.5886769", "0.58849216", "0.58834636", "0.5860189", "0.58574814", "0.5844258", "0.5837549", "0.5827339", "0.5817781", "0.58141965", "0.5808988", "0.57969105", "0.5773223", "0.5768771" ]
0.81267315
0
The module that this command is registered to. Will return `None` if this command has not yet been registered.
def module(self) -> Optional[Module]: return self._module
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_module(self):\n return self.module", "def module(self):\n return self._module", "def module(self):\n return self._module", "def module(self):\n return self._module", "def module(self):\n return self._module", "def module(self):\n return self._module", "def module(self):\n return self.lib.module", "def get_module(cls, module=None):\n return module or sys.modules[cls.module_name()]", "def package(cls):\n packages = get_packages()\n return packages.modules.get(cls.__module__)", "def get_module(self):\n module = self.__class__.__module__.split('.')\n module = \".\".join(module[:-1])\n module = module + \".\" + self._get_valid_version().module\n return module", "def module_name(self):\n if hasattr(self, \"module\"):\n return self.module.__name__\n return None", "def _module(self):\n if self._module_cache is None:\n self._module_cache = load_module(self._name, self._path)\n return self._module_cache", "def get_command(self, ctx, cmd_name):\n path = \"%s.%s\" % (__name__, cmd_name)\n path = path.replace(\"-\", \"_\")\n try:\n module = importlib.import_module(path)\n return getattr(module, 'cli')\n except ModuleNotFoundError as ex:\n print(ex.name)\n return None", "def get_service_module(self):\n return self.__class__.__module__.split('.')[-2]", "def get_module_name(self):\n return self.module_name", "def get_module(cls, module_name):\n if cls.module_dict is None:\n # Init the module_dict once.\n cls.module_dict = {mod.name: mod for mod in cls.get_pb().modules}\n return cls.module_dict.get(module_name)", "def get_module_command_handler(self, name: str) -> Callable:\n if self.module is None:\n return\n cmnd = getattr(self.module, name, None)\n if cmnd is None or not (callable(cmnd) and hasattr(cmnd, FILEBASE_API_API_METHOD_MARKER_ATTRIB_NAME)):\n return None\n return cmnd", "def get_command(self):\n return self.command", "def get_command(self, module_name, command_name):\r\n actions = self.plugins.get(module_name) or {}\r\n if command_name in actions:\r\n return actions[command_name]\r\n if None in actions:\r\n return actions[None]\r\n raise InvalidCommand(module_name, command_name)", "def module_name(self):\n return self.lib.get_module_name()", "def module(self) -> \"TemplateModule\":\n return self._get_default_module()", "def name(self):\n module_filepath = inspect.getfile(type(self))\n module_filename = os.path.basename(module_filepath)\n command_name, _ = os.path.splitext(module_filename)\n return command_name", "def get_module(self, label):\n return self._registry.get(label, self._modules.get(label, None))", "def get_module(self, name: str) -> ModuleInstance:\n return self.modules[name]", "def module_name(self):\n return self.name", "def module_name(self):\n return self.name()", "def modulename():\n from inspect import getmodulename,getfile\n return getmodulename(getfile(lambda x:x))", "def module_name(cls):\n return __name__.split(\".\")[0]", "def get_module_name(self):\n attr = self._root.attrib['plugin']\n parts = attr.split('@')\n return parts[0]", "def find_module(self, name):\n if name in self.pool:\n return self.pool[name]\n else:\n return None", "def name(self):\n return self._modname", "def get_backend():\n\n return sys.modules[__name__]", "def get_backend():\n\n return sys.modules[__name__]", "def get_backend():\n return sys.modules[__name__]", "def get_backend():\n return sys.modules[__name__]", "def exposed_getmodule(self, name):\n return __import__(name, None, None, \"*\")", "def get_module_output(self, name):\n if name in self._modules:\n return 
self._modules[name].outputs['default']\n elif '_' in name:\n module, name = name.rsplit('_', 1)\n if module in self._modules:\n m = self._modules[module]\n if name in m.outputs:\n return m.outputs[name]\n raise KeyError('Could not find module output \"%s\"' % name)", "def get_compss_module(self):\n return self.compss_module", "def tool(self):\n tool_type = self.__class__.__module__.split('.')[-1]\n return g.config.tools[tool_type]", "def getCommand(self):\n return self.__cmd", "def module_name(self) -> Union[str, None]:\n if not self.view_func:\n return None\n elif self._controller_cls:\n return inspect.getmodule(self._controller_cls).__name__\n return inspect.getmodule(self.view_func).__name__", "def _get_module(module):\n try:\n return sys.modules[module]\n except KeyError:\n raise ValueError(\n module + \"is not a valid module name or it is not loaded\"\n )", "def base_module(self) -> nn.Module:\n return getattr(__import__(\"src.modules\", fromlist=[\"\"]), self.name)", "def module_name(self):\n return self.config_section", "def _find_module(model, mod_name):\n for name, module in model.named_modules():\n if name == mod_name:\n return module\n return None", "def command(self):\n return self._command", "def select_module(module, name):\n mod_name = \".\".join([\"biobox_cli\", module, name])\n try:\n __import__(mod_name)\n except ImportError:\n err_exit('unknown_command',\n {'command_type': str.replace(module, '_', ' '), 'command': name})\n return sys.modules[mod_name]", "def command_from_module_name(module_name):\n if module_name == '__main__':\n return os.path.basename(TAUCMDR_SCRIPT)\n return ' '.join(_command_as_list(module_name))", "def name(cls):\n return MODULE_NAME", "def get_other_module(self):\n return self._othermodule", "def find_module (self, name, path = None):\n return self if name in self.containments else None", "def get_command_handler(self) -> Callable:\n try:\n return globals()[self.command_handler]\n except KeyError:\n logging.error(\"command_handler function '%s' for command '%s' not found in global scope\" %\n (self.command_handler, self.name))\n except AttributeError:\n logging.error(\"command_handler for command '%s' not defined in command_definition.py\" % self.name)", "def get_module(module):\n return getattr(sys.modules, module, importlib.import_module(module))", "def get_command(self, ctx, name):\n try:\n if sys.version_info[0] == 2:\n name = name.encode('ascii', 'replace')\n mod = __import__('cli.commands.cmd_' + name, None, None, ['cli'])\n except ImportError:\n exit(1)\n\n return mod.cli", "def find_main_module(self):\n\n if self.type == 'passthrough':\n return None\n directory, basename = os.path.split(self.main_module)\n module, ext = os.path.splitext(basename)\n if ext:\n # if the module include the extension, just return its absolute\n # path\n return os.path.join(self.code_dir, self.main_module)\n\n # Otherwise, try to find the proper module, by assuming that there\n # is only one file with such name. 
Note that this may fail if\n # there are other files such as byte-compiled binaries, etc.\n found = glob.glob(os.path.join(self.code_dir, directory, module+'.*'))\n if not found:\n raise APIException('module not found: {}'\n .format(self.main_module), 400)\n\n return found[0]", "def get_command(self, ctx, name):\n commands = self._iter_commands()\n return commands[name].load()", "def get_module(self, mf_module):\n assert mf_module in [\"mf\", \"mt\", \"mp\"], \\\n f\"requested module {mf_module} is not one of 'mf', 'mt', 'mp'.\"\n\n try:\n return self.data[mf_module]\n except KeyError:\n raise KeyError(f\"module {mf_module} is not available in modflow model data.\")", "def _get_default_module(self, ctx: t.Optional[Context] = None) -> \"TemplateModule\":\n if self.environment.is_async:\n raise RuntimeError(\"Module is not available in async mode.\")\n\n if ctx is not None:\n keys = ctx.globals_keys - self.globals.keys()\n\n if keys:\n return self.make_module({k: ctx.parent[k] for k in keys})\n\n if self._module is None:\n self._module = self.make_module()\n\n return self._module", "def load(cls, name):\n try:\n return importlib.import_module(cls._plugins[name])\n except Exception as err:\n print(\"** could not load command [%s]:\\n%s\" % (name, err))", "def get_module(self, name):\n if name in self._modules:\n return self._modules[name]\n elif '_' in name:\n module, name = name.rsplit('_', 1)\n if module in self._modules:\n return self._modules[module]\n raise KeyError('Could not find module \"%s\"' % name)", "def LocalCommand(TestinfraBackend):\n return testinfra.get_backend(\"local://\").get_module(\"Command\")", "def module_name(self) -> str | None:\n try:\n return self._app_name.replace(\"-\", \"_\")\n except AttributeError:\n # If the app was created from an interactive prompt,\n # there won't be a module name.\n return None", "def get_command(self):\n if self.command is not None:\n return self.command\n elif self.parent is not None:\n return self.parent.get_command()\n else:\n return None", "def _get_module(self, name):\n module = self._modules.get(name)\n if not module:\n module = importlib.import_module(name)\n self._modules[name] = module\n return module", "def get_command(self):\n return self.c_dict['COMMAND']", "def get_module(self, cls_name, module_name='module'):\n if module_name not in self._module_dict:\n raise KeyError('{module_name} is not in registry')\n dd = self._module_dict[module_name]\n if cls_name not in dd:\n raise KeyError('{cls_name} is not registered in {module_name}')\n\n return dd[cls_name]", "def getmodulename(path):\r\n info = getmoduleinfo(path)\r\n if info: return info[0]", "def driver(self):\r\n ext = self.extensions[0]\r\n return ext.obj if ext.obj else ext.plugin", "def get(distro):\n this_module = sys.modules[__name__]\n try:\n return getattr(this_module, distro)\n except AttributeError:\n raise DistroNotSupported(distro)", "def _package(module):\n return (\n module.__name__ if module.__package__ is None else module.__package__\n )", "def command():\n return _config.command", "def name(self):\n if hasattr(self, \"module\"):\n return self.module.__name__.replace('_', '-')\n return None", "def __extract_module(log):\n module = \"UNKNOWN\"\n if \"module\" in log:\n module = log[\"module\"]\n elif \"executorName\" in log:\n module = log[\"executorName\"]\n elif \"http_uri\" in log:\n module = Transformer.__extract_module_from_url(log[\"http_uri\"])\n if module == \"UNKNOWN\" and \"header_referer\" in log:\n module = 
Transformer.__extract_module_from_url(log[\"header_referer\"])\n return module", "def get_tool(cls, tool_name):\n if cls.tool_dict is None:\n # Init the module_dict once.\n cls.tool_dict = {tool.name: tool for tool in cls.get_pb().tools}\n return cls.tool_dict.get(tool_name)", "def get_plugin(config_cls: Type) -> str:\n cls_module = inspect.getmodule(config_cls)\n return 'rastervision.' + cls_module.__name__.split('.')[1]", "async def _get_command_handler(self, command_type):\n if isinstance(command_type, str):\n module_name = 'command'\n module = import_module(module_name)\n handler = getattr(module, command_type)\n return command_type, handler", "def get_mod_name():\n return sys.argv[0].split(\"/\")[-1].split(\".py\")[0]", "def get_command(self,command):\n\t\treturn self.command_handlers[command]", "def returnMirrorModuleInst(self):\n\n # get network node\n networkNode = self.returnNetworkNode\n mirrorModule = cmds.getAttr(networkNode + \".mirrorModule\")\n\n # find instance through rig UI inst\n for inst in self.rigUiInst.moduleInstances:\n networkNode = inst.returnNetworkNode\n moduleName = cmds.getAttr(networkNode + \".moduleName\")\n if moduleName == mirrorModule:\n return inst", "def getCommand(self, name):\n return self.commands[name]()", "def load_parent_command(name):\n app_name = get_parent_commands()[name]\n module = import_module('%s.management.commands.%s' % (app_name, name))\n return module.Command", "def _get_module(dotted_path):\n package, module = dotted_path.rsplit('.', 1)\n return getattr(import_module(package), module)", "def get(self):\n if self.proc is not None:\n return self.proc.get()\n\n return None", "def lookup_module(id):\n return _registry[id]", "def show_module_from_frame():\n parentframe = inspect.currentframe().f_back.f_back.f_back\n parentname = parentframe.f_code.co_name\n module = inspect.getmodule(parentframe)\n print(\"Frame:\", parentframe)\n print(\"name:\", parentname)\n print(\"module:\", module)\n print(\"module name:\", nameof(module))\n print(\"qualified module name:\", qualified_name(module))\n print(\"module of module:\", moduleof(module))", "def get_cmd(self, command):\n return self.commands[command][\"cmd\"]", "def get_commandname(self):\n for line in self.helplines:\n if \"Usage:\" in line and self.parser_type is 'optparse':\n tmp = line.split()\n return tmp[1]\n if \"usage:\" in line and self.parser_type is 'argparse':\n tmp = line.split()\n return tmp[1]\n return None", "def options_module(self):\n if self.kolibri_options:\n module = self._return_module(self.kolibri_options)\n if module is None:\n logging.warning(\n \"{plugin} defined {module} kolibri options but the module was not found\".format(\n plugin=self.module_path, module=self.kolibri_options\n )\n )\n return module", "def get_command_with_name(self, command_name):\n return self.commands[command_name]", "def get_implementation(self):\n return self.__capabilities[\"IMPLEMENTATION\"]", "def get_module(module_type, spec, get_full_path, module_set_name=\"default\", required=True):\n try:\n upstream = spec.installed_upstream\n except spack.repo.UnknownPackageError:\n upstream, record = spack.store.db.query_by_spec_hash(spec.dag_hash())\n if upstream:\n module = spack.modules.common.upstream_module_index.upstream_module(spec, module_type)\n if not module:\n return None\n\n if get_full_path:\n return module.path\n else:\n return module.use_name\n else:\n writer = spack.modules.module_types[module_type](spec, module_set_name)\n if not os.path.isfile(writer.layout.filename):\n if not 
writer.conf.excluded:\n err_msg = \"No module available for package {0} at {1}\".format(\n spec, writer.layout.filename\n )\n raise ModuleNotFoundError(err_msg)\n elif required:\n tty.debug(\"The module configuration has excluded {0}: \" \"omitting it\".format(spec))\n else:\n return None\n\n if get_full_path:\n return writer.layout.filename\n else:\n return writer.layout.use_name", "def _get_module_path():\n\n return os.path.dirname(os.path.realpath(__file__))", "def find_module_by_fq_name(model, fq_mod_name):\n for module in model.modules():\n if hasattr(module, 'distiller_name') and fq_mod_name == module.distiller_name:\n return module\n return None", "def load_command(command_path):\n module_path, class_name = command_path.rsplit(\".\", 1)\n module = importlib.import_module(module_path)\n return getattr(module, class_name)()", "def get_cmd(self):\n return self.cmds.pop(0) if self.cmds else None", "def extension(self):\n return self.extensions[0]", "def get_cmd(self):\n return self.cmd", "def command(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"command\")", "def command(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"command\")", "def command(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"command\")" ]
[ "0.8003115", "0.7536935", "0.7536935", "0.7536935", "0.7536935", "0.7536935", "0.75261885", "0.7365347", "0.70169824", "0.68556315", "0.67025244", "0.66628325", "0.66449714", "0.6640801", "0.6626384", "0.652614", "0.6519318", "0.648883", "0.64871985", "0.64552623", "0.6444489", "0.63784087", "0.63415885", "0.6328508", "0.6255828", "0.62369883", "0.6207963", "0.6161047", "0.61501014", "0.61465055", "0.60850775", "0.6072914", "0.6072914", "0.60565233", "0.60565233", "0.60542154", "0.6040693", "0.6038562", "0.6032034", "0.59923583", "0.59911174", "0.5990651", "0.59659964", "0.59570056", "0.59507966", "0.5943623", "0.59142053", "0.59007263", "0.58795696", "0.5874054", "0.5873428", "0.58469695", "0.58393335", "0.58309203", "0.5821424", "0.58148825", "0.5812359", "0.58105123", "0.5807236", "0.58018535", "0.5794813", "0.57814634", "0.57780397", "0.5771055", "0.5751927", "0.57153314", "0.5701006", "0.5697978", "0.5680741", "0.5675604", "0.56734574", "0.5671935", "0.56587076", "0.56386864", "0.5634829", "0.563428", "0.56117713", "0.5610668", "0.5608871", "0.560624", "0.56050175", "0.5602561", "0.5578418", "0.55715615", "0.55616444", "0.55562425", "0.55559045", "0.5555376", "0.5553983", "0.55391216", "0.55345887", "0.55210394", "0.55185866", "0.549789", "0.5496735", "0.54842967", "0.5476662", "0.5470252", "0.5470252", "0.5470252" ]
0.74596643
7
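A minimal, runnable sketch of the registration pattern implied by the `module` property above; the `Module` container, its `register_command` method, and the module name used are assumptions for illustration only:

```python
from typing import List, Optional


class Module:
    """Hypothetical container that commands are registered to."""

    def __init__(self, name: str) -> None:
        self.name = name
        self.commands: List["Command"] = []

    def register_command(self, command: "Command") -> None:
        command._module = self           # back-reference set at registration time
        self.commands.append(command)


class Command:
    def __init__(self, name: str) -> None:
        self.name = name
        self._module: Optional[Module] = None

    @property
    def module(self) -> Optional[Module]:
        """The module this command is registered to, or None if not yet registered."""
        return self._module


cmd = Command("ping")
print(cmd.module)                        # None -- not registered yet
Module("core").register_command(cmd)
print(cmd.module.name)                   # "core"
```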
The User that invoked the command.
def invoker(self) -> User: return self.msg.source
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def user(self):\n return self.getattr('user')", "def user(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"user\")", "def user(self) -> Optional[str]:\n _args: list[Arg] = []\n _ctx = self._select(\"user\", _args)\n return _ctx.execute_sync(Optional[str])", "def user(self):\n return self._user", "def user(self):\n return self._user", "def user(self):\n return self._user", "def user(self):\n return self._user", "def user(self):\n return self._forced_user", "def user(self) -> str:\n return self._user", "def user(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"user\")", "def user(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"user\")", "def user(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"user\")", "def user(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"user\")", "def user(self):\n return self.owner.user", "def _get_current_user(self):\n\n out, err, exitcode = self.execute('whoami')\n\n if exitcode == 0:\n return User(out[0])\n return None", "def get_user(self):\n raise NotImplementedError", "def get_user(self):\n return self.user", "def get_user(self):\n return self.user", "def get_user(self):\n return None", "def user(self):\n return self._push.get('user', None)", "def repo_user(self):\n if 'repoze.who.identity' in self.environ:\n return self.environ['repoze.who.identity'].get('user')", "def _current_login_user(self):\n return self.env.uid", "def get_user(self):\n return str(self.request.user.id)", "def user(self):\n return self._project.user", "def get_user(self):\n return self.get('users/self')", "def __call__(self, req):\n return req.remote_user", "def get_current_user(self):\n return self.current_user", "def user(self):\n pass", "def get_user():\n return os.getenv(\"USER\")", "def target_user(self):\n return self.request.user", "def user():\n\treturn request.authorization.username if zk.get_http_login() else zk.get_username()", "def current_user(self):\n user_dict = self.auth.get_user_by_session()\n return self.auth.store.user_model.get_by_id(user_dict['user_id'])", "def user(self):\n return self.contact.user", "def get_current_user(self):\r\n return self.jira.current_user()", "def get_current_user():\n request = get_current_request()\n return getattr(request, 'user', None)", "def user(self) -> ClientUser:\n return self.http.user # type: ignore", "def user_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"user_name\")", "def get_user():\n global USER\n return USER", "def user(self):\n u = self.user_info\n return self.user_model.get_by_id(u['user_id']) if u else None", "def user(self):\n u = self.user_info\n return self.user_model.get_by_id(u['user_id']) if u else None", "def user(self):\n if self._user is None:\n pk, full_name = self.owner.split(',')\n pk = int(pk)\n self._user = User.objects.get(pk=pk)\n return self._user", "def get_user(self) -> User:\n return self.__user", "def whoami():\n return current_user._get_current_object()", "def owner(self) -> None:\n return self.bot.get_user(self.bot.config.owner_ids[0])", "def getUser(self):\n current_user = self.user\n return current_user", "def getUser(self):\n current_user = self.user\n return current_user", "def getUser(self):\n current_user = self.user\n return current_user", "def user_info(self):\n return self.auth.get_user_by_session()", "def getToUser(self):\n return self.toUser", "def get_user():\n return getpass.getuser()", "def get_user():\n return getpass.getuser()", "def user_info(self):\n \n return self.auth.get_user_by_session()", 
"def user(self):\n if not flask_login.current_user.is_anonymous():\n return flask_login.current_user._get_current_object()\n else:\n return None", "def user_name(self):\n return self._user_name", "def user(self):\n if self._user:\n return self._user\n else:\n return AnonymousUser()", "def head_node_user(self):\n return self._get_param(\"ClusterUser\")", "def current_user_info():\n\n return current_user", "def author(self):\r\n return self.user", "def getFromUser(self):\n return self.fromUser", "def get_userid(self):\n return util.kbase_env.user", "def actor_user(self):\n if self.actor:\n return User.objects.get(id=self.actor['id'])", "def get_current_user(self):\n return self.get_secure_cookie(\"user\")", "def get_current_user(self):\n return None", "def _get_current_user(self):\r\n real_user = self.runtime.get_real_user(self.runtime.anonymous_student_id)\r\n return real_user", "def get_car_user(self) -> str:\n return self.currentuser", "def run_as_user(self) -> pulumi.Input['RunAsUserStrategyOptionsArgs']:\n return pulumi.get(self, \"run_as_user\")", "def me():\n return current_user.get()", "def _get_user(self, call_info):\n unique_name = call_info['sender']\n uid = self._dbus_proxy.GetConnectionUnixUser(unique_name)\n return pwd.getpwuid(uid).pw_name", "def run_as_user(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"run_as_user\")", "def get_logged_in_user(self):\n return self.session_mgr.get_logged_in_user()", "def current_user(self):\n user_dict = self.auth.get_user_by_session()\n if user_dict is None:\n logging.debug(\"User dict is None.\")\n return None\n return self.auth.store.user_model.get_by_id(\n user_dict['user_id'], namespace=namespace_manager.get_namespace())", "def as_user(self, cmd):\n self.show(cmd, prefix=f'{self.user}$', )\n if self.dryrun:\n return None, b'', b''\n return self._as_user(cmd)", "def user_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"user_id\")", "def default_user(self) -> str:\n return OS_MAPPING.get(self.os, []).get(\"user\", None)", "def created_user(self):\n return self._created_user", "def nscaweb_user(self):\n return self.__get_option('nscaweb_user')", "def getUser(self):\n user = users.get_current_user()\n if not user:\n self.redirect(users.create_login_url(self.request.uri))\n else:\n return user", "def user_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"user_name\")", "def user_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"user_name\")", "def user_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"user_name\")", "def username(self):\n return self.user.username", "def user_name(self):\n\n return self._user_name", "def username(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"username\")", "def get_user_name(self):\n whoami = subprocess.Popen('whoami', stdin=None, stdout=subprocess.PIPE,\n shell=True, close_fds=True)\n whoami = whoami.communicate()[0]\n if '\\n' in whoami:\n newline_index = whoami.find('\\n')\n whoami = whoami[:newline_index]\n return whoami", "def username(self):\n return self._username()", "def created_by_user(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"created_by_user\")", "def get_ssh_user():\n\n return getpass.getuser()", "def user_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"user_id\")", "def get_object(self):\n return self.request.user", "def get_object(self):\n return self.request.user", "def get_object(self):\n return self.request.user", "def get_object(self):\n return self.request.user", "def 
get_object(self):\n return self.request.user", "def get_object(self):\n\n return self.request.user", "def user_name(self) -> str:\n return pulumi.get(self, \"user_name\")", "def user(self):\n\n return self.user_model", "def getCurrentUser(self) -> 'CurrentUser':\n return self.sObj.getCurrentUser()", "def get_s3_user(self):\n return self.__USER", "def admin_user(self) -> pulumi.Input['LabVirtualMachineAdminUserArgs']:\n return pulumi.get(self, \"admin_user\")", "def super_user(self) -> Optional[str]:\n return pulumi.get(self, \"super_user\")" ]
[ "0.80029744", "0.78830194", "0.7869089", "0.7831611", "0.7831611", "0.7831611", "0.7831611", "0.7830656", "0.77808654", "0.77667016", "0.77667016", "0.77667016", "0.7764792", "0.77462137", "0.7722865", "0.7709639", "0.77054626", "0.77054626", "0.7655964", "0.7575146", "0.75701255", "0.7530639", "0.74276525", "0.74219805", "0.73955524", "0.7351819", "0.7349282", "0.7331069", "0.7307507", "0.730673", "0.7196928", "0.71793187", "0.7174254", "0.7167799", "0.71664435", "0.7146493", "0.7131329", "0.71155566", "0.71139306", "0.710736", "0.71037", "0.71014637", "0.70648366", "0.7059271", "0.70530295", "0.70530295", "0.70530295", "0.70474595", "0.703691", "0.70285034", "0.70285034", "0.7023522", "0.70205146", "0.70168155", "0.7015479", "0.70071673", "0.7004228", "0.7001062", "0.6993933", "0.69892776", "0.69791436", "0.697886", "0.6964905", "0.6962849", "0.6962476", "0.69549584", "0.69484866", "0.6937172", "0.69178045", "0.6903592", "0.69004846", "0.69001883", "0.68943423", "0.6891484", "0.6883864", "0.687275", "0.6868439", "0.68630284", "0.68630284", "0.68630284", "0.68507963", "0.684815", "0.6842679", "0.68394655", "0.6831312", "0.6828283", "0.681118", "0.6801684", "0.68009156", "0.68009156", "0.68009156", "0.68009156", "0.68009156", "0.67909724", "0.67882234", "0.67808545", "0.6778369", "0.6773559", "0.6766303", "0.6765724" ]
0.74655455
22
Where the command was sent from. Can be either directly from a user, or from a user within a channel.
def source(self) -> Union[User, Channel]: return self.msg.destination
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def whoami( self, mess, args):\n return mess.getFrom()", "def whoami(self, mess, args):\n return mess.getFrom().getStripped()", "def invoker(self) -> User:\n return self.msg.source", "def getFromUser(self):\n return self.fromUser", "def reply_to(self):\n return self.receiver.remote_source.address", "def sender(self) -> str:\n return self._sender", "def _what_is_username(self):\n prompt = \"-?- Send to: \"\n sn = self._input(prompt)\n return sn", "def getToUser(self):\n return self.toUser", "def owner(self):\n \n if not self.logMessage is None:\n return self.logMessage[\"author\"]", "def reply_to(self):\n return self.getattr('reply_to')", "def get_from(self, ):\n return self.get_parameter.get('from')", "def get_sender_username(self, mess):\n jid = mess.getFrom()\n typ = mess.getType()\n username = jid.getNode()\n domain = jid.getDomain()\n if typ == \"chat\":\n return \"%s@%s\" %(username, domain)\n else:\n return \"\"", "def SenderScreenName(self):\n return self._sender_screen_name", "def showsender(self):\n return self.sender", "def user(self):\n return self.contact.user", "def action(self, user, channel, msg):\n # i.e. /me <something>\n user = user.split('!', 1)[0]\n self.logger.log(\"* %s %s\" % (user, msg))", "def action(self, user, channel, msg):\n user = user.split('!', 1)[0]\n self.logger.log(\"* %s %s\" % (user, msg))", "def user(self):\n return self._push.get('user', None)", "def __call__(self, req):\n return req.remote_user", "def owner(self) -> None:\n return self.bot.get_user(self.bot.config.owner_ids[0])", "def parse_bot_commands(self,slack_events):\n user = None\n for event in slack_events:\n if event[\"type\"] == \"message\" and not \"subtype\" in event:\n print(\"User: \" + event[\"user\"])\n user_id, message = self.parse_direct_mention(event[\"text\"])\n print(message)\n if user_id == self.starterbot_id:\n print('That was toward me!!!')\n return message, event[\"channel\"], event[\"user\"]\n\n return None, None, None", "def __str__(self):\n return '{} by @{}'.format(self.message, self.user.username)", "async def on_message(message):\r\n if message.channel.is_private: # < makes PMs show up on command line\r\n sendto = \"\"\r\n if message.author.name == bot.user.name:\r\n sendto = \"(-> {}) \".format(message.channel.user.name)\r\n print(\"{} {}{}: {}\".format(str(datetime.datetime.now())[5:19], sendto, message.author.name, message.content))\r\n if message.content.startswith('~'): # < makes commands case-insensitive\r\n a = message.content\r\n b = message.content.find(' ')\r\n if b != -1:\r\n c = message.content.replace(a[:b], a[:b].lower(), 1)\r\n else:\r\n c = message.content.lower()\r\n message.content = c\r\n await bot.process_commands(message)", "def _current_login_user(self):\n return self.env.uid", "def sender(self):\n key, alt = ('Sender', 'From') if not self.resent else \\\n ('Resent-Sender', 'Resent-From')\n value = self.get(key) or self.get(alt)\n _, addr = getaddresses([value])[0]\n return addr", "def repo_user(self):\n if 'repoze.who.identity' in self.environ:\n return self.environ['repoze.who.identity'].get('user')", "def sender(self) -> str:", "def sender(self):\n return self._sender", "def chat(self):\n return self._get(\"chat\")", "def parse_bot_commands(self, slack_events):\n for event in slack_events:\n if event[\"type\"] == \"message\" and \"subtype\" not in event:\n user_id, message = self.parse_direct_mention(event[\"text\"])\n if user_id == self.username:\n return message, event[\"channel\"]\n return None, None", "def message(self):\n return 
self.args[0]", "def whoAmI(self): #good\r\n\t\treturn self.read(0x75)", "def user(self):\n return self.owner.user", "def name(self):\n name = self.__telegram_info.message.from_user.name\n return name[0].upper() + name[1::]", "def origin(self) -> str:\n return self._event.get('origin')", "def botname(self):\n return settings.AIM_USERNAME", "def privmsg(self, user, channel, msg):\n user = user.split('!', 1)[0]\n self.logger.log(\"<%s> %s\" % (user, msg))\n\n # Check to see if they're sending me a private message\n if channel == self.nickname:\n self.on_pm(user,channel,msg)\n return\n\n # Check to see if they're asking me for help\n if msg.startswith(self.nickname + \": help\"):\n msg = \"%s: I'm a little stupid at the minute; current commands I accept are:\" % user\n self.logged_msg(channel, msg)\n msg = \"%s: is the space open?\" % self.nickname\n self.logged_msg(channel, msg)\n return\n\n # If its a message directed at me, deal with it\n if msg.startswith(self.nickname + \":\"):\n self.on_msg(user, channel,msg)\n return\n\n\n # Otherwise check to see if it is a message directed at me\n if msg.startswith(self.nickname + \":\"):\n msg = \"%s: I am a log bot\" % user\n msg += \", say \\'%s:help\\' for more information\" % self.nickname\n self.logged_msg(channel, msg)\n return", "def userId(self) -> Optional[str]:\n return self.params.get(ChannelBuilder.PARAM_USER_ID, None)", "def sender(self):\n l = self.link\n if l and l.is_sender:\n return l\n else:\n return None", "def sender(self) -> Address:\n return self._sender", "def wm_desired_user(self):\n return self.get_par(\"drive\")", "def user(self):\n return self._forced_user", "def whois( self, mess, args):\n user = self.get_sender_username(mess)\n args = args.strip().replace(' ', '_')\n if user in self.users:\n self.log.info('%s queried whois %s.' 
% (user, args))\n if args in self.users.values():\n return filter(lambda u: self.users[u] == args, self.users)[0]\n else:\n return 'Nobody!'", "def getChannel(self):\r\n return self.channel", "def get_message(self, user):\n return None", "def user(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"user\")", "def user(self) -> str:\n return self._user", "def get_position(self):\n return self.bot_client.send_command(_Command.GetPosition)", "def send_as_user(self, command, msg, user=None):\n user = self if user is None else user\n self._write(f':{user.ident} {command} {msg}')", "def __str__(self):\n return \"From {} at {}:{}/{}/{}\".format(self.owner.username, self.created_time.hour,\n self.created_time.minute, self.created_time.month,\n self.created_time.day)", "def target_user(self):\n return self.request.user", "def on_whoisuser(self, raw_msg, nick, user, host, **kwargs):", "def get_to(self, ):\n return self.get_parameter('to')", "def user(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"user\")", "async def shell_chat(self, user: discord.Member, message: str):\n author = user.nick or user.name\n message = message.replace('\\n', '').replace('/', '').replace('§', '')\n self.console(f'say <{author}> {message}')", "def channel(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"channel\")", "def _get_user(self, call_info):\n unique_name = call_info['sender']\n uid = self._dbus_proxy.GetConnectionUnixUser(unique_name)\n return pwd.getpwuid(uid).pw_name", "async def treatme(self, ctx):\n await ctx.send(await self.cure_user(ctx, ctx.author))", "def str_ax_user(self) -> str:\n value = \"User: ??\"\n if self.STARTED and self.current_user:\n value = self.current_user.str_connect\n return value", "def user(self):\n pass", "def RecipientScreenName(self):\n return self._recipient_screen_name", "def user_cmd(self, username):\n print_debug(\"Executing USER\")\n command = \"USER %s\\r\\n\" % username\n msg_rec = self.send_and_log(self.s, command)\n return msg_rec", "def get_channel_id(event):\n return event.source.sender_id", "def owner(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"owner\")", "def owner(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"owner\")", "def handle_whoami(self, client_socket):\n user = self.clients[client_socket]['data'].decode('utf-8')\n print(f'User {user} queried their identity')\n msg = f'You are currently user {user}'\n self.log_and_send(client_socket, msg)", "def owner(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"owner\")", "def user(self):\n return self.getattr('user')", "def myself(self, mess, args):\n user = self.get_sender_username(mess)\n if user in self.users:\n self.message_queue.append('_%s %s_' % (self.users[user], args))\n self.log.info( '%s says %s in third person.' 
% (user, args))", "def get_command_prefix(self):\n return self.bot_data_file[\"commands_prefix\"]", "def UserMessage(self):\n return self._usermessage", "def on_command(server, user, command, args):", "def command_who(self, bot, update):\n\n messages = [\n 'Myles Braithwaite lives in Toronto where he runs a small '\n 'consluting company called [Monkey in your Soul]'\n '(https://monkeyinyoursoul.com/) (you should hire him because '\n \"he's awesome).\",\n 'You should follow him on [Twitter](https://twitter.com/mylesb) '\n 'or [Instagram](https://instagram.com/myles).',\n 'You can find his programming stuff on [GitHub]'\n '(https://github.com/myles) or [CodePen]'\n '(http://codepen.io/mylesb/).'\n ]\n\n self.send_messages(bot, update, messages)", "def channel(self):\n return self._channel", "def channel(self):\n return self._channel", "async def uid(message, user: ParamType.MIXER_USER):\n return \"@{} user id is: {}\".format(user.username, user.id)", "def privmsg(self):\n if(self.data[0]==\"orcbot\"):\n print \"target is orcbot\"\n self.target = self.orcbot_socket\n self.message = \":\" + SOCKET_TO_USERID[self.source] + \"!~@localhost \"+ self.message\n if(self.source == self.orcbot_socket):\n print \"source is orcbot\"\n self.target = USERID_TO_SOCKET[self.data[0]]\n self.message = \":orcbot!~@localhost \" + self.message\n self.send()", "def userhost(self):\n if self.user:\n return u\"%s@%s\" % (self.user, self.host)\n else:\n return self.host", "def raw_arg(self):\n command = self.usage()[self.usage_index].partition(\" \")[0]\n # TODO: would self.message.text_content be better here?\n message = self.message.content\n # remove command\n return message.partition(command)[2].strip()", "async def contact(self, ctx, *, message : str):\n # [p]contact\n\n if not User.objects.get(is_owner=True).exists():\n await self.bot.say(\"I have no owner set.\")\n return\n owner = User.objects.get(is_owner=True)[0].id\n author = ctx.message.author\n if ctx.message.channel.is_private is False:\n server = ctx.message.server\n source = \", server **{}** ({})\".format(server.name, server.id)\n else:\n source = \", direct message\"\n sender = \"From **{}** ({}){}:\\n\\n\".format(author, author.id, source)\n message = sender + message\n try:\n await self.bot.send_message(owner, message)\n except discord.errors.InvalidArgument:\n await self.bot.say(\"I cannot send your message, I'm unable to find\"\n \" my owner... *sigh*\")\n except discord.errors.HTTPException:\n await self.bot.say(\"Your message is too long.\")\n except:\n await self.bot.say(\"I'm unable to deliver your message. 
Sorry.\")\n else:\n await self.bot.say(\"Your message has been sent.\")", "def __get_sender_id(self):\n return self.__sender_id", "def command(self):\n return self._command", "def whoami(bot, event, *args):\n\n if bot.memory.exists(['user_data', event.user_id.chat_id, \"nickname\"]):\n try:\n fullname = '{0} ({1})'.format(event.user.full_name.split(' ', 1)[0]\n , bot.get_memory_suboption(event.user_id.chat_id, 'nickname'))\n except TypeError:\n fullname = event.user.full_name\n else:\n fullname = event.user.full_name\n\n bot.send_message_parsed(event.conv, _(\"<b>{}</b>, chat_id = <i>{}</i>\").format(fullname, event.user.id_.chat_id))", "def concert_alias(self):\n return self.msg.name", "def details(update, context):\n context.bot.send_message(chat_id=update.effective_chat.id, text=str(owner))", "def channel(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"channel\")", "def channel(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"channel\")", "def command_name(self):\n return None", "def username(self) -> str:\n return self._data['Owner']", "def recipient(self):\n return self._recipient", "def get_destination(post_request):\n return post_request.POST.get('recipient').split('@')[0]", "def user(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"user\")", "def user(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"user\")", "def user(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"user\")", "def getReplyTo(self):\r\n return self.msg[\"Reply-To\"]", "def rel_command(self):\n return self.command.lstrip('/')", "def source_owner(self) -> str:\n return pulumi.get(self, \"source_owner\")", "def source_owner(self) -> str:\n return pulumi.get(self, \"source_owner\")", "def whereami(bot, event, *args):\n\n yield from bot.coro_send_message(\n event.conv,\n _(\"You are at <b><pre>{}</pre></b>, conv_id = <i><pre>{}</pre></i>\").format(\n bot.conversations.get_name(event.conv),\n event.conv.id_))", "def bot_owner_id(self):\n return self._bot_owner_id" ]
[ "0.7382036", "0.6963548", "0.6660693", "0.6343135", "0.62228334", "0.6178447", "0.6127587", "0.61155206", "0.6074623", "0.60027945", "0.59703183", "0.59151745", "0.5872834", "0.58546007", "0.5829677", "0.5820818", "0.5783627", "0.5776797", "0.57764834", "0.5776311", "0.5772935", "0.5747358", "0.5732561", "0.5724079", "0.5712256", "0.5690982", "0.5684387", "0.56797147", "0.56693435", "0.5665412", "0.56356525", "0.56310517", "0.56294286", "0.5602636", "0.55852795", "0.55834967", "0.558297", "0.55826724", "0.5570885", "0.55628043", "0.5562091", "0.5550248", "0.55355036", "0.5534274", "0.5529053", "0.55232155", "0.55149704", "0.5513495", "0.5502018", "0.5494006", "0.549124", "0.54873693", "0.54872435", "0.5479431", "0.54783064", "0.5474391", "0.544562", "0.5442412", "0.54387957", "0.54371214", "0.54324675", "0.5424996", "0.5424248", "0.5419576", "0.5419576", "0.541818", "0.5398385", "0.53952", "0.53927034", "0.53925616", "0.5390589", "0.5388554", "0.53861004", "0.53758955", "0.53758955", "0.53716266", "0.53651315", "0.53572994", "0.5356669", "0.53452766", "0.53429085", "0.53386843", "0.5321961", "0.5310119", "0.5306313", "0.529279", "0.529279", "0.52881217", "0.52866817", "0.52809846", "0.5278868", "0.527716", "0.527716", "0.527716", "0.526644", "0.5261461", "0.52562857", "0.52562857", "0.52537274", "0.5247643" ]
0.6407649
3
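The `invoker` and `source` properties above return a `User` and a `Union[User, Channel]` respectively; below is a short sketch of how calling code might branch on that union (the dataclass fields and the `reply` helper are made up for the example):

```python
from dataclasses import dataclass
from typing import Union


@dataclass
class User:
    nick: str


@dataclass
class Channel:
    name: str


def reply(source: Union[User, Channel], invoker: User, text: str) -> str:
    # In a channel, prefix the invoker's nick so they notice the response;
    # in a direct message no prefix is needed.
    if isinstance(source, Channel):
        return f"[{source.name}] {invoker.nick}: {text}"
    return f"[pm to {source.nick}] {text}"


print(reply(Channel("#dev"), User("alice"), "pong"))   # [#dev] alice: pong
print(reply(User("alice"), User("alice"), "pong"))     # [pm to alice] pong
```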
The invoked subcommand name, if one was invoked. For subcommands with aliases, the name returned is always the canonical name that the aliases are associated with. For this reason, this attribute should be preferred to extracting the subcommand name from `ParsedCommand.args`.
def subcmd(self) -> Optional[str]: return self._subcmd
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def command_name(self):\n return None", "def get_commandname(self):\n for line in self.helplines:\n if \"Usage:\" in line and self.parser_type is 'optparse':\n tmp = line.split()\n return tmp[1]\n if \"usage:\" in line and self.parser_type is 'argparse':\n tmp = line.split()\n return tmp[1]\n return None", "def get_sub_name(self):\n return self.sub_name", "def _name(self):\n return self._arguments[0].split('(')[0]", "def get_command_name(args):\n\n # First argument would always be atlas or manage.py, i.e the calling interface\n if len(args) < 2:\n CommandError.print_to_err(f\"Name of command missing. Valid commands are - {VALID_COMMANDS}\")\n\n return args[1]", "def _name(self):\n return self.arguments[0].split('(')[0]", "def get_subcmd(self, name: str) -> \"CommandHelp\":\n try:\n return self.subcmds[name]\n except KeyError:\n # Try looking up by alias\n for sub_name, sub_help in self.subcmds.items():\n for alias in sub_help.aliases:\n if name == alias:\n return self.subcmds[sub_name]\n raise", "def name(self):\n\t\treturn self.args[0]", "def __str__(self):\n if not self._args and not self.subcommand:\n return self.cmd\n elif not self._args and self.subcommand:\n return '{} {}'.format(\n self.cmd, self.subcommand)\n elif self._args and not self.subcommand:\n return '{} {}'.format(\n self.cmd, ' '.join(self._args))\n else:\n return '{} {} {}'.format(\n self.cmd, self.subcommand, ' '.join(self._args))", "def _subcommand_for_name(self, name):\n for subcommand in self.subcommands:\n if name == subcommand.name or \\\n name in subcommand.aliases:\n return subcommand\n return None", "def get_name(self) -> str:\n # read the original value passed by the command\n name = self.raw_param.get(\"name\")\n\n # this parameter does not need dynamic completion\n # this parameter does not need validation\n return name", "def subresource_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"subresource_name\")", "def name(self):\n module_filepath = inspect.getfile(type(self))\n module_filename = os.path.basename(module_filepath)\n command_name, _ = os.path.splitext(module_filename)\n return command_name", "def subresource_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"subresource_name\")", "def subresource_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"subresource_name\")", "def get_command_name(command_element) -> str:\n proto_element = command_element.find('proto')\n for name in proto_element.iter('name'):\n return name.text\n return ''", "def _getArgStr(self):\n return \"name=%r\" % (self.name)", "def command_with_prefix(self):\n return self.endpoint_prefix.rstrip('/') + self.command", "def __getFullCommandName(self, command, type):\n return 'cmd_%s_%s' % (type, command)", "def _get_arg_name(self, arg, variable_name):", "def _subcommand_name(ignore=()):\n NON_ANIM_UTILS = [\"cfg\", \"--help\", \"-h\"]\n NON_ANIM_UTILS = [util for util in NON_ANIM_UTILS if util not in ignore]\n\n # If a subcommand is found, break out of the inner loop, and hit the break of the outer loop\n # on the way out, effectively breaking out of both loops. 
The value of arg will be the\n # subcommand to be taken.\n # If no subcommand is found, none of the breaks are hit, and the else clause of the outer loop\n # is run, setting arg to None.\n\n for item in NON_ANIM_UTILS:\n for arg in sys.argv:\n if arg == item:\n break\n else:\n continue\n break\n else:\n arg = None\n\n return arg", "def rel_command(self):\n return self.command.lstrip('/')", "def call_name(self):\n return str(self.executable.name)", "def _func_named(self, arg):\n result = None\n target = 'do_' + arg\n if target in dir(self):\n result = target\n else:\n if self.abbrev: # accept shortened versions of commands\n funcs = [func for func in self.keywords if func.startswith(arg) and func not in self.multilineCommands]\n if len(funcs) == 1:\n result = 'do_' + funcs[0]\n return result", "def get_description(self):\n return self['command_name']", "def subnetwork_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"subnetwork_name\")", "def get_subcommand_for_name(name: str) -> Optional[Subcommand]:\n matching = [s for s in subcommands if s.name.lower() == name.lower()]\n if matching:\n return matching[0]", "def fetch_subcommand(self, name):\n try:\n subcommand_class = self.subcommands[name]\n except KeyError:\n self.print_command_unkown_error(name)\n sys.exit(1)\n return subcommand_class(self.prog, name, self.argv[2:], self.stdout)", "def nested_subcmd(self, depth: int = 2) -> Optional[str]:\n # pylint: disable=protected-access\n current = 0\n subparser = self.parser\n try:\n while current < depth:\n action = subparser._actions[0]\n if isinstance(action, _SubParsersAction):\n subparser = action.choices[self.args[action.dest]]\n current += 1\n else:\n return None\n return subparser.name.split()[-1]\n except (IndexError, KeyError, TypeError):\n return None", "def name(self):\n if self.user_provided_name is not None:\n return self.user_provided_name\n else:\n return super().name", "def short_name(self) -> str:\n return self.name_components[-1]", "def __resolveCommandFunction(self, command, e):\n return self.__getFullCommandName(command, self.__resolveCommandType(command, e))", "def raw_arg(self):\n command = self.usage()[self.usage_index].partition(\" \")[0]\n # TODO: would self.message.text_content be better here?\n message = self.message.content\n # remove command\n return message.partition(command)[2].strip()", "def name(self) -> str:\n if hasattr(self, \"_name\"):\n return self._name\n _args: list[Arg] = []\n _ctx = self._select(\"name\", _args)\n return _ctx.execute_sync(str)", "def name(self) -> str:\n if hasattr(self, \"_name\"):\n return self._name\n _args: list[Arg] = []\n _ctx = self._select(\"name\", _args)\n return _ctx.execute_sync(str)", "def name(self) -> str:\n if hasattr(self, \"_name\"):\n return self._name\n _args: list[Arg] = []\n _ctx = self._select(\"name\", _args)\n return _ctx.execute_sync(str)", "def name(self) -> str:\n if hasattr(self, \"_name\"):\n return self._name\n _args: list[Arg] = []\n _ctx = self._select(\"name\", _args)\n return _ctx.execute_sync(str)", "def full_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"full_name\")", "def get_name(self, ):\n return self.get_parameter('name')", "def get_qualified_name(self):\n return self.attributes[\"qualifiedName\"]", "def get_name(self):\n return self.key().name().split(':', 1)[1]", "def _find_subcommand(args):\n subcmd = args[1]\n if subcmd in [\n \"cfg\"\n # , 'init',\n ]:\n return subcmd\n else:\n return None", "def fully_qualified_name(self) -> str:\n return 
pulumi.get(self, \"fully_qualified_name\")", "def get_absname(self):\n if self.animal == None: # no parent animal\n return self.name\n else:\n return '.'.join((self.animal.name, self.name))", "def name(self):\n suffix = ''\n if 'suffix' in self.context['strategy'] \\\n and self.context['strategy']['suffix'] is not None \\\n and len(self.context['strategy']['suffix']) > 0:\n suffix = \"_\" + self.context['strategy']['suffix']\n\n return self.get_name(self.context, suffix)", "def get_name(self):\n return self.normalize_name(self.name)", "def getName(self):\n return _libsbml.Submodel_getName(self)", "def path_name(self):\n return self.full_name", "def get_short_name(self):\n return self.full_name.split(' ')[0]", "def get_name(self):\n return self.options['name']", "def get_command(self) -> str:\n return 'title'", "def get_subname(get, node):\n if isinstance(node, ast.Attribute) and get(node.value) == 'parent':\n return node.attr", "def full_name(self) -> str:\n return self._name", "def get_command_prefix(self):\n return self.bot_data_file[\"commands_prefix\"]", "def real_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"real_name\")", "def name(self) -> str:\n return self.get_full_name()", "def argparse_subparser_name():\n return \"plugins\"", "def get_name(self):\n return self.__name", "def get_name(self):\n return self.__name", "def get_name(self):\n return self.__name", "def name_prefix(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"name_prefix\")", "def name_prefix(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"name_prefix\")", "def name_prefix(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"name_prefix\")", "def name_prefix(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"name_prefix\")", "def get_short_name(self):\n return self.name", "def get_short_name(self):\n return self.name", "def get_short_name(self):\n return self.name", "def get_short_name(self):\n return self.name", "def get_short_name(self):\n return self.name", "def get_short_name(self):\n return self.name", "def get_short_name(self):\n return self.name", "def get_short_name(self):\n return self.name", "def name(self):\n try:\n return self._name\n except AttributeError:\n if self.is_task:\n try:\n return self.pos_str\n except:\n return os.path.basename(self.workdir)\n else:\n return os.path.basename(self.workdir)", "def name(cls):\n return arg.s()(cls.func).func.__name__", "def full_name(self):\n return self._full_name", "def full_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"full_name\")", "def get_name(self):\r\n return self.__name", "def get_name(self):\r\n return self.__name", "def get_name(self):\r\n return self.__name", "def get_name(self):\r\n return self.__name", "def shortname(self):\n return self.get(\"shortName\")", "def get_arg(self, name):\n return getattr(self.args, f\"{self.key}_{self.alias}_{name}\")", "def get_short_name(self):\r\n return self.name", "def name(self):\n return self._alias", "def get_full_name(self):\n return self.name", "def get_full_name(self):\n return self.name", "def get_full_name(self):\n return self.name", "def get_full_name(self):\n return self.name", "def get_full_name(self):\n return self.name", "def longname(self):\n if not self.parent:\n return self.name\n return '%s.%s' % (self.parent.longname, self.name)", "def full_name(self) -> str:\n # return self.separator.join(map(lambda x: x.name, self.path()))\n return self.separator.join(map(lambda x: x.tagged_name, self.path()))", "def 
get_short_name(self):\n\n return self.name", "def get_short_name(self):\n\n return self.name", "def arg1(self) -> str:\n if self.command_type() is CommandType.arithmetic:\n return self.current_command\n\n if self.command_type() is CommandType._return:\n raise RuntimeError('Cannot call arg1 on CommandType._return')\n\n return self.current_command_split[1]", "def fullname(self):\n name = self.name or \"\"\n if self._parent and self._parent.fullname:\n name = path_splitter.join([self._parent.fullname, name])\n return name.replace(\":\", \"_\") # replace for graphviz escaping", "def get_name(self):\n\t\treturn self.__name", "def getname(self):\n return self.__name", "def get_name(self):\n return self.id.split('.')[-1:][0]", "def test_subcommand_arg_name_conflict(self):\n subcommand = {\n var: cli_parser.__dict__.get(var)\n for var in cli_parser.__dict__\n if var.isupper() and var.startswith(\"COMMANDS\")\n }\n for group, command in subcommand.items():\n for com in command:\n conflict_arg = [arg for arg, count in Counter(com.args).items() if count > 1]\n assert (\n [] == conflict_arg\n ), f\"Command group {group} function {com.name} have conflict args name {conflict_arg}\"", "def get_full_name(self):\n\n return self.name" ]
[ "0.6581749", "0.6499524", "0.6446358", "0.6352551", "0.6317428", "0.62414986", "0.6189401", "0.6147004", "0.6130716", "0.61187094", "0.60870796", "0.60323155", "0.5961086", "0.5867416", "0.5867416", "0.5861084", "0.58426297", "0.58400583", "0.5821133", "0.58019865", "0.57991683", "0.57839674", "0.5759702", "0.57539445", "0.5745656", "0.574303", "0.573899", "0.57144", "0.5713606", "0.56809866", "0.5677715", "0.56294644", "0.5612671", "0.5600796", "0.5600796", "0.5600796", "0.5600796", "0.5593297", "0.5590802", "0.5590217", "0.5582005", "0.5578794", "0.5577915", "0.55733895", "0.5568928", "0.55676067", "0.55417997", "0.5524463", "0.5503874", "0.54911005", "0.54888856", "0.5483906", "0.5478851", "0.5476196", "0.5471265", "0.54600966", "0.54589593", "0.5458898", "0.5458898", "0.5458898", "0.545425", "0.545425", "0.545425", "0.545425", "0.54534173", "0.54534173", "0.54534173", "0.54534173", "0.54534173", "0.54534173", "0.54534173", "0.54534173", "0.5448091", "0.54294634", "0.54226494", "0.54158646", "0.5414119", "0.5414119", "0.5414119", "0.5414119", "0.5412498", "0.54077494", "0.540686", "0.54038936", "0.540307", "0.540307", "0.540307", "0.540307", "0.540307", "0.53976744", "0.5394955", "0.5394412", "0.5394412", "0.5388043", "0.53873897", "0.5383433", "0.5361652", "0.53595835", "0.5359582", "0.5357133" ]
0.6821528
0
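The `subcmd` property above reports the canonical subcommand name rather than the alias the user typed. With plain `argparse`, the parsed namespace stores whatever was typed, so the canonical form has to be recovered from the subparser map; a sketch with a made-up two-command CLI:

```python
import argparse

parser = argparse.ArgumentParser(prog="bot")
subparsers = parser.add_subparsers(dest="cmd")
subparsers.add_parser("status", aliases=["st"])   # "status" is the canonical name
subparsers.add_parser("quote")

ns = parser.parse_args(["st"])
print(ns.cmd)                                     # "st" -- the alias, exactly as typed

# Every alias maps to the same parser object, whose prog ends with the
# canonical name ("bot status"), so the canonical form can be recovered:
canonical = subparsers.choices[ns.cmd].prog.split()[-1]
print(canonical)                                  # "status"
```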
Get the name of a nested subcommand. Like the `subcmd` property, the name returned is always the canonical name for the subcommand. The `depth` parameter determines how many levels of nesting to traverse; the default of `2` gets the first nested subcommand. As a consequence, a value of `1` is the same as `subcmd`.
def nested_subcmd(self, depth: int = 2) -> Optional[str]: # pylint: disable=protected-access current = 0 subparser = self.parser try: while current < depth: action = subparser._actions[0] if isinstance(action, _SubParsersAction): subparser = action.choices[self.args[action.dest]] current += 1 else: return None return subparser.name.split()[-1] except (IndexError, KeyError, TypeError): return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_subcmd(self, name: str) -> \"CommandHelp\":\n try:\n return self.subcmds[name]\n except KeyError:\n # Try looking up by alias\n for sub_name, sub_help in self.subcmds.items():\n for alias in sub_help.aliases:\n if name == alias:\n return self.subcmds[sub_name]\n raise", "def get_subcommand_for_name(name: str) -> Optional[Subcommand]:\n matching = [s for s in subcommands if s.name.lower() == name.lower()]\n if matching:\n return matching[0]", "def fetch_subcommand(self, name):\n try:\n subcommand_class = self.subcommands[name]\n except KeyError:\n self.print_command_unkown_error(name)\n sys.exit(1)\n return subcommand_class(self.prog, name, self.argv[2:], self.stdout)", "def _subcommand_for_name(self, name):\n for subcommand in self.subcommands:\n if name == subcommand.name or \\\n name in subcommand.aliases:\n return subcommand\n return None", "def subcmd(self) -> Optional[str]:\n return self._subcmd", "def _find_subcommand(args):\n subcmd = args[1]\n if subcmd in [\n \"cfg\"\n # , 'init',\n ]:\n return subcmd\n else:\n return None", "def _subcommand_name(ignore=()):\n NON_ANIM_UTILS = [\"cfg\", \"--help\", \"-h\"]\n NON_ANIM_UTILS = [util for util in NON_ANIM_UTILS if util not in ignore]\n\n # If a subcommand is found, break out of the inner loop, and hit the break of the outer loop\n # on the way out, effectively breaking out of both loops. The value of arg will be the\n # subcommand to be taken.\n # If no subcommand is found, none of the breaks are hit, and the else clause of the outer loop\n # is run, setting arg to None.\n\n for item in NON_ANIM_UTILS:\n for arg in sys.argv:\n if arg == item:\n break\n else:\n continue\n break\n else:\n arg = None\n\n return arg", "def get_subname(get, node):\n if isinstance(node, ast.Attribute) and get(node.value) == 'parent':\n return node.attr", "def get_sub_name(self):\n return self.sub_name", "def getattr_nested(obj, name):\n dots = name.count('.')\n if dots == 0:\n return getattr(obj, name)\n else:\n first, rest = name.split('.', 1)\n return getattr_nested(getattr(obj, first), rest)", "def subresource_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"subresource_name\")", "def subcmd_help(word, word_eol):\n\tif len(word) > 1:\n\t\ttopic = word[1]\n\t\tif topic in subcommands:\n\t\t\tdoprint('help', subcommands[topic].__doc__)\n\t\telse:\n\t\t\tdoprint('help', 'Unknown subcommand \"%s\". Try \"/mt_irc help\".' 
% topic)\n\telse:\n\t\tfor subcmd in subcommands:\n\t\t\tdoprint('help', subcommands[subcmd].__doc__)", "def find_subcommands():\n clsmembers = inspect.getmembers(sys.modules[__name__], inspect.isclass)\n subclasses = [cls for cls in clsmembers if issubclass(cls[1], (Subcommand, LocalSubcommand))\n and cls[1] not in [Subcommand, LocalSubcommand]]\n\n subcommands = []\n for subclass in subclasses:\n name = '-'.join([part.lower() for part in re.findall('[A-Z][a-z]*', subclass[0])])\n subcommands.append((name, subclass[1]))\n return subcommands", "def test_get_call_name2(self):\n tree = ast.parse(\"a.b.c.d(x,y)\").body[0].value\n\n name = b_utils.get_call_name(tree, {\"a\": \"alias.x.y\"})\n self.assertEqual(\"alias.x.y.b.c.d\", name)\n\n name = b_utils.get_call_name(tree, {\"a.b\": \"alias.x.y\"})\n self.assertEqual(\"alias.x.y.c.d\", name)\n\n name = b_utils.get_call_name(tree, {\"a.b.c.d\": \"alias.x.y\"})\n self.assertEqual(\"alias.x.y\", name)", "def longname(self):\n if not self.parent:\n return self.name\n return '%s.%s' % (self.parent.longname, self.name)", "def subresource_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"subresource_name\")", "def subresource_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"subresource_name\")", "def construct_subcommand(\n parser,\n hooks=None,\n arg_filter=None,\n is_root=True\n):\n subcommands = []\n options = []\n args = []\n subcommand = {}\n hooks = {} if hooks is None else hooks\n subcommand_hook = hooks.get(\"subcommand\")\n\n if is_root:\n subcommand[\"name\"] = parser.prog\n\n for arg in parser._actions:\n if arg_filter is not None and arg_filter(arg):\n continue\n if arg.nargs == argparse.PARSER:\n subcommand.update(get_base_suggestion(arg))\n help_map = {a.dest: a.help for a in arg._choices_actions}\n\n nested_subcommands = {}\n for name, nested_parser in arg.choices.items():\n if nested_parser in nested_subcommands:\n nested_subcommands[nested_parser][\"name\"].append(name)\n else:\n nested_subcommands[nested_parser] = {\n \"name\": [name],\n **construct_subcommand(\n nested_parser,\n hooks=hooks,\n arg_filter=arg_filter,\n is_root=False\n ),\n }\n if name in help_map and help_map[name] != argparse.SUPPRESS:\n nested_subcommands[nested_parser][\"description\"] = str(help_map[name])\n for p, nested_subcommand in nested_subcommands.items():\n if len(nested_subcommand[\"name\"]) == 1:\n nested_subcommand[\"name\"] = nested_subcommand[\"name\"][0]\n if subcommand_hook:\n subcommand_hook(nested_subcommand, p)\n subcommands.append(nested_subcommand)\n elif arg.option_strings:\n options.append(construct_option(arg, hooks, parser))\n else:\n args.extend(construct_args(arg, hooks, parser))\n\n if subcommands:\n subcommand[\"subcommands\"] = subcommands\n if options:\n subcommand[\"options\"] = options\n if args:\n subcommand[\"args\"] = args\n\n if is_root and subcommand_hook:\n subcommand_hook(subcommand, parser)\n\n return subcommand", "def __str__(self):\n if not self._args and not self.subcommand:\n return self.cmd\n elif not self._args and self.subcommand:\n return '{} {}'.format(\n self.cmd, self.subcommand)\n elif self._args and not self.subcommand:\n return '{} {}'.format(\n self.cmd, ' '.join(self._args))\n else:\n return '{} {} {}'.format(\n self.cmd, self.subcommand, ' '.join(self._args))", "def getNestedCVTerm(self, *args):\n return _libsbml.CVTerm_getNestedCVTerm(self, *args)", "def subnetwork_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"subnetwork_name\")", "def 
test_get_call_name1(self):\n tree = ast.parse(\"a.b.c.d(x,y)\").body[0].value\n name = b_utils.get_call_name(tree, {})\n self.assertEqual(\"a.b.c.d\", name)", "def sub_command(self, name: str=None, description: str=None, options: list=None, connectors: dict=None, **kwargs):\r\n def decorator(func):\r\n if self.child_type is None:\r\n if len(self.registerable.options) > 0:\r\n self.registerable.options = []\r\n self.child_type = Type.SUB_COMMAND\r\n \r\n new_func = SubCommand(\r\n func,\r\n name=name,\r\n description=description,\r\n options=options,\r\n connectors=connectors,\r\n **kwargs\r\n )\r\n self.children[new_func.name] = new_func\r\n self.registerable.options.append(new_func.option)\r\n return new_func\r\n return decorator", "def get_tree_str(self, depth: int = 0) -> str:\n temp = \" \" * depth + str(self.head) + \"\\n\"\n for son in self.sons:\n temp += son.get_tree_str(depth + 1)\n return temp", "def cog_subcommand(\n *,\n base,\n subcommand_group=None,\n name=None,\n description: str = None,\n base_description: str = None,\n base_desc: str = None,\n subcommand_group_description: str = None,\n sub_group_desc: str = None,\n guild_ids: typing.List[int] = None,\n options: typing.List[dict] = None,\n connector: dict = None\n):\n base_description = base_description or base_desc\n subcommand_group_description = subcommand_group_description or sub_group_desc\n\n def wrapper(cmd):\n desc = description or inspect.getdoc(cmd)\n if options is None:\n opts = manage_commands.generate_options(cmd, desc, connector)\n else:\n opts = options\n\n _sub = {\n \"func\": cmd,\n \"name\": name or cmd.__name__,\n \"description\": desc,\n \"base_desc\": base_description,\n \"sub_group_desc\": subcommand_group_description,\n \"guild_ids\": guild_ids,\n \"api_options\": opts,\n \"connector\": connector,\n }\n return CogSubcommandObject(_sub, base, name or cmd.__name__, subcommand_group)\n\n return wrapper", "def hierarchy_name(self, adjust_for_printing=True):\n if adjust_for_printing: adjust = lambda x: adjust_name_for_printing(x)\n else: adjust = lambda x: x\n if self.has_parent():\n return self._parent_.hierarchy_name() + \".\" + adjust(self.name)\n return adjust(self.name)", "def get_command_name(command_element) -> str:\n proto_element = command_element.find('proto')\n for name in proto_element.iter('name'):\n return name.text\n return ''", "def expansion_key(symbol, expansion):\n if isinstance(expansion, tuple):\n expansion = expansion[0]\n if not isinstance(expansion, str):\n children = expansion\n expansion = all_terminals((symbol, children))\n return symbol + \" -> \" + expansion", "def tree(self, depth_index=0):\r\n print(self.tree_str(depth_index))", "def test_subCommandInTwoPlaces(self):\n class SubOpt(usage.Options):\n pass\n class OptFoo(usage.Options):\n subCommands = [\n ('foo', 'f', SubOpt, 'quux'),\n ]\n class OptBar(usage.Options):\n subCommands = [\n ('bar', 'b', SubOpt, 'quux'),\n ]\n oFoo = OptFoo()\n oFoo.parseOptions(['foo'])\n oBar=OptBar()\n oBar.parseOptions(['bar'])\n self.failUnless(hasattr(oFoo.subOptions, 'parent'))\n self.failUnless(hasattr(oBar.subOptions, 'parent'))\n self.failUnlessIdentical(oFoo.subOptions.parent, oFoo)\n self.failUnlessIdentical(oBar.subOptions.parent, oBar)", "def get_sub_by_name(name, submasters=None):\n if not submasters:\n submasters = get_global_submasters()\n\n if name in submasters.get_all_sub_names():\n return submasters.get_sub_by_name(name)\n\n try:\n val = int(name)\n s = Submaster(\"#%d\" % val, leveldict={val : 1.0}, temporary=True)\n return 
s\n except ValueError:\n pass\n\n try:\n subnum = Patch.get_dmx_channel(name)\n s = Submaster(\"'%s'\" % name, leveldict={subnum : 1.0}, temporary=True)\n return s\n except ValueError:\n pass\n\n # make an error sub\n return Submaster('%s' % name)", "def argparse_subparser_name():\n return \"plugins\"", "def get_command_name(args):\n\n # First argument would always be atlas or manage.py, i.e the calling interface\n if len(args) < 2:\n CommandError.print_to_err(f\"Name of command missing. Valid commands are - {VALID_COMMANDS}\")\n\n return args[1]", "def sub_command(self, name: str=None, description: str=None, options: list=None, connectors: dict=None, **kwargs):\r\n def decorator(func):\r\n new_func = SubCommand(\r\n func,\r\n name=name,\r\n description=description,\r\n options=options,\r\n connectors=connectors,\r\n **kwargs\r\n )\r\n self.children[new_func.name] = new_func\r\n self.option.options.append(new_func.option)\r\n return new_func\r\n return decorator", "def tree(ctx):\n hokusai.print_command_tree(ctx.find_root().command)", "def subcommand(wrapped):\n def callback(scanner, name, ob):\n scanner.subcommands[ob.name] = ob\n venusian.attach(wrapped, callback, category='subcommands')\n return wrapped", "def get_caller_name(depth=2, mod=True, cls=False, mth=False):\n stack = inspect.stack()\n start = 0 + depth\n if len(stack) < start + 1:\n return ''\n parent_frame = stack[start][0]\n name = []\n module = inspect.getmodule(parent_frame)\n if module and mod:\n name.append(module.__name__)\n if cls and 'self' in parent_frame.f_locals:\n name.append(parent_frame.f_locals['self'].__class__.__name__)\n if mth:\n codename = parent_frame.f_code.co_name\n if codename != '<module>':\n name.append(codename)\n del parent_frame, stack\n return '.'.join(name)", "def commands_for_submode(prefix):\n candidates = _lookup_command_candidates(prefix, command_registry)\n # print type(candidates), [x['self'] for x in candidates]\n # print [x.get('all-help') for x in candidates]\n if debug.cli():\n # print 'commands_for_submode:', sorted(dict([[x['name']['title'] if type(x['name']) == dict else x['name'], None] for x in candidates]).keys())\n pass\n return candidates", "def cog_subcommand(\n *,\n base,\n subcommand_group=None,\n name=None,\n description: str = None,\n base_description: str = None,\n base_desc: str = None,\n base_default_permission: bool = True,\n base_permissions: typing.Dict[int, list] = None,\n subcommand_group_description: str = None,\n sub_group_desc: str = None,\n guild_ids: typing.List[int] = None,\n options: typing.List[dict] = None,\n connector: dict = None,\n):\n base_description = base_description or base_desc\n subcommand_group_description = subcommand_group_description or sub_group_desc\n guild_ids = guild_ids if guild_ids else []\n if not base_permissions:\n base_permissions = {}\n\n def wrapper(cmd):\n decorator_permissions = getattr(cmd, \"__permissions__\", None)\n if decorator_permissions:\n base_permissions.update(decorator_permissions)\n\n desc = description or inspect.getdoc(cmd)\n if options is None:\n opts = manage_commands.generate_options(cmd, desc, connector)\n else:\n opts = options\n\n if guild_ids and not all(isinstance(item, int) for item in guild_ids):\n raise IncorrectGuildIDType(\n f\"The snowflake IDs {guild_ids} given are not a list of integers. Because of discord.py convention, please use integer IDs instead. 
Furthermore, the command '{name or cmd.__name__}' will be deactivated and broken until fixed.\"\n )\n\n _cmd = {\n \"func\": None,\n \"description\": base_description,\n \"guild_ids\": guild_ids.copy(),\n \"api_options\": [],\n \"default_permission\": base_default_permission,\n \"api_permissions\": base_permissions,\n \"connector\": {},\n \"has_subcommands\": True,\n }\n\n _sub = {\n \"func\": cmd,\n \"name\": name or cmd.__name__,\n \"description\": desc,\n \"base_desc\": base_description,\n \"sub_group_desc\": subcommand_group_description,\n \"guild_ids\": guild_ids,\n \"api_options\": opts,\n \"connector\": connector,\n }\n return CogSubcommandObject(base, _cmd, subcommand_group, name or cmd.__name__, _sub)\n\n return wrapper", "def get_name(self):\n return self.children[0]", "def pretty_print(self,depth=0):\n\t\tfor i in range(depth):\n\t\t\tprint \"\\t\",\n\t\t\t\t\n\t\tprint self.__str__()\n\t\t\n\t\tfor c in self.tree.children:\n\t\t\tc.viz.pretty_print(depth+1)", "def cli():\n pass # do nothing here, it just defines the name for other subcommands", "def _print_with_depth(self, string, depth):\n print(\"{0}{1}\".format(\" \" * depth, string))", "def subcommands(self) -> list[\"ProjectCommand\"]:\n _args: list[Arg] = []\n _ctx = self._select(\"subcommands\", _args)\n _ctx = ProjectCommand(_ctx)._select_multiple(\n _description=\"description\",\n _name=\"name\",\n _result_type=\"resultType\",\n )\n return _ctx.execute_sync(list[ProjectCommand])", "def get_command(self, ctx, cmd_name):\n cmd_name = self.MAP.get(cmd_name, cmd_name)\n return click.Group.get_command(self, ctx, cmd_name)", "def svn_info_t_depth_get(svn_info_t_self): # real signature unknown; restored from __doc__\n pass", "def pf_one_node(self, depth):\n if self.op == NodeOperation.CHAR:\n line = 'char: {}'.format(self.char)\n else:\n line = 'op: {}'.format(self.op)\n out = '-' * depth + line\n return out", "def tree(node):\n subtrees = []\n for arg in node.args:\n subtrees.append(tree(arg))\n s = print_node(node)+pprint_nodes(subtrees)\n return s", "def print_tree(node, depth=1):\n for child in node:\n print(\" \" * depth + child.get_name())\n print_tree(child, depth+1)", "def test_get_subcommand_help(self):\r\n subcommands = list(self.testcommand.subparser.choices.keys())\r\n for subcommand in subcommands:\r\n help_message = self.testcommand.get_help(subcommand=subcommand)\r\n self.assertEqual(1, help_message.count(\"usage\"))", "def test_get_subcommand_help(self):\n subcommands = list(self.testcommand.subparser.choices.keys())\n for subcommand in subcommands:\n help_message = self.testcommand.get_help(subcommand=subcommand)\n self.assertEqual(1, help_message.count(\"usage\"))", "def pretty_print(tree, depth=0):\r\n if depth == 0:\r\n print('TREE')\r\n\r\n for index, split_criterion in enumerate(tree):\r\n sub_trees = tree[split_criterion]\r\n\r\n # Print the current node: split criterion\r\n print('|\\t' * depth, end='')\r\n print('+-- [SPLIT: x{0} = {1} {2}]'.format(split_criterion[0], split_criterion[1], split_criterion[2]))\r\n\r\n # Print the children\r\n if type(sub_trees) is dict:\r\n pretty_print(sub_trees, depth + 1)\r\n else:\r\n print('|\\t' * (depth + 1), end='')\r\n print('+-- [LABEL = {0}]'.format(sub_trees))", "def get_param(self, name: str, depth: int = 0) -> \"Param\":\n parts: List[str] = name.split(\".\")\n if depth < len(parts) - 1:\n part = parts[depth + 1]\n if part not in self._stack:\n return None\n if not isinstance(\n self._stack[part], # type: ignore\n ParamNamespace,\n ):\n return 
self._stack[part] # type: ignore\n return self._stack[part].get_param(name, depth + 1) # type: ignore\n if name in self.names:\n return self\n return None", "def getName(self):\n return _libsbml.Submodel_getName(self)", "def __getFullCommandName(self, command, type):\n return 'cmd_%s_%s' % (type, command)", "def subsystem(self) -> str:\n return self.raw.get(\"subsystem\", \"\")", "def setup_subparser(name, description, commands):\n subparser = SUBPARSER.add_parser(\n name,\n help=description\n )\n subparser.add_argument(\n 'sub_command',\n metavar='sub_command',\n type=str,\n nargs='+',\n help='Which command to run. Options: %s' % ', '.join(commands),\n choices=commands\n )\n\n return subparser", "def getLevelNames(names):\n topNames = []\n deeperNames = []\n for item in names:\n if isinstance(item, str):\n topNames.append(item)\n else:\n topNames.append(item[0])\n # Names immediately under the current level must be\n # qualified with the current level full name\n for j in item[1]:\n if isinstance(j, str):\n subname = '%s/%s' % (item[0], j)\n else: # j is a 2-tuple\n jlist = list(j)\n jlist[0] = '%s/%s' % (item[0], jlist[0])\n subname = tuple(jlist)\n deeperNames.append( subname)\n return topNames, deeperNames", "def sub_command_group(self, name=None, **kwargs):\r\n def decorator(func):\r\n if self.child_type is None:\r\n if len(self.registerable.options) > 0:\r\n self.registerable.options = []\r\n self.child_type = Type.SUB_COMMAND_GROUP\r\n \r\n new_func = SubCommandGroup(func, name=name, **kwargs)\r\n self.children[new_func.name] = new_func\r\n self.registerable.options.append(new_func.option)\r\n return new_func\r\n return decorator", "def get_tensor_name(subgraph, tensor_idx):\n return subgraph.Tensors(tensor_idx).Name().decode(\"utf-8\")", "def fetch_command(self, global_options, subcommand):\r\n commands = self.get_commands(global_options)\r\n try:\r\n klass = commands[subcommand]\r\n except KeyError:\r\n sys.stderr.write(\"Unknown command: %r\\nType '%s help' for usage.\\nMany commands will only run at project directory, maybe the directory is not right.\\n\" % \\\r\n (subcommand, self.prog_name))\r\n sys.exit(1)\r\n return klass", "def name(self):\n\t\treturn self.args[0]", "def GetSubkeyByName(self, name):", "def _name(self):\n return self._arguments[0].split('(')[0]", "def add_subcommand(\n subp: _SubParsersAction, name: str, description: Optional[str] = None, **kwargs\n ) -> \"CommandParser\":\n desc_help = {\"description\": description, \"help\": description}\n return subp.add_parser(name, **desc_help, **kwargs)", "def get_subcommand_query(query_str: str) -> Optional[SubcommandQuery]:\n if not query_str:\n return None\n\n # spilt:\n # \"subcommand_name rest of query\" -> [\"subcommand_name\", \"rest of query\"\"]\n query_parts = query_str.strip().split(None, maxsplit=1)\n\n if len(query_parts) < 2:\n query_str = \"\"\n else:\n query_str = query_parts[1]\n\n subcommand = get_subcommand_for_name(query_parts[0])\n if subcommand:\n return SubcommandQuery(subcommand=subcommand, query=query_str)", "def fsl_sub(*args, **kwargs):\n cmd = ['fsl_sub']\n cmd += wutils.applyArgStyle('--', singlechar_args=True, **kwargs)\n cmd += list(args)\n return cmd", "def getSubGlyph(self, *args):\n return _libsbml.GeneralGlyph_getSubGlyph(self, *args)", "def main(ctx):\n\n print(\"Mode:\", ctx.invoked_subcommand)", "def print_tree(tree, depth=0):\n print('+','--'*depth,tree[0])\n if isinstance(tree[1], str):\n print('|',' '*depth,'->',tree[1])\n return\n if isinstance(tree[1],Terminal):\n print('|',' 
'*depth,'->',repr(tree[1]))\n return\n for subtree in tree[1]:\n print_tree(subtree, depth+1)", "def get_command(self, ctx: Context, cmd_name: str) -> Command:\n next_command = self.command.pop(0)\n if not self.command:\n return DocsBaseCommand(\n name=next_command,\n short_help=f\"Documentation for {self.command_string}\",\n callback=self.command_callback,\n )\n return DocsSubCommand(command=self.command)", "def visualize(tree, depth=0):\r\n\r\n if depth == 0:\r\n print('TREE')\r\n\r\n for index, split_criterion in enumerate(tree):\r\n sub_trees = tree[split_criterion]\r\n\r\n # Print the current node: split criterion\r\n print('|\\t' * depth, end='')\r\n print('+-- [SPLIT: x{0} = {1}]'.format(split_criterion[0], split_criterion[1]))\r\n\r\n # Print the children\r\n if type(sub_trees) is dict:\r\n visualize(sub_trees, depth + 1)\r\n else:\r\n print('|\\t' * (depth + 1), end='')\r\n print('+-- [LABEL = {0}]'.format(sub_trees))", "def _var_name_sub(self, sprintf, quote=False):\n q = ''\n if quote:\n q = \"'\"\n name_list = map(lambda x: q + self.cdict[x][0] + q, sprintf[\"vars\"] )\n return sprintf[\"text\"] % tuple(name_list)", "def test_handle_subcommand_help(self):\n subcommands = list(self.testcommand.subparser.choices.keys())\n for subcommand in subcommands:\n command = f\"team {subcommand} --help\"\n ret, code = self.testcommand.handle(command, user)\n self.assertEqual(1, ret.count(\"usage\"))\n self.assertEqual(code, 200)\n\n command = f\"team {subcommand} -h\"\n ret, code = self.testcommand.handle(command, user)\n self.assertEqual(1, ret.count(\"usage\"))\n self.assertEqual(code, 200)\n\n command = f\"team {subcommand} --invalid argument\"\n ret, code = self.testcommand.handle(command, user)\n self.assertEqual(1, ret.count(\"usage\"))\n self.assertEqual(code, 200)", "def get_nested_attr(__o: object, __name: str, *args) -> Any:\n def _getattr(__o, __name):\n return getattr(__o, __name, *args)\n return reduce(_getattr, [__o] + __name.split('.')) # type: ignore", "def __repr__ (self, depth=None):\n\t\ts=[];add=s.append\n\t\t\n\t\tadd (\"%s%s\" % (myglobals.getIndent(self.level), self.name))\n\t\tif depth is None or self.level < depth:\n\t\t\tfor status in self.selected:\n\t\t\t\tobj = status.fsObj\n\t\t\t\t# if obj.level > depth:\n\t\t\t\t\t# # print 'level (%d) exceeds depth, skipping' % obj.level\n\t\t\t\t\t# continue\n\t\t\t\tif isinstance (obj, WorkingDirectory):\n\t\t\t\t\t# print \"DIRECTORY %s\" % obj.name\n\t\t\t\t\tif not obj.selected.isempty():\n\t\t\t\t\t\tadd (str(obj))\n\t\t\t\telif isinstance (obj, JloFile):\n\t\t\t\t\tif os.path.exists(obj.path):\n\t\t\t\t\t\tadd (\"%s (%s)!!!\" % ( str(obj), status.flag))\n\t\t\t\t\t\t# add (\"%s%s (%s)!!!\" % (myglobals.getIndent(self.level), str(obj), status.flag))\n\t\t\t\t\telse:\n\t\t\t\t\t\tadd (\"%s%s (%s)???\" % (myglobals.getIndent(self.level), str(obj), status.flag))\n\t\t\t\telse:\n\t\t\t\t\t## missing directory\n\t\t\t\t\tadd (\"%s%s (missing)##\" % (myglobals.getIndent(self.level+1), obj.name))\n\t\treturn '\\n'.join (s)", "def print_help(self, prog_name, subcommand):\r\n parser = self.create_parser(prog_name, subcommand)\r\n parser.print_help()", "def test_subcommand_arg_name_conflict(self):\n subcommand = {\n var: cli_parser.__dict__.get(var)\n for var in cli_parser.__dict__\n if var.isupper() and var.startswith(\"COMMANDS\")\n }\n for group, command in subcommand.items():\n for com in command:\n conflict_arg = [arg for arg, count in Counter(com.args).items() if count > 1]\n assert (\n [] == conflict_arg\n ), f\"Command 
group {group} function {com.name} have conflict args name {conflict_arg}\"", "def get_node_name(name: str) -> str:\n if is_control_dependency(name):\n return name[1:]\n return name.split(':', maxsplit=1)[0]", "def get_complete_name(self):\n if self.parent_id:\n name = '%s / %s'%(self.parent_id.get_complete_name(), self.name)\n else:\n name = self.name\n \n return name", "def getCommandsHelp(root=__name__):\n groups = {}\n commands = sorted([i for i in getCommands(root).iteritems() if i[0] != '__module__'])\n for cmd, topcmd in commands:\n module = topcmd['__module__']\n descr = getattr(module, 'SHORT_DESCRIPTION', \"FIXME: No description\")\n group = getattr(module, 'GROUP', None)\n name = '{:<12}'.format(cmd)\n groups.setdefault(group, []).append(' %s %s' % (name, descr))\n \n parts = []\n for group, members in groups.iteritems():\n if group:\n parts.append(group+' subcommands:')\n else:\n parts.append('subcommands:')\n parts.extend(members)\n parts.append('')\n return '\\n'.join(parts)", "def _name(self):\n return self.arguments[0].split('(')[0]", "def sub(proto, *args):\n try:\n text = proto.format(*args)\n except:\n text = \"--\"\n #print sub(\"WARNING: Couldn't sub {} with {}\", proto, args)\n return text", "def add_subcommands(self, name='subcmd', arg_kws=None, optional=False):\n if self._subcmds is not None:\n raise RuntimeError(\"This config already has subcommands.\")\n if name in self.ReservedVariables or name[0] == '_':\n raise ValueError(\"Config variable name '%s' is reserved.\" % name)\n if name in self.confvariable:\n raise ValueError(\"Config variable '%s' is already defined.\" % name)\n if arg_kws is None:\n arg_kws = dict(title=\"subcommands\")\n else:\n arg_kws = dict(arg_kws)\n arg_kws['dest'] = name\n subparsers = self.argparser.add_subparsers(**arg_kws)\n var = ConfigSubCmds(name, optional, self, subparsers)\n self.confvariable[name] = var\n self.confvariables.append(var)\n self._subcmds = var\n return var", "def path_child(path):\n return path_to_str(parse_path(path)[1:])", "def _func_named(self, arg):\n result = None\n target = 'do_' + arg\n if target in dir(self):\n result = target\n else:\n if self.abbrev: # accept shortened versions of commands\n funcs = [func for func in self.keywords if func.startswith(arg) and func not in self.multilineCommands]\n if len(funcs) == 1:\n result = 'do_' + funcs[0]\n return result", "def find(sub, arg):\n\n if sub == 'collections':\n res = api.find_collections(arg)\n elif sub == 'domains':\n res = api.find_domains(arg)\n elif sub == 'problems':\n res = api.find_problems(arg)\n else:\n print(\"Error: Unrecognized sub-command, {0}\".format(sub))\n exit(1)\n\n pprint.pprint(res)", "def getIndexForSubGlyph(self, *args):\n return _libsbml.GeneralGlyph_getIndexForSubGlyph(self, *args)", "def command_name(self):\n return None", "def sub_path(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"sub_path\")", "def get_level_name(target_level, cwd=None):\n if cwd is None:\n cwd = os.getcwd()\n\n this_level = level(cwd)\n this_idx = levels.index(this_level)\n target_idx = levels.index(target_level)\n i = this_idx\n cw = cwd\n pp = \"\"\n while i >= target_idx:\n cw, pp = os.path.split(cw)\n i -= 1\n return pp", "def Visit(self, node, parent, is_group):\n command = cli_tree.Command(node, parent, include_hidden_flags=False)\n return command", "def _get_commands_section(registry, title=\"Commands\", hdg_level1=\"#\",\n hdg_level2=\"=\", output_dir=None):\n file_per_topic = output_dir is not None\n lines = [title, hdg_level1 * 
len(title), \"\"]\n if file_per_topic:\n lines.extend([\".. toctree::\", \" :maxdepth: 1\", \"\"])\n\n cmds = sorted(bzrlib.commands.builtin_command_names())\n for cmd_name in cmds:\n cmd_object = bzrlib.commands.get_cmd_object(cmd_name)\n if cmd_object.hidden:\n continue\n heading = cmd_name\n underline = hdg_level2 * len(heading)\n text = cmd_object.get_help_text(plain=False, see_also_as_links=True)\n help = \"%s\\n%s\\n\\n%s\\n\\n\" % (heading, underline, text)\n if file_per_topic:\n topic_id = _dump_text(output_dir, cmd_name, help)\n lines.append(\" %s\" % topic_id)\n else:\n lines.append(help)\n\n return \"\\n\" + \"\\n\".join(lines) + \"\\n\"", "def m_get_subnode(node_name, index):\n\n # Initialize the contract classes. These classes only work when the smart contracts are already deployed\n# ens = ENS_class()\n# resolver = PublicResolver_class()\n\n subnode_hash = ens.subnode(node_name, index)\n\n # Check if we received a cero value (32 bytes will with zeroes)\n if subnode_hash == bytes(32):\n print(f\"There are no subnodes\")\n return\n\n # Try to resolve the name from the blockchain\n subnode_name = resolver.name(node_hash=subnode_hash)\n if len(subnode_name) > 0:\n print(\n f\"Subnode_hash: {subnode_hash.hex()}, Subnode name: {subnode_name}\")\n else:\n print(\n f\"Subnode_hash: {subnode_hash.hex()}, could not be resolved to a name.\")", "def print_help(self, prog_name, subcommand):\n parser = self.create_parser(prog_name, subcommand)\n parser.print_help()", "def print_help(self, prog_name, subcommand):\n parser = self.create_parser(prog_name, subcommand)\n parser.print_help()", "def print_help(self, prog_name, subcommand):\n parser = self.create_parser(prog_name, subcommand)\n parser.print_help()", "def getElementName(self):\n return _libsbml.ListOfSubmodels_getElementName(self)", "def _deal_with_super_sub_expanded(self, string, style=\"plain\"):\n if \"{\" in string:\n name, supers, subs = string, [], []\n else:\n name, supers, subs = split_super_sub(string)\n\n names = [translate(name) for name in name.split(\" \")]\n supers = [translate(sup) for sup in supers]\n subs = [translate(sub) for sub in subs]\n\n name = \" \".join(names)\n\n # apply the style only to the name\n if style == \"bold\":\n name = \"\\\\mathbf{{{}}}\".format(name)\n\n # glue all items together:\n if supers:\n name += \"^{%s}\" % \" \".join(supers)\n if subs:\n name += \"_{%s}\" % \" \".join(subs)\n\n return name", "def subcommand(self, base_name, name, description=MISSING, options=MISSING, guild_ids=MISSING, default_permission=True, guild_permissions=MISSING):\n def wrapper(callback):\n \"\"\"The wrapper for the callback function. 
The function's parameters have to have the same name as the parameters specified in the slash command.\n\n `ctx` is of type :class:`~SlashedCommand` and is used for responding to the interaction and more\n\n Examples\n --------\n - no parameter:\n `async def command(ctx): ...`\n - required parameter \"number\":\n `async def command(ctx, number): ...`\n - optional parameter \"user\":\n `async def command(ctx, user=default_value)`\n - multiple optional parameters \"user\", \"number\":\n `async def command(ctx, user=default_value, number=default_value)`\n - one required and one optional parameter \"user\", \"text\":\n `async def command(ctx, user, text=default_value)`\n\n Note: Replace `default_value` with a value you want to be used if the parameter is not specified in discord, if you don't want a default value, just set it to `None`\n \"\"\"\n if self.subcommands.get(base_name) is None:\n self.subcommands[base_name] = {}\n\n self.subcommands[base_name][name] = SubSlashCommand(callback, base_name, name, description, options=options, guild_ids=guild_ids, default_permission=default_permission, guild_permissions=guild_permissions)\n return wrapper" ]
[ "0.61825335", "0.5816208", "0.57917714", "0.5630253", "0.55556434", "0.55294657", "0.54540044", "0.5216835", "0.5201424", "0.49779016", "0.49343693", "0.48652846", "0.48608866", "0.48450202", "0.4835011", "0.47898704", "0.47898704", "0.47851962", "0.476425", "0.472209", "0.47188637", "0.465265", "0.46476865", "0.4645716", "0.46369252", "0.46112883", "0.45931447", "0.45909396", "0.4582103", "0.4546514", "0.4539239", "0.45322907", "0.45197827", "0.45114303", "0.44954538", "0.4471718", "0.44707248", "0.44329673", "0.44182086", "0.4394652", "0.43828392", "0.43818018", "0.43756312", "0.43674797", "0.4358467", "0.43583634", "0.43377095", "0.43326426", "0.43190917", "0.43170837", "0.43170518", "0.4315277", "0.43100244", "0.43042606", "0.4281929", "0.42763203", "0.42742866", "0.42677337", "0.42546785", "0.42492443", "0.42470977", "0.42450422", "0.42311507", "0.4228444", "0.42253637", "0.42139634", "0.4209558", "0.4207739", "0.42069575", "0.42013246", "0.4199702", "0.41962633", "0.41961655", "0.41875303", "0.4174052", "0.41729948", "0.41690937", "0.41668785", "0.41610786", "0.41585782", "0.4155913", "0.41480196", "0.41467166", "0.414169", "0.4140964", "0.41406393", "0.4128309", "0.41282314", "0.41223574", "0.4120978", "0.4119482", "0.41179514", "0.41144532", "0.41041926", "0.40998688", "0.40998688", "0.40998688", "0.4097789", "0.40918988", "0.40910348" ]
0.80556154
0
Get the `CommandHelp` object for the given subcommand. `name` may be an alias, in which case it is resolved to the appropriate subcommand.
def get_subcmd(self, name: str) -> "CommandHelp":
    try:
        return self.subcmds[name]
    except KeyError:
        # Try looking up by alias
        for sub_name, sub_help in self.subcmds.items():
            for alias in sub_help.aliases:
                if name == alias:
                    return self.subcmds[sub_name]
        raise
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fetch_subcommand(self, name):\n try:\n subcommand_class = self.subcommands[name]\n except KeyError:\n self.print_command_unkown_error(name)\n sys.exit(1)\n return subcommand_class(self.prog, name, self.argv[2:], self.stdout)", "def HelpForCmd(self, name):\n canonical_name = self._cmd_alias_list.get(name)\n if not canonical_name:\n raise CmdNotFoundError('Command not found: \"%s\"' % name)\n cmd = self._cmd_list[canonical_name]\n if cmd.__doc__.strip():\n flags_help = ''\n cmd_flags = self._flag_values_by_cmd[canonical_name]\n if cmd_flags.RegisteredFlags():\n prefix = ' '\n flags_help += '%s\\nFlags for %s:\\n' % (prefix, name)\n flags_help += cmd_flags.GetHelp(prefix + ' ')\n flags_help = _DeleteSpecialFlagHelp(flags_help)\n flags_help += '\\n\\n'\n return cmd.__doc__ + flags_help\n else:\n raise AssertionError('No class docstring found for command %s' % name)", "def _subcommand_for_name(self, name):\n for subcommand in self.subcommands:\n if name == subcommand.name or \\\n name in subcommand.aliases:\n return subcommand\n return None", "def get_command(self, ctx: Context, cmd_name: str) -> Command:\n next_command = self.command.pop(0)\n if not self.command:\n return DocsBaseCommand(\n name=next_command,\n short_help=f\"Documentation for {self.command_string}\",\n callback=self.command_callback,\n )\n return DocsSubCommand(command=self.command)", "def get_subcommand_for_name(name: str) -> Optional[Subcommand]:\n matching = [s for s in subcommands if s.name.lower() == name.lower()]\n if matching:\n return matching[0]", "async def module_command_help(self, ctx, parsed):\n\n def _create_commandhelp(request):\n usage, desc = request.format_help().split(\"\\n\\n\")[:2]\n usage = usage.partition(\" \")[2]\n desc = desc.rstrip()\n args, opts, subcmds, aliases = {}, {}, {}, []\n prev_arg = ()\n for arg in request._get_positional_actions():\n name = arg.metavar or arg.dest\n if isinstance(arg, _SubParsersAction):\n args[name] = (arg.help, True)\n prev_sub = ()\n for subname, subparser in arg.choices.items():\n # Aliases follow the canonical name\n if prev_sub and subparser is prev_sub[1]:\n subcmds[prev_sub[0]].aliases.append(subname)\n else:\n subcmds[subname] = _create_commandhelp(subparser)\n # Don't include parent command in subcommand name\n subcmds[subname].name = subname\n prev_sub = (subname, subparser)\n else:\n # Aliases follow the canonical name\n if prev_arg and arg is prev_arg[1]:\n args[prev_arg[0]].aliases.append(name)\n else:\n args[name] = (arg.help, False)\n prev_arg = (name, arg)\n for opt in request._get_optional_actions():\n names = tuple(opt.option_strings)\n if opt.nargs == 0 or opt.const:\n # Don't make it seem like flag options take a value\n metavar = None\n else:\n metavar = opt.metavar or opt.dest\n opts[names] = (metavar, opt.help)\n return CommandHelp(\n HelpType.CMD,\n request.name,\n desc,\n usage,\n aliases=aliases,\n args=args,\n opts=opts,\n subcmds=subcmds,\n )\n\n if parsed.args[\"command\"]:\n help_args = parsed.args[\"command\"]\n if len(help_args) > 1 and help_args[0:2] == [\"help\"] * 2:\n await ctx.reply_command_result(parsed, \"I'm afraid that you're far beyond any help...\")\n return\n try:\n request = self._commands[help_args[0]]\n except KeyError:\n cmd_help = CommandHelp(HelpType.NO_SUCH_CMD, help_args[0])\n else:\n cmd_help = _create_commandhelp(request)\n help_args.pop(0)\n subcmd = cmd_help\n for sub_request in help_args:\n try:\n parent = subcmd\n subcmd = cmd_help.get_subcmd(sub_request)\n except KeyError:\n cmd_help = 
CommandHelp(HelpType.NO_SUCH_SUBCMD, sub_request, parent=parent)\n break\n else:\n cmd_help = subcmd\n elif parsed.args[\"module\"]:\n mod_id = parsed.args[\"module\"]\n if mod_id not in self._features and mod_id != \"core\":\n cmd_help = CommandHelp(HelpType.NO_SUCH_MOD, mod_id)\n else:\n try:\n parsers = [parser for parser in self._commands.iter_by_module(mod_id)]\n except KeyError:\n parsers = []\n desc = parsers[0].module.description\n cmds = {}\n for parser in parsers:\n mod = cmds.setdefault(mod_id, {})\n mod[parser.name] = parser.description\n cmd_help = CommandHelp(HelpType.MOD, mod_id, desc, cmds=cmds)\n else:\n cmds = {}\n for mod_id, parsers in self._commands.pairs():\n for parser in parsers:\n mod = cmds.setdefault(mod_id, {})\n mod[parser.name] = parser.description\n cmd_help = CommandHelp(HelpType.ALL, cmds=cmds)\n await ctx.core_command_help(parsed, cmd_help)", "def get_command_with_name(self, command_name):\n return self.commands[command_name]", "def get_command(self, ctx, cmd_name):\n cmd_name = self.MAP.get(cmd_name, cmd_name)\n return click.Group.get_command(self, ctx, cmd_name)", "async def help_command(self, ctx, *, cmd_name: str=None):\n bot_prefix = '@Randy '\n # Get command object\n cmd_obj = self.cmd(cmd_name)\n\n # Handle no command found\n if cmd_obj is None:\n return await ctx.error(f'Command {cmd_name} not found')\n em = discord.Embed(title=cmd_obj.name, description=cmd_obj.help, color=self.color)\n\n # Input aliases and parameters to embed\n if cmd_obj.aliases:\n em.add_field(name='Aliases', value='\\n'.join([f'\\u2022 {x}' for x in cmd_obj.aliases]))\n if cmd_obj.clean_params:\n em.add_field(name='Parameters', value='\\n'.join([f'\\u2022 {x}' for x in cmd_obj.clean_params]))\n\n # Handle group commands\n if isinstance(cmd_obj, commands.core.Group):\n em.add_field(name='Group commands',\n value='\\n'.join([f'\\u2022 {x}' for x in cmd_obj.commands]),\n inline=False)\n\n # Add usage last\n em.add_field(name='Usage',\n value=f'```{bot_prefix}\\u200b{cmd_name} '\n f'{\" \".join([f\"<{x}>\" for x in cmd_obj.clean_params])}```',\n inline=False)\n\n await ctx.send(embed=em)", "def get_command(self, ctx, name):\n commands = self._iter_commands()\n return commands[name].load()", "def try_command(commandName, commandOptsList):\n retString = help.__doc__ # default to help doc\n if commandName in commandDict.keys():\n retString = commandDict[commandName](commandName, commandOptsList)\n return retString", "def get_help(self):\n helpstr = \"\"\n helpstr += self.get_usage()\n helpstr += \"\\n\"\n helpstr += textwrap.fill(self.expand_prog_name(\"Type '%prog help <subcommand>' for help on a specific subcommand.\"), 78)\n helpstr += \"\\n\"\n helpstr += textwrap.fill(self.expand_prog_name(\"Type '%prog --version' to see the program version.\"), 78)\n helpstr += \"\\n\"\n helpstr += textwrap.fill(self.expand_prog_name(\"Type '%prog --verbose-load' to see the packages and plug-ins detected, and if plug-ins are successfully loaded.\"), 78)\n helpstr += \"\\n\\n\"\n\n helpstr += textwrap.fill(\"Subcommands consist of built-in subcommands and subcommands provided by installed plug-ins.\", 78)\n helpstr += \"\\n\\n\"\n\n helpstr += \"Available subcommands:\\n\"\n helpstr += self.sbtools.get_subcommands()\n\n return helpstr", "def help_for_command(command):\n help_text = pydoc.text.document(command)\n # remove backspaces\n return re.subn('.\\\\x08', '', help_text)[0]", "def print_help(self, prog_name, subcommand):\r\n parser = self.create_parser(prog_name, subcommand)\r\n 
parser.print_help()", "def get_command(self, ctx, cmd_name):\n path = \"%s.%s\" % (__name__, cmd_name)\n path = path.replace(\"-\", \"_\")\n try:\n module = importlib.import_module(path)\n return getattr(module, 'cli')\n except ModuleNotFoundError as ex:\n print(ex.name)\n return None", "def get_command_help(self, module_name, command_name):\r\n command = self.env.get_command(module_name, command_name)\r\n\r\n default_format = 'raw'\r\n if sys.stdout.isatty():\r\n default_format = 'table'\r\n\r\n arg_doc = command.__doc__\r\n\r\n if 'confirm' in command.options:\r\n arg_doc += \"\"\"\r\nPrompt Options:\r\n -y, --really Confirm all prompt actions\r\n\"\"\"\r\n\r\n if '[options]' in arg_doc:\r\n arg_doc += \"\"\"\r\nStandard Options:\r\n --format=ARG Output format. [Options: table, raw] [Default: %s]\r\n -C FILE --config=FILE Config file location. [Default: ~/.softlayer]\r\n --debug=LEVEL Specifies the debug noise level\r\n 1=warn, 2=info, 3=debug\r\n --timings Time each API call and display after results\r\n --proxy=PROTO:PROXY_URL HTTP[s] proxy to be use to make API calls\r\n -h --help Show this screen\r\n\"\"\" % default_format\r\n return arg_doc.strip()", "def get_command(self, ctx, name):\n try:\n if sys.version_info[0] == 2:\n name = name.encode('ascii', 'replace')\n mod = __import__('cli.commands.cmd_' + name, None, None, ['cli'])\n except ImportError:\n exit(1)\n\n return mod.cli", "def get_help(self,command):\n if \"help\" in self.commands[command]:\n return self.commands[command][\"help\"]\n else:\n return \"No help defined for this command.\"", "def print_help(self, prog_name, subcommand):\n parser = self.create_parser(prog_name, subcommand)\n parser.print_help()", "def print_help(self, prog_name, subcommand):\n parser = self.create_parser(prog_name, subcommand)\n parser.print_help()", "def print_help(self, prog_name, subcommand):\n parser = self.create_parser(prog_name, subcommand)\n parser.print_help()", "async def help(self, ctx, *, command_name: str=None):\n bot_prefix = '@Randy '\n # Shortcut to command search\n if command_name is not None:\n return await ctx.invoke(self.cmd('help command'), cmd_name=command_name)\n\n em = discord.Embed(title='Help',\n description='**Permissions:** The permissions required to function :-\\n'\n '`Send Messages`, `Manage Messages`, `Embed Links`\\n'\n '--\\nTo get help or more information on a specific command, use:\\n'\n '`{bot_prefix}help <command name>`\\n'\n '--\\nRead my messy code [here](http://github.com/xKynn/RandomRumble)'\n '--\\nIf you like my work and would like to help me, '\n 'Ko-Fi/Paypal: [Link](https://ko-fi.com/D1D6EXXV)\\n',\n color=self.color)\n\n em.set_footer(text=\"Contact me at Demo#7645\")\n\n # This can't go in the init because help isn't loaded last & thus misses some commands\n em.add_field(name=\"Commands\", value=' • '+'\\n • '.join(f\"***{c.name}*** - {c.short_doc}\" for c in self.bot.commands if\n c.name not in ['pob', 'link', 'convert']))\n try:\n await ctx.send(embed=em)\n except:\n await ctx.send(\"`Embed Links` permission is required to see the help!\")", "def help(ctx, topic, **kw):\n # The help command implementation is taken from\n # https://www.burgundywall.com/post/having-click-help-subcommand\n if topic is None:\n click.echo(ctx.parent.get_help())\n else:\n click.echo(main.commands[topic].get_help(ctx))", "def get_commandname(self):\n for line in self.helplines:\n if \"Usage:\" in line and self.parser_type is 'optparse':\n tmp = line.split()\n return tmp[1]\n if \"usage:\" in line and self.parser_type 
is 'argparse':\n tmp = line.split()\n return tmp[1]\n return None", "def getCommand(self, name):\n return self.commands[name]()", "def make_help_cmd(cmd, docstring):\n def help_cmd(message=docstring, cmd=cmd):\n print('=' * 15)\n print('\\nHelp for command %s:\\n' % (cmd,))\n print(message.strip())\n print('')\n print('=' * 15)\n print('')\n\n return help_cmd", "def do_help(self, command_name):\n if not command_name:\n print _PVBlotInterp.__doc__\n print \"The following commands are supported:\"\n print \" \",\n blotish_commands = self._blotish_commands.keys()\n blotish_commands.sort()\n for c in blotish_commands:\n print c,\n print\n print\n print \"For more information on any command, try help <command>.\"\n return\n try:\n command = self.get_unique_command(command_name)\n print command.__doc__\n except blotish.BlotishError, err:\n blot_common.print_blot_error(err)", "async def _help(ctx, *, command_name: str=None):\n if command_name:\n command = bot.get_command(command_name)\n if not command:\n return await ctx.send(\"No such command!\")\n return await ctx.send(f\"```\\n{ctx.prefix}{command.name} {command.signature}\\n\\n{command.help or 'Missing description'}```\")\n description = []\n for name, cog in bot.cogs.items():\n entries = [\" - \".join([cmd.name, cmd.short_doc or \"Missing description\"]) for cmd in cog.get_commands() if await _can_run(cmd, ctx) and not cmd.hidden]\n if entries:\n description.append(f\"**{name}**:\")\n description.append(\"• \" + \"\\n• \".join(entries))\n await ctx.send(embed=discord.Embed(description=\"\\n\".join(description), color=ctx.me.color))", "def get_command(self, command_name: str):\n self._bot.all_commands.get(command_name, None)", "def print_specific_help(tool_name):\r\n if tool_name not in AvailableCommands.commands:\r\n print 'Command is not supported: {0}'.format(tool_name)\r\n return\r\n cmd = AvailableCommands.commands[tool_name]\r\n\r\n print 'Usage of {0}:'.format(cmd.name)\r\n print '\\nAccepted input types:\\n{0}'.format(str(list(cmd.input_types)))\r\n print '\\nOutput types:\\n{0}'.format(str(cmd.output_types))\r\n print '\\nMandatory arguments:\\n{0}'.format(str(cmd.user_mandatory_args))\r\n print '\\nOptional arguments:\\n{0}'.format(str(cmd.user_optional_args))\r\n print '\\nParallelizable:\\n{0}'.format(str(cmd.parallelizable))\r\n print '\\nAdditional description:\\n{0}'.format(str(cmd.help_description))\r\n print ''", "def _get_command(self, command_name):\n try:\n return self._commands[command_name]\n except KeyError:\n raise UnsupportedCommand(\n \"Command: {} not supported\".format(command_name)\n )", "def fetch_command(self, subcommand):\n try:\n app_name = get_commands()[subcommand]\n except KeyError:\n sys.stderr.write(\"Unknown command: %r\\nType '%s help'\"\n \" for usage.\\n\" % \\\n (subcommand, self.prog_name))\n sys.exit(1)\n if isinstance(app_name, BaseCommand):\n # If the command is already loaded, use it directly.\n klass = app_name\n else:\n klass = load_command_class(app_name, subcommand)\n return klass", "def subcmd_help(word, word_eol):\n\tif len(word) > 1:\n\t\ttopic = word[1]\n\t\tif topic in subcommands:\n\t\t\tdoprint('help', subcommands[topic].__doc__)\n\t\telse:\n\t\t\tdoprint('help', 'Unknown subcommand \"%s\". Try \"/mt_irc help\".' 
% topic)\n\telse:\n\t\tfor subcmd in subcommands:\n\t\t\tdoprint('help', subcommands[subcmd].__doc__)", "def get_command(self, ctx, name):\n if name not in self.daemon.list_actions():\n return None\n\n action = self.daemon.get_action(name)\n\n @wraps(action)\n def command(*args, **kwargs):\n return action(*args, **kwargs)\n\n if name in {'start', 'stop', 'restart'}:\n if name in {'start', 'restart'}:\n command = click.option(\n '--debug', is_flag=True,\n help='Do NOT detach and run in the background.',\n )(command)\n if name in {'stop', 'restart'}:\n command = click.option(\n '--force', is_flag=True,\n help='Kill the daemon forcefully after the timeout.',\n )(command)\n command = click.option(\n '--timeout', type=int, default=None,\n help=('Number of seconds to wait for the daemon to stop. '\n 'Overrides \"stop_timeout\" from daemon definition.'),\n )(command)\n if isinstance(self.daemon, MultiDaemon):\n command = click.option(\n '--worker-id', type=int, default=None,\n help='The ID of the worker to {}.'.format(name),\n )(command)\n elif name == 'status':\n command = click.option(\n '--fields', type=str, default=None,\n help='Comma-separated list of process info fields to display.',\n )(command)\n command = click.option(\n '--json', is_flag=True,\n help='Show the status in JSON format.',\n )(command)\n if isinstance(self.daemon, MultiDaemon):\n command = click.option(\n '--worker-id', type=int, default=None,\n help='The ID of the worker whose status to get.',\n )(command)\n else:\n # This is a custom action so try to parse the CLI options\n # by inspecting the function\n for option_args, option_kwargs in _parse_cli_options(action):\n command = click.option(\n *option_args, **option_kwargs)(command)\n\n # Make it into a click command\n command = click.command(name)(command)\n\n return command", "def fetch_command(self, global_options, subcommand):\r\n commands = self.get_commands(global_options)\r\n try:\r\n klass = commands[subcommand]\r\n except KeyError:\r\n sys.stderr.write(\"Unknown command: %r\\nType '%s help' for usage.\\nMany commands will only run at project directory, maybe the directory is not right.\\n\" % \\\r\n (subcommand, self.prog_name))\r\n sys.exit(1)\r\n return klass", "async def getHelp(ctx, cmd):\n wikiMods = discord.utils.get(ctx.message.author.guild.roles, name=\"Wiki Moderator\")\n cmdInfo = next((c for c in COMMAND_INFO if c[\"name\"] == cmd or cmd in c[\"aliases\"]), None)\n if cmdInfo == None:\n return assembleEmbed(\n title=f\"`{cmd}`\",\n desc=\"Cannot find command with this name. 
Try again buddy.\",\n webcolor=\"red\"\n )\n else:\n roles = [(discord.utils.get(ctx.message.author.guild.roles, name=r)) for r in cmdInfo['access']]\n commandFields = [\n {\n \"name\": \"Parameters\",\n \"value\": \"\\n\".join([f\"`{p['name']}` - {p['description']}\" for p in cmdInfo['parameters']]) if len(cmdInfo['parameters']) > 0 else \"`none`\",\n \"inline\": False\n }\n ]\n # If command has flags show those, if not do nothing\n if 'flags' in cmdInfo:\n commandFields.append({\n \"name\": \"Flags\",\n \"value\": \"\\n\".join([f\"`-{u['name']}` - {u['description']}\" for u in cmdInfo['flags']]),\n \"inline\": False\n })\n # Add available roles\n commandFields.extend([\n {\n \"name\": \"Usage\",\n \"value\": \"\\n\".join([f\"`{u['cmd']}` - {u['result']}\" for u in cmdInfo['usage']]),\n \"inline\": False\n },\n {\n \"name\": \"Available To\",\n \"value\": \"\\n\".join([f\"{r.mention}\" for r in roles]),\n \"inline\": False\n }\n ]\n )\n return assembleEmbed(\n title=f\"`!{cmdInfo['name']}`\",\n desc=f\"{cmdInfo['description']}\",\n fields=commandFields,\n webcolor=\"gold\"\n )", "def load_parent_command(name):\n app_name = get_parent_commands()[name]\n module = import_module('%s.management.commands.%s' % (app_name, name))\n return module.Command", "def GetToolShortHelp(self, tool_id):\r\n\r\n tool = self.FindTool(tool_id)\r\n if not tool:\r\n return \"\"\r\n\r\n return tool.short_help", "def help_parser():\n usage = \"usage: %prog help command\"\n description = \"Lists all available commands in the transifex command \"\\\n \"client. If a command is specified, the help page of the specific \"\\\n \"command is displayed instead.\"\n\n parser = OptionParser(usage=usage, description=description)\n return parser", "def help(cmd, cmdArgs):\n global commandDict\n retInfo = []\n if len(cmdArgs) > 0:\n #return help on a single function\n if cmdArgs[0] in commandDict.keys():\n return commandDict[cmdArgs[0]].__doc__\n\n #else, return general info\n retInfo = ['pypeople: A command line tool for vCard management',\n 'Version:' + __version__,\n 'Available Commands:']\n #fill in more info here\n for cmdName in commandDict.keys():\n cmdFunc = commandDict[cmdName]\n cmdDoc = str(cmdName) + ': ' + str(cmdFunc.__doc__) if cmdFunc.__doc__ is not None else 'Undocumented Function'\n retInfo.append('\\t' + cmdDoc)\n\n return '\\n'.join(retInfo)", "def add_subcommand(\n subp: _SubParsersAction, name: str, description: Optional[str] = None, **kwargs\n ) -> \"CommandParser\":\n desc_help = {\"description\": description, \"help\": description}\n return subp.add_parser(name, **desc_help, **kwargs)", "def get_command_help_message(\n project_dictionary: Dictionaries, command_name: str\n ) -> str:\n command_template = CommandSendCommand.get_command_template(\n project_dictionary, command_name\n )\n return misc_utils.get_cmd_template_string(command_template)", "def get(self, command_name):\n if command_name not in self._commands:\n raise CommandNotFound(\"Command {} not found\".format(command_name))\n return self._commands[command_name]", "def fetch_command(self, subcommand):\n # Get commands outside of try block to prevent swallowing exceptions\n commands = get_commands()\n try:\n app_name = commands[subcommand]\n except KeyError:\n possible_matches = get_close_matches(subcommand, commands)\n sys.stderr.write(\"Unknown command: %r\" % subcommand)\n if possible_matches:\n sys.stderr.write(\". 
Did you mean %s?\" % possible_matches[0])\n sys.stderr.write(\"\\nType '%s help' for usage.\\n\" % self.prog_name)\n sys.exit(1)\n if isinstance(app_name, BaseCommand):\n # If the command is already loaded, use it directly.\n klass = app_name\n else:\n klass = load_command_class(app_name, subcommand)\n return klass", "def rpc_help(self, cmd: str = None) -> str:\n if cmd:\n return self._call_command([\"help\", cmd])\n return self._call_command([\"help\"])", "def add_subparser(sp, name, **kwargs):\n kwargs[\"add_help\"] = False\n kwargs['formatter_class'] = ap.ArgumentDefaultsHelpFormatter\n sparser = sp.add_parser(name, **kwargs)\n\n sparser.add_argument(\"-h\", \"--help\", action=custom_help(),\n help=\"print the short or long help\")\n\n return sparser", "def get_usage_command(self):\n return textwrap.fill(self.sbtools.parser.expand_prog_name(\"Type '%prog help %s' for usage.\") % (self.tool.get_command()), 78)", "def get_usage_command(self):\n return textwrap.fill(self.expand_prog_name(\"Type '%prog help' for usage information.\"), 78)", "def do_help(self, args): \n if args.command:\n if len(args.command) > 1:\n command = args.command[0] +\"-\" + args.command[1]\n else:\n command = args.command[0]\n if command in self.subcommands:\n self.subcommands[command].print_help()\n else:\n print((\"'%s' is not a valid subcommand\") %\n args.command)\n else:\n self.parser.print_help()", "def help(self, irc, msg, args, command):\n command = map(callbacks.canonicalName, command)\n (maxL, cbs) = irc.findCallbacksForArgs(command)\n if maxL == command:\n if len(cbs) > 1:\n names = sorted([cb.name() for cb in cbs])\n irc.error(format('That command exists in the %L plugins. '\n 'Please specify exactly which plugin command '\n 'you want help with.', names))\n else:\n assert cbs, 'Odd, maxL == command, but no cbs.'\n irc.reply(cbs[0].getCommandHelp(command, False))\n else:\n irc.error(format('There is no command %q.',\n callbacks.formatCommand(command)))", "async def get_help(ctx, commandename=\"\"):\n to_send = \"\"\"\\n> \\n> **Commands List** : \\n\"\"\"\n to_send += \"> \\n\"\n for commande in client.commands:\n to_send += \"> \\n\"\n to_send += f\"\"\"> `!{commande.name}` : {commande.description}\\n\"\"\"\n\n to_send += \"> \\n\"\n to_send += f\"\"\"> _Bot created for the **Ursina** discord server_ \\n\"\"\"\n await ctx.send(content=to_send)", "def _find_subcommand(args):\n subcmd = args[1]\n if subcmd in [\n \"cfg\"\n # , 'init',\n ]:\n return subcmd\n else:\n return None", "def get_command(self, object_name, user_key = None):\n\t\treturn self.get_object('command',object_name, user_key = user_key)", "def get_command(self, module_name, command_name):\r\n actions = self.plugins.get(module_name) or {}\r\n if command_name in actions:\r\n return actions[command_name]\r\n if None in actions:\r\n return actions[None]\r\n raise InvalidCommand(module_name, command_name)", "def do_help(self, arg):\n if arg:\n # Getting help for a specific command\n funcname = self._func_named(arg)\n if funcname:\n # No special behavior needed, delegate to cmd base class do_help()\n cmd.Cmd.do_help(self, funcname[3:])\n else:\n # Show a menu of what commands help can be gotten for\n self._help_menu()", "def GetToolLongHelp(self, tool_id):\r\n\r\n tool = self.FindTool(tool_id)\r\n if not tool:\r\n return \"\"\r\n\r\n return tool.long_help", "def cmd_help(ctx):\n echo(ctx.parent.get_help())", "def help(self, cmd=\"\", *, fail=\"\"):\n class_dict = dict(type(self).__dict__)\n # Add this function to class, so that when 
subclassing,\n # help for help is found\n class_dict.update({\"help\": self.help})\n if cmd.startswith(self.predicate):\n # Strip predicate\n cmd = cmd[len(self.predicate) :]\n # Check that command exists and is not\n # private, protected or special method\n if (not cmd.startswith(\"_\")) and cmd in class_dict.keys():\n item = class_dict[cmd]\n if callable(item):\n if item.__doc__:\n return \"Help on command '{}':\\n. {}\".format(\n cmd, \"\\n. \".join(cleandoc(item.__doc__).split(\"\\n\"))\n )\n return \"No help on command '{}'\".format(cmd)\n # If no cmd given or wrong cmd given, return commands\n commands = []\n for key, value in class_dict.items():\n if not key.startswith(\"_\"):\n if callable(value):\n commands.append(key)\n msg = (\n \"Commands:\\n {}\".format(\", \".join(commands))\n + \"\\n for more help on command, use \"\n + \"{}help command\".format(self.predicate)\n )\n if fail:\n msg = fail + \"\\n\" + msg\n return msg", "def command_help(self, command):\n self.commands[command].command_help()", "def get_command(self, context, name):\n\t\tif name not in self.commands:\n\t\t\tclue = lnk.errors.Message('Did you mess up the default settings?',\n\t\t\t\t\t\t\t\t\t level=0)\n\t\t\ttry_message = lnk.errors.Message(\"See what 'lnk config -k service'\"\n\t\t\t\t\t\t\t\t\t \t\t \" says.\", level=1)\n\t\t\traise lnk.errors.UsageError('Invalid default service.',\n\t\t\t\t\t\t\t\t\t\tClue=clue,\n\t\t\t\t\t\t\t\t\t\tTry=try_message)\n\t\treturn self.commands[name]", "def getCommandsHelp(root=__name__):\n groups = {}\n commands = sorted([i for i in getCommands(root).iteritems() if i[0] != '__module__'])\n for cmd, topcmd in commands:\n module = topcmd['__module__']\n descr = getattr(module, 'SHORT_DESCRIPTION', \"FIXME: No description\")\n group = getattr(module, 'GROUP', None)\n name = '{:<12}'.format(cmd)\n groups.setdefault(group, []).append(' %s %s' % (name, descr))\n \n parts = []\n for group, members in groups.iteritems():\n if group:\n parts.append(group+' subcommands:')\n else:\n parts.append('subcommands:')\n parts.extend(members)\n parts.append('')\n return '\\n'.join(parts)", "def __getattr__(self, name):\n return Command(self.cmd, name)", "def help(self):\n help = ''\n cmds = [(x, y) for x, y in Commands.__dict__.iteritems()]\n cmds.sort(key=lambda x: x[0])\n for name, member in cmds:\n if name.startswith('cmd_') and callable(member):\n help += ' %s\\n' % ' '.join([name[4:]] +\n ['<%s>' % x for x in\n inspect.getargspec(member).args[1:]])\n if member.__doc__:\n help += ' %s\\n' % member.__doc__.splitlines()[0]\n return 'Available commands:\\n%s' % help", "def help(cls, entry: \"TaskEntry\"):\n executor = entry.executor\n\n if cls.__doc__:\n out = fmt.FormatList(executor)\n out.add(fmt.Header(f\"Help: {cls.name}\"))\n out.add(fmt.Line(cls.__doc__))\n out.add(fmt.Footer())\n executor.send(out)\n else:\n executor.msg(text=\"Help is not implemented for this command.\")", "def parse_help_command(args):\r\n if len(args) < 2:\r\n print_generic_help()\r\n elif len(args) == 2:\r\n print_specific_help(args[1])\r\n else:\r\n print 'Error! Found too many arguments for --help! 
Use --help with ' \\\r\n '0 arguments to print generic help or type --help <tool_name> ' \\\r\n 'to get tool specific help!'", "def do_help(self, args):\n if getattr(args, 'command', None):\n if args.command in self.subcommands:\n self.subcommands[args.command].print_help()\n else:\n raise CommandError(\"'%s' is not a valid subcommand\" %\n args.command)\n else:\n self.parser.print_help()", "def help_command(bot: Phial) -> str:\n help_text = cast(str, bot.config.get(\"baseHelpText\", \"\"))\n if help_text:\n help_text += \"\\n\"\n for command in bot.commands:\n if command.hide_from_help_command:\n continue\n command_doc = command.help_text\n if not command_doc:\n # If no help text default to blank string\n command_doc = \"\"\n command_help_text = parse_help_text(command_doc)\n help_text += \"*{0}* - {1}\\n\".format(command.pattern_string, command_help_text)\n return help_text", "def cog_subcommand(\n *,\n base,\n subcommand_group=None,\n name=None,\n description: str = None,\n base_description: str = None,\n base_desc: str = None,\n subcommand_group_description: str = None,\n sub_group_desc: str = None,\n guild_ids: typing.List[int] = None,\n options: typing.List[dict] = None,\n connector: dict = None\n):\n base_description = base_description or base_desc\n subcommand_group_description = subcommand_group_description or sub_group_desc\n\n def wrapper(cmd):\n desc = description or inspect.getdoc(cmd)\n if options is None:\n opts = manage_commands.generate_options(cmd, desc, connector)\n else:\n opts = options\n\n _sub = {\n \"func\": cmd,\n \"name\": name or cmd.__name__,\n \"description\": desc,\n \"base_desc\": base_description,\n \"sub_group_desc\": subcommand_group_description,\n \"guild_ids\": guild_ids,\n \"api_options\": opts,\n \"connector\": connector,\n }\n return CogSubcommandObject(_sub, base, name or cmd.__name__, subcommand_group)\n\n return wrapper", "def DoHelp(options, args):\n __pychecker__ = 'unusednames=options'\n if len(args) == 1 and args[0] in COMMAND_USAGE_TEXT:\n print(COMMAND_USAGE_TEXT[args[0]])\n else:\n raise gclient_utils.Error(\"unknown subcommand '%s'; see 'gclient help'\" %\n args[0])", "def do_help(self, arg):\n if arg:\n funcname = self.func_named(arg)\n if funcname:\n fn = getattr(self, funcname)\n try:\n fn.optionParser.print_help(file=self.stdout)\n except AttributeError:\n cmd.Cmd.do_help(self, funcname[3:])\n else:\n cmd.Cmd.do_help(self, arg)", "def do_help(self, args):\n if getattr(args, 'command', None):\n if args.command in self.subcommands:\n self.subcommands[args.command].print_help()\n else:\n raise Exception(\"'%s' is not a valid subcommand\" %\n args.command)\n else:\n self.parser.print_help()", "def do_help(self, args):\n if getattr(args, 'command', None):\n if args.command in self.subcommands:\n self.subcommands[args.command].print_help()\n else:\n raise exc.CommandError(\"'%s' is not a valid subcommand\" %\n args.command)\n else:\n self.parser.print_help()", "def do_help(self, args):\n if getattr(args, 'command', None):\n if args.command in self.subcommands:\n self.subcommands[args.command].print_help()\n else:\n raise exc.CommandError(\"'%s' is not a valid subcommand\" %\n args.command)\n else:\n self.parser.print_help()", "def build_subcommands_parser(parser, module):\n mdefs = module.__dict__\n keys = list(mdefs.keys())\n keys.sort()\n subparsers = parser.add_subparsers(help='sub-command help')\n for command in keys:\n if command.startswith('pub_'):\n func = module.__dict__[command]\n parser = subparsers.add_parser(command[4:], 
help=func.__doc__)\n parser.set_defaults(func=func)\n argspec = inspect.signature(func)\n positionals = []\n short_opts = set([])\n for arg in argspec.parameters.values():\n if arg.default == inspect.Parameter.empty:\n positionals += [arg]\n else:\n param_name = arg.name.replace('_', '-')\n short_opt = param_name[0]\n if not (param_name.startswith('no') or\n (short_opt in short_opts)):\n opts = ['-%s' % short_opt, '--%s' % param_name]\n else:\n opts = ['--%s' % param_name]\n short_opts |= set([short_opt])\n if isinstance(arg.default, list):\n parser.add_argument(*opts, action='append')\n elif isinstance(arg.default, dict):\n parser.add_argument(*opts, type=json.loads)\n elif arg.default is False:\n parser.add_argument(*opts, action='store_true')\n elif arg.default is not None:\n parser.add_argument(*opts, default=arg.default)\n else:\n parser.add_argument(*opts)\n if positionals:\n for arg in positionals[:-1]:\n parser.add_argument(arg.name)\n parser.add_argument(positionals[-1].name, nargs='*')", "def dispatch_help(args):\n command = args.subcommand\n if command is None:\n command = 'help'\n args.parsers[command].print_help()\n\n cmd_func = getattr(EtcMaint, 'cmd_%s' % command, None)\n if cmd_func:\n lines = cmd_func.__doc__.splitlines()\n print('\\n%s\\n' % lines[0])\n paragraph = []\n for l in dedent('\\n'.join(lines[2:])).splitlines():\n if l == '':\n if paragraph:\n print('\\n'.join(wrap(' '.join(paragraph), width=78)))\n print()\n paragraph = []\n continue\n paragraph.append(l)\n if paragraph:\n print('\\n'.join(wrap(' '.join(paragraph), width=78)))", "def help_command(update: Update, _: CallbackContext) -> None:\n update.message.reply_text('Help!')", "def help_command(update: Update, _: CallbackContext) -> None:\n update.message.reply_text('Help!')", "def help(command=None):\n if command is None: \n # print first line of docstring\n for cmd in commands:\n ds = commands[cmd].__doc__.split('\\n')[0]\n print \"%-16s %s\" % (cmd,ds)\n else:\n print commands[command].__doc__", "def command_help(self):\n print(\"Command \", self)\n print(\"\\t\\thelp (Get help for command)\")\n\n params = self.params.copy()\n del params[\"help\"]\n\n if len(params) == 0:\n print(\"This command has no parameters\")\n return\n\n print(\"Parameters:\")\n for info in params.values():\n print(\" %s\" % info.get_basic_info())\n description = info.get_desc()\n if description != \"\":\n print(textwrap.fill(description,\n initial_indent=\" \",\n subsequent_indent=\" \",\n width=70))", "def get_user_command_by_name(self, uid, command_name):\n uc_data = self.list_user_commands(uid)\n for uc in uc_data:\n if uc['name'] == command_name:\n return ZenossUserCommand(\n self.api_url,\n self.api_headers,\n self.ssl_verify,\n uc,\n parent=self._check_uid(uid)\n )\n\n return None", "async def meow(self, ctx: vbu.Context):\n\n if ctx.invoked_subcommand is None:\n return await ctx.send_help(ctx.command)", "def help_command(update, context):\n update.message.reply_text('Help!')", "def __init__(self, *args, **kwargs):\n # The subcommand array, with the help command included.\n self.subcommands = list(kwargs.pop('subcommands', []))\n self.subcommands.append(self._HelpSubcommand)\n\n # A more helpful default usage.\n if 'usage' not in kwargs:\n kwargs['usage'] = \"\"\"\n %prog [global options] COMMAND [ARGS...]\n %prog help COMMAND\"\"\"\n\n # Super constructor.\n optparse.OptionParser.__init__(self, *args, **kwargs)\n\n # Adjust the help-visible name of each subcommand.\n for subcommand in self.subcommands:\n 
subcommand.parser.prog = '%s %s' % \\\n (self.get_prog_name(), subcommand.name)\n\n # Our root parser needs to stop on the first unrecognized argument.\n self.disable_interspersed_args()", "def find_command(cmd):\n if cmd:\n root = '.'.join([COMMANDS_PACKAGE_NAME] + cmd)\n else:\n root = COMMANDS_PACKAGE_NAME\n try:\n return _get_commands(root)['__module__'].COMMAND\n except KeyError:\n LOGGER.debug('%r not recognized as a TAU command', cmd)\n resolved = _resolve(cmd, cmd, _COMMANDS[SCRIPT_COMMAND])\n LOGGER.debug('Resolved ambiguous command %r to %r', cmd, resolved)\n return find_command(resolved)\n except AttributeError as err:\n raise InternalError(\"'COMMAND' undefined in %r\" % cmd) from err", "def cmd_help(self, commands=None, usage=False):\n if commands:\n usage = True\n commands = {self.approx.decmd(c.lower()) for c in commands}\n rejects = commands - self.approx.keys()\n for reject in rejects:\n self.put_pretty(\"No command named %r\" % reject)\n continue\n commands -= rejects\n if self.debug:\n assert not any(self.approx.encmd(r) in self.mod_commands for\n r in rejects)\n assert all(self.approx.encmd(c) in self.mod_commands for\n c in commands)\n if not commands:\n return\n requested = zip(commands, (self.approx[c] for c in commands))\n else:\n requested = self.approx.items()\n help = znc.CTable()\n help.AddColumn(\"Command\")\n help.AddColumn(\"Usage\" if usage else \"Description\")\n from itertools import zip_longest\n #\n for command, parser in requested:\n if usage:\n upre = \"usage: %s\" % command\n rest = (parser.format_usage()\n .replace(upre, \"\", 1)\n .replace(\"[-h] \", \"\", 1))\n desc = [l.strip() for l in rest.split(\"\\n\") if l.strip()]\n else:\n desc = [parser.description]\n for line, comm in zip_longest(desc, (command,), fillvalue=\"\"):\n help.AddRow()\n help.SetCell(\"Command\", comm)\n help.SetCell(\"Usage\" if usage else \"Description\", line)\n #\n s_line = znc.String()\n strung = []\n while help.GetLine(len(strung), s_line):\n strung.append(s_line.s)\n also = \" (<command> [-h] for details)\"\n strung[1] = strung[1].replace(len(also) * \" \", also, 1)\n self.put_pretty(\"\\n\".join(strung))", "def command_help(self, *args, **kwargs):\n print(\"Commands available:\\n\")\n for name in dir(self):\n if not name.startswith(\"command_\"):\n continue\n name_clean = name[len(\"command_\"):]\n print(\"%s:\\n - %s\\n\" % (name_clean, getattr(self, name).__doc__.strip()))", "def help_command(update: Update) -> None:\n #update.message.reply_text('Help!')", "def help(ctx):\n click.echo(ctx.parent.get_help())", "def custom_help(split_string=\"<++>\"):\n class CustomHelp(ap._HelpAction):\n def __call__(self, parser, namespace, values, option_string=None):\n\n # create the help string and store it into a string\n try:\n from StringIO import StringIO ## for Python 2\n except ImportError:\n from io import StringIO ## for Python 3\n fstr = StringIO()\n try:\n parser.print_help(file=fstr)\n help_str = fstr.getvalue()\n finally:\n fstr.close()\n\n # create the regular expression to match the desciption\n descmatch = r'{0}(.+?){0}(.+?){0}'\n # escape possible dangerous characters\n esplit_string = re.escape(split_string)\n re_desc = re.compile(descmatch.format(esplit_string),\n flags=re.DOTALL)\n\n # select the case according to which option_string is selected\n if option_string == '-h':\n to_sub = r'\\1'\n elif option_string == '--help':\n to_sub = r'\\1\\2'\n\n print(re_desc.sub(to_sub, help_str))\n parser.exit()\n\n return CustomHelp", "def usage(self, subcommand):\n usage = 
'%%prog %s [options] %s' % (subcommand, self.args)\n if self.help:\n return '%s\\n\\n%s' % (usage, self.help)\n else:\n return usage", "def cmd_help(args):", "def help_parser():\n description = \"Lists all available commands in the transifex command \"\\\n \"client. If a command is\\nspecified, the help page of the specific \"\\\n \"command is displayed instead.\"\n\n parser = ArgumentParser(description=description)\n parser.add_argument(\"command\", action=\"store\", nargs='?', default=None,\n help=\"One of the tx commands.\")\n return parser", "def help_command(update: Update, context: CallbackContext) -> None:\n update.message.reply_text('Help!')", "def help_command(update: Update, context: CallbackContext) -> None:\n update.message.reply_text('Help!')", "def help_command(update: Update, context: CallbackContext) -> None:\n update.message.reply_text('Help!')", "def help_command(update: Update, context: CallbackContext) -> None:\n update.message.reply_text('Help!')", "def setup_subparser(name, description, commands):\n subparser = SUBPARSER.add_parser(\n name,\n help=description\n )\n subparser.add_argument(\n 'sub_command',\n metavar='sub_command',\n type=str,\n nargs='+',\n help='Which command to run. Options: %s' % ', '.join(commands),\n choices=commands\n )\n\n return subparser", "def test_get_invalid_subcommand_help(self):\n self.assertEqual(self.testcommand.get_help(),\n self.testcommand.get_help(subcommand=\"foo\"))", "def get_command(self, kword: str):\n # Step Zero is to make sure that the name does not belong to a REAL command.\n zero, mod = super().get_command(kword)\n if zero:\n return zero, mod\n\n # Otherwise, first, ensure that the keyword does in fact exist in the custom list.\n command = self.config.commands.get(kword, None)\n if not command:\n return None, None\n response = command[\"com\"]\n\n # Build the function to return the response. Note that \"self\" exists already.\n async def cmd_custom(args, src, **_):\n if args:\n member = self.get_member(src, args[0].strip())\n tag = member.mention if member else None\n else:\n tag = None\n\n nsfw = command.get(\"nsfw\", False)\n if nsfw and src.channel.id not in self.config.get(\"nsfwChannels\"):\n return None\n\n # Replace tags where needed.\n try:\n output = response.format(\n self=src.author.name,\n myID=src.author.id,\n tag=tag or src.author.mention,\n )\n except KeyError:\n return None\n else:\n return output\n\n # Specify the docstring and name so that !help will work on this.\n short = response.replace(\"{\", \"{{\").replace(\"}\", \"}}\")\n if len(short) > 80:\n short = short[:77] + \"...\"\n cmd_custom.__doc__ = (\n \"__Custom command__: Return the following text: ```{}```\\n\\n\".format(short)\n + command.get(\n \"desc\",\n \"This is a custom command, so available help text is limited, but at the same time, the command is very simple. 
All it does is return a string, although the string may include formatting tags for invoker name, invoker ID, and a targeted mention.\",\n )\n + \"\\n\\nSyntax: `{p}\"\n + kword.lower()\n + (\" <user_ID>\" if \"{tag}\" in response else \"\")\n + \"`\"\n )\n cmd_custom.__name__ = \"cmd_\" + kword.lower()\n\n return cmd_custom, None", "def sub_command(self, name: str=None, description: str=None, options: list=None, connectors: dict=None, **kwargs):\r\n def decorator(func):\r\n new_func = SubCommand(\r\n func,\r\n name=name,\r\n description=description,\r\n options=options,\r\n connectors=connectors,\r\n **kwargs\r\n )\r\n self.children[new_func.name] = new_func\r\n self.option.options.append(new_func.option)\r\n return new_func\r\n return decorator" ]
[ "0.70336854", "0.70271784", "0.664786", "0.6645423", "0.647728", "0.6419414", "0.6092256", "0.60677403", "0.6052447", "0.5968284", "0.59641016", "0.5962373", "0.5933287", "0.5915913", "0.59141576", "0.5910189", "0.59012985", "0.5898384", "0.5896387", "0.5896387", "0.5896387", "0.5874593", "0.58736277", "0.58553797", "0.58513886", "0.58218646", "0.57758015", "0.57577544", "0.5678175", "0.56685257", "0.56615955", "0.56504315", "0.56408596", "0.5638695", "0.5628421", "0.55654156", "0.5528292", "0.5472517", "0.54568887", "0.543404", "0.54178107", "0.5404743", "0.5403392", "0.5399655", "0.5398487", "0.5391995", "0.53828853", "0.5380106", "0.5378751", "0.5361629", "0.53590196", "0.53575", "0.5348355", "0.534564", "0.53439873", "0.5342542", "0.5334688", "0.53201544", "0.5316892", "0.53146714", "0.5309684", "0.5308156", "0.5281143", "0.5263051", "0.5261659", "0.5238337", "0.5215823", "0.5209691", "0.5206552", "0.5205712", "0.5204943", "0.5183547", "0.5183547", "0.51605946", "0.51387304", "0.5136214", "0.5136214", "0.51317483", "0.51289636", "0.51188785", "0.5098308", "0.50918996", "0.5088853", "0.50864774", "0.50753355", "0.5072259", "0.5063416", "0.506308", "0.504508", "0.50430447", "0.503564", "0.5032788", "0.5027315", "0.5027315", "0.5027315", "0.5027315", "0.50261086", "0.5025272", "0.5013312", "0.49988922" ]
0.78726345
0
Loads a multilayer png file and returns a list of all features loaded as numpy arrays. You can use np.concatenate(x,2) to create a 3D array of size
def load_png_data():
    m=1 # number of training files
    n=1 # number of test files
    train_set_x=[]# training data set
    train_set_y=[]# training label set
    test_set_x=[]# test data set
    test_set_y=[]# test label set
    train_data={}
    train_path=r".\dataset\train_label\\"
    dirs=os.listdir(train_path)
    for file in dirs:
        srcImg=cv2.imread(train_path+file)
        # convert the label image to a numpy array and save it
        npImg=np.array(srcImg)
        np.save(train_path+str(m)+'.npy',npImg)
        train_set_x.append(npImg)

        NoiseImg = GaussianNoise(srcImg, 25, 4, 0.8)
        npNoiseImg = np.array(NoiseImg)
        cv2.imwrite(r".\dataset\trainset\\"+str(m)+'.png', NoiseImg, [int(cv2.IMWRITE_PNG_STRATEGY_DEFAULT)])
        np.save(r".\dataset\trainset\\" + str(m) + '.npy', npNoiseImg)
        train_set_y.append(npNoiseImg)
        m=m+1
    train_data['train_set_x']=train_set_x
    train_data['train_set_y']=train_set_y

    test_path = r".\dataset\test_label\\"
    dirs_test = os.listdir(test_path)
    for file in dirs_test:
        srcImg=cv2.imread(test_path+file)
        # convert the label image to a numpy array and save it
        npImg=np.array(srcImg)
        np.save(test_path+str(n)+'.npy',npImg)
        test_set_x.append(npImg)

        NoiseImg = GaussianNoise(srcImg, 25, 4, 0.8)
        npNoiseImg = np.array(NoiseImg)
        cv2.imwrite(r".\dataset\testset\\"+str(n)+'.png', NoiseImg, [int(cv2.IMWRITE_PNG_STRATEGY_DEFAULT)])
        np.save(r".\dataset\testset\\" + str(n) + '.npy', npNoiseImg)
        test_set_y.append(npNoiseImg)
        n=n+1
    train_data['test_set_x']=test_set_x
    train_data['test_set_y']=test_set_y
    np.savez(r"E:\DeepLearning\CNNDenoiser\dataset\train_data.npz",**train_data)
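The query above mentions np.concatenate(x,2), which load_png_data() itself never calls; the sketch below is an illustration, not part of the original function, showing how a list of per-image arrays such as train_set_x, or the .npy layer files the function writes, could be stacked into a single 3D array along axis 2. The directory argument and the reshaping of 2D layers are assumptions.

import glob

import numpy as np

def stack_layers(npy_dir):
    # Load every per-layer .npy file written by load_png_data().
    layers = [np.load(p) for p in sorted(glob.glob(npy_dir + "/*.npy"))]
    # Give 2D layers an explicit channel axis so that axis 2 exists.
    layers = [l if l.ndim == 3 else l[:, :, np.newaxis] for l in layers]
    # Concatenate along axis 2 -> shape (H, W, total_channels).
    return np.concatenate(layers, 2)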
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
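The metadata block above declares a triplet objective over query, document, and negatives. Below is a minimal sketch, assuming each record is a plain dict keyed by those three field names, of how one record could be expanded into (query, positive, negative) training triples; the record format is an assumption for illustration only.

from typing import Dict, List, Tuple

def to_triplets(record: Dict) -> List[Tuple[str, str, str]]:
    query = record["query"]          # anchor text (the docstring-style description)
    positive = record["document"]    # the matching code snippet
    negatives = record["negatives"]  # non-matching code snippets
    return [(query, positive, neg) for neg in negatives]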
[ "def load_image_patch(filename):\n im = Image.open(filename) # .convert('L')\n width, height = im.size\n pixels = list(im.getdata())\n features = [pixels[i * width:(i + 1) * width] for i in range(height)]\n features = np.asarray(im, dtype=np.float32).flatten()\n features /= 255.0\n return features", "def load_image(filename):\n im = Image.open(filename) # .convert('L')\n width, height = im.size\n pixels = list(im.getdata())\n features = [pixels[i * width:(i + 1) * width] for i in range(height)]\n features = np.asarray(im, dtype=np.float32)\n features /= 255.0\n return features", "def extract_images(filename,lx):\n print('Extracting', filename,'aaaaaa')\n \n data=numpy.loadtxt(filename,dtype='int64')\n dim=data.shape[0]\n data=data.reshape(dim, lx, lx, 1) \n # Convert shape from [num examples, rows, columns, depth]\n # to [num examples, rows*columns] (assuming depth == 1)\n data = data.reshape(data.shape[0],\n data.shape[1] * data.shape[2])\n # Convert from [0, 255] -> [0.0, 1.0].\n data = data.astype(numpy.float64)\n # images = numpy.multiply(images, 1.0 / 255.0) # commented since it is ising variables\n data = numpy.multiply(data, 1.0 ) # multiply by one, instead\n print(data.shape)\n return data", "def getFeatures(filedir):\r\n lbs = getLabels(filedir)\r\n width, height = getSize(filedir)\r\n features = [os.listdir(filedir + 's' + str(lbs[i])) for i in range(len(lbs))]\r\n for i in range(len(lbs)):\r\n for j in range(len(features[i])):\r\n im = Image.open(filedir + 's' + str(lbs[i]) + '/' + features[i][j]) # type(im): <class 'PIL.PpmImagePlugin.PpmImageFIle'>\r\n im = im.convert('L') # type(im): <class 'PIL.Image.Image'>\r\n data = im.getdata() # type(data): <class 'ImagingCore'>\r\n img = np.reshape(list(data), (height, width))\r\n features[i][j] = img\r\n return features", "def load_features(inputfile, load_bin=False, save_path=None):\n if load_bin:\n return bin2matrix(inputfile)\n X, y = [], []\n pf = FileOfPaths(inputfile)\n pb = progressbar.ProgressBar(pf.numberFiles())\n for n, _ in enumerate(pf):\n y.append(pf.getY())\n X.append(pf.getFeatures())\n pb.update()\n X = np.array(X).astype(float)\n y = np.array(y).astype(int)\n if save_path:\n ym = np.array([y])\n Xy = np.append(X, ym.T, axis=1)\n np.save(save_path, Xy)\n return X, y", "def load_batch(filename: str) -> Tuple[ndarray, ndarray, ndarray]:\n dataDict = unpickle(filename)\n print(\"1\", dataDict[b\"data\"][1, :])\n X = (dataDict[b\"data\"] / 255).T\n print(\"2\", X[:, 1])\n y = np.array(dataDict[b\"labels\"])\n Y = np.eye(10)[y].T\n return X, Y, y", "def load_images(self, image_path):\n X_train = []\n\n # Load all files from the image path using Image.open.\n for i in recursive_list(image_path):\n # Open images as ???\n img = Image.open(i)\n # Convert to NP array.\n img = np.asarray(img)\n # Append them into higher order array.\n if img.shape == (128, 128, 3):\n X_train.append(img)\n\n # return all the images concatenated as a 4D array\n return np.asarray(X_train)", "def get_data(folder):\n X = []\n y = []\n\n for seismic_type in os.listdir(folder):\n if not seismic_type.startswith('.'):\n if seismic_type in ['Class1']:\n label = '0'\n else:\n label = '1'\n for image_filename in os.listdir(folder + seismic_type):\n img_file = cv2.imread(folder + seismic_type + '/' + image_filename)\n if img_file is not None:\n # Downsample the image to 120, 160, 3\n #img_file = scipy.misc.imresize(arr=img_file, size=(120, 160, 3))\n img_arr = np.asarray(img_file)\n # img_arr = image.img_to_array(img_arr)\n X.append(img_arr)\n 
y.append(label)\n X = np.asarray(X)\n y = np.asarray(y)\n return X,y", "def load_food_image_batch(filename, num):\n with open(filename, 'rb') as f:\n datadict = pickle.load(f)\n url_parts = datadict['Image URL'].split(\"/\")\n img_fn = url_parts[-1]\n with open(img_fn):\n X = f.read()\n Y = datadict['coarse_labels']\n X = X.reshape(num, 3, 32, 32).transpose(0,2,3,1).astype(\"float\")\n Y = np.array(Y)\n return X, Y", "def load_data(class_fnames):\n X = []\n y = []\n for label, fnames in enumerate(class_fnames):\n for fname in fnames:\n X.append(cv2.imread(fname))\n y.append(label)\n X = np.stack(X)\n y = np.stack(y)\n return X, y", "def load_test_data():\n X = []\n y = []\n for fname in os.listdir(test_dir):\n label = int(fname.split(\"_\")[0])\n img = plt.imread(os.path.join(test_dir, fname))\n X.append(img)\n y.append(label)\n X = np.stack(X)\n y = np.stack(y)\n return X, y", "def load_data(path):\n\n\t# Create a list of all files ending in .jpg\n\tim_list = list_images(path, '.jpg')\n\n\t# Create labels\n\tlabels = [int(im_name.split('/')[-1][0]) for im_name in im_list]\n\tfeatures = []\n\n\t# Create features from the images\n\t# TOD.O: iterate over images paths\n\tfor im_path in im_list:\n\t\t# TOD.O: load image as a gray level image\n\t\tim = np.array(Image.open(im_path).convert('L'))\n\t\t# TOD.O: process the image to remove borders and resize\n\t\tim = process_image(im)\n\t\t# TOD.O: append extracted features to the a list\n\t\tfeatures.append(extract_features(im))\n\n\t# TOD.O: return features, and labels\n\treturn features, labels", "def get_data(path):\n all_images_as_array=[]\n label=[]\n for filename in os.listdir(path):\n try:\n if re.match(r'positive',filename):\n label.append(1)\n else:\n label.append(0)\n img=cv2.imread(path + filename)\n (b, g, r)=cv2.split(img)\n img=cv2.merge([r,g,b])\n np_array = np.asarray(img)\n l,b,c = np_array.shape\n np_array = np_array.reshape(l*b*c,)\n all_images_as_array.append(np_array)\n except:\n continue\n return np.array(all_images_as_array), np.array(label)", "def load_data(path,size, scale = True):\n images = os.listdir(path)\n images.sort()\n\n X = []\n for i, img in enumerate(images):\n photo = plt.imread(os.path.join(path,img))\n if size:\n photo = tf.image.resize(photo, (size, size))\n X.append(photo)\n \n X = np.array(X)\n if scale:\n X = X/X.max() \n return X", "def loadData(path):\r\n X = []\r\n y = []\r\n dir1 = os.listdir(path)\r\n for d1 in dir1:\r\n dir2 = os.listdir(path+'/'+d1)\r\n for d2 in dir2:\r\n if int(d1) == 0:\r\n image = cv2.imread(path+r'/'+d1+r'/'+d2, 0)\r\n X.append(np.array(image, dtype=np.float32).reshape(-1) / 255.0)\r\n y.append(1)\r\n elif int(d1) == 1:\r\n image = cv2.imread(path+r'/'+d1+r'/'+d2, 0)\r\n X.append(np.array(image, dtype=np.float32).reshape(-1) / 255.0)\r\n y.append(-1)\r\n X = np.array(X, dtype=np.float32)\r\n y = np.array(y, dtype=np.int64)\r\n perm = np.random.permutation(X.shape[0])\r\n X = X[perm]\r\n y = y[perm]\r\n return X, y", "def _load_multipage_tiff(path):\n return np.array([np.array(p) for p in ImageSequence.Iterator(Image.open(path))])", "def loadImages(loadPath):\n img_array = []\n for filename in glob.glob(loadPath):\n img = cv2.imread(filename)\n height, width, layers = img.shape\n size = (width, height)\n img_array.append(img)\n\n return img_array", "def load_labeled_data():\n\n images = []\n labels = []\n\n for i in range(1, 10):\n path = (\"selflabeled\", str(i), \"*.jpg\")\n filenames = glob.glob(\"/\".join(path))\n images_one_type = [cv2.imread(img) for img in filenames]\n 
labels_one_type = [i] * len(images_one_type)\n images += images_one_type\n labels += labels_one_type\n\n return images, labels", "def load_data(fname):\n pathname = \"data/\" + fname\n data = pickle.load(open(pathname, 'rb'), encoding='latin1')\n images = np.array([img[:-1] for img in data])\n ys = [int(img[-1]) for img in data]\n length = len(ys)\n labels = np.zeros((length, 10))\n\n for i in range(length):\n labels[i, ys[i]] = 1\n\n return images, labels", "def load_pts_features(path):\n\n #\n # Your code here\n #\n\n pts = [np.empty((123, 2)), np.empty((123, 2))]\n feats = [np.empty((123, 128)), np.empty((123, 128))]\n\n return pts, feats", "def load_image_data():\n print(\"Loading image data...\")\n label_dict = get_label_vectors()\n categories = [c for c in os.listdir('images/') if c[0] != '.'] # ignore\n labels = [] # instantiate list for image labels\n data = [] # instantiate list for image data\n for i in categories:\n path = 'images/{}/'.format(i) # define path to category folder\n for j in os.listdir(path): # get images from category folder\n labels.append(label_dict[i]) # append label vector\n data.append(cv2.imread(path + j).flatten()) # append flattened image data\n\n labels = np.array(labels) # convert lists to array\n data = np.array(data)\n print(\"Done.\")\n\n return labels, data", "def load_raster(input):\n dem = gdal.Open(input)\n\n nodata = []\n layers = []\n for i in range(1, dem.RasterCount+1):\n band = dem.GetRasterBand(i)\n data = band.ReadAsArray(0, 0, dem.RasterXSize, dem.RasterYSize)\n layers.append(data)\n nodata.append(band.GetNoDataValue())\n\n if len(layers) > 1:\n layers = N.dstack(layers) \n\n info = attrdict(\n metadata=dem.GetMetadata_Dict(),\n grid=Grid.from_gdal(dem),\n nodata=nodata)\n\n return (info,layers)", "def _get_data(path):\n archive = np.load(path)\n images = archive['faceData']\n return images", "def load_X():\n x1 = np.asarray([-1, -1, 1, -1, 1, -1, -1, 1]).reshape((1, 8))\n x2 = np.asarray([-1, -1, -1, -1, -1, 1, -1, -1]).reshape((1, 8))\n x3 = np.asarray([-1, 1, 1, -1, -1, 1, -1, 1]).reshape((1, 8))\n X = np.vstack([x1, x2, x3])\n\n return X", "def load_from_array():\n\n x = np.load(settings.data(\"x.npy\")).reshape(-1, 1, 224, 224)\n y = np.load(settings.data(\"y.npy\"))\n\n return x, y", "def import_fimage(filepath):\n \n im_path = os.path.join(str(filepath))\n im_files = glob.glob(im_path)\n im_col = []\n \n for image in im_files:\n imc = plt.imread(image,0) # changed from cv2.imread\n imcg = np.array(imc)\n try:\n xy = np.shape(imcg) #added\n except:\n continue\n imgr = imcg.reshape((xy[0]*xy[1],)) #added\n imgr = np.array(imgr)\n im_col.append(imgr.transpose())\n \n return(im_col)", "def read_training_pixels_from_multi_images(input, subImg_folder, subLabel_folder):\n img_list = io_function.get_file_list_by_ext('.tif', subImg_folder, bsub_folder=False)\n label_list = io_function.get_file_list_by_ext('.tif', subLabel_folder, bsub_folder=False)\n img_list.sort()\n label_list.sort()\n\n if len(img_list) < 1 or len(label_list) < 1:\n raise IOError('No tif images or labels in folder %s or %s' % (subImg_folder, subLabel_folder))\n if len(img_list) != len(label_list):\n raise ValueError('the number of images is not equal to the one of labels')\n\n # read them one by one\n Xs, ys = [], []\n for img, label in zip(img_list, label_list):\n # # test by hlc\n # polygon_index_img = os.path.basename(img).split('_')[-3]\n # # print(polygon_index_img)\n # if polygon_index_img not in [str(83), str(86)] :\n # continue\n\n X_aImg, y_a = 
read_training_pixels(img, label)\n Xs.append(X_aImg)\n ys.append(y_a)\n\n X_pixels = np.concatenate(Xs, axis=1)\n y_pixels = np.concatenate(ys, axis=0)\n X_pixels = np.transpose(X_pixels, (1, 0))\n basic.outputlogMessage(str(X_pixels.shape))\n basic.outputlogMessage(str(y_pixels.shape))\n\n return X_pixels, y_pixels", "def load_images(file):\n\timage_list = [] # List for storing all the images\n\ttargets = []\n\t\n\tfor filename in glob.glob(file + '/*.png'):\n\t\t# ==================\n\t\t# Reading the image\n\t\t# ==================\n\t\timage = scipy.misc.imread(filename).astype(np.float32)\n\t\t\n\t\t# ================================\n\t\t# Converting the image to a vector\n\t\t# ================================\n\t\timage = image.flatten() # (784, )\n\t\t\n\t\t# ==============================\n\t\t# Normalizing the image to numpy\n\t\t# ==============================\n\t\timage = image / 255.0\n\t\timage = image - 0.5\n\t\timage = image * 2.0\n\t\t\n\t\t# ===============================\n\t\t# Appending the image to the list\n\t\t# ===============================\n\t\timage_list.append(image)\n\t\t\n\t\t_, value = filename.split('\\\\')\n\t\t# print(value[0])\n\t\ttargets.append(int(value[0]))\n\t\n\timage_list = np.array(image_list)\n\ttargets = np.array(targets)\n\t\n\t# ================================================\n\t# \t\t\tShuffling the data\n\t# ================================================\n\timage_list, targets = shuffle(image_list, targets)\n\t\n\ttrain_images, test_images, train_targets, test_targets = split(image_list, targets)\n\treturn train_images, test_images, train_targets, test_targets", "def load_images(filename='training_images'): \n file_path = os.path.join(DATA_DIR, filename)\n with open(file_path, 'rb') as f:\n b = f.read() # hope ya get it all\n\n # grab the first four numbers ...\n # fmt='>i' means big-endian int32\n magic, n_images, n_rows, n_cols = (struct.unpack('>i', b[i*4:(i+1)*4]) for i in range(4))\n\n # i am a god-fearing man\n assert magic[0] == 2051, \"bad magic number, what do?\"\n\n\n # so i think you can use the standard libary's \"array\" for this, just\n # because binary data of any sort is kinda dodgy, but this grabs 'the rest'\n # format='B' means unsigned char === 'uint8', and apparently endianness doesn't matter\n image_stream = array.array('B', b[16:])\n\n # so each 28*28 byte portion of image_stream is a flattened image. these two\n # numpy.reshape calls get it into the desired shape for A. maybe could\n # combine it into one call, idk. 
anyway, each flattened image appears as a\n # row, and there is a row for each image.\n image_first = numpy.reshape(image_stream, (n_images[0], n_rows[0], n_cols[0]))\n images = image_first.reshape(n_images[0], n_rows[0]*n_cols[0])\n\n # convert to float in [0,1]\n images = images.astype('f') / 255\n\n return images", "def load_features(feature_path):\n if not os.path.exists(os.path.join(feature_path, f\"0_features.npy\")): \n raise ValueError(f\"The provided location {feature_path} does not contain any representation files\")\n\n ds_list, chunk_id = [], 0\n while os.path.exists(os.path.join(feature_path, f\"{chunk_id}_features.npy\")): \n features = ch.from_numpy(np.load(os.path.join(feature_path, f\"{chunk_id}_features.npy\"))).float()\n labels = ch.from_numpy(np.load(os.path.join(feature_path, f\"{chunk_id}_labels.npy\"))).long()\n ds_list.append(ch.utils.data.TensorDataset(features, labels))\n chunk_id += 1\n\n print(f\"==> loaded {chunk_id} files of representations...\")\n return ch.utils.data.ConcatDataset(ds_list)", "def read_x_data(data_dir):\n files = glob.glob(os.path.join(data_dir, '*.jpg'))\n return [(os.path.basename(file), io.imread(file)) for file in files]", "def load_svhn_images(folder_path):\n images = []\n for file in os.listdir(folder_path):\n if file.endswith(\".png\"):\n image = Image.open(file)\n image.load()\n # Load image data as 1 dimensional array\n # We're using float32 to save on memory space\n feature = np.array(image, dtype=np.float32)\n images.append(feature)\n\n return images", "def get_image_features(paths: pd.Series) -> np.array:\r\n # Pretrained image classification model to convert images into embeddings\r\n image_model = tf.keras.applications.EfficientNetB7(weights='imagenet',\r\n include_top=False,\r\n input_shape=(IMG_SIZE, IMG_SIZE, 3),\r\n pooling='avg')\r\n image_model = tf.keras.Sequential(\r\n [tf.keras.models.load_model(image_model),\r\n tf.keras.layers.Layer(2560, dtype='float16')] # This layer reduces precision of float numbers\r\n )\r\n\r\n # Transform paths to files into tf.data.Dataset\r\n input_data = tf.data.Dataset.from_tensor_slices(paths)\r\n # Preprocess images\r\n input_data = input_data.map(process_path, num_parallel_calls=AUTOTUNE)\r\n input_data = configure_for_performance(input_data)\r\n\r\n # Convert all images into embeddings and average colors\r\n features = image_model.predict(input_data,\r\n batch_size=BATCH_SIZE,\r\n use_multiprocessing=True,\r\n workers=-1)\r\n print('Image features extracted. Shape:', features.shape)\r\n\r\n return features", "def _load_batch_file(filename):\n # Load the pickled data-file.\n data = _unpickle(filename)\n # Get the raw images.\n raw_images = data[b'data']\n # Get the class-numbers for each image. 
Convert to numpy-array.\n cls = np.array(data[b'labels'])\n # Convert the images.\n images = _convert_images(raw_images)\n\n return images, cls", "def load_images(folder_path, num_images):\n imgs = np.zeros(shape=[num_images, 400, 400, 3])\n for i in range(1, num_images + 1):\n image_name = \"satImage_%.3d\" % i\n image_path = folder_path + image_name + \".png\"\n if os.path.isfile(image_path):\n print('Loading ' + image_path)\n img = mpimg.imread(image_path)\n\n #imgs[i - 1] = np.asarray(img).reshape(400, 400, 3)\n imgs[i - 1] = img.reshape(400, 400, 3)\n else:\n print('File ' + image_path + ' does not exist')\n return imgs", "def load_CIFAR_batch(filename):\r\n with open(filename, 'rb')as f:\r\n datadict = p.load(f)\r\n \r\n X = datadict['data']\r\n Y = datadict['labels']\r\n \r\n print X.shape\r\n X = X.reshape(X.shape[0], SHAPE[0], SHAPE[1], SHAPE[2])\r\n Y = np.array(Y)\r\n return X, Y", "def _load_X_y(path, subset='train'):\n y_path = join(path, '{}-labels-idx1-ubyte.gz'.format(subset))\n X_path = join(path, '{}-images-idx3-ubyte.gz'.format(subset))\n\n with gzip.open(y_path, 'rb') as y_file:\n y = np.frombuffer(y_file.read(), dtype=np.uint8,\n offset=8)\n with gzip.open(X_path, 'rb') as X_file:\n X = np.frombuffer(X_file.read(), dtype=np.uint8,\n offset=16).reshape(len(y), 784)\n return X, y", "def load(data, feature):\n #Settings\n train_path = os.path.join(\"data\", data, feature) #put your image path here if you want to override current directory\n\n X = []\n y = []\n for f in os.listdir(train_path):\n (X_i, y_i) = cPickle.load(open(os.path.join(train_path,f), \"rb\"))\n if type(X_i) is np.ndarray:\n X_i = X_i.tolist()\n X = X + X_i #Append the two lists together\n y = y + y_i\n assert np.size(X,0) == 50000 or np.size(X,0) == 10000\n assert np.size(y) == 50000 or np.size(y) == 10000\n # Raws are stored as SimpleCV Images so they can easily be converted to\n # features using SimpleCV\n # Since machine learning aglorithms take feature vectors as inputs, we\n # flatten the underlying 3D matrices of the images here.\n if feature == \"raw\":\n X = map (lambda img: img.getNumpy().flatten(), X)\n return X,y", "def load_source_png_images(self, num_slice):\n if self.subject is None:\n print Console.WARNING + 'You need to specify a subject first' + Console.ENDC\n return\n data = [] \n for l in self.locations.LABELS:\n slice_file = self.locations.SOURCE_PNG % (l, num_slice)\n \n #print 'Loading Input Image \\t\\t%s'%slice_file \n slice_data = misc.imread(slice_file) \n data.append(slice_data)\n \n return data #images in the same order as labels", "def read_images_lenstool(image_file = 'image.all'):\n #FIXME - is it a good idea to keep this one line function?\n x_img = np.loadtxt(image_file, usecols = (1, 2))\n return x_img", "def load_image(file_name):\n if not osp.exists(file_name):\n print('{} not exist'.format(file_name))\n return\n image = np.asarray(io.imread(file_name))\n if len(image.shape)==3 and image.shape[2]>3:\n image = image[:, :, :3]\n # print(image.shape) #should be (x, x, 3)\n return image", "def __readImages(self, filename):\n print 'Reading images from %s ...' 
% filename\n images = []\n with open(filename, 'rb') as infile:\n infile.read(4) # ignore magic number\n count = struct.unpack('>i', infile.read(4))[0]\n rows = struct.unpack('>i', infile.read(4))[0]\n columns = struct.unpack('>i', infile.read(4))[0]\n\n for i in xrange(count):\n data = infile.read(rows*columns)\n image = np.fromstring(data, dtype=np.uint8)\n image = image.reshape((rows, columns))\n image = 255 - image # now black digit on white background\n images.append(image)\n return images", "def load_images(path, p=1, feature=None, transform=None):\n\n images = os.listdir(path)\n images = random.sample(images, math.ceil(len(images) * p))\n\n loaded = [\n load_image(\n os.path.join(path, img),\n feature=feature, transform=transform)\n for img in images]\n\n return np.array([x for x in loaded if x is not None])", "def load_geojsons2(filepath):\n jsons = glob(os.path.join(filepath, '*.json'))\n features = []\n for json_path in tqdm(jsons, desc='loading geojson files'):\n with open(json_path) as f:\n data_dict = json.load(f)\n features.append(data_dict)\n\n obj_coords = list()\n image_ids = list()\n class_indices = list()\n class_names = list()\n\n for feature in tqdm(features, desc='extracting features'):\n for i in range(len(feature['object'])):\n if feature['object'][i]['label'] != 'gbg':\n image_ids.append(feature['filename'])\n obj_coords.append(feature['object'][i]['points'])\n class_indices.append(int(feature['object'][i]['label'][-1])-1)\n class_names.append(feature['object'][i]['label'])\n \n return image_ids, obj_coords, class_indices, class_names", "def read_file ( filename ):\r\n\t# lecture de l'en-tete\r\n\tinfile = open ( filename, \"r\" ) \r\n\tnb_classes, nb_features = [ int( x ) for x in infile.readline().split() ]\r\n\r\n\t# creation de la structure de donnees pour sauver les images :\r\n\t# c'est un tableau de listes (1 par classe)\r\n\tdata = np.empty ( 10, dtype=object ) \r\n\tfiller = np.frompyfunc(lambda x: list(), 1, 1)\r\n\tfiller( data, data )\r\n\r\n\t# lecture des images du fichier et tri, classe par classe\r\n\tfor ligne in infile:\r\n\t\tchamps = ligne.split ()\r\n\t\tif len ( champs ) == nb_features + 1:\r\n\t\t\tclasse = int ( champs.pop ( 0 ) )\r\n\t\t\tdata[classe].append ( map ( lambda x: float(x), champs ) ) \r\n\tinfile.close ()\r\n\r\n\t# transformation des list en array\r\n\toutput = np.empty ( 10, dtype=object )\r\n\tfiller2 = np.frompyfunc(lambda x: np.asarray (x), 1, 1)\r\n\tfiller2 ( data, output )\r\n\r\n\treturn output", "def read_images(idx_filename, feature_type='ip-above', only_images=True):\n with gzip.open(idx_filename, 'rb') as f:\n magic_numbers = f.read(4)\n # print('magic_number', magic_numbers)\n assert magic_numbers[0] == 0 and magic_numbers[1] == 0\n if magic_numbers[2] != 8:\n raise AssertionError('Only support for unsigned char')\n shape = magic_numbers[3]\n # print('shape', shape)\n num_examples = int.from_bytes(f.read(4), byteorder='big')\n # print('number of examples',num_examples)\n dimensions = []\n for _ in range(shape - 1):\n dimensions.append(int.from_bytes(f.read(4), byteorder='big'))\n # print('dimensions', dimensions)\n data_list = []\n for _ in range(num_examples):\n each_data_point = _read(dimensions, f)\n session_info = _extract_session_info(each_data_point[:SESSION_BYTE_LEN])\n if feature_type == 'payload-len':\n actual_pkt_count = int.from_bytes(each_data_point[SESSION_BYTE_LEN:SESSION_BYTE_LEN + 4],\n byteorder='big')\n actual_byte_count = int.from_bytes(each_data_point[SESSION_BYTE_LEN + 4:SESSION_BYTE_LEN + 
6],\n byteorder='big')\n other_data = each_data_point[SESSION_BYTE_LEN + 6:]\n elif feature_type == 'ip-above':\n actual_pkt_count = each_data_point[SESSION_BYTE_LEN]\n other_data = each_data_point[SESSION_BYTE_LEN + 1:]\n else:\n raise AssertionError('feature_type could only be \"payload-len\" or \"ip-above\"')\n if only_images is True:\n data_list.append(other_data)\n elif only_images is False:\n if feature_type == 'ip-above':\n data_list.append((session_info, actual_pkt_count, other_data))\n elif feature_type == 'payload-len':\n data_list.append((session_info, actual_pkt_count, actual_byte_count, other_data))\n else:\n raise AssertionError('feature_type could only be \"payload-len\" or \"ip-above\"')\n else:\n raise AssertionError('only_images could only be True or False')\n\n assert f.read() == b''\n\n data_list = np.array(data_list)\n return data_list", "def extract_labels(filename, num_images):\n\n# this function definition has been taken from internet \n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(8)\n buf = bytestream.read(1 * num_images)\n labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64) #Interpret a buffer as a 1-dimensional array\n return labels", "def extract_features(self, images: List[np.ndarray]) -> List[np.ndarray]:\n pass", "def load_data():\n\n training_files_dir = \"digits/trainingDigits\"\n training_files = os.listdir(training_files_dir)\n file_num = len(training_files)\n hw_labels = []\n\n training_mat = zeros((file_num, 32 * 32))\n for i in xrange(file_num):\n filename = training_files[i]\n file_label = int((filename.split(\".\")[0]).split(\"_\")[0])\n hw_labels.append(file_label)\n training_mat[i, :] = img2vector(training_files_dir + '/' + filename)\n\n return training_mat, hw_labels", "def load_groundtruths(folder_path, num_images):\n imgs = []\n for i in range(1, num_images + 1):\n image_name = \"satImage_%.3d\" % i\n image_path = folder_path + image_name + \".png\"\n if os.path.isfile(image_path):\n print('Loading ' + image_path)\n img = mpimg.imread(image_path)\n # See if it is better to use dtype = int\n hot_img = convert_image_to_hot(img)\n imgs.append(hot_img)\n else:\n print('File ' + image_path + ' does not exist')\n #imgs = np.around(imgs) # Uncomment if we want to round values.\n imgs_array = np.asarray(imgs)\n return imgs_array", "def load_digits():\n \n images, target = [], []\n for image_file in digit_image_filenames:\n image = cv2.imread(image_file)\n if image is None:\n raise RuntimeError(\"Failed to read the image file '{}'\".format(\n image_file))\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n for slice in image_slices(image, 0):\n for i, character in enumerate(image_slices(slice, 1)):\n target.append(i)\n images.append(character)\n \n return images, target", "def load_images(tags_pict):\n img_data_list = []\n for p in tags_pict.index :\n img_path = tags_pict.full_path[p]\n img = load_img(img_path, target_size= inputShape)\n x = img_to_array(img)\n x = np.expand_dims(img, axis=0)\n # pre-process the image using the appropriate function based on the\n # model that has been loaded (i.e., mean subtraction, scaling, etc.)\n x = preprocess_input(x)\n img_data_list.append(x)\n img_data = np.array(img_data_list)\n img_data=np.rollaxis(img_data,1,0)\n img_data=img_data[0]\n return(img_data)", "def load_dataset(path_test, width, height):\n tot_images = 0\n for label in listdir(path_test):\n label_full = join(path_test, label)\n for img_name in listdir(label_full):\n tot_images += 1\n\n # allocate 
the memory\n # THE DTYPE is float, should be the right one\n all_images = np.zeros((tot_images, width, height, 3))\n\n true_labels = []\n num_images = 0\n for label in listdir(path_test):\n label_full = join(path_test, label)\n for img_name in listdir(label_full):\n # for img_name in listdir(label_full)[:10]:\n img_name_full = join(label_full, img_name)\n print(f\"Opening {img_name_full} {width}\")\n\n image = cv2.imread(img_name_full)\n\n image = cv2.resize(image, (width, height))\n\n # scale the pixel values to [0, 1]\n image = image.astype(\"float\") / 255.0\n\n all_images[num_images, :, :, :] = image\n\n num_images += 1\n true_labels.append(label)\n\n print(f\"All_images.shape {all_images.shape}\")\n\n # cv2.imshow('Resized all_images[0]', all_images[0])\n # cv2.waitKey(0)\n\n return all_images, true_labels", "def main():\n labels, data = load_image_data()\n print(labels.shape, data.shape)", "def _read_datafile(self,path):\n \tlabels, images = [], []\n \twith gzip.GzipFile(path) as f:\n \t for line in f:\n \t vals = line.strip().split()\n \t labels.append(float(vals[0]))\n \t images.append([float(val) for val in vals[1:]])\n \tlabels = np.array(labels, dtype=np.int32)\n \tlabels[labels == 10] = 0 # fix weird 0 labels\n \timages = np.array(images, dtype=np.float32).reshape(-1, 16, 16, 1)\n \timages = (images + 1) / 2\n \treturn images, labels", "def load_data_1d(path, dset):\n labels, imgs, _, _ = load_data(path, dset)\n print(\"images.shape=%s, labels.shape=%s\" % (imgs.shape, labels.shape))\n return labels, imgs", "def import_image(filepath):\n \n im_path = os.path.join(str(filepath))\n im_files = glob.glob(im_path)\n im_col = []\n \n for imc in im_files:\n #imc = plt.imread(img, 0) # changed from cv2.imread\n img = cv2.imread(imc, cv2.COLOR_BGR2GRAY)\n imcg = np.array(img)\n try:\n xy = np.shape(imcg) #added\n except:\n continue\n imgr = imcg.reshape((xy[0]*xy[1],)) #added\n imgr = np.array(imgr)\n im_col.append(imgr.transpose())\n \n return(im_col)", "def load_test_data(image_path):\n raw = []\n image_filename = dict()\n count = 0\n for filename in glob.glob(image_path):\n name = os.path.basename(filename)[:-4]\n try:\n im = Image.open(filename)\n im = im.convert('L')\n im = im.resize((img_rows, img_cols))\n raw.append(np.array(im))\n image_filename[count] = name\n count += 1\n im.close()\n except IOError:\n print('Error loading image ', filename)\n return [raw, image_filename]", "def create_feature_matrix1(imagepath, H,W):\r\n \r\n features_list = []\r\n imagelist =os.listdir(imagepath)\r\n\r\n print(len(imagelist))\r\n for image in imagelist:\r\n # load image\r\n img = cv2.imread(os.path.join(imagepath,image),0)\r\n img_resized = cv2.resize(img,(H,W)) # Resizing input image\r\n features = img_resized.flatten()\r\n features_list.append(features)\r\n\r\n return features_list", "def extract_feature(network_proto_path,\n network_model_path,\n image_list, data_mean, layer_name, image_as_grey = False):\n net = caffe.Net(network_proto_path, network_model_path, caffe.TEST)\n transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})\n transformer.set_input_scale('data', 1)\n transformer.set_transpose('data', (2, 0, 1))\n blobs = OrderedDict([(k, v.data) for k, v in net.blobs.items()])\n\n shp = blobs[layer_name].shape\n print blobs['data'].shape\n\n batch_size = blobs['data'].shape[0]\n print blobs[layer_name].shape\n\n features_shape = (len(image_list), shp[1])\n features = np.empty(features_shape, dtype='float32', order='C')\n for idx, path in 
zip(range(features_shape[0]), image_list):\n img = caffe.io.load_image(path, color=False)\n prob = net.forward_all(data=np.asarray([transformer.preprocess('data', img)]))\n print np.shape(prob['prob'])\n blobs = OrderedDict([(k, v.data) for k, v in net.blobs.items()])\n features[idx, :] = blobs[layer_name][0, :].copy()\n print '%d images processed' % (idx + 1)\n features = np.asarray(features, dtype='float32')\n return features", "def load(self, filename):\n data = np.load(temp_dir + '/' + filename + '.npz')\n return data['chip_ids'], data['core_ids'], data['cx_ids']", "def load_faces(path, ext=\".pgm\"):\n \n #\n # You code here\n #\n \n images = []\n img_shape = (0, 0)\n\n for root, dirs, files in os.walk(path):\n for file in files:\n if ext in file: # check if file is of pgm-type\n img_path = os.path.join(root, file)\n img = plt.imread(img_path) # Read the image\n img_shape = img.shape\n img = img.flatten() # Transform 2D image into vector M = height x width\n images.append(img)\n\n img_array = np.asarray(images) \n\n return img_array, img_shape", "def get_data(folder):\n X = []\n y = []\n for folderName in os.listdir(folder):\n if not folderName.startswith('.'):\n if folderName in ['NORMAL']:\n label = 0\n elif folderName in ['CNV']:\n label = 1\n elif folderName in ['DME']:\n label = 2\n elif folderName in ['DRUSEN']:\n label = 3\n else:\n label = 4\n for image_filename in tqdm(os.listdir(folder + folderName)):\n img_file = cv2.imread(folder + folderName + '/' + image_filename)\n if img_file is not None:\n img_file = skimage.transform.resize(img_file, (imageSize, imageSize, 3))\n img_arr = np.asarray(img_file)\n X.append(img_arr)\n y.append(label)\n X = np.asarray(X)\n y = np.asarray(y)\n return X,y", "def load_images(filelist):\n # pixel value range 0-255\n if not isinstance(filelist, list):\n im = Image.open(filelist).convert('L')\n return np.array(im).reshape(1, im.size[1], im.size[0], 1)\n data = []\n for file in filelist:\n im = Image.open(file).convert('L')\n data.append(np.array(im).reshape(1, im.size[1], im.size[0], 1))\n return data", "def get_features(files):\n files = files.tolist()\n return np.array([pipeline(file) for file in files])", "def load_geojsons(filepath):\n jsons = sorted(glob(os.path.join(filepath, '*.json')))\n features = []\n for json_path in tqdm(jsons, desc='loading geojson files'):\n with open(json_path) as f:\n data_dict = json.load(f)\n features.append(data_dict)\n\n obj_coords = list()\n image_ids = list()\n class_indices = list()\n class_names = list()\n\n for feature in tqdm(features, desc='extracting features'):\n for i in range(len(feature['object'])):\n if feature['object'][i]['label'] != 'gbg':\n try:\n image_ids.append(feature['file_name'])\n obj_coords.append(feature['object'][i]['box'])\n except:\n image_ids.append(feature['filename'])\n obj_coords.append(feature['object'][i]['points'])\n\n class_indices.append(int(feature['object'][i]['label'][-1])-1)\n class_names.append(feature['object'][i]['label'])\n \n return image_ids, obj_coords, class_indices, class_names", "def extract_images(filename):\n\tprint('Extracting', filename)\n\twith gzip.open(filename) as bytestream:\n\t\tmagic = _read32(bytestream)\n\t\tif magic != 2051:\n\t\t\traise ValueError('Invalid magic number %d in MNIST image file: %s' %(magic, filename))\n\t\tnum_images = _read32(bytestream)\n\t\trows = _read32(bytestream)\n\t\tcols = _read32(bytestream)\n\t\tbuf = bytestream.read(rows * cols * num_images)\n\t\tdata = numpy.frombuffer(buf, dtype=numpy.uint8)\n\t\tdata = 
data.reshape(num_images, rows, cols, 1)\n\t\treturn data", "def import_data(self, img_size):\n path = self._path\n images = []\n labels = []\n\n categs_name = [filename for filename in os.listdir(path)]\n for categ in categs_name:\n if isdir(join(path, categ)):\n\n for img_name in os.listdir(join(path, categ)):\n\n if \".jpg\" in img_name:\n\n img_name = self.correct_filename(img_name, categ)\n img_path = join(path, categ, img_name)\n img = cv2.imread(img_path)\n\n if img_size:\n dim = (img_size, img_size)\n try:\n img = cv2.resize(img, dim)\n except:\n print(img_name, \"has not been loaded.\")\n continue\n\n images.append(img)\n labels.append(categ)\n\n X = np.array(images)\n y = self.transform_labels(labels)\n\n return X, y", "def load_features(file):\n data = np.load(file, allow_pickle=True)\n return data", "def extract_images(filename):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n magic = _read32(bytestream)\n if magic != 2051:\n raise ValueError(\n 'Invalid magic number %d in MNIST image file: %s' %\n (magic, filename))\n num_images = _read32(bytestream)\n rows = _read32(bytestream)\n cols = _read32(bytestream)\n buf = bytestream.read(rows * cols * num_images)\n data = np.frombuffer(buf, dtype=np.uint8)\n data = data.reshape(num_images, rows, cols, 1)\n return data", "def image_layers() -> List:\n return [\n \"sha256:94b2db70f7476c98f4c4a1b7a922136e0c5600d2d74905407ad364dcca2bf852\",\n \"sha256:22426f366c51f26105aa9a6c6c9aea9fff0f21b7aabfc97870727577edaa3260\",\n ]", "def loadDataset(dataset):\n # List of images.\n images = []\n\n\n\n # Read all filenames from the dataset.\n for filename in dataset:\n # Read the input image.\n image = cv2.imread(filename)\n\n # Add the current image on the list.\n if image is not None: \n images.append(image)\n else:\n print(\"Could not read file: {}\".format(filename))\n sys.exit()\n\n # Return the images list.\n return images", "def extract_images(filename):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n magic = _read32(bytestream)\n if magic != 2051:\n raise ValueError(\n 'Invalid magic number %d in MNIST image file: %s' %\n (magic, filename))\n num_images = _read32(bytestream)[0]\n rows = _read32(bytestream)[0]\n cols = _read32(bytestream)[0]\n #print('check', magic, num_images, rows, cols, rows * cols * num_images)\n buf = bytestream.read(rows * cols * num_images)\n data = numpy.frombuffer(buf, dtype=numpy.uint8)\n data = data.reshape(num_images, rows, cols, 1)\n return data", "def extract_labels(filename, num_images):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(8)\n buf = bytestream.read(1 * num_images)\n labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64)\n return labels", "def read_batch(self):\n imgs = []\n labels = []\n idx = np.random.choice(self.nImgs,self.batch_size)\n \tfor i in idx:\n imgs.append(cv2.imread(self.data_files[i]))\n \t labels.append(cv2.imread(self.label_files[i]))\n \timgs,labels = np.array(imgs),np.array(labels)\n imgs = (imgs - self.mean)/self.stddev\n \tlabels = (labels - self.mean)/self.stddev\n return imgs,labels", "def read_img(filename) -> Tuple[np.ndarray, np.ndarray]:\n img = cv2.imread(filename, 3)\n labimg = cv2.cvtColor(cv2.resize(img, (config.IMAGE_SIZE, config.IMAGE_SIZE)), cv2.COLOR_BGR2Lab)\n return np.reshape(labimg[:, :, 0], (config.IMAGE_SIZE, config.IMAGE_SIZE, 1)), labimg[:, :, 1:]", "def load_data(file_name):\n data = np.load(file_name, allow_pickle=True)\n\n X, y = [], []\n\n for 
mfccs, label in data:\n X.append(mfccs)\n y.append(label)\n\n X = np.array(X)\n y = np.array(y)\n\n X = X.reshape(*X.shape, 1)\n y = y.reshape(-1, 1)\n\n return X, y", "def get_features_images(image_paths, hyperparams):\n # Create a list to append feature vectors to\n features = []\n # Iterate through the list of images\n for file in image_paths:\n # Read in each one by one\n image = mpimg.imread(file)\n # Get features\n f = get_features_image(image, hyperparams)\n features.append(f)\n # Return list of feature vectors\n return features", "def extract_data(filename, num_images, starting_id, context_factor):\n imgs = []\n for i in range(starting_id, num_images+starting_id):\n imageid = \"satImage_%.3d\" % i\n image_filename = filename + imageid + \".png\"\n if os.path.isfile(image_filename):\n print ('Loading ' + image_filename)\n img = mpimg.imread(image_filename)\n\n\n imgs.append(img)\n else:\n print ('File ' + image_filename + ' does not exist')\n\n num_images = len(imgs)\n IMG_WIDTH = int(imgs[0].shape[0]/DOWNSCALE)\n IMG_HEIGHT = int(imgs[0].shape[1]/DOWNSCALE)\n N_PATCHES_PER_IMAGE = (IMG_WIDTH/IMG_PATCH_SIZE)*(IMG_HEIGHT/IMG_PATCH_SIZE)\n\n\n img_patches = [img_crop_context(imgs[i], IMG_PATCH_SIZE, IMG_PATCH_SIZE,context_factor, sub_mean=True) for i in range(num_images)]\n data = [img_patches[i][j] for i in range(len(img_patches)) for j in range(len(img_patches[i]))]\n data = np.asarray(data)\n return data", "def _extract_array(tiffs: list[np.ndarray], idx: int, shape: tuple[int, ...], dtype: type | np.dtype) -> np.ndarray:\n feature_arrays = (np.atleast_3d(img)[..., idx] for img in tiffs)\n return np.asarray(list(feature_arrays), dtype=dtype).reshape(*shape, 1)", "def extract_labels(filename, num_images):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(8)\n buf = bytestream.read(1 * num_images)\n labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64)\n return labels", "def load_images(input_dir, batch_shape=[2000,299,299,3]):\n \n filenames = []\n idx = 0\n filepaths=tf.gfile.Glob(os.path.join('./', '*.png'))\n print(len(filepaths))\n print(filepaths)\n batch_shape[0]=len(filepaths)\n batch_size = batch_shape[0]\n print(batch_shape)\n print(\"ZZZ\")\n images = np.zeros(batch_shape, dtype=np.float32)\n \n for filepath in filepaths:\n# with tf.gfile.Open(filepath) as f:\n# image = np.array(Image.open(f).convert('RGB')).astype(np.float) / 255.0\n image = np.array(scipy.misc.imresize(scipy.misc.imread(filepath),(299,299)),dtype=np.float32)/255\n \n # Images for inception classifier are normalized to be in [-1, 1] interval.\n images[idx, :, :, :] = image -0.5 #* 2.0 - 1.0\n filenames.append(os.path.basename(filepath))\n idx += 1\n if idx == batch_size:\n return filenames, images\n filenames = []\n images = np.zeros(batch_shape)\n idx = 0\n if idx > 0:\n return filenames, images", "def extract_images(f):\n print('Extracting', f.name)\n with gzip.GzipFile(fileobj=f) as bytestream:\n magic = _read32(bytestream)\n if magic != 2051:\n raise ValueError('Invalid magic number %d in MNIST image file: %s' %(magic, f.name))\n num_images = _read32(bytestream)\n rows = _read32(bytestream)\n cols = _read32(bytestream)\n buf = bytestream.read(rows * cols * num_images)\n data = np.frombuffer(buf, dtype=np.uint8)\n data = data.reshape(num_images, rows, cols, 1)\n return data", "def extract_images(f):\n with gzip.GzipFile(fileobj=f) as bytestream:\n magic = _read32(bytestream)\n if magic != 2051:\n raise ValueError('Invalid magic number %d in MNIST image 
file: %s' %\n (magic, f.name))\n num_images = _read32(bytestream)\n rows = _read32(bytestream)\n cols = _read32(bytestream)\n buf = bytestream.read(rows * cols * num_images)\n data = numpy.frombuffer(buf, dtype=numpy.uint8)\n data = data.reshape(num_images, rows, cols, 1)\n return data", "def extract_labels(filename, num_images):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(8)\n buf = bytestream.read(1 * num_images)\n labels = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.int64)\n return labels", "def extract_features(\n img,\n n_sigmas,\n multichannel=True,\n intensity=True,\n edges=True,\n texture=True,\n sigma_min=0.5,\n sigma_max=16,\n):\n if multichannel: #img.ndim == 3 and multichannel:\n all_results = (\n extract_features_2d(\n dim,\n img[..., dim],\n n_sigmas,\n intensity=intensity,\n edges=edges,\n texture=texture,\n sigma_min=sigma_min,\n sigma_max=sigma_max,\n )\n for dim in range(img.shape[-1])\n )\n features = list(itertools.chain.from_iterable(all_results))\n else:\n features = extract_features_2d(0,\n img,\n n_sigmas,\n intensity=intensity,\n edges=edges,\n texture=texture,\n sigma_min=sigma_min,\n sigma_max=sigma_max,\n )\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Feature extraction complete')\n\n logging.info('percent RAM usage: %f' % (psutil.virtual_memory()[2]))\n logging.info('Memory mapping features to temporary file')\n\n features = memmap_feats(features)\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('percent RAM usage: %f' % (psutil.virtual_memory()[2]))\n\n return features #np.array(features)", "def load_img(file_list, dir_path):\n data = []\n for file in file_list:\n img = plt.imread(dir_path + file)\n # Convert RGB image to grayscale\n if len(img.shape) == 3:\n img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n # Resize image to desired size\n img = cv2.resize(img, (64, 64))\n # Store processed image to list\n data.append(img)\n return np.array(data)", "def extract_images(f):\n print('Extracting', f.name)\n with gzip.GzipFile(fileobj=f) as bytestream:\n magic = _read32(bytestream)\n if magic != 2051:\n raise ValueError('Invalid magic number %d in MNIST image file: %s' %\n (magic, f.name))\n num_images = _read32(bytestream)\n rows = _read32(bytestream)\n cols = _read32(bytestream)\n buf = bytestream.read(rows * cols * num_images)\n data = np.frombuffer(buf, dtype=np.uint8)\n data = data.reshape(num_images, rows, cols, 1)\n return data", "def layers(self, features: Dict[str, torch.Tensor]) -> torch.Tensor:\n features: torch.Tensor = features[self.in_feature] # type: ignore[no-redef]\n decoded_images = self.network(features)[\"out\"]\n decoded_images = self.reshape(decoded_images)\n return decoded_images", "def load_dataset_into_numpy_array(img_path, mode=\"int32\"):\n files = os.listdir(img_path)\n result = np.asarray([])\n for file in files:\n result = np.concatenate(result, load_image_into_numpy_array(img_path + \"/\" + file, mode).reshape((-1, 1)))\n return result", "def imread(filename):\n return np.asarray(Image.open(filename), dtype=np.uint8)[..., :3]", "def get_images(image_folder_root, image_label_list):\n file_dcm=[]\n X = []\n y = []\n for file_name,label in image_label_list:\n try:\n current_file = pydicom.dcmread(image_folder_root + file_name + '.dcm')\n pixel_array = current_file.pixel_array\n if (pixel_array.shape != (512,512)):\n continue\n file_dcm.append((file_name,label,brain_window(current_file)))\n y.append(label)\n 
X.append(pydicom.dcmread(image_folder_root + file_name + '.dcm').pixel_array)\n except ValueError:\n continue\n return X,y", "def extract_images(f):\n print('Extracting', f.name)\n with gzip.GzipFile(fileobj=f) as bytestream:\n magic = _read32(bytestream)\n if magic != 2051:\n raise ValueError('Invalid magic number %d in MNIST image file: %s' %\n (magic, f.name))\n num_images = _read32(bytestream)\n rows = _read32(bytestream)\n cols = _read32(bytestream)\n buf = bytestream.read(rows * cols * num_images)\n data = np.frombuffer(buf, dtype=np.uint8)\n data = data.reshape(num_images, rows, cols, 1)\n return data", "def read_local(path):\n files = os.listdir(path)\n imgs = []\n for f in files:\n if f.endswith(\".tiff\") or f.endswith(\".tif\"):\n img = Image.open(os.path.join(path, f))\n imgs.append(np.array(img))\n return imgs", "def load_data():\n X = load_pickle(config['image_paths']['train_images_pickle'])\n y = load_train_labels()\n y = to_categorical(y)\n test_indices = np.random.choice(len(X), int(len(X) * float(config['model']['test_size'])), replace=False)\n X_train = np.asarray([e for idx, e in enumerate(X) if idx not in test_indices])\n X_test = np.asarray([e for idx, e in enumerate(X) if idx in test_indices])\n y_train = np.asarray([e for idx, e in enumerate(y) if idx not in test_indices])\n y_test = np.asarray([e for idx, e in enumerate(y) if idx in test_indices])\n return X_train, y_train, X_test, y_test", "def load_ipl_as_array(path): \n img = PIL.Image.open(path).convert('RGBA')\n img = np.array(img)\n return img", "def _extract_images(self, filename):\n log.info('Extracting', filename)\n with gzip.open(filename) as bytestream:\n magic = self._read32(bytestream)\n if magic != 2051:\n raise ValueError(\n 'Invalid magic number %d in MNIST image file: %s' %\n (magic, filename))\n num_images = self._read32(bytestream)\n rows = self._read32(bytestream)\n cols = self._read32(bytestream)\n buf = bytestream.read(rows * cols * num_images)\n data = np.frombuffer(buf, dtype=np.uint8)\n data = data.reshape(num_images, rows, cols, 1)\n return data", "def extract_labels(filename, num_images):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(8)\n buf = bytestream.read(1 * num_images)\n labels = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.int64)\n return labels", "def load_color_image_features(img_path):\n ac = scipy.misc.imread(img_path, mode='RGB')\n ac = ac / (255.0 / 2) - 1.0\n return np.array(ac)", "def load_mnist(path, kind='train'):\n labels_path = os.path.join(path,'%s-labels-idx1-ubyte.gz'% kind)\n\n images_path = os.path.join(path,'%s-images-idx3-ubyte.gz'% kind)\n\n with gzip.open(labels_path, 'rb') as lbpath:\n labels = np.frombuffer(lbpath.read(), dtype=np.uint8,offset=8)\n\n with gzip.open(images_path, 'rb') as imgpath:\n images = np.frombuffer(imgpath.read(), dtype=np.uint8,offset=16).reshape(len(labels), 784)\n\n print(\"Dataset Loaded\")\n \n return images, labels", "def load_data_list(self):\n\n data = mat4py.loadmat(self.ann_file)['images']\n names = data['name']\n labels = data['class']\n parts = data['set']\n num = len(names)\n assert num == len(labels) == len(parts), 'get error ann file'\n\n if self.split == 'train':\n target_set = {1}\n elif self.split == 'val':\n target_set = {2}\n elif self.split == 'test':\n target_set = {3}\n else:\n target_set = {1, 2}\n\n data_list = []\n for i in range(num):\n if parts[i] in target_set:\n img_name = names[i]\n img_path = self.backend.join_path(self.img_prefix, img_name)\n 
gt_label = labels[i] - 1\n info = dict(img_path=img_path, gt_label=gt_label)\n data_list.append(info)\n\n return data_list" ]
[ "0.71335703", "0.6792827", "0.67380095", "0.67065763", "0.6583074", "0.6564765", "0.6550392", "0.6415319", "0.6408479", "0.639627", "0.63891566", "0.63802946", "0.6377724", "0.6368443", "0.6361748", "0.6327807", "0.6314867", "0.6306744", "0.629919", "0.62990034", "0.6297572", "0.6293031", "0.6291399", "0.62908316", "0.62580985", "0.6245388", "0.6244261", "0.6240537", "0.62355983", "0.6207915", "0.6203868", "0.6203755", "0.61935234", "0.6191838", "0.61807287", "0.61805123", "0.6168639", "0.6165186", "0.6141629", "0.6136401", "0.6136296", "0.6103863", "0.6094339", "0.6088846", "0.6077642", "0.60761666", "0.60696507", "0.60589933", "0.6049947", "0.6045991", "0.60378236", "0.6032944", "0.6026105", "0.60161096", "0.6012589", "0.6002751", "0.59946257", "0.59897053", "0.59888446", "0.59831864", "0.59761506", "0.5970272", "0.59692055", "0.5968745", "0.5965322", "0.5960021", "0.59571326", "0.5949282", "0.5945302", "0.5937145", "0.5925179", "0.5925056", "0.5919622", "0.5916078", "0.5913234", "0.5900391", "0.58978313", "0.5895109", "0.5890734", "0.58842075", "0.5881016", "0.58793885", "0.5866042", "0.5860409", "0.5857227", "0.5853294", "0.58494943", "0.5843144", "0.5839854", "0.5834707", "0.5833914", "0.5832586", "0.5830041", "0.58284974", "0.5826449", "0.5823433", "0.58233744", "0.58206266", "0.58200514", "0.5817328", "0.5815323" ]
0.0
-1
adjust nodes and edges
def add_graphics_theme_to_nx_graph( nx_graph, edge_color=None, node_size_factor=50, edge_size_factor=500): # node size, stroke for node_name, node_attrs in nx_graph.nodes(data=True): #node_size = nx_graph.nodes[node_name]["numexamples"] / float(node_size_factor) #node_size = nx_graph.nodes[node_name]["numexamples"] / float(nx_graph.graph["numexamples"]) #node_size *= node_size_factor node_size = 100 graphics = { "type": "ellipse", "w": node_size, "h": node_size, "fill": "#FFFFFF", "outline": "#000000", "width": 1.0, "fontSize": 14 } if nx_graph.nodes[node_name].get("graphics") is not None: nx_graph.nodes[node_name]["graphics"].update(graphics) else: nx_graph.nodes[node_name]["graphics"] = graphics # edges for start_node, end_node in nx_graph.edges(): for edge_idx in xrange(len(nx_graph[start_node][end_node])): #edge_width = nx_graph[start_node][end_node][edge_idx]["numexamples"] / float( # edge_size_factor) #edge_width = nx_graph[start_node][end_node][edge_idx]["numexamples"] / float( # nx_graph.graph["numexamples"]) #edge_width *= edge_size_factor edge_width = 1.0 graphics = { "type": "arc", "width": edge_width, "targetArrow": "delta" } if edge_color is not None: graphics["fill"] = edge_color if nx_graph[start_node][end_node][edge_idx].get("graphics") is not None: nx_graph[start_node][end_node][edge_idx]["graphics"].update(graphics) else: nx_graph[start_node][end_node][edge_idx]["graphics"] = graphics return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update(self):\n\n for node in self.nodes:\n for edge in node.edges:\n for i, edge_node in enumerate(edge.nodes):\n if edge_node.id != node.id:\n edge_node.add_edge(edge)\n\n return self", "def clean_edges(self):", "def update_cell_edges(self):\n self.cells['edges'] = -1\n for c in range(self.Ncells()):\n for i,(a,b) in enumerate(circular_pairs(self.cell_to_nodes(c))):\n self.cells['edges'][c,i] = self.nodes_to_edge(a,b)", "def Adjust(self):\r\n if not self.srcNode() or not self.destNode():\r\n return \r\n\r\n self.prepareGeometryChange()\r\n\r\n self.setSrcPoint(self.mapFromItem(self.srcNode(), self.srcNode().outputConnectionPoint()))\r\n self.setDestPoint(self.mapFromItem(self.destNode(), self.destNode().inputConnectionPoint()))", "def add_edges(self):\n for u in self.G.nodes():\n for v in self.G.nodes():\n if u != v and u != \"Sink\" and v != \"Source\":\n self.G.add_edge(\n u, v, cost=self.manhattan(u, v), time=self.manhattan(u, v)\n )", "def set_node_positions(self):", "def add_adj_nodes(self):\n\n for x, row in enumerate(self.grid):\n for y, cell in enumerate(row):\n if x-1 >= 0:\n cell.above = self.grid[x-1][y]\n if y+1 < len(self.grid[0]):\n cell.right = self.grid[x][y+1]\n if x+1 < len(self.grid):\n cell.below = self.grid[x+1][y]\n if y-1 >= 0:\n cell.left = self.grid[x][y-1]", "def set_edge_rows_cols(col, edges_lst, row, weight):\n if row == 0 and (col != 0 and col != COLS - 1): # first row but not the edges\n edges_lst.append((get_node_name(0, col), get_node_name(0, col - 1), weight))\n edges_lst.append((get_node_name(0, col), get_node_name(1, col), weight))\n edges_lst.append((get_node_name(0, col), get_node_name(0, col + 1), weight))\n elif row == ROWS - 1 and (col != 0 and col != COLS - 1): # last row but not the edges\n edges_lst.append((get_node_name(ROWS - 1, col), get_node_name(ROWS - 1, col - 1), weight))\n edges_lst.append((get_node_name(ROWS - 1, col), get_node_name(ROWS - 2, col), weight))\n edges_lst.append((get_node_name(ROWS - 1, col), get_node_name(ROWS - 1, col + 1), weight))\n elif col == 0 and (row != 0 and row != ROWS - 1): # first col but not edges\n edges_lst.append((get_node_name(row, 0), get_node_name(row - 1, 0), weight))\n edges_lst.append((get_node_name(row, 0), get_node_name(row, 1), weight))\n edges_lst.append((get_node_name(row, 0), get_node_name(row + 1, 0), weight))\n elif col == COLS - 1 and (row != 0 and row != ROWS - 1): # last col but not edges\n edges_lst.append((get_node_name(row, COLS - 1), get_node_name(row - 1, COLS - 1), weight))\n edges_lst.append((get_node_name(row, COLS - 1), get_node_name(row, COLS - 2), weight))\n edges_lst.append((get_node_name(row, COLS - 1), get_node_name(row + 1, COLS - 1), weight))", "def wsngraph():\n G = nx.Graph()\n G.add_node(1)\n G.add_node(2)\n G.add_node(3)\n G.add_node(4)\n G.add_node(5)\n G.add_node(6)\n G.add_node(7)\n G.add_node(8)\n G.add_node(9)\n G.add_node(10)\n G.add_node(11)\n G.add_node(12)\n G.add_edge(1,3,weight=1)\n G.add_edge(1,2,weight=6)\n G.add_edge(1,12,weight=16)\n G.add_edge(2,11,weight=12)\n G.add_edge(2,6,weight=10)\n G.add_edge(2,5,weight=11)\n G.add_edge(3,4,weight=10)\n G.add_edge(3,7,weight=11)\n G.add_edge(3,8,weight=14)\n G.add_edge(3,9,weight=11)\n G.add_edge(4,7,weight=9)\n G.add_edge(5,6,weight=7)\n G.add_edge(5,9,weight=12)\n G.add_edge(6,9,weight=9)\n G.add_edge(7,10,weight=10)\n G.add_edge(8,10,weight=2)\n G.add_edge(8,11,weight=11)\n G.add_edge(8,9,weight=12)\n G.add_edge(9,11,weight=8)\n G.add_edge(10,12,weight=3)\n G.pos={}\n G.pos[1]=(6,4)\n G.pos[2]=(-1,3.7)\n 
G.pos[3]=(4.7,3.5)\n G.pos[4]=(5.3,3.2)\n G.pos[5]=(0,3)\n G.pos[6]=(1.4,3.4)\n G.pos[7]=(5,2.6)\n G.pos[8]=(4.7,0)\n G.pos[9]=(1.4,2.4)\n G.pos[10]=(5.2,0.5)\n G.pos[11]=(1.3,0)\n G.pos[12]=(6,2.4)\n elarge=[(u,v) for (u,v,d) in G.edges(data=True) if d['weight'] > 8]\n esmall=[(u,v) for (u,v,d) in G.edges(data=True) if d['weight'] <= 8]\n nx.draw_networkx_nodes(G,G.pos,node_color='w')\n nx.draw_networkx_edges(G,G.pos,elarge,width=3,edge_color='r',alpha=0.3)\n nx.draw_networkx_edges(G,G.pos,esmall,width=1,edge_color='b',alpha=0.3)\n nx.draw_networkx_labels(G,G.pos)\n ax=plt.gca()\n ax.axison = False\n label = {} \n for (u,v) in G.edges():\n d = G.get_edge_data(u,v)\n label[(u,v)]=d['weight']\n edge_label=nx.draw_networkx_edge_labels(G,G.pos,edge_labels=label)\n\n return(G)", "def merge_edges(self,edges=None,node=None):\n if edges is None:\n edges=self.node_to_edges(node)\n assert len(edges)==2\n if node is None:\n Na=self.edge_to_nodes(edges[0])\n Nb=self.edge_to_nodes(edges[1])\n for node in Na:\n if node in Nb:\n break\n else:\n raise self.GridException(\"Edges %s do not share a node\"%(edges))\n A,C=edges\n B=node\n # which side is which?\n if self.edges['nodes'][A,0] == B:\n Ab=0\n else:\n Ab=1\n if self.edges['nodes'][C,0] == B:\n Cb=0\n else:\n Cb=1\n\n # safety checks - respective sides of the edges should be compatible.\n # left side cells, in the sense of looking from A to C\n assert self.edges['cells'][A,1-Ab] == self.edges['cells'][C,Cb]\n assert self.edges['cells'][A,Ab] == self.edges['cells'][C,1-Cb]\n\n # cell/edge invariants do not hold for a brief moment\n # this could be a problem if modify_cell tries to update a lookup\n # for edges. May have to revisit.\n for c in self.edges['cells'][A]:\n if c>=0: # it's a real cell\n c_nodes=[n\n for n in self.cell_to_nodes(c)\n if n!=B ]\n self.modify_cell(c,nodes=c_nodes)\n\n # Edge A will be the one to keep\n # modify_edge knows about changes to nodes\n new_nodes=[ self.edges['nodes'][A,1-Ab],\n self.edges['nodes'][C,1-Cb] ]\n if Ab==0: # take care to preserve orientation\n new_nodes=new_nodes[::-1]\n\n self.delete_edge(C)\n # expanding modify_edge into a delete/add allows\n # a ShadowCDT to maintain valid state\n # self.modify_edge(A,nodes=new_nodes)\n # be careful to copy A's entries, as they will get overwritten\n # during the delete/add process.\n edge_data=rec_to_dict(self.edges[A].copy())\n\n self.delete_edge(A)\n self.delete_node(B)\n edge_data['nodes']=new_nodes\n self.add_edge(_index=A,**edge_data)\n return A", "def update_node2edge(self):\n self.node2edge = {e.child : e for e in self.edge}\n childrenset = set(self.node2edge.keys())\n rootset = set(e.parent for e in self.edge).difference(childrenset)\n if len(rootset) > 1:\n raise Warning(\"there should be a single root: \" + str(rootset))\n if len(rootset) == 0:\n raise Exception(\"there should be at least one root!\")\n self.root = rootset.pop()", "def clean_edges(self):\n for from_node in self.all_nodes():\n for to_node in self.all_nodes():\n if from_node == to_node:\n continue\n dup = list(filter(lambda x: x.from_node == from_node and x.to_node == to_node, self.edges))\n if len(dup) > 1:\n for d in dup[1:]:\n self.edges.remove(d)", "def cfdProcessNodeTopology(self):\r\n self.nodeElements = self.cfdInvertConnectivity(self.elementNodes)\r\n self.nodeFaces = self.cfdInvertConnectivity(self.faceNodes)", "def add_all_edges(self):\n for n1 in self.vertices():\n for n2 in self.vertices():\n if n1 != n2:\n self.add_edge((n1, n2))", "def _build_graph_general(self): \n\n #Find a 
canonical coloring scheme\n #Each node has a color that is determined by the non-mapped aspects\n nodecolors=set()\n for nl in self.net.iter_node_layers():\n nodecolors.add(self._slice_node_layer_not_allowed(nl))\n nodecolors_sorted=sorted(list(nodecolors))\n del nodecolors\n self._assert_full_order(nodecolors_sorted)\n self.colormap=dict( ((color,colorid) for colorid,color in enumerate(nodecolors_sorted) ))\n\n #each aux node has a color that is determined by the aspect\n self.auxcolormap=dict( ((auxcolor, auxcolorid+len(self.colormap)) for auxcolorid,auxcolor in enumerate(sorted(self.asp)) ) )\n\n\n #Add the underlying network\n #node-layers:\n for nl in self.net.iter_node_layers():\n nlid=self._get_node_id(nl)\n color=self._slice_node_layer_not_allowed(nl)\n colorid=self.colormap[color]\n self.add_node(nlid,colorid)\n\n #edges between node-layers:\n for nl1 in self.net.iter_node_layers():\n for nl2 in self.net[nl1]:\n nl1id=self._get_node_id(nl1)\n nl2id=self._get_node_id(nl2)\n self.add_link(nl1id,nl2id)\n\n\n #Add the auxiliary nodes and edges\n #add the aux nodes\n for a in self.asp:\n for elayer in self.net.slices[a]:\n auxid=self._get_auxnode_id( (a,elayer) )\n auxcolorid=self.auxcolormap[a]\n self.add_node(auxid,auxcolorid)\n \n #add the aux edges\n for nl in self.net.iter_node_layers():\n for a in self.asp:\n nlid=self._get_node_id(nl)\n auxid=self._get_auxnode_id( (a,nl[a]) )\n self.add_link(nlid,auxid)", "def set_right_edges(self):\n for v in self:\n for e in v.edges_list:\n e.linked[0]=v\n e.linked[1]=self[self.search_index_by_coordinates(e.linked[1].coordinates)]\n for e in self.list_of_edges:\n e.linked[0]=self[self.search_index_by_coordinates(e.linked[0].coordinates)]\n e.linked[1]=self[self.search_index_by_coordinates(e.linked[1].coordinates)]", "def multipleInBetweenLayerEdgesIntoNodeWithNoFixedPortOrder(self):\n graph = self.graph\n makeLayer = self.makeLayer\n addNodesToLayer = self.addNodesToLayer\n eastWestEdgeFromTo = self.eastWestEdgeFromTo\n\n leftLayer = makeLayer(graph)\n leftNodes = addNodesToLayer(2, leftLayer)\n rightLayer = makeLayer(graph)\n rightNodes = addNodesToLayer(2, rightLayer)\n\n self.addInLayerEdge(rightNodes[0], rightNodes[1], PortSide.WEST)\n eastWestEdgeFromTo(leftNodes[0], rightNodes[1])\n eastWestEdgeFromTo(leftNodes[0], rightNodes[1])\n\n return graph", "def add_resets_edges(graph, start):\n for node in graph.nodes:\n neighbors = list(graph[node])\n if neighbors == [node]:\n graph.add_edge(node, start, label=\"RESET / \")", "def add_edges(self, new_edges):\n self.edges = self._edges + ensure_list(new_edges)\n for (nd_out, nd_in) in ensure_list(new_edges):\n self.predecessors[nd_in.name].append(nd_out)\n self.successors[nd_out.name].append(nd_in)\n if self._sorted_nodes is not None:\n # starting from the previous sorted list, so it's faster\n self.sorting(presorted=self.sorted_nodes + [])", "def _build_edges(self, build_edge_to : Callable[['SpatialGraph.Node'], List[int]]) -> None:\n for node in self.nodes.values():\n for idx in build_edge_to(node):\n if not node.has_edge_to(idx) \\\n and not idx == node.id: # prevent self-links\n node.neighbours[idx] = self.nodes[idx]\n # put reverse edge ?\n if not self.directed:\n self.nodes[idx].neighbours[node.id] = node", "def update_nodes(nodes, svg_h):\n for i in range(0, len(nodes)):\n nodes[i,2] = svg_h-nodes[i,2]\n return nodes", "def update_cell_nodes(self):\n self.cells['nodes'] = -1\n\n for c in range(self.Ncells()):\n # consider two edges at a time, and find the common node\n for i,(ja,jb) in 
enumerate(circular_pairs(self.cell_to_edges(c))):\n for n in self.edges['nodes'][ja,:]: \n if n in self.edges['nodes'][jb]:\n self.cells['nodes'][c,i] = n\n break", "def copy_edge_attributes_to_refined(g_orig,g_new):\n def copy_attributes(j_orig,j_new):\n for field,type_ in g_new.edge_dtype:\n if field not in ['nodes','cells'] and not field.startswith('_'):\n g_new.edges[field][j_new] = g_orig.edges[field][j_orig]\n\n # loop through setting edge marks\n # edges between a midpoint and one of the\n # endpoints\n for j in range(g_new.Nedges()):\n a,b = g_new.edges['nodes'][j]\n if a>b:\n a,b = b,a\n # only care about edges where one node is original,\n # the other is a midpoint.\n # there are no edges where both are original, and\n # edges with both as midpoints or with one as a center\n # always internal.\n if a < g_orig.Nnodes() and b>=g_orig.Nnodes() and b<g_orig.Nnodes()+g_orig.Nedges():\n j_orig = b - g_orig.Nnodes()\n copy_attributes(j_orig,j)", "def create_node2edges_on2freq_grid(self):\n trip_id2model = pickle.load(open('pickles/trip_id2model.pickle','rb'))\n old_trip_id = -1\n model = trip_id2model[1]\n sub_x = 5\n sub_y = 5\n node2edges_on2sub_grid2points = {}\n for line in self.lines:\n trip_id,lat,lon = normalize_simple(line)\n if trip_id != old_trip_id:\n #print trip_id\n model = trip_id2model[trip_id]\n old_trip_id = trip_id\n node = self.gps_to_node(lat,lon)\n if node == -1:\n continue\n #print \"pushed through\"\n incident_edges = self.incident_edges(node)\n edges_on = []\n for edge in incident_edges:\n if model[edge] == 1:\n edges_on.append(edge)\n edges_on.sort()\n edges_on = tuple(edges_on)\n min_lat,max_lat,min_lon,max_lon = self.coords_to_min_max_lat_lon(self.node_to_coords(node))\n\n sub_row,sub_col = gen_gps_to_coords(lat,lon,sub_x,sub_y,min_lat,max_lat,min_lon,max_lon)\n sub_tuple = (sub_row,sub_col)\n if node not in node2edges_on2sub_grid2points:\n node2edges_on2sub_grid2points[node] = {}\n edges_on2sub_grid2points = node2edges_on2sub_grid2points[node]\n if edges_on not in edges_on2sub_grid2points:\n edges_on2sub_grid2points[edges_on] = defaultdict(list)\n sub_grid2points = edges_on2sub_grid2points[edges_on]\n points = sub_grid2points[sub_tuple]\n node2edges_on2sub_grid2points[node][edges_on][sub_tuple].append([lat,lon])\n #points.append([lat,lon])\n\n print node2edges_on2sub_grid2points.keys()\n print node2edges_on2sub_grid2points[2].keys()\n print node2edges_on2sub_grid2points[2][(2,3)].keys()\n \n node2edges_on2median = {}\n for node in node2edges_on2sub_grid2points:\n print node\n edges_on2sub_grid2points = node2edges_on2sub_grid2points[node]\n node2edges_on2median[node] = {}\n for edges_on in edges_on2sub_grid2points:\n sub_grid2points = edges_on2sub_grid2points[edges_on]\n best_spot = (-1,-1)\n best_score = 0\n for spot in sub_grid2points:\n score = len(sub_grid2points[spot])\n if score > best_score:\n best_score = score\n best_spot = spot\n node2edges_on2median[node][edges_on] = list_median(sub_grid2points[spot])\n \n with open('pickles/node2edges_on2median-%d-%d.pickle' % (self.rows,self.cols),'wb') as output:\n pickle.dump(node2edges_on2median,output)", "def connect_nodes(self):\n for src_id, trg_id in itertools.product(self.selected_nodes, repeat=2):\n if src_id != trg_id:\n app.edges[src_id].add(trg_id)\n self.mark_as_unsaved()\n self.update()", "def set_four_edges(col, row, edges_lst, weight):\n if row == 0 and col == 0: # node (0,0)\n edges_lst.append((get_node_name(0, 0), get_node_name(1, 0), weight))\n edges_lst.append((get_node_name(0, 0), 
get_node_name(0, 1), weight))\n elif row == 0 and col == COLS - 1: # node (0,COLS-1)\n edges_lst.append((get_node_name(0, COLS - 1), get_node_name(0, COLS - 2), weight))\n edges_lst.append((get_node_name(0, COLS - 1), get_node_name(1, COLS - 1), weight))\n elif row == ROWS - 1 and col == 0: # node (ROWS-1,0)\n edges_lst.append((get_node_name(ROWS - 1, 0), get_node_name(ROWS - 2, 0), weight))\n edges_lst.append((get_node_name(ROWS - 1, 0), get_node_name(ROWS - 1, 1), weight))\n elif row == ROWS - 1 and col == COLS - 1: # node (ROWS-1,COLS-1)\n edges_lst.append((get_node_name(ROWS - 1, COLS - 1), get_node_name(ROWS - 2, COLS - 1), weight))\n edges_lst.append((get_node_name(ROWS - 1, COLS - 1), get_node_name(ROWS - 1, COLS - 2), weight))", "def update_nodes(self):\n raise NotImplementedError('ERROR: sweeper has to implement update_nodes(self)')", "def set_edges(self, edges):\n self._tree.set_edges(edges)\n self._program = make_propagation_program(self._tree.tree_grid)", "def reset_edges(self):\n super().reset_edges()\n\n # If we're in default state, notheing to rest\n if self._modified_weighted_adj_matrices is None:\n return\n\n # Degrees are reset, so we need to reset the original weight scaling\n if self.scale_weights and not self.scaling_skipped:\n self._scale_weights_to_degree()\n self._generate_weighted_adj_matrices()\n else:\n # No weight scaling so just load prev values from cache\n self.weighted_adj_matrices = {**self.weighted_adj_matrices, **self._modified_weighted_adj_matrices}\n self._modified_weighted_adj_matrices = None", "def sub_graph_merging(self):", "def __filterEdges(self):", "def set_general_edge(col, edges_lst, row, weight):\n edges_lst.append((get_node_name(row, col), get_node_name(row, col - 1), weight))\n edges_lst.append((get_node_name(row, col), get_node_name(row, col + 1), weight))\n edges_lst.append((get_node_name(row, col), get_node_name(row - 1, col), weight))\n edges_lst.append((get_node_name(row, col), get_node_name(row + 1, col), weight))", "def _update_nodes_ids(self, change=None):\n self._nodes_filter.val_range = self.nodes_range\n self.nodes_ids = self._nodes_filter.val_ids\n self._update_edges_filtered(change)", "def twoEdgesIntoSamePortCrossesWhenSwitched(self):\n graph = self.graph\n makeLayer = self.makeLayer\n eastWestEdgeFromTo = self.eastWestEdgeFromTo\n addNodeToLayer = self.addNodeToLayer\n addPortOnSide = self.addPortOnSide\n addEdgeBetweenPorts = self.addEdgeBetweenPorts\n\n leftLayer = makeLayer(graph)\n rightLayer = makeLayer(graph)\n\n topLeft = addNodeToLayer(leftLayer)\n bottomLeft = addNodeToLayer(leftLayer)\n topRight = addNodeToLayer(rightLayer)\n bottomRight = addNodeToLayer(rightLayer)\n\n topRightPort = addPortOnSide(topRight, PortSide.WEST)\n bottomLeftPort = addPortOnSide(bottomLeft, PortSide.EAST)\n addEdgeBetweenPorts(bottomLeftPort, topRightPort)\n\n topLeftPort = addPortOnSide(topLeft, PortSide.EAST)\n addEdgeBetweenPorts(topLeftPort, topRightPort)\n\n eastWestEdgeFromTo(bottomLeft, bottomRight)\n\n return graph", "def __decorate_nodes(nodes, space):\n for n in nodes:\n if n not in self.__node_spaces:\n self.__node_spaces[n] = set([])\n self.__node_spaces[n].add(space)\n pred_nodes = self.__plan_graph.subjects(AGORA.next, n)\n __decorate_nodes(pred_nodes, space)", "def edge_mapping(self):\n ...", "def set_edges(self, edges):\n assert len(edges) == self._num_edges\n self._tree_grid = make_tree(edges)", "def intEdge(t):\n t.children = [(r,int(w)) for r,w in t.children]\n for r,w in t.children:\n intEdge(r)", "def 
run_adding_edges(self):\n indices = np.where(self.X==0)\n idx=[]\n for i in range(len(indices[0])):\n idx.append((indices[0][i],indices[1][i]))\n idx = np.array(idx)\n return self.node_equivalent(idx)", "def cell_edges(self):", "def _create_connections(self):\n self.predecessors = {}\n self.successors = {}\n for nd in self.nodes:\n self.predecessors[nd.name] = []\n self.successors[nd.name] = []\n\n for (nd_out, nd_in) in self.edges:\n self.predecessors[nd_in.name].append(nd_out)\n self.successors[nd_out.name].append(nd_in)", "def extend_nodes_links(sd, nodes, links):\n nnodes = nodes.tolist()\n nlinks = links.tolist()\n tmp = []\n nid = len(nodes)\n for i in range(0, len(sd)):\n if sd[i] not in tmp:\n tmp.append(sd[i])\n else:\n nnodes.append(nodes[sd[i]]+[0,0,FLUCT])\n tmp.append(nid)\n # make sure not add links to dummy leaf.\n sd[i] = adjust_sd(sd[i], nodes, links)\n nlinks.append([sd[i],nid])\n nid = nid + 1\n nnodes, nlinks = rearange_nodes_links_old(tmp, np.array(nnodes), np.array(nlinks))\n return np.array(nnodes), np.array(nlinks)", "def multi_edge():\n from networkx.readwrite import json_graph\n import networkx as nx\n import autonetkit\n # returns a house graph\n data = {'directed': False,\n 'graph': [],\n 'links': [{'_ports': {'r4': 2, 'r5': 1},\n 'raw_interfaces': {},\n 'source': 0,\n 'target': 1},\n {'_ports': {'r2': 3, 'r4': 1},\n 'raw_interfaces': {},\n 'source': 0,\n 'target': 3},\n {'_ports': {'r2': 4, 'r4': 3},\n 'raw_interfaces': {},\n 'source': 0,\n 'target': 3},\n {'_ports': {'r3': 3, 'r5': 2},\n 'raw_interfaces': {},\n 'source': 1,\n 'target': 4},\n {'_ports': {'r1': 1, 'r2': 1},\n 'raw_interfaces': {},\n 'source': 2,\n 'target': 3},\n {'_ports': {'r1': 3, 'r2': 5},\n 'raw_interfaces': {},\n 'source': 2,\n 'target': 3},\n {'_ports': {'r1': 2, 'r3': 1},\n 'raw_interfaces': {},\n 'source': 2,\n 'target': 4},\n {'_ports': {'r1': 4, 'r3': 4},\n 'raw_interfaces': {},\n 'source': 2,\n 'target': 4},\n {'_ports': {'r1': 5, 'r3': 5},\n 'raw_interfaces': {},\n 'source': 2,\n 'target': 4},\n {'_ports': {'r2': 2, 'r3': 2},\n 'raw_interfaces': {},\n 'source': 3,\n 'target': 4}],\n 'multigraph': True,\n 'nodes': [{'_ports': {0: {'category': 'physical', 'description': None},\n 1: {'category': 'physical', 'description': 'r4 to r2', 'id': 'eth0'},\n 2: {'category': 'physical', 'description': 'r4 to r5', 'id': 'eth1'},\n 3: {'category': 'physical', 'description': 'r4 to r2', 'id': 'eth2'}},\n 'asn': 2,\n 'device_type': 'router',\n 'id': 'r4',\n 'label': 'r4',\n 'x': 675,\n 'y': 300},\n {'_ports': {0: {'category': 'physical', 'description': None},\n 1: {'category': 'physical', 'description': 'r5 to r4', 'id': 'eth0'},\n 2: {'category': 'physical', 'description': 'r5 to r3', 'id': 'eth1'}},\n 'asn': 2,\n 'device_type': 'router',\n 'id': 'r5',\n 'label': 'r5',\n 'x': 675,\n 'y': 500},\n {'_ports': {0: {'category': 'physical', 'description': None},\n 1: {'category': 'physical', 'description': 'r1 to r2', 'id': 'eth0'},\n 2: {'category': 'physical', 'description': 'r1 to r3', 'id': 'eth1'},\n 3: {'category': 'physical', 'description': 'r1 to r2', 'id': 'eth2'},\n 4: {'category': 'physical', 'description': 'r1 to r3', 'id': 'eth3'},\n 5: {'category': 'physical', 'description': 'r1 to r3', 'id': 'eth4'}},\n 'asn': 1,\n 'device_type': 'router',\n 'id': 'r1',\n 'label': 'r1',\n 'x': 350,\n 'y': 400},\n {'_ports': {0: {'category': 'physical', 'description': None},\n 1: {'category': 'physical', 'description': 'r2 to r1', 'id': 'eth0'},\n 2: {'category': 'physical', 'description': 'r2 to r3', 
'id': 'eth1'},\n 3: {'category': 'physical', 'description': 'r2 to r4', 'id': 'eth2'},\n 4: {'category': 'physical', 'description': 'r2 to r4', 'id': 'eth3'},\n 5: {'category': 'physical', 'description': 'r2 to r1', 'id': 'eth4'}},\n 'asn': 1,\n 'device_type': 'router',\n 'id': 'r2',\n 'label': 'r2',\n 'x': 500,\n 'y': 300},\n {'_ports': {0: {'category': 'physical', 'description': None},\n 1: {'category': 'physical', 'description': 'r3 to r1', 'id': 'eth0'},\n 2: {'category': 'physical', 'description': 'r3 to r2', 'id': 'eth1'},\n 3: {'category': 'physical', 'description': 'r3 to r5', 'id': 'eth2'},\n 4: {'category': 'physical', 'description': 'r3 to r1', 'id': 'eth3'},\n 5: {'category': 'physical', 'description': 'r3 to r1', 'id': 'eth4'}},\n 'asn': 1,\n 'device_type': 'router',\n 'id': 'r3',\n 'label': 'r3',\n 'x': 500,\n 'y': 500}]}\n graph = json_graph.node_link_graph(data)\n anm = autonetkit.anm.NetworkModel()\n g_in = anm.add_overlay(\"input\")\n g_in._replace_graph(nx.MultiGraph(graph))\n # TODO: check if should build overlays here rather than clone in?\n g_phy = anm[\"phy\"]\n g_phy._replace_graph(graph)\n return anm", "def merge_nodes(self,n0,n1):\n # -- Sanity checks - does not yet allow for collapsing edges.\n\n # if they share any cells, would update the cells, but for now\n # just signal failure.\n n0_cells=list(self.node_to_cells(n0))\n n1_cells=list(self.node_to_cells(n1))\n cell_to_edge_cache={}\n\n for c in n1_cells:\n if c in n0_cells:\n print(\"cell %d common to both nodes\"%c)\n raise GridException(\"Not ready for merging nodes in the same cell\")\n # otherwise record and fix up below\n\n # while we're looping, cache the edges as they will\n # be mutated along the way.\n cell_to_edge_cache[c]=self.cell_to_edges(c).copy()\n\n # do they share an edge, but not already fixed in the above stanza?\n j=self.nodes_to_edge(n0,n1)\n if j is not None:\n raise GridException(\"Not ready for merging endpoints of an edge\")\n\n edge_map={} # index of superceded edge => superceding edge\n\n # Update edges of n1 to point to n0\n # if that would cause a duplicate edge, then the n1 version is deleted\n n1_edges=list(self.node_to_edges(n1)) # make copy since we'll mutate it\n for j in n1_edges:\n if self.edges['nodes'][j,0]==n1:\n nj=0\n elif self.edges['nodes'][j,1]==n1:\n nj=1\n else:\n assert False # sanity check\n newnodes=self.edges[j]['nodes'].copy()\n newnodes[nj]=n0\n # it's possible that this is an edge which already exists\n jother=self.nodes_to_edge(*newnodes)\n if jother is not None:\n # want to keep jother, delete j. but is there info on\n # cells which should be brought over?\n edge_map[j]=jother\n # wait to delete j until after cells have been moved to jother.\n else:\n self.log.debug(\"Modifying edge j=%d\"%j)\n self.modify_edge(j,nodes=newnodes)\n\n # -- Transition any cells. \n for c in n1_cells:\n # update the node list:\n cnodes=self.cell_to_nodes(c).copy()\n nc=list(cnodes).index(n1)\n cnodes[nc]=n0\n\n # Dangerous to use cell_to_edges, since it may\n # have to consult the edge topology, which is disrupted\n # in the above code. 
\n # cell_to_edges: first checks cells['edges'], may \n # go to cell_to_nodes(c): that's safe.\n # and nodes_to_edge\n # -> node_to_edges, which in turn may consult self.edges['nodes']\n\n #cedges=self.cell_to_edges(c).copy()\n cedges=cell_to_edge_cache[c]\n\n for ji,j in enumerate(cedges):\n if j in edge_map:\n # is there were edges['cells'] should be updated?\n\n # sever the edge=>cell pointer, to p\n # could just set to [-1,-1], but this keeps things very explicit\n # for debugging\n j_cells=list(self.edges['cells'][j])\n j_cells_side=j_cells.index(c)\n j_cells[ j_cells_side ] = -1\n self.modify_edge(j,cells=j_cells)\n\n # and modify the receiving edge, too\n jo=edge_map[j]\n jo_cells=list(self.edges['cells'][jo])\n # which side of jo? a bit tedious...\n if list(self.edges['nodes'][j]).index(n1) == list(self.edges['nodes'][jo]).index(n0):\n # same orientation\n jo_cells_side=j_cells_side\n elif list( self.edges['nodes'][j]).index(n1) == 1-list(self.edges['nodes'][jo]).index(n0):\n jo_cells_side=1-j_cells_side\n else:\n raise Exception(\"Failed in some tedium\")\n assert jo_cells[jo_cells_side]<0\n jo_cells[jo_cells_side]=c\n self.modify_edge(edge_map[j],cells=jo_cells)\n # yikes. any chance that worked?\n\n cedges[ji]=edge_map[j]\n\n # maybe this is where we'd update cells['edges'] too?\n self.modify_cell(c,nodes=cnodes,edges=cedges)\n\n for dead_edge in edge_map:\n self.delete_edge(dead_edge)\n\n self.delete_node(n1)", "def update(self, edges) -> None:\n for v1, v2 in edges:\n self.add(v1, v2)", "def _add_edge(src, dst):\n module_nodes[src].weights[dst] += 1\n module_nodes[dst].weights[src] += 1", "def update_weights(self):\r\n\r\n inedges=self.in_edges\r\n for edge in inedges:\r\n weight=edge.weight+self.learning_rate*self.delta*(edge.source.activation)\r\n edge.change_weight(weight)", "def create_nodes(self):", "def rearange_nodes_links_old(idx, nodes, links):\n nodes = nodes[idx,:]\n for i in range(0, len(links)):\n links[i, 0] = idx.index(links[i, 0])\n links[i, 1] = idx.index(links[i, 1])\n for i in range (0, len(links)):\n links[i] = sorted(links[i])\n \n # Sort links according to the source.\n links = links[links[:,0].argsort()]\n idx = update_idx_links(links[:,0], links[:,1])\n links = links[idx]\n return nodes, links", "def assign_nodes_to_edges(nodes, edges):\n\n from scipy.spatial import cKDTree\n \n edges_arr = np.vstack(edges)\n kdt_nodes = cKDTree(nodes)\n\n # closest node id and discard computed distances ('_,')\n _, pairs = kdt_nodes.query(x=edges_arr, k=1)\n # refactor list of successive ids for start and end of edges into 2D array\n pairs = np.vstack((pairs[::2], pairs[1::2])).T\n\n new_edges = []\n for pair in pairs[:,:]:\n new_edges.append(np.array(nodes[pair]))\n \n return new_edges, pairs", "def populate_graph(self):", "def edges(self, e):\n self._edges = e", "def _refine_matrix_with_additional_connections(self):\n new_graph = self.graph.copy()\n for node in tqdm.tqdm(self.graph.nodes(), disable=not self.verbose):\n if self.graph.node[node][\"type\"] == \"hashtag\":\n for neighbour in self.graph.neighbors(node):\n if self.graph.node[neighbour][\"type\"] == \"username\":\n for other_node in self.graph.neighbors(neighbour):\n if self.graph.node[other_node][\"type\"] == \"hashtag\" \\\n and not self.graph.has_edge(node, other_node) \\\n and not node == other_node:\n new_graph.add_edge(node, other_node)\n self.graph = new_graph", "def remap_nodes(self, new_node_mapping):\n # because all nodes are SchemaNodeIDs (i.e. 
objects), we only need to reassign nodes one way\n # changes propagate to chains, chain root_nodes, and parents automatically\n for chain in self.chains:\n for edge in chain:\n head, tail = edge\n if head in new_node_mapping.keys():\n head.value = new_node_mapping[head]\n if tail in new_node_mapping.keys():\n tail.value = new_node_mapping[tail]", "def set_node_centers(self):\n # Node positions\n self.node_centers = []\n\n if self.n_states == 2:\n self.figsize = (10, 4)\n self.xlim = (-5, 5)\n self.ylim = (-2, 2)\n self.node_centers = [[-4,0], [4,0]]\n elif self.n_states == 3:\n self.figsize = (10, 6)\n self.xlim = (-5, 5)\n self.ylim = (-3, 3)\n self.node_centers = [[-3,-2], [3,-2], [-3,2]]\n elif self.n_states == 4:\n self.figsize = (8, 8)\n self.xlim = (-5, 5)\n self.ylim = (-5, 5)\n self.node_centers = [[-4,4], [4,4], [4,-4], [-4,-4]]", "def graph_format_update(g):\n if if_graph_adheres(g, allowed_extensions={'multi_rel', 'qualifier_rel', 'v-structure'}):\n return g\n g = copy_graph(g, with_iclass=True)\n new_edgeset = []\n for edge in g.get('edgeSet', []):\n if edge.get(\"type\") == 'time':\n edge['kbID'] = \"P585v\"\n elif \"argmax\" in edge:\n del edge['argmax']\n new_edgeset.append({\"type\": 'time', \"kbID\": \"P585v\", \"argmax\": 'time'})\n elif \"argmin\" in edge:\n del edge['argmin']\n new_edgeset.append({\"type\": 'time', \"kbID\": \"P585v\", \"argmin\": 'time'})\n elif \"num\" in edge:\n new_edgeset.append({\"type\": 'time', \"kbID\": \"P585v\", 'right': edge['num']})\n del edge['num']\n if edge.get(\"type\") == 'iclass':\n for iclass in sorted(edge.get(\"canonical_right\", []), key=len):\n new_edgeset.append({'type': 'iclass', 'kbID': edge.get(\"kbID\"), 'canonical_right': iclass})\n else:\n new_edgeset.append(edge)\n g['edgeSet'] = new_edgeset\n return g", "def add_nodes(self):\n for node_id in self.nodes:\n x = self.nodes[node_id][0]\n y = self.nodes[node_id][1]\n if node_id == 0:\n self.G.add_node(\"Source\", x=x, y=y, demand=0)\n self.G.add_node(\"Sink\", x=x, y=y, demand=0)\n else:\n self.G.add_node(node_id, x=x, y=y, demand=0)", "def set_connection_between_nodes(self):\n\n for i, node in enumerate(self.list_empty_nodes):\n line = node.labyrinth_position[0]\n column = node.labyrinth_position[1]\n\n for j in range(i+1, len(self.list_empty_nodes)):\n line_j = self.list_empty_nodes[j].labyrinth_position[0]\n column_j = self.list_empty_nodes[j].labyrinth_position[1]\n \n if i != j and ((line == line_j and column == column_j - 1) \\\n or (line == line_j and column == column_j + 1) \\\n or (column == column_j and line == line_j - 1) \\\n or (column == column_j and line == line_j + 1)) \\\n and (not node in self.list_empty_nodes[j].connected_to) \\\n and (not self.list_empty_nodes[j] in node.connected_to):\n node.connected_to.append(self.list_empty_nodes[j])\n self.list_empty_nodes[j].connected_to.append(node)", "def __init__(self, nodes: Set[Node], edges: Set[Edge]):\n super().__init__()\n self.nodes = nodes\n for edge in edges:\n self.add_edge(edge)\n self.leaves = {node for node in self.nodes if self.degree_out(node) == 0}\n root_nodes = self.nodes - self.leaves\n root_edges = {edge for edge in self.edges if edge.node_to not in self.leaves}\n self.root_graph = Mask(root_nodes, root_edges, self)", "def update_node(self, node, updating_node):\n out_edges = list(self.source_net.edges(node, data=True))\n self.remove_node(node)\n self.source_net.add_node(node, attr_dict=self.source_net.nodes[updating_node]['attr_dict'])\n self.source_net.add_edges_from(out_edges)\n\n # Transfer 
incoming edges\n for u, v, data in self.source_net.in_edges(updating_node, data=True):\n self.source_net.add_edge(u, node, **data)\n\n self.remove_node(updating_node)", "def update_edge_weights(self):\n # set all weights to 0\n for arc in self.arc_info.keys():\n self.arc_info[arc][\"weight\"] = 0\n # iterate through all paths and add weights to arcs\n for (path, weight) in zip(self.paths, self.weights):\n for arc in path:\n # Count this path's flow toward the arc's total\n self.arc_info[arc][\"weight\"] = self.arc_info[arc][\"weight\"] + \\\n weight", "def apply_edges(self, func=\"default\", edges=ALL, inplace=True):\n super(BaseGraphStore, self).apply_edges(func, edges, inplace=True)", "def multipleInBetweenLayerEdgesIntoNodeWithNoFixedPortOrderCauseCrossings(self):\n graph = self.graph\n makeLayer = self.makeLayer\n addNodesToLayer = self.addNodesToLayer\n eastWestEdgeFromTo = self.eastWestEdgeFromTo\n\n leftLayer = makeLayer(graph)\n leftNodes = addNodesToLayer(2, leftLayer)\n rightLayer = makeLayer(graph)\n rightNodes = addNodesToLayer(3, rightLayer)\n\n self.addInLayerEdge(rightNodes[0], rightNodes[2], PortSide.WEST)\n eastWestEdgeFromTo(leftNodes[0], rightNodes[1])\n eastWestEdgeFromTo(leftNodes[0], rightNodes[1])\n\n return graph", "def twoEdgesIntoSamePortResolvesCrossingWhenSwitched(self):\n graph = self.graph\n makeLayer = self.makeLayer\n eastWestEdgeFromTo = self.eastWestEdgeFromTo\n addNodeToLayer = self.addNodeToLayer\n addPortOnSide = self.addPortOnSide\n addEdgeBetweenPorts = self.addEdgeBetweenPorts\n\n leftLayer = makeLayer(graph)\n rightLayer = makeLayer(graph)\n\n topLeft = addNodeToLayer(leftLayer)\n bottomLeft = addNodeToLayer(leftLayer)\n topRight = addNodeToLayer(rightLayer)\n bottomRight = addNodeToLayer(rightLayer)\n\n topLeftPort = addPortOnSide(topLeft, PortSide.EAST)\n bottomLeftPort = addPortOnSide(bottomLeft, PortSide.EAST)\n bottomRightPort = addPortOnSide(bottomRight, PortSide.WEST)\n\n addEdgeBetweenPorts(topLeftPort, bottomRightPort)\n addEdgeBetweenPorts(bottomLeftPort, bottomRightPort)\n\n eastWestEdgeFromTo(bottomLeft, topRight)\n\n return graph", "def __edgeRouter(self):\r\n def getEndpoint(nodeTuple, pointList, direction, isReversedEdge):\r\n \"\"\" Gets the nearest arrow endpoint. 
Handles edge reversal \"\"\"\r\n if((direction == 'start' and not isReversedEdge)\r\n or (direction == 'end' and isReversedEdge)): \r\n endNode = nodeTuple[0]\r\n if(isReversedEdge):\r\n ix = -2\r\n iy = -1\r\n else:\r\n ix = 0\r\n iy = 1\r\n else: \r\n endNode = nodeTuple[1]\r\n if(isReversedEdge):\r\n ix = 0\r\n iy = 1\r\n else:\r\n ix = -2 \r\n iy = -1 \r\n \r\n # Is it connected to a named port!?!\r\n if(endNode.isConnectedByNamedPort(edgeObject)):\r\n handler = endNode.getConnectedByNamedPortHandler(nodeTuple[2]) \r\n return dc.coords(handler)[:2]\r\n \r\n # Not a named port...\r\n return list(endNode.getClosestConnector2Point( endNode, pointList[ix], \r\n pointList[iy])) \r\n \r\n \r\n \r\n #todo: improve method for spline arrows + add comments + optimize?\r\n print '----------------Dummy Edge Routing-----------------'\r\n for dummyEdge in NodeWrapper.ID2LayerEdgeDict.keys():\r\n \r\n dummyList = NodeWrapper.ID2LayerEdgeDict[dummyEdge]\r\n dummyNode = dummyList[0]\r\n dummyChild = dummyNode.children.keys()[0]\r\n linkFlagList = dummyNode.children[dummyChild]\r\n \r\n # Real nodes at start/end of the edge\r\n edgeSourceNode = dummyNode.parents.keys()[0]\r\n edgeSourceNode = edgeSourceNode.getASGNode().graphObject_\r\n dummyNode = dummyList[-1]\r\n edgeTargetNode = dummyNode.children.keys()[0]\r\n #print 'Dummy edge number', dummyEdge,\r\n #print dummyList[0].parents.keys()[0].getName(), edgeTargetNode.getName()\r\n edgeTargetNode = edgeTargetNode.getASGNode().graphObject_\r\n nodeTuple = [edgeSourceNode, edgeTargetNode, None]\r\n \r\n # Some edges are internally reversed to break cycles, when drawing\r\n # this must be taken into account\r\n isReversedEdge = False\r\n edgesToRoute = []\r\n for linkNode, isReversed in linkFlagList:\r\n edgesToRoute.append(linkNode)\r\n if(isReversed):\r\n isReversedEdge = True\r\n \r\n # Get all the points the edge must pass through (sorted by layer order)\r\n dummyList.sort(lambda a, b: cmp(a.getLayer(), b.getLayer()))\r\n if(isReversedEdge):\r\n dummyList.reverse()\r\n sortedDummyRouteList = []\r\n for node in dummyList:\r\n sortedDummyRouteList += node.getEdgePosition()\r\n \r\n # Set the coordinates of the edge directly \r\n # This is complicated by the fact that AToM3 treats edges as two\r\n # segments that join poorly (for spline arrows)\r\n for edgeObject in edgesToRoute: \r\n dc = edgeObject.graphObject_.dc\r\n linkObj = edgeObject.graphObject_ \r\n tag = linkObj.tag\r\n \r\n if(isReversedEdge):\r\n inPoint = dc.coords( tag + \"2ndSeg0\" )[:2]\r\n outPoint = dc.coords( tag + \"1stSeg0\" )[:2]\r\n else:\r\n inPoint = dc.coords( tag + \"1stSeg0\" )[:2]\r\n outPoint = dc.coords( tag + \"2ndSeg0\" )[:2]\r\n \r\n #print 'Dummy route', sortedDummyRouteList\r\n numPoints = len(sortedDummyRouteList) / 2\r\n # Add 2 extra control points for odd case (to make splines nice)\r\n if(numPoints % 2 == 1):\r\n if(numPoints == 1):\r\n center = sortedDummyRouteList\r\n else:\r\n start = sortedDummyRouteList[:numPoints - 1]\r\n end = sortedDummyRouteList[numPoints + 1:]\r\n center = sortedDummyRouteList[numPoints - 1:numPoints + 1]\r\n \r\n if(not isReversedEdge):\r\n newMid1 = [center[0], center[1] - 20]\r\n newMid2 = [center[0], center[1] + 20]\r\n else:\r\n newMid2 = [center[0], center[1] - 20]\r\n newMid1 = [center[0], center[1] + 20]\r\n \r\n \r\n if(numPoints == 1):\r\n sortedDummyRouteList = newMid1 + center + newMid2 \r\n else:\r\n sortedDummyRouteList = start + newMid1 + center + newMid2 + end\r\n centerIndex = numPoints - 1 + 2\r\n \r\n # Add 1 extra 
control point for even case (to make splines nice)\r\n else:\r\n start = sortedDummyRouteList[:numPoints]\r\n end = sortedDummyRouteList[numPoints:]\r\n center = [start[-2] + (end[0] - start[-2]) / 2, \r\n start[-1] + (end[1] - start[-1]) / 2]\r\n sortedDummyRouteList = start + center + end \r\n centerIndex = numPoints\r\n \r\n # Now I know where the center is... so lets move the center object\r\n # Is the edge object a hyperlink?\r\n if(len(edgeObject.in_connections_ + edgeObject.out_connections_) > 2):\r\n fromObjs = []\r\n for semObj in edgeObject.in_connections_:\r\n fromObjs.append(semObj.graphObject_)\r\n toObjs = []\r\n for semObj in edgeObject.out_connections_:\r\n toObjs.append(semObj.graphObject_)\r\n optimizerHyperLink(dc, linkObj, fromObjs, toObjs, 0, 0, 0, center )\r\n continue\r\n \r\n else:\r\n linkObj.moveTo(* center)\r\n \r\n # Go through the 2 segments in the link\r\n nodeTuple[2] = edgeObject\r\n for connTuple in linkObj.connections:\r\n itemHandler = connTuple[0]\r\n direction = connTuple[1]\r\n \r\n if( direction ): \r\n inPoint = getEndpoint(nodeTuple, sortedDummyRouteList,\r\n 'start', isReversedEdge)\r\n\r\n segCoords = inPoint + sortedDummyRouteList[:centerIndex+2]\r\n else: \r\n outPoint = getEndpoint(nodeTuple, sortedDummyRouteList,\r\n 'end', isReversedEdge) \r\n segCoords = sortedDummyRouteList[centerIndex:] + outPoint\r\n segCoords = self.__reverseCoordList(segCoords)\r\n \r\n # Applies the changed coords to the canvas\r\n dc.coords( * [itemHandler] + segCoords ) \r\n \r\n # This may change the associated link drawings: \r\n # move them to the new point \r\n if( direction ):\r\n linkObj.updateDrawingsTo(inPoint[0], inPoint[1], itemHandler, \r\n segmentNumber=1)\r\n else:\r\n linkObj.updateDrawingsTo(outPoint[0], outPoint[1], itemHandler, \r\n segmentNumber=2)", "def update_edges(subgraph, graph_name, bb):\n top_subgraph = get_top_parent(subgraph, graph_name)\n edges = extract_edges(top_subgraph)\n for edge in edges:\n if(edge.get_style() is not None):\n style = edge.get_style()\n if(edge.get_color() is not None):\n color = edge.get_color()\n if(edge.get_label() is not None):\n label = edge.get_label()\n node_head = edge.get_source()\n node_tail = edge.get_destination()\n bb_head = get_bb(node_head)\n bb_tail = get_bb(node_tail)\n if(bb_head >= bb or bb_tail > bb):\n top_subgraph.del_edge(node_head, node_tail, 0)\n if bb_head >= bb:\n if bb_tail > bb:\n add_edge(top_subgraph, update_edge_node_name(node_head, bb_head), update_edge_node_name\n (node_tail, bb_tail), style=style, color=color, label=label)\n else:\n add_edge(top_subgraph, update_edge_node_name(node_head, bb_head), node_tail, style=style, \n color=color, label=label)\n else:\n add_edge(top_subgraph, node_head, update_edge_node_name(node_tail, bb_tail), \n style=style, color=color, label=label)\n \n #si bb_n < bb et bb_s <= bb on touche pas\n #sinon\n # si bb_n >= bb:\n # si bb_s >= bb:\n # creer edge (n+1, s+1)\n # sinon:\n # creer edge (n+1, s)\n # sinon:\n # si bb_s > bb:\n # creer edge (n, s+1)", "def plot_edges(self, node_list):\n tree = MarkerArray()\n id = 1\n for node in self.node_list:\n if node.parent:\n # edge between nodes\n path = Marker()\n path.header.frame_id = \"map\"\n path.header.stamp = rospy.get_rostime()\n path.ns = \"markers\"\n path.id = id\n id += 1\n path.type = path.LINE_STRIP\n path.action = path.ADD\n path.scale.x = self.rviz_tuning_plt\n path.color.a = 1.0\n\n path.color.r = 1.0\n path.color.g = 0.7\n path.color.b = 0.0\n\n path.lifetime = rospy.Duration()\n 
path.pose.orientation.w = 1.0\n\n p1 = Point()\n p1.x = node.parent.x\n p1.y = node.parent.y\n p1.z = 0.02\n path.points.append(p1)\n\n p2 = Point()\n p2.x = node.x\n p2.y = node.y\n p2.z = 0.02\n path.points.append(p2)\n \n tree.markers.append(path)\n\n self.pub_edges.publish(tree)", "def add_cell_and_edges(self,nodes,**kws): \n for a,b in circular_pairs(nodes):\n j=self.nodes_to_edge(a,b)\n if j is None:\n self.add_edge(nodes=[a,b])\n return self.add_cell(nodes=nodes,**kws)", "def transform(nodes, weights, new_corners):\n if nodes.shape[1] == 1:\n x_0 = new_corners[0, :]\n x_1 = new_corners[1, :]\n M = np.zeros((1, 1))\n M[:, 0] = 0.5 * (x_1 - x_0)\n origin = np.array([-1.0])\n elif nodes.shape[1] == 2:\n x_0 = new_corners[0, :]\n x_1 = new_corners[1, :]\n x_2 = new_corners[2, :]\n M = np.zeros((2, 2))\n M[:, 0] = 0.5 * (x_1 - x_0)\n M[:, 1] = 0.5 * (x_2 - x_0)\n origin = np.array([-1.0, -1.0])\n elif nodes.shape[1] == 3:\n x_0 = new_corners[0, :]\n x_1 = new_corners[1, :]\n x_2 = new_corners[2, :]\n x_3 = new_corners[3, :]\n M = np.zeros((3, 3))\n M[:, 0] = 0.5 * (x_1 - x_0)\n M[:, 1] = 0.5 * (x_2 - x_0)\n M[:, 2] = 0.5 * (x_3 - x_0)\n origin = np.array([-1.0, -1.0, -1.0])\n\n offset = -M @ origin + x_0\n volume_fraction = np.abs(np.linalg.det(M))\n return np.add(nodes @ M.T, offset), volume_fraction * weights", "def addEdges(self, edges):\n for edge in edges:\n self.addEdge(edge[0], edge[1], edge[2])", "def getNodesInDifferentLayoutUnitsPreventSwitch(self):\n makeLayers = self.makeLayers\n addNodesToLayer = self.addNodesToLayer\n eastWestEdgeFromTo = self.eastWestEdgeFromTo\n addNorthSouthEdge = self.addNorthSouthEdge\n\n layers = makeLayers(2)\n leftNodes = addNodesToLayer(2, layers[0])\n rightNodes = addNodesToLayer(3, layers[1])\n\n eastWestEdgeFromTo(leftNodes[0], rightNodes[1])\n\n addNorthSouthEdge(\n PortSide.EAST, rightNodes[2], rightNodes[1], leftNodes[0], True)\n\n rightNodes[1].inLayerLayoutUnit = rightNodes[2]\n rightNodes[2].inLayerLayoutUnit = rightNodes[2]\n\n return self.graph", "def ConstrDict(raw_data):\n if (path.exists(\"processed_out.txt\") and\n path.exists(\"processed_in.txt\")):\n with open(\"processed_out.txt\") as out:\n global out_edges\n out_edges = pickle.load(out)\n with open(\"processed_in.txt\") as fin:\n global in_edges\n in_edges = pickle.load(fin)\n print len(in_edges.keys())\n with open(\"nodes.txt\") as n:\n global nodes\n nodes = pickle.load(n)\n print \"nodes: \", len(nodes)\n else:\n # read each line and construct a dictionary to store\n # sources and destinations\n for line in raw_data: \n splitted_line = line.split()\n # source is the first element in a line, the rest of elements\n # are destinations\n threshold = 10000\n src, dests = splitted_line[0], splitted_line[1:threshold]\n # if src is not in the dictionary, create a key-value pair for\n # this src\n out_edges.setdefault(src, set())\n\n # put all destinations into the list of the corresponding src\n out_edges[src].update(set(dests))\n\n # construct a set to store all nodes appearing\n nodes.add(src)\n nodes.update(set(dests))\n\n # create the list of inedges for each node\n for i in out_edges[src]:\n in_edges.setdefault(i, set())\n in_edges[i].add(src)\n\n nodes = list(nodes)\n # shuffle the order of nodes\n shuffle(nodes)\n\n with open(\"processed_out.txt\", \"wb\") as out:\n pickle.dump(out_edges, out)\n with open(\"processed_in.txt\", \"wb\") as fin:\n pickle.dump(in_edges, fin)\n with open(\"nodes.txt\", \"wb\") as n:\n pickle.dump(nodes, n)\n\n\n # construct edge list\n for src, 
dests in out_edges.iteritems():\n pairs = [(src, dest) for dest in dests if (src, dest) not in\n exists]\n edges.extend(pairs)", "def add_edges(self, edges):\n if self.is_weighted():\n for vertex1, vertex2, weight in edges:\n self.add_edge(vertex1, vertex2, weight)\n else:\n for vertex1, vertex2 in edges:\n self.add_edge(vertex1, vertex2)", "def update_new_weights(self, node):\n for edge in self.dep_graph.out_edges(node):\n ratio = self.roles[edge[0]].deps[edge[1]]\n new_weight = self.roles[node].new_rep / ratio\n self.dep_graph.add_edge(*edge, new_weight=new_weight)", "def alter_network(self, add=[], remove=[]):\n\n self.network.edges.add_many(add)\n self.network.edges.remove_many(remove)\n return self.score_network()", "def __saveEdges(self, edges):", "def _reset_node_ids(self, start_id: int):\n\n add_id = start_id - self.min_node_id()\n for tree_idx, _ in enumerate(self.nodes):\n self.nodes[tree_idx].nodes['id'] += add_id\n self.edges[tree_idx] += add_id", "def edges(self, layout):\n return", "def mutateEdge(g, edges, directed, connected):\n if ((directed and g.e == g.n ** 2 - g.n)\n or (not directed and g.e == (g.n ** 2 - g.n) / 2)): # Complete graph\n return\n\n if (g.e > edges):\n while g.e != edges:\n removeEdge(g, directed)\n g.e -= 1\n elif (g.e < edges):\n while g.e != edges:\n addEdge(g, directed, connected)\n g.e += 1\n else: # Edge count is correct, just do an edge swap for the mutation\n removeEdge(g, directed)\n addEdge(g, directed, connected)", "def build_edges(self):\n print(\"Constructing Edges.\")\n # -----------------------------------------\n # TODO: You should write this method!\n\n # Note: this method may take some time to run - it is likely to be O(N^2), and some lists have N = 10,000 words or more.\n # (I've had students decide that their program was \"broken\" and quit it before this process finished... 
every time,\n # not realizing that the program was working hard behind the scenes.)\n # I recommend that you keep track of the number of edges you have added, and if it is a multiple of 1000, print\n # something so that you know your program is making progress.\n n = len(self.vertices)\n\n\n\n \n # -----------------------------------------\n print(\"Done Constructing Edges.\\n------------------------------------\")", "def __drawNodes(self, levelDictionary, linkNodeDict, topLeft):\r\n setSmooth = self.__optionsDatabase.get('Spline optimization') \r\n setCurvature = self.__optionsDatabase.get('Arrow curvature') \r\n minOffsetY = self.__optionsDatabase.get('yOffset') \r\n minOffsetX = self.__optionsDatabase.get('xOffset') \r\n giveExtraSpaceForLinks = self.__optionsDatabase.get('addEdgeObjHeight') \r\n\r\n # Caclulate x, y offsets\r\n offsetX = 0\r\n levelInt2offsetY = dict()\r\n for levelInt in levelDictionary.keys():\r\n currentLevel = levelDictionary[levelInt]\r\n levelInt2offsetY[levelInt] = 0\r\n \r\n # Calculate maximum node size on a per level basis (X is for all levels)\r\n # Then add minimum seperation distance between nodes\r\n for node in currentLevel:\r\n # getSize returns node width, and height of the node & child link icon\r\n x, y = node.getSize(giveExtraSpaceForLinks)\r\n offsetX = max(offsetX, x)\r\n levelInt2offsetY[levelInt] = max(levelInt2offsetY[levelInt], y) \r\n \r\n \r\n maxOffsetX = offsetX + minOffsetX\r\n halfOffsetX = offsetX / 2\r\n \r\n # Send nodes to their final destination, assign final pos to dummy edges\r\n x, y = topLeft\r\n for levelInt in levelDictionary.keys():\r\n currentLevel = levelDictionary[levelInt] \r\n longEdgeOffset = [halfOffsetX, levelInt2offsetY[levelInt] / 3]\r\n \r\n # Move each node in the level (Dummy edges save the pos but don't move)\r\n for node in currentLevel:\r\n node.moveTo(x + node.getGridPosition() * maxOffsetX, y, longEdgeOffset)\r\n \r\n # Increment y for the next iteration\r\n y += levelInt2offsetY[levelInt] + minOffsetY\r\n \r\n # Self-looping edges (Must move these manually into position)\r\n for selfLoopedEdge in NodeWrapper.SelfLoopList: \r\n x, y = selfLoopedEdge.getEdgePosition()\r\n obj = selfLoopedEdge.getASGNode().graphObject_\r\n obj.moveTo(x, y)\r\n\r\n # Re-doing links can take a while, lets show something in meanwhile...\r\n self.atom3i.parent.update()\r\n \r\n # Re-wire the links to take into account the new node positions\r\n selectedLinks = []\r\n for obj in linkNodeDict.values():\r\n selectedLinks.append(obj)\r\n optimizeLinks(self.cb, setSmooth, setCurvature, \r\n selectedLinks=selectedLinks)\r\n \r\n # Re-doing links can take a while, lets show something in meanwhile...\r\n self.atom3i.parent.update()\r\n \r\n # Route multi-layer edges\r\n self.__edgeRouter()", "def transition(self):\n for node in self.net.nodes():\n if node not in self.evidence:\n self.update_node(node)", "def merge_graph(self, other):\n self.add_nodes( (nLabel,nInfo) for nLabel,nInfo in other.nodes() )\n \n for nLabel,nInfo in other.nodes():\n for edgeLabel,edgeInfo in other.edgesFrom(nLabel):\n self.add_edge(edgeLabel,edgeInfo)", "def setup_ant(self):\n self.visited_nodes[1:] = []\n self.actual_node = self.start_pos", "def relativize_coordinates(self):\n if len(self.nodes) + len(self.connecting) < 1:\n return\n smallest_c = (self.nodes+self.connecting)[0].c\n for node in self.nodes+self.connecting:\n if node.c < smallest_c:\n smallest_c = node.c\n for node in self.nodes+self.connecting:\n node.c = node.c - smallest_c", "def 
draw_edges(self):\n pass", "def set_both_connections(self, new_node):\n distance_to_new = self.current_node.distance_between(new_node.location)\n self.current_node.set_adjacent_from_direction(distance_to_new, new_node)\n reverse_distance = new_node.distance_between(self.current_node.location)\n new_node.set_adjacent_from_direction(reverse_distance, self.current_node)", "def transform(g):\n min_nidx = min(g.nodes)\n max_nidx = max(g.nodes)\n\n if min_nidx == 0 and max_nidx == g.number_of_nodes() - 1: # Everything already labeled as wanted\n return g\n\n nodes = sorted(g.nodes) # Get the sorted nodes\n # Relabel the nodes by their index in the list\n relabel_dict = {nidx: idx for idx, nidx in enumerate(nodes)}\n\n # Also shift node labels (important for saucy)\n if 'label' in g.nodes[g.nodes.keys()[0]]:\n for n_id in g.nodes:\n g.nodes[n_id]['label'] = relabel_dict[g.nodes[n_id]['label']]\n\n g = nx.relabel_nodes(g, relabel_dict)\n assert min(g.nodes) == 0 and max(g.nodes) == g.number_of_nodes() - 1\n\n return g", "def add_edges_from(self, edges: Iterable):\n for i, j in edges:\n self.add_edge(i, j)", "def compute_node_positions(self):\n pass", "def make_edges(graph, pos, unscaled,show_all, set_width):\n\n edge_traces = []\n edge_text_xs = []\n edge_text_ys = []\n edge_text_labels = []\n for edge in graph.edges():\n width = graph.edges()[edge]['weight']\n\n if width < .6 and show_all is False:\n continue\n if set_width is not None:\n width = set_width\n #Make it so smaller edges are more transparent. These numbers are a bit random, I jusst played wit them until they looked good.\n transparency = max(.5,round(width/5,2))\n\n\n #royalblue\n color_string = f'rgba(65, 105, 225, {transparency})'\n\n char1 = edge[0]\n char2 = edge[1]\n x0, y0 = pos[char1]\n x1, y1 = pos[char2]\n\n x = [x0, x1, None]\n y = [y0, y1, None]\n\n #Add edges (i.e. 
actual lines that appear)\n edge_trace = go.Scatter(x = x,\n y = y,\n line = dict(width = width,\n color = color_string),\n mode = 'lines')\n edge_traces.append(edge_trace)\n\n #Calculate midpoints, get the number of conenctions that should be displayed\n edge_text_xs.append((x0+x1)/2)\n edge_text_ys.append((y0+y1)/2)\n connections = unscaled.edges()[edge]['weight']\n edge_text_labels.append(char1.capitalize() + ' -- ' + char2.capitalize() + f': {connections} connections')\n\n #Add midpoint text trace\n edge_text_trace = go.Scatter(x = edge_text_xs,\n y = edge_text_ys,\n text = edge_text_labels,\n textposition = \"bottom center\",\n textfont_size = 10,\n mode = 'markers',\n hoverinfo = 'text',\n marker = dict(color = 'rgba(0,0,0,0)',\n size = 1,\n line = None))\n\n return edge_traces, edge_text_trace", "def connect_nodes(self):\n node1 = str(self.form.node1_text.toPlainText())\n node2 = str(self.form.node2_text.toPlainText())\n weight = str(self.form.weight_text.toPlainText())\n self.form.node1_text.clear()\n self.form.node2_text.clear()\n self.form.weight_text.clear()\n\n if not node1 or not node2 or not weight: \n self.show_dialog(\"Empty argument.\")\n return\n \n try:\n weight = int(weight)\n except:\n self.show_dialog(\"Weight should be an integer.\")\n return\n\n if self.G.has_edge(node1, node2):\n self.show_dialog(f\"Edge: {node1, node2} is already constructed.\")\n\n else:\n self.G.add_edge(node1, node2, weight=weight)\n self.form.plot_canvas.plot(self.G)", "def create_shallow_simplified_edges_graph(df, start, end):\n ports = pd.unique(df[['Source Port', 'Destination Port']].values.ravel('K'))\n ips = pd.unique(df[['Source IP', 'Destination IP']].values.ravel('K'))\n\n G = nx.Graph(mode=\"interval\", start=start, end=end)\n G.add_nodes_from(ports, type=\"port\")\n G.add_nodes_from(ips, type=\"ip\")\n\n interval_info = {}\n for index, row in df.iterrows():\n src_ip = row['Source IP']\n src_port = row['Source Port']\n dst_ip = row['Destination IP']\n dst_port = row['Destination Port']\n label = row['Label']\n interval_info['timestamp'] = row['Timestamp']\n\n # -1: malicious, 1: benign, 0: unlabelled\n G.nodes[src_ip]['interval_ground_truth'] = convert_label_to_numerical(label)\n G.nodes[dst_ip]['interval_ground_truth'] = convert_label_to_numerical(label)\n G.nodes[src_port]['interval_ground_truth'] = convert_label_to_numerical(label)\n G.nodes[dst_port]['interval_ground_truth'] = convert_label_to_numerical(label)\n\n # Setting labels for nodes on the current window\n G.nodes[src_ip]['label'] = src_ip\n G.nodes[src_port]['label'] = src_port\n G.nodes[dst_ip]['label'] = dst_ip\n G.nodes[dst_port]['label'] = dst_port\n\n # Connecting Src IP -> Dst IP nodes together\n if G.has_edge(src_ip, dst_ip):\n G.edges[src_ip, dst_ip]['weight'] += 1\n else:\n G.add_edge(src_ip, dst_ip, weight=1, label_class=label, type=\"ip_to_ip\")\n\n # Connecting Src IP -> Src Port\n if G.has_edge(src_ip, src_port):\n G.edges[src_ip, src_port]['weight'] += 1\n else:\n G.add_edge(src_ip, src_port, weight=1, label_class=label, type=\"ip_to_port\")\n\n # Connecting Src Port -> Dst Port\n if G.has_edge(src_port, dst_port):\n G.edges[src_port, dst_port]['weight'] += 1\n else:\n G.add_edge(src_port, dst_port, weight=1, label_class=label, type=\"port_to_port\")\n\n # Connecting Dst Port -> Dst IP\n if G.has_edge(dst_port, dst_ip):\n G.edges[dst_port, dst_ip]['weight'] += 1\n else:\n G.add_edge(dst_port, dst_ip, weight=1, label_class=label, type=\"ip_to_port\")\n return G, interval_info", "def patch_resize(c, graph, 
node_select):\r\n return c", "def __init__(self, from_node, to_node, span=None):\n self.from_node = from_node\n self.to_node = to_node\n self.span = span\n self.dummyedges = []", "def test_wires_to_edges(self):\n g = nx.lollipop_graph(4, 1)\n r = wires_to_edges(g)\n\n assert r == {0: (0, 1), 1: (0, 2), 2: (0, 3), 3: (1, 2), 4: (1, 3), 5: (2, 3), 6: (3, 4)}", "def create_graph_network(start_node, connections):\n graph = nx.Graph()\n graph.add_node(start_node)\n print(connections.index)\n graph.add_nodes_from(connections.index)\n edge_list = list(zip(itertools.repeat(start_node), connections.index))\n print(\"edge list is \", edge_list)\n graph.add_edges_from(edge_list)\n for i in graph.edges():\n graph[i[0]][i[1]]['weight'] = connections.loc[i[1]]['count']\n # graph[i[0]][i[1]]['proposal_number'] = connections.loc[i[1]]['proposal_number']\n # graph[i[0]][i[1]]['institution'] = connections.loc[i[1]]['institution']\n # graph[i[0]][i[1]]['proposal_title'] = connections.loc[i[1]]['proposal_title']\n # graph[i[0]][i[1]]['project_status'] = connections.loc[i[1]]['project_status']\n\n # Adding random position data to the graph.\n # pos = nx.spring_layout(graph, k=1)\n pos = nx.circular_layout(graph)\n nx.set_node_attributes(graph, 'pos', pos)\n return graph", "def draw_edges(self):\n nx.draw_networkx_edges(self.G, pos=self.positions)", "def _connect_neighbours(self):\n for prev in self.unvisited:\n for next in self.unvisited:\n if (next[0] == prev[0] and next[1] == prev[1] + 1) or (next[0] == prev[0] + 1 and next[1] == prev[1]):\n self.graph.addEdge((prev, next))\n self.visited.add(prev)\n self.visited.add(next)\n if self._find_intersection():\n self.intersection.append(prev)\n self.intersection.append(next)", "def add_edge(self, from_website, to_link):\n self.graph[to_link - 1, from_website - 1] = 1", "def __relax_edge(self, node):\n if node == str(self.get_start()):\n distance_to_parent = 0 # no parent for the starting point\n else:\n parent = self.path[node]\n distance_to_parent = self.distances[parent] + 1\n # try to relax the stretched edge\n if self.distances[node] > distance_to_parent:\n self.distances[node] = distance_to_parent" ]
[ "0.6937063", "0.69099474", "0.63684916", "0.6333464", "0.6302931", "0.62989634", "0.62662923", "0.62522674", "0.6176229", "0.6170671", "0.6169153", "0.615478", "0.61263245", "0.6126319", "0.6120976", "0.6110921", "0.60948086", "0.60822624", "0.6066578", "0.60385305", "0.6017414", "0.59834313", "0.5977689", "0.5968384", "0.595422", "0.59528744", "0.5949504", "0.59475416", "0.5931964", "0.59213215", "0.5912775", "0.5912263", "0.5905941", "0.5904575", "0.5886994", "0.5883116", "0.5876441", "0.58703446", "0.58663654", "0.58624", "0.5861169", "0.5857916", "0.5843809", "0.58421946", "0.5839482", "0.58380675", "0.58368367", "0.58264625", "0.58178675", "0.5802155", "0.5797212", "0.578834", "0.57864046", "0.5767577", "0.57637894", "0.5761033", "0.57531446", "0.5743089", "0.57388735", "0.57383025", "0.5722586", "0.5712037", "0.57093614", "0.5697168", "0.5696616", "0.56923056", "0.5683122", "0.56805253", "0.56801283", "0.56683534", "0.56346416", "0.56293917", "0.5628551", "0.5617326", "0.561412", "0.56085396", "0.5605993", "0.56031394", "0.5601388", "0.55905086", "0.558415", "0.5583074", "0.5581676", "0.55692834", "0.5565217", "0.5553917", "0.5543316", "0.5543012", "0.55415905", "0.5538019", "0.5529328", "0.55282086", "0.55275154", "0.55243665", "0.55184793", "0.55147266", "0.55096763", "0.5507881", "0.5505358", "0.55019695", "0.5501805" ]
0.0
-1
preparatory function for writing out to gml
def stringize_nx_graph(nx_graph):
    # graph attributes
    for key in nx_graph.graph.keys():
        if isinstance(nx_graph.graph[key], (list, set, np.ndarray)):
            nx_graph.graph[key] = ",".join([
                str(val) for val in list(nx_graph.graph[key])])

    # node attributes
    for node_name, node_attrs in nx_graph.nodes(data=True):
        for key in node_attrs.keys():
            if isinstance(nx_graph.nodes[node_name][key], (list, set, np.ndarray)):
                nx_graph.nodes[node_name][key] = ",".join([
                    str(val) for val in nx_graph.nodes[node_name][key]])

        # adjust node name for nice output in cytoscape
        new_node_name = re.sub(r"HCLUST.\d+_", "", node_name)
        new_node_name = new_node_name.replace(".UNK.0.A", "")
        nx_graph.nodes[node_name]["name"] = new_node_name

    # edge attributes
    for start_node, end_node in nx_graph.edges():
        for edge_idx in xrange(len(nx_graph[start_node][end_node])):
            edge_attrs = nx_graph[start_node][end_node][edge_idx]
            for key in edge_attrs.keys():
                if isinstance(edge_attrs[key], (list, set, np.ndarray)):
                    nx_graph[start_node][end_node][edge_idx][key] = ",".join([
                        str(val) for val in nx_graph[start_node][end_node][edge_idx][key]])

    return nx_graph
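A minimal usage sketch, not taken from the dataset row: it assumes the function above is in scope together with its re/numpy imports, a networkx 2.x MultiGraph, and a Python 2 interpreter (implied by the xrange call); the node names and the output path network.gml are made up for illustration. Once the list/set/array attributes are flattened to comma-joined strings, the graph can be handed straight to nx.write_gml.

import networkx as nx
import numpy as np

# build a tiny multigraph with attribute types that GML cannot serialize directly
g = nx.MultiGraph(cluster_ids=[1, 2])
g.add_node("HCLUST-1_TP53.UNK.0.A", scores=np.array([0.1, 0.2]))
g.add_node("HCLUST-2_MYC.UNK.0.A", scores=np.array([0.3]))
g.add_edge("HCLUST-1_TP53.UNK.0.A", "HCLUST-2_MYC.UNK.0.A", overlap=[5, 7])

# flatten list/set/array attributes to strings, then write the GML file
nx.write_gml(stringize_nx_graph(g), "network.gml")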
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_graphml_output(self, path):\n self.restructure_edge_info()\n self.restructure_node_info()\n return nx.write_graphml(self.G, path)", "def write_mm(g, fn):\n f = open(fn, \"w\")\n f.write(\"%d %d %d\\n\" % (g.vcount(), g.vcount(), g.ecount()))\n\n if g.is_weighted():\n for e in g.es():\n f.write(\"%d %d %.4f\\n\" % (e.source, e.target, e[\"weight\"]))\n else:\n for e in g.es():\n f.write(\"%d %d 1\\n\" % (e.source, e.target))\n\n f.close()", "def toGML(self):\n raise NotImplementedError", "def write(self, outfilename):\n\n nx.write_gpickle(self.graph, outfilename)", "def write_gml(self, f):\n G = self.graph.copy()\n\n # networkx doesn't like writing non-string attributes to GML\n for u, v in G.edges:\n for key in list(G[u][v].keys()):\n G[u][v][key] = str(G[u][v][key])\n nx.readwrite.gml.write_gml(G, f)", "def write_graph(graph, output_fp):\n output = output_fp + \".gexf\"\n print(f\"Graph written to {output}, visualise in gephi or similar\")\n nx.write_gexf(graph, output)", "def write(self,grfile):\n grfile = open(grfile + \".gr\",\"w\")\n \n for i in range(1,self.n_max):\n grfile.write(str(self.r[i]) + \"\\t\" + str(self.gr[i]) + \"\\n\")\n\n grfile.close()", "def writer(output, output_name, output_data):\n\n kml = simplekml.Kml(name=output_name)\n for exif in output_data:\n if('Latitude' in exif.keys() and\n 'Latitude Reference' in exif.keys() and\n 'Longitude Reference' in exif.keys() and\n 'Longitude' in exif.keys()):\n\n if 'Original Date' in exif.keys():\n dt = exif['Original Date']\n else:\n dt = 'N/A'\n\n if exif['Latitude Reference'] == 'S':\n latitude = '-' + exif['Latitude']\n else:\n latitude = exif['Latitude']\n\n if exif['Longitude Reference'] == 'W':\n longitude = '-' + exif['Longitude']\n else:\n longitude = exif['Longitude']\n\n kml.newpoint(name=exif['Name'],\n description='Originally Created: ' + dt,\n coords=[(longitude, latitude)])\n else:\n pass\n kml.save(os.path.join(output, output_name))", "def save_graph(self, path):\n if path.split('.')[-1]=='gexf':\n nx.write_gexf(self.graph, path)\n else:\n nx.write_gpickle(self.graph, path)", "def save_grdfile(grddata,depdata,outname,is31=True):\n \n if outname==None:\n print('save_grdfile requires a filename to save.')\n return\n try:\n fp=open(outname,'w')\n except IOError:\n print('save_grdfile: invalid filename.')\n return data\n if is31:\n fp.write('Node Number = %d\\n' % len(depdata['node_num']) )\n fp.write('Cell Number = %d\\n' % len(grddata['nv']) )\n for i in range(0,len(grddata['nv'])):\n fp.write('%d %d %d %d %d\\n'% (grddata['ele_num'][i],grddata['nv'][i,0],grddata['nv'][i,1],grddata['nv'][i,2],0))\n\n for i in range(0,len(depdata['node_num'])):\n fp.write('%d %f %f %f\\n'% (depdata['node_num'][i],depdata['x'][i],depdata['y'][i],depdata['h'][i]))\n fp.close()\n \n return", "def write_graph(g, filename):\n with open(filename, 'w') as f:\n f.write(repr(g))", "def save(self, filename):\n target = open(filename, 'w')\n target.write(\"\\\\data\\\\\\n\")\n target.write(\"ngram 1=\" + str(len(self.f1)) + \"\\n\\n\")\n target.write(\"\\\\1-grams:\\n\")\n for w,p in sorted(self.f1.items()): \n target.write(str(p) + \" \" + w + \"\\n\")\n target.write(\"\\\\end\\\\\\n\")\n target.close()", "def saveGraph(self, filename):\n nx.write_yaml(self.G,filename)", "def writeSBML(*args):\n return _libsbml.writeSBML(*args)", "def GenerateXML(dictionary, fileName=\"labelling.xml\") : \n root = gfg.Element(\"annotation\") \n #the big section is called Annotation\n for key in dictionary:\n #for every polygon list in inside 
object witho subelement name and attributes and the type \"polygon\"\n objectElement = gfg.Element(\"object\") \n root.append(objectElement) \n subElement1 = gfg.SubElement(objectElement, \"name:\".strip(\":\"))\n subElement1.text = str(dictionary[key][\"name\"])\n subElement2 = gfg.SubElement(objectElement, \"attributes\".strip(\":\"))\n subElement2.text = str(dictionary[key][\"attributes\"])\n subElement3 = gfg.SubElement(objectElement, \"polygon\")\n \n for i in range(0, len(dictionary[key])-2):\n #for every vertex of the polygon list it's rounded x, y on xml\n SubInsidePolygon = gfg.SubElement(subElement3, \"pt\")\n sub_x = gfg.SubElement(SubInsidePolygon, \"x\")\n sub_y = gfg.SubElement(SubInsidePolygon, \"y\")\n sub_x.text = str(int(round(dictionary[key][\"x_y_\" + str(i)][0])))\n sub_y.text = str(int(round(dictionary[key][\"x_y_\" + str(i)][1])))\n tree = gfg.ElementTree(root) \n #create the xml tree\n with open (fileName, \"wb\") as files : \n tree.write(files) \n #if xml does not exist create one otherwise rewrite to it", "def write_gff3(self,gff3_file=None):\r\n # write the new gff3\r\n if gff3_file:\r\n outfile = open(gff3_file, 'w')\r\n else:\r\n outfile = sys.stdout\r\n for set in self.sets:\r\n if isinstance(set, GT_seq_location):\r\n outfile.write(set.compose())\r\n else:\r\n outfile.write(set)\r\n outfile.close()", "def writeMathMLToString(*args):\n return _libsbml.writeMathMLToString(*args)", "def write_to_txt(batch, filepath, typ='vector', verbose ='dataset'):\n if typ == 'params':\n with open(filepath, \"w\") as txt:\n for param_pack in batch:\n txt.write(\"{} ; {} ; {} \\n\".format(param_pack.alphaL, \n param_pack.alphaR, param_pack.label))\n #txt.write(str(param_pack.alphaL) +\", \"+str(param_pack.alphaR) +\"\\n\")\n elif typ == 'vector':\n if verbose == 'human':\n with open(filepath, \"w\") as txt:\n for vector in batch:\n txt.write(str(vector[0]) + \"\\n\")\n txt.write(str(vector[1]) + \"\\n\")\n elif verbose == 'dataset':\n with open(filepath, \"w\") as txt:\n for vector in batch:\n #txt.write(str(vector[0].x) +\";\"+str(vector[0].y) +\";\"+ str(vector[0].angle) +\";\"+ str(vector[0].norm()) + \"\\n\")\n txt.write(str(vector[1].x) +\";\"+ str(vector[1].angle) +\";\"+ str(vector[1].norm()) + \"\\n\")", "def write_megam_file(train_toks, encoding, stream, bernoulli: bool = ..., explicit: bool = ...):\n ...", "def write_nml(self, nml_write_path):\n\n # If the object does not have any trees, construct an empty tree before writing to enable webKnossos import\n if self.num_trees() == 0:\n self.add_tree()\n\n nml = self._skeleton_to_nml()\n with open(nml_write_path, \"wb\") as f:\n wknml.write_nml(f, nml)", "def write_output(label1, label2, label3, submission_file):\n with open(submission_file, 'w') as f:\n f.write('Id,Bound'+ '\\n')\n for index, lab in enumerate(label1):\n f.write(str(index) + ',' + str(int(lab)) + '\\n')\n for index, lab in enumerate(label2):\n f.write(str(len(label1) + index) + ',' + str(int(lab)) + '\\n')\n for index, lab in enumerate(label3):\n f.write(str(len(label1) + len(label2) + index) + ',' + str(int(lab)))\n if index < len(label3) - 1:\n f.write('\\n')", "def kmlWriter(output_data, output_dir, output_name):\n msg = 'Writing ' + output_name + ' KML output.'\n print '[+]', msg\n logging.info(msg)\n # Instantiate a Kml object and pass along the output filename\n kml = simplekml.Kml(name=output_name)\n for exif in output_data:\n if 'Latitude' in exif.keys() and 'Latitude Reference' in exif.keys() and 'Longitude Reference' in exif.keys() and 
'Longitude' in exif.keys():\n\n if 'Original Date' in exif.keys():\n dt = exif['Original Date']\n else:\n dt = 'N/A'\n\n if exif['Latitude Reference'] == 'S':\n latitude = '-' + exif['Latitude']\n else:\n latitude = exif['Latitude']\n\n if exif['Longitude Reference'] == 'W':\n longitude = '-' + exif['Longitude']\n else:\n longitude = exif['Longitude']\n\n kml.newpoint(name=exif['Name'], description='Originally Created: ' + dt,\n coords=[(longitude, latitude)])\n else:\n pass\n kml.save(os.path.join(output_dir, output_name))", "def write_gro(top, filename, precision=3):\n pos_array = np.ndarray.copy(top.positions)\n pos_array = _validate_positions(pos_array)\n\n with open(filename, \"w\") as out_file:\n out_file.write(\n \"{} written by GMSO {} at {}\\n\".format(\n top.name if top.name is not None else \"\",\n gmso.__version__,\n str(datetime.datetime.now()),\n )\n )\n out_file.write(\"{:d}\\n\".format(top.n_sites))\n out_file.write(_prepare_atoms(top, pos_array, precision))\n out_file.write(_prepare_box(top))", "def write_output(self):", "def recordStatGraph(g, path):\n g.write(path, xml_declaration=True, encoding='utf-8', method='xml')", "def save(self, output, data):", "def write_gro(gro_file):\n \n f = open(gro_file,'a')\n st = \"gro write by hoomd-blue step=%i\\n\"%get_step()\n f.write(st)\n f.write(' '+str(len(system.particles))+'\\n')\n for i, p in enumerate(system.particles):\n #st = \"%5d%-5s%5s%5d%8.3f%8.3f%8.3f%8.4f%8.4f%8.4f\\n\"%(i,'NON',p.type,p.postion,p.velocity)\n st = \"%5d%-5s%5s%5d%8.3f%8.3f%8.3f%8.4f%8.4f%8.4f\\n\"%(i+1,'SOL',p.type,i+1,\n p.position[0],p.position[1],p.position[2],p.velocity[0],p.velocity[1],p.velocity[2])\n f.write(st)\n st = \"%10.5f%10.5f%10.5f\\n\"%(system.box.Lx,system.box.Ly,system.box.Lz)\n f.write(st)\n f.close()", "def write_kml(self,varnames):\n if type(varnames) is str:\n varnames=(varnames,)\n content=[]\n for varname in varnames:\n content.append(self.image2kml(varname))\n kml=self.__class__.kmlstr % \\\n {'content':'\\n'.join(content),\\\n 'prog':self.__class__.progname}\n f=open(self.__class__.kmlname,'w')\n f.write(kml)\n f.close()", "def writeGraph2File(self, file, genGraph=1, isRootNode=0, rootNodeName = \"rootNode\", \\\r\n indent=\" \", genConstraints = 0, fileName = '', genGGcode = 0, parentName=\"self\", \\\r\n genImports = 0, depth = 1, nodesToGenList = [] ):\r\n\r\n # generate code for the nodes...\r\n counter =0\r\n if( not nodesToGenList ):\r\n for nodetype in self.nodeTypes:\t\t\t\t\t\t# iterate on all the node types...\r\n for node in self.listNodes[nodetype]:\t\t\t\t\t# Iterate on all the nodes of each type\r\n node.genAttributesCode(file, genGraph, None, isRootNode, rootNodeName, indent, genConstraints, 1, genGGcode, parentName, genImports, depth + 1 )\r\n if self.isSubclass(node, 'ASG'):\t\t\t\t\t# if it is a subclass of ASG, ws should include the file generated (hierarchical modeling)\r\n newFile = fileName+str(counter)\r\n file.write(indent+'exec \"from '+newFile+' import '+newFile+'\\\\n\" in self.__dict__, self.__dict__\\n')\r\n file.write(indent+'self.'+newFile+'(self, self.obj'+str(node.objectNumber)+') \\n\\n')\r\n counter = counter + 1\r\n else:\r\n for node in nodesToGenList:\t\t\t\t\r\n node.genAttributesCode(file, genGraph, None, isRootNode, rootNodeName, indent, genConstraints, 1, genGGcode, parentName, genImports, depth + 1 )\r\n if self.isSubclass(node, 'ASG'):\t\t\t\t\t# if it is a subclass of ASG, ws should include the file generated (hierarchical modeling)\r\n newFile = fileName+str(counter)\r\n 
file.write(indent+'exec \"from '+newFile+' import '+newFile+'\\\\n\" in self.__dict__, self.__dict__\\n')\r\n file.write(indent+'self.'+newFile+'(self, self.obj'+str(node.objectNumber)+') \\n\\n')\r\n counter = counter + 1\r\n \r\n \r\n # if fileName has a value, we are saving a model, we must generate a function to hold the connections...\r\n if fileName != '':\r\n # if we are not dealing with a hierarchical model, an extra method is not needed..\r\n hierarchical = self.isHierarchical()\r\n if hierarchical:\r\n file.write('\\ndef '+fileName+'_connections(self, rootNode):\\n')\r\n\r\n\r\n #-------- Modified by Ximeng Sun / Apr 9,2005 for large conn nums --------\r\n file.write('\\n')\r\n writed = 0\r\n # generate code for the connections...\r\n if( not nodesToGenList ):\r\n for nodetype in self.nodeTypes:\r\n for node in self.listNodes[nodetype]:\r\n if isRootNode: \r\n if(node.__dict__.has_key('name')):\r\n debugName = ' named ' + node.name.toString() + '\\n'\r\n else:\r\n debugName = ' of type ' + node.__class__.__name__ + '\\n'\r\n file.write(indent+'# Connections for obj'+str(node.objectNumber)\r\n +' (graphObject_: '+node.graphObject_.tag + ')' + debugName)\r\n file.write(indent+'self.drawConnections(\\n')\r\n res = node.genConnectionsCode(file, genGraph, isRootNode, \r\n indent, 1, writed)\r\n if isRootNode: \r\n file.write(' )\\n')\r\n else:\r\n for node in nodesToGenList:\r\n if isRootNode: file.write(indent+'self.drawConnections(')\r\n res = node.genConnectionsCode(file, genGraph, isRootNode, indent, 1, \r\n writed, nodesToGenList = nodesToGenList)\r\n if isRootNode: file.write(' )\\n')\r\n file.write('\\n')\r\n #------------ End of modification by Ximeng Sun / Apr 9,2005 -------------\r\n \r\n \r\n # if rootNode and I'm generating a function (filename != '')\r\n # then call subModel's functions for connections...\r\n if isRootNode and fileName != '': # if main model\r\n counter = 0\r\n if( not nodesToGenList ):\r\n for nodetype in self.nodeTypes: # iterate, to search for all submodels\r\n for node in self.listNodes[nodetype]:\r\n if self.isSubclass(node, 'ASG'): # found a submodel\r\n file.write(indent+'self.'+fileName+str(counter)+'_connections( self, self.obj'+str(node.objectNumber)+')\\n')\r\n writed = 1\r\n counter = counter + 1\r\n else:\r\n for node in nodesToGenList:\r\n if self.isSubclass(node, 'ASG'): # found a submodel\r\n file.write(indent+'self.'+fileName+str(counter)+'_connections( self, self.obj'+str(node.objectNumber)+')\\n')\r\n writed = 1\r\n counter = counter + 1\r\n \r\n \r\n if fileName != '' and (not writed) and hierarchical: # we must write 'pass', because nothing has been writed in the function!!\r\n file.write(indent+'pass\\n')", "def write(self):", "def write(self):", "def write_file(self):\n if self.it_num % 5 == 0:\n #plt.imshow(self.grid)\n #plt.savefig(\"output%.4d.png\" % self.it_num, bbox_inches='tight')\n io.savemat(\"MLOutput%.4d\" % self.it_num, { \"Grid\":self.grid})", "def _write_goft(parameters):\n # Format\n fmt = block_to_format[\"GOFT\"]\n fmt = str2format(fmt[5])\n\n values = [x for x in parameters[\"generator_history\"]]\n out = write_record(values, fmt, multi=True)\n\n return out", "def export(fileprefix, hedges):\n with open(fileprefix + '.txt', 'w') as f:\n for h in hedges:\n s = \"\"\n for node in h[0]: #each node in the tail\n s += str(node) + \"|\"\n s = s[:-1]\n s += '\\t'\n for node in h[1]: #each node in the head\n s += str(node) + \"|\"\n s = s[:-1]\n s += '\\t'\n s += '1' + '\\n' #assigns weight for the hedge, currently always 
set to 1\n f.write(s)", "def save_graph(self, filename, fileType):\n if fileType == \"GML Format\":\n nx.write_gml(self.graph, filename+\".gml\")\n if fileType == \"Adjacency list\":\n nx.write_adjlist(self.graph, filename+\".adjlist\")\n if fileType == \"YAML\":\n nx.write_yaml(self.graph, filename + \".yaml\")", "def write_ptm_gridfile(self,fn):\n vertex_hdr = \" Vertex Data: vertex_number, x, y\"\n poly_hdr = \" Polygon Data: polygon_number, number_of_sides,center_x, center_y, center_depth, side_indices(number_of_sides), marker(0=internal,1=open boundary)\"\n side_hdr = \" Side Data: side_number, side_depth, node_indices(2), cell_indices(2), marker(0=internal,1=external,2=flow boundary,3=open boundary)\"\n\n with open(fn,'wt') as fp:\n # write header counts\n fp.write(\" Number of Vertices\\n\")\n fp.write(\" %20d\\n\"%self.Nnodes())\n fp.write(\" Number of Polygons\\n\")\n fp.write(\" %20d\\n\"%self.Ncells())\n fp.write(\" Number of Sides\\n\")\n fp.write(\" %20d\\n\"%self.Nedges())\n fp.write(\" NODATA (land) value\\n\")\n fp.write(\" -9999.000000000\\n\")\n\n # write vertex info\n fp.write(vertex_hdr+\"\\n\")\n for v in range(self.Nnodes()):\n fp.write(\" %10d %16.7f %16.7f\\n\"%(v+1,\n self.nodes['x'][v,0],\n self.nodes['x'][v,1]))\n\n # write polygon info\n fp.write(poly_hdr+\"\\n\")\n cell_write_str1 = \" %10d %10d %16.7f %16.7f %16.7f \"\n cell_depths = self.cell_depths()\n for e in range(self.Ncells()):\n edges = self.cells['edges'][e,:]\n edges[edges<0] = -1\n edge_str = \" \".join( [\"%10d\"%(s+1) for s in edges] )\n edge_str = edge_str+\" %10d\\n\"%(self.cells['mark'][e])\n nsides = sum(edges>=0)\n fp.write(cell_write_str1%(e+1,\n nsides,\n self.cells['_center'][e,0],\n self.cells['_center'][e,1],\n cell_depths[e]))\n fp.write(edge_str)\n \n # write side info\n fp.write(side_hdr+\"\\n\")\n edge_depths = self.edge_depths()\n edge_write_str = \" %10d %16.7f %10d %10d %10d %10d %10d\\n\"\n for s in range(self.Nedges()):\n edges = self.edges['cells'][s,:]\n edges[edges<0] = -1 \n nodes = self.edges['nodes'][s,:]\n nodes[nodes<0] = -1\n fp.write(edge_write_str%(s+1,\n edge_depths[s],\n nodes[0]+1,\n nodes[1]+1,\n edges[0]+1,\n edges[1]+1,\n self.edges['mark'][s]))", "def site2nrml(model, params_dict): \n \"\"\"\n # Some XML definitions\n NAMESPACE = 'http://openquake.org/xmlns/nrml/0.4'\n GML_NAMESPACE = 'http://www.opengis.net/gml'\n SERIALIZE_NS_MAP = {None: NAMESPACE, 'gml': GML_NAMESPACE} \n gml_ns = SERIALIZE_NS_MAP['gml']\n \"\"\"\n \n # Head matter \n root = etree.Element(_tag='nrml', nsmap={'gml': 'http://www.opengis.net/gml'})\n root.set('xmlns', 'http://openquake.org/xmlns/nrml/0.4')\n root.append(etree.Comment('%s' % '%s site model' %(model)))\n \n\n # Define Site Model Name \n sMod = etree.SubElement(root, \"siteModel\")\n sMod.set('name', model + ' Site Model')\n \n # Define sub element\n \n for key in params_dict:\n \n site = etree.SubElement(sMod, \"site\")\n site.set('lon', '%s' % key[0])\n site.set('lat', '%s' % key[1])\n site.set('vs30', '%s' % params_dict[key][0])\n site.set('vs30Type', '%s' % 'inferred')\n site.set('z1pt0', '%s' % '%3.3f' % float(params_dict[key][1]))\n site.set('z2pt5', '%s' % '%3.3f' % float(params_dict[key][2]))\n \n #print(getMinMax(params_dict))\n \n # Form tree and write to xml\n root_tree = etree.ElementTree(root)\n outFile = open((out_directory + '/' + out_filename), 'wb')\n root_tree.write(outFile, encoding=\"utf-8\", xml_declaration=True, pretty_print=True)", "def writeout(self):\n out_file = ''.join(['theta_w_t', str(self.t), 
'.dat'])\n data_list = [] \n\n for i in xrange(self.n_params): \n data_list.append( self.theta_t[i,:] ) \n\n data_list.append(self.w_t)\n\n np.savetxt(\n out_file, \n (np.vstack(np.array(data_list))).T, \n delimiter='\\t'\n )\n\n return None", "def writeSBMLToFile(*args):\n return _libsbml.writeSBMLToFile(*args)", "def create_grp_file(data, model_name, gp_var, outputModelFilesDirectory):\n\n dimx = None\n dimy = None\n if len(data.shape) == 1:\n dimy = 1\n dimx = data.shape[0]\n else:\n dimx, dimy = data.shape\n data = np.ones(dimx)\n\n if not (gp_var == None):\n i = 1\n for key in sorted(gp_var.keys()):\n\n for index in gp_var[key]:\n data[index] = i\n\n i += 1\n\n\n f = open(os.path.join(outputModelFilesDirectory, model_name + '.grp'), 'w')\n\n print >>f, '/NumWaves\\t1'\n print >>f, '/NumPoints\\t%d\\n' %dimx\n print >>f, '/Matrix'\n np.savetxt(f, data, fmt='%d', delimiter='\\t')\n\n f.close()", "def write_gro(strucC,data_file):\n #\n latvec = strucC.getLatVec()\n\n gro_lines = \" com2gro \\n\"\n gro_lines += \" %-2i \\n\" %( int(len(strucC.ptclC)) )\n atom_indx = 0 \n for pid, pt_i in strucC.ptclC:\n atom_indx += 1\n if( atom_indx > 10000): atom_indx = 1\n r_i = pt_i.position\n r_i_nm = [units.convert_angstroms_nm(r_i[0]) ,units.convert_angstroms_nm(r_i[1]) ,units.convert_angstroms_nm(r_i[2]) ]\n gro_lines += \"%5d%-5s%5s%5d%8.3f%8.3f%8.3f\\n\" % (atom_indx,pt_i.tagsDict[\"resname\"][:5],pt_i.tagsDict[\"label\"][:5],atom_indx,r_i_nm[0],r_i_nm[1],r_i_nm[2] )\n if( atom_indx > 99999 ):\n atom_indx = 1\n \n gro_lines += \" %8.5f %8.5f %8.5f %8.5f %8.5f %8.5f %8.5f %8.5f %8.5f \\n\" % (units.convert_angstroms_nm(latvec[0][0]),units.convert_angstroms_nm(latvec[1][1]),units.convert_angstroms_nm(latvec[2][2]),units.convert_angstroms_nm(latvec[0][1]),units.convert_angstroms_nm(latvec[0][2]),units.convert_angstroms_nm(latvec[1][0]),units.convert_angstroms_nm(latvec[1][2]),units.convert_angstroms_nm(latvec[2][0]),units.convert_angstroms_nm(latvec[2][1])) \n\n F = open( data_file, 'w' )\n F.write(gro_lines)\n F.close()", "def write_gro(strucC,data_file):\n #\n latvec = strucC.getLatVec()\n\n gro_lines = \" com2gro \\n\"\n gro_lines += \" %-2i \\n\" %( int(len(strucC.ptclC)) )\n atom_indx = 0 \n for pid, pt_i in strucC.ptclC:\n atom_indx += 1\n if( atom_indx > 10000): atom_indx = 1\n r_i = pt_i.position\n r_i_nm = [units.convert_angstroms_nm(r_i[0]) ,units.convert_angstroms_nm(r_i[1]) ,units.convert_angstroms_nm(r_i[2]) ]\n gro_lines += \"%5d%-5s%5s%5d%8.3f%8.3f%8.3f\\n\" % (atom_indx,pt_i.tagsDict[\"resname\"][:5],pt_i.tagsDict[\"label\"][:5],atom_indx,r_i_nm[0],r_i_nm[1],r_i_nm[2] )\n if( atom_indx > 99999 ):\n atom_indx = 1\n \n gro_lines += \" %8.5f %8.5f %8.5f %8.5f %8.5f %8.5f %8.5f %8.5f %8.5f \\n\" % (units.convert_angstroms_nm(latvec[0][0]),units.convert_angstroms_nm(latvec[1][1]),units.convert_angstroms_nm(latvec[2][2]),units.convert_angstroms_nm(latvec[0][1]),units.convert_angstroms_nm(latvec[0][2]),units.convert_angstroms_nm(latvec[1][0]),units.convert_angstroms_nm(latvec[1][2]),units.convert_angstroms_nm(latvec[2][0]),units.convert_angstroms_nm(latvec[2][1])) \n\n F = open( data_file, 'w' )\n F.write(gro_lines)\n F.close()", "def writeToXml(imageName, imageSize, imagePath, allCellInfo, outputFile, files):\n\n root = Element('annotation')\n root.set('verified', 'no')\n\n filesUsed = SubElement(root, 'files')\n # folder.text = 'WBC'\n filesUsed.text = str(files)\n filename = SubElement(root, 'filename')\n filename.text = imageName\n path = SubElement(root, 'path')\n path.text = 
imagePath\n source = SubElement(root, 'source')\n database = SubElement(source, 'database')\n database.text = 'Unknown'\n size = SubElement(root, 'size')\n width = SubElement(size, 'width')\n width.text = str(imageSize[0])\n height = SubElement(size, 'height')\n height.text = str(imageSize[1])\n depth = SubElement(size, 'depth')\n depth.text = '3'\n segmented = SubElement(root, 'segmented')\n segmented.text = \"0\"\n\n for cell in allCellInfo:\n name_str, xmin_str, ymin_str, xmax_str, ymax_str = cell\n objectTag = SubElement(root, 'object')\n name = SubElement(objectTag, 'name')\n name.text = name_str\n pose = SubElement(objectTag, 'pose')\n pose.text = 'Unspecified'\n truncated = SubElement(objectTag, 'truncated')\n truncated.text = '0'\n difficult = SubElement(objectTag, 'difficult')\n difficult.text = '0'\n bndbox = SubElement(objectTag, 'bndbox')\n xmin = SubElement(bndbox, 'xmin')\n xmin.text = xmin_str\n ymin = SubElement(bndbox, 'ymin')\n ymin.text = ymin_str\n xmax = SubElement(bndbox, 'xmax')\n xmax.text = xmax_str\n ymax = SubElement(bndbox, 'ymax')\n ymax.text = ymax_str\n\n tree = ET.ElementTree(root)\n tree.write(outputFile)", "def write_graph(self, graph_def):\n write_name = self.params.model_name+\"_v\"+self.params.version+\".pb\"\n self.writer = tf.compat.v1.summary.FileWriter(self.params.save_dir, graph=self.graph)\n tf.io.write_graph(graph_def,\n logdir=self.params.save_dir, name=write_name, as_text=False)\n self.logger.log_info(\"Graph def saved in file %s\"%self.params.save_dir+write_name)", "def _write_network_file(graph, out_name, out_format=None, data=False,weight=False):\n\n if out_format==None:\n out_format=\"edges\"\n os.makedirs(os.path.dirname(out_name), exist_ok=True)\n #print(\"writing graph of format \" + out_format + \" at \" + out_name)\n if out_format == 'edges':\n nx.write_edgelist(graph, \"%s.edges\" % (out_name), data=data)\n elif out_format == 'gefx':\n nx.write_gexf(graph, \"%s.gefx\" % (out_name))\n elif out_format == 'gml':\n nx.write_gml(graph, \"%s.gml\" % (out_name))\n elif out_format == 'pajek':\n nx.write_pajek(graph, \"%s.pajek\" % (out_name))\n elif out_format == 'ncol':\n nx.write_edgelist(graph, \"%s.ncol\" % (out_name), delimiter='\\t',data=weight)\n elif out_format == 'graphML' :\n g = nx.write_graphml(graph, \"%s.graphml\" % (out_name))\n else:\n raise Exception(\"UNKNOWN FORMAT \" + out_format)", "def write_out4fp(fname,specorder,nspcs,agr,nr,rmax,pairs,nperline=6):\n ndat = nr *len(pairs)\n data = np.zeros(ndat)\n n = 0\n for pair in pairs:\n isid,jsid = pair\n for i in range(nr):\n data[n] = agr[isid,jsid,i]\n n += 1\n\n with open(fname,'w') as f:\n f.write('# RDF for pairs: ')\n for pair in pairs:\n si = specorder[pair[0]-1]\n sj = specorder[pair[1]-1]\n f.write(' {0:s}-{1:s},'.format(si,sj))\n f.write('\\n')\n f.write('# rmax, nr = {0:.3f}, {1:d}\\n'.format(rmax,nr))\n f.write('#\\n')\n #...Num of data, weight for the data\n f.write(' {0:6d} {1:7.3f}\\n'.format(ndat, 1.0))\n j0 = 0\n while True:\n f.write(' '.join('{0:12.4e}'.format(data[j]) for j in range(j0,j0+nperline) if j < ndat))\n f.write('\\n')\n j0 += nperline\n if j0 >= ndat:\n break\n\n return None", "def export_gexf(rotulos,similaridades,nome_arquivo,threshold,excluir_negativos):\n\n tbl = dict.fromkeys(i for i in range(sys.maxunicode) if unicodedata.category(chr(i)).startswith('P'))\n\n arquivo = codecs.open(nome_arquivo + \".gexf\",\"w\",\"utf-8\")\n arquivo.write('<?xml version=\"1.0\" encoding=\"UTF-8\"?>\\n')\n arquivo.write('<gexf 
xmlns=\"http://www.gexf.net/1.2draft\" version=\"1.2\">\\n')\n arquivo.write('\\t<graph mode=\"static\" defaultedgetype=\"undirected\">\\n')\n arquivo.write('\\t\\t\\t<nodes>\\n')\n arquivo.flush()\n\n cont=0\n docs = list(rotulos.keys())\n for key in docs:\n rotulo = re.sub(r'[<>]', '', rotulos[key].translate(tbl))\n arquivo.write(u\"\\t\\t\\t\\t<node id=\\\"%d\\\" label=\\\"%s\\\"/>\\n\" % (docs.index(key), rotulo))\n\n cont = cont+1\n if cont == 50:\n arquivo.flush()\n cont = 0\n\n arquivo.write('\\t\\t\\t</nodes>\\n')\n arquivo.write('\\t\\t\\t<edges>\\n')\n arquivo.flush()\n\n cont=0\n for similaridade in similaridades:\n if(excluir_negativos and (similaridade[2] < 0)):\n continue\n\n if abs(similaridade[2]) >= threshold:\n arquivo.write(\"\\t\\t\\t\\t<edge source=\\\"%d\\\" target=\\\"%d\\\" weight=\\\"%f\\\" />\\n\" % (docs.index(similaridade[0]),docs.index(similaridade[1]),similaridade[2]))\n\n cont = cont+1\n if cont == 50:\n arquivo.flush()\n cont = 0\n\n arquivo.write('\\t\\t\\t</edges>\\n')\n arquivo.write('\\t</graph>\\n')\n arquivo.write('</gexf>')\n arquivo.close() # you can omit in most cases as the destructor will call it", "def connect_and_write_gml(self, f):\n G = self.graph.copy()\n node_base_set = set([i[:-2] for i in list(G.nodes)])\n for node in node_base_set:\n G.add_edge(node + \"_b\", node + \"_e\")\n\n # networkx doesn't like writing non-string attributes to GML\n for u, v in G.edges:\n for key in list(G[u][v].keys()):\n G[u][v][key] = str(G[u][v][key])\n nx.readwrite.gml.write_gml(G, f)", "def writeStatsToFile( gfname, sfname, tgraph ):\n ParProbG = graphWithCutoff(gfname, 0.0)\n with open(sfname,'wb') as ofile:\n for u,v in itertools.combinations( tgraph.nodes(), 2 ):\n ofile.write(\"{0} {1}\\n\".format( ParProbG[u][v]['weight'] if ParProbG.has_edge(u,v) else 0.0, 1 if tgraph.has_edge(u,v) else 0) )", "def graph_list_to_pajek(G_list):\n def _write_pajek(A, node_labels=None, index_from=0):\n \"\"\"Return multiplex representation of multiplex network adjacency matrix A\n\n Providing an adjacency tensor where A[:, :, k] is adjacency matrix of temporal\n layer k, return a pajek format representation of the temporal network which weights interlayer\n edges by state node neighborhood similarity. \n\n Parameters\n ----------\n A : numpy.3darray\n 3d tensor where each A[:, :, k] is a layer adjacency matrix\n max_trans_prob : float/str\n Cap on interlayer edge weights. 'square' for square penalty.\n power_penalty : int/float\n Power to jaccard similarity betw. state nodes to penalize low similarity\n index_from : int\n From which number to index nodes and layers in pajek format from\n style : bool\n Either 'zigzag', 'vertical', or 'simple'. 
'vertical' will give working results but is\n essentially wrong use of Infomap, 'simple' should be possible to use in Infomap but is not\n at this point, so 'zigzag' is preferred because it is an explicit representation of the way\n the network should be represented internally in Infomap.\n\n Returns\n -------\n out_file : string\n A network string in multiplex format\n intid_to_origid : dict\n Key-value pairs of node integer id and original id\n origid_to_intid : dict\n Reverse of intid_to_origid\n \"\"\"\n\n def _write_outfile(A):\n \"\"\"Write nodes and intra/inter-edges from A and J to string.\"\"\"\n def __remove_symmetry_A(A):\n A_triu = defaultdict(int)\n for (i, j, k), w in list(A.items()):\n if j > i:\n A_triu[(i, j, k)] = w\n return A_triu\n def __write_nodes(outfile):\n outfile += \"*Vertices %d\" % Nn\n for nid, label in enumerate(nodes):\n outfile += '\\n%d \"%s\" 1.0' % (nid + index_from, str(label))\n return outfile\n def __write_intra_edges(outfile):\n outfile += \"\\n*Intra\\n# layer node node [weight]\"\n for (i, j, k), w in list(__remove_symmetry_A(A).items()):\n outfile += '\\n%d %d %d %f' % (\n k + index_from, # layer\n nodemap[i] + index_from, # node\n nodemap[j] + index_from, # node\n w # weight\n )\n return outfile\n\n outfile = \"\"\n outfile = __write_nodes(outfile)\n outfile = __write_intra_edges(outfile)\n\n return outfile\n\n nodes = sorted(set([n for i, j, _ in list(A.keys()) for n in [i, j]]))\n Nn = len(nodes)\n Nl = len(set([k for i, j, k in list(A.keys())]))\n\n nodemap = dict(list(zip(nodes, list(range(Nn)))))\n\n return _write_outfile(A)\n\n def _create_adjacency_matrix(layer_edges):\n \"\"\"Return 3d adjacency matrix of the temporal network.\n \n Input\n -----\n layer_edges : dict\n \n Output\n ------\n A : dict\n \"\"\"\n A = defaultdict(int)\n for l, edges in list(layer_edges.items()):\n for edge in edges:\n A[(edge[0], edge[1], l)] += 1\n A[(edge[1], edge[0], l)] += 1 \n return A\n \n return _write_pajek(\n _create_adjacency_matrix(\n dict(list(zip(list(range(len(G_list))), [G.edges() for G in G_list])))\n )\n )", "def writeMCToGR3File(filename, mc):\n nodes = np.vstack((mc.x, mc.y)).T\n nodalValues = mc.data[:, 0, 0].squeeze()[:, None]\n connectivity = mc.connectivity\n openBndNodes = []\n landBndNodes = []\n writeGR3File(filename, '', nodes, nodalValues, connectivity, mc.boundaries)", "def gexf_graph():\n # you must replace these lines and supply your own graph\n \n \n \n my_gexf = Gexf(\"JiajiaXie\", \"My awesome graph\")\n graph=my_gexf.addGraph(\"undirected\", \"static\", \"My awesome networks\")\n \n atr1=graph.addNodeAttribute('Type',type='string')\n\n\n for set in data_specific:\n if graph.nodeExists(set['set_num']) ==0:\n tm1=graph.addNode(set['set_num'], set['name'], r='0', g='0', b='0')\n tm1.addAttribute(atr1,\"set\")\n\n\n\n counter_test=1\n for set, part in data_parts.items():\n for key, part_list in part.items():\n interme =part_list['color']\n red=interme[0]+interme[1]\n green=interme[2]+interme[3]\n blue=interme[4]+interme[5]\n\n red_de=str(int(red,16))\n green_de=str(int(green,16))\n blue_de=str(int(blue,16))\n if graph.nodeExists(part_list['id'])==0:\n tm2=graph.addNode(part_list['id'], part_list['part_name'],r=red_de, g=green_de, b = blue_de)\n tm2.addAttribute(atr1,\"part\")\n\n\n counter_test+=1\n graph.addEdge(\"_\"+str(counter_test), set, part_list['id'], part_list['quantity'])\n\n\n\n f=open('bricks_graph.gexf','wb')\n my_gexf.write(f)\n\n\n return my_gexf.graphs[0]", "def _write_outfile(A):\n def __remove_symmetry_A(A):\n 
A_triu = defaultdict(int)\n for (i, j, k), w in list(A.items()):\n if j > i:\n A_triu[(i, j, k)] = w\n return A_triu\n def __write_nodes(outfile):\n outfile += \"*Vertices %d\" % Nn\n for nid, label in enumerate(nodes):\n outfile += '\\n%d \"%s\" 1.0' % (nid + index_from, str(label))\n return outfile\n def __write_intra_edges(outfile):\n outfile += \"\\n*Intra\\n# layer node node [weight]\"\n for (i, j, k), w in list(__remove_symmetry_A(A).items()):\n outfile += '\\n%d %d %d %f' % (\n k + index_from, # layer\n nodemap[i] + index_from, # node\n nodemap[j] + index_from, # node\n w # weight\n )\n return outfile\n\n outfile = \"\"\n outfile = __write_nodes(outfile)\n outfile = __write_intra_edges(outfile)\n\n return outfile", "def write(self,data): \n \n if not os.path.exists(self.output_dir):\n os.makedirs(self.output_dir)\n\n # We will store these in a separate file and link them to the level2s\n fname = data.filename.split('/')[-1]\n units = {'A':'K','x0':'degrees','y0':'degrees','sigx':'degrees','sigy':'degrees','sigy_scale':'none','B':'K','phi':'radians'}\n\n outfile = '{}/{}_{}'.format(self.output_dir,self.prefix,fname)\n\n print ('WRITING: ',outfile)\n output = h5py.File(outfile,'a')\n\n # Set permissions and group\n os.chmod(outfile,0o664)\n shutil.chown(outfile, group='comap')\n\n ##\n ## Narrow channel fits\n ##\n\n for valerr in ['Values','Errors','Chi2']:\n if f'Gauss_Narrow_{valerr}' in output:\n del output[f'Gauss_Narrow_{valerr}']\n gauss_fits = output.create_group(f'Gauss_Narrow_{valerr}')\n gauss_fits.attrs['FitFunc'] = self.model.__name__\n gauss_fits.attrs['source_el'] = self.source_positions['mean_el']\n gauss_fits.attrs['source_az'] = self.source_positions['mean_az']\n\n dnames = self.map_parameters\n dsets = [self.map_fits[valerr][...,iparam] for iparam in range(self.map_fits[valerr].shape[-1])]\n\n for (dname, dset) in zip(dnames, dsets):\n if dname in output:\n del output[dname]\n print(dname,dset.shape,units[dname])\n gauss_dset = gauss_fits.create_dataset(dname, data=dset)\n gauss_dset.attrs['Unit'] = units[dname]\n \n\n output.attrs['SourceFittingVersion'] = __version__\n output.attrs['source'] = self.getSource(data)\n output.close()\n self.linkfile(data)", "def saveGraph (self, filename) :\n\t\tss = \"digraph {\\n\"\n\t\tfor key, rules in self.production_rules.items() :\n\t\t\tfor rule in rules :\n\t\t\t\tr = [op.val for op in rule]\n\t\t\t\tr = [i.replace (\"-\", \"\") for i in r]\n\t\t\t\tr = [i.replace (\".\", \"\") for i in r]\n\t\t\t\tr = [i.replace (\"\\'\\'\", \"eps\") for i in r]\n\t\t\t\tr = [i.replace (\"\\\"\\\"\", \"eps\") for i in r]\n\t\t\t\tr = [i.replace (\"/\", \"_\") for i in r]\n\t\t\t\tk = key.replace (\"-\", \"\")\n\t\t\t\tk = k.replace (\"/\", \"_\")\n\t\t\t\tk = k.replace (\".\", \"_tok\")\n\t\t\t\tss += \"\\t\" + k + \" -> \" \n\t\t\t\tss += \" -> \".join (r)\n\t\t\t\tss += \" ;\\n\"\n\t\tss += \"}\"\n\t\tfilestream = open (filename + '.dot', 'w') \n\t\tfilestream.write(ss)\n\t\tfilestream.close ()\n\t\tcmd = 'dot -Tpng -o ' + filename + '.png ' + filename + '.dot'\n\t\tos.system (cmd)\n\t\tcmd = 'rm ' + filename + '.dot'\n\t\tos.system (cmd)", "def save_ml_output(arrays, out_fname, force):\n if not force:\n if os.path.isfile(out_fname):\n return\n try:\n os.makedirs(os.path.dirname(out_fname))\n except FileExistsError:\n pass\n np.save(out_fname, arrays, allow_pickle=False)", "def writeSBML(self, *args):\n return _libsbml.SBMLWriter_writeSBML(self, *args)", "def to_svg(self, outfile, scaling, precision, attributes):\n outfile.write('<g 
id=\"')\n outfile.write(self.name.replace(\"#\", \"_\"))\n outfile.write('\" ')\n outfile.write(attributes)\n outfile.write(\">\\n\")\n for polygon in self.polygons:\n polygon.to_svg(outfile, scaling, precision)\n for path in self.paths:\n path.to_svg(outfile, scaling, precision)\n for label in self.labels:\n label.to_svg(outfile, scaling, precision)\n for reference in self.references:\n reference.to_svg(outfile, scaling, precision)\n outfile.write(\"</g>\\n\")", "def test_save_geometric(self):\n G = nx.random_geometric_graph(20, 0.1)\n env = Environment(topology=G)\n f = io.BytesIO()\n env.dump_gexf(f)", "def k2g(\n kml_path_or_buffer,\n output_dir,\n feature_collection_name,\n style_type,\n style_filename,\n separate_folders,\n):\n style, *layers = m.convert(\n kml_path_or_buffer,\n style_type=style_type,\n separate_folders=separate_folders,\n feature_collection_name=feature_collection_name,\n )\n\n # Create output directory if it doesn't exist\n output_dir = pl.Path(output_dir)\n if not output_dir.exists():\n output_dir.mkdir(parents=True)\n output_dir = output_dir.resolve()\n\n # Write style file\n path = output_dir / style_filename\n with path.open(\"w\") as tgt:\n json.dump(style, tgt)\n\n # Create filenames for layers\n stems = m.disambiguate(m.to_filename(layer[\"name\"]) for layer in layers)\n filenames = [f\"{stem}.geojson\" for stem in stems]\n\n # Write layer files\n for i in range(len(layers)):\n path = output_dir / filenames[i]\n with path.open(\"w\") as tgt:\n json.dump(layers[i], tgt)", "def save_grd(filename, meta, map):\n if os.path.exists(filename):\n raise ValueError(\"File already exists: {}\".format(filename))\n if map.shape != (meta['NX'], meta['NY'], meta['NCOMP']):\n raise ValueError(\"The map shape does not match the metadata dictionary.\")\n points = meta['NX'] * meta['NY']\n components = meta['NCOMP']\n data = np.empty((points, 2 * components))\n for component in range(components):\n data[:, 2 * component] = map[:, :, component].reshape(points, order='F').real\n data[:, 2 * component + 1] = map[:, :, component].reshape(points, order='F').imag\n with open(filename, 'w') as f:\n for line in meta['header']:\n f.write('{}\\n'.format(line))\n f.write('{:2d}\\n'.format(meta['KTYPE']))\n f.write('{:12d}{:12d}{:12d}{:12d}\\n'.format(meta['NSET'], meta['ICOMP'], meta['NCOMP'], meta['IGRID']))\n f.write('{:12d}{:12d}\\n'.format(meta['IX'], meta['IY']))\n f.write(' {: 0.10E} {: 0.10E} {: 0.10E} {: 0.10E}\\n'.format(meta['XS'], meta['YS'], meta['XE'], meta['YE']))\n f.write('{:12d}{:12d}{:12d}\\n'.format(meta['NX'], meta['NY'], meta['KLIMIT']))\n for p in range(points):\n f.write(''.join([float_to_string(number) for number in data[p, :]]) + '\\n')", "def write_pajek(ml_edgelist, index_from=0):\n def _build_adjacency_tensor(ml_edgelist, index=\"zero\"):\n \"\"\"Return adjacency tensor representation of multilayer edgelist.\"\"\"\n layers = sorted(set(ml_edgelist['layer']))\n nodes = set(list(ml_edgelist['node1']) + list(ml_edgelist['node2']))\n ind = dict((n, i) for i, n in enumerate(nodes))\n\n A = defaultdict(int)\n for l in layers:\n for _, row in ml_edgelist.loc[ml_edgelist['layer'] == l].iterrows():\n # Must add both ways if undirected so A becomes symmetrical. 
If only added one-way\n # triu will only be connections from 'node1' and and tril from 'node2' or vice versa.\n if index == \"zero\":\n A[(ind[row['node1']], ind[row['node2']], l)] += 1\n A[(ind[row['node2']], ind[row['node1']], l)] += 1\n else:\n A[(row['node1'], row['node2'], l)] += 1\n A[(row['node2'], row['node1'], l)] += 1\n return A, dict((v, k) for k, v in list(ind.items()))\n\n def _write_outfile(A):\n \"\"\"Write nodes and intra/inter-edges from A and J to string.\"\"\"\n def __remove_symmetry_A(A):\n A_triu = defaultdict(int)\n for (i, j, k), w in list(A.items()):\n if j > i:\n A_triu[(i, j, k)] = w\n return A_triu\n def __write_nodes(outfile):\n outfile += \"*Vertices %d\" % Nn\n for nid, label in enumerate(nodes):\n outfile += '\\n%d \"%s\" 1.0' % (nid + index_from, labelmap[label])\n return outfile\n def __write_edges(outfile):\n outfile += \"\\n*Intra\\n# layer node node [weight]\"\n sorted_A_sparse = sorted(list(__remove_symmetry_A(A).items()), key=lambda ind__: ind__[0][2])\n for (i, j, k), w in sorted_A_sparse:\n outfile += '\\n%d %d %d %f' % (\n k + index_from, # layer\n nodemap[i] + index_from, # node\n nodemap[j] + index_from, # node\n w # weight\n )\n return outfile\n \n outfile = \"\"\n outfile = __write_nodes(outfile)\n outfile = __write_edges(outfile)\n \n return outfile\n\n A, labelmap = _build_adjacency_tensor(ml_edgelist)\n\n nodes = sorted(set([n for i, j, _ in list(A.keys()) for n in [i, j]]))\n Nn = len(nodes)\n Nl = len(set([k for i, j, k in list(A.keys())]))\n\n nodemap = dict(list(zip(nodes, list(range(Nn)))))\n\n return _write_outfile(A)", "def export_topology(topology, filename='topology.gml'):\n\n nx.write_gml(topology, filename)", "def write(self, cull=False):\n if cull:\n cull_prefixes(self).write()\n else:\n ser = self.g.serialize(format='nifttl', encoding='utf-8')\n with open(self.filename, 'wb') as f:\n f.write(ser)\n #print('yes we wrote the first version...', self.name)", "def _write_keypoint_results(keypoint_results, gt_folder, pred_folder):", "def gwrite(self, output, format='ascii.commented_header'):\n self.write(output, format=format)", "def write(self, out):", "def writeSBMLToString(*args):\n return _libsbml.writeSBMLToString(*args)", "def save(self, path=\"\"):\n path = path + \"model_\" + str(self.name) + \".txt\"\n if os.path.isfile(path):\n os.remove(path)\n f = open(path, \"w+\")\n for ident in self.networks:\n f.write(ident + \"_\" + self.networks[ident].descriptor.codify_components() + \"_\" + str(self.networks[ident].taking.size) + \",\" + self.networks[ident].taking.type + \"_\" + str(self.networks[ident].producing.size) + \",\" + self.networks[ident].producing.type + \"_\" +\n str(self.networks[ident].depth) + \"_\" + \",\".join(self.reachable[ident]) + \"_\" + \",\".join(self.comps_below[ident]) + \"\\n\")\n f.write(\"\\n\")\n\n for ident in self.inputs:\n f.write(ident + \"_\" + str(self.inputs[ident].producing.size) + \"_\" + self.inputs[ident].producing.type + \"_\" + str(self.inputs[ident].depth) + \"\\n\")\n f.write(\"\\n\")\n\n for ident in self.outputs:\n f.write(ident + \"_\" + str(self.outputs[ident].taking.size) + \"_\" + self.outputs[ident].taking.type + \"_\" + str(self.outputs[ident].depth) + \"_\" + \",\".join(self.comps_below[ident]) + \"\\n\")\n f.write(\"\\n\")\n\n for con in self.connections:\n f.write(self.connections[con].codify() + \"\\n\")\n #f.write(\"\\n\")\n\n f.close()\n\n return path", "def training_data_g_output(P=\"{P:d}\", T=\"{T:.2f}\", J=\"{J:d}\"):\n return _training_data_g_output.format(P, T, J)", 
"def export(self, outpath):\n fout = open(outpath, \"w\")\n\n # Header takes the guesswork out of loading by recording how many lines, vector dims\n fout.write(str(self.n_words) + \" \" + str(self.n_dim) + \"\\n\")\n for token in self.id2word:\n vector_components = [\"%.6f\" % number for number in self[token]]\n vector_as_string = \" \".join(vector_components)\n\n out_line = token + \" \" + vector_as_string + \"\\n\"\n fout.write(out_line)\n\n fout.close()", "def export_to_file(self, path, graph_format):\n try:\n logging.info(\"Saving RDF data to \" + str(path))\n with open(path, \"wb\") as out_file:\n out_file.write(self.g.serialize(format=graph_format, encoding=\"UTF-8\"))\n except Exception as e:\n logging.error(\"Error while saving RDF results \"+str(e))", "def export_gds(lum_app, filename, top_cell_name, layer_def,\n n_circle = 64, n_ring = 64, n_custom = 64, n_wg = 64,\n round_to_nm = 1, grid = 1e-9, max_objects = 10000):\n \n print(\"Exporting from Lumerical application to GDS II...\")\n layer_def_str = \"layer_def = [\"\n for i in range(0,len(layer_def)):\n if i == (len(layer_def) - 1):\n # Insert end bracket and semi-colon at end of array\n layer_def_str = layer_def_str + str(layer_def[i]) + \"];\"\n elif (i + 1) % 4 == 0:\n # Insert semi-colon after every 4 params\n layer_def_str = layer_def_str + str(layer_def[i]) + \";\"\n else:\n layer_def_str = layer_def_str + str(layer_def[i]) + \",\"\n \n lsf_script = str(\"gds_filename = '{}.gds';\".format(filename) +\n \"top_cell = '{}';\".format(top_cell_name) +\n layer_def_str.format(-220.0e-9/2, 220.0e-9/2) +\n \"n_circle = {};\".format(n_circle) +\n \"n_ring = {};\".format(n_ring) +\n \"n_custom = {};\".format(n_custom) +\n \"n_wg = {};\".format(n_wg) +\n \"round_to_nm = {};\".format(round_to_nm) +\n \"grid = {};\".format(grid) +\n \"max_objects = {};\".format(max_objects) +\n \"Lumerical_GDS_auto_export;\")\n #return lsf_script\n # Run lsf script to export gds\n lum_app.cd(os.getcwd())\n lum_app.eval(lsf_script)\n return os.path.join(os.getcwd(), filename+\".gds\")", "def write_updates(self, writer, predictions, ground_truth, input, iter, hyps):\n batch_size, _, _, _ = predictions.shape\n ground_truth = ground_truth.to(DEVICE)\n\n output_input_gt = torch.cat((predictions, ground_truth), dim=0)\n grid = torchvision.utils.make_grid(output_input_gt,\n scale_each=True,\n nrow=batch_size,\n normalize=True).cpu().detach().numpy()\n writer.add_image(\"Output_vs_gt\", grid, iter)\n\n writer.add_scalar(\"psnr\", self.get_psnr(predictions, ground_truth), iter)\n writer.add_scalar(\"damp\", self.get_damp(), iter)\n writer.add_figure(\"heightmap\", self.get_heightmap_fig(), iter)\n\n psf = self.get_psf(hyps)\n plt.figure()\n plt.imshow(psf)\n plt.colorbar()\n fig = plt.gcf()\n plt.close()\n writer.add_figure(\"psf\", fig, iter)", "def output_gpx(points, output_filename):\n from xml.dom.minidom import getDOMImplementation\n def append_trkpt(pt, trkseg, doc):\n trkpt = doc.createElement('trkpt')\n trkpt.setAttribute('lat', '%.8f' % (pt['lat']))\n trkpt.setAttribute('lon', '%.8f' % (pt['lon']))\n trkseg.appendChild(trkpt)\n \n doc = getDOMImplementation().createDocument(None, 'gpx', None)\n trk = doc.createElement('trk')\n doc.documentElement.appendChild(trk)\n trkseg = doc.createElement('trkseg')\n trk.appendChild(trkseg)\n \n points.apply(append_trkpt, axis=1, trkseg=trkseg, doc=doc)\n \n with open(output_filename, 'w') as fh:\n doc.writexml(fh, indent=' ')", "def write(file_path, kml_str):\n\n fa.text_writer(file_path, kml_str)", "def 
write_data():", "def _write_outfile(A):\n def __remove_symmetry_A(A):\n A_triu = defaultdict(int)\n for (i, j, k), w in list(A.items()):\n if j > i:\n A_triu[(i, j, k)] = w\n return A_triu\n def __write_nodes(outfile):\n outfile += \"*Vertices %d\" % Nn\n for nid, label in enumerate(nodes):\n outfile += '\\n%d \"%s\" 1.0' % (nid + index_from, labelmap[label])\n return outfile\n def __write_edges(outfile):\n outfile += \"\\n*Intra\\n# layer node node [weight]\"\n sorted_A_sparse = sorted(list(__remove_symmetry_A(A).items()), key=lambda ind__: ind__[0][2])\n for (i, j, k), w in sorted_A_sparse:\n outfile += '\\n%d %d %d %f' % (\n k + index_from, # layer\n nodemap[i] + index_from, # node\n nodemap[j] + index_from, # node\n w # weight\n )\n return outfile\n \n outfile = \"\"\n outfile = __write_nodes(outfile)\n outfile = __write_edges(outfile)\n \n return outfile", "def write(self, filename, detailed=False):\n fname, fext = os.path.splitext(filename)\n fext = fext[1:] # drop .\n p = self._to_dot(detailed)\n p.graph_attr['ordering'] = 'out'\n p.render(\n filename=filename,\n format=fext)", "def write_labels():\n with open('../data/labels.txt', 'w') as labels_file:\n labels = generate_labels()\n labels_file.write('\\n'.join(labels))", "def write_graph_list(name, graph_list, data_root):\n\n data_path = os.path.join(data_root, name)\n if not os.path.exists(data_path):\n os.makedirs(data_path)\n\n # compute right number of trailing zeros for file names\n format_positions = ceil(log10(len(graph_list)))\n\n for i, g in enumerate(graph_list):\n lines = nx.generate_gml(g)\n\n # stupid networkx requires labels to be equal to node ids.\n # we need to fix this\n def sanitize_labels(x):\n def getint(v:str):\n return int(v.strip('\"'))\n if x.find('label') == -1:\n return x + '\\n'\n else:\n v = x[10:]\n label = g.node[getint(v)]['label']\n return f' label \"{label}\"\\n'\n\n fixed_lines = map(sanitize_labels, lines)\n\n f = open(os.path.join(data_path, f'{i:0{format_positions}d}.gml'), 'w')\n f.writelines(fixed_lines)\n f.close()", "def save_parameters(gp, target):\n pdict = {}\n pdict['likelihood'] = gp.likelihood.get_free_state()[0]\n pdict['kern_variance'] = gp.kern.variance.get_free_state()[0]\n pdict['kern_lengthscale'] = list(gp.kern.lengthscales.get_free_state())\n pdict['log_likelihood'] = gp._objective(gp.get_free_state())[0][0]\n #pdict = {n:list(gp[n].flatten()) for n in gp.parameter_names()}\n with open(target, 'w') as f:\n json.dump(pdict, f)", "def save_chicago_graph(G, path=\"chicago.xml\"):\n\n\tox.save_graphml(G, filename=path)", "def write_gexf_format(graph_file, adjacency, users_map, node_size=def_node_size,\n node_color=def_node_color, node_label=def_node_label,\n label_size_threshold=-1):\n graph = open(graph_file, mode='w')\n graph.write(\"\"\"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n <gexf xmlns=\"http://www.gexf.net/1.2draft\" xmlns:viz=\"http://www.gexf.net/1.1draft/viz\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:schemaLocation=\"http://www.gexf.net/1.2draft http://www.gexf.net/1.2draft/gexf.xsd\" version=\"1.2\">\n <graph mode=\"static\" defaultedgetype=\"directed\">\n \"\"\")\n graph.write('<nodes>\\n')\n for user_name in users_map:\n id = users_map[user_name][SERIAL_IDX]\n size = node_size(user_name)\n r, g, b = node_color(user_name)\n label = '' if size < label_size_threshold else node_label(user_name)\n graph.write('<node id=\"{0}\" label=\"{1}\">\\n'.format(id, label))\n graph.write('<viz:size value=\"{0}\"></viz:size>\\n'.format(size))\n 
graph.write('<viz:color r=\"{0}\" g=\"{1}\" b=\"{2}\" a=\"1\"/>'.format(r, g, b))\n graph.write('</node>\\n')\n graph.write('</nodes>\\n')\n graph.write('<edges>\\n')\n # iterate over all non-zero elements in the adjacency matrix\n for i, j in zip(*adjacency.nonzero()):\n graph.write('<edge source=\"{0}\" target=\"{1}\" weight=\"{2}\"/>\\n'.format(i, j,\n adjacency[i, j]))\n graph.write('</edges>\\n')\n graph.write('</graph>\\n')\n graph.write('</gexf>\\n')\n graph.close()", "def write_gspn(self, net):\n self.global_detection.write_places(net)\n self.global_detection.write_transitions(net)\n\n for f in self.farms:\n f.write_places(net)\n f.write_transitions(net)\n\n for airborne_instance in self.airborne:\n airborne_instance.write_places(net)\n airborne_instance.write_transitions(net)\n\n for indirect_instance in self.indirect:\n indirect_instance.write_places(net)\n indirect_instance.write_transitions(net)", "def gtrnadb_model_info(filename, output):\n r2dt.write_gtrnadb(filename, output)", "def write_mat_file(self):\n mat_dict = {}\n mat_dict['Lx_p'] = self.Lx_p\n mat_dict['Ly_p'] = self.Ly_p\n mat_dict['Lz_p'] = self.Lz_p\n mat_dict['Lo'] = self.obst.get_Lo()\n mat_dict['Ny_divs'] = self.N_divs\n mat_dict['rho_p'] = self.rho_p\n mat_dict['nu_p'] = self.nu_p\n mat_dict['snl'] = list(np.union1d(self.obst_list[:],self.solid_list[:]))\n mat_dict['inl'] = list(self.inlet_list[:])\n mat_dict['onl'] = list(self.outlet_list[:])\n\n scipy.io.savemat('geometry_description',mat_dict)", "def _write_file(self, n, k, att, pol, selected, emb_dim, gold):\n target_file = self._get_target_name(n, k, att, pol, emb_dim)\n writer = csv.writer(open(os.path.join(target_file), \"w\"))\n header = [\"eid\", \"rids\", \"n\"]\n header = header + [\"gold_summary\"] if gold is not None else header\n header += [\"review_{}\".format(i) for i in range(n)]\n header += [\"extraction\", \"input_text\"]\n writer.writerow(header)\n for row in selected:\n row = row[:3] + [gold[row[0]] if row[0] in gold else \"\"] + row[3:] if gold is not None else row\n writer.writerow(row)", "def write_to_kml(gps_df, output_path):\n coordinates = []\n for index, row in gps_df.iterrows():\n lat = (1 if row['Lat_dir'] == 'N' else -1) * (float(row['Lat'][0:2]) + (float(row['Lat'][2:]) / 60))\n long = (1 if row['Long_dir'] == 'E' else -1) * (float(row['Long'][0:3]) + (float(row['Long'][3:]) / 60))\n speed = row['Speed']\n coordinates.append((long, lat, speed))\n\n kml_file = kml.newlinestring(name='line', coords=coordinates)\n kml_file.linestyle.color = simplekml.Color.cyan\n kml_file.linestyle.width = 3\n kml_file.polystyle.color = simplekml.Color.cyan\n kml_file.altitudemode = simplekml.AltitudeMode.relativetoground\n kml_file.extrude = 1\n\n # stores all coordinates into the output file\n with open(output_path, \"w+\"):\n kml.save(output_path, format=True)", "def export_gexf_termos(rotulos,similaridades,nome_arquivo,threshold,excluir_negativos):\n\n tbl = dict.fromkeys(i for i in range(sys.maxunicode) if unicodedata.category(chr(i)).startswith('P'))\n\n arquivo = codecs.open(nome_arquivo + \".gexf\",\"w\",\"utf-8\")\n arquivo.write('<?xml version=\"1.0\" encoding=\"UTF-8\"?>\\n')\n arquivo.write('<gexf xmlns=\"http://www.gexf.net/1.2draft\" version=\"1.2\">\\n')\n arquivo.write('\\t<graph mode=\"static\" defaultedgetype=\"undirected\">\\n')\n arquivo.write('\\t\\t\\t<nodes>\\n')\n arquivo.flush()\n\n cont=0\n cont2=0;\n for key in rotulos:\n arquivo.write(u\"\\t\\t\\t\\t<node id=\\\"%d\\\" label=\\\"%s\\\"/>\\n\" % (cont2,key))\n cont = 
cont+1\n cont2 = cont2+1\n if cont == 50:\n arquivo.flush()\n cont = 0\n\n arquivo.write('\\t\\t\\t</nodes>\\n')\n arquivo.write('\\t\\t\\t<edges>\\n')\n arquivo.flush()\n\n cont=0\n for similaridade in similaridades:\n if(excluir_negativos and (similaridade[2] < 0)):\n continue\n\n if abs(similaridade[2]) >= threshold:\n label = ' - '.join((similaridade[0],similaridade[1]))\n arquivo.write(\"\\t\\t\\t\\t<edge source=\\\"%d\\\" target=\\\"%d\\\" weight=\\\"%f\\\" label=\\\"%s\\\" />\\n\" % (rotulos.index(similaridade[0]),rotulos.index(similaridade[1]),similaridade[2],label))\n\n cont = cont+1\n if cont == 50:\n arquivo.flush()\n cont = 0\n\n arquivo.write('\\t\\t\\t</edges>\\n')\n arquivo.write('\\t</graph>\\n')\n arquivo.write('</gexf>')\n arquivo.close() # you can omit in most cases as the destructor will call it", "def save_spgfile(datain,filepath,casename=None):\n\n data={}\n \n if casename==None:\n print('save_spgfile requires a filename to save.')\n return\n try:\n fp=open(filepath + casename+'_spg.dat','w')\n except IOError:\n print('save_spgfile: invalid case name.')\n return data\n\n fp.write('Sponge Node Number = %d\\n' % datain['spgf_num'] )\n for i in range(0,datain['spgf_num']):\n fp.write('%d %f %f\\n'% (datain['spgf_nodes'][i],datain['spgf_distance'][i],datain['spgf_value'][i]))\n fp.close()", "def write(self, text):\n text = open(text, 'w')\n text.write('File type = \"ooTextFile\"\\n')\n text.write('Object class = \"TextGrid\"\\n\\n')\n text.write('xmin = %f\\n' % self.__xmin)\n text.write('xmax = %f\\n' % self.__xmax)\n text.write('tiers? <exists>\\n')\n text.write('size = %d\\n' % self.__n)\n text.write('item []:\\n')\n for (tier, n) in zip(self.__tiers, range(1, self.__n + 1)):\n text.write('\\titem [%d]:\\n' % n)\n if tier.__class__ == IntervalTier: \n text.write('\\t\\tclass = \"IntervalTier\"\\n')\n text.write('\\t\\tname = \"%s\"\\n' % tier.name())\n text.write('\\t\\txmin = %f\\n' % tier.xmin())\n text.write('\\t\\txmax = %f\\n' % tier.xmax())\n text.write('\\t\\tintervals: size = %d\\n' % len(tier))\n for (interval, o) in zip(tier, range(1, len(tier) + 1)): \n text.write('\\t\\t\\tintervals [%d]:\\n' % o)\n text.write('\\t\\t\\t\\txmin = %f\\n' % interval.xmin())\n text.write('\\t\\t\\t\\txmax = %f\\n' % interval.xmax())\n text.write('\\t\\t\\t\\ttext = \"%s\"\\n' % interval.mark())\n else: # PointTier\n text.write('\\t\\tclass = \"TextTier\"\\n')\n text.write('\\t\\tname = \"%s\"\\n' % tier.name())\n text.write('\\t\\txmin = %f\\n' % tier.xmin())\n text.write('\\t\\txmax = %f\\n' % tier.xmax())\n text.write('\\t\\tpoints: size = %d\\n' % len(tier))\n for (point, o) in zip(tier, range(1, len(tier) + 1)):\n text.write('\\t\\t\\tpoints [%d]:\\n' % o)\n text.write('\\t\\t\\t\\ttime = %f\\n' % point.time())\n text.write('\\t\\t\\t\\tmark = \"%s\"\\n' % point.mark())\n text.close()", "def export_tikz(nodes, scale, path):\n filename = asksaveasfile(defaultextension=\".tex\")\n if filename:\n _file = open(filename.name, 'w')\n\n _file.write(\"\\\\begin{tikzpicture}\\n\")\n _file.write(\"\\\\begin{axis}[%\\n\")\n _file.write(\"width=\\\\textwidth,\\n\")\n _file.write(\"scale only axis,\\n\")\n _file.write(\"xmin=-100,\\n\")\n _file.write(\"xmax=2700,\\n\")\n _file.write(\"ymin=-100,\\n\")\n _file.write(\"ymax=2100,\\n\")\n _file.write(\"y dir=reverse,\\n\")\n _file.write(\"axis x line*=bottom,\\n\")\n _file.write(\"axis y line*=left\\n\")\n _file.write(\"]\\n\")\n\n for group in get_groups(nodes):\n _file.write(\n \"\"\"\\\\addplot [color=black,mark size=5.0pt,\n only 
marks,mark=*,mark options={solid,\n fill=\"\"\" + group.lower() + \"},forget plot]\\n\")\n _file.write(\"table[row sep=crcr]{%\\n\")\n for node in nodes:\n if node.color == group:\n _file.write(\n str(node.x_coord * scale) + \" \" +\n str(node.y_coord * scale) + \"\\\\\\\\\\n\")\n _file.write(\"};\\n\")\n\n if not path is None:\n _file.write(\"\\\\addplot [draw=black,forget plot]\\n\")\n _file.write(\"table[row sep=crcr]{%\\n\")\n for path_node in path['Tour']:\n print(path_node)\n node = nodes[int(path_node)]\n print(node)\n _file.write(\n str(node.x_coord * scale) + \" \" +\n str(node.y_coord * scale) + \"\\\\\\\\\\n\")\n _file.write(\"};\\n\")\n _file.write(\"\\\\end{axis}\\n\")\n _file.write(\"\\\\end{tikzpicture}%\\n\")\n _file.close()", "def writeGP(loc, fname, data, header, ncol=6):\n size = len(data)\n nrow = int(size / ncol)\n size_last_row = size % ncol\n\n lines = \"\"\n for line in np.reshape(range(nrow * ncol), (nrow, ncol)):\n for val in line:\n lines += \"{:^20.6e}\".format(data[val]) + 3 * \" \"\n lines = lines.rstrip(3 * \" \") + \"\\n\"\n\n if size_last_row:\n for i in range(1, size_last_row + 1):\n lines += \"{:^20.6e}\".format(data[-i]) + 3 * \" \"\n lines = lines.rstrip(3 * \" \")\n\n with open(\"/\".join([loc, fname]), \"w\") as f:\n f.writelines(header)\n f.writelines(lines)\n return", "def save_to_file(self, representation, filename) -> bool:\n filename = \"data/\" + filename\n output_matrix = self.get_graph(representation)\n if isinstance(output_matrix, list):\n with open(filename, \"w+\") as f:\n for row in output_matrix:\n f.write(\" \".join(str(item) for item in row))\n f.write(\"\\n\")\n return True\n elif isinstance(output_matrix, np.ndarray):\n with open(filename, \"w+\") as f:\n np.savetxt(f, output_matrix, fmt=\"%i\")\n return True\n else:\n return False", "def save_tr_graph(self, output_path, delimiter, write_stats=False, write_weights=False, write_dir=True):\n pp.save_graph(self._TG, output_path=output_path, delimiter=delimiter, write_stats=write_stats,\n write_weights=write_weights, write_dir=write_dir)", "def write_checkpoint(self):\n self.file_checkpoint_data = open(self.path_checkpoint, \"a+\")\n array_to_write = [str(self.radious), self.type_feature, self.type_filtering, self.h_filterig]\n self.file_checkpoint_data.write(','.join(array_to_write) + \"\\n\")\n self.file_checkpoint_data.flush()", "def writeOutput(self, output):", "def save_model(self):\n print(\"\\nModels are integrated to be multi scale.\\nSaving to disk.\")\n self.column_names = [ \"x_\" + str(x) for x in range(self.embedding.shape[1])]\n self.embedding = pd.DataFrame(self.embedding, columns = self.column_names)\n self.embedding.to_csv(self.args.output, index = None)", "def write(self, path):\n\n with open(path, 'w') as file:\n file.write(f\"\\n{self.subject}\")\n file.write(8 * '\\n')\n file.write(\"0\")\n file.write(8 * '\\n')\n file.write(self.data.date[0].strftime(\"%d.%m.%Y\"))\n file.write(7 * '\\n')\n file.write(\"Unknown Line\")\n file.write(26 * '\\n')\n file.write(self.valid_measurements + \"\\n\")\n printing_df = self.data.drop(columns=['date', 'time'])\n printing_df['hours'] = self.data.time.map(lambda x: x.strftime(\"%H\"))\n printing_df['minutes'] = self.data.time.map(lambda x: x.strftime(\"%M\"))\n order = ['hours', 'minutes', 'SYS(mmHg)', 'DIA(mmHg)', 'UNKNOW_1', 'UNKNOW_2', 'CODE', 'UNKNOW_3']\n printing_df = printing_df[order]\n printing_df.fillna(-9999, inplace=True)\n printing_df.replace('EB', -9998, inplace=True)\n printing_df.replace('AB', -9997, inplace=True)\n 
printing_df[['SYS(mmHg)', 'DIA(mmHg)', 'UNKNOW_1', 'UNKNOW_2', 'CODE', 'UNKNOW_3']] = printing_df[\n ['SYS(mmHg)', 'DIA(mmHg)', 'UNKNOW_1', 'UNKNOW_2', 'CODE', 'UNKNOW_3']].astype(int).astype(str)\n printing_df.replace('-9999', '\"\"', inplace=True)\n printing_df.replace('-9998', '\"EB\"', inplace=True)\n printing_df.replace('-9997', '\"AB\"', inplace=True)\n printing_df.to_csv(file, header=None, index=None, quoting=csv.QUOTE_NONE, line_terminator='\\n')\n\n xml_node = ET.Element('XML')\n xml_node.extend(self._dict_to_etree(self.metadata))\n xml_line = ET.tostring(xml_node, encoding=\"unicode\")\n file.write(xml_line)", "def output(self):\n to_write = 'X '\n to_write += str(self.def_field['name'])+' '\n to_write += str(self.def_field['pin_number'])+' '\n to_write += str(self.def_field['x'])+' '\n to_write += str(self.def_field['y'])+' '\n to_write += str(self.def_field['length'])+' '\n to_write += self.def_field['direction']+' '\n to_write += str(self.def_field['size_num'])+' '\n to_write += str(self.def_field['size_name'])+' '\n #to_write += str(self.def_field['part'])+' '\n to_write += str(self.def_field['dmg'])+' '\n to_write += str(self.def_field['type'])+' '\n to_write += self.def_field['shape']\n to_write += '\\n'\n return to_write" ]
[ "0.6463148", "0.6297059", "0.62645066", "0.6240671", "0.6203814", "0.6203347", "0.61121655", "0.60968256", "0.60381806", "0.6023182", "0.6010435", "0.5995649", "0.5977328", "0.5961852", "0.59262574", "0.5919297", "0.5915878", "0.59035015", "0.58848506", "0.5878267", "0.58703214", "0.5862629", "0.58171046", "0.57974774", "0.57952476", "0.57399", "0.57271713", "0.57230663", "0.565965", "0.5653081", "0.5653081", "0.5649501", "0.56446725", "0.5634542", "0.5620433", "0.5617506", "0.5616194", "0.5611142", "0.56100994", "0.560762", "0.56054956", "0.56054956", "0.5586085", "0.5580491", "0.55745524", "0.5571761", "0.55680794", "0.55652094", "0.5564069", "0.5554501", "0.554973", "0.5547313", "0.5546139", "0.55451006", "0.5529796", "0.551503", "0.5511447", "0.55098015", "0.5476308", "0.5476063", "0.54729307", "0.54700524", "0.54576355", "0.54429305", "0.5441599", "0.5429723", "0.5428387", "0.54255706", "0.5420944", "0.54181564", "0.54126495", "0.5410987", "0.539717", "0.5391784", "0.53813094", "0.53803116", "0.53779775", "0.5375699", "0.53743535", "0.5368875", "0.53574604", "0.5352097", "0.5351215", "0.53488535", "0.5345249", "0.5324761", "0.53237665", "0.532077", "0.532018", "0.5318761", "0.53186953", "0.5316421", "0.53096175", "0.53051686", "0.5301466", "0.5299936", "0.52987397", "0.52943987", "0.52923054", "0.5288535", "0.52835923" ]
0.0
-1
build a network view
def main():
    # files
    summary_file = sys.argv[1]
    pwms_to_tfs_file = sys.argv[2]
    expressed_tfs_file = sys.argv[3]

    # TODO
    # TODO pull in num regions to resize things? but complicated with overlaps etc
    # TODO edit edges with type of interaction
    # TODO may want to color by trajectory, to demonstrate waves of trajectory

    # read in data
    summary = pd.read_csv(summary_file, sep="\t")
    pwms_to_tfs = pd.read_csv(pwms_to_tfs_file, sep="\t")
    pwms_to_tfs = pwms_to_tfs[pwms_to_tfs["expressed"].notna()]
    pwms_to_filt_tfs = {}  # dict: key - pwm names, vals - dict of ensembl ids to hgnc ids
    for line_idx in range(pwms_to_tfs.shape[0]):
        pwm_info = pwms_to_tfs.iloc[line_idx,:]
        pwm_name = pwm_info["hclust_model_name"]
        pwm_to_tf = dict(zip(pwm_info["expressed"].split(";"), pwm_info["expressed_hgnc"].split(";")))
        pwms_to_filt_tfs[pwm_name] = pwm_to_tf

    # filter expressed hgncs for dynamic ones only
    tfs_filt = pd.read_csv(expressed_tfs_file, sep="\t", index_col=0)
    for pwm_name in pwms_to_filt_tfs.keys():
        tfs_tmp = pwms_to_filt_tfs[pwm_name]
        for ensembl_tf in tfs_tmp.keys():
            if ensembl_tf not in tfs_filt.index:
                del tfs_tmp[ensembl_tf]
        if len(tfs_tmp.keys()) == 0:
            del pwms_to_filt_tfs[pwm_name]
        pwms_to_filt_tfs[pwm_name] = tfs_tmp

    # add in tfs column
    tf1 = []
    for pwm in summary["pwm1"]:
        tf_str = []
        for ensembl_id in pwms_to_filt_tfs[pwm]:
            tf_str.append(pwms_to_filt_tfs[pwm][ensembl_id])
            # TODO try add in max point
            expression = tfs_filt.loc[ensembl_id,:]
            max_idx = np.argmax(expression.values)
            tf_str.append(str(max_idx))
        tf_str = (";").join(tf_str)
        tf1.append(tf_str)
    summary["tf1"] = tf1

    tf2 = []
    for pwm in summary["pwm2"]:
        tf_str = []
        for ensembl_id in pwms_to_filt_tfs[pwm]:
            tf_str.append(pwms_to_filt_tfs[pwm][ensembl_id])
            expression = tfs_filt.loc[ensembl_id,:]
            max_idx = np.argmax(expression.values)
            tf_str.append(str(max_idx))
        tf_str = (";").join(tf_str)
        tf2.append(tf_str)
    summary["tf2"] = tf2

    # remove failed rules
    summary = summary[~summary["interaction"].str.contains("FAILED")]

    # make graph
    graph = nx.from_pandas_edgelist(summary, "tf1", "tf2")

    # set up positions
    #pos = graphviz_layout(graph, prog="dot")
    pos = graphviz_layout(graph, prog="neato")
    scale_factor = 3
    for key in pos.keys():
        coords = pos[key]
        pos[key] = {"x": scale_factor*coords[0], "y": -scale_factor*coords[1]}
    nx.set_node_attributes(graph, pos, "graphics")  # note this is diff from v1 to v2 in networkx

    # add graphics
    add_graphics_theme_to_nx_graph(graph)

    # write gml
    out_file = "summary.gml"
    nx.write_gml(stringize_nx_graph(graph), out_file, stringizer=str)

    # tfs: for each tf, get gene column

    return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _build_network(self):\n pass", "def networks(view):\n return \"network?\" \\\n \"_return_fields=\" \\\n \"extattrs,\" \\\n \"comment,\" \\\n \"network,\" \\\n \"network_view,\" \\\n \"utilization&\" \\\n \"network_view=\" + view + \\\n \"&_max_results=-25000\"", "def network_views():\n return 'networkview?'", "def build_and_display_network():\n bpn = NeuralNetwork.BackPropagationNetwork((input_nodes, hidden_nodes, output_nodes),[None, sigmoid, linear])\n DisplayNetwork.displayLayers(bpn.matrixDimension)\n\n return bpn", "def buildNetwork(self):\n\n # create the network node for our module\n self.networkNode = cmds.createNode(\"network\", name=self.modName)\n\n # create attributes\n self.addAttributes()\n\n return self.networkNode", "def network_view(request, simulation):\n # If the network is large, the display method is different.\n links = get_query('link', simulation)\n large_network = links.count() > NETWORK_THRESHOLD\n # File where the data for the network are stored.\n output_file = (\n '{0}/website_files/network_output/network_{1!s}.json'\n .format(settings.BASE_DIR, simulation.id)\n )\n if simulation.has_changed or not os.path.isfile(output_file):\n # Generate a new output file.\n output = network_output(simulation, large_network)\n with open(output_file, 'w') as f:\n json.dump(output, f)\n # Do not generate a new output file the next time (unless the\n # simulation changes).\n simulation.has_changed = False\n simulation.save()\n else:\n # Use data from the existing output file.\n with open(output_file, 'r') as f:\n output = json.load(f)\n context = {\n 'simulation': simulation,\n 'output': output,\n 'large_network': large_network,\n }\n return render(request, 'metro_app/network.html', context)", "def build_network(self):\n # Position the node centers\n self.set_node_centers()\n\n # Set the nodes\n self.nodes = []\n for i in range(self.n_states):\n node = Node(\n self.node_centers[i],\n self.node_radius,\n self.labels[i]\n )\n self.nodes.append(node)", "def network_view_run(request, simulation, run):\n # If the network is large, the display method is different.\n links = get_query('link', simulation)\n large_network = links.count() > NETWORK_THRESHOLD\n # Files where the data for the network are stored.\n network_file = (\n '{0}/website_files/network_output/network_{1}_{2}.json'\n .format(settings.BASE_DIR, simulation.id, run.id)\n )\n parameters_file = (\n '{0}/website_files/network_output/parameters_{1}_{2}.json'\n .format(settings.BASE_DIR, simulation.id, run.id)\n )\n results_file = (\n '{0}/website_files/network_output/results_{1}_{2}.json'\n .format(settings.BASE_DIR, simulation.id, run.id)\n )\n if (os.path.isfile(network_file)\n and os.path.isfile(parameters_file)\n and os.path.isfile(results_file)):\n # Load the data for the network.\n with open(network_file, 'r') as f:\n output = json.load(f)\n with open(parameters_file, 'r') as f:\n parameters = json.load(f)\n with open(results_file, 'r') as f:\n results = json.load(f)\n context = {\n 'simulation': simulation,\n 'output': output,\n 'large_network': large_network,\n 'parameters': parameters,\n 'results': results,\n }\n return render(request, 'metro_app/network.html', context)\n else:\n # The network file for the run does not exist.\n return HttpResponseRedirect(reverse('metro:simulation_manager'))", "def build_net(self, nodes, links, output_network, from_geometry=True, debug=False):\n _nodes = nodes.copy()\n _links = links.copy()\n\n if from_geometry:\n _nodes[['x', 'y']] = _nodes['geometry'].apply(lambda g: 
pd.Series([g.coords[0][0], g.coords[0][1]]))\n _nodes.drop(['geometry'], axis=1, errors='ignore', inplace=True)\n\n pandasdbf.write_dbf(_nodes, self.environment + r'\\temp_nodes_to_dbf.dbf', pre_process=False)\n pandasdbf.write_dbf(_links, self.environment + r'\\temp_links_to_dbf.dbf', pre_process=False)\n\n script_text = r\"\"\"\n\n RUN PGM=NETWORK PRNFILE=\"%s\\temp_net.prn\"\n FILEO NETO = \"%s\"\n FILEI LINKI[1] = \"%s\"\n FILEI NODEI[1] = \"%s\"\n ENDRUN\n\n \"\"\" % (\n self.environment,\n output_network,\n self.environment + r'\\temp_links_to_dbf.dbf',\n self.environment + r'\\temp_nodes_to_dbf.dbf'\n )\n\n # creating a cube script\n script = open(self.environment + r'\\build_net.s', 'w', encoding='latin')\n script.write(script_text)\n script.close()\n\n # runs the script with voyager.exe\n options = \"\"\"/Start /CloseWhenDone /Minimize /NoSplash\"\"\" if not debug else \"\"\n cmd = 'voyager.exe \"' + self.environment + r'\\build_net.s\" ' + options\n print(cmd)\n os.system(cmd)", "def gen_network(self):\n di = nx.DiGraph()\n di.add_edges_from(self.network_edges())\n di.add_nodes_from(self.network_nodes())\n self.network = di\n self.highlight_cycles()\n return self", "def networkcontainers(view):\n return \"networkcontainer?\" \\\n \"_return_fields=\" \\\n \"extattrs,\" \\\n \"comment,\" \\\n \"network,\" \\\n \"network_view,\" \\\n \"utilization&\" \\\n \"network_view=\" + view + \\\n \"&_max_results=-25000\"", "def create_network_graph(df_graph_tree):\n net = Network(height='750px', width='100%', directed=True, bgcolor='#222222', font_color='white')\n net.force_atlas_2based(gravity=-75)\n for index, row in df_graph_tree.iterrows():\n src = row['Source']\n dst = row['Target']\n label = row['Label']\n title = \"File fullname : {} <br> Type : {}\".format(row['Source'], row['File Type'])\n color = color_of_extension[row['File Type'].lower()] if row['File Type'].lower() in color_of_extension.keys() else 'grey'\n if row['File Type'] == 'folder':\n net.add_node(src, shape='text', label=label, color = color, title = title)\n else:\n net.add_node(src, shape='dot', label=label, color = color, title = title)\n if dst != '':\n #net.add_node(dst, label=label, title=title)\n net.add_edge(src, dst, value=1, color = '#6c6c6c')\n return net", "def _build_networks(self):\n self.online_convnet = self._create_network(name='Online')\n self.target_convnet = self._create_network(name='Target')\n self._net_outputs = self.online_convnet(self.state_ph, training=True)\n self._q_argmax = tf.argmax(self._net_outputs.q_values, axis=1)[0]\n self._replay_net_outputs = self.online_convnet(self._replay.states,\n training=True)\n self._replay_next_target_net_outputs = self.target_convnet(\n self._replay.next_states)", "def build_network(self, dimList, actType=\"Tanh\", verbose=True):\n self.Q_network = Model(dimList, actType, verbose=verbose)\n self.target_network = Model(dimList, actType)\n\n if self.device == torch.device(\"cuda\"):\n self.Q_network.cuda()\n self.target_network.cuda()\n\n self.build_optimizer()", "def _make_view(tabbed=False, split=False, scene_width=-1):\n view_options = VGroup(Item('headview', style='custom'), 'view_options',\n show_border=True, show_labels=False, label='View')\n\n scene = VGroup(Item('scene', show_label=False,\n editor=SceneEditor(scene_class=MayaviScene),\n dock='vertical', width=500),\n view_options)\n\n data_panel = VGroup(VGroup(Item('subject_panel', style='custom'),\n label=\"MRI Subject\", show_border=True,\n show_labels=False),\n VGroup(Item('lock_fiducials', 
style='custom',\n editor=EnumEditor(cols=2,\n values={False: '2:Edit',\n True: '1:Lock'}),\n enabled_when='fid_ok'),\n HGroup('hsp_always_visible',\n Label(\"Always Show Head Shape Points\"),\n show_labels=False),\n Item('fid_panel', style='custom'),\n label=\"MRI Fiducials\", show_border=True,\n show_labels=False),\n VGroup(Item('raw_src', style=\"custom\"),\n HGroup(Item('distance', show_label=True),\n 'omit_points', 'reset_omit_points',\n show_labels=False),\n Item('omitted_info', style='readonly',\n show_label=False),\n label='Head Shape Source (Raw)',\n show_border=True, show_labels=False),\n show_labels=False, label=\"Data Source\")\n\n coreg_panel = VGroup(Item('coreg_panel', style='custom'),\n label=\"Coregistration\", show_border=True,\n show_labels=False,\n enabled_when=\"fid_panel.locked\")\n\n if split:\n main_layout = 'split'\n else:\n main_layout = 'normal'\n\n if tabbed:\n main = HGroup(scene,\n Group(data_panel, coreg_panel, show_labels=False,\n layout='tabbed'),\n layout=main_layout)\n else:\n main = HGroup(data_panel, scene, coreg_panel, show_labels=False,\n layout=main_layout)\n\n view = View(main, resizable=True, handler=CoregFrameHandler(),\n buttons=NoButtons)\n return view", "def buildView(self, robDisplay, msg=''):\n # Constrution vue globale\n status=str()\n #msgEntet= '== Joueur{} ==\\n'.format(robDisplay)\n msgEntet=' '\n strLabView= self.buildViewMap(robDisplay)\n\n # Si pas de message, et vue pour un joueur et que au moins un joueur, rappel du nom du joueur\n if msg=='' and robDisplay>0 and len(self.lstRob)>0: \n msg='*Joueur {}*'.format(self.lstRob[robDisplay-1].name)\n if len(msg)>1 and msg[-1]!='\\n':\n msg+='\\n'\n # Affichage 'commande en cour' ou 'a vous:'\n if robDisplay>0: # Si c'est un joueur specifique\n robDisplay-=1 #Index de la liste commence a 0\n cmd= self.lstRob[robDisplay].cmd # Commande en cour d'execution\n if cmd=='':\n status='Entrez commande:'\n else:\n status= \"Commande en cours:'{}'\".format(cmd)\n strView=\"\\n%s%s%s%s\" % (msgEntet,strLabView,msg,status)\n return strView", "def _build_graph_general(self): \n\n #Find a canonical coloring scheme\n #Each node has a color that is determined by the non-mapped aspects\n nodecolors=set()\n for nl in self.net.iter_node_layers():\n nodecolors.add(self._slice_node_layer_not_allowed(nl))\n nodecolors_sorted=sorted(list(nodecolors))\n del nodecolors\n self._assert_full_order(nodecolors_sorted)\n self.colormap=dict( ((color,colorid) for colorid,color in enumerate(nodecolors_sorted) ))\n\n #each aux node has a color that is determined by the aspect\n self.auxcolormap=dict( ((auxcolor, auxcolorid+len(self.colormap)) for auxcolorid,auxcolor in enumerate(sorted(self.asp)) ) )\n\n\n #Add the underlying network\n #node-layers:\n for nl in self.net.iter_node_layers():\n nlid=self._get_node_id(nl)\n color=self._slice_node_layer_not_allowed(nl)\n colorid=self.colormap[color]\n self.add_node(nlid,colorid)\n\n #edges between node-layers:\n for nl1 in self.net.iter_node_layers():\n for nl2 in self.net[nl1]:\n nl1id=self._get_node_id(nl1)\n nl2id=self._get_node_id(nl2)\n self.add_link(nl1id,nl2id)\n\n\n #Add the auxiliary nodes and edges\n #add the aux nodes\n for a in self.asp:\n for elayer in self.net.slices[a]:\n auxid=self._get_auxnode_id( (a,elayer) )\n auxcolorid=self.auxcolormap[a]\n self.add_node(auxid,auxcolorid)\n \n #add the aux edges\n for nl in self.net.iter_node_layers():\n for a in self.asp:\n nlid=self._get_node_id(nl)\n auxid=self._get_auxnode_id( (a,nl[a]) )\n self.add_link(nlid,auxid)", "def 
build(self):\n self.logging = LoggingView(self.model, 20)\n\n l = [\n self.logging\n ]\n\n w = urwid.Filler(urwid.Pile(l), 'top')\n return w", "def viz_graph(self, show_ports=False, pydot_options=None):\n import networkx as nx\n G = nx.DiGraph()\n if pydot_options:\n G.graph['graph'] = pydot_options\n # instantiate objects\n for itask in self:\n task_inputs = itask[TaskSpecSchema.inputs]\n to_task = itask[TaskSpecSchema.task_id]\n to_type = itask[TaskSpecSchema.node_type]\n if to_task == \"\":\n to_task = OUTPUT_TYPE\n for iport_or_tid in task_inputs:\n # iport_or_tid: it is either to_port or task id (tid) b/c\n # if using ports API task_inputs is a dictionary otherwise\n # task_inputs is a list.\n taskin_and_oport = task_inputs[iport_or_tid] \\\n if isinstance(task_inputs, dict) else iport_or_tid\n isplit = taskin_and_oport.split('.')\n from_task = isplit[0]\n from_port = isplit[1] if len(isplit) > 1 else None\n if show_ports and from_port is not None:\n to_port = iport_or_tid\n common_tip = taskin_and_oport\n G.add_edge(from_task, common_tip, label=from_port)\n G.add_edge(common_tip, to_task, label=to_port)\n tnode = G.nodes[common_tip]\n tnode.update({\n # 'label': '',\n 'shape': 'point'})\n else:\n G.add_edge(from_task, to_task)\n\n # draw output ports\n if show_ports:\n\n if (to_type == OUTPUT_TYPE):\n continue\n task_node = get_node_obj(itask, tgraph_mixin=True)\n # task_outputs = itask.get(TaskSpecSchema.outputs, [])\n for pout in task_node._get_output_ports():\n out_tip = '{}.{}'.format(\n itask[TaskSpecSchema.task_id], pout)\n G.add_edge(to_task, out_tip, label=pout)\n tnode = G.nodes[out_tip]\n tnode.update({\n # 'label': '',\n 'shape': 'point'})\n return G", "def create_view(self) -> Optional[dict]:\n catch_keyboard_interrupt()\n\n db = mongodb_db(os.environ['DB_CONNECTION_STRING'])\n if self.model_version == 'latest':\n latest_model_ts = max(db.model.find().distinct('added_on'))\n d = db.model.find_one({'added_on': latest_model_ts})\n else:\n d = db.model.find_one({'_id': self.model_version})\n logger.debug(f'Model version: {d[\"_id\"]}')\n\n labels_vals = list(d['labels'].values())\n if self.method == 'mean':\n count_m = np.mean(labels_vals)\n elif self.method == 'median':\n count_m = np.median(labels_vals)\n else:\n count_m = 10\n\n excluded_labels = os.getenv('EXCLUDE_LABELS')\n if excluded_labels:\n excluded_labels = excluded_labels.split(',')\n else:\n excluded_labels = []\n\n labels_with_few_annos = []\n for k, v in d['labels'].items():\n if count_m > v and k not in excluded_labels:\n labels_with_few_annos.append(k)\n\n headers = self._make_headers()\n\n view_template = {\n 'data': {\n 'type': 'list',\n 'title': '',\n 'target': 'tasks',\n 'gridWidth': 4,\n 'columnsWidth': {},\n 'hiddenColumns': {\n 'explore': [\n 'tasks:annotations_results', 'tasks:annotations_ids',\n 'tasks:predictions_score', 'tasks:predictions_results',\n 'tasks:file_upload', 'tasks:created_at',\n 'tasks:updated_at'\n ],\n 'labeling': [\n 'tasks:id', 'tasks:completed_at',\n 'tasks:cancelled_annotations',\n 'tasks:total_predictions', 'tasks:annotators',\n 'tasks:annotations_results', 'tasks:annotations_ids',\n 'tasks:predictions_score',\n 'tasks:predictions_model_versions',\n 'tasks:predictions_results', 'tasks:file_upload',\n 'tasks:created_at', 'tasks:updated_at'\n ]\n },\n 'columnsDisplayType': {},\n 'filters': {\n 'conjunction':\n 'or',\n 'items': [{\n 'filter': 'filter:tasks:predictions_results',\n 'operator': 'equal',\n 'type': 'String',\n 'value': 'placeholder_a'\n }, {\n 'filter': 
'filter:tasks:predictions_results',\n 'operator': 'equal',\n 'type': 'String',\n 'value': 'placeholder_b'\n }]\n }\n }\n }\n\n default_view = copy.deepcopy(view_template)\n\n filtered_labels = []\n for label in labels_with_few_annos:\n filtered_labels.append({\n 'filter': 'filter:tasks:predictions_results',\n 'operator': 'contains',\n 'type': 'String',\n 'value': label\n })\n\n view_template['data']['filters']['conjunction'] = 'or' # noqa: PyTypeChecker\n view_template['data']['filters']['items'] = filtered_labels\n view_template['data']['title'] = 'rare_classes'\n\n view_template.update({'project': self.project_id})\n\n url = f'{os.environ[\"LS_HOST\"]}/api/dm/views?project={self.project_id}'\n resp = requests.get(url, headers=headers)\n\n default_tab = [\n x for x in resp.json() if x['data']['title'] == 'Default'\n ]\n\n if not default_tab:\n logger.debug(\n f'Creating default view for project {self.project_id}')\n default_view.update({'project': self.project_id})\n default_view['data']['title'] = 'Default'\n default_view['data'].pop('filters')\n url = f'{os.environ[\"LS_HOST\"]}/api/dm/views/'\n new_view_resp = requests.post(url,\n headers=headers,\n data=json.dumps(default_view))\n new_default_view = new_view_resp.json()\n logger.debug(f'Response: {new_default_view}')\n\n existing_rare_classes_tab = [\n x for x in resp.json() if x['data']['title'] == 'rare_classes'\n ]\n\n if existing_rare_classes_tab:\n version_col = 'tasks:predictions_model_versions'\n explore_dict = existing_rare_classes_tab[0]['data'][\n 'hiddenColumns']['explore']\n if existing_rare_classes_tab[0]['data']['filters'][\n 'items'] == filtered_labels and (version_col\n in explore_dict):\n logger.debug(\n 'An identical `rare_classes` view already exists for '\n f'project {self.project_id}. Skipping...')\n return\n else:\n logger.debug(\n 'The list of rare classes has changed! 
Replacing...')\n existing_view_id = existing_rare_classes_tab[0]['id']\n url = f'{os.environ[\"LS_HOST\"]}/api/dm/views/' \\\n f'{existing_view_id}'\n _ = requests.delete(url, headers=headers)\n\n url = f'{os.environ[\"LS_HOST\"]}/api/dm/views/'\n logger.debug(f'Request: {url} -d {view_template}')\n resp = requests.post(url,\n headers=headers,\n data=json.dumps(view_template))\n new_view = resp.json()\n logger.debug(f'Response: {new_view}')\n return new_view", "def network_wide():\n network = getNetworkWide()\n return render_template(\"network-wide.html\", network=network)", "def build_graph(self):\n\t\tself._create_placeholders()\n\t\tself._create_embedding()\n\t\tself._create_recurrent_layers()\n\t\tself._create_de_embedding()\n\t\tself._create_loss()\n\t\tself._create_optimizer()\n\t\tself._create_summaries()", "def show_networks():\n return get_networks()", "def build_graph(self, graph, inst_name, port_nets):\n return", "def __repr__(self):\n return self.buildView(0)", "def _build_graph(show=False):\n global G\n G = nx.Graph()\n node_labels, edge_labels = {}, {}\n for idx, dep in enumerate(A.deps):\n\n types = [\"dependent\", \"governor\"]\n\n # nodes, labels\n for x in types:\n G.add_node(str(dep[x]), word=dep[x + \"Gloss\"], pos=A.lookup[dep[x]][\"pos\"])\n node_labels[str(dep[x])] = dep[x + \"Gloss\"] + \" : \" + A.lookup[dep[x]][\"pos\"]\n\n # edges, labels\n G.add_edge(str(dep[types[0]]), str(dep[types[1]]), dep=dep[\"dep\"])\n edge_labels[(str(dep[types[0]]), str(dep[types[1]]))] = dep[\"dep\"]\n\n if show == True:\n pos = nx.spring_layout(G)\n nx.draw_networkx(G, pos=pos, labels=node_labels, node_color=\"white\", alpha=.5)\n nx.draw_networkx_edge_labels(G, pos=pos, edge_labels=edge_labels)\n plt.show()", "def create_nodes(self):", "def _build_graph(self):\n pass", "def view(self):", "def build_net(self, n_dps=1, n_vlans=1,\n dp_links=None, host_links=None, host_vlans=None,\n vlan_options=None, dp_options=None, host_options=None,\n routers=None, stack_roots=None,\n include=None, include_optional=None,\n hw_dpid=None, lacp_trunk=False):\n if include is None:\n include = []\n if include_optional is None:\n include_optional = []\n self.NUM_DPS = n_dps\n self.dpids = [str(self.rand_dpid()) for _ in range(n_dps)]\n self.dpids[0] = self.dpid\n vlan_vids = {vlan: self.vlan_vid(vlan) for vlan in range(n_vlans)}\n self.topo = FaucetTopoGenerator(\n self.OVS_TYPE,\n self.ports_sock,\n self._test_name(),\n self.dpids,\n dp_links,\n host_links,\n host_vlans,\n vlan_vids,\n hw_dpid=self.hw_dpid,\n switch_map=self.switch_map,\n port_order=self.port_order,\n start_port=self.start_port\n )\n self.port_maps = {dpid: self.create_port_map(dpid) for dpid in self.dpids}\n self.port_map = self.port_maps[self.dpid]\n self.CONFIG = self.get_config(\n dpids=self.dpids,\n hw_dpid=hw_dpid,\n hardware=self.hardware,\n ofchannel_log=self.debug_log_path,\n n_vlans=n_vlans,\n host_links=host_links,\n host_vlans=host_vlans,\n stack_roots=stack_roots,\n include=include,\n include_optional=include_optional,\n acls=self.acls(),\n acl_in_dp=self.acl_in_dp(),\n lacp_trunk=lacp_trunk,\n vlan_options=vlan_options,\n dp_options=dp_options,\n routers=routers,\n host_options=host_options\n )\n self.n_vlans = n_vlans\n self.dp_links = dp_links\n self.host_links = host_links\n self.host_vlans = host_vlans\n self.stack_roots = stack_roots\n self.routers = routers\n self.dp_options = dp_options\n self.host_options = host_options\n self.vlan_options = vlan_options", "def _build_networks(self):\n # Calling online_convnet will 
generate a new graph as defined in\n # self._get_network_template using whatever input is passed, but will always\n # share the same weights.\n self.online_convnet = tf.make_template('Online', self._network_template)\n self.target_convnet = tf.make_template('Target', self._network_template)\n self._net_outputs = self.online_convnet(self.state_ph)\n\n self._replay_net_outputs = self.online_convnet(self._replay.states)\n self._replay_next_target_net_outputs = self.target_convnet(\n self._replay.next_states)\n\n if self.acting_policy == 'hyperbolic':\n self._q_argmax = tf.argmax(self._net_outputs.hyp_q_value, axis=1)[0]\n elif self.acting_policy == 'largest_gamma':\n self._q_argmax = tf.argmax(self._net_outputs.q_values[-1], axis=1)[0]\n else:\n raise NotImplementedError", "def create_layout( self ):", "def make_model(self):\n onnx_graph = onnx.helper.make_graph(\n self._nodes, self._name, self._inputs, self._outputs, self._initializers\n )\n kwargs = {}\n kwargs[\"opset_imports\"] = self._get_opsets()\n kwargs[\"producer_name\"] = \"TVM Relay\"\n kwargs[\"producer_version\"] = tvm.__version__\n\n return onnx.helper.make_model(onnx_graph, **kwargs)", "def build_network(config):\n network_cfg = config['network']\n\n network_name = network_cfg['name']\n\n network_params = list(inspect.signature(eval(network_name).__init__).parameters)[1:]\n\n args = [f'{param}={network_cfg[param]}' for param in network_params if network_cfg.get(param)]\n\n try:\n model = eval('{}({})'.format(network_name, ', '.join(args)))\n except:\n raise ValueError('Can\\'t load network.')\n\n return model.to(device='cuda')", "def build_graph(self):\n self.__create_placeholders()\n self.__create_encoder()\n self.__create_latent()\n self.__create_decoder()\n self.__create_loss()\n self.__create_generate()\n self.__create_reconstruct()\n self.__create_optimizer()\n self.__create_summary()", "def build_topo(topo_file, display_graph = False):\n topo_graph = read_topo( topo_file )\n # mininet topo\n topo = NetworkXTopo( )\n topo.build_network( topo_graph, HOSTS_PER_SWITCH )\n hosts = topo.hosts( )\n # Debug \n print \"Total number of Vertices:\", len(topo.switches())\n print \"Total number of Edges(including edges to hosts):\", len(topo.links())\n #for host in hosts:\n # print host\n #for link in net.links():\n # print link\n if display_graph:\n draw_graph(topo_graph)\n return topo", "def build_graph(self):\n pass", "def build_graph(self):\n if self.model == 'dense':\n # ForecastNet with two densely connected hidden layers in a cell and Mixture Density Network outputs\n self.outputs, self.mu, self.sigma, self.cost = forecastnet_graph(self.X,\n self.Y,\n self.hidden_dim,\n self.out_seq_length,\n self.is_training)\n elif self.model == 'conv':\n # ForecastNet with a convlutional neural network in a cell and Mixture Density Network outputs\n self.outputs, self.mu, self.sigma, self.cost = forecastnet_conv_graph(self.X,\n self.Y,\n self.hidden_dim,\n self.out_seq_length,\n self.is_training)\n elif self.model == 'dense2':\n # ForecastNet with two densely connected hidden layers in a cell and linear outputs\n self.outputs, self.cost = forecastnet_graph2(self.X,\n self.Y,\n self.hidden_dim,\n self.out_seq_length,\n self.is_training)\n elif self.model == 'conv2':\n # ForecastNet with a convolutional neural network in a cell and linear outputs\n self.outputs, self.cost = forecastnet_conv_graph2(self.X,\n self.Y,\n self.hidden_dim,\n self.out_seq_length,\n self.is_training)", "def build_network(self):\n\n input_placeholder = Input(shape = 
self.input_shape)\n\n # Stage 1\n x = self.main_path_block(\n input_placeholder,\n 64, (7, 7), 'same',\n 'conv1', 'bn_conv1',\n activation = 'relu',\n strides = (2, 2)\n )\n x = MaxPooling2D((3, 3), strides = (2, 2), padding = 'same')(x)\n\n # Stage 2\n x = self.identity_block(x, 64, 'relu', 2, 'a', False)\n x = self.identity_block(x, 64, 'relu', 2, 'b')\n\n # Stage 3\n x = self.convolutional_block(x, [128, 128, 128], 'relu', 3, 'a')\n x = self.identity_block(x, 128, 'relu', 3, 'b')\n\n # Stage 4\n x = self.convolutional_block(x, [256, 256, 256], 'relu', 4, 'a')\n x = self.identity_block(x, 256, 'relu', 4, 'b')\n\n # Stage 5\n x = self.convolutional_block(x, [512, 512, 512], 'relu', 5, 'a')\n x = self.identity_block(x, 512, 'relu', 4, 'b')\n\n # Fully Connected Layers\n x = BatchNormalization(axis = 3)(x)\n x = Activation('relu')(x)\n x = AveragePooling2D((2, 1), padding = 'valid', strides = (2, 2))(x)\n x = Flatten()(x)\n x = Dense(512)\n x = Dense(\n self.classes, activation = 'softmax',\n name = 'fc_' + str(self.classes),\n kernel_initializer = glorot_uniform(seed = 0)\n )(x)\n\n self.model = Model(input_placeholder, x, name = 'Resnet18')", "def _create_network(self, name):\n network = self.network(self.num_actions, self.quantile_embedding_dim,\n name=name)\n return network", "def create_graph_network_visualization(graph_network, connections, connections_grouped):\n\n edge_trace = go.Scatter(\n x=[],\n y=[],\n customdata=[],\n text=[],\n line=dict(width=2, color='#888'),\n hoverinfo='all',\n mode='lines+text',\n textposition='top left',\n )\n edge_label_trace = go.Scatter(\n x=[],\n y=[],\n text=[],\n textposition='top left',\n mode='markers+text',\n hoverinfo='none',\n marker=go.Marker(\n opacity=0\n ),\n textfont=dict(size=20, color='black')\n )\n\n for edge in graph_network.edges():\n x0, y0 = graph_network.node[edge[0]]['pos']\n x1, y1 = graph_network.node[edge[1]]['pos']\n edge_weight = graph_network.node[edge[1]]['pos']\n edge_trace['x'] += tuple([x0, x1, None])\n edge_trace['y'] += tuple([y0, y1, None])\n\n text = graph_network[edge[0]][edge[1]]['weight']\n edge_label_trace['x'] += tuple([(x0 + x1) / 2])\n edge_label_trace['y'] += tuple([(y0 + y1) / 2])\n edge_label_trace['text'] += tuple([text])\n\n # writing to edge customdata\n edge_trace['customdata'] += graph_network[edge[0]][edge[1]]['weight']\n edge_trace['text'] = str(graph_network[edge[0]][edge[1]]['weight'])\n # edge_trace['marker']['size'] += professor_graph[edge[0]][edge[1]]['weight']\n # print(graph_network[edge[0]][edge[1]]['weight'])\n\n node_trace = go.Scatter(\n x=[],\n y=[],\n text=[],\n hovertext=[],\n mode=\"markers+text\",\n hoverinfo='text',\n textposition='bottom center',\n marker=dict(\n showscale=False,\n # colorscale options\n # ['Greys', 'YlGnBu', 'Greens', 'YlOrRd', 'Bluered', 'RdBu',\n # 'Reds', 'Blues', 'Picnic', 'Rainbow', 'Portland', 'Jet',\n # 'Hot', 'Blackbody', 'Earth', 'Electric', 'Viridis', 'Cividis]\n colorscale='YlGnBu',\n reversescale=True,\n color=[],\n size=40,\n colorbar=dict(\n thickness=15,\n title='Node Connections',\n xanchor='left',\n titleside='right'\n ),\n line=dict(width=2))\n )\n\n entry_bool = True\n\n for node in graph_network.nodes():\n x, y = graph_network.node[node]['pos']\n node_trace['x'] += tuple([x])\n node_trace['y'] += tuple([y])\n # node_trace['text'].append(node)\n\n # x, y = professor_graph.node[node]['pos']\n # node_trace['x'].append(x)\n # node_trace['y'].append(y)\n\n if entry_bool:\n # node_trace['text'].append(node)\n node_trace['text'] += tuple([node])\n 
entry_bool = False\n total_projects = \"Total Projects: {}\".format(len(connections[\"Proposal Number:\"].unique()))\n print(\"Total Projects\", total_projects)\n node_trace['hovertext'] += tuple([total_projects])\n else:\n # node_trace['text'].append(node)\n node_trace['text'] += tuple([node])\n some_text = []\n some_text.append(node + \"<br>\")\n for i in range(len(connections_grouped.loc[node]['proposal_number'])):\n if i > 0:\n some_text.append(\"<br>\")\n print(\"list index is \", i)\n print(\"prop number is \", connections_grouped.loc[node]['proposal_number'][i])\n some_text.append(connections_grouped.loc[node]['proposal_number'][i])\n # import pdb\n # pdb.set_trace()\n some_text.append(\"<br>\")\n some_text.append(connections_grouped.loc[node]['proposal_title'][i])\n some_text.append(\"<br>\")\n some_text.append(connections_grouped.loc[node]['project_status'][i])\n some_text.append(\"<br>\")\n some_text.append(connections_grouped.loc[node]['institution'][i])\n some_text.append(\"<br>\")\n some_text = [x for x in some_text if str(x) != 'nan']\n\n some_text = \"\".join(some_text)\n print(node)\n print(\"yo is \", some_text)\n # node_trace['hovertext'].append(some_text)\n node_trace['hovertext'] += tuple([some_text])\n\n for node, adjacencies in enumerate(graph_network.adjacency_list()):\n # print(node,adjacencies)\n # print(professor_graph[node])\n node_trace['marker']['color'] += tuple([len(adjacencies)])\n\n return node_trace, edge_trace, edge_label_trace", "def draw_network(network):\n # 增加原始网格\n bpy.ops.object.select_all(action='DESELECT')\n # bpy.ops.mesh.primitive_cone_add(vertices=3, depth=1.414213)\n bpy.ops.mesh.primitive_uv_sphere_add()\n cube = bpy.context.scene.objects['Sphere']\n\n # 保存所有节点和边的引用\n shapes = []\n\n # 生成结点\n for key, node in network[\"nodes\"].items():\n \n # 结点的颜色设定\n # col = node.get(\"color\", choice(list(colors.keys())))\n\n # 复制原始网格并且生成新节点\n node_cube = cube.copy()\n node_cube.data = cube.data.copy()\n node_cube.name = key\n\n for name_instance in data_instance:\n if name_instance[\"display_name\"]==key:\n node_cube.scale = (0.5,0.5,0.5)\n if name_instance[\"vm_state\"]==\"active\":\n node_cube.active_material = bpy.data.materials[\"green\"]\n elif name_instance[\"vm_state\"]==\"stopped\":\n node_cube.active_material = bpy.data.materials[\"gray\"]\n else:\n for name_instance in data_image:\n if name_instance[\"images_name\"]==key:\n node_cube.active_material = bpy.data.materials[\"blue\"]\n elif name_instance[\"status\"]==\"stopped\":\n node_cube.active_material = bpy.data.materials[\"red\"]\n \n node_cube.location = node[\"location\"]\n # node_cube.active_material = bpy.data.materials[col]\n bpy.context.scene.objects.link(node_cube)\n shapes.append(node_cube)\n \n for edge in network[\"edges\"]:\n # 通过遍历获取源和目标的位置\n source_name = edge[\"images_name\"]\n target_name = edge[\"display_name\"]\n source_obj = bpy.data.objects[source_name] \n target_obj = bpy.data.objects[target_name]\n # 设置父子关系\n bpy.ops.object.mode_set(mode = 'OBJECT')\n bpy.ops.object.select_all(action='DESELECT')\n bpy.context.scene.objects.active = source_obj\n target_obj.select = True\n try:\n bpy.ops.object.parent_set()\n except:\n pass\n \n # 删除原始网格\n bpy.ops.object.select_all(action='DESELECT')\n cube.select = True\n\n # 删除启动时的小方块\n if \"Cube\" in bpy.data.objects.keys():\n bpy.data.objects.get(\"Cube\").select = True\n bpy.ops.object.delete()\n\n # 将整个物体居中对齐\n bpy.ops.object.origin_set(type=\"ORIGIN_GEOMETRY\", center=\"MEDIAN\")\n \n # 刷新场景\n bpy.context.scene.update()", "def 
create_network(self):\n\n print ('Creating network, changing data will have no effect beyond this point.')\n n = IMNN.IMNN(parameters=self.parameters)\n\n if self.load_network:\n n.restore_network()\n else:\n n.setup(network = self.network, load_data = self.data)\n\n return n", "def build_graph(self, graph, inst_name, port_nets):\n self.add_graph_edges(graph, port_nets)", "def build_model(self):\n for link in self.links:\n # if from neuron is input to graph, add it to input_neurons set\n if self.is_input_neuron(link.from_neuron_id):\n self.input_neurons.add(link.from_neuron_id)\n # add weight to neuron\n if link.to_neuron_id not in self.weights:\n self.weights[link.to_neuron_id] = []\n self.weights[link.to_neuron_id].append(link.weight)\n # add input to neuron\n if link.to_neuron_id not in self.connections:\n self.connections[link.to_neuron_id] = []\n self.connections[link.to_neuron_id].append(link.from_neuron_id)", "def create_network(layers):\r\n return NeuronNetwork(layers)", "def build_net(nz=100):\n\tif opts.celeba:\n\t\tgen = get_gen_celebA(nz=nz)\n\t\tdis = get_dis_celebA(nz=nz)\n\n\tif opts.mnist:\n\t\tgen = get_gen_mnist(nz=nz)\n\t\tdis = get_dis_mnist(nz=nz)\n\n\treturn gen, dis", "def build_network(self, inputs, targets, training=False):\n raise NotImplementedError", "def view(self):\n raise NotImplementedError", "def network_list(request):\n flatpage = get_flatpage_or_none(request)\n network_list = Network.objects.filter(user_id=0)\n\n return {\n 'flatpage': flatpage,\n 'network_list': network_list,\n }", "def build(self, config):\n nets = OrderedDict()\n\n nets['shared'] = NeuralNet(self.tensor_in, config['net_g']['shared'],\n name='shared')\n\n nets['pitch_time_private'] = [\n NeuralNet(nets['shared'].tensor_out,\n config['net_g']['pitch_time_private'],\n name='pt_'+str(idx))\n for idx in range(config['num_track'])\n ]\n\n nets['time_pitch_private'] = [\n NeuralNet(nets['shared'].tensor_out,\n config['net_g']['time_pitch_private'],\n name='tp_'+str(idx))\n for idx in range(config['num_track'])\n ]\n\n nets['merged_private'] = [\n NeuralNet(tf.concat([nets['pitch_time_private'][idx].tensor_out,\n nets['time_pitch_private'][idx].tensor_out],\n -1),\n config['net_g']['merged_private'],\n name='merged_'+str(idx))\n for idx in range(config['num_track'])\n ]\n\n nets['refiner_private'] = [\n NeuralNet(nets['merged_private'][idx].tensor_out,\n config['net_r']['private'],\n slope_tensor=self.slope_tensor,\n name='refiner_private'+str(idx))\n for idx in range(config['num_track'])\n ]\n\n return (tf.concat([nn.tensor_out for nn in nets['private']], -1), nets,\n tf.concat([nn.layers[-1].preactivated\n for nn in nets['private']], -1))", "def build_net(nz=100):\n\tif opts.celeba:\n\t\tgen = get_wgen_celebA(nz=nz)\n\t\tdis = get_wdis_celebA(nz=nz)\n\n\tif opts.mnist:\n\t\tgen = get_wgen_mnist(nz=nz)\n\t\tdis = get_wdis_mnist(nz=nz)\n\n\treturn gen, dis", "def create_graph_network(start_node, connections):\n graph = nx.Graph()\n graph.add_node(start_node)\n print(connections.index)\n graph.add_nodes_from(connections.index)\n edge_list = list(zip(itertools.repeat(start_node), connections.index))\n print(\"edge list is \", edge_list)\n graph.add_edges_from(edge_list)\n for i in graph.edges():\n graph[i[0]][i[1]]['weight'] = connections.loc[i[1]]['count']\n # graph[i[0]][i[1]]['proposal_number'] = connections.loc[i[1]]['proposal_number']\n # graph[i[0]][i[1]]['institution'] = connections.loc[i[1]]['institution']\n # graph[i[0]][i[1]]['proposal_title'] = 
connections.loc[i[1]]['proposal_title']\n # graph[i[0]][i[1]]['project_status'] = connections.loc[i[1]]['project_status']\n\n # Adding random position data to the graph.\n # pos = nx.spring_layout(graph, k=1)\n pos = nx.circular_layout(graph)\n nx.set_node_attributes(graph, 'pos', pos)\n return graph", "def build(self):\n root = ET.Element(\"ncx\", xmlns=self.namespace, version=self.version)\n head = ET.SubElement(root, \"head\")\n ET.SubElement(head, \"meta\",\n content=\"urn:uuid:%s\" % self.bookid,\n name=\"dtb:uid\",\n )\n ET.SubElement(head, \"meta\",\n content=\"1\",\n name=\"dtb:depth\",\n )\n ET.SubElement(head, \"meta\",\n content=\"0\",\n name=\"dtb:totalPageCount\",\n )\n ET.SubElement(head, \"meta\",\n content=\"0\",\n name=\"dtb:maxPageNumber\",\n )\n doctitle = ET.SubElement(root, \"docTitle\")\n ET.SubElement(doctitle, \"text\").text = self.title\n navmap = ET.SubElement(root, \"navMap\")\n seq = 1\n for sid, label, src in self.items:\n navpt = ET.SubElement(navmap, \"navPoint\", id=sid,\n playOrder=str(seq))\n navlabel = ET.SubElement(navpt, \"navLabel\")\n ET.SubElement(navlabel, \"text\").text = label\n ET.SubElement(navpt, \"content\", src=src)\n seq += 1\n return root", "def build_topology(self):\n# errstr = \"build_topology() is not implemented.\\n\"\n# errstr += textwrap.dedent(self.build_topology.__doc__)\n# raise NotImplementedError(errstr)\n pass # May be a 1-compartment neuron. No need to abstract. ", "def createView(self):\n logging.debug(\"ShortestPathUI.createView function started\")\n formLayout = QFormLayout()\n\n self.fromLineEdit = QLineEdit()\n self.fromLineEdit.textChanged.connect(partial(self.__clearErrorInfo,\n self.fromLineEdit))\n formLayout.addRow(\"From: \", self.fromLineEdit)\n\n self.toLineEdit = QLineEdit()\n self.toLineEdit.textChanged.connect(partial(self.__clearErrorInfo,\n self.toLineEdit))\n formLayout.addRow(\"To: \", self.toLineEdit)\n\n self.pathLineEdit = QLineEdit()\n self.pathLineEdit.setReadOnly(True)\n formLayout.addRow(\"Path: \", self.pathLineEdit)\n\n self.lengthLabel = QLabel()\n formLayout.addRow(\"Length: \", self.lengthLabel)\n self.__generalLayout.addLayout(formLayout, 0, 0)\n\n self.OkButton = QPushButton(\"Ok\")\n self.OkButton.setFixedWidth(50)\n self.OkButton.clicked.connect(self.updatePath)\n self.__generalLayout.addWidget(self.OkButton, 0, 1, alignment=Qt.AlignTop)\n\n logging.debug(\"ShortestPathUI.createView function ended\\n\")", "def generateView(data):\n\n view = data[\"view\"]\n global h_include\n addInclude(h_include, view[\"type\"])\n global h_view\n h_view.append(\"class %s : public %s\" % (view[\"name\"], view[\"type\"]))\n h_view.append(\"{\")\n h_view.append(\"\tQ_OBJECT\")\n h_view.append(\"public:\")\n h_view.append(\"\t%s(QWidget *parent=0);\" % view[\"name\"])\n\n global c_include\n addInclude(c_include, \"QHeaderView\")\n\n global c_view\n c_view.append(\"%s::%s(QWidget *parent)\" % (view[\"name\"],view[\"name\"]))\n c_view.append(\"\\t: %s(parent)\" % view[\"type\"])\n c_view.append(\"{\")\n # TODO: should this be configurable?\n c_view.append(\"\tsetAlternatingRowColors(true);\")\n c_view.append(\"\tverticalHeader()->hide();\")\n c_view.append(\"\thorizontalHeader()->setResizeMode(QHeaderView::Stretch);\")\n c_view.append(\"\tsetTabKeyNavigation(false);\")\n c_view.append(\"\\tsetHorizontalScrollMode(QAbstractItemView::ScrollPerPixel);\")\n c_view.append(\"\\tsetVerticalScrollMode(QAbstractItemView::ScrollPerPixel);\")\n c_view.append(\"\")\n if get(view, \"sort\", True):\n 
c_view.append(\"\\tconnect(horizontalHeader(), SIGNAL(sortIndicatorChanged(int, Qt::SortOrder)),\")\n c_view.append(\"\\t SLOT(sortByColumn(int)) );\")\n c_view.append(\"\\tsetSortingEnabled(true);\")\n c_view.append(\"\\tsortByColumn(0, Qt::AscendingOrder);\")\n c_view.append(\"\\tsetEditTriggers(QAbstractItemView::AnyKeyPressed | QAbstractItemView::EditKeyPressed);\")\n if data.has_key(\"dialog\"):\n c_view.append(\"\\tconnect(this, SIGNAL(doubleClicked(const QModelIndex &)), SLOT(slotEdit(const QModelIndex &)) );\")\n c_view.append(\"}\\n\")\n\n if get(view, \"delete\") or get(view, \"insert\"):\n if data.has_key(\"container\"):\n generateViewInsertDelete(data)\n else:\n print \"Warning: cannot generate view inser/delete function without container\"\n\n if data.has_key(\"dialog\"):\n if data.has_key(\"container\"):\n generateViewSlotEdit(data)\n else:\n print \"Warning: cannot generate edit slot in view without container\"\n\n h_view.append(\"};\")", "def _CreateGraph(self):\n self.nodes = []\n self.edges = []\n for i, r in self.airports.set_index('airport_id').iterrows():\n self.nodes.append((i,r.to_dict()))\n for i, r in self.routes.set_index(['src_id','dst_id']).iterrows():\n self.edges.append((i[0],i[1],r.to_dict()))\n # print('node ex: {}'.format(self.nodes[0]))\n # print('edge ex: {}'.format(self.edges[0]))\n\n self.graph = self._CreateAdjacencyListGraph()", "def createViews(views):\n ...", "def create_netlist(self):\n self.add_modules()\n self.add_pins()\n self.create_instances()", "def buildFromTopo( self, topo=None ):\n\n # Possibly we should clean up here and/or validate\n # the topo\n if self.cleanup:\n pass\n\n info( '*** Creating network\\n' )\n\n if not self.controllers and self.controller:\n # Add a default controller\n info( '*** Adding controller\\n' )\n classes = self.controller\n if not isinstance( classes, list ):\n classes = [ classes ]\n for i, cls in enumerate( classes ):\n # Allow Controller objects because nobody understands partial()\n if isinstance( cls, Controller ):\n self.addController( cls )\n else:\n self.addController( 'c%d' % i, cls )\n\n info( '*** Adding docker hosts:\\n' )\n for hostName in topo.hosts():\n self.addDocker( hostName, **topo.nodeInfo( hostName ))\n info( hostName + ' ' )\n\n info( '\\n*** Adding switches:\\n' )\n for switchName in topo.switches():\n # A bit ugly: add batch parameter if appropriate\n params = topo.nodeInfo( switchName)\n cls = params.get( 'cls', self.switch )\n #if hasattr( cls, 'batchStartup' ):\n # params.setdefault( 'batch', True )\n self.addSwitch( switchName, **params )\n info( switchName + ' ' )\n\n info( '\\n*** Adding links:\\n' )\n for srcName, dstName, params in topo.links(\n sort=True, withInfo=True ):\n self.addLink( **params )\n info( '(%s, %s) ' % ( srcName, dstName ) )\n\n info( '\\n' )", "def __init__(\n self,\n input_shape: Tuple[int, int, int], \n encoding_size: int, \n output_size: int\n ):\n super(VisualQNetwork, self).__init__()\n height = input_shape[0]\n width = input_shape[1]\n initial_channels = input_shape[2]\n conv_1_hw = self.conv_output_shape((height, width), 8, 4)\n conv_2_hw = self.conv_output_shape(conv_1_hw, 4, 2)\n self.final_flat = conv_2_hw[0] * conv_2_hw[1] * 32\n self.conv1 = torch.nn.Conv2d(initial_channels, 16, [8, 8], [4, 4])\n self.conv2 = torch.nn.Conv2d(16, 32, [4, 4], [2, 2])\n self.dense1 = torch.nn.Linear(self.final_flat, encoding_size)\n self.dense2 = torch.nn.Linear(encoding_size, output_size)", "def createNet(self):\n\n sw = OVSKernelSwitch\n topo = 
G2Topo(self.config.topoData)\n ctrl = RemoteController('c', ip=REMOTE_CONTROLLER_IP, port=CONTROLLER_PORT)\n\n # Default link parameters.\n # HTB: Hierarchical Token Bucket rate limiter.\n spec = self.config.topoData['defaultLinkInfo']\n if spec:\n mybw = float(spec['bw'])\n mydelay = spec['delay']\n myloss = float(spec['loss'])\n link = partial(TCLink, delay=mydelay, bw=mybw, loss=myloss)\n if spec['max_queue_size'] != 'N/A' and spec['use_htb'] == 'N/A':\n myqueue = int(spec['max_queue_size'])\n link = partial(TCLink, delay=mydelay, bw=mybw, loss=myloss, max_queue_size=myqueue)\n if spec['max_queue_size'] == 'N/A' and spec['use_htb'] != 'N/A':\n myhtb = bool(spec['use_htb'])\n link = partial(TCLink, delay=mydelay, bw=mybw, loss=myloss, use_htb=myhtb)\n if spec['max_queue_size'] != 'N/A' and spec['use_htb'] != 'N/A':\n myqueue = int(spec['max_queue_size'])\n myhtb = bool(spec['use_htb'])\n link = partial(TCLink, delay=mydelay, bw=mybw, loss=myloss, max_queue_size=myqueue, use_htb=myhtb)\n else:\n # No spec for default parameters, using Mininet defaults.\n info(\"**** [G2]: using Mininet default parameters for links other than those configured in link_info \\n\")\n link = TCLink\n\n # Configure bw, delay, loss, etc. for some links that are specified in config file.\n for spec in self.config.topoData['linkInfos']:\n src = spec['src']\n dst = spec['dst']\n try:\n linkInfo = topo.linkInfo(src, dst)\n if spec['bw'] != 'N/A':\n linkInfo['bw'] = float(spec['bw']) # Mbit\n if spec['delay'] != 'N/A':\n linkInfo['delay'] = spec['delay'] # ms\n if spec['loss'] != 'N/A':\n linkInfo['loss'] = float(spec['loss']) # Percentage\n if spec['max_queue_size'] != 'N/A':\n linkInfo['max_queue_size'] = int(spec['max_queue_size'])\n if spec['use_htb'] != 'N/A':\n linkInfo['use_htb'] = bool(spec['use_htb'])\n\n topo.setlinkInfo(src,dst,linkInfo)\n except KeyError:\n info(\"**** [G2]: no link exists between switch pair (%s, %s) \\n\" %(src, dst))\n\n # Assign a fraction of overall CPU time to Mininet hosts.\n nHosts = float(len(self.config.topoData['hosts']))\n cpuHostFrac = 0.50/nHosts\n # 'cpu' is the fraction of CPU that each host would get.\n # Indirectly, it sets 'cpu.cfs_quota_us': the total available run-time within a period (in microseconds).\n # Mininet uses the following scheme: cfs_quota_us = (cpuHostFrac * nCPU * period_us) microseconds.\n # 'period_us' sets cpu.cfs_period_us.\n # Larger period would allow for increased burst capacity.\n host = custom(CPULimitedHost, cpu=cpuHostFrac, period_us=100000)\n\n net = Mininet(topo=topo,\n host=host,\n switch=sw,\n controller=ctrl,\n waitConnected=True,\n autoStaticArp=True,\n link=link)\n\n # Create a default route for each host.\n # Turn on tcpdump on each host if debug mode is on.\n for hs in topo.hosts():\n net.getNodeByName(hs).setDefaultRoute(intf='%s-eth0' %hs) # 1st interface on hosts is hi-eth0\n if self.config.isDebug:\n net.getNodeByName(hs).cmd('tcpdump -w %s.pcap -i %s-eth0 &' %(hs,hs))\n return net", "def assemble_widget(self) -> widgets.Widget:\n graph_selection = self._create_layer_selection(layer_type=\"graphs\")\n map_selection = self._create_layer_selection(layer_type=\"maps\")\n view_buttons = self.create_visibility_buttons()\n\n widget = widgets.VBox(\n [\n widget_utils.create_html_header(\"Graph Selection\"),\n graph_selection,\n widget_utils.HRULE,\n widget_utils.create_html_header(\"Map Selection\"),\n map_selection,\n widget_utils.HRULE,\n widget_utils.create_html_header(\"View Selection\"),\n view_buttons,\n ]\n )\n\n return widget", 
"def build_graph(self):\n n_classes = self.n_classes\n\n (self.feed('data')\n .conv(3, 3, 64, 1, 1, name='conv1_1', trainable=False)\n .conv(3, 3, 64, 1, 1, name='conv1_2', trainable=False)\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool1')\n .conv(3, 3, 128, 1, 1, name='conv2_1', trainable=False)\n .conv(3, 3, 128, 1, 1, name='conv2_2', trainable=False)\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool2')\n .conv(3, 3, 256, 1, 1, name='conv3_1')\n .conv(3, 3, 256, 1, 1, name='conv3_2')\n .conv(3, 3, 256, 1, 1, name='conv3_3')\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool3')\n .conv(3, 3, 512, 1, 1, name='conv4_1')\n .conv(3, 3, 512, 1, 1, name='conv4_2')\n .conv(3, 3, 512, 1, 1, name='conv4_3')\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool4')\n .conv(3, 3, 512, 1, 1, name='conv5_1')\n .conv(3, 3, 512, 1, 1, name='conv5_2')\n .conv(3, 3, 512, 1, 1, name='conv5_3'))\n\n self.compute_rDeRF() # dummy\n\n # Classification\n (self.feed('conv5_3')\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool6')\n .reshape(shape=(-1, 7, 7, 512), name='pool6_reshape')\n .fc(4096, name='fc6')\n .dropout(0.5, name='drop6')\n .fc(4096, name='fc7')\n .dropout(0.5, name='drop7')\n # .make_time(name='drop7_reduced')\n .fc(n_classes, relu=False, name='cls_score')\n .softmax(name='cls_prob'))\n pass", "def macro_network():\n # fmt: off\n tpm = np.array([\n [0.3, 0.3, 0.3, 0.3],\n [0.3, 0.3, 0.3, 0.3],\n [0.3, 0.3, 0.3, 0.3],\n [0.3, 0.3, 1.0, 1.0],\n [0.3, 0.3, 0.3, 0.3],\n [0.3, 0.3, 0.3, 0.3],\n [0.3, 0.3, 0.3, 0.3],\n [0.3, 0.3, 1.0, 1.0],\n [0.3, 0.3, 0.3, 0.3],\n [0.3, 0.3, 0.3, 0.3],\n [0.3, 0.3, 0.3, 0.3],\n [0.3, 0.3, 1.0, 1.0],\n [1.0, 1.0, 0.3, 0.3],\n [1.0, 1.0, 0.3, 0.3],\n [1.0, 1.0, 0.3, 0.3],\n [1.0, 1.0, 1.0, 1.0],\n ])\n # fmt: on\n return Network(tpm, node_labels=LABELS[:tpm.shape[1]])", "def view(self, windowSize='800x600'):\n f = np.vectorize(\n lambda x: x.__str__() if isinstance(x, QubitChannel) else str(\n np.nan\n )\n )\n data = f(self.diagram)\n namefield = np.array([['']] * len(self.diagram[:, 0]), dtype=object)\n for key, val in {**self.qubitDict, **self.readoutDict}.items():\n namefield[val] = key + f':{val}'\n data = np.hstack([namefield, data])\n timeindex = np.array(\n [''] + list(range(len(self.diagram[0, :]))), dtype=object\n )\n data = np.array(np.vstack([timeindex, data]), dtype=str)\n # create a scrollable window\n _, fm, run = simple_scrollable_window(windowSize)\n for i, row in enumerate(data):\n for j, item in enumerate(row):\n Label(\n fm, text=item, font='Consolas',\n relief='solid', borderwidth=1\n ).grid(row=i, column=j, ipadx=5, ipady=5, sticky='news')\n run()", "def get_build_view(self):\n connection = self._create_connection()\n view = MySQLBuildView(connection)\n return view", "def drawNetwork(self, screen):\r\n # world = max(self.worlds, key=lambda w: w.nn.fitness)\r\n # draw the world\r\n world = self.worlds[0]\r\n world.renderer.render(screen)\r\n\r\n networkSurface = pygame.Surface((750, 180)).convert_alpha()\r\n networkSurface.fill((0, 0, 0, 0))\r\n # draw the minimap and network\r\n networkrenderer.render_network(networkSurface, world.nn, world.minimapValues)\r\n screen.blit(networkSurface, (10, 60))", "def new_network():\n new_names = Names()\n new_devices = Devices(new_names)\n return Network(new_names, new_devices)", "def build_graph(self):\n raise NotImplementedError", "def build_view(frame, box, _view):\n\n\tif isinstance(_view, view.Switch):\n\t\tfor action in _view.get_actions():\n\t\t\tbutton = ActionButton(action, 
_view)\n\t\t\tbox.pack_start(button.make(frame), False, False, 0)", "def make(self):\n\t\tif RENDER_VIEWS > 1:\n\t\t\tself._make()", "def build_network(num_actions: int) -> hk.Transformed:\n\n def q(obs):\n network = hk.Sequential(\n [hk.Flatten(),\n nets.MLP([FLAGS.hidden_units, num_actions])])\n return network(obs)\n\n return hk.without_apply_rng(hk.transform(q, apply_rng=True))", "def _createNetwork(self,verbose):\n filename,n,rulesList = self.filename,self.n,self.rulesList\n if self.verbose:\n mult = 2\n if self.MichaelisMenten:\n mult = 4\n start,startWall = cpuTime(),wallTime()\n print(\"\")\n print(\"Creating network with \"+str(n)+\" activation sites\")\n print(\" and \"+str(len(rulesList))+\" additional rules (\" \\\n +str(mult*(n+len(rulesList)))+\" parameters).\")\n \n namesList = writeBNGL.writeBNGLnetwork(n,rulesList,filename, \\\n MichaelisMenten=self.MichaelisMenten)\n self._runBNGLfile(filename)\n \n if self.verbose:\n print(\"Network creation took \"+bothTimeStr(start,startWall))\n \n return namesList", "def build_graph(self):\n for each_list in self.lab.look():\n vertice = self._add_vertice(each_list)\n if vertice:\n self.unvisited.add(vertice)\n self.graph.addEdge((self.current, vertice))\n \n self.unvisited -= self.visited\n self._connect_neighbours()", "def _build_topology(self):\n\t\t# childSection.connect(parentSection, [parentX], [childEnd])\n\t\tfor i in range(self._axonNodes-1):\n\t\t\tself.node[i].connect(self.mysa[2*i],0,1)\n\t\t\tself.mysa[2*i].connect(self.flut[2*i],0,1)\n\t\t\tself.flut[2*i].connect(self.stin[6*i],0,1)\n\t\t\tself.stin[6*i].connect(self.stin[6*i+1],0,1)\n\t\t\tself.stin[6*i+1].connect(self.stin[6*i+2],0,1)\n\t\t\tself.stin[6*i+2].connect(self.stin[6*i+3],0,1)\n\t\t\tself.stin[6*i+3].connect(self.stin[6*i+4],0,1)\n\t\t\tself.stin[6*i+4].connect(self.stin[6*i+5],0,1)\n\t\t\tself.stin[6*i+5].connect(self.flut[2*i+1],0,1)\n\t\t\tself.flut[2*i+1].connect(self.mysa[2*i+1],0,1)\n\t\t\tself.mysa[2*i+1].connect(self.node[i+1],0,1)", "def build(self):\n # Generate a 4x4 identity matrix, which will be the basis for the view matrix.\n vtm = np.identity( 4, float )\n # Generate a translation matrix to move the VRP to the origin and then premultiply the vtm by the translation matrix.\n t1 = np.matrix( [[1, 0, 0, -self.vrp[0, 0]],\n [0, 1, 0, -self.vrp[0, 1]],\n [0, 0, 1, -self.vrp[0, 2]],\n [0, 0, 0, 1] ] )\n\n vtm = t1 * vtm\n\n # Calculate the view reference axes tu, tvup, tvpn.\n tu = np.cross(self.vup, self.vpn)\n tvup = np.cross(self.vpn, tu)\n tvpn = self.vpn.copy()\n\n # Normalize the view axes tu, tvup, and tvpn to unit length.\n\n # if this doesn't work, create my own normalize function\n tu = self.normalize(tu)\n tvup = self.normalize(tvup)\n tvpn = self.normalize(tvpn)\n\n # Copy the orthonormal axes tu, tvup, and tvpn back to self.u, self.vup and self.vpn.\n self.u = tu.copy()\n self.vup = tvup.copy()\n self.vpn = tvpn.copy()\n\n # Use the normalized view reference axes to generate the rotation matrix \n # to align the view reference axes and then premultiply M by the rotation.\n r1 = np.matrix( [[ tu[0, 0], tu[0, 1], tu[0, 2], 0.0 ],\n [ tvup[0, 0], tvup[0, 1], tvup[0, 2], 0.0 ],\n [ tvpn[0, 0], tvpn[0, 1], tvpn[0, 2], 0.0 ],\n [ 0.0, 0.0, 0.0, 1.0 ] ] )\n\n vtm = r1 * vtm\n\n # Translate the lower left corner of the view space to the origin.\n # extent of the view volume in the X and Y view axes.\n vtm = self.T( 0.5*self.extent[0], 0.5*self.extent[1], 0 ) * vtm\n\n vtm = self.S( -self.screen[0] / self.extent[0], -self.screen[1] / 
self.extent[1], 1.0 / self.extent[2] ) * vtm\n\n vtm = self.T( self.screen[0] + self.offset[0], self.screen[1] + self.offset[1], 0 ) * vtm\n\n return vtm", "def api_call_network_views(view, logger):\n trynetwork = 3\n rnet = None\n for iview in range(trynetwork):\n try:\n rnet = requests.get(PAYLOAD['url'] + \"network?_return_fields=\"\n \"extattrs,comment,network,\"\n \"network_view,utilization&\"\n \"network_view=\" + view,\n \"_max_results=-5000\",\n auth=(PAYLOAD['username'],\n PAYLOAD['password']),\n verify=False)\n break\n except requests.exceptions.ConnectionError as nerrt:\n if iview < trynetwork - 1:\n logger.warning('Container View Retry #%s ,$%s', view, iview)\n time.sleep(5)\n continue\n else:\n logger.info('Timeout Error for container view: %s, %s, %s',\n view, iview, nerrt)\n return []\n return json.loads(rnet.content.decode('utf-8'))", "def show_visualizations(self, number = -1):\n instance = self.instance\n if number > instance.view_num:\n print(\"In function show_visualizations: Error, input number greater than the view numbers.\")\n return Page()\n if self.rank_method == methods_of_ranking[3]: # diversified_ranking\n G = myGraph(instance.view_num)\n for i in range(instance.view_num):\n view = instance.tables[instance.views[i].table_pos].views[instance.views[i].view_pos]\n G.addNode(view)\n G.getSim()\n result = G.getTopK(instance.view_num)\n if number != -1:\n begin = number - 1\n end = number\n else:\n begin = 0\n end = instance.view_num\n page = Page()\n for order in range(begin, end):\n if self.rank_method == methods_of_ranking[3]: # diversified_ranking\n view = G.nodes[result[order]]\n else:\n view = instance.tables[instance.views[order].table_pos].views[instance.views[order].view_pos]\n data = {}\n data['order'] = order\n data['chartname'] = instance.table_name\n data['describe'] = view.table.describe\n data['x_name'] = view.fx.name\n data['y_name'] = view.fy.name\n data['chart'] = Chart.chart[view.chart]\n data['classify'] = [v[0] for v in view.table.classes]\n data['x_data'] = view.X\n data['y_data'] = view.Y\n data['title_top'] = 5\n \n # 以下代码与html_handle相似\n margin = str(data['title_top']) + '%'\n \n if data['chart'] == 'bar':\n chart = (Bar().set_series_opts(label_opts=opts.LabelOpts(is_show=False))\n .set_global_opts(title_opts=opts.TitleOpts(title=data['chartname'], subtitle=data['describe'], pos_left='center', pos_top=margin),\n xaxis_opts=opts.AxisOpts(name=data['x_name']),\n yaxis_opts=opts.AxisOpts(name=data['y_name'], splitline_opts=opts.SplitLineOpts(is_show=True))))\n elif data['chart'] == 'pie': \n chart = (Pie().set_global_opts(title_opts=opts.TitleOpts(title=data['chartname'], subtitle=data['describe'], pos_left='center', pos_top=margin)))\n elif data['chart'] == 'line': \n chart = (Line().set_series_opts(label_opts=opts.LabelOpts(is_show=False))\n .set_global_opts(title_opts=opts.TitleOpts(title=data['chartname'], subtitle=data['describe'], pos_left='center', pos_top=margin),\n xaxis_opts=opts.AxisOpts(name=data['x_name']),\n yaxis_opts=opts.AxisOpts(name=data['y_name'], splitline_opts=opts.SplitLineOpts(is_show=True))))\n elif data['chart']== 'scatter': \n chart = (Scatter().set_series_opts(label_opts=opts.LabelOpts(is_show=False))\n .set_global_opts(title_opts=opts.TitleOpts(title=data['chartname'], subtitle=data['describe'], pos_left='center', pos_top=margin),\n xaxis_opts=opts.AxisOpts(type_='value', name=data['x_name'], splitline_opts=opts.SplitLineOpts(is_show=True)),\n yaxis_opts=opts.AxisOpts(type_='value', name=data['y_name'], 
splitline_opts=opts.SplitLineOpts(is_show=True))))\n else :\n print (\"not valid chart\")\n \n if not data[\"classify\"] :\n attr = data[\"x_data\"][0]\n val = data[\"y_data\"][0]\n if data['chart'] == 'bar': \n chart.add_xaxis(attr).add_yaxis(\"\", val, label_opts=opts.LabelOpts(is_show=False))\n elif data['chart'] == 'line': \n chart.add_xaxis(attr).add_yaxis(\"\", val, label_opts=opts.LabelOpts(is_show=False))\n elif data['chart'] == 'pie': \n chart.add(\"\", [list(z) for z in zip(attr, val)])\n elif data['chart'] == 'scatter': \n if isinstance(attr[0], str):\n attr = [x for x in attr if x != '']\n attr = list(map(float, attr))\n if isinstance(val[0], str):\n val = [x for x in val if x != '']\n val = list(map(float, val))\n chart.add_xaxis(attr).add_yaxis(\"\", val, label_opts=opts.LabelOpts(is_show=False))\n page.add(chart)\n else :\n attr = data[\"x_data\"][0]\n for i in range(len(data[\"classify\"])) :\n val = data[\"y_data\"][i]\n name = (data[\"classify\"][i][0] if type(data[\"classify\"][i]) == type(('a','b')) else data[\"classify\"][i])\n if i == 0:\n if data['chart'] != 'pie' and data['chart'] != 'scatter':\n chart.add_xaxis(attr)\n if data['chart'] == 'bar': \n chart.add_yaxis(name, val, stack=\"stack1\", label_opts=opts.LabelOpts(is_show=False))\n elif data['chart'] == 'line': \n chart.add_yaxis(name, val, label_opts=opts.LabelOpts(is_show=False))\n elif data['chart'] == 'pie': \n chart.add(\"\", [list(z) for z in zip(attr, val)])\n elif data['chart'] == 'scatter': \n attr_scatter = data[\"x_data\"][i]\n if isinstance(attr_scatter[0], str):\n attr_scatter = [x for x in attr_scatter if x != '']\n attr_scatter = list(map(float, attr_scatter))\n if isinstance(val[0], str):\n val = [x for x in val if x != '']\n val = list(map(float, val))\n chart.add_xaxis(attr_scatter).add_yaxis(name, val, label_opts=opts.LabelOpts(is_show=False))\n page.add(chart)\n return page", "def __init__(\n self,\n viewer: geoviewer.GeoGraphViewer,\n layer_type: str,\n layer_subtype: str,\n layer_name: Optional[str] = None,\n link_to_current_state: bool = True,\n layout: Optional[widgets.Layout] = None,\n **kwargs,\n ) -> None:\n\n self.viewer = viewer\n\n # Setting log with handler, that allows access to log\n # via self.log_handler.show_logs()\n self.logger = logging.getLogger(type(self).__name__)\n self.logger.setLevel(self.viewer.logger.level)\n self.log_handler = self.viewer.log_handler\n self.logger.addHandler(self.log_handler)\n\n if layout is None:\n layout = widgets.Layout(height=\"auto\", width=\"auto\")\n\n super().__init__(layout=layout, **kwargs)\n\n self.add_traits(\n layer_type=traitlets.Unicode().tag(sync=True),\n layer_subtype=traitlets.Unicode().tag(sync=True),\n layer_name=traitlets.Unicode().tag(sync=True),\n )\n self.layer_subtype = layer_subtype\n self.layer_type = layer_type\n\n if layer_type == \"maps\":\n if layer_name is None:\n layer_name = self.viewer.current_map\n self.layer_name = layer_name\n\n # If current map changes the function of this button changes\n if link_to_current_state:\n widgets.dlink((self.viewer, \"current_map\"), (self, \"layer_name\"))\n\n elif layer_type == \"graphs\":\n if layer_name is None:\n layer_name = self.viewer.current_graph\n self.layer_name = layer_name\n\n if link_to_current_state:\n widgets.dlink((self.viewer, \"current_graph\"), (self, \"layer_name\"))\n\n self.observe(self._handle_view, names=[\"value\", \"layer_name\"])\n self._check_layer_exists()\n\n self.logger.info(\"Initialised.\")", "def build(self):", "def build(self):", "def 
build(self):", "def build(self,A,k=5):\n # instantiate a Crayon::Graph object\n self.cpp = _crayon.neighborhood(A,k)\n # retrieve adjacency matrix\n self.adj = self.cpp.adj()\n # compute its Graphlet Degree Vector\n self.gdv = self.cpp.gdv()\n # convert node-wise to graph-wise graphlet frequencies\n self.sgdv = np.sum(self.gdv,axis=0)\n # weight GDV according to dependencies between orbits\n o = np.array([1, 2, 2, 2, 3, 4, 3, 3, 4, 3,\n 4, 4, 4, 4, 3, 4, 6, 5, 4, 5,\n 6, 6, 4, 4, 4, 5, 7, 4, 6, 6,\n 7, 4, 6, 6, 6, 5, 6, 7, 7, 5,\n 7, 6, 7, 6, 5, 5, 6, 8, 7, 6,\n 6, 8, 6, 9, 5, 6, 4, 6, 6, 7,\n 8, 6, 6, 8, 7, 6, 7, 7, 8, 5,\n 6, 6, 4],dtype=np.float)\n w = 1. - o / 73.\n self.ngdv = self.sgdv * w[:self.sgdv.shape[0]]\n self.ngdv = self.ngdv / max(float(np.sum(self.ngdv)),1.)", "def intf_VIEWSHOW(E):\n out= \"View Properties\\n\"\n out+= \"---------------\\n\"\n out+= \"svgoutfile=%s\\n\" % OUT.outfile\n out+= \"camera=%s {camset}\\n\" % (','.join([str(x) for x in OUT.camera]))\n out+= \"target=%s {tarset}\\n\" % (','.join([str(x) for x in OUT.target]))\n out+= \"opacity=%s {hlr,hide}\\n\" % str(OUT.opacity)\n out+= \"facelines=%s {facelines}\\n\" % str(OUT.facelines)\n out+= \"vlinewidth=%0.2f {vlw,viewlinewidth}\\n\" % OUT.vlinewidth\n out+= \"vrefreshms=%d {refreshms,viewrefreshms}\\n\" % OUT.vrefreshms\n out+= \"vbox=(%d,%d) {viewbox[xy]}\\n\" % (OUT.vboxX,OUT.vboxY)\n out+= \"vtran=(%d,%d) {vtran[xy],viewtran[xy]}\\n\" % (OUT.vtranX,OUT.vtranY)\n out+= \"vscale=(%d,%d) {vscale[xy],viewscale[xy]}\\n\" % (OUT.vscaleX,OUT.vscaleY)\n print(out)", "def get_network(self):\n\n # Find which nodes are input and which are output. We may want to store\n # this info somewhere else (like in the genome)\n\n inputs = []\n outputs = []\n bias = []\n edges = []\n node_num = dict() #Map from node_id to zero index node number\n\n for i, node in enumerate(self.node_genes):\n # Create mapping\n node_num[node.node_id] = i\n\n # Store input and output node_numbers\n if node.node_type is INPUT:\n inputs.append(i)\n elif node.node_type is OUTPUT:\n outputs.append(i)\n elif node.node_type is BIAS:\n bias.append(i)\n\n # Create edge list.\n for link in self.link_genes:\n if link.enabled:\n edges.append((node_num[link.to_node.node_id],\n node_num[link.from_node.node_id], link.weight))\n\n\n # Build an adjacency matrix for the network\n n = len(node_num)\n adj_matrix = np.zeros((n, n))\n try:\n for e in edges:\n adj_matrix[e[:2]] = e[2]\n except:\n global GENOME\n GENOME = self\n print([node.node_id for node in self.node_genes])\n print()\n print('len(node_genes)', len(self.node_genes))\n print('edge', e)\n print('adj.shape', adj_matrix.shape)\n sys.exit()\n\n return Network(adj_matrix, inputs, outputs, bias)", "def visualise(self) -> None:\n nx_graph = nx.DiGraph()\n\n for v in self._vertices:\n if not v.predicate:\n name = v.name.split(\"/\")[-1]\n nx_graph.add_node(name, name=name, pred=v.predicate)\n\n for v in self._vertices:\n if not v.predicate:\n v_name = v.name.split(\"/\")[-1]\n # Neighbors are predicates\n for pred in self.get_neighbors(v):\n pred_name = pred.name.split(\"/\")[-1]\n for obj in self.get_neighbors(pred):\n obj_name = obj.name.split(\"/\")[-1]\n nx_graph.add_edge(v_name, obj_name, name=pred_name)\n\n plt.figure(figsize=(10, 10))\n _pos = nx.circular_layout(nx_graph)\n nx.draw_networkx_nodes(nx_graph, pos=_pos)\n nx.draw_networkx_edges(nx_graph, pos=_pos)\n nx.draw_networkx_labels(nx_graph, pos=_pos)\n names = nx.get_edge_attributes(nx_graph, \"name\")\n 
nx.draw_networkx_edge_labels(nx_graph, pos=_pos, edge_labels=names)", "def build_graph(self):\n for node in self.nodes:\n self.graph.add_node(node.id, node_obj=node)\n edges = []\n for i in range(0, len(self.nodes)):\n for j in range(i+1, len(self.nodes)):\n if (self.nodes[i].distance(self.nodes[j]) < self.radio_range):\n edges.append((self.nodes[i].id, self.nodes[j].id,1))\n self.graph.add_weighted_edges_from(edges)", "def build(self, *args, **kwargs):\n\n # BGP routers\n\n as1ra = self.bgp('as1ra',['2001:1234:1::/64'])\n as2rb = self.bgp('as2rb',['2001:1234:2::/64'])\n as3rc = self.bgp('as3rc',['2001:1234:3::/64'])\n as4rd = self.bgp('as4rd',['2001:1234:4::/64'])\n\n # Set AS-ownerships\n\n self.addOverlay(AS(1, (as1ra,)))\n self.addOverlay(AS(2, (as2rb,)))\n self.addOverlay(AS(3, (as3rc,)))\n self.addOverlay(AS(4, (as4rd,)))\n\n # Inter-AS links\n\n self.addLink(as1ra, as2rb, \n params1={\"ip\": \"2001:12::a/64\"},\n params2={\"ip\": \"2001:12::b/64\"})\n self.addLink(as1ra, as3rc, \n params1={\"ip\": \"2001:13::a/64\"},\n params2={\"ip\": \"2001:13::c/64\"})\n self.addLink(as2rb, as3rc, \n params1={\"ip\": \"2001:23::b/64\"},\n params2={\"ip\": \"2001:23::c/64\"})\n self.addLink(as2rb, as4rd, \n params1={\"ip\": \"2001:24::c/64\"},\n params2={\"ip\": \"2001:24::d/64\"})\n\n # Add eBGP peering\n bgp_peering(self, as1ra, as2rb)\n bgp_peering(self, as1ra, as3rc)\n bgp_peering(self, as2rb, as3rc)\n bgp_peering(self, as2rb, as4rd)\n\n\n # hosts attached to the routers\n\n self.addLink(as1ra, self.addHost('h1'),\n params1={\"ip\": \"2001:1234:1::a/64\"},\n params2={\"ip\": \"2001:1234:1::1/64\"})\n self.addLink(as2rb, self.addHost('h2'),\n params1={\"ip\": \"2001:1234:2::b/64\"},\n params2={\"ip\": \"2001:1234:2::2/64\"})\n self.addLink(as3rc, self.addHost('h3'),\n params1={\"ip\": \"2001:1234:3::c/64\"},\n params2={\"ip\": \"2001:1234:3::1/64\"})\n self.addLink(as4rd, self.addHost('h4'),\n params1={\"ip\": \"2001:1234:4::d/64\"},\n params2={\"ip\": \"2001:1234:4::4/64\"})\n\n super(SimpleBGP, self).build(*args, **kwargs)", "def fig4():\n # fmt: off\n tpm = np.array([\n [0, 0, 0],\n [0, 0, 1],\n [1, 0, 1],\n [1, 0, 0],\n [1, 0, 0],\n [1, 1, 1],\n [1, 0, 1],\n [1, 1, 0],\n ])\n cm = np.array([\n [0, 1, 1],\n [1, 0, 1],\n [1, 1, 0],\n ])\n # fmt: on\n return Network(tpm, cm=cm, node_labels=LABELS[:tpm.shape[1]])", "def fullgraphplot(time_lower,time_upper):\n\n edges_list,node_list,title_list = graphformation(time_lower,time_upper)\n node_size = []\n for i in range(len(node_list)):\n node_size.append(5)\n g = Network(\n height=\"750px\",\n width=\"100%\",\n bgcolor=\"#222222\",\n font_color=\"white\")\n g.add_nodes(node_list,label=node_list,title=title_list, size= node_size)\n g.add_edges(edges_list)\n g.show(\"nx.html\")\n return", "def build_topo(self):\n super(EBGPTopo, self).build()", "def create_network(num_subs):\n\n # Need one host for each subscriber, one for a publisher, and one for a broker\n n_hosts = num_subs + 2\n\n topo = SingleSwitchTopo(n=n_hosts)\n\n return Mininet(topo=topo, controller=OVSController)", "def create_network():\n net = ln.models.TinyYolo(CLASSES, CONF_THRESH, NMS_THRESH)\n\n net.load(args.weight)\n net.eval()\n net.postprocess.append(ln.data.transform.TensorToBrambox(NETWORK_SIZE, LABELS))\n net = net.to(device)\n return net", "def config_to_view(self):\n raise NotImplementedError", "def generate(self,num_pores,domain_size):\n logger.info(\"Start of network topology generation\")\n self._generate_setup(num_pores,domain_size)\n self._generate_pores()\n 
self._generate_throats()\n logger.debug(\"Network generation complete\")", "def connect(self, netid):\n if netid in _k.networks:\n pagename = \"network%d\" % self._page_serial\n self._page_serial += 1\n net = _k.networks[netid]\n page = self._notebook.add(pagename, label=net.name)\n page._netframe = NetworkFrame(page, pagename=pagename, network=net,\n netid=netid)\n page._netframe.pack(fill=Tix.BOTH, expand=True)", "def get_road_network(self, root):\n road_network = etree.SubElement(root, \"RoadNetwork\")\n road_network_logic_file = etree.SubElement(road_network, \"LogicFile\")\n road_network_logic_file.set(\"filepath\", self._road_network)\n road_network_scene_graph = etree.SubElement(road_network, \"SceneGraphFile\")\n road_network_scene_graph.set(\"filepath\", \"\")", "def _build_networks(self):\n self.online_convnet = self._create_network(name='Online')\n self.target_convnet = self._create_network(name='Target')\n\n # Compute the Q-values which are used for action selection in the current\n # state.\n self._net_outputs = self.online_convnet(self.state_ph,\n self.num_quantile_samples)\n # Shape of self._net_outputs.quantile_values:\n # num_quantile_samples x num_actions.\n # e.g. if num_actions is 2, it might look something like this:\n # Vals for Quantile .2 Vals for Quantile .4 Vals for Quantile .6\n # [[0.1, 0.5], [0.15, -0.3], [0.15, -0.2]]\n # Q-values = [(0.1 + 0.15 + 0.15)/3, (0.5 + 0.15 + -0.2)/3].\n self._q_values = tf.reduce_mean(self._net_outputs.quantile_values, axis=0)\n self._q_argmax = tf.argmax(self._q_values, axis=0)\n self._policy_logits = tf.nn.softmax(self._q_values / self.tau, axis=0)\n self._stochastic_action = tf.random.categorical(\n self._policy_logits[None, Ellipsis],\n num_samples=1,\n dtype=tf.int32)[0][0]\n\n self._replay_net_outputs = self.online_convnet(self._replay.states,\n self.num_tau_samples)\n # Shape: (num_tau_samples x batch_size) x num_actions.\n self._replay_net_quantile_values = self._replay_net_outputs.quantile_values\n self._replay_net_quantiles = self._replay_net_outputs.quantiles\n\n # Do the same for next states in the replay buffer.\n self._replay_net_target_outputs = self.target_convnet(\n self._replay.next_states, self.num_tau_prime_samples)\n # Shape: (num_tau_prime_samples x batch_size) x num_actions.\n vals = self._replay_net_target_outputs.quantile_values\n self._replay_net_target_quantile_values = vals\n\n # Compute Q-values which are used for action selection for the states and\n # next states in the replay buffer.\n target_next_action = self.target_convnet(self._replay.next_states,\n self.num_quantile_samples)\n target_action = self.target_convnet(self._replay.states,\n self.num_quantile_samples)\n\n # Shape: (num_quantile_samples x batch_size) x num_actions.\n target_next_quantile_values_action = target_next_action.quantile_values\n # Shape: num_quantile_samples x batch_size x num_actions.\n target_next_quantile_values_action = tf.reshape(\n target_next_quantile_values_action,\n [self.num_quantile_samples, self._replay.batch_size, self.num_actions])\n\n # Shape: (num_quantile_samples x batch_size) x num_actions.\n target_quantile_values_action = target_action.quantile_values\n # Shape: num_quantile_samples x batch_size x num_actions.\n target_quantile_values_action = tf.reshape(target_quantile_values_action,\n [self.num_quantile_samples,\n self._replay.batch_size,\n self.num_actions])\n # Shape: batch_size x num_actions.\n self._replay_next_target_q_values = tf.squeeze(tf.reduce_mean(\n target_next_quantile_values_action, 
axis=0))\n self._replay_target_q_values = tf.squeeze(tf.reduce_mean(\n target_quantile_values_action, axis=0))\n\n self._replay_next_qt_argmax = tf.argmax(\n self._replay_next_target_q_values, axis=1)", "def main():\n root = Node(1)\n root.left = Node(2)\n root.right = Node(3)\n root.left.left = Node(4)\n root.left.right = Node(5)\n root.right.left = Node(6)\n root.right.right = Node(7)\n\n v = View()\n v.top_view(root)" ]
[ "0.69119674", "0.65324295", "0.64664465", "0.6227124", "0.61709285", "0.6079168", "0.5990852", "0.59398776", "0.5891403", "0.58894837", "0.587379", "0.5862538", "0.5839957", "0.5753144", "0.5751551", "0.567196", "0.56632817", "0.5620824", "0.5600299", "0.5599422", "0.55967724", "0.5559049", "0.5557225", "0.5542804", "0.5528389", "0.55277216", "0.55269456", "0.5515937", "0.55064803", "0.55057186", "0.54986036", "0.54943496", "0.54733694", "0.54712224", "0.5454754", "0.54541415", "0.5444814", "0.544335", "0.5440842", "0.54137915", "0.5407675", "0.5405669", "0.5395992", "0.5381868", "0.5380192", "0.53793776", "0.5377544", "0.53751093", "0.5371342", "0.53711087", "0.53631437", "0.5362675", "0.535525", "0.53541344", "0.5352095", "0.5341852", "0.53412837", "0.5339762", "0.53311694", "0.53286326", "0.5322611", "0.53198105", "0.5319599", "0.5288145", "0.52844566", "0.52841455", "0.5278414", "0.52747184", "0.5267791", "0.5259554", "0.52550757", "0.52549404", "0.52500874", "0.5243021", "0.5239114", "0.52367246", "0.5230219", "0.52220464", "0.5220522", "0.5218421", "0.52107006", "0.5206843", "0.5206843", "0.5206843", "0.520652", "0.5205265", "0.52046305", "0.5204621", "0.52033657", "0.52020663", "0.5192909", "0.51857615", "0.51797163", "0.51699114", "0.5164403", "0.51612633", "0.5158808", "0.5156502", "0.5155438", "0.51525867", "0.51519614" ]
0.0
-1
Guard the given spatial reference object against axis swapping, when running with GDAL 3. Does nothing if GDAL < 3. Modifies the object in place.
def preventGdal3axisSwap(sr):
    if hasattr(sr, 'SetAxisMappingStrategy'):
        sr.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def swap_axis(ogr_geom):\n\n osr_sref = ogr_geom.GetSpatialReference()\n sref = SpatialRef.from_osr(osr_sref)\n if (sref.epsg == 4326) and GDAL_3_ENABLED and (osr_sref.GetAxisMappingStrategy() == 1):\n ogr_geom.SwapXY()\n osr_sref.SetAxisMappingStrategy(0)\n ogr_geom.AssignSpatialReference(osr_sref)\n\n return ogr_geom", "def force_axis_mapping(ogr_geom):\n\n osr_sref = ogr_geom.GetSpatialReference()\n sref = SpatialRef.from_osr(osr_sref)\n if sref.epsg == 4326:\n if GDAL_3_ENABLED:\n osr_sref.SetAxisMappingStrategy(0)\n ogr_geom.AssignSpatialReference(osr_sref)\n\n return ogr_geom", "def ge_inplace(a,b):", "def gdal_compliant(\n self, rename_dims=True, force_sn=False\n ) -> xr.DataArray | xr.Dataset:\n obj_out = self._obj\n crs = obj_out.raster.crs\n if (\n obj_out.raster.res[1] < 0 and force_sn\n ): # write data with South -> North orientation\n obj_out = obj_out.raster.flipud()\n x_dim, y_dim, x_attrs, y_attrs = gis_utils.axes_attrs(crs)\n if rename_dims:\n obj_out = obj_out.rename(\n {obj_out.raster.x_dim: x_dim, obj_out.raster.y_dim: y_dim}\n )\n else:\n x_dim = obj_out.raster.x_dim\n y_dim = obj_out.raster.y_dim\n obj_out[x_dim].attrs.update(x_attrs)\n obj_out[y_dim].attrs.update(y_attrs)\n obj_out = obj_out.drop_vars([\"spatial_ref\"], errors=\"ignore\")\n obj_out.rio.write_crs(crs, inplace=True)\n obj_out.rio.write_transform(obj_out.raster.transform, inplace=True)\n obj_out.raster.set_spatial_dims()\n\n return obj_out", "def mask_if_less(tobject, tobject_ref):\n\n assert len(tobject) == len(tobject_ref)\n\n _new_tobject = _ROOTObjectFunctions._project_or_clone(tobject, \"e\")\n _new_tobject_ref = _ROOTObjectFunctions._project_or_clone(tobject_ref, \"e\")\n\n for _bin_proxy, _bin_proxy_ref in zip(_new_tobject, _new_tobject_ref):\n if hasattr(_bin_proxy, 'graph_'):\n # for TGraph etc.\n if _bin_proxy.y < _bin_proxy_ref.y:\n _bin_proxy.y.value = 0\n _bin_proxy.y.error_hi = 0\n # 'low' error setter has a bug in rootpy. workaround:\n _bin_proxy.graph_.SetPointEYlow(_bin_proxy.idx_, 0)\n else:\n # for TH1D etc.\n if _bin_proxy.value < _bin_proxy_ref.value:\n _bin_proxy.value, _bin_proxy.error = 0, 0\n\n # cleanup\n _new_tobject_ref.Delete()\n\n return _new_tobject", "def apply_inplace_patches(rec):\n\n # The following is a patch for session files made with glue 0.15.* or\n # earlier that were read in with a developer version of glue for part of\n # the 0.16 development cycle, and re-saved. Essentially, if coords is set\n # to the default identity Coordinates class, we need to make sure we\n # always preserve the world coordinate components, and we do that by\n # setting force_coords to True.\n for key, value in rec.items():\n if value['_type'] == 'glue.core.data.Data':\n if 'coords' in value and value['coords'] is not None:\n coords = rec[value['coords']]\n if coords['_type'] == 'glue.core.coordinates.Coordinates':\n for cid, comp in value['components']:\n if 'log' in rec[comp]:\n load_log = rec[rec[comp]['log']]\n if 'force_coords' not in load_log:\n load_log['force_coords'] = True\n\n # The following accounts for the addition of the degree mode to the\n # full-sphere projection. Originally, this was only used for polar mode\n # and so the `coords` parameter was not needed. 
If this is not present,\n # the plot is polar and we can set coords to be ['x']\n if value['_type'] == 'glue.core.roi_pretransforms.RadianTransform':\n if 'state' in value and value['state'] is not None:\n state = value['state']\n if 'contents' in state and state['contents'] is not None:\n contents = state['contents']\n if 'st__coords' not in contents:\n contents['st__coords'] = ['x']", "def or__inplace(a,b):", "def reproject_coordinates(x_in, y_in, spatial_reference_source, spatial_reference_target=None): \n if spatial_reference_target is not None:\n pass\n else:\n spatial_reference_target = osr.SpatialReference()\n spatial_reference_target.ImportFromEPSG(4326) \n pass\n \n if int(osgeo.__version__[0]) >= 3:\n # GDAL 3 changes axis order: https://github.com/OSGeo/gdal/issues/1546\n \n spatial_reference_source.SetAxisMappingStrategy(osgeo.osr.OAMS_TRADITIONAL_GIS_ORDER)\n spatial_reference_target.SetAxisMappingStrategy(osgeo.osr.OAMS_TRADITIONAL_GIS_ORDER)\n\n \n pTransform = osr.CoordinateTransformation( spatial_reference_source, spatial_reference_target)\n \n x_new,y_new, z = pTransform.TransformPoint( x_in,y_in)\n \n return x_new,y_new", "def _sync_gripped_object(self, gripped_object_id):\n if gripped_object_id != -1:\n agent_body_transformation = (\n self._default_agent.scene_node.transformation\n )\n self.set_transformation(\n agent_body_transformation, gripped_object_id\n )\n translation = agent_body_transformation.transform_point(\n np.array([0, 2.0, 0])\n )\n self.set_translation(translation, gripped_object_id)", "def orthonormalize_inplace(self):\n Q = np.linalg.qr(self.components.T)[0].T\n self.components[...] = Q", "def clean_input_references(self):\n new_x = []\n new_y = []\n for i, x_i in enumerate(self.x):\n if os.path.exists(self.source_path / x_i):\n y_i = self.y[i]\n new_x.append(x_i)\n new_y.append(y_i)\n self.x = new_x\n self.y = new_y", "def is_inplace_def(*args):\n return _ida_hexrays.is_inplace_def(*args)", "def ensure_geo_reference(origin):\r\n\r\n if isinstance(origin, Geo_reference):\r\n geo_ref = origin\r\n elif origin is None:\r\n geo_ref = None\r\n else:\r\n geo_ref = apply(Geo_reference, origin)\r\n\r\n return geo_ref", "def _force_rescale(self, setpoint_x, setpoint_y):", "def _set_coords_inplace(self,coords):\n if isinstance(coords,Coords) and coords.shape == self.coords.shape:\n self.coords = coords\n return self\n else:\n raise ValueError,\"Invalid reinitialization of Geometry coords\"", "def composes_inplace_with(self):\n pass", "def _is_inplace(n: Node):\n inplace = False\n if n.op == \"call_function\":\n inplace = n.kwargs.get(\"inplace\", False)\n elif n.op == \"call_module\":\n inplace = getattr(n.graph.owning_module.get_submodule(n.target), \"inplace\", False)\n return inplace", "def reproject(self, spatial_reference, transformation=None, inplace=False):\r\n if HASARCPY:\r\n if isinstance(spatial_reference, arcpy.SpatialReference):\r\n wkt = spatial_reference.exportToString()\r\n wkid = spatial_reference.factoryCode\r\n if wkid:\r\n sr = _types.SpatialReference({'wkid' : wkid})\r\n elif wkt:\r\n sr = _types.SpatialReference({'wkt': wkt})\r\n else:\r\n sr = None\r\n elif isinstance(spatial_reference, int):\r\n sr = _types.SpatialReference({'wkid' : spatial_reference})\r\n elif isinstance(spatial_reference, string_types):\r\n sr = _types.SpatialReference({'wkt' : spatial_reference})\r\n elif isinstance(spatial_reference, _types.SpatialReference):\r\n sr = spatial_reference\r\n else:\r\n raise ValueError(\"spatial_referernce must be of type: int, string, 
_types.SpatialReference, or arcpy.SpatialReference\")\r\n\r\n if inplace:\r\n df = self\r\n else:\r\n df = self.copy()\r\n sarcpy = sr.as_arcpy\r\n if sarcpy:\r\n geom = df.geometry.project_as(sarcpy, transformation)\r\n geom.sr = sr\r\n df.geometry = geom\r\n if inplace:\r\n return df\r\n else:\r\n raise Exception(\"could not reproject the dataframe.\")\r\n return df", "def le_inplace(a,b):", "def OrientObject(object_id, reference, target, flags=0):\n object_id = rhutil.coerceguid(object_id, True)\n from_array = rhutil.coerce3dpointlist(reference)\n to_array = rhutil.coerce3dpointlist(target)\n if from_array is None or to_array is None:\n raise ValueError(\"Could not convert reference or target to point list\")\n from_count = len(from_array)\n to_count = len(to_array)\n if from_count<2 or to_count<2: raise Exception(\"point lists must have at least 2 values\")\n\n copy = ((flags & 1) == 1)\n scale = ((flags & 2) == 2)\n xform_final = None\n if from_count>2 and to_count>2:\n #Orient3Pt\n from_plane = Rhino.Geometry.Plane(from_array[0], from_array[1], from_array[2])\n to_plane = Rhino.Geometry.Plane(to_array[0], to_array[1], to_array[2])\n if not from_plane.IsValid or not to_plane.IsValid:\n raise Exception(\"unable to create valid planes from point lists\")\n xform_final = Rhino.Geometry.Transform.PlaneToPlane(from_plane, to_plane)\n else:\n #Orient2Pt\n xform_move = Rhino.Geometry.Transform.Translation( to_array[0]-from_array[0] )\n xform_scale = Rhino.Geometry.Transform.Identity\n v0 = from_array[1] - from_array[0]\n v1 = to_array[1] - to_array[0]\n if scale:\n len0 = v0.Length\n len1 = v1.Length\n if len0<0.000001 or len1<0.000001: raise Exception(\"vector lengths too short\")\n scale = len1 / len0\n if abs(1.0-scale)>=0.000001:\n plane = Rhino.Geometry.Plane(from_array[0], v0)\n xform_scale = Rhino.Geometry.Transform.Scale(plane, scale, scale, scale)\n v0.Unitize()\n v1.Unitize()\n xform_rotate = Rhino.Geometry.Transform.Rotation(v0, v1, from_array[0])\n xform_final = xform_move * xform_scale * xform_rotate\n rc = scriptcontext.doc.Objects.Transform(object_id, xform_final, not copy)\n if rc==System.Guid.Empty: return scriptcontext.errorhandler()\n scriptcontext.doc.Views.Redraw()\n return rc", "def test_destroy_map4(self):\r\n Z = shared(self.rand(2, 2), name='Z')\r\n A = shared(self.rand(2, 2), name='A')\r\n one = T.constant(1.0).astype(Z.dtype)\r\n f = inplace_func([], gemm_inplace(Z, one, A, A, one))\r\n f()\r\n f = inplace_func([], gemm_inplace(Z, one, A, A.T, one))\r\n f()", "def assert_inplace_is_respected(\n inplace: bool, input_block: Block, transformed_block: Block\n):\n if inplace:\n # Note that this is not a strict requirement,\n # as \"allow_inplace\" does not mandate inplace modification,\n # but this test utility is specifically aimed for middleware\n # which do support inplace modification (e.g. 
for performance reasons)\n assert transformed_block is input_block\n else:\n assert transformed_block is not input_block", "def ModifiedShape(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_RemoveLocations_ModifiedShape(self, *args)", "def transfer_shape(source_object, target_objects, replace = True):\n if len(target_objects) == 0:\n return False\n shapes_list = []\n if source_object.type() == 'transform':\n shapes_list = source_object.getShapes(noIntermediate = True)\n if not shapes_list:\n return False\n\n for tgt in target_objects:\n if replace:\n pm.delete(tgt.getShapes(noIntermediate = True))\n for shp in shapes_list:\n new_shp = pm.duplicate(shp, addShape = True)[0]\n new_shp.setParent(tgt, relative = True, shape = True)\n new_shp.rename(tgt.nodeName(stripNamespace = True) + 'Shape')\n\n return True", "def test_destroy_map2(self):\r\n Z = as_tensor_variable(self.rand(2, 2))\r\n A = as_tensor_variable(self.rand(2, 2))\r\n try:\r\n gemm_inplace(Z, 1.0, inplace.transpose_inplace(Z), A, 1.0)\r\n except InconsistencyError, e:\r\n if exc_message(e) == Gemm.E_z_uniq:\r\n return\r\n self.fail()", "def test_destroy_map1(self):\r\n Z = as_tensor_variable(self.rand(2, 2))\r\n A = as_tensor_variable(self.rand(2, 2))\r\n try:\r\n gemm_inplace(Z, 1.0, A, inplace.transpose_inplace(Z), 1.0)\r\n except InconsistencyError, e:\r\n if exc_message(e) == Gemm.E_z_uniq:\r\n return\r\n self.fail()", "def sync_update(rpr_context, obj: bpy.types.Object, is_updated_geometry, is_updated_transform, **kwargs):\n\n log(\"sync_update\", obj)\n\n obj_key = object.key(obj)\n rpr_shape = rpr_context.objects.get(obj_key, None)\n if not rpr_shape:\n sync(rpr_context, obj, **kwargs)\n return True\n\n if is_updated_geometry:\n rpr_context.remove_object(obj_key)\n sync(rpr_context, obj)\n return True\n\n if is_updated_transform:\n rpr_shape.set_transform(object.get_transform(obj))\n return True\n\n material_override = kwargs.get('material_override', None)\n return mesh.assign_materials(rpr_context, rpr_shape, obj, material_override=material_override)", "def Shape(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_UnifySameDomain_Shape(self, *args)", "def and__inplace(a,b):", "def flipObject(self, object, vertical, horizontal):\n try:\n # is object a grid reference?\n row = object[0]\n column = object[1]\n except TypeError:\n flipped = pygame.transform.flip(object, vertical, horizontal)\n return flipped\n flipped = pygame.transform.flip(self.getObject(row, column), vertical,\n horizontal)\n self.addObject(flipped, row, column)\n return flipped", "def _reference(self, hit):\n hit = hit.deepcopy()\n hit[\"x\"] = 0.\n hit[\"px\"] = 0.\n return hit", "def swap(self, *args):\n return _osgAnimation.mapVertexInfluence_swap(self, *args)", "def fix(self):\n gAsset = cmds.ls(type='gAsset')\n\n trans = cmds.listRelatives(gAsset[0], p=True)\n meshes = cmds.listRelatives(trans, ad=True, type='mesh')\n for mesh in meshes:\n if mesh:\n try:\n cmds.addAttr(mesh, ln=\"grid_renderGeo\", at='double', dv=1)\n cmds.setAttr(\n '{0}.grid_renderGeo'.format(mesh), e=False, keyable=False, lock=True)\n except:\n pass\n\n self.run()", "def add_inplace(a, b):", "def test_destroy_map3(self):\r\n Z = as_tensor_variable(self.rand(2, 2))\r\n A = as_tensor_variable(self.rand(2, 2))\r\n try:\r\n gemm_inplace(Z, 1.0, Z, A, 1.0)\r\n except InconsistencyError, e:\r\n if exc_message(e) == Gemm.E_z_uniq:\r\n return\r\n self.fail()", "def toggle_geom(self,event):\n \n geom=self.winfo_geometry()\n print(geom,self._geom)\n self.geometry(self._geom)\n 
self._geom=geom", "def test_destroy_map0(self):\r\n Z = as_tensor_variable(self.rand(2, 2))\r\n try:\r\n gemm_inplace(Z, 1.0, Z, Z, 1.0)\r\n except InconsistencyError, e:\r\n if exc_message(e) == Gemm.E_z_uniq:\r\n return\r\n self.fail()", "def _process_references0(self, references):\n if \"zarr_consolidated_format\" in references:\n # special case for Ike prototype\n references = _unmodel_hdf5(references)\n self.references = references", "def __init__(self, xarray_obj: xr.DataArray | xr.Dataset) -> None:\n self._obj = xarray_obj\n # create new coordinate with attributes in which to save x_dim, y_dim and crs.\n # other spatial properties are always calculated on the fly to ensure\n # consistency with data\n if GEO_MAP_COORD not in self._obj.coords:\n # zero is used by rioxarray\n self._obj.coords[GEO_MAP_COORD] = xr.Variable((), 0)", "def remember(self, grid_x, grid_y, obj):\n self.memory[grid_x][grid_y] = obj", "def lhs_name_transform_inplace(self, name_map):\n\n dep = self._dependent_variable\n self._dependent_variable = name_map.get(dep, dep)\n\n indep = self._independent_variable\n self._independent_variable = name_map.get(indep, indep)", "def warn_inplace(exc, nav, repl_pairs, local_opt):\r\n if isinstance(exc, InconsistencyError):\r\n return\r\n return NavigatorOptimizer.warn(exc, nav, repl_pairs, local_opt)", "def flip(self, axes=None, inplace=False, i=False):\n d = _inplace_enabled_define_and_cleanup(self)\n super(DimensionCoordinate, d).flip(axes=axes, inplace=True)\n\n direction = d._custom.get(\"direction\")\n if direction is not None:\n d._custom[\"direction\"] = not direction\n\n return d", "def reorient_obj(obj, step_ang, plane):\n start_angle = 0\n end_angle = math.pi / 2\n min_area = math.inf\n best_angle = 0\n start_axis = array.array(\"d\", obj.Centroid)\n end_axis = []\n index = [0] * 3\n\n if plane == \"xy\":\n index = [1, 1, 0]\n end_axis = array.array(\"d\", [obj.Centroid[0], obj.Centroid[1], obj.Centroid[2] + 1])\n elif plane == \"xz\":\n index = [1, 0, 1]\n end_axis = array.array(\"d\", [obj.Centroid[0], obj.Centroid[1] + 1, obj.Centroid[2]])\n elif plane == \"yz\":\n index = [0, 1, 1]\n end_axis = array.array(\"d\", [obj.Centroid[0] + 1, obj.Centroid[1], obj.Centroid[2]])\n\n min_pt, max_pt = obj.GetBoundingBox()\n # projecting the points to the plane\n project_points_to_plane(min_pt, max_pt, index)\n while start_angle <= end_angle:\n obj.Rotate3D(start_axis, end_axis, step_ang)\n # compute the area\n dims = [(max_pt[0] - min_pt[0]), (max_pt[1] - min_pt[1]), (max_pt[2] - min_pt[2])]\n curr_area = 1\n for dim in dims:\n if dim > 0:\n curr_area *= dim\n if curr_area < min_area:\n min_area = curr_area\n best_angle = start_angle\n start_angle += step_ang\n min_pt, max_pt = obj.GetBoundingBox()\n # projecting the points to the plane\n project_points_to_plane(min_pt, max_pt, index)\n # rotate the object using the best angle\n obj.Rotate3D(start_axis, end_axis, best_angle)", "def toggle_geom(self,event):\n geom=self.master.winfo_geometry()\n self.master.geometry(self._geom)\n self._geom=geom", "def define_reference_frame(self, temporal=None, geospatial=None):", "def define_reference_frame(self, temporal=None, geospatial=None):", "def toggleDisplacement(*args, **kwargs)->None:\n pass", "def mirror_gun (glider_gun):\n return glider_gun[2] (-1, 0, swap_xy)", "def simplify(self, tolerance, preserve_topology=...): # -> BaseGeometry:\n ...", "def test_inplace_set_value(self):\r\n dtype = self.dtype\r\n if dtype is None:\r\n dtype = theano.config.floatX\r\n\r\n shp = 
(100/4,1024)#100KB\r\n\r\n x = numpy.zeros(shp, dtype=dtype)\r\n x = self.cast_value(x)\r\n x_shared = self.shared_constructor(x, borrow=True)\r\n\r\n old_data = x_shared.container.storage[0]\r\n nd = numpy.ones(shp, dtype=dtype)\r\n\r\n if x.__class__.__name__ != 'csr_matrix':\r\n #sparse matrix don't support inplace affectation\r\n x_shared.container.value[:] = nd\r\n assert (numpy.asarray(x_shared.get_value(borrow=True))==nd).all()\r\n #This should always share value!\r\n assert may_share_memory(old_data, x_shared.container.storage[0])\r\n assert may_share_memory(old_data, x_shared.get_value(borrow=True, return_internal_type=True))\r\n\r\n nd[0]+=1\r\n x_shared.container.value[0] = nd[0]\r\n assert (numpy.asarray(x_shared.get_value(borrow=True)[0])==nd[0]).all()\r\n assert (numpy.asarray(x_shared.get_value(borrow=True)[1:])==nd[1:]).all()\r\n #This should always share value!\r\n assert may_share_memory(old_data, x_shared.container.storage[0])\r\n assert may_share_memory(old_data, x_shared.get_value(borrow=True, return_internal_type=True))\r\n\r\n if x.__class__.__name__ != 'csr_matrix':\r\n #sparse matrix don't support inplace affectation\r\n nd += 1\r\n #THIS DON't DO WHAT WE EXPECT the contain of a is not updated for CudaNdarray, but it is for ndarray\r\n x_shared.get_value(borrow=True)[:] = nd\r\n #assert (numpy.asarray(x_shared.get_value(borrow=True))!=nd).all()\r\n assert may_share_memory(old_data, x_shared.container.storage[0])\r\n x_shared.get_value(borrow=True)\r\n\r\n # Test by set_value with borrow=False\r\n nd += 1\r\n old_data = x_shared.container.storage[0]\r\n x_shared.set_value(nd, borrow=False)\r\n assert numpy.allclose(self.ref_fct(x_shared.get_value(borrow=True)),\r\n self.ref_fct(self.cast_value(nd)))\r\n assert may_share_memory(old_data, x_shared.container.storage[0]) == self.set_value_inplace\r\n\r\n # Test by set_value with borrow=False when new data cast.\r\n # specificaly useful for gpu data\r\n nd += 1\r\n old_data = x_shared.container.storage[0]\r\n x_shared.set_value(self.cast_value(nd), borrow=False)\r\n assert numpy.allclose(self.ref_fct(x_shared.get_value(borrow=True)),\r\n self.ref_fct(self.cast_value(nd)))\r\n assert may_share_memory(old_data, x_shared.container.storage[0]) == self.set_cast_value_inplace\r\n\r\n # Test by set_value with borrow=True\r\n nd += 1\r\n old_data = x_shared.container.storage[0]\r\n x_shared.set_value(nd.copy(), borrow=True)\r\n assert numpy.allclose(self.ref_fct(x_shared.get_value(borrow=True)),\r\n self.ref_fct(self.cast_value(nd)))\r\n assert may_share_memory(old_data, x_shared.container.storage[0]) == self.set_value_inplace\r\n\r\n # Test by set_value with borrow=True when new data cast.\r\n nd += 1\r\n old_data = x_shared.container.storage[0]\r\n x_shared.set_value(self.cast_value(nd.copy()), borrow=True)\r\n assert numpy.allclose(self.ref_fct(x_shared.get_value(borrow=True)), self.ref_fct(self.cast_value(nd)))\r\n assert may_share_memory(old_data, x_shared.container.storage[0]) == self.set_cast_value_inplace", "def displacementToPoly(*args, findBboxOnly: bool=True, q=True, query=True, e=True, edit=True,\n **kwargs)->Union[bool, Any]:\n pass", "def restore_backup_shape(self):\n\n self.shape = self.shape_backup", "def lt_inplace(a,b):", "def translate(self, vector):\n if self.blender_object:\n self.blender_object.location = vector", "def replace(self, csys, whichin, whichout):\n return _coordsys.coordsys_replace(self, csys, whichin, whichout)", "def freezeObjectsTransforms(self):\n\t\tmc.makeIdentity( self.objects, apply = 
True, t = 1, r = 1, s = 1, n = 0, pn = 1 )", "def gt_inplace(a,b):", "def _swap_xy_data(data_obj):\n swaps = [\n (\"x\", \"y\"),\n (\"x0\", \"y0\"),\n (\"dx\", \"dy\"),\n (\"xbins\", \"ybins\"),\n (\"nbinsx\", \"nbinsy\"),\n (\"autobinx\", \"autobiny\"),\n (\"error_x\", \"error_y\"),\n ]\n for swap in swaps:\n _swap_keys(data_obj, swap[0], swap[1])\n try:\n rows = len(data_obj[\"z\"])\n cols = len(data_obj[\"z\"][0])\n for row in data_obj[\"z\"]:\n if len(row) != cols:\n raise TypeError\n\n # if we can't do transpose, we hit an exception before here\n z = data_obj.pop(\"z\")\n data_obj[\"z\"] = [[0 for rrr in range(rows)] for ccc in range(cols)]\n for iii in range(rows):\n for jjj in range(cols):\n data_obj[\"z\"][jjj][iii] = z[iii][jjj]\n except (KeyError, TypeError, IndexError) as err:\n warn = False\n try:\n if data_obj[\"z\"] is not None:\n warn = True\n if len(data_obj[\"z\"]) == 0:\n warn = False\n except (KeyError, TypeError):\n pass\n if warn:\n warnings.warn(\n \"Data in this file required an 'xy' swap but the 'z' matrix \"\n \"in one of the data objects could not be transposed. Here's \"\n \"why:\\n\\n{}\".format(repr(err))\n )", "def revive(self, place_rect):\n self.initialize_components()\n\n self.image = self._graphics_comp.get_image()\n self.rect = place_rect(self.image.get_rect())\n\n self.velocity = (0, 0)\n\n self.add(self.__class__.groups)", "def quickMirror(objArray=None, upVector=[0,0,1], axis='X'):\n if objArray is None:\n objArray=pm.ls(sl=1)\n for obj in objArray:\n nSplit=libName.nameSplit(obj)\n if nSplit[-1][0] == 'L':\n nSplit[-1][0]='R'\n elif nSplit[-1][0] == 'R':\n nSplit[-1][0]='L'\n else:\n print 'obj \"%s\" has been skipped cause prefix is neither \"L\" nor \"R\"'\n break\n\n mirrorObj=libName.nameRevertOriginal(splitName=nSplit)\n if pm.objExists(mirrorObj) == 0:\n print 'obj %s doesnt Exists. 
Mirrorring Skipped!!!!'%(mirrorObj)\n\n else:\n loc=pm.spaceLocator(n=obj+'_tmpLocQuickMirror')\n locUp=pm.spaceLocator(n=obj+'_tmpLocQuickMirrorAim')\n locAim=pm.spaceLocator(n=obj+'_tmpLocQuickMirrorUp')\n mloc=pm.spaceLocator(n=obj+'_tmpLocQuickMirrorMirror')\n\n snap(driver=obj, driven=loc)\n snap(driver=obj, driven=mloc)\n pm.parent(locUp, locAim, loc)\n locAim.attr('t').set([1,0,0])\n locUp.attr('t').set(upVector)\n grpIn('mirrorGrpTmp', loc)\n\n pm.setAttr('mirrorGrpTmp.scale'+axis, -1)\n\n mloc.attr('translate'+axis).set( mloc.attr('translate'+axis).get() * -1 )\n\n aimCon=pm.aimConstraint(locAim, mloc, aimVector=[1,0,0], upVector=upVector, worldUpObject=locUp, worldUpType='object', mo=0)\n snap(driver=mloc, driven=mirrorObj)\n\n pm.delete('mirrorGrpTmp', mloc)", "def any_geom2ogr_geom(geom, osr_sref):\n\n if isinstance(geom, (tuple, list)) and (not isinstance(geom[0], (tuple, list))) and \\\n (len(geom) == 4) and osr_sref:\n geom_ogr = geometry.bbox2polygon(geom, osr_sref)\n geom_ogr = swap_axis(geom_ogr) # ensure lon lat order\n elif isinstance(geom, (tuple, list)) and (isinstance(geom[0], (tuple, list))) and \\\n (len(geom) == 2) and osr_sref:\n edge = ogr.Geometry(ogr.wkbLinearRing)\n geom = [geom[0], (geom[0][0], geom[1][1]), geom[1], (geom[1][0], geom[0][1])]\n for point in geom:\n if len(point) == 2:\n edge.AddPoint(float(point[0]), float(point[1]))\n edge.CloseRings()\n geom_ogr = ogr.Geometry(ogr.wkbPolygon)\n geom_ogr.AddGeometry(edge)\n geom_ogr.AssignSpatialReference(osr_sref)\n geom_ogr = force_axis_mapping(geom_ogr)\n elif isinstance(geom, (tuple, list)) and isinstance(geom[0], (tuple, list)) and osr_sref:\n edge = ogr.Geometry(ogr.wkbLinearRing)\n for point in geom:\n if len(point) == 2:\n edge.AddPoint(float(point[0]), float(point[1]))\n edge.CloseRings()\n geom_ogr = ogr.Geometry(ogr.wkbPolygon)\n geom_ogr.AddGeometry(edge)\n geom_ogr.AssignSpatialReference(osr_sref)\n geom_ogr = force_axis_mapping(geom_ogr)\n elif isinstance(geom, shapely.geometry.Polygon):\n geom_ogr = ogr.CreateGeometryFromWkt(geom.wkt)\n geom_ogr.AssignSpatialReference(osr_sref)\n geom_ogr = swap_axis(geom_ogr) # ensure lon lat order\n elif isinstance(geom, ogr.Geometry):\n geom_sref = geom.GetSpatialReference()\n if geom_sref is None:\n geom.AssignSpatialReference(osr_sref)\n geom_ogr = geom\n geom_ogr = swap_axis(geom_ogr) # ensure lon lat order\n else:\n raise GeometryUnkown(geom)\n\n return geom_ogr", "def fl_deactivate_object(ptr_flobject):\n _fl_deactivate_object = library.cfuncproto(\n library.load_so_libforms(), \"fl_deactivate_object\",\\\n None, [cty.POINTER(xfdata.FL_OBJECT)],\\\n \"\"\"void fl_deactivate_object(FL_OBJECT * ob)\"\"\")\n library.check_if_flinitialized()\n library.verify_flobjectptr_type(ptr_flobject)\n library.keep_elem_refs(ptr_flobject)\n _fl_deactivate_object(ptr_flobject)", "def updateBindPose():\n\n dag = pmc.dagPose(q=True, bindPose=True)\n objects = pmc.dagPose(dag, q=True, members=True)\n for obj in objects:\n pmc.dagPose(obj, reset=True, name=dag[0])", "def applyGravity(o):\n if not o._tryShift(o.block,Point(0,-1)): o._setBlock(o.block)", "def reference_position(\n self, find_surface=False, reference_object_id=None, verbose=True\n ):\n self.not_implemented(\"reference_position\")", "def _ensure_exists(self, name, shape):\n ident = name.lower()\n internal = self._internals.get(ident, None)\n if internal is None:\n internal = Internal(name, shape)\n self._internals[ident] = internal\n return internal", "def unmap(widget):\n result = False\n if widget and 
widget.winfo_exists() and widget.winfo_ismapped():\n result = True\n geom_mgr = widget.winfo_manager()\n if geom_mgr == \"grid\":\n widget.grid_forget()\n elif geom_mgr == \"pack\":\n widget.pack_forget()\n elif geom_mgr == \"place\":\n widget.place_forget()\n else:\n result = False\n return result", "def _update_proxy(self, change):\n if change['type'] == 'container':\n #: Only update what's needed\n self.proxy.update_points(change)\n else:\n super(MapPolygon, self)._update_proxy(change)", "def rotateAlignToWorld( alignObject, primaryAxisVector = [1, 0, 0], worldAxisToKeep = ['x'], alignTwist = True ):\n \n # normalize primary axis vector\n primaryAxisVectorVec = vector.makeMVector( primaryAxisVector )\n primaryAxisVectorVec.normalize()\n primaryAxisVector = [ primaryAxisVectorVec.x, primaryAxisVectorVec.y, primaryAxisVectorVec.z ]\n \n \n axisSign = primaryAxisVector[0] + primaryAxisVector[1] + primaryAxisVector[2]\n \n primaryAxisVectorVecMove = primaryAxisVector[:]\n primaryAxisVector = [ abs( primaryAxisVector[0] ), abs( primaryAxisVector[1] ), abs( primaryAxisVector[2] ) ]\n \n # prepare align twist vector\n allaxis = ['x', 'y', 'z']\n \n for axiskeep in worldAxisToKeep:\n \n allaxis.remove( axiskeep.lower() )\n \n skipUpAxis = allaxis[0]\n upVectorMove = {'x':[1, 0, 0], 'y':[0, 1, 0], 'z':[0, 0, 1]}[skipUpAxis]\n upVector = {'x':[1 * axisSign, 0, 0], 'y':[0, 1 * axisSign, 0], 'z':[0, 0, 1 * axisSign]}[skipUpAxis]\n \n # prepare align setup\n prefix = name.removeSuffix( alignObject )\n \n alignObjectAim = mc.group( n = prefix + 'alignObjectAim', em = 1, p = alignObject )\n alignObjectAimUp = mc.group( n = prefix + 'alignObjectAimUp', em = 1, p = alignObject )\n mc.move( primaryAxisVectorVecMove[0], primaryAxisVectorVecMove[1], primaryAxisVectorVecMove[2], alignObjectAim, objectSpace = True )\n mc.move( upVectorMove[0], upVectorMove[1], upVectorMove[2], alignObjectAimUp, objectSpace = True )\n mc.parent( [alignObjectAim, alignObjectAimUp], w = 1 )\n mc.delete( mc.pointConstraint( alignObject, alignObjectAim, skip = worldAxisToKeep ) )\n \n if alignTwist:\n \n mc.delete( mc.pointConstraint( alignObject, alignObjectAimUp, skip = skipUpAxis ) )\n \n # rotate object\n mc.delete( mc.aimConstraint( alignObjectAim, alignObject, aim = primaryAxisVector, u = upVector, wut = 'object', wuo = alignObjectAimUp ) )\n mc.delete( alignObjectAim, alignObjectAimUp )", "def _prepare_with_copy(geometry):\n geometry = pygeos.apply(geometry, lambda x: x) # makes a copy\n pygeos.prepare(geometry)\n return geometry", "def __call__(self, geom_obj):\n if not hasattr(geom_obj, \"_frame\"):\n raise ValueError(\"Cannot transform object that has no frame.\")\n elif geom_obj._frame is self:\n return geom_obj\n if geom_obj._frame.name not in self._connected_frames:\n raise ValueError(\"Transformation to frame {geom_obj._frame} is unknown.\")\n\n transformed = self._connected_frames[geom_obj._frame.name] * geom_obj\n transformed._frame = self\n return transformed", "def _fix_genotypes_object(self, genotypes, variant_info):\n # Checking the name (if there were duplications)\n if self.has_index and variant_info.name != genotypes.variant.name:\n if not variant_info.name.startswith(genotypes.variant.name):\n raise ValueError(\"Index file not synced with IMPUTE2 file\")\n genotypes.variant.name = variant_info.name\n\n # Trying to set multi-allelic information\n if self.has_index and self._index_has_location:\n # Location was in the index, so we can automatically set the\n # multi-allelic state of the genotypes\n 
genotypes.multiallelic = variant_info.multiallelic\n\n else:\n # Location was not in the index, so we check one marker before and\n # after the one we found\n logging.warning(\"Multiallelic variants are not detected on \"\n \"unindexed files.\")", "def update_flags(self):\n # view mode, filled vs wirefrom\n if self.view['wireframe']:\n gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_LINE)\n else:\n gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_FILL)\n\n # set fullscreen or windowed\n self.set_fullscreen(fullscreen=self.view['fullscreen'])\n\n # backface culling on or off\n if self.view['cull']:\n gl.glEnable(gl.GL_CULL_FACE)\n else:\n gl.glDisable(gl.GL_CULL_FACE)\n\n # case where we WANT an axis and NO vertexlist\n # is stored internally\n if self.view['axis'] and self._axis is None:\n from .. import creation\n # create an axis marker sized relative to the scene\n axis = creation.axis(origin_size=self.scene.scale / 100)\n # create ordered args for a vertex list\n args = rendering.mesh_to_vertexlist(axis)\n # store the axis as a reference\n self._axis = self.batch.add_indexed(*args)\n\n # case where we DON'T want an axis but a vertexlist\n # IS stored internally\n elif not self.view['axis'] and self._axis is not None:\n # remove the axis from the rendering batch\n self._axis.delete()\n # set the reference to None\n self._axis = None", "def rs_edit_upd(obj):\n verts = [x.co for x in obj.data.vertices]\n if verts[0] != Vector():\n fix = Vector(verts[0])\n for i in range(len(verts)):\n obj.data.vertices[i].co = obj.data.vertices[i].co - fix\n\n obj.data.update()\n obj.location += fix\n verts[1] = (verts[2] + verts[0]) / 2\n verts[3] = (verts[2] + verts[4]) / 2\n verts[5] = (verts[4] + verts[6]) / 2\n verts[7] = (verts[6] + verts[0]) / 2\n for i in range(len(verts)):\n obj.data.vertices[i].co = verts[i]\n\n obj.data.update()", "def toggleShapeVis(self, transform, value):\n\n if cmds.objExists(transform):\n shape = cmds.listRelatives(transform, shapes=True)\n if shape is not None:\n cmds.setAttr(shape[0] + \".v\", lock=False)\n cmds.setAttr(shape[0] + \".v\", value)\n cmds.setAttr(shape[0] + \".v\", lock=True)", "def apply_changes(self):\n self.x = self.buff_x\n self.y = self.buff_y\n self.buff_x = None\n self.buff_y = None", "def postDraw(self, xform=None, bbox=None):\n\n self.modulateTexture.unbindTexture()\n self.clipTexture .unbindTexture()\n self.colourTexture .unbindTexture()\n self.cmapTexture .unbindTexture()", "def _optimizeshape(shape):\n shape.sort()\n if ORDER == 'C':\n shape[:] = shape[::-1]", "def track_reference(space, py_obj, w_obj):\n # XXX looks like a PyObject_GC_TRACK\n assert py_obj.c_ob_refcnt < rawrefcount.REFCNT_FROM_PYPY\n py_obj.c_ob_refcnt += rawrefcount.REFCNT_FROM_PYPY\n w_obj._cpyext_attach_pyobj(space, py_obj)", "def reorient_spatial_axes(\n data_shape: Sequence[int], init_affine: NdarrayOrTensor, target_affine: NdarrayOrTensor\n) -> tuple[np.ndarray, NdarrayOrTensor]:\n init_affine_, *_ = convert_data_type(init_affine, np.ndarray)\n target_affine_, *_ = convert_data_type(target_affine, np.ndarray)\n start_ornt = nib.orientations.io_orientation(init_affine_)\n target_ornt = nib.orientations.io_orientation(target_affine_)\n try:\n ornt_transform = nib.orientations.ornt_transform(start_ornt, target_ornt)\n except ValueError as e:\n raise ValueError(f\"The input affine {init_affine} and target affine {target_affine} are not compatible.\") from e\n new_affine = init_affine_ @ nib.orientations.inv_ornt_aff(ornt_transform, data_shape)\n new_affine, *_ = 
convert_to_dst_type(new_affine, init_affine)\n return ornt_transform, new_affine", "def translate_shape(shape, x_shift, y_shift):", "def reset_object_in_place(self, obj):\n x, y = obj.x, obj.y\n obj_class = obj.__class__.__name__\n spawned = self.spawn_object_of_class(obj_class, x, y)\n if spawned:\n self.app.log('%s reset to class defaults' % obj.name)\n if obj is self.player:\n self.player = spawned\n obj.destroy()", "def testTrivial(self):\n self.doTest(afwGeom.makeIdentityTransform())", "def testTrivial(self):\n self.doTest(afwGeom.makeIdentityTransform())", "def flagUntexturedObject(self, object):\n object.setShaderInput(\"texDisable\", 1, 1, 1, 1)", "def sub_inplace(a, b):", "def hit_object(self):\n obj1 = self.window.get_object_at(self.ball.x, self.ball.y)\n obj2 = self.window.get_object_at(self.ball.x, self.ball.y + BALL_RADIUS*2)\n obj3 = self.window.get_object_at(self.ball.x + BALL_RADIUS*2, self.ball.y)\n obj4 = self.window.get_object_at(self.ball.x + BALL_RADIUS*2, self.ball.y + BALL_RADIUS*2)\n if obj1 is not None and obj1.width == BRICK_WIDTH:\n self.window.remove(obj1)\n self.num_bricks -= 1\n self.__dy = -self.__dy\n elif obj2 is not None and obj2.width == BRICK_WIDTH:\n self.window.remove(obj2)\n self.num_bricks -= 1\n self.__dy = -self.__dy\n elif obj2 is not None and obj2.width == PADDLE_WIDTH:\n self.__dy = -self.__dy\n elif obj3 is not None and obj3.width == BRICK_WIDTH:\n self.window.remove(obj3)\n self.num_bricks -= 1\n self.__dy = -self.__dy\n elif obj4 is not None and obj4.width == BRICK_WIDTH:\n self.window.remove(obj4)\n self.num_bricks -= 1\n self.__dy = -self.__dy\n elif obj4 is not None and obj4.width == PADDLE_WIDTH:\n self.__dy = -self.__dy", "def local_inplace_remove0(node):\r\n # If inplace is not enabled, enable it and replace that op with a\r\n # new op which has inplace enabled\r\n if isinstance(node.op, sparse.Remove0) and not node.op.inplace:\r\n new_op = node.op.__class__(inplace=True)\r\n new_node = new_op(*node.inputs)\r\n return [new_node]\r\n return False", "def normalize_to_ref(tobject, tobject_ref):\n\n _new_tobject = asrootpy(tobject.Clone())\n if tobject.integral():\n _factor = float(tobject_ref.integral()) / float(tobject.integral())\n\n return _new_tobject * _factor\n else:\n return _new_tobject", "def eq_inplace(a,b):", "def _check_do_transform(df, reference_im, affine_obj):\n try:\n crs = getattr(df, 'crs')\n except AttributeError:\n return False # if it doesn't have a CRS attribute\n\n if not crs:\n return False # return False for do_transform if crs is falsey\n elif crs and (reference_im is not None or affine_obj is not None):\n # if the input has a CRS and another obj was provided for xforming\n return True", "def _check_do_transform(df, reference_im, affine_obj):\n try:\n crs = getattr(df, 'crs')\n except AttributeError:\n return False # if it doesn't have a CRS attribute\n\n if not crs:\n return False # return False for do_transform if crs is falsey\n elif crs and (reference_im is not None or affine_obj is not None):\n # if the input has a CRS and another obj was provided for xforming\n return True", "def conflict_check() ->None:\r\n global conflict_space\r\n conflict_space = np.zeros(mShape)\r\n for x in range(shape):\r\n for y in range(shape):\r\n for z in range(y+1, shape):\r\n if example[x, y] == example[x, z]:\r\n conflict_space[x, y] = example[x, y]\r\n conflict_space[x, z] = example[x, z]\r\n if example[y, x] == example[z, x]:\r\n conflict_space[y, x] = example[y, x]\r\n conflict_space[z, x] = example[z, x]", "def 
test_reference_to_array(self):\n arr = numpy.arange(0.0, 10.0, 0.1)\n arr = numpy.reshape(arr, (25, 4))\n vtk_arr = array_handler.array2vtk(arr)\n arr1 = array_handler.vtk2array(vtk_arr)\n # Now make sure these are using the same memory.\n arr[0][0] = 100.0\n self.assertEqual(arr[0][0], arr1[0][0])\n self.assertEqual(arr.shape, arr1.shape)", "def check_extent(self):\n if self.lower_left.x > self.upper_right.x:\n dlx = self.lower_left.x\n self.lower_left.x = self.upper_right.x\n self.upper_right.y = dlx\n\n if self.lower_left.y > self.upper_right.y:\n dly = self.lower_left.y\n self.lower_left.y = self.upper_right.y\n self.upper_right.y = dly", "def refinement(self,ref): \n self.set('patchmesh.refinement',ref)\n return self", "def try_put(self, object_to_put, pos_x, pos_y):\n self.validate_position(pos_x, pos_y)\n if self.map[pos_x][pos_y] is None:\n self.map[pos_x][pos_y] = object_to_put\n else:\n return self.map[pos_x][pos_y]", "def test_prism_layer_invalid_surface_reference(\n dummy_layer,\n): # pylint: disable=redefined-outer-name\n coordinates, surface, reference, _ = dummy_layer\n # Surface with wrong shape\n surface_invalid = np.arange(20, dtype=float)\n with pytest.raises(ValueError):\n prism_layer(coordinates, surface_invalid, reference)\n # Reference with wrong shape\n reference_invalid = np.zeros(20)\n surface = np.arange(20, dtype=float).reshape(4, 5)\n with pytest.raises(ValueError):\n prism_layer(coordinates, surface, reference_invalid)", "def post_solve_bird_wood(arbiter, space_obj, _):\n #removing polygon\n removed_poly = []\n if arbiter.total_impulse.length > 1100:\n object1, object2 = arbiter.shapes\n for Each_column in columns:\n if object2 == Each_column.shape:\n removed_poly.append(Each_column)\n for Each_beam in beams:\n if object2 == Each_beam.shape:\n removed_poly.append(Each_beam)\n for Each_poly in removed_poly:\n if Each_poly in columns:\n columns.remove(Each_poly)\n if Each_poly in beams:\n beams.remove(Each_poly)\n space_obj.remove(object2, object2.body)\n #you can also remove bird if you want" ]
[ "0.60711205", "0.57531947", "0.54899454", "0.54608077", "0.5264879", "0.52497613", "0.5123439", "0.51041955", "0.50515485", "0.50315267", "0.5019805", "0.4985836", "0.49436423", "0.4911243", "0.48981285", "0.48816335", "0.4864694", "0.4860169", "0.48571274", "0.48423705", "0.48379374", "0.4835702", "0.48189253", "0.48137587", "0.48059765", "0.47898957", "0.47869033", "0.4776863", "0.47766796", "0.47668168", "0.4762015", "0.47600475", "0.4735886", "0.47348198", "0.4734352", "0.4720399", "0.47160405", "0.47141582", "0.47119188", "0.47072166", "0.46956944", "0.46912655", "0.46885264", "0.46787035", "0.4675369", "0.46670458", "0.46670458", "0.46641117", "0.46619236", "0.4658871", "0.46493897", "0.46431285", "0.46336442", "0.4628281", "0.46256962", "0.4625502", "0.46240437", "0.46216738", "0.46197844", "0.46159747", "0.4609616", "0.46026695", "0.46011093", "0.45836285", "0.4576287", "0.4574602", "0.45735306", "0.45712382", "0.45567486", "0.4556603", "0.45513326", "0.45493618", "0.45435223", "0.45428595", "0.4531317", "0.45263007", "0.45255858", "0.45240018", "0.45225152", "0.4518331", "0.45139578", "0.45136654", "0.4510443", "0.44992805", "0.44992805", "0.44972447", "0.44886917", "0.4479637", "0.44691887", "0.44673362", "0.44527003", "0.4452069", "0.4452069", "0.4451888", "0.44474387", "0.44457006", "0.4445338", "0.4441674", "0.4441043", "0.44403785" ]
0.6333744
0
Given a polygon Geometry object in lat/long, work out what would be a suitable projection to use with this area, in order to avoid things like the international date line wraparound, or the north/south pole discontinuities. This only makes sense for tiled products, as opposed to long strips which cross multiple zones, etc. The main possible options are UTM in a suitable zone, or UPS when near the poles. Return the EPSG number of the projection.
def findSensibleProjection(geom):
    coords = getCoords(geom)
    y = coords[:, 1]
    x = coords[:, 0]
    yMin = y.min()
    yMax = y.max()
    if (yMax - yMin) > 90:
        # We are crossing a lot of latitude, which suggests that we have a
        # long strip. In this case, we don't even bother to suggest an EPSG.
        epsg = None
    elif yMin < -80:
        # We are nearing the south pole, so go with UPS south
        epsg = 32761
    elif yMax > 80:
        # Nearing north pole, so UPS North
        epsg = 32661
    else:
        # Work out a UTM zone. Note that we use the median value to get a rough
        # idea of the centre, rather than the mean, because the mean is subject to all
        # sorts of problems when crossing the date line
        xMedian = numpy.median(x)
        yMedian = numpy.median(y)
        zone = int((xMedian + 180)/6) % 60 + 1
        if yMedian < 0:
            epsgBase = 32700
        else:
            epsgBase = 32600
        epsg = epsgBase + zone
    return epsg
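A minimal usage sketch for the function above (illustrative only; it assumes getCoords() from the same module returns an N x 2 array of (longitude, latitude) vertices, and that numpy and the GDAL/OGR Python bindings are installed):

    from osgeo import ogr

    # A small tile-sized polygon around 130.5E, 20.5S
    geom = ogr.CreateGeometryFromWkt("POLYGON((130 -20, 131 -20, 131 -21, 130 -21, 130 -20))")
    epsg = findSensibleProjection(geom)
    print(epsg)   # 32752, i.e. WGS84 / UTM zone 52 South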
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def polygon_area(polygon):\n if not PYPROJ_INSTALLED:\n raise ImportError(\"`pyproj` must be installed to use this feature!\")\n poly = wkt_loads(polygon)\n poly_area = shapely.ops.transform(\n partial(\n pyproj.transform,\n pyproj.Proj(init='EPSG:4326'),\n pyproj.Proj(\n proj='aea',\n lat1=poly.bounds[1],\n lat2=poly.bounds[3]\n )\n ),\n poly\n )\n return poly_area.area / 1e6", "def polygon(self):\n radius = self._get_max_rupture_projection_radius()\n return self.location.to_polygon(radius)", "def _get_projection(el):\n result = None\n if hasattr(el, 'crs'):\n result = (int(el._auxiliary_component), el.crs)\n return result", "def grid_proj(self):\r\n return _get_projection(self._grid_proj, self.longitude, self.latitude)", "def _get_polygon(areasrc):\n\n str = areasrc.geometry.wkt\n str = re.sub('POLYGON\\(\\(', '', str)\n str = re.sub('\\)\\)', '', str)\n aa = re.split('\\,', str)\n lons = []\n lats = []\n for str in aa:\n bb = re.split('\\s+', re.sub('^\\s+', '', str))\n lons.append(float(bb[0]))\n lats.append(float(bb[1]))\n return lons, lats", "def get_boundary_as_polygon(self, do_geo=True):\n xhor, yhor = self.get_coordinates()\n dimensions = xhor.shape\n xbottom = xhor[0, :]\n xright = xhor[:, dimensions[1]-1]\n xtop = xhor[dimensions[0]-1, :][::-1]\n xleft = xhor[:, 0][::-1]\n\n ybottom = yhor[0, :]\n yright = yhor[:, dimensions[1]-1]\n ytop = yhor[dimensions[0]-1, :][::-1]\n yleft = yhor[:, 0][::-1]\n\n lon_square = np.concatenate((xbottom, xright, xtop, xleft))\n lat_square = np.concatenate((ybottom, yright, ytop, yleft))\n\n return lon_square, lat_square", "def _get_polygon(element):\n polygon = element.find('%s/%s/%s/%s/%s' %\n (NRML04_AREA_GEOMETRY, gml.GML_POLYGON,\n gml.GML_EXTERIOR, gml.GML_LINEAR_RING,\n gml.GML_POS_LIST)).text\n\n polygon = gml._get_polygon_from_2DLinestring(polygon)\n\n return polygon", "def calculate_areas(polygon):\n project = ft.partial(pj.transform,\n pj.Proj(init='epsg:4326'),\n pj.Proj('+proj=eck4 +lat_0=' + str(polygon.centroid.y) + ' +lon_0=' + str(polygon.centroid.x)))\n field_projected = transform(project, polygon)\n # convert from square meters to acres\n return uom.Uom(field_projected.area, uom.SquareMeter)", "def process_latlon(self):\n data = self.unixtext.replace(\"\\n\", \" \")\n search = LAT_LON_PREFIX.search(data)\n if search is None:\n return None\n pos = search.start()\n newdata = data[pos+9:]\n # Go find our next non-digit, non-space character, if we find it, we\n # should truncate our string, this could be improved, I suspect\n search = re.search(r\"[^\\s0-9]\", newdata)\n if search is not None:\n pos2 = search.start()\n newdata = newdata[:pos2]\n\n poly = str2polygon(newdata)\n if poly is None:\n return None\n\n # check 0, PGUM polygons are east longitude akrherz/pyIEM#74\n if self.tp.source == 'PGUM':\n newpts = [[0 - pt[0], pt[1]] for pt in poly.exterior.coords]\n poly = Polygon(newpts)\n\n # check 1, is the polygon valid?\n if not poly.is_valid:\n self.tp.warnings.append(\n (\"LAT...LON polygon is invalid!\\n%s\") % (poly.exterior.xy,))\n return\n # check 2, is the exterior ring of the polygon clockwise?\n if poly.exterior.is_ccw:\n self.tp.warnings.append(\n (\"LAT...LON polygon exterior is CCW, reversing\\n%s\"\n ) % (poly.exterior.xy,))\n poly = Polygon(zip(poly.exterior.xy[0][::-1],\n poly.exterior.xy[1][::-1]))\n self.giswkt = 'SRID=4326;%s' % (dumps(MultiPolygon([poly]),\n rounding_precision=6),)\n return poly", "def getPolygonBoundaries(self, polygon: Polygon):\n polygon_df = gpd.GeoDataFrame([polygon], 
columns=['geometry'])\n\n polygon_df.set_crs(epsg=self.output_epsg, inplace=True)\n polygon_df['geometry'] = polygon_df['geometry'].to_crs(epsg=self.input_epsg)\n minx, miny, maxx, maxy = polygon_df['geometry'][0].bounds\n\n polygon_input = 'POLYGON(('\n xcords, ycords = polygon_df['geometry'][0].exterior.coords.xy\n for x, y in zip(list(xcords), list(ycords)):\n polygon_input += f'{x} {y}, '\n polygon_input = polygon_input[:-2]\n polygon_input += '))'\n\n return f\"({[minx, maxx]},{[miny,maxy]})\", polygon_input", "def get_projection(g, extension = 'tiff'):\n if isinstance(g, str):\n try:\n g = gdal.Open(g)\n print(type(g))\n except:\n print('path is not correct')\n else:\n pass\n # assert isinstance(g ,gdal.Dataset)\n try:\n if extension == 'tiff':\n # Get info of the dataset that is used for transforming\n g_proj = g.GetProjection()\n srs = osr.SpatialReference()\n srs.ImportFromWkt(g_proj)\n epsg_to = srs.ExportToProj4()\n else:\n epsg_to = \"+proj=longlat +datum=WGS84 +no_defs\"\n print('Was not able to get the projection, so WGS84 is assumed')\n except:\n epsg_to = \"+proj=longlat +datum=WGS84 +no_defs\"\n print('Was not able to get the projection, so WGS84 is assumed')\n return epsg_to", "def projection(self):\n return self._map_projection", "def projection(self):\n return self._map_projection", "def polygon_area_2d(polygon):\r\n return geometry.gmPolygonArea(polygon)", "def _get_projection(cls, obj):\n isoverlay = lambda x: isinstance(x, CompositeOverlay)\n opts = cls._traverse_options(obj, 'plot', ['projection'],\n [CompositeOverlay, Element],\n keyfn=isoverlay)\n from_overlay = not all(p is None for p in opts[True]['projection'])\n projections = opts[from_overlay]['projection']\n custom_projs = [p for p in projections if p is not None]\n if len(set(custom_projs)) > 1:\n raise Exception(\"An axis may only be assigned one projection type\")\n return custom_projs[0] if custom_projs else None", "def get_polygon_coordinates(self) -> Tuple[List, List]:\n\n polygon_query = f\"https://nominatim.openstreetmap.org/\" \\\n f\"search?city={self.location.replace(' ', '+')}&polygon_geojson=1&format=json\"\n r = requests.get(polygon_query)\n js = ast.literal_eval(r.text)\n\n self.monitor.info(\"-> Downloaded area polygon data points.\")\n clean_polygon_coords = js[0]['geojson']['coordinates'][0]\n\n polygon_lats = [float(i[1]) for i in clean_polygon_coords]\n polygon_longs = [float(i[0]) for i in clean_polygon_coords]\n\n self.monitor.info(\"-> Created lat/long vectors.\")\n return polygon_lats, polygon_longs", "def read_gdal_projection(dataset):\n wkt = dataset.GetProjection()\n srs = osr.SpatialReference()\n srs.ImportFromWkt(wkt)\n # src = None\n return srs", "def define_projection(self, region):\n region = {\n \"start_longitude\": region[0],\n \"end_longitude\": region[1],\n \"start_latitude\": region[2],\n \"end_latitude\": region[3],\n }\n projection = \"LambertConformal\"\n plotextend = [\n region[\"start_longitude\"],\n region[\"end_longitude\"],\n region[\"start_latitude\"],\n region[\"end_latitude\"],\n ]\n if projection == \"LambertConformal\":\n # plotextend has to be a little larger so everything is on there\n plotextend = [\n plotextend[0] - 1.0,\n plotextend[1] + 1.0,\n plotextend[2] - 1.0,\n plotextend[3] + 1.0,\n ]\n # path to cut out is exact though\n lons = self.region_to_square(region, \"longitude\")\n lats = self.region_to_square(region, \"latitude\")\n path_ext = [[lon, lat] for lon, lat in zip(lons, lats)]\n path_ext = mpath.Path(path_ext).interpolated(20)\n # South 
Hemisfere\n if region[\"start_latitude\"] <= 0 and region[\"end_latitude\"] <= 0:\n proj = ccrs.LambertConformal(\n central_longitude=np.sum(plotextend[:2]) / 2.0,\n central_latitude=np.sum(plotextend[2:]) / 2.0,\n cutoff=+30,\n standard_parallels=(-33, -45),\n )\n # North Hemisphere\n else:\n proj = ccrs.LambertConformal(\n central_longitude=np.sum(plotextend[:2]) / 2.0,\n central_latitude=np.sum(plotextend[2:]) / 2.0,\n )\n return proj, path_ext, plotextend", "def polygon_area(ppath): # pragma: no cover\n v_ = ppath.vertices\n if len(v_) < 3:\n return 0.0\n x_ = v_[:, 1] - v_[:, 1].mean()\n y_ = v_[:, 0] - v_[:, 0].mean()\n correction = x_[-1] * y_[0] - y_[-1] * x_[0]\n main_area = np.dot(x_[:-1], y_[1:]) - np.dot(y_[:-1], x_[1:])\n return 0.5 * np.abs(main_area + correction)", "def _getshapepoly(self, polygon, compound=False):\n if self._resizemode == \"user\" or compound:\n t11, t12, t21, t22 = self._shapetrafo\n elif self._resizemode == \"auto\":\n l = max(1, self._pensize/5.0)\n t11, t12, t21, t22 = l, 0, 0, l\n elif self._resizemode == \"noresize\":\n return polygon\n return tuple((t11*x + t12*y, t21*x + t22*y) for (x, y) in polygon)", "def calculate_polygon_area(aLon_in, aLat_in, algorithm = 0):\n #TODO: take into account geodesy (i.e. convert latitude to authalic sphere, use radius of authalic sphere instead of mean radius of spherical earth)\n #for i in range(len(aLon_in)):\n # aLon_in[i] = aLon_in[i] + 180.0\n \n lons = np.deg2rad(aLon_in)\n lats = np.deg2rad(aLat_in)\n if algorithm==0:\n # Line integral based on Green's Theorem, assumes spherical Earth \n #close polygon\n if lats[0]==lats[-1] and lons[0]==lons[-1] :\n pass\n else:\n lats = np.append(lats, lats[0])\n lons = np.append(lons, lons[0])\n\n # Get colatitude (a measure of surface distance as an angle)\n a = np.sin(lats/2)**2 + np.cos(lats)* np.sin(lons/2)**2\n colat = 2*np.arctan2( np.sqrt(a), np.sqrt(1-a) )\n\n #azimuth of each point in segment from the arbitrary origin\n az = np.arctan2(np.cos(lats) * np.sin(lons), np.sin(lats)) % (2*np.pi)\n\n # Calculate step sizes\n # daz = np.diff(az) % (2*pi)\n daz = np.diff(az)\n daz = (daz + np.pi) % (2 * np.pi) - np.pi\n\n # Determine average surface distance for each step\n deltas=np.diff(colat)/2\n colat=colat[0:-1]+deltas\n\n # Integral over azimuth is 1-cos(colatitudes)\n integrands = (1-np.cos(colat)) * daz\n\n # Integrate and save the answer as a fraction of the unit sphere.\n # Note that the sum of the integrands will include a factor of 4pi.\n area = abs(sum(integrands))/(4*np.pi) # Could be area of inside or outside\n\n area = min(area,1-area)\n radius= 6378137.0\n \n dArea = area * 4*np.pi*(radius**2)\n return dArea\n\n\n elif algorithm==2:\n #L'Huilier Theorem, assumes spherical earth\n #see:\n # https://mathworld.wolfram.com/SphericalPolygon.html\n # https://web.archive.org/web/20160324191929/http://forum.worldwindcentral.com/showthread.php?20724-A-method-to-compute-the-area-of-a-spherical-polygon\n # https://github.com/spacetelescope/spherical_geometry/blob/master/spherical_geometry/polygon.py\n # https://github.com/tylerjereddy/spherical-SA-docker-demo/blob/master/docker_build/demonstration.py\n #TODO\n pass\n elif algorithm==3:\n #https://trs.jpl.nasa.gov/handle/2014/41271\n #TODO\n pass", "def construct_polygon(self, polygon_longs: List, polygon_lats: List) -> gpd.GeoDataFrame:\n\n polygon_geom = Polygon(zip(polygon_longs, polygon_lats))\n\n crs = {'init': 'epsg:4326'}\n polygon = gpd.GeoDataFrame(index=[0], crs=crs, geometry=[polygon_geom])\n\n 
polygon.to_file(filename=f'{self.polygon_path}/polygon_{self.postfix}.geojson', driver='GeoJSON')\n polygon.to_file(filename=f'{self.polygon_path}/polygon_{self.postfix}.shp', driver=\"ESRI Shapefile\")\n\n self.monitor.info(\"-> Created area polygon.\")\n return polygon", "def get_projection(self):\n return self.projection", "def to_shapely_polygon(self):\n # load shapely lazily, which makes the dependency more optional\n import shapely.geometry\n return shapely.geometry.Polygon([(point[0], point[1]) for point in self.exterior])", "def get_projection(lon, lat, city=\"victoria\"):\n # TODO: Doesn't work if a city is on the boundary of two UTM zones (per Benoit). Figure out solution for future.\n code = PROJ_CODES[city.strip().lower()]\n\n def transf(x, y):\n return transform(PROJ_LATLONG, Proj(init=code), x, y)\n\n utm_e, utm_n = np.vectorize(transf)(lat, lon)\n return utm_e, utm_n", "def polygon_to_lonlat(polygon):\n poly_coords = polygon.split('((')[1].split('))')[0].split(',')\n coords = [(float(lon), float(lat)) for lon, lat in\n [co.split(' ') for co in poly_coords]]\n lon, lat = zip(*coords)\n return (lon, lat)", "def coord_proj(self):\r\n return self._coord_proj", "def extent_as_polygon(self, crs=wgs84):\n from shapely.geometry import Polygon\n\n # this is not so trivial\n # for optimisation we will transform the boundaries only\n _i = np.hstack([np.arange(self.nx+1),\n np.ones(self.ny+1)*self.nx,\n np.arange(self.nx+1)[::-1],\n np.zeros(self.ny+1)]).flatten()\n _j = np.hstack([np.zeros(self.nx+1),\n np.arange(self.ny+1),\n np.ones(self.nx+1)*self.ny,\n np.arange(self.ny+1)[::-1]]).flatten()\n _i, _j = self.corner_grid.ij_to_crs(_i, _j, crs=crs)\n return Polygon(zip(_i, _j))", "def compute_polygon_centroid_2d(polygon):\r\n return geometry.gmComputePolygonCentroid(polygon)", "def project(self, (lng, lat)):\n x = lng * DEG_TO_RAD\n lat = max(min(MAX_LATITUDE, lat), -MAX_LATITUDE)\n y = lat * DEG_TO_RAD\n y = math.log(math.tan((math.pi / 4) + (y / 2)))\n return (x*EARTH_RADIUS, y*EARTH_RADIUS)", "def getProj4String(self):\n with self._getDatasetLock:\n if self.dataset.GetGCPs() and self.dataset.GetGCPProjection():\n wkt = self.dataset.GetGCPProjection()\n else:\n wkt = self.dataset.GetProjection()\n if not wkt:\n if (self.dataset.GetGeoTransform(can_return_null=True) or\n hasattr(self, '_netcdf') or self._getDriver() in {'NITF'}):\n return NeededInitPrefix + 'epsg:4326'\n return\n proj = osr.SpatialReference()\n proj.ImportFromWkt(wkt)\n return proj.ExportToProj4()", "def calculate_radius(polygon):\n radius = math.sqrt(calculate_areas(polygon).value / math.pi)\n return uom.Uom(radius, uom.Meter)", "def split_polygon(polygon):\n\n if len(polygon) < 3:\n raise ValueError(\n 'At least 3 lat/lon float value pairs must be provided')\n\n polygon_string = ''\n\n for poly in polygon:\n polygon_string += ' '.join(map(str, poly))\n polygon_string += ' '\n\n return polygon_string.strip()", "def polygonFromInteriorPoints(geom, preferredEpsg):\n (projTr, llTr) = makeTransformations(4326, preferredEpsg)\n\n geomProj = copyGeom(geom)\n geomProj.Transform(projTr)\n geomOutline = geomProj.ConvexHull()\n geomOutline.Transform(llTr)\n return geomOutline", "def point2wgs84(self, crsop):\n latr = math.radians(self.lat)\n lngr = math.radians(self.lng)\n \n # a is the semi-major axis of given datum.\n a = crsop.SemimajorAxisInM\n \n # f is the flattening of given datum\n f = 1.0/crsop.InverseFlattening\n dx = crsop.dx\n dy = crsop.dy\n dz = crsop.dz\n \n # da is the difference between the WGS84 and source 
ellipsoid semi-major axes.\n da = 6378137.0 - a\n \n # df is the difference between the WGS84 and source CRS flattenings.\n df = 1.0/298.257223563 - f\n \n e_squared = f*(2-f)\n rho = a*(1-e_squared)/math.pow((1-e_squared*sqr(math.sin(latr))),1.5)\n nu = a/math.pow((1-e_squared*sqr(math.sin(latr))),0.5)\n dlat = (1/rho)*(-dx*math.sin(latr)*math.cos(lngr) - \\\n dy*math.sin(latr)*math.sin(lngr) + \\\n dz*math.cos(latr) + (f*da + a*df)*math.sin(2*latr))\n dlng = (-dx*math.sin(lngr) + dy*math.cos(lngr))/(nu*math.cos(latr))\n newlng = lng180(math.degrees(lngr + dlng))\n newlat = math.degrees(latr + dlat)\n return Point(float(truncate(newlng,DEGREE_DIGITS)), \\\n float(truncate(newlat,DEGREE_DIGITS)))", "def proj_coord(coord, proj_in, proj_out):\n x, y = coord\n return pyproj.transform(proj_in, proj_out, x, y)", "def define_polygon(cls, polygon):\n \n num_obj = cls()\n num_obj.coord = [np.array(polygon)]\n return num_obj", "def gml_to_polygon(footprint):\n footprint = footprint.replace('\\n', '').strip()\n coords_poly = []\n #\n # Sentinel-1\n # (http://www.opengis.net/gml/srs/epsg.xml#4326)\n #\n if ',' in footprint:\n coords_gml = footprint.split()\n for coord_pair in coords_gml:\n lat, lon = [float(_) for _ in coord_pair.split(',')]\n if lon < -180.0:\n lon = -180.0\n if lon > 180.0:\n lon = 180.0\n if lat < -90.0:\n lat = -90.0\n if lat > 90.0:\n lat = 90.0\n coords_poly.append('{lon:.4f} {lat:.4f}'.format(lon=lon, lat=lat))\n\n #\n # Sentinel-3 and Sentinel-2\n # (http://www.opengis.net/def/crs/EPSG/0/4326)\n #\n else:\n coords_gml = footprint.split()\n for i in range(len(coords_gml)//2):\n lat = float(coords_gml[2*i])\n lon = float(coords_gml[2*i+1])\n if lon < -180.0:\n lon = -180.0\n if lon > 180.0:\n lon = 180.0\n if lat < -90.0:\n lat = -90.0\n if lat > 90.0:\n lat = 90.0\n coords_poly.append('{lon:.4f} {lat:.4f}'.format(lon=lon, lat=lat))\n\n #\n # Make sure the polygon is a closed line string.\n #\n if coords_poly[0] != coords_poly[-1]:\n coords_poly.append(coords_poly[0])\n\n wkt = 'POLYGON (({}))'.format(','.join(coords_poly))\n return wkt", "def calculate_polygon_centroid(polygon):\n\n # Make sure it is numeric\n P = numpy.array(polygon)\n\n # Get area - needed to compute centroid\n A = calculate_polygon_area(P, signed=True)\n\n # Extract x and y coordinates\n x = P[:, 0]\n y = P[:, 1]\n\n # Exercise: Compute C as shown in http://paulbourke.net/geometry/polyarea\n a = x[:-1] * y[1:]\n b = y[:-1] * x[1:]\n\n cx = x[:-1] + x[1:]\n cy = y[:-1] + y[1:]\n\n Cx = numpy.sum(cx * (a - b)) / (6. * A)\n Cy = numpy.sum(cy * (a - b)) / (6. 
* A)\n\n # Create Nx2 array and return\n C = numpy.array([Cx, Cy])\n return C", "def test_polygon_area(self):\n\n # Create closed simple polygon (counter clock wise)\n P = numpy.array([[0, 0], [1, 0], [1, 1], [0, 1], [0, 0]])\n A = calculate_polygon_area(P)\n msg = 'Calculated area was %f, expected 1.0 deg^2' % A\n assert numpy.allclose(A, 1), msg\n\n # Create closed simple polygon (clock wise)\n P = numpy.array([[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]])\n A = calculate_polygon_area(P)\n msg = 'Calculated area was %f, expected 1.0 deg^2' % A\n assert numpy.allclose(A, 1), msg\n\n A = calculate_polygon_area(P, signed=True)\n msg = 'Calculated signed area was %f, expected -1.0 deg^2' % A\n assert numpy.allclose(A, -1), msg\n\n # Not starting at zero\n # Create closed simple polygon (counter clock wise)\n P = numpy.array([[168, -2], [169, -2], [169, -1],\n [168, -1], [168, -2]])\n A = calculate_polygon_area(P)\n\n msg = 'Calculated area was %f, expected 1.0 deg^2' % A\n assert numpy.allclose(A, 1), msg\n\n # Realistic polygon\n filename = '%s/%s' % (TESTDATA, 'test_polygon.shp')\n layer = read_layer(filename)\n geometry = layer.get_geometry()\n\n P = geometry[0]\n A = calculate_polygon_area(P)\n\n # Verify against area reported by qgis (only three decimals)\n qgis_area = 0.003\n assert numpy.allclose(A, qgis_area, atol=1.0e-3)\n\n # Verify against area reported by ESRI ARC (very good correspondence)\n esri_area = 2.63924787273461e-3\n assert numpy.allclose(A, esri_area, rtol=0, atol=1.0e-10)", "def polygon(self):\n return self._polygon", "def test_unicode_proj4_string(self):\n from pyresample import utils\n utils.get_area_def(u\"eurol\", u\"eurol\", u\"bla\",\n u'+proj=stere +a=6378273 +b=6356889.44891 +lat_0=90 +lat_ts=70 +lon_0=-45',\n 1000, 1000, (-1000, -1000, 1000, 1000))", "def projection(self, point):\n projected_point = self._iterate_over_factors(\"projection\", {\"point\": point})\n return projected_point", "def point_to_polygon_geojson(g):\n point_coordinates = g['geometry']['coordinates']\n polygon_geojson = {\n 'type': 'Feature',\n 'properties': g['properties'],\n 'geometry': {\n 'type': 'Polygon',\n 'coordinates': [\n [point_coordinates, point_coordinates, point_coordinates, point_coordinates]\n ]\n }\n }\n return polygon_geojson", "def km2_area(polygons):\n\n reprojected_polygons = [reproject(p) for p in polygons]\n return ops.cascaded_union(reprojected_polygons).area * 1e-6", "def safeProj(proj, lon, lat):\n x, y = proj(np.atleast_1d(lon),np.atleast_1d(lat))\n x[x > 1e29] = None\n y[y > 1e29] = None\n #return np.ma.array(x,mask=x>1e2),np.ma.array(y,mask=y>1e2)\n return x, y", "def setCrsIsProjection(self):\n self.isgeographic = False", "def getRegionData(self, polygon: Polygon, epsg: int, region: str):\n pipeline = self.runPipeline(polygon, epsg, region)\n arr = pipeline.arrays[0]\n return self.makeGeoDf(arr)", "def PolygonPath(polygon):\n\n def coding(ob):\n # The codes will be all \"LINETO\" commands, except for \"MOVETO\"s at the\n # beginning of each subpath\n n = len(getattr(ob, 'coords', None) or ob)\n vals = ones(n, dtype=Path.code_type) * Path.LINETO\n vals[0] = Path.MOVETO\n return vals\n\n if hasattr(polygon, 'geom_type'): # Shapely\n ptype = polygon.geom_type\n if ptype == 'Polygon':\n polygon = [Polygon(polygon)]\n elif ptype == 'MultiPolygon':\n polygon = [Polygon(p) for p in polygon]\n else:\n raise ValueError(\n \"A polygon or multi-polygon representation is required\")\n\n else: # GeoJSON\n polygon = getattr(polygon, '__geo_interface__', polygon)\n ptype = 
polygon[\"type\"]\n if ptype == 'Polygon':\n polygon = [Polygon(polygon)]\n elif ptype == 'MultiPolygon':\n polygon = [Polygon(p) for p in polygon['coordinates']]\n else:\n raise ValueError(\n \"A polygon or multi-polygon representation is required\")\n\n vertices = concatenate([\n concatenate([asarray(t.exterior)[:, :2]] +\n [asarray(r)[:, :2] for r in t.interiors])\n for t in polygon])\n codes = concatenate([\n concatenate([coding(t.exterior)] +\n [coding(r) for r in t.interiors]) for t in polygon])\n\n return Path(vertices, codes)", "def project(self, latitude, longitude):\n longitude = longitude if longitude > 0 else 360 + longitude\n\n lon_array = self.ecmwf_data.longitude\n lat_array = self.ecmwf_data.latitude\n\n # Find in the lon_array / lat_array the index interval\n # Including lon_ul and lat_ul\n a_lon = np.where((lon_array < longitude))[0][-1]\n if longitude > lon_array.max():\n # lon is between 359.6 and 0 ...\n b_lon = 0\n else:\n b_lon = np.where((lon_array >= longitude))[0][0]\n\n a_lat = np.where((lat_array < latitude))[0][0]\n b_lat = np.where((lat_array >= latitude))[0][-1]\n\n # Compute geo extent around the point :\n # => extent definition : LR,LL,UL,UR\n extent = [lon_array[a_lon], lat_array[a_lat],\n lon_array[b_lon], lat_array[a_lat],\n lon_array[b_lon], lat_array[b_lat],\n lon_array[a_lon], lat_array[b_lat]]\n\n extent_index = [a_lon, a_lat,\n b_lon, a_lat,\n b_lon, b_lat,\n a_lon, b_lat]\n\n log.info(' - Selected vertex : ')\n log.info('LL (px,ln) / (lon,lat) : (%s, %s) / (%s dd , %s dd)' % (\n str(extent_index[0]), str(extent_index[1]), str(extent[0]), str(extent[1])))\n log.info('LR (px,ln) / (lon,lat) : (%s, %s) / (%s dd , %s dd)' % (\n str(extent_index[2]), str(extent_index[3]), str(extent[2]), str(extent[3])))\n log.info('UR (px,ln) / (lon,lat) : (%s, %s) / (%s dd , %s dd)' % (\n str(extent_index[4]), str(extent_index[5]), str(extent[4]), str(extent[5])))\n log.info('UL (px,ln) / (lon,lat) : (%s, %s) / (%s dd , %s dd)' % (\n str(extent_index[6]), str(extent_index[7]), str(extent[6]), str(extent[7])))\n\n # TIE Point grid defined - compute linear transformation\n # to estimate value at the lat/lon location\n # origin : extent_ul[0], extent_ul[1]\n delta_lon = 0.4 # extent[4] - extent[6] # UR - UL\n delta_lat = -0.4 # extent[1] - extent[7] # LL - UL\n\n lambda_lat = latitude - extent[7]\n lambda_lon = longitude - extent[6]\n\n beta_longitude = lambda_lon / delta_lon\n beta_latitude = lambda_lat / delta_lat\n\n # Processing of all keys\n for key in self.ecmwf_data.mandatory_attributes:\n M = getattr(self.ecmwf_data, key)\n v = self.linear_estimate(M,\n beta_latitude,\n beta_longitude,\n extent_index)\n setattr(self, key, v)", "def proj(self,lon,lat):\n x, y = self(np.atleast_1d(lon),np.atleast_1d(lat))\n x[x > 1e29] = None\n y[y > 1e29] = None\n #return np.ma.array(x,mask=x>1e2),np.ma.array(y,mask=y>1e2)\n return x, y", "def json2polygon(geojson_str):\n geojson_object = geojson.loads(geojson_str)\n return geometry.shape(geojson_object)", "def _preprocess_polygon(polygon):\n\n # Could catch ValueErrors for unsuitable inputs\n polygon = numpy.array(polygon)\n\n if len(polygon.shape) == 1:\n if len(polygon) % 2:\n raise ValueError('Number of values for polygon not divisible by two.'\n 'Coordinates need an x and y coordinate: '.format(polygon))\n polygon = polygon.reshape((-1, 2))\n\n if not len(polygon.shape) == 2 or polygon.shape[1] != 2:\n raise ValueError('polygon of wrong dimensions. It should be of shape. '\n 'Should be: (num_points, 2). 
Input: {}'.format(polygon))\n\n polygon = Polygon(numpy.array(polygon))\n\n # Mainly for self-intersection\n if not polygon.is_valid:\n raise ValueError('polygon is invalid, likely self-intersecting: {}'.\n format(polygon))\n\n return polygon", "def Polygon(self, polyline = False):\n\n from geographiclib.polygonarea import PolygonArea\n return PolygonArea(self, polyline)", "def largest_polygon(polygons):\n # we should probably use a complicated formula to do this\n # but for now, it probably suffices to notice that the last one is usually\n # the largest\n return polygons.points[-1]", "def getpolycenter(poly):\n polylength = len(poly)\n\n return (\n round(sum(x for x, y in poly) / polylength, 2),\n round(sum(y for x, y in poly) / polylength, 2)\n )", "def get_poly_obj(self):\n try:\n area = self.request.POST['area'].replace(\"\\n\", \"\")\n geo_poly_obj = Polygon(json.loads(area)['coordinates'][0])\n return geo_poly_obj\n except:\n raise ValidationError(\"Not proper geo json\")", "def warp_geometry(geom, src_crs, dst_crs):\n return shapely.geometry.shape(rasterio.warp.transform_geom(src_crs, dst_crs, shapely.geometry.mapping(geom)))", "def get_polygon_envelope(polygon, x_pixel_size, y_pixel_size):\n # retrieve polygon points\n poly_pts = list(polygon.exterior.coords)\n # split tuple points into x and y coordinates and convert them to numpy arrays\n xs, ys = [np.array(coords) for coords in zip(*poly_pts)]\n # compute bounding box\n min_x, min_y, max_x, max_y = min(xs), min(ys), max(xs), max(ys)\n # round boundary coordinates to upper-left corner\n min_x = int(round(min_x / x_pixel_size, DECIMALS)) * x_pixel_size\n min_y = int(np.ceil(round(min_y / y_pixel_size, DECIMALS))) * y_pixel_size\n max_x = int(round(max_x / x_pixel_size, DECIMALS)) * x_pixel_size\n max_y = int(np.ceil(round(max_y / y_pixel_size, DECIMALS))) * y_pixel_size\n\n return min_x, min_y, max_x, max_y", "def parallel_projection(self):\n return self.camera.parallel_projection", "def produce_polygon(polygon_ordered_coordinates: List, zoom: int, plot_polygon: bool = False) -> Path:\n polygon_tile_points = []\n for item in polygon_ordered_coordinates:\n polygon_tile_points += [Utility.get_tile(*item, zoom)]\n polygon_tile_points += [polygon_tile_points[0]]\n polygon = Path(polygon_tile_points)\n if plot_polygon:\n fig = plt.figure()\n ax = fig.add_subplot(111)\n patch = patches.PathPatch(polygon, facecolor='orange', lw=2)\n ax.add_patch(patch)\n ax.set_xlim(min(polygon_tile_points, key = lambda item: item[0])[0], max(polygon_tile_points, key = lambda item: item[0])[0])\n ax.set_ylim(min(polygon_tile_points, key = lambda item: item[1])[1], max(polygon_tile_points, key = lambda item: item[1])[1])\n plt.show()\n return polygon", "def get_selected_utm_epsg(self):\n\n geo_aoi = self.get_selected_lat_lon_corners()\n lon = statistics.mean([geo_aoi['ul_lng'], geo_aoi['lr_lng']])\n lat = statistics.mean([geo_aoi['ul_lat'], geo_aoi['lr_lat']])\n \n # Calculation from https://stackoverflow.com/a/40140326/4556479 \n utm_band = str((math.floor((lon + 180) / 6 ) % 60) + 1)\n if len(utm_band) == 1:\n utm_band = '0'+utm_band\n if lat >= 0:\n epsg_code = '326' + utm_band\n return f'EPSG:{epsg_code}'\n epsg_code = '327' + utm_band \n return f'EPSG:{epsg_code}'", "def point2wgs84_9603(self, datum):\n \"\"\"\n h is the height above the ellipsoid. This is the height value that is \n delivered by GPS satellite observations but is not the gravity-related height \n value which is normally used for national mapping and levelling operations. 
The\n gravity-related height (H) is usually the height above mean sea level or an \n alternative level reference for the country. If one starts with a gravity-related \n height H, it will be necessary to convert it to an ellipsoid height (h) before \n using the above transformation formulas. See section 4.11.1. For the WGS 84 \n ellipsoid the difference between ellipsoid and mean sea level can vary between \n values of -100m in the Sri Lanka area to +80m in the North Atlantic.)\n \"\"\"\n h=0\n # a is the semi-major axis of the ellipsoid of the given datum.\n a = datum.axis\n\n # f is the flattening of the ellipsoid of the given datum \n # (get_flattening actually returns the inverse flattening).\n f = 1.0/datum.flattening\n \n # dx, dy, dz are the x, y, z offset parameters for the given datum transformation\n # to WGS84\n dx = datum.dx\n dy = datum.dy\n dz = datum.dz\n \n # latr, lngr are the latitude and longitude in radians\n latr = math.radians(self.lat)\n lngr = math.radians(self.lng)\n\n # e is the eccentricity of the ellipsoid\n e_squared = f*(2-f)\n\n # nu is the prime vertical radius of curvature at latr\n nu = a/math.pow((1-e_squared*sqr(math.sin(latr))),0.5)\n\n X = (nu+h)*math.cos(latr)*math.cos(vlambda)\n Y = (nu+h)*math.cos(latr)*math.sin(vlambda)\n Z = ((1 - math.pow(e,2))*nu + h)*math.sin(phi)\n\n Xwgs84 = X+dx\n Ywgs84 = Y+dy\n Zwgs84 = Z+dz\n\n epsilon = e_squared/(1-e_squared)\n b = a*(1-f)\n p = math.pow(sqr(Xwgs84)+sqr(Ywgs84),0.5)\n q = math.atan2((Zwgs84*a),(p*b))\n\n latrwgs84 = math.atan2( (Zwgs84 + epsilon*b*math.pow(math.sin(q)),3)), \\\n (p - e_squared*a*math.pow(math.cos(q),3) )\n lngrwgs84 = math.atan2(Ywgs84, Xwgs84)\n hwgs84 = (p/math.cos(latrwgs84))-nu\n newlng = lng180(math.degrees(lngrwgs84))\n newlat = math.degrees(latrwgs84)\n return Point(float(truncate(newlng,DEGREE_DIGITS)), float(truncate(newlat,DEGREE_DIGITS)))", "def _epsg(self):\n info = self._info['coordinateSystem']['wkt'].rsplit('\"EPSG\",', 1)[-1]\n return int(re.findall(r\"\\d+\", info)[0])", "def isPointInPolygon(xPolygon, yPolygon, xPt, yPt):\n \n # How to tell if a point is inside a polygon:\n # Determine the change in angle made by the point and the vertices\n # of the polygon. Add up the delta(angle)'s from the first (include\n # the first point again at the end). If the point is inside the\n # polygon, then the total angle will be +/-360 deg. If the point is\n # outside, then the total angle will be 0 deg. 
Points on the edge will\n # outside.\n # This is called the Winding Algorithm\n # http://geomalgorithms.com/a03-_inclusion.html\n\n n = len(xPolygon)\n # Array for the angles\n angle = np.zeros(n)\n\n # add first vertex to the end\n xPolygon1 = np.append( xPolygon, xPolygon[0] )\n yPolygon1 = np.append( yPolygon, yPolygon[0] )\n\n wn = 0 # winding number counter\n\n # Loop through the edges of the polygon\n for i in range(n):\n # if edge crosses upward (includes its starting endpoint, and excludes its final endpoint)\n if yPolygon1[i] <= yPt and yPolygon1[i+1] > yPt:\n # if (P is strictly left of E[i]) // Rule #4\n if isLeft(xPolygon1[i], yPolygon1[i], xPolygon1[i+1], yPolygon1[i+1], xPt, yPt) > 0: \n wn += 1 # a valid up intersect right of P.x\n\n # if edge crosses downward (excludes its starting endpoint, and includes its final endpoint)\n if yPolygon1[i] > yPt and yPolygon1[i+1] <= yPt:\n # if (P is strictly right of E[i]) // Rule #4\n if isLeft(xPolygon1[i], yPolygon1[i], xPolygon1[i+1], yPolygon1[i+1], xPt, yPt) < 0: \n wn -= 1 # a valid up intersect right of P.x\n\n # wn = 0 only when P is outside the polygon\n if wn == 0:\n return False\n else:\n return True", "def proj(self):\n return self._proj", "def polygonize(inRas, outPoly, outField=None, mask=True, band=1, filetype=\"ESRI Shapefile\"):\n\n options = []\n src_ds = gdal.Open(inRas)\n if src_ds is None:\n print('Unable to open %s' % inRas)\n sys.exit(1)\n\n try:\n srcband = src_ds.GetRasterBand(band)\n except RuntimeError as e:\n # for example, try GetRasterBand(10)\n print('Band ( %i ) not found')\n print(e)\n sys.exit(1)\n if mask == True:\n maskband = src_ds.GetRasterBand(band)\n options.append('-mask')\n else:\n mask = False\n maskband = None\n\n srs = osr.SpatialReference()\n srs.ImportFromWkt( src_ds.GetProjectionRef() )\n\n #\n # create output datasource\n #\n dst_layername = outPoly\n drv = ogr.GetDriverByName(filetype)\n dst_ds = drv.CreateDataSource(dst_layername)\n dst_layer = dst_ds.CreateLayer(dst_layername, srs=srs)\n\n if outField is None:\n dst_fieldname = 'DN'\n fd = ogr.FieldDefn(dst_fieldname, ogr.OFTInteger)\n dst_layer.CreateField(fd)\n dst_field = dst_layer.GetLayerDefn().GetFieldIndex(dst_fieldname)\n\n else:\n dst_field = dst_layer.GetLayerDefn().GetFieldIndex(outField)\n\n gdal.Polygonize(srcband, maskband, dst_layer, dst_field,\n callback=gdal.TermProgress)\n dst_ds.FlushCache()\n\n srcband = None\n src_ds = None\n dst_ds = None", "def proj_is_latlong(proj):\n\n try:\n return proj.is_latlong()\n except AttributeError:\n return proj.crs.is_geographic", "def reproject(point):\n wgs84 = pyproj.Proj('+init=epsg:4326')\n native = pyproj.Proj(DEM_PROJECTION)\n x, y = pyproj.transform(wgs84, native, point.x, point.y)\n return geom.Point(x, y)", "def get_grid_id(point, grids):\r\n\tdis_cents = 100\r\n\tgc_id = 0\r\n\r\n\tfor i, gc in enumerate(grids[\"grid_region\"]):\r\n\t\tdis = sqrt((float(point[0]) - float(gc[0])) ** 2 + (float(point[1]) - float(gc[1])) ** 2)\r\n\t\tif dis < dis_cents:\r\n\t\t\tdis_cents = dis\r\n\t\t\tgc_id = i\r\n\r\n\tgd_id = -1\r\n\tfor j, gd in enumerate(grids[\"grid_boundary\"][str(gc_id)]):\r\n\t\tboundary = grids[\"grid_boundary\"][str(gc_id)][gd]\r\n\t\tif isInsidePolygon((float(point[0]),float(point[1])),boundary):\r\n\t\t\tgd_id = gd\r\n\t\t\tbreak \r\n\tif(gd_id>0):\r\n\t\treturn str(gc_id) + '-' + str(gd_id)\r\n\telse:\r\n\t\treturn '-'", "def calculate_stereographic_projection(p):\n # P' = P * (2r / r + z)\n mu = 1 / (1 + p[2])\n x = p[0] * mu\n y = p[1] * mu\n return x, y", 
"def geospatial(self):\n return bool(\n self.dataset.GetProjection() or\n (self.dataset.GetGCPProjection() and self.dataset.GetGCPs()) or\n self.dataset.GetGeoTransform(can_return_null=True) or\n hasattr(self, '_netcdf'))", "def test_polygonize():\n # A collection with one non-zero-area Polygon is returned as a Polygon.\n geom1 = GeometryCollection([POLY, ZERO_POLY])\n result1 = polygonize(geom1)\n assert result1.geom_type == \"Polygon\"\n assert result1.area == 1.0\n\n # A collection with multiple non-zero-area polygons is returned as a MultiPolygon.\n geom2 = GeometryCollection([POLY, POLY])\n result2 = polygonize(geom2)\n assert result2.geom_type == \"MultiPolygon\"\n assert result2.area == 2.0\n\n # Zero-area geometries are not permitted.\n with pytest.raises(ValueError) as err:\n _ = polygonize(ZERO_POLY)\n assert err.match(\"Geometry has zero area\")", "def get_numberOfProjections(self):\n self._nproj = len(self._projIndices)\n return self._nproj", "def zip_geom():\r\n engine = get_sql_engine()\r\n zipgeom = text(\r\n \"\"\"\r\n SELECT zip_code, geom\r\n FROM philly_zipcode\r\n \"\"\"\r\n )\r\n zipgeom = gpd.read_postgis(zipgeom, con=engine)\r\n return zipgeom", "def pointsToVoronoiGridShapefile(lat, lon, vor_shp_path, extent=None):\n voronoi_centroids = _get_voronoi_centroid_array(lat, lon, extent)\n\n # set-up output polygon shp\n log(\"Creating output polygon shp {0}\"\n .format(os.path.basename(vor_shp_path)))\n if os.path.exists(vor_shp_path):\n os.remove(vor_shp_path)\n drv = ogr.GetDriverByName('ESRI Shapefile')\n outShp = drv.CreateDataSource(vor_shp_path)\n osr_geographic_proj = osr.SpatialReference()\n osr_geographic_proj.ImportFromEPSG(4326)\n layer = outShp.CreateLayer('', osr_geographic_proj, ogr.wkbPolygon)\n layer.CreateField(ogr.FieldDefn('GRID_LAT', ogr.OFTReal))\n layer.CreateField(ogr.FieldDefn('GRID_LON', ogr.OFTReal))\n layerDefn = layer.GetLayerDefn()\n\n # find nodes surrounding polygon centroid\n # sort nodes in counterclockwise order\n # create polygon perimeter through nodes\n log(\"Building Voronoi polygons...\")\n # compute voronoi\n voronoi_manager = Voronoi(voronoi_centroids)\n voronoi_vertices = voronoi_manager.vertices\n voronoi_regions = voronoi_manager.regions\n for point_id, region_index in enumerate(voronoi_manager.point_region):\n vert_index_list = np.array(voronoi_regions[region_index])\n voronoi_centroid = voronoi_centroids[point_id]\n voronoi_poly_points = _get_voronoi_poly_points(vert_index_list,\n voronoi_vertices,\n voronoi_centroid)\n if len(voronoi_poly_points) == 4:\n poly = ogr.Geometry(ogr.wkbPolygon)\n ring = ogr.Geometry(ogr.wkbLinearRing)\n for node in voronoi_poly_points:\n ring.AddPoint(node[0], node[1])\n\n # grab first node to close ring\n ring.AddPoint(voronoi_poly_points[0][0], voronoi_poly_points[0][1])\n\n poly.AddGeometry(ring)\n feat = ogr.Feature(layerDefn)\n feat.SetField('GRID_LON', float(voronoi_centroid[0]))\n feat.SetField('GRID_LAT', float(voronoi_centroid[1]))\n feat.SetGeometry(poly)\n layer.CreateFeature(feat)", "def Reproject(x, y, in_grid = 4326, out_grid = 32737):\n \n inProj = Proj(init='epsg:'+str(in_grid))\n outProj = Proj(init='epsg:'+str(out_grid))\n \n \n x2,y2 = transform(inProj,outProj,x,y)\n \n return x2, y2", "def PolygonGeometry(Coords):\n area = 0.0\n peri = 0.0\n for i in range(np.shape(Coords)[0]-1):\n area = area + Coords[i,0]*Coords[i+1,1] - Coords[i+1,0]*Coords[i,1]\n peri = peri + ( (Coords[i+1,0] - Coords[i,0])**2 + (Coords[i+1,1]-Coords[i,1])**2 )**0.5\n area = area + 
Coords[np.shape(Coords)[0]-1,0] * Coords[0,1] - Coords[0,0] * Coords[np.shape(Coords)[0]-1,1]\n area = area*0.5\n\n return area, peri", "def polygon_to_multipolygon(geom):\n if geom.__class__.__name__ == 'Polygon':\n g = OGRGeometry(OGRGeomType('MultiPolygon'))\n g.add(geom)\n return g\n elif geom.__class__.__name__ == 'MultiPolygon':\n return geom\n else:\n raise ValueError('Geom is neither Polygon nor MultiPolygon.')", "def point_in_polygon_2d(polygon, point):\r\n return geometry.gmPointInPolygon2D(polygon, point)", "def range_project_polygon(axis_vector, polygon):\n\n vertices = polygon.points\n\n c = linalg.dot(axis_vector, vertices[0])\n min_c = c\n max_c = c\n\n for i in range(1, len(vertices)):\n\n # Compute the dot product between the axis vector and the vertex\n c = linalg.dot(axis_vector, vertices[i])\n\n # Check if the result is min/max\n if c < min_c:\n min_c = c\n elif c > max_c:\n max_c = c\n\n return min_c, max_c", "def polygonal_number(s, n):\n return (n*n*(s-2)-n*(s-4))/2", "def _multigeometry(self, ogr_geometry):\n\n geo_type = ogr_geometry.GetGeometryType()\n\n if geo_type == ogr.wkbPolygon:\n return ogr.ForceToMultiPolygon(ogr_geometry)\n elif geo_type == ogr.wkbPoint:\n return ogr.ForceToMultiPoint(ogr_geometry)\n elif geo_type in [ogr.wkbLineString, ogr.wkbLinearRing]:\n return ogr.ForceToMultiLineString(ogr_geometry)\n else:\n return ogr_geometry", "def geometry_n(self, n: int | ir.IntegerValue) -> GeoSpatialValue:\n return ops.GeoGeometryN(self, n).to_expr()", "def extent_geom(extent):\n ring = ogr.Geometry(ogr.wkbLinearRing)\n ring.AddPoint(extent[0], extent[3])\n ring.AddPoint(extent[2], extent[3])\n ring.AddPoint(extent[2], extent[1])\n ring.AddPoint(extent[0], extent[1])\n ring.CloseRings()\n polygon = ogr.Geometry(ogr.wkbPolygon)\n polygon.AddGeometry(ring)\n return polygon", "def envelope(self) -> ir.PolygonValue:\n return ops.GeoEnvelope(self).to_expr()", "def polygonal(resolution, in_vertices, out_vertices_list = None):\n in_vertices = [Point(in_vertices[k,0],in_vertices[k,1]) for k in range(in_vertices.shape[0])] \n\n domain = mshr.Polygon(in_vertices) # https://bitbucket.org/fenics-project/mshr/wiki/API/Polygon\n # Create polygon defined by the given vertices. 
Vertices must be in counter-clockwise order and free of self-intersections.\n \n if(out_vertices_list is not None):\n for out_vertices in out_vertices_list:\n out_vertices = [Point(out_vertices[k,0],out_vertices[k,1]) for k in range(out_vertices.shape[0])]\n domain -= mshr.Polygon(out_vertices)\n \n mesh=mshr.generate_mesh(domain, resolution)\n\n # TODO : add refined mesh\n # if(refine_mesh):\n # d = mesh.topology().dim()\n \n # class To_refine(SubDomain):\n # def inside(self, x, on_boundary):\n # return x[1]<=0 and x[1]>= -l_mot/2-h_grid-l_vacuum/4\n\n # to_refine = To_refine()\n # marker = MeshFunction(\"bool\", mesh, d, False)\n # to_refine.mark(marker, True)\n # mesh = refine(mesh,marker)\n\n return mesh", "def projection(self):\n return self.dataset.GetProjection() if self.dataset else None", "def grid_in_shape(up_lat,low_lat,left_long,right_long,shape,\n lat_resolution=15,long_resolution=60): \n \n longitudes = np.linspace(left_long,right_long,60)\n latitudes = np.linspace(low_lat,up_lat,15)\n prods = list(itertools.product(longitudes,latitudes))\n points = [shapely.geometry.Point(point) for point in prods]\n points_within = [point for point in points if shape.contains(point)]\n points_gdf = gpd.GeoDataFrame(geometry = points_within)\n \n return points_gdf", "def to_wkt(projstring: str):\n from osgeo import osr\n\n sr = osr.SpatialReference()\n sr.ImportFromProj4(projstring)\n return sr.ExportToWkt()", "def projection(poly1, dim, solver=None, abs_tol=ABS_TOL, verbose=0):\n if isinstance(poly1, Region):\n ret = Polytope()\n for i in range(len(poly1.list_poly)):\n p = projection(\n poly1.list_poly[i], dim,\n solver=solver, abs_tol=abs_tol)\n ret = ret + p\n return ret\n # flat ?\n if (poly1.dim < len(dim)) or is_empty(poly1):\n return poly1\n # `poly1` isn't flat\n poly_dim = poly1.dim\n dim = np.array(dim)\n org_dim = range(poly_dim)\n new_dim = dim.flatten() - 1\n del_dim = np.setdiff1d(org_dim, new_dim) # Index of dimensions to remove\n # logging\n logger.debug('polytope dim = ' + str(poly_dim))\n logger.debug('project on dims = ' + str(new_dim))\n logger.debug('original dims = ' + str(org_dim))\n logger.debug('dims to delete = ' + str(del_dim))\n mA, nA = poly1.A.shape\n # fewer rows than dimensions ?\n if mA < poly_dim:\n msg = 'fewer rows in A: ' + str(mA)\n msg += ', than polytope dimension: ' + str(poly_dim)\n logger.warning(msg)\n # enlarge A, b with zeros\n A = poly1.A.copy()\n poly1.A = np.zeros((poly_dim, poly_dim))\n poly1.A[0:mA, 0:nA] = A\n # stack\n poly1.b = np.hstack([poly1.b, np.zeros(poly_dim - mA)])\n logger.debug('m, n = ' + str((mA, nA)))\n # Compute cheby ball in lower dim to see if projection exists\n norm = np.sum(poly1.A * poly1.A, axis=1).flatten()\n norm[del_dim] = 0\n c = np.zeros(len(org_dim) + 1, dtype=float)\n c[len(org_dim)] = -1\n G = np.hstack([poly1.A, norm.reshape(norm.size, 1)])\n h = poly1.b\n sol = lpsolve(c, G, h)\n if sol['status'] != 0:\n # Projection not fulldim\n return Polytope()\n if sol['x'][-1] < abs_tol:\n return Polytope()\n # select projection solver\n if solver == \"esp\":\n return projection_esp(poly1, new_dim, del_dim)\n elif solver == \"exthull\":\n return projection_exthull(poly1, new_dim)\n elif solver == \"fm\":\n return projection_fm(poly1, new_dim, del_dim)\n elif solver == \"iterhull\":\n return projection_iterhull(poly1, new_dim)\n elif solver is not None:\n logger.warning('unrecognized projection solver \"' +\n str(solver) + '\".')\n # `solver` undefined or unknown\n # select method based on dimension criteria\n if 
len(del_dim) <= 2:\n logger.debug(\"projection: using Fourier-Motzkin.\")\n return projection_fm(poly1, new_dim, del_dim)\n elif len(org_dim) <= 4:\n logger.debug(\"projection: using exthull.\")\n return projection_exthull(poly1, new_dim)\n else:\n logger.debug(\"projection: using iterative hull.\")\n return projection_iterhull(poly1, new_dim)", "def point_in_polygon(pnt, poly): # pnt_in_poly(pnt, poly): #\r\n x, y = pnt\r\n N = len(poly)\r\n for i in range(N):\r\n x0, y0, xy = [poly[i][0], poly[i][1], poly[(i + 1) % N]]\r\n c_min = min([x0, xy[0]])\r\n c_max = max([x0, xy[0]])\r\n if c_min < x <= c_max:\r\n p = y0 - xy[1]\r\n q = x0 - xy[0]\r\n y_cal = (x - x0) * p / q + y0\r\n if y_cal < y:\r\n return True\r\n return False", "def buildMultiPolygon(self,polygonList):\r\n geomlist=[]\r\n for geom in polygonList:\r\n # Cut 'MULTIPOLYGON(*) if we got one'\r\n if geom.exportToWkt()[:12]==\"MULTIPOLYGON\":\r\n geomWkt=geom.exportToWkt()[13:len(geom.exportToWkt())-1]\r\n else:\r\n # Cut 'POLYGON' if we got one\r\n geomWkt=geom.exportToWkt()[7:]\r\n geomlist.append(str(geomWkt))\r\n multiGeomWKT=\"MULTIPOLYGON(\"\r\n multiGeomWKT +=\",\".join(geomlist)\r\n multiGeomWKT+=\")\"\r\n #if self.debug: print multiGeomWKT\r\n multiGeom=QgsGeometry.fromWkt(multiGeomWKT)\r\n return multiGeom", "def point_inside_polygon(xxx_todo_changeme,poly):\n (x,y) = xxx_todo_changeme\n n = len(poly)\n inside = False\n\n p1x, p1y = poly[0]\n for i in range(n + 1):\n p2x, p2y = poly[i % n]\n if y > min(p1y, p2y):\n if y <= max(p1y, p2y):\n if x <= max(p1x, p2x):\n if p1y != p2y:\n xinters = (y - p1y) * (p2x - p1x) / (p2y - p1y) + p1x\n if p1x == p2x or x <= xinters:\n inside = not inside\n p1x, p1y = p2x, p2y\n\n return inside", "def test_geography_area(self):\n # SELECT ST_Area(poly) FROM geogapp_zipcode WHERE code='77002';\n z = Zipcode.objects.annotate(area=Area(\"poly\")).get(code=\"77002\")\n # Round to the nearest thousand as possible values (depending on\n # the database and geolib) include 5439084, 5439100, 5439101.\n rounded_value = z.area.sq_m\n rounded_value -= z.area.sq_m % 1000\n self.assertEqual(rounded_value, 5439000)", "def projection(self):\n pass", "def projection(self) -> Projection:\n return self._projection", "def getCRS(shp):\r\n if not isinstance(shp, geopandas.geodataframe.GeoDataFrame):\r\n shp = geopandas.read_file(shp)\r\n return shp.crs['init'][5:]", "def to_geometry(self, to_crs=None):\n from geopandas import GeoDataFrame\n from shapely.geometry import Polygon\n out = GeoDataFrame()\n geoms = []\n ii = []\n jj = []\n xx = self.corner_grid.x0 + np.arange(self.nx+1) * self.dx\n yy = self.corner_grid.y0 + np.arange(self.ny+1) * self.dy\n for j, (y0, y1) in enumerate(zip(yy[:-1], yy[1:])):\n for i, (x0, x1) in enumerate(zip(xx[:-1], xx[1:])):\n coords = [(x0, y0), (x1, y0), (x1, y1), (x0, y1), (x0, y0)]\n geoms.append(Polygon(coords))\n jj.append(j)\n ii.append(i)\n out['j'] = jj\n out['i'] = ii\n out['geometry'] = geoms\n out.crs = self.proj.srs\n\n if check_crs(to_crs):\n transform_geopandas(out, to_crs=to_crs, inplace=True)\n return out", "def loadProjection(filepath):\n \n from osgeo import gdal\n \n ds = gdal.Open(filepath, 0)\n \n return ds.GetProjection()" ]
[ "0.6414271", "0.63134813", "0.62757397", "0.60515285", "0.603324", "0.59443223", "0.59352905", "0.5911644", "0.58656824", "0.57909673", "0.5745204", "0.57246166", "0.57246166", "0.5703267", "0.5702607", "0.5696144", "0.56794995", "0.5671271", "0.56611097", "0.5648878", "0.56116", "0.5570832", "0.5559011", "0.55309105", "0.5529", "0.54962236", "0.549105", "0.54810816", "0.5416324", "0.54125315", "0.53991854", "0.538976", "0.53884447", "0.53662264", "0.53621334", "0.53527373", "0.5349008", "0.53219086", "0.53216976", "0.5307751", "0.5299372", "0.52987826", "0.52978253", "0.5261178", "0.5225457", "0.5218658", "0.5216089", "0.52136993", "0.5205852", "0.5199294", "0.517719", "0.51699376", "0.5169602", "0.51678246", "0.51497734", "0.514299", "0.5138539", "0.5109274", "0.5104043", "0.50982046", "0.50972277", "0.5073277", "0.50654393", "0.5056852", "0.5047502", "0.5047349", "0.5044086", "0.50377107", "0.5022727", "0.5022519", "0.50149065", "0.5012515", "0.50109744", "0.5003232", "0.4997895", "0.49978578", "0.4992337", "0.49899206", "0.49868545", "0.49837077", "0.4973367", "0.49722368", "0.4964408", "0.49613532", "0.49555138", "0.49530283", "0.49486625", "0.4945221", "0.4941643", "0.49349958", "0.49227574", "0.49110088", "0.49092188", "0.4908139", "0.49067572", "0.49042237", "0.49001166", "0.48975894", "0.4893685", "0.48895782" ]
0.693248
0
Make a pair of osr.CoordinateTransformation objects for transforming between the two given EPSG projections. Return a tuple (tr1to2, tr2to1).
def makeTransformations(epsg1, epsg2):
    sr1 = osr.SpatialReference()
    sr1.ImportFromEPSG(epsg1)
    preventGdal3axisSwap(sr1)
    sr2 = osr.SpatialReference()
    sr2.ImportFromEPSG(epsg2)
    preventGdal3axisSwap(sr2)
    tr1to2 = osr.CoordinateTransformation(sr1, sr2)
    tr2to1 = osr.CoordinateTransformation(sr2, sr1)
    return (tr1to2, tr2to1)
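A short usage sketch (illustrative only; it assumes the GDAL/OGR Python bindings are installed and that preventGdal3axisSwap(), defined elsewhere in the same module, keeps the traditional x=longitude, y=latitude axis order):

    from osgeo import ogr

    llToUtm, utmToLl = makeTransformations(4326, 32752)   # WGS84 lat/long <-> UTM zone 52 South
    point = ogr.CreateGeometryFromWkt("POINT (130.5 -20.5)")
    point.Transform(llToUtm)    # coordinates now in UTM metres
    point.Transform(utmToLl)    # and back to lat/long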
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def transform_proj(p1, p2, x, y, nocopy=False):\n\n try:\n # This always makes a copy, even if projections are equivalent\n return _transform_internal(p1, p2, x, y, always_xy=True)\n except TypeError:\n if proj_is_same(p1, p2):\n if nocopy:\n return x, y\n else:\n return copy.deepcopy(x), copy.deepcopy(y)\n\n return _transform_internal(p1, p2, x, y)", "def coordinate_transform_proj4(proj1, proj2, coords):\n\n srs1 = osr.SpatialReference()\n srs2 = osr.SpatialReference()\n srs1.ImportFromProj4(proj1)\n srs2.ImportFromProj4(proj2)\n\n return coordinate_transform(srs1, srs2, coords)", "def transform_coord(proj1, proj2, x, y):\r\n\r\n # Set full EPSG projection strings\r\n proj1 = pyproj.Proj(\"+init=EPSG:\"+proj1)\r\n proj2 = pyproj.Proj(\"+init=EPSG:\"+proj2)\r\n\r\n # Convert coordinates\r\n return pyproj.transform(proj1, proj2, x, y)", "def transform_coord(proj1, proj2, x, y):\r\n\r\n # Set full EPSG projection strings\r\n proj1 = pyproj.Proj(\"+init=EPSG:\"+proj1)\r\n proj2 = pyproj.Proj(\"+init=EPSG:\"+proj2)\r\n\r\n # Convert coordinates\r\n return pyproj.transform(proj1, proj2, x, y)", "def transform_coord(proj1, proj2, x, y):\r\n # Set full EPSG projection strings\r\n proj1 = pyproj.Proj(\"+init=EPSG:\"+proj1)\r\n proj2 = pyproj.Proj(\"+init=EPSG:\"+proj2)\r\n\r\n # Convert coordinates\r\n return pyproj.transform(proj1, proj2, x, y)", "def create_osr_transform(src_epsg: int, dst_epsg: int):\n src_srs = osr.SpatialReference()\n src_srs.ImportFromEPSG(src_epsg)\n dst_srs = osr.SpatialReference()\n dst_srs.ImportFromEPSG(dst_epsg)\n return osr.CoordinateTransformation(src_srs, dst_srs)", "def __convert(args):\n a, b, zone, ellipsoid, datum, inverse = args\n projection = Proj(\"+proj=utm +zone={}, +ellps={} +datum={} +units=m +no_defs\".format(zone, ellipsoid, datum))\n c, d = projection(a, b, inverse=inverse)\n\n return c, d", "def __affine_geo_transformation(x, y, gtr):\n\n # https://gdal.org/user/raster_data_model.html#affine-geotransform\n # Affine transformation rewritten for rasterio:\n gtr_x = gtr[2] + (x + 0.5) * gtr[0] + (y + 0.5) * gtr[1]\n gtr_y = gtr[5] + (x + 0.5) * gtr[3] + (y + 0.5) * gtr[4]\n\n return gtr_x, gtr_y", "def Reproject(x, y, in_grid = 4326, out_grid = 32737):\n \n inProj = Proj(init='epsg:'+str(in_grid))\n outProj = Proj(init='epsg:'+str(out_grid))\n \n \n x2,y2 = transform(inProj,outProj,x,y)\n \n return x2, y2", "def coordinate_transform(srs1, srs2, coords):\n\n # create transformation helper\n converter = osr.CoordinateTransformation(srs1, srs2)\n\n return np.asarray(converter.TransformPoints(coords))[:, :-1]", "def triangulate_pts(proj1, proj2, pts1T, pts2T):\n pts1 = pts1T.T\n pts2 = pts2T.T\n pts3d = np.zeros((pts1.shape[0], 4))\n for i in range(pts1.shape[0]):\n A = np.zeros((4, 4))\n A[0, :] = pts1[i, 0] * proj1[2, :] - proj1[0, :]\n A[1, :] = pts1[i, 1] * proj1[2, :] - proj1[1, :]\n A[2, :] = pts2[i, 0] * proj2[2, :] - proj2[0, :]\n A[3, :] = pts2[i, 1] * proj2[2, :] - proj2[1, :]\n U, sigma, VT = np.linalg.svd(A)\n pts3d[i, :] = VT[-1, :]\n return pts3d.T", "def coord_space(\n a0: numpy.ndarray, a1: numpy.ndarray, a2: numpy.ndarray, rev: bool = False\n) -> Tuple[numpy.ndarray, Optional[numpy.ndarray]]:\n # dbg = False\n # if dbg:\n # print(a0.transpose())\n # print(a1.transpose())\n # print(a2.transpose())\n\n # a0 = acs[0]\n # a1 = acs[1]\n # a2 = acs[2]\n\n global gtm\n global gmry\n global gmrz, gmrz2\n\n tm = gtm\n mry = gmry\n mrz = gmrz\n mrz2 = gmrz2\n\n # tx acs[1] to origin\n # tm = homog_trans_mtx(-a1[0][0], -a1[1][0], -a1[2][0])\n 
set_homog_trans_mtx(-a1[0], -a1[1], -a1[2], tm)\n\n # directly translate a2 using a1\n p = a2 - a1\n sc = get_spherical_coordinates(p)\n\n # if dbg:\n # print(\"p\", p.transpose())\n # print(\"sc\", sc)\n\n # mrz = homog_rot_mtx(-sc[1], \"z\") # rotate translated a2 -azimuth about Z\n set_Z_homog_rot_mtx(-sc[1], mrz)\n # mry = homog_rot_mtx(-sc[2], \"y\") # rotate translated a2 -polar_angle about Y\n set_Y_homog_rot_mtx(-sc[2], mry)\n\n # mt completes a1-a2 on Z-axis, still need to align a0 with XZ plane\n # mt = mry @ mrz @ tm # python 3.5 and later\n mt = gmry.dot(gmrz.dot(gtm))\n\n # if dbg:\n # print(\"tm:\\n\", tm)\n # print(\"mrz:\\n\", mrz)\n # print(\"mry:\\n\", mry)\n # # print(\"mt \", mt)\n\n p = mt.dot(a0)\n\n # if dbg:\n # print(\"mt:\\n\", mt, \"\\na0:\\n\", a0, \"\\np:\\n\", p)\n\n # need azimuth of translated a0\n # sc2 = get_spherical_coordinates(p)\n # print(sc2)\n azimuth2 = _get_azimuth(p[0], p[1])\n\n # rotate a0 -azimuth2 about Z to align with X\n # mrz2 = homog_rot_mtx(-azimuth2, \"z\")\n set_Z_homog_rot_mtx(-azimuth2, mrz2)\n\n # mt = mrz2 @ mt\n mt = gmrz2.dot(mt)\n\n # if dbg:\n # print(\"mt:\", mt, \"\\na0:\", a0, \"\\np:\", p)\n # # print(p, \"\\n\", azimuth2, \"\\n\", mrz2, \"\\n\", mt)\n\n # if dbg:\n # print(\"mt:\\n\", mt)\n # print(\"<<<<<<==============================\")\n\n if not rev:\n return mt, None\n\n # rev=True, so generate the reverse transformation\n\n # rotate a0 theta about Z, reversing alignment with X\n # mrz2 = homog_rot_mtx(azimuth2, \"z\")\n set_Z_homog_rot_mtx(azimuth2, mrz2)\n # rotate a2 phi about Y\n # mry = homog_rot_mtx(sc[2], \"y\")\n set_Y_homog_rot_mtx(sc[2], mry)\n # rotate a2 theta about Z\n # mrz = homog_rot_mtx(sc[1], \"z\")\n set_Z_homog_rot_mtx(sc[1], mrz)\n # translation matrix origin to a1\n # tm = homog_trans_mtx(a1[0][0], a1[1][0], a1[2][0])\n set_homog_trans_mtx(a1[0], a1[1], a1[2], tm)\n\n # mr = tm @ mrz @ mry @ mrz2\n mr = gtm.dot(gmrz.dot(gmry.dot(gmrz2)))\n # mr = numpy.dot(tm, numpy.dot(mrz, numpy.dot(mry, mrz2)))\n\n return mt, mr", "def reproject(wcs1, wcs2):\n\n if isinstance(wcs1, fitswcs.WCS):\n forward_transform = wcs1.all_pix2world\n elif isinstance(wcs1, gwcs.WCS):\n forward_transform = wcs1.forward_transform\n elif issubclass(wcs1, Model):\n forward_transform = wcs1\n else:\n raise TypeError(\"Expected input to be astropy.wcs.WCS or gwcs.WCS \"\n \"object or astropy.modeling.Model subclass\")\n\n if isinstance(wcs2, fitswcs.WCS):\n backward_transform = wcs2.all_world2pix\n elif isinstance(wcs2, gwcs.WCS):\n backward_transform = wcs2.backward_transform\n elif issubclass(wcs2, Model):\n backward_transform = wcs2.inverse\n else:\n raise TypeError(\"Expected input to be astropy.wcs.WCS or gwcs.WCS \"\n \"object or astropy.modeling.Model subclass\")\n\n def _reproject(x, y):\n sky = forward_transform(x, y)\n flat_sky = []\n for axis in sky:\n flat_sky.append(axis.flatten())\n # Filter out RuntimeWarnings due to computed NaNs in the WCS\n warnings.simplefilter(\"ignore\")\n det = backward_transform(*tuple(flat_sky))\n warnings.resetwarnings()\n det_reshaped = []\n for axis in det:\n det_reshaped.append(axis.reshape(x.shape))\n return tuple(det_reshaped)\n return _reproject", "def compose_transform2(alpha1, sx1, sy1, scale1, alpha2, sx2, sy2, scale2):\n\n\tt1 = Transform({\"type\":\"2D\",\"alpha\":alpha1,\"tx\":sx1,\"ty\":sy1,\"mirror\":0,\"scale\":scale1})\n\tt2 = Transform({\"type\":\"2D\",\"alpha\":alpha2,\"tx\":sx2,\"ty\":sy2,\"mirror\":0,\"scale\":scale2})\n\ttt = t2*t1\n\td = 
tt.get_params(\"2D\")\n\treturn d[ \"alpha\" ], d[ \"tx\" ], d[ \"ty\" ], d[ \"scale\" ]", "def reproject(wcs1, wcs2, origin=0):\n\n def _reproject(x, y):\n sky = wcs1.forward_transform(x, y)\n return wcs2.backward_transform(*sky)\n return _reproject", "def ReprojectCoords(coords,src_srs,tgt_srs):\n trans_coords = []\n transform = osr.CoordinateTransformation( src_srs, tgt_srs)\n for x,y in coords:\n x,y,z = transform.TransformPoint(x,y)\n trans_coords.append([x,y])\n return trans_coords", "def combine_params2(alpha1, sx1, sy1, mirror1, alpha2, sx2, sy2, mirror2):\n\n\tt1 = Transform({\"type\":\"2D\",\"alpha\":alpha1,\"tx\":sx1,\"ty\":sy1,\"mirror\":mirror1,\"scale\":1.0})\n\tt2 = Transform({\"type\":\"2D\",\"alpha\":alpha2,\"tx\":sx2,\"ty\":sy2,\"mirror\":mirror2,\"scale\":1.0})\n\ttt = t2*t1\n\td = tt.get_params(\"2D\")\n\treturn d[ \"alpha\" ], d[ \"tx\" ], d[ \"ty\" ], d[ \"mirror\" ]", "def getAffineTransform(self, coord1, coord2):\n num_coords = 2 * len(coord1)\n A = np.zeros((num_coords, 6))\n b = []\n for point2 in coord2:\n b.append(float(point2[0]))\n b.append(float(point2[1]))\n b = np.asarray(b)\n i = 0\n for point1 in coord1:\n A[i, 0:2] = point1[0:2]\n A[i, 2] = 1\n A[i+1, 3:5] = point1[0:2]\n A[i+1, 5] = 1\n i += 2\n A = np.asarray(A)\n b = np.asarray(b)\n x = np.matmul(np.matmul(np.linalg.inv(np.matmul(A.T, A)), A.T), b.T)\n self.depth2rgb_affine = np.reshape(x, (2, 3))\n csv.writer(open(\"depth2rgb_affine.cfg\", \"w+\", newline=''), delimiter=',').writerows(self.depth2rgb_affine)\n # else:\n # x = np.vstack([np.reshape(x,(2,3)),[0,0,1]])\n # self.cam_ext_mat = x\n # A = [point[i,j+0:j+3].astype(np.float32) for i,point in enumerate(coord1) if i%2 == 0]\n # pts1 = coord1[0:3].astype(np.float32)\n # pts2 = coord2[0:3].astype(np.float32)\n # print(cv2.getAffineTransform(pts1, pts2))\n # return cv2.getAffineTransform(pts1, pts2)", "def pose_pair_construct(p1,n1,p2,n2):\n v1 = p2-p1; v1 /= np.linalg.norm(v1)\n R1 = tf_construct(n1,v1)\n return RigidTransform.from_Rt(R1, p1)", "def getAffineTransform_deprecated(self, coord1, coord2):\n pts1 = coord1[0:3].astype(np.float32)\n pts2 = coord2[0:3].astype(np.float32)\n return cv2.getAffineTransform(pts1, pts2)", "def _build_geotransform(self, i, j):\n assert isinstance(i, int), (\"i is not an integer\")\n assert isinstance(j, int), (\"j is not an integer\")\n x_origin, x_res, x_ignore, y_origin, y_ignore, y_res = (\n self.image_metadata.geotransform)\n # integer conversion to reduce floating point error\n new_x_origin = self._calculate_origin(x_origin, x_res, self.offset, j)\n new_y_origin = self._calculate_origin(y_origin, y_res, self.offset, i)\n geotransform = (new_x_origin, x_res, x_ignore, new_y_origin, \n y_ignore, y_res) \n return geotransform", "def _transform_coordinates(rectangle, Q=np.matrix(((1, 1), (-1, 1)))):\n return tuple((rectangle[0]*Q).A1), tuple((rectangle[1]*Q).A1)", "def transform_pnt(pnt, inEPSG, outEPSG):\r\n x1, y1 = pnt[0], pnt[1] #added benefit of checking pnt format\r\n inProj = pyproj.Proj(init='epsg:{}'.format(inEPSG))\r\n outProj = pyproj.Proj(init='epsg:{}'.format(outEPSG))\r\n\r\n return pyproj.transform(inProj, outProj, x1, y1)", "def AffineTransform( from_pts, to_pts ):\n \n # check that there are match points\n if len(from_pts) != len(to_pts) or len(to_pts)<1:\n print \"from_pts and to_pts must be of same size.\"\n return False\n\n # check the dimensions\n dim = len(from_pts[0]) # num of dimensions\n if len(from_pts) < dim:\n print \"Too few points => under-determined system.\"\n return False\n elif 
len(from_pts) > dim + 1:\n print \"Too many points => over-determined system.\"\n return False\n\n \n #segregate the x and y coordinages\n from_pts_x, from_pts_y = zip(*from_pts)\n to_pts_x, to_pts_y = zip(*to_pts)\n \n #create the Matricies for processing\n I = np.matrix([from_pts_x, from_pts_y, [1,1,1]])\n P = np.matrix([to_pts_x, to_pts_y])\n \n #Calculate the 2D affine transform matrix (A)\n A = P * linalg.pinv(I) \n\n # Make a result object\n class Transformation:\n \"\"\"Result object that represents the transformation\n from affine fitter.\"\"\"\n\n def To_Str(self):\n res = \"\"\n for j in range(dim):\n str1 = \"x%d' = \" % j\n for i in range(dim):\n str1 +=\"x%d * %f + \" % (i, A[i][j+dim+1])\n str1 += \"%f\" % A[dim][j+dim+1]\n res += str1 + \"\\n\"\n return res\n\n def Transform(self, pt_x, pt_y):\n pt_vector = np.matrix([[pt_x], [pt_y], [1]])\n transformed_pt = A * pt_vector\n return map(itemgetter(0), transformed_pt.tolist())\n return Transformation()", "def register_com(vol_a: Volume, vol_b: Volume) -> Tuple[Volume, Volume]:\n from dipy.align.imaffine import transform_centers_of_mass\n\n affine = transform_centers_of_mass(vol_a, vol_a.grid_to_world, vol_b, vol_b.grid_to_world)\n\n vol_b.world_transform[:] = np.array(affine.affine)\n return vol_a, vol_b", "def convertCoord(lon, lat, inEPSG, outEPSG):\n from pyproj import Proj, transform\n inProj = Proj(init='epsg:'+str(inEPSG))\n outProj = Proj(init='epsg:'+str(outEPSG))\n x, y = transform(inProj, outProj, lon, lat)\n return x, y\n # epsg:4326 WGS84\n # epsg:2950 MTM8\n # epsg:6622 Quebec Lambert", "def cartesian_to_geographical(coordinate_triples):\n if len(coordinate_triples.shape) == 1:\n x = coordinate_triples[0]\n y = coordinate_triples[1]\n z = coordinate_triples[2]\n elif len(coordinate_triples.shape) == 2:\n assert coordinate_triples.shape[1] == 3\n x = coordinate_triples[:, 0]\n y = coordinate_triples[:, 1]\n z = coordinate_triples[:, 2]\n radius = np.sqrt(x**2 + y**2 + z**2)\n longitudes = np.arctan2(y, x)\n latitudes = np.arcsin(z/radius)\n return (latitudes, longitudes)", "def _get_pt_tuple(pnt1, pnt2):\n return tuple(map(_map_x_dim(tuple(pnt1)), pnt2))", "def coord_transform_from_wkt(proj_ref_wkt, new_cs_wkt):\n # Transform the features into WGS-84\n # What is the NITF/ophoto Referenced in?\n old_cs = osr.SpatialReference()\n old_cs.ImportFromWkt(proj_ref_wkt)\n\n # How about going to WGS-84?\n new_cs = osr.SpatialReference()\n new_cs.ImportFromWkt(new_cs_wkt)\n\n # The actual Tranfromation class/object\n transform = osr.CoordinateTransformation(old_cs, new_cs)\n return transform", "def _translate_coordinate(self, x1, y1, x2, y2):\n\n return (x1 + x2, y1 + y2)", "def transform_geometry(geom, crs=wgs84, to_crs=wgs84):\n\n from_crs = check_crs(crs)\n to_crs = check_crs(to_crs)\n\n if isinstance(to_crs, pyproj.Proj) and isinstance(from_crs, pyproj.Proj):\n project = partial(transform_proj, from_crs, to_crs)\n elif isinstance(to_crs, Grid):\n project = partial(to_crs.transform, crs=from_crs)\n elif isinstance(from_crs, Grid):\n project = partial(from_crs.ij_to_crs, crs=to_crs)\n else:\n raise NotImplementedError()\n\n from shapely.ops import transform\n return transform(project, geom)", "def get_affine_transform_2d(gps_coords, pdr_coords):\n X = np.array(pdr_coords)\n Xp = np.array(gps_coords)\n\n # Estimate 2d similarity to align to GPS\n T = tf.affine_matrix_from_points(X.T[:2], Xp.T[:2], shear=False)\n s = np.linalg.det(T[:2, :2]) ** 0.5\n A = np.eye(3)\n A[:2, :2] = T[:2, :2] / s\n b = np.array([\n T[0, 2],\n T[1, 
2],\n Xp[:, 2].mean() - s * X[:, 2].mean() # vertical alignment\n ])\n\n return s, A, b", "def transform_geometries(datasource, src_epsg, dst_epsg):\n # Part 1\n src_srs = osr.SpatialReference()\n src_srs.ImportFromEPSG(src_epsg)\n dst_srs = osr.SpatialReference()\n dst_srs.ImportFromEPSG(dst_epsg)\n transformation = osr.CoordinateTransformation(src_srs, dst_srs)\n layer = datasource.GetLayerByIndex(0)\n \n # Part 2\n geoms = []\n layer.ResetReading()\n for feature in layer:\n geom = feature.GetGeometryRef().Clone()\n geom.Transform(transformation)\n geoms.append(geom)\n return geoms", "def transform(self, ((a, b), (c, d))=((1, 1), (-1, 1)), aligned_with_grid=False):\n (x0, y0), (x1, y1) = self.vertices\n return type(self)((int(a * x0 + c * y0), int(b * x0 + d * y0)),\n (int(a * x1 + c * y1), int(b * x1 + d * y1)),\n aligned_with_grid=aligned_with_grid)", "def convert_spatial_reference_system(coordinates, source_epsg, target_epsg):\n source_spatial_reference = osr.SpatialReference()\n source_spatial_reference.ImportFromEPSG(source_epsg)\n target_spatial_reference = osr.SpatialReference()\n target_spatial_reference.ImportFromEPSG(target_epsg)\n coordinate_transformation = osr.CoordinateTransformation(source_spatial_reference, target_spatial_reference)\n\n transformed_coordinates = []\n for coordinate in coordinates:\n point = ogr.Geometry(ogr.wkbPoint)\n point.AddPoint(coordinate[0], coordinate[1])\n point.Transform(coordinate_transformation)\n transformed_coordinates.append((point.GetX(), point.GetY()))\n return transformed_coordinates", "def coordinate_pairs(lat_axis, lon_axis):\n \n lon_mesh, lat_mesh = numpy.meshgrid(lon_axis, lat_axis) # This is the correct order\n \n return lat_mesh.flatten(), lon_mesh.flatten()", "def pyr_point_translator(x, y, org_l, dest_l):\n dest_x = (2.0 ** (org_l - dest_l)) * x\n dest_y = (2.0 ** (org_l - dest_l)) * y\n return np.array([dest_x, dest_y]).transpose()", "def convert_coordinate_system_2d(x, z):\n\n return x, -z", "def transform(self, X, Y):\n\n X_star = self.X_tranform.dot(X.T).T\n Y_star = self.Y_tranform.dot(Y.T).T\n\n return X_star, Y_star", "def itkTranslationTransformD2_cast(*args):\n return _itkTranslationTransformPython.itkTranslationTransformD2_cast(*args)", "def get_affine_transform(gps_coords, pdr_coords):\n # Compute similarity Xp = s A X + b\n X = np.array(pdr_coords)\n Xp = np.array(gps_coords)\n T = tf.superimposition_matrix(X.T, Xp.T, scale=True)\n\n A, b = T[:3, :3], T[:3, 3]\n s = np.linalg.det(A)**(1. 
/ 3)\n A /= s\n return s, A, b", "def compose_transform3(phi1,theta1,psi1,sx1,sy1,sz1,scale1,phi2,theta2,psi2,sx2,sy2,sz2,scale2):\n\n\tR1 = Transform({\"type\":\"spider\",\"phi\":float(phi1),\"theta\":float(theta1),\"psi\":float(psi1),\"tx\":float(sx1),\"ty\":float(sy1),\"tz\":float(sz1),\"mirror\":0,\"scale\":float(scale1)})\n\tR2 = Transform({\"type\":\"spider\",\"phi\":float(phi2),\"theta\":float(theta2),\"psi\":float(psi2),\"tx\":float(sx2),\"ty\":float(sy2),\"tz\":float(sz2),\"mirror\":0,\"scale\":float(scale2)})\n\tRcomp=R2*R1\n\td = Rcomp.get_params(\"spider\")\n\treturn d[\"phi\"],d[\"theta\"],d[\"psi\"],d[\"tx\"],d[\"ty\"],d[\"tz\"],d[\"scale\"]", "def map_coordinates(self,geometry):\n\t\tg = self.geomatrix\n\t\tdef project_coord(x,y,z=None):\n\t\t\tx = g[0] + g[1] * x + g[2] * y\n\t\t\ty = g[3] + g[4] * x + g[5] * y\n\t\t\tif z is None:\n\t\t\t\treturn x,y\n\t\t\telse:\n\t\t\t\treturn x,y,z\n\t\treturn transform(project_coord, geometry)", "def reproject_coordinates(x_in, y_in, spatial_reference_source, spatial_reference_target=None): \n if spatial_reference_target is not None:\n pass\n else:\n spatial_reference_target = osr.SpatialReference()\n spatial_reference_target.ImportFromEPSG(4326) \n pass\n \n if int(osgeo.__version__[0]) >= 3:\n # GDAL 3 changes axis order: https://github.com/OSGeo/gdal/issues/1546\n \n spatial_reference_source.SetAxisMappingStrategy(osgeo.osr.OAMS_TRADITIONAL_GIS_ORDER)\n spatial_reference_target.SetAxisMappingStrategy(osgeo.osr.OAMS_TRADITIONAL_GIS_ORDER)\n\n \n pTransform = osr.CoordinateTransformation( spatial_reference_source, spatial_reference_target)\n \n x_new,y_new, z = pTransform.TransformPoint( x_in,y_in)\n \n return x_new,y_new", "def test_geotransform2resolution(self):\n\n for gt in GEOTRANSFORMS:\n res = geotransform2resolution(gt, isotropic=False)\n assert len(res) == 2\n assert numpy.allclose(res[0], gt[1], rtol=0, atol=1.0e-12)\n assert numpy.allclose(res[1], - gt[5], rtol=0, atol=1.0e-12)\n\n res = geotransform2resolution(gt, isotropic=True)\n assert numpy.allclose(res, gt[1], rtol=0, atol=1.0e-12)\n assert numpy.allclose(res, - gt[5], rtol=0, atol=1.0e-12)", "def cross_2d(origin, a, b):\r\n return geometry.gmCross2D(origin, a, b)", "def get_transformation(markers):\r\n # get bounds of markers\r\n q1 = markers[1][1]\r\n q2 = markers[2][0]\r\n q3 = markers[2][3]\r\n q4 = markers[1][2]\r\n src_rect = np.array([q1, q2, q3, q4], np.float32)\r\n \r\n # get bounds of destination markers\r\n box_ratio = KEYBOARD_DIM[0] / MARKER_DIM\r\n box_h = math.hypot(q3[0] - q2[0], q3[1] - q2[1])\r\n box_w = box_ratio * box_h\r\n \r\n r1 = [0, 0]\r\n r2 = [box_w, 0]\r\n r3 = [box_w, box_h]\r\n r4 = [0, box_h]\r\n dest_rect = np.array([r1, r2, r3, r4], np.float32)\r\n \r\n # get expected height of keyboard + box height\r\n keyboardbox_ratio = (KEYBOARD_DIM[1] + MARKER_DIM)/ KEYBOARD_DIM[0]\r\n expected_h = keyboardbox_ratio * box_w\r\n \r\n # get perspective transformation matrix\r\n M = cv2.getPerspectiveTransform(src_rect, dest_rect)\r\n # apply y shift\r\n for j in range(3):\r\n M[1][j] += M[2][j] * -box_h\r\n \r\n return M, (math.ceil(box_w), math.ceil(expected_h - box_h))", "def getAffineTransform(self, coord1, coord2):\n # generate coord1 into A\n mat_A = np.zeros((2*coord1.shape[0], 6))\n coord1 = np.hstack([coord1, np.ones((coord1.shape[0], 1))])\n for i in range(coord1.shape[0]):\n row = coord1[i,:]\n row_block = block_diag(row, row)\n assert(row_block.shape == (2,6))\n mat_A[2*i:2*i+2, :] = row_block\n \n # generate coord2 into b\n vec_b = 
coord2.reshape(-1,1)\n\n # solve the least square\n pseudo_inv = np.linalg.inv(np.matmul(mat_A.T, mat_A))\n pseudo_inv = np.matmul(pseudo_inv, mat_A.T)\n affine_mat = np.matmul(pseudo_inv, vec_b)\n assert(affine_mat.shape == (6,1))\n \n return affine_mat.reshape(2,-1)", "def combine_trans_projection(tx: float, ty: float, rot: float, mag: float, x: float, y: float, z: float, phi: float, the: float, psi: float, tiltangle: float, dim: float, binning: int, particle_dim: int = 200):\n from numpy import cos, sin, pi\n\n # Calculates the inverse transformation matrix of the projection alignment transformations\n alpha = -rot * pi/180\n c = cos(alpha)\n s = sin(alpha)\n\n rotate = np.matrix([[c, s, 0], [-s, c, 0], [0, 0, 1]])\n magnify = np.matrix([[mag, 0, 0], [0, mag, 0], [0, 0, 1]])\n translate = np.matrix([[1, 0, tx], [0, 1, ty], [0, 0, 1]])\n\n align_transformations = np.linalg.inv(rotate * magnify * translate)\n\n # Map the 3D position to a 2D position on the projection of the tiltangle\n x = x * binning\n y = y * binning\n z = z * binning\n\n aligned_y = y # assume the rotation axis is around y\n aligned_x = (cos(tiltangle * pi / 180) * (x - dim / 2) -\n sin(tiltangle * pi / 180) * (z - dim / 2)) + dim / 2\n\n # Use the projection alignment transformations to map this 2D position to a 2D position on the raw projections\n aligned_pos = np.matrix([[aligned_x - dim/2], [aligned_y - dim/2], [1]])\n raw_pos = align_transformations * aligned_pos\n\n # Calculate the rotation matrix for the template, a combination of the particle rotation and the tilt angle\n template_3d_rotation = generate_rotation_matrix(0, tiltangle, 0) * generate_rotation_matrix(\n phi, the, psi) * matrix_rotate_3d_z(rot) * matrix_magnify_3d(mag)\n\n # Merge this matrix with the projection transformations\n merged_matrix = template_3d_rotation\n\n return (align_transformations, (raw_pos.item(0, 0) + dim/2, raw_pos.item(1, 0) + dim/2), (aligned_x, aligned_y), merged_matrix)", "def mapping_points_between_figure_elements(element1: FigureElement,\n element2: FigureElement\n ) -> Tuple[Vec, Vec]:\n if isinstance(element1, FigureVertex):\n center1 = Vec(x1=element1.center[0], y1=element1.center[1])\n radius1 = element1.get_radius()\n elif isinstance(element1, FigureEdge):\n x, y = element1.get_center()\n center1 = Vec(x1=x, y1=y)\n radius1 = element1.get_linewidth()\n else:\n raise ValueError\n if isinstance(element2, FigureVertex):\n center2 = Vec(x1=element2.center[0], y1=element2.center[1])\n radius2 = element2.get_radius()\n elif isinstance(element2, FigureEdge):\n x, y = element2.get_center()\n center2 = Vec(x1=x, y1=y)\n radius2 = element2.get_linewidth()\n else:\n raise ValueError\n # p1, p2 = connection_points_between_circles(center1,\n # center2,\n # radius1,\n # radius2)\n p1, p2 = center1, center2\n return p1, p2", "def cast(*args):\n return _itkTranslationTransformPython.itkTranslationTransformD2_cast(*args)", "def createTwoPoints(cls, x1, y1, z1, x2, y2, z2):\n d = Vector(x2 - x1, y2 - y1, z2 - z1)\n p0 = Vector(x1, y1, z1)\n return cls(p0, d)", "def compose_transform(T1, T2):\n aux_vec = np.array([0, 0, 1]).reshape(1, 3)\n\n T1 = np.concatenate((T1, aux_vec), axis=0)\n T2 = np.concatenate((T2, aux_vec), axis=0)\n\n T1_inv = np.linalg.inv(T1)\n T = T1_inv@T2\n\n return T[0:2]", "def cartesian_coordinates(self):\n # extract RA items\n ra_hours, ra_minutes, ra_seconds = RA_RE.match(str(self.ra)).groups()\n # then cast\n ra_hours = int(ra_hours)\n ra_minutes = int(ra_minutes)\n ra_seconds = float(ra_seconds)\n\n # extract 
DEC items\n dec_sign, dec_degrees, dec_minutes, dec_seconds = DEC_RE.match(str(self.dec)).groups()\n # then cast\n dec_sign = -1 if dec_sign == '-' else 1\n dec_degrees = int(dec_degrees)\n dec_minutes = int(dec_minutes)\n dec_seconds = float(dec_seconds)\n\n # to degrees\n a = (ra_hours*15) + (ra_minutes*0.25) + (ra_seconds*0.004166)\n b = abs(dec_degrees + dec_minutes/60 + dec_seconds/3600) * dec_sign\n\n # to radians\n a = math.radians(a)\n b = math.radians(b)\n\n distance = float(self.distance)\n\n x = (distance * math.cos(b)) * math.cos(a)\n y = (distance * math.cos(b)) * math.sin(a)\n z = distance * math.sin(b)\n\n return x, y, z", "def point2wgs84(self, crsop):\n latr = math.radians(self.lat)\n lngr = math.radians(self.lng)\n \n # a is the semi-major axis of given datum.\n a = crsop.SemimajorAxisInM\n \n # f is the flattening of given datum\n f = 1.0/crsop.InverseFlattening\n dx = crsop.dx\n dy = crsop.dy\n dz = crsop.dz\n \n # da is the difference between the WGS84 and source ellipsoid semi-major axes.\n da = 6378137.0 - a\n \n # df is the difference between the WGS84 and source CRS flattenings.\n df = 1.0/298.257223563 - f\n \n e_squared = f*(2-f)\n rho = a*(1-e_squared)/math.pow((1-e_squared*sqr(math.sin(latr))),1.5)\n nu = a/math.pow((1-e_squared*sqr(math.sin(latr))),0.5)\n dlat = (1/rho)*(-dx*math.sin(latr)*math.cos(lngr) - \\\n dy*math.sin(latr)*math.sin(lngr) + \\\n dz*math.cos(latr) + (f*da + a*df)*math.sin(2*latr))\n dlng = (-dx*math.sin(lngr) + dy*math.cos(lngr))/(nu*math.cos(latr))\n newlng = lng180(math.degrees(lngr + dlng))\n newlat = math.degrees(latr + dlat)\n return Point(float(truncate(newlng,DEGREE_DIGITS)), \\\n float(truncate(newlat,DEGREE_DIGITS)))", "def CreateTargetGeoMap(latS, latN, lonW, lonE, latlen, lonlen):\n\n lat_grid = np.linspace(latS, latN, latlen)\n lon_grid = np.linspace(lonW, lonE, lonlen)\n\n return lat_grid,lon_grid", "def transform_coordinates(coords):\n # WGS 84 reference coordinate system parameters\n A = 6378.137 # major axis [km]\n E2 = 6.69437999014e-3 # eccentricity squared\n\n coords = prepare_coords(coords)\n\n # convert to radiants\n lat_rad = np.radians(coords[:, 0])\n lon_rad = np.radians(coords[:, 1])\n\n # convert to cartesian coordinates\n r_n = A / (np.sqrt(1 - E2 * (np.sin(lat_rad) ** 2)))\n x = r_n * np.cos(lat_rad) * np.cos(lon_rad)\n y = r_n * np.cos(lat_rad) * np.sin(lon_rad)\n z = r_n * (1 - E2) * np.sin(lat_rad)\n\n return np.column_stack((x, y, z))", "def convert_coords(x, y, conversion):\n if conversion == \"cartesian\" :\n # convert to cartesian plane coordinates \n x_new = x - (width/2)\n y_new = (height/2) + y \n\n elif conversion == \"pygame\":\n # only needed to place images in pygame\n x_new = x + (width/2)\n y_new = (height/2) - y\n \n return x_new, y_new", "def Mxform(x1,y1,x2,y2):\n return Jones.toMueller([[np.dot(x2,x1), np.dot(x2, y1)], [np.dot(y2,x1), np.dot(y2,y1)]])", "def _compose_transforms(basis_transforms, source_basis, source_dag):\n example_gates = _get_example_gates(source_dag)\n mapped_instrs = {}\n\n for gate_name, gate_num_qubits in source_basis:\n # Need to grab a gate instance to find num_qubits and num_params.\n # Can be removed following https://github.com/Qiskit/qiskit-terra/pull/3947 .\n example_gate = example_gates[gate_name, gate_num_qubits]\n num_params = len(example_gate.params)\n\n placeholder_params = ParameterVector(gate_name, num_params)\n placeholder_gate = Gate(gate_name, gate_num_qubits, list(placeholder_params))\n placeholder_gate.params = list(placeholder_params)\n\n dag = 
DAGCircuit()\n qr = QuantumRegister(gate_num_qubits)\n dag.add_qreg(qr)\n dag.apply_operation_back(placeholder_gate, qr[:], [])\n mapped_instrs[gate_name, gate_num_qubits] = placeholder_params, dag\n\n for gate_name, gate_num_qubits, equiv_params, equiv in basis_transforms:\n logger.debug(\n \"Composing transform step: %s/%s %s =>\\n%s\",\n gate_name,\n gate_num_qubits,\n equiv_params,\n equiv,\n )\n\n for mapped_instr_name, (dag_params, dag) in mapped_instrs.items():\n doomed_nodes = [\n node\n for node in dag.op_nodes()\n if (node.op.name, node.op.num_qubits) == (gate_name, gate_num_qubits)\n ]\n\n if doomed_nodes and logger.isEnabledFor(logging.DEBUG):\n\n logger.debug(\n \"Updating transform for mapped instr %s %s from \\n%s\",\n mapped_instr_name,\n dag_params,\n dag_to_circuit(dag, copy_operations=False),\n )\n\n for node in doomed_nodes:\n\n replacement = equiv.assign_parameters(\n dict(zip_longest(equiv_params, node.op.params))\n )\n\n replacement_dag = circuit_to_dag(replacement)\n\n dag.substitute_node_with_dag(node, replacement_dag)\n\n if doomed_nodes and logger.isEnabledFor(logging.DEBUG):\n\n logger.debug(\n \"Updated transform for mapped instr %s %s to\\n%s\",\n mapped_instr_name,\n dag_params,\n dag_to_circuit(dag, copy_operations=False),\n )\n\n return mapped_instrs", "def convertView2Geo(self, x, y):\n\n # x_pix is from left map edge, y_pix from top map edge\n x_pix = x + self.view_offset_x\n y_pix = y + self.view_offset_y\n\n lon = self.map_llon + x_pix/self.ppd_x\n lat = self.map_tlat - y_pix/self.ppd_y\n\n return (lon, lat)", "def compute_geometric_transform(p1,p2,best_matches):\n # How many good matches are there?\n num_bad_matches = sum([x == None for x in best_matches])\n num_good_matches = p1.shape[0]-num_bad_matches\n\n # Prepare data for fitting\n A = np.ones((3, num_good_matches))\n B = np.ones((3, num_good_matches))\n count = 0\n for i in range(p1.shape[0]):\n if best_matches[i] != None:\n A[0,count] = p1[i,0]\n A[1,count] = p1[i,1]\n A[2,count] = p1[i,2]\n B[0,count] = p2[best_matches[i],0]\n B[1,count] = p2[best_matches[i],1]\n B[2,count] = p2[best_matches[i],2]\n count += 1\n A = A.T\n B = B.T\n\n model = GeometricTransform(bScale=False)\n data = np.hstack((A,B))\n\n # Need at least seven points for a good transform fit...\n if (num_good_matches < 7):\n print 'WARNING: not enough matches to compute a geometric transform.'\n return 1, np.identity(3), np.array([0,0,0])\n elif (num_good_matches < 20):\n print 'WARNING: not enough matches to compute a robust fit.'\n return model.fit(data)\n else:\n import lflib.calibration.ransac as ransac\n try:\n bestdata = ransac.ransac(data,model,\n 10, #rand samp size (num required to fit)\n 30, #num iterations\n 4.0, #transformed dist required to be considered inlier,\n 15, #min inliers to be considered \n debug=False,return_all=False)\n return model.fit(bestdata)\n except ValueError:\n return model.fit(data)", "def from_pts(one, two):\n\t\treturn Vec2(two[0] - one[0], two[1] - one[1])", "def inverse_transform2(alpha, tx = 0.0, ty = 0.0, mirror = 0):\n\n\tt = Transform({\"type\":\"2D\",\"alpha\":alpha,\"tx\":tx,\"ty\":ty,\"mirror\":mirror,\"scale\":1.0})\n\tt = t.inverse()\n\tt = t.get_params(\"2D\")\n\treturn t[ \"alpha\" ], t[ \"tx\" ], t[ \"ty\" ], t[ \"mirror\" ]", "def transform_point(transform, x_in, y_in):\n # create point geometry from coordinates\n point = ogr.Geometry(ogr.wkbPoint)\n point.AddPoint(x_in, y_in)\n point.Transform(transform)\n\n x_out = point.GetX()\n y_out = point.GetY()\n return x_out, y_out", 
"def get_affine_transform_2d_no_numpy(gps_coords, pdr_coords):\n diff_x = [i - j for i, j in zip(pdr_coords[1], pdr_coords[0])]\n diff_xp = [i - j for i, j in zip(gps_coords[1], gps_coords[0])]\n\n dot = diff_x[0] * diff_xp[0] + diff_x[1] * diff_xp[1] # dot product\n det = diff_x[0] * diff_xp[1] - diff_x[1] * diff_xp[0] # determinant\n theta = math.atan2(det, dot) # atan2(y, x) or atan2(sin, cos)\n\n A = [[math.cos(theta), -math.sin(theta), 0],\n [math.sin(theta), math.cos(theta), 0],\n [0, 0, 1]]\n s = math.sqrt((diff_xp[0]*diff_xp[0]+diff_xp[1]*diff_xp[1]+diff_xp[2]*diff_xp[2])/\n (diff_x[0]*diff_x[0]+diff_x[1]*diff_x[1]+diff_x[2]*diff_x[2]))\n\n x1 = pdr_coords[1]\n a_dot_x1 = [A[0][0]*x1[0] + A[0][1]*x1[1] + A[0][2]*x1[2],\n A[1][0]*x1[0] + A[1][1]*x1[1] + A[1][2]*x1[2],\n A[2][0]*x1[0] + A[2][1]*x1[1] + A[2][2]*x1[2]]\n b = [i - j*s for i, j in zip(gps_coords[1], a_dot_x1)]\n\n return s, A, b", "def calc_transform(src_, dst_):\n M_ = cv2.getPerspectiveTransform(src_, dst_)\n Minv_ = cv2.getPerspectiveTransform(dst_, src_)\n return M_, Minv_", "def convert_points(pointsIN,epsgIN,epsgOUT):\n \n if(epsgIN != epsgOUT):\n \n coords_in = osr.SpatialReference()\n coords_in.ImportFromEPSG(epsgIN)\n coords_out = osr.SpatialReference() \n coords_out.ImportFromEPSG(epsgOUT) \n numPts = len(pointsIN)\n dimension = len(pointsIN[0])\n pointsOUT = []\n n=0\n while n<numPts:\n point = ogr.Geometry(type=ogr.wkbPoint)\n point.SetPoint(0, float(pointsIN[n][0]), float(pointsIN[n][1]))\n point.AssignSpatialReference(coords_in)\n point.TransformTo(coords_out)\n if dimension < 3:\n pointsOUT.append([float(point.GetX()),float(point.GetY())])\n else:\n pointsOUT.append([float(point.GetX()),float(point.GetY()),float(pointsIN[n][2])])\n \n n+=1\n \n return pointsOUT\n \n else:\n return pointsIN", "def geodetic_to_grid(self, latitude, longitude):\n\n φ = math.radians(latitude)\n λ = math.radians(longitude)\n\n φ_star = φ - math.sin(φ) * math.cos(φ) * (self.A +\n self.B * math.sin(φ) ** 2 +\n self.C * math.sin(φ) ** 4 +\n self.D * math.sin(φ) ** 6)\n\n δλ = λ - self.λ0\n ξ_prim = math.atan(math.tan(φ_star) / math.cos(δλ))\n η_prim = math.atanh(math.cos(φ_star) * math.sin(δλ))\n\n x = self.k0 * self.â * (ξ_prim +\n self.β1 * math.sin(2 * ξ_prim) * math.cosh(2 * η_prim) +\n self.β2 * math.sin(4 * ξ_prim) * math.cosh(4 * η_prim) +\n self.β3 * math.sin(6 * ξ_prim) * math.cosh(6 * η_prim) +\n self.β4 * math.sin(8 * ξ_prim) * math.cosh(8 * η_prim)) + self.fn\n\n y = self.k0 * self.â * (η_prim +\n self.β1 * math.cos(2 * ξ_prim) * math.sinh(2 * η_prim) +\n self.β2 * math.cos(4 * ξ_prim) * math.sinh(4 * η_prim) +\n self.β3 * math.cos(6 * ξ_prim) * math.sinh(6 * η_prim) +\n self.β4 * math.cos(8 * ξ_prim) * math.sinh(8 * η_prim)) + self.fe\n\n return x, y", "def proj_coords(coords, proj_in, proj_out): \n return [proj_coord(coord, proj_in, proj_out) for coord in coords]", "def transform(self, x, y):\n # return self.transform_2D(x, y)\n return self.transform_perspective(x, y)", "def test_transform_multiples(self):\n \n t1_ref = 6.28318530717958647692528676655867\n t4_ref = t1_ref / 4\n\n # test for all kinds of curvatures K\n for k in (0, 1, -1, 1/11, -1/11, 11, -2):\n\n s = space(curvature=k)\n\n # use a small enough magnitude to not break math for very negative K\n magic = 0.33377777373737737777\n\n o = s.make_origin(3)\n p = s.make_point((2/11, 6/11, 9/11), magic)\n q = s.make_point((3/7, 6/7, 2/7), magic)\n\n f, g, i = map(space_point_transform, (p, q, o))\n\n def check_transform_eq(t1, t2, invert=False, skip=None):\n if 
skip:return\n for ref in (\n s.make_point((9/17, 8/17, 12/17), magic),\n s.make_point((0, 3/5, 4/5), magic)\n ):\n self.assertTrue(invert ^ point_isclose(\n t1(ref),\n t2(ref)\n ))\n\n def skip_test(n):\n \"\"\"\n Should we skip this case?\n \"\"\"\n return k > 0 and magic * n * k**0.5 >= t4_ref\n\n # check f^0 = I\n check_transform_eq(f * 0, i)\n check_transform_eq(g * 0, i)\n check_transform_eq(i * 0, i)\n\n # check f^1 = f\n check_transform_eq(f * 1, f)\n check_transform_eq(g * 1, g)\n check_transform_eq(i * 1, i)\n\n # check f^-1 is correct inverse of f\n check_transform_eq(f * -1,\n space_point_transform(p * -1))\n check_transform_eq(g * -1,\n space_point_transform(q * -1))\n\n # check f^n is correct iterated f\n check_transform_eq(f * 3,\n space_point_transform(p * 3),\n skip = skip_test(3))\n check_transform_eq(g * 5,\n space_point_transform(q * 5),\n skip = skip_test(5))\n check_transform_eq(f * 19,\n space_point_transform(p * 19),\n skip = skip_test(19))\n check_transform_eq(g * 21,\n space_point_transform(q * 21),\n skip = skip_test(21))\n\n # check f^(1/n) f is correct fractional f\n hf = f * 0.5\n check_transform_eq(hf + hf, f)\n hg = g * 0.25\n check_transform_eq(hg * 4, g)", "def reproject(self, lon, lat):\n if self.xform is None:\n # if the CRS hasn't been determined yet, we set it from the first image's lat/lon (take the UTM crs)\n utm_i = str(int(math.floor((self.images[0].lon + 180) / 6 ) % 60) + 1).zfill(2)\n epsg_code = int('326' + utm_i) if (self.images[0].lat >= 0) else int('327' + utm_i)\n self.crs_dest = QgsCoordinateReferenceSystem(epsg_code)\n self.xform = QgsCoordinateTransform(self.crs_src, self.crs_dest, QgsProject.instance())\n return self.xform.transform(QgsPointXY(lon, lat))", "def transition_point(x1, y1, x2, y2):\n return (\n ((x1, y1), True) if abs(x1) > abs(x2) and abs(y1) > abs(y2)\n else ((x2, y2), False))", "def create_transforms(ntiles, solution):\n rtransforms = []\n for i in range(ntiles):\n rtransforms.append(renderapi.transform.AffineModel(\n B0=solution[0][i],\n B1=solution[1][i]))\n return rtransforms", "def geom_trans(cls, Y1, Y2):\n return super().geom_trans(Y1, Y2)", "def get_perspective_transform(points_src: Tensor, points_dst: Tensor) -> Tensor:\n KORNIA_CHECK_SHAPE(points_src, [\"B\", \"4\", \"2\"])\n KORNIA_CHECK_SHAPE(points_dst, [\"B\", \"4\", \"2\"])\n KORNIA_CHECK(points_src.shape == points_dst.shape, \"Source data shape must match Destination data shape.\")\n KORNIA_CHECK(points_src.dtype == points_dst.dtype, \"Source data type must match Destination data type.\")\n\n # we build matrix A by using only 4 point correspondence. 
The linear\n # system is solved with the least square method, so here\n # we could even pass more correspondence\n\n # create the lhs tensor with shape # Bx8x8\n B: int = points_src.shape[0] # batch_size\n\n A = torch.empty(B, 8, 8, device=points_src.device, dtype=points_src.dtype)\n\n # we need to perform in batch\n _zeros = zeros(B, device=points_src.device, dtype=points_src.dtype)\n _ones = torch.ones(B, device=points_src.device, dtype=points_src.dtype)\n\n for i in range(4):\n x1, y1 = points_src[..., i, 0], points_src[..., i, 1] # Bx4\n x2, y2 = points_dst[..., i, 0], points_dst[..., i, 1] # Bx4\n\n A[:, 2 * i] = stack([x1, y1, _ones, _zeros, _zeros, _zeros, -x1 * x2, -y1 * x2], -1)\n A[:, 2 * i + 1] = stack([_zeros, _zeros, _zeros, x1, y1, _ones, -x1 * y2, -y1 * y2], -1)\n\n # the rhs tensor\n b = points_dst.view(-1, 8, 1)\n\n # solve the system Ax = b\n X: Tensor = _torch_solve_cast(A, b)\n\n # create variable to return the Bx3x3 transform\n M = torch.empty(B, 9, device=points_src.device, dtype=points_src.dtype)\n M[..., :8] = X[..., 0] # Bx8\n M[..., -1].fill_(1)\n\n return M.view(-1, 3, 3) # Bx3x3", "def unproject(self, (x, y)):\n lng = x/EARTH_RADIUS * RAD_TO_DEG\n lat = 2 * math.atan(math.exp(y/EARTH_RADIUS)) - math.pi/2 * RAD_TO_DEG\n return (lng, lat)", "def proj_coord(coord, proj_in, proj_out):\n x, y = coord\n return pyproj.transform(proj_in, proj_out, x, y)", "def _transform_point(self, x, y):\n return (x, y)", "def transform_point(p,R,t):\r\n x = R[0][0]*p[0]+R[0][1]*p[1]+t[0]\r\n y = R[1][0]*p[0]+R[1][1]*p[1]+t[1]\r\n return [x,y]", "def composite(c, r):\n x, y = gta * (c, r)\n lat, lon = transform.TransformPoint(x, y)[:2]\n if not -90 <= lat <= 90:\n raise ValueError('illegal lat value, did you switch coordinates')\n return lon, lat", "def reproject_coordinates(coordinates, inproj, outproj, flat=False):\n if flat:\n return np.array([transform(inproj, outproj, coord[0], coord[1]) for coord in coordinates]).flatten()\n return [list(transform(inproj, outproj, coord[0], coord[1])) for coord in coordinates]", "def latlon_2_grid(x, y, z, origin):\n new_y = (y - origin[1]) * 111111\n new_x = (x - origin[0]) * (111111 * np.cos(origin[1] * (np.pi/180)))\n return new_x, new_y, z", "def cart2pol(x: float, y: float) -> typing.Tuple[float, float]:\n return atan2(y, x), hypot(x, y)", "def get_transform(ds):\n\n if 'transform' in ds.attrs:\n ds_trans = ds.attrs['transform']\n if isinstance(ds_trans, Affine):\n return ds_trans\n else:\n return Affine(*ds_trans)\n\n elif 'crs' in ds.data_vars and 'i2m' in ds.data_vars['crs'].attrs:\n transf_str = ds.data_vars['crs'].attrs['i2m']\n a = list(map(float, transf_str.split(',')))\n return Affine(a[0], a[2], a[4], a[1], a[3], a[5])\n\n else:\n resx, resy = get_resolution(ds)\n xoff = ds['x'].values.min()\n yoff = ds['y'].values.max()\n return Affine(resx, 0, xoff, 0, resy, yoff)", "def estimate_translation(points1, points2):\n xs = points1[:,0]\n ys = points1[:,1]\n x2s = points2[:,0]\n y2s = points2[:,1]\n N = len(xs)\n \n # build b\n b = np.empty((N+N, 1))\n b[::2,0] = xs\n b[1::2,0] = ys\n \n # build A\n A = np.empty((N+N, 3))\n A[::2, 0] = x2s\n A[1::2,0] = y2s\n A[::2, 1] = np.ones(N)\n A[1::2, 1] = np.zeros(N)\n A[::2, 2] = np.zeros(N)\n A[1::2, 2] = np.ones(N)\n \n A = np.linalg.lstsq(A, b)[0][:,0]\n M = [[1, 0, A[1]],\n [0, 1, A[2]]]\n return (A[0], A[1], A[2], np.array(M))", "def affine_transform(geom, matrix):\n if geom.is_empty:\n return geom\n if len(matrix) == 6:\n ndim = 2\n a, b, d, e, xoff, yoff = matrix\n if geom.has_z:\n ndim 
= 3\n i = 1.0\n c = f = g = h = zoff = 0.0\n matrix = a, b, c, d, e, f, g, h, i, xoff, yoff, zoff\n elif len(matrix) == 12:\n ndim = 3\n a, b, c, d, e, f, g, h, i, xoff, yoff, zoff = matrix\n if not geom.has_z:\n ndim = 2\n matrix = a, b, d, e, xoff, yoff\n else:\n raise ValueError(\"'matrix' expects either 6 or 12 coefficients\")\n\n def affine_pts(pts):\n \"\"\"Internal function to yield affine transform of coordinate tuples\"\"\"\n if ndim == 2:\n for x, y in pts:\n xp = a * x + b * y + xoff\n yp = d * x + e * y + yoff\n yield (xp, yp)\n elif ndim == 3:\n for x, y, z in pts:\n xp = a * x + b * y + c * z + xoff\n yp = d * x + e * y + f * z + yoff\n zp = g * x + h * y + i * z + zoff\n yield (xp, yp, zp)\n\n # Process coordinates from each supported geometry type\n if geom.type in ('Point', 'LineString', 'LinearRing'):\n return type(geom)(list(affine_pts(geom.coords)))\n elif geom.type == 'Polygon':\n ring = geom.exterior\n shell = type(ring)(list(affine_pts(ring.coords)))\n holes = list(geom.interiors)\n for pos, ring in enumerate(holes):\n holes[pos] = type(ring)(list(affine_pts(ring.coords)))\n return type(geom)(shell, holes)\n elif geom.type.startswith('Multi') or geom.type == 'GeometryCollection':\n # Recursive call\n # TODO: fix GeometryCollection constructor\n return type(geom)([affine_transform(part, matrix)\n for part in geom.geoms])\n else:\n raise ValueError('Type %r not recognized' % geom.type)", "def get_proj(self):\n relev, razim = np.pi * self.elev/180, np.pi * self.azim/180\n\n xmin, xmax = self.get_xlim3d()/self.pbaspect[0]\n ymin, ymax = self.get_ylim3d()/self.pbaspect[1]\n zmin, zmax = self.get_zlim3d()/self.pbaspect[2]\n\n # transform to uniform world coordinates 0-1.0,0-1.0,0-1.0 \n worldM = proj3d.world_transformation(xmin, xmax,\n ymin, ymax,\n zmin, zmax)\n\n # look into the middle of the new coordinates \n R = np.array([0.5, 0.5, 0.5])\n\n xp = R[0] + np.cos(razim) * np.cos(relev) * self.dist\n yp = R[1] + np.sin(razim) * np.cos(relev) * self.dist\n zp = R[2] + np.sin(relev) * self.dist\n E = np.array((xp, yp, zp))\n\n self.eye = E\n self.vvec = R - E\n self.vvec = self.vvec / proj3d.mod(self.vvec)\n\n if abs(relev) > np.pi/2:\n # upside down \n V = np.array((0, 0, -1))\n else:\n V = np.array((0, 0, 1))\n\n zfront, zback = -self.dist, self.dist\n\n viewM = proj3d.view_transformation(E, R, V)\n perspM = proj3d.persp_transformation(zfront, zback)\n M0 = np.dot(viewM, worldM)\n M = np.dot(perspM, M0)\n return M", "def CreateReleasePoints(points_on_longitude, points_on_latitude, grids):\n \n ReleasePointsLon = []\n ReleasePointsLat = []\n \n GridsCW_array = np.asarray(grids[['min_lon', 'min_lat', 'max_lon', 'max_lat']])\n \n for i in range(len(GridsCW_array)):\n \n lon_space = np.linspace(GridsCW_array[i,0], GridsCW_array[i,2], num = points_on_longitude+2 )\n lat_space = np.linspace(GridsCW_array[i,1], GridsCW_array[i,3], num = points_on_latitude+2 )\n \n \n lon_space_cor = lon_space[1:-1]\n lat_space_cor = lat_space[1:-1]\n \n for j in lon_space_cor:\n for k in lat_space_cor:\n \n ReleasePointsLon.append(j)\n ReleasePointsLat.append(k)\n \n return ReleasePointsLon, ReleasePointsLat", "def project(self, other):\n L = self.length2\n if L > pygonal.EPSILON2:\n s = self.dot(other) / L\n return tuple.__new__(Vec2, (self[0] * s, self[1] * s))\n else:\n return null", "def connect(cls, x1, y1, z1, x2, y2, z2, t1=0, t2=0):\n return cls(x2 - x1, y2 - y1, z2 - z1, t2 - t1)", "def projectBack(points, proj):\n\n mpoints = MultiPoint(points)\n project = partial(\n 
pyproj.transform,\n proj,\n pyproj.Proj(proj='latlong', datum='WGS84'))\n gmpoints = transform(project, mpoints)\n coords = []\n for point in gmpoints.geoms:\n x, y = point.coords[0]\n coords.append((x, y))\n coords = np.array(coords)\n return coords", "def param_convert(self, alpha_1, w_c, w_t, e1, e2):\n if self._static is True:\n return (\n self._theta_convert_static,\n self._w_c_static,\n self._w_t_stactic,\n self._s_scale_1_static,\n self._s_scale_2_static,\n )\n return self._param_convert(alpha_1, w_c, w_t, e1, e2)", "def get_transform(self, map_from='visual', map_to='render'):\n return self.transforms.get_transform(map_from, map_to)", "def transform(self, pol_lon=None, pol_lat=None):\n if self.rotated:\n direction = 'rot2geo'\n pol_lon = self.pol_lon\n pol_lat = self.pol_lat\n else:\n if pol_lon is None or pol_lat is None:\n pol_lon = self.pol_lon\n pol_lat = self.pol_lat\n #raise Exception('grid is not rotated, transform requires pol_lon and pol_lat')\n direction = 'geo2rot'\n lon_arr_trans, lat_arr_trans = rotated_grid_transform(\n self.lon_arr, self.lat_arr, pol_lon, pol_lat,\n direction=direction)\n if self.rotated:\n return Grid(lon_arr_trans, lat_arr_trans)\n else:\n return Grid(lon_arr_trans, lat_arr_trans, pol_lon, pol_lat)", "def givens_rotation(v1: float, v2: float) -> Tuple[float, float]:\n t = jnp.sqrt(v1**2 + v2**2)\n cs = v1 / t\n sn = -v2 / t\n return cs, sn", "def convert_pose_to_xy_and_theta(self, passed_stamped_pose):\n # Convert to map coordinate frame from odom\n pose = self.transform(passed_stamped_pose).pose # Apply current transform to given pose\n\n orientation_tuple = (pose.orientation.x,\n pose.orientation.y,\n pose.orientation.z,\n pose.orientation.w)\n angles = t.euler_from_quaternion(orientation_tuple)\n\n return (pose.position.x, pose.position.y, angles[2])", "def get_transforms(args):\n gps_reader = GPSReader(args['gps'])\n gps_data = gps_reader.getNumericData()\n imu_transforms = IMUTransforms(gps_data)\n return imu_transforms", "def coords_trans(map, coords, up_lift=True):\n if len(coords) < 3:\n coords = np.append(coords, [0])\n pass\n\n exact_location = carla.Location(x=coords[0], y=coords[1], z=coords[2])\n waypoint = map.get_waypoint(exact_location) # carla.waypoint\n road_center_location = waypoint.transform.location\n\n if up_lift == True:\n exact_location.z += 1.0\n road_center_location.z += 1.0\n\n return exact_location, waypoint, road_center_location" ]
[ "0.6856417", "0.683055", "0.6792389", "0.6792389", "0.67460185", "0.64127856", "0.64124286", "0.63331443", "0.6282391", "0.62185615", "0.6211598", "0.61743957", "0.6105238", "0.60964894", "0.6039987", "0.59297293", "0.591202", "0.5792901", "0.5790143", "0.57694685", "0.5704589", "0.5699321", "0.56897587", "0.5664953", "0.5657789", "0.5626941", "0.56239927", "0.5616216", "0.56143695", "0.5608458", "0.56040967", "0.5564466", "0.5552187", "0.553884", "0.5519179", "0.5515255", "0.5489414", "0.54624563", "0.5456078", "0.5449235", "0.54486376", "0.5433683", "0.54336065", "0.54164565", "0.5377904", "0.536584", "0.53651094", "0.53643614", "0.5348436", "0.53410625", "0.53384644", "0.5327547", "0.5319171", "0.53022164", "0.5301873", "0.52967566", "0.52898854", "0.52831286", "0.52698976", "0.52603096", "0.52511746", "0.5250859", "0.5246229", "0.52424926", "0.5239163", "0.5211754", "0.52097714", "0.5202239", "0.519093", "0.51848227", "0.5178972", "0.51785207", "0.517802", "0.51701474", "0.5156202", "0.51462036", "0.51396847", "0.51325816", "0.513021", "0.51250345", "0.5112952", "0.51091427", "0.510541", "0.5103938", "0.5103167", "0.5086579", "0.5081295", "0.5078259", "0.5061596", "0.50612164", "0.50466394", "0.50385594", "0.5037762", "0.5032306", "0.50285864", "0.50228125", "0.50223184", "0.50096744", "0.5007068", "0.50013465" ]
0.8401615
0
Given a geometry as a lat/long polygon, find the lat/long centroid, by first projecting into the preferred EPSG, so as to avoid discontinuities. The preferredEpsg is one in which the polygon ought to make sense (as found, hopefully, by the findSensibleProjection() function). Returns a pair [centroidX, centroidY] in lat/long
def findCentroid(geom, preferredEpsg): (projTr, llTr) = makeTransformations(4326, preferredEpsg) geomProj = copyGeom(geom) geomProj.Transform(projTr) geomCentroid = geomProj.Centroid() geomCentroid.Transform(llTr) centroidDict = eval(geomCentroid.ExportToJson()) centroidXY = centroidDict['coordinates'] return centroidXY
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_polygon_centroid_2d(polygon):\r\n return geometry.gmComputePolygonCentroid(polygon)", "def findSensibleProjection(geom):\n coords = getCoords(geom)\n y = coords[:, 1]\n x = coords[:, 0]\n yMin = y.min()\n yMax = y.max()\n if (yMax - yMin) > 90:\n # We are crossing a lot of latitude, which suggests that we have a \n # long strip> In this case, we don't even bother to suggest an EPSG. \n epsg = None\n elif yMin < -80:\n # We are nearing the south pole, so go with UPS south\n epsg = 32761\n elif yMax > 80:\n # Nearing north pole, so UPS North\n epsg = 32661\n else:\n # Work out a UTM zone. Note that we use the median value to get a rough \n # idea of the centre, rather than the mean, because the mean is subject to all \n # sorts of problems when crossing the date line\n xMedian = numpy.median(x)\n yMedian = numpy.median(y)\n zone = int((xMedian + 180)/6) % 60 + 1\n if yMedian < 0:\n epsgBase = 32700\n else:\n epsgBase = 32600\n epsg = epsgBase + zone\n return epsg", "def calculate_polygon_centroid(polygon):\n\n # Make sure it is numeric\n P = numpy.array(polygon)\n\n # Get area - needed to compute centroid\n A = calculate_polygon_area(P, signed=True)\n\n # Extract x and y coordinates\n x = P[:, 0]\n y = P[:, 1]\n\n # Exercise: Compute C as shown in http://paulbourke.net/geometry/polyarea\n a = x[:-1] * y[1:]\n b = y[:-1] * x[1:]\n\n cx = x[:-1] + x[1:]\n cy = y[:-1] + y[1:]\n\n Cx = numpy.sum(cx * (a - b)) / (6. * A)\n Cy = numpy.sum(cy * (a - b)) / (6. * A)\n\n # Create Nx2 array and return\n C = numpy.array([Cx, Cy])\n return C", "def polygonFromInteriorPoints(geom, preferredEpsg):\n (projTr, llTr) = makeTransformations(4326, preferredEpsg)\n\n geomProj = copyGeom(geom)\n geomProj.Transform(projTr)\n geomOutline = geomProj.ConvexHull()\n geomOutline.Transform(llTr)\n return geomOutline", "def test_polygon_centroids(self):\n\n # Create closed simple polygon (counter clock wise)\n P = numpy.array([[0, 0], [1, 0], [1, 1], [0, 1], [0, 0]])\n C = calculate_polygon_centroid(P)\n\n msg = ('Calculated centroid was (%f, %f), expected '\n '(0.5, 0.5)' % tuple(C))\n assert numpy.allclose(C, [0.5, 0.5]), msg\n\n # Create closed simple polygon (clock wise)\n # FIXME (Ole): Not sure whether to raise an exception or\n # to return absolute value in this case\n P = numpy.array([[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]])\n C = calculate_polygon_centroid(P)\n\n msg = ('Calculated centroid was (%f, %f), expected '\n '(0.5, 0.5)' % tuple(C))\n assert numpy.allclose(C, [0.5, 0.5]), msg\n\n # Not starting at zero\n # Create closed simple polygon (counter clock wise)\n P = numpy.array([[168, -2], [169, -2], [169, -1],\n [168, -1], [168, -2]])\n C = calculate_polygon_centroid(P)\n\n msg = ('Calculated centroid was (%f, %f), expected '\n '(168.5, -1.5)' % tuple(C))\n assert numpy.allclose(C, [168.5, -1.5]), msg\n\n # Realistic polygon\n filename = '%s/%s' % (TESTDATA, 'test_polygon.shp')\n layer = read_layer(filename)\n geometry = layer.get_geometry()\n\n P = geometry[0]\n C = calculate_polygon_centroid(P)\n\n # Check against reference centroid\n reference_centroid = [106.7036938, -6.134533855] # From qgis\n assert numpy.allclose(C, reference_centroid, rtol=1.0e-8)\n\n # Store centroid to file (to e.g. 
check with qgis)\n out_filename = unique_filename(prefix='test_centroid', suffix='.shp')\n V = Vector(data=None,\n projection=DEFAULT_PROJECTION,\n geometry=[C],\n name='Test centroid')\n V.write_to_file(out_filename)\n\n # Another realistic polygon\n P = numpy.array([[106.7922547, -6.2297884],\n [106.7924589, -6.2298087],\n [106.7924538, -6.2299127],\n [106.7922547, -6.2298899],\n [106.7922547, -6.2297884]])\n\n C = calculate_polygon_centroid(P)\n\n # Check against reference centroid from qgis\n reference_centroid = [106.79235602697445, -6.229849764722536]\n msg = 'Got %s but expected %s' % (str(C), str(reference_centroid))\n assert numpy.allclose(C, reference_centroid, rtol=1.0e-8), msg\n\n # Store centroid to file (to e.g. check with qgis)\n out_filename = unique_filename(prefix='test_centroid', suffix='.shp')\n V = Vector(data=None,\n projection=DEFAULT_PROJECTION,\n geometry=[C],\n name='Test centroid')\n V.write_to_file(out_filename)", "def centroid(self) -> PointValue:\n return ops.GeoCentroid(self).to_expr()", "def centroid_for_uncomputed_shapes(shape_list: List[List[Tuple[float, float]]]) -> Tuple[float, float]:\n centroids = []\n areas = []\n for s in shape_list:\n centroids.append(convex_centroid(s))\n areas.append(convex_area(s))\n return centroid_for_shapes(centroids, areas)", "def getCentroid(self):\n if len(self.points) == 0:\n # None\n return None\n elif len(self.points) == 1:\n # Same point\n return self.points[0]\n elif len(self.points) == 2:\n # Middle of a segment\n return Segment(*self.points).middle\n elif len(self.points) == 3:\n # Intersection point of 2 medians\n return Point.average(self.points)\n else:\n # Geometric decomposition to compute centroids (wikipedia)\n n = len(self.points) # n is the number of points\n # There are n-2 forms\n forms = [Form([self.points[0]] + self.points[i:i + 2]) for i in range(1, n - 1)]\n # So n-2 centroids and areas, except if some of the points are one upon another, no area is null\n centroids = [form.center for form in forms]\n areas = [form.area for form in forms]\n # we compute the average centroid weighted by the areas\n weighted_centroid = Point.sum([a * c for (c, a) in zip(centroids, areas)])\n centroid = weighted_centroid / sum(areas)\n return centroid", "def centroids(network,\n geometry,\n **kwargs):\n Np = geometry.num_pores()\n value = _sp.zeros(Np)\n pore_map = geometry.map_pores(geometry.pores(),geometry._net)\n for geom_pore,net_pore in pore_map:\n net_throats = geometry._net.find_neighbor_throats(net_pore)\n geom_throats = geometry._net.map_throats(net_throats,geometry)[:,1]\n tcs = geometry[\"throat.centroid\"][geom_throats]\n pc = geometry[\"pore.centroid\"][geom_pore]\n value[geom_pore]=_sp.mean(_sp.sqrt(((tcs-pc)*(tcs-pc))[:,0]+((tcs-pc)*(tcs-pc))[:,1]+((tcs-pc)*(tcs-pc))[:,2]))*2\n return value", "def get_centroid(poly):\n # Make sure poly is formatted correctly\n if len(poly) < 3:\n raise ValueError('polygon has less than 3 points')\n for point in poly:\n if type(point) is not list or 2 != len(point):\n print(type(point))\n raise ValueError('point is not a list of length 2')\n # Calculate the centroid from the weighted average of the polygon's\n # constituent triangles\n area_total = 0\n centroid_total = [float(poly[0][0]), float(poly[0][1])]\n for i in range(0, len(poly) - 2):\n # Get points for triangle ABC\n a, b, c = poly[0], poly[i+1], poly[i+2]\n # Calculate the signed area of triangle ABC\n area = ((a[0] * (b[1] - c[1])) +\n (b[0] * (c[1] - a[1])) +\n (c[0] * (a[1] - b[1]))) / 2.0\n # If the area is 
zero, the triangle's line segments are\n # colinear so we should skip it\n if 0 == area:\n continue\n # The centroid of the triangle ABC is the average of its three\n # vertices\n centroid = [(a[0] + b[0] + c[0]) / 3.0, (a[1] + b[1] + c[1]) / 3.0]\n # Add triangle ABC's area and centroid to the weighted average\n centroid_total[0] = ((area_total * centroid_total[0]) +\n (area * centroid[0])) / (area_total + area)\n centroid_total[1] = ((area_total * centroid_total[1]) +\n (area * centroid[1])) / (area_total + area)\n area_total += area\n return centroid_total", "def centroid(self):\n return _property_geo(arctern.ST_Centroid, self)", "def centroid(self):\n x, y = self.coordinates\n A = 0.5 * sum(x[i]*y[i+1] - x[i+1]*y[i] for i in range(-1, len(self)-1))\n cx = sum((x[i] + x[i+1]) * (x[i]*y[i+1] - x[i+1]*y[i])\n for i in range(-1, len(self)-1)) / (6*A)\n cy = sum((y[i] + y[i+1]) * (x[i]*y[i+1] - x[i+1]*y[i])\n for i in range(-1, len(self)-1)) / (6*A)\n return Point((cx, cy), properties=self.properties, crs=self.crs)", "def centroid_gdf(gdf):\n df = gdf.copy()\n df['LAT'] = df['geometry'].centroid.apply(lambda p : p.coords[0][1])\n df['LONG'] = df['geometry'].centroid.apply(lambda p : p.coords[0][0])\n df = df.drop(columns=['geometry'])\n df = df.set_index('LOCATION')\n return df", "def geojson_centroid(obj):\n points = coords(obj)\n return best_effort_centroid2d(points)", "def estimate_centroid(self):\r\n\t\tstrain = self.strain_distribution_compr(self.max_pure_compresive_strain,\\\r\n\t\t\tself.max_pure_compresive_strain)\r\n\t\tself.geometric_centrod = (self.depth/2) \r\n\t\tself.plastic_centroid = (self.depth/2)+\\\r\n\t\t\t(self.sectional_moment(strain, self.depth/2)/\\\r\n\t\t\tself.sectional_force(strain))", "def centroid(self) -> Point:\n points = self.normalized_array\n centroids = [np.average(points[[0, i, i + 1], :-1], axis=0) for i in range(1, points.shape[0] - 1)]\n weights = [det(self._normalized_projection()[[0, i, i + 1]]) / 2 for i in range(1, points.shape[0] - 1)]\n return Point(*np.average(centroids, weights=weights, axis=0))", "def get_district_centroid(state=48, district=7, leg_body='US-REP', year='2015'):\r\n district_file = get_district_geojson_filename(\r\n state=state, district=district, leg_body=leg_body)\r\n \r\n get_district_file(state=state, district=district, leg_body=leg_body)\r\n\r\n district = gpd.read_file(district_file)\r\n\r\n longitude = district.geometry.centroid[0].x\r\n latitude = district.geometry.centroid[0].y\r\n\r\n return (longitude, latitude)", "def getCoordinateCentroid(coords):\n # Check we're getting numpy arrays\n assert(type(coords).__module__ == np.__name__)\n assert(coords.shape[1] == 2)\n coords = np.radians(coords)\n lat, lon = coords[:,0], coords[:,1]\n # compute location in 3D axis\n X = np.cos(lat) * np.cos(lon)\n Y = np.cos(lat) * np.sin(lon)\n Z = np.sin(lat)\n\n x, y, z = np.mean(X), np.mean(Y), np.mean(Z)\n centroid_lon = np.arctan2(y, x)\n hyp = np.sqrt(x*x + y*y)\n centroid_lat = np.arctan2(z, hyp)\n\n return np.degrees(centroid_lat), np.degrees(centroid_lon)", "def centroid(self) -> Point:\n # if the hydroline is defined, use the centroid of the hydroline\n if isinstance(self.geometry, Polyline):\n pt = Geometry({\n 'x': np.mean([self.putin.geometry.x, self.takeout.geometry.x]),\n 'y': np.mean([self.putin.geometry.y, self.takeout.geometry.y]),\n 'spatialReference': self.putin.geometry.spatial_reference\n })\n\n # if both accesses are defined, use the mean of the accesses\n elif isinstance(self.putin, ReachPoint) and 
isinstance(self.takeout, ReachPoint):\n\n # create a point geometry using the average coordinates\n pt = Geometry({\n 'x': np.mean([self.putin.geometry.x, self.takeout.geometry.x]),\n 'y': np.mean([self.putin.geometry.y, self.takeout.geometry.y]),\n 'spatialReference': self.putin.geometry.spatial_reference\n })\n\n # if only the putin is defined, use that\n elif isinstance(self.putin, ReachPoint):\n pt = self.putin.geometry\n\n # and if on the takeout is defined, likely the person digitizing was taking too many hits from the bong\n elif isinstance(self.takeout, ReachPoint):\n pt = self.takeout.geometry\n\n else:\n pt = None\n\n return pt", "def _find_coord_centre(self,\n shape_coords) -> np.array:\n return shape_coords.mean(axis=0)", "def centroid(self): # -> BaseGeometry:\n ...", "def GetCentroid(self, p_float=..., p_float=..., p_float=...):\n ...", "def find_centroid_cell(self):\n\n x_min, y_min = self.find_min()\n x_max, y_max = self.find_max()\n x_centroid = int((x_max+x_min)/2)\n y_centroid = int((y_max+y_min)/2)\n centroide = x_centroid, y_centroid\n return centroide", "def calc_centroid(x1, y1, x2, y2):\n x = x1 + ((x2 - x1) / 2.0)\n y = y1 + ((y2 - y1) / 2.0)\n return [x, y]", "def calc_centroid(self):\n sumX = 0.0\n sumY = 0.0\n dis = 0.0\n for p in self.points:\n sumX += p.x\n sumY += p.y\n d = p.distance(self.centroid)\n if dis < d: dis = d\n # radius is the longest distance within points\n self.radius = dis + 0.1\n size = len(self.points)\n if size:\n return Point(x=float(sumX)/size, y=float(sumY)/size)\n else:\n return self.centroid", "def calc_centroid(self):\n num = 0\n centroid = numpy.zeros(3, float)\n for atm in self:\n if atm.position is not None:\n centroid += atm.position\n num += 1\n return centroid / num", "def coordinates(self):\n # TODO: Add the feature where coordinates come from multiple sources.\n # Consider whether or not you'd want to output the categorical\n # variable indicating the source of the coordinate data or\n # make the user place coordinates a different property entirely.\n try:\n bounding_box = array(\n self.status.place\n [\"bounding_box\"]\n [\"coordinates\"]\n ).squeeze()\n centroid = bounding_box.mean(axis=0)\n return centroid\n except AttributeError:\n return zeros(2)", "def centerPoint(featureCollection):\n features = featureCollection[\"features\"]\n center = [0, 0]\n for feature in features:\n geometry = feature[\"geometry\"]\n if geometry[\"type\"] == \"Point\":\n point = feature[\"geometry\"][\"coordinates\"]\n center[0] += point[0]\n center[1] += point[1]\n else:\n raise ValueError(\"expected a point but got a {}\".format(geometry[\"type\"]))\n center[0] /= len(features) \n center[1] /= len(features) \n\n return geojson.Point(coordinates=center)", "def centroid_of_points(pts):\n xs, ys, zs = 0, 0, 0\n for pt in pts:\n xs += pt[0]\n ys += pt[1]\n if len(pt) > 2:\n zs += pt[2]\n if len(pts) > 0:\n xs /= len(pts)\n ys /= len(pts)\n if len(pts[0]) > 2:\n zs /= len(pts)\n return xs, ys, zs\n return xs, ys", "def centroid_for_shapes(centroids: List[Tuple[float, float]],\n areas: List[float] = None) -> Tuple[float, float]:\n gc = np.zeros(2)\n area = 0\n if areas is None:\n areas = np.ones(len(centroids))\n for pc, a in zip(centroids, areas):\n gc += np.array(pc)*a\n area += a\n gc /= area\n return np.array(gc)", "def extract_coordinates(x):\n if not pd.isna(x):\n x = ast.literal_eval(x)\n bbox = Polygon(x['bounding_box']['coordinates'][0])\n centroid = bbox.centroid.coords\n return Point(centroid)\n else:\n return np.nan", "def 
get_zone_centre(gdf, name):\n\n gdf_temp = gdf[gdf['name'] == name]\n if len(gdf_temp) == 0:\n return None\n if gdf_temp.iloc[0].geometry is None:\n return Point(0.0, 0.0)\n return gdf_temp.iloc[0].geometry.centroid", "def get_centroid(points):\n\n xs, ys = points[:, 0], points[:, 1]\n\n a = xs[:-1] * ys[1:]\n b = ys[:-1] * xs[1:]\n\n A = numpy.sum(a - b) / 2.\n\n cx = xs[:-1] + xs[1:]\n cy = ys[:-1] + ys[1:]\n\n Cx = numpy.sum(cx * (a - b)) / (6. * A)\n Cy = numpy.sum(cy * (a - b)) / (6. * A)\n\n return Cx, Cy", "def getContourCentroid(x, y, w, h):\n coordXCentroid = (x+x+w)/2\n coordYCentroid = (y+y+h)/2\n objectCentroid = (int(coordXCentroid),int(coordYCentroid))\n return objectCentroid", "def get_centroid(arr):\n if len(arr) == 0:\n return -1\n return ctr(arr).most_common(1)[0][0]", "def getpolycenter(poly):\n polylength = len(poly)\n\n return (\n round(sum(x for x, y in poly) / polylength, 2),\n round(sum(y for x, y in poly) / polylength, 2)\n )", "def get_geom_center(coordlist):\n return sum(coordlist) / len(coordlist)", "def centroid(self) -> Point[Scalar]:\n return self._context.multipoint_centroid(self)", "def centroid(sign, FS):\n\n time = compute_time(sign, FS)\n\n energy, time_energy=signal_energy(sign, time)\n\n total_energy = np.dot(np.array(time_energy),np.array(energy))\n energy_sum = np.sum(energy)\n\n if energy_sum == 0 or total_energy == 0:\n centroid = 0\n else:\n centroid = total_energy / energy_sum\n return centroid", "def get_centroid(self, sub_graph):\n centroid_node = 0\n max_degree = 0\n for node in sub_graph.nodes():\n if sub_graph.degree(node) > max_degree:\n max_degree = sub_graph.degree(node)\n centroid_node = node\n return centroid_node", "def center_data(x: npt.NDArray, y: npt.NDArray) -> Tuple[npt.NDArray, ...]:\n centroid = np.array([x.mean(), y.mean()])\n xc = x - centroid[0]\n yc = y - centroid[1]\n return xc, yc, centroid", "def compute_centroid(data):\n return sum(data[:]) / len(data)", "def getBeliefsCentroid(self, idx):\n x = 0.0\n y = 0.0\n total = 0.0\n for p in self.beliefs[idx]:\n x += p[0]\n y += p[1]\n total += 1.0\n return (round(x / total), round(y / total))", "def center_point(self) -> tuple:\n return (self.min_lat + self.max_lat) / 2, (self.min_lon + self.max_lon) / 2", "def centroid(self, region_list):\n centroid_list = [] # a list of [(distance from robot, centroid)]\n robot = map_helper.map_to_world(self.start[0], self.start[1], self.resolution, self.x_offset, self.y_offset)\n\t#rospy.loginfo(region_list)\n for region in region_list:\n n = len(region)\n i = math.trunc(n/2)\n centroid = region[i]\n\n x = abs(centroid[0] - robot[0])\n y = abs(centroid[1] - robot[1])\n dist = math.hypot(x, y)\n centroid_list.append((dist, centroid))\n return self.smallest_centroid(centroid_list)", "def find_centroid_for_each(self):", "def maybe_centroid2d(points):\n try:\n return centroid2d(points)\n except (ZeroDivisionError, TypeError, IndexError):\n return None", "def best_effort_centroid2d(points):\n ps = filter(is_point, points if points else [])\n return centroid2d(ps)", "def centroid_of_rect(roi):\n return int(roi.shape[0] / 2), int(roi.shape[1] / 2)", "def calc_centroid(self, points):\n\t\tself.canvas.create_polygon(points)\n\t\tx = [i[0] for i in points] # all the math is wrong :(\n\t\ty = [j[1] for j in points]\n\n\t\tarea = x[0] * (y[0] - y[-1])\n\t\tx_hat = (x[0] ** 2) * (y[0] - y[-1]) / (2) \n\t\ty_hat = -(y[0] ** 2) * (x[0] - x[-1]) / (2)\n\n\t\tfor i in range(1, len(points) - 1):\n\t\t\tdt = length(x[i], y[i], x[i - 1], y[i - 1])\n\t\t\tdy = 
y[i] - y[i - 1]\n\t\t\tdx = x[i] - x[i - 1]\n\t\t\tarea += 2 * x[i] * dy\n\t\t\tx_hat += (x[i] ** 2) * dy\n\t\t\ty_hat -= (y[i] ** 2) * dx\n\n\t\tarea += x[-1] * (y[-1] - y[-2])\n\t\tx_hat += (x[-1] ** 2) * (y[-1] - y[-2]) / 2\n\t\ty_hat -= (y[-1] ** 2) * (x[-1] - x[-2]) / 2\n\t\tarea /= 2\n\t\tx_hat /=2\n\t\ty_hat /= 2\n\t\tprint(\"Area: %s\\nX: %s\\nY: %s\" % (area, x_hat/area, y_hat/area))\n\t\treturn x_hat/area, y_hat/area", "def find_closest_centroid(x, centroids):\n\n return np.argmin([np.linalg.norm(x - centroids[k], axis=1)\n for k in range(centroids.shape[0])], axis=0)", "def test_centroids_from_polygon_data(self):\n\n for vectorname in ['kecamatan_jakarta_osm.shp',\n 'OSM_subset.shp']:\n\n # Read and verify test data\n filename = '%s/%s' % (TESTDATA, vectorname)\n p_layer = read_layer(filename)\n p_geometry = p_layer.get_geometry()\n p_attributes = p_layer.get_data()\n N = len(p_layer)\n assert FEATURE_COUNTS[vectorname] == N\n\n # Read reference centroids generated by Qgis\n filename = '%s/%s' % (TESTDATA, vectorname[:-4] + '_centroids.shp')\n r_layer = read_layer(filename)\n r_geometry = r_layer.get_geometry()\n r_attributes = r_layer.get_data()\n assert len(r_layer) == N\n\n # Compute centroid data\n c_layer = convert_polygons_to_centroids(p_layer)\n assert len(c_layer) == N\n c_geometry = c_layer.get_geometry()\n c_attributes = c_layer.get_data()\n\n # Check that attributes are the same\n for i in range(N):\n p_att = p_attributes[i]\n c_att = c_attributes[i]\n r_att = r_attributes[i]\n for key in p_att:\n assert key in c_att\n assert c_att[key] == p_att[key]\n\n assert key in r_att\n assert c_att[key] == r_att[key]\n\n # Check that coordinates are the same up to machine precision\n for i in range(N):\n c_geom = c_geometry[i]\n r_geom = r_geometry[i]\n\n assert numpy.allclose(c_geom, r_geom,\n rtol=1.0e-8, atol=1.0e-12)\n\n # Check that each centroid fall within its polygon\n for i in range(N):\n point = c_geometry[i]\n polygon = p_geometry[i]\n assert is_inside_polygon(point, polygon, closed=False)\n\n # Write to file (for e.g. 
visual inspection)\n out_filename = unique_filename(prefix='centroid', suffix='.shp')\n #print 'writing to', out_filename\n c_layer.write_to_file(out_filename)", "def find_centroid(event_file):\n \n print('Finding the centroid of the event file...\\n')\n \n make_img(event_file,clobber=True)\n \n fits = pyfits.open('temp.fits')\n \n #Previously used the RA and DEC headers to find the centre, now trying a more nuanced\n #max pixel value method\n \n #source_ra = fits[1].header['RA_TARG']\n #source_dec = fits[1].header['DEC_TARG']\n \n #return source_ra,source_dec\n \n data = fits[0].data\n \n #As the data from make_img is 1024x1024 based on the centre of the image, use modulo\n #arithmetic to find the physical x and y coordinates\n \n argmax = np.argmax(data)\n \n x = argmax%1024 + 3584\n y = int(argmax/1024) + 3584\n \n return x,y", "def geog(self) -> typing.Union[None, typing.Tuple[float, float]]:\n geog = self.data[4]\n geog = re.findall(r'(\\d{2})(\\d{2})(\\d{2}\\.\\d+)([NS]) (\\d{3})(\\d{2})(\\d{2}\\.\\d+)([EW])', geog)\n\n if geog:\n lat_deg, lat_min, lat_sec, lat_dir, long_deg, long_min, long_sec, long_dir = geog[0]\n\n lat = Point.parse_degrees(lat_deg, lat_min, lat_sec, direction=lat_dir)\n long = Point.parse_degrees(long_deg, long_min, long_sec, direction=long_dir)\n return lat, long\n return None", "def getCentroid(self):\n centroid = 0.0\n sumMagnitude = 0.0\n\n for i in range(0,self.nUniquePoints):\n freq,magnitude = self.fDomain[i]\n\n centroid += freq*magnitude\n sumMagnitude += magnitude\n \n centroid /= sumMagnitude\n return centroid", "def _get_centre(self, gdf):\n bounds = gdf[\"geometry\"].bounds\n centre_x = (bounds[\"maxx\"].max() + bounds[\"minx\"].min()) / 2\n centre_y = (bounds[\"maxy\"].max() + bounds[\"miny\"].min()) / 2\n return centre_x, centre_y", "def calcLatLon(northing, easting):\n from math import asin, atan2, cos, log, pow, sin, sqrt\n\n # CONSUS Albers variables (EPSG: 5070)\n RE_NAD83 = 6378137.0\n E_NAD83 = 0.0818187034 # Eccentricity\n D2R = 0.01745329251 # Pi/180\n standardParallel1 = 43.\n standardParallel2 = 47.\n centralMeridian = -114.\n originLat = 30\n originLon = 0\n\n m1 = cos(standardParallel1 * D2R) / \\\n sqrt(1.0 - pow((E_NAD83 * sin(standardParallel1 * D2R)), 2.0))\n m2 = cos(standardParallel2 * D2R) / \\\n sqrt(1.0 - pow((E_NAD83 * sin(standardParallel2 * D2R)), 2.0))\n\n def calcPhi(i):\n sinPhi = sin(i * D2R)\n return (1.0 - pow(E_NAD83, 2.0)) * \\\n ((sinPhi/(1.0 - pow((E_NAD83 * sinPhi), 2.0))) -\n 1.0/(2.0 * E_NAD83) *\n log((1.0 - E_NAD83 * sinPhi)/(1.0 + E_NAD83 * sinPhi)))\n\n q0 = calcPhi(originLat)\n q1 = calcPhi(standardParallel1)\n q2 = calcPhi(standardParallel2)\n nc = (pow(m1, 2.0) - pow(m2, 2.0)) / (q2 - q1)\n C = pow(m1, 2.0) + nc * q1\n rho0 = RE_NAD83 * sqrt(C - nc * q0) / nc\n rho = sqrt(pow(easting, 2.0) + pow((rho0 - northing), 2.0))\n q = (C - pow((rho * nc / RE_NAD83), 2.0)) / nc\n beta = asin(q / (1.0 - log((1.0 - E_NAD83) / (1.0 + E_NAD83)) *\n (1.0 - pow(E_NAD83, 2.0))/(2.0 * E_NAD83)))\n a = 1.0 / 3.0 * pow(E_NAD83, 2.0) + 31.0 / 180.0 * \\\n pow(E_NAD83, 4.0) + 517.0 / 5040.0 * pow(E_NAD83, 6.0)\n b = 23.0/360.0 * pow(E_NAD83, 4.0) + 251.0 / 3780.0 * pow(E_NAD83, 6.0)\n c = 761.0/45360.0 * pow(E_NAD83, 6.0)\n theta = atan2(easting, (rho0 - northing))\n\n lat = (beta + a * sin(2.0 * beta) + b * sin(4.0 * beta) +\n c * sin(6.0 * beta))/D2R\n lon = centralMeridian + (theta / D2R) / nc\n coords = [lat, lon]\n\n return coords", "def calc_distribution_centroid_and_spread_2d(xdata, ydata, use_iqr=False):\n\n (xcentre, 
sig_x) = calc_distribution_centre_and_spread(xdata, use_iqr=use_iqr)\n (ycentre, sig_y) = calc_distribution_centre_and_spread(ydata, use_iqr=use_iqr)\n\n return xcentre, sig_x, ycentre, sig_y", "def centroid(im, mask=None, w=None, x=None, y=None):\n from numpy import ones, arange, meshgrid\n # 2009-09-02 13:35 IJC: Created\n if mask==None:\n mask = ones(im.shape)\n if w==None:\n w = ones(im.shape)\n if not (im.shape==mask.shape and im.shape==w.shape):\n print \"Image, mask, and weights must have same shape! Exiting.\"\n return -1\n if x==None or y==None:\n xx = arange(im.shape[1])\n yy = arange(im.shape[0])\n x,y = meshgrid(xx,yy)\n x0 = (x*im*mask*w).sum()/(im*mask*w).sum()\n y0 = (y*im*mask*w).sum()/(im*mask*w).sum()\n\n return (x0,y0)", "def centroid(im, mask=None, w=None, x=None, y=None):\n from numpy import ones, arange, meshgrid\n # 2009-09-02 13:35 IJC: Created\n if mask==None:\n mask = ones(im.shape)\n if w==None:\n w = ones(im.shape)\n if not (im.shape==mask.shape and im.shape==w.shape):\n print \"Image, mask, and weights must have same shape! Exiting.\"\n return -1\n if x==None or y==None:\n xx = arange(im.shape[1])\n yy = arange(im.shape[0])\n x,y = meshgrid(xx,yy)\n x0 = (x*im*mask*w).sum()/(im*mask*w).sum()\n y0 = (y*im*mask*w).sum()/(im*mask*w).sum()\n\n return (x0,y0)", "def getCentroid(self):\r\n return self._centroid", "def get_centre(self):\n # just get the centroid\n # perhaps try something like:\n # https://github.com/mapbox/polylabel/blob/master/polylabel.js\n # in the future\n coords = np.array([(n.x, n.y) for n in self.nodes])\n centre_x = coords[:, 0].mean()\n centre_y = coords[:, 1].mean()\n return centre_x, centre_y", "def get_polygon_envelope(polygon, x_pixel_size, y_pixel_size):\n # retrieve polygon points\n poly_pts = list(polygon.exterior.coords)\n # split tuple points into x and y coordinates and convert them to numpy arrays\n xs, ys = [np.array(coords) for coords in zip(*poly_pts)]\n # compute bounding box\n min_x, min_y, max_x, max_y = min(xs), min(ys), max(xs), max(ys)\n # round boundary coordinates to upper-left corner\n min_x = int(round(min_x / x_pixel_size, DECIMALS)) * x_pixel_size\n min_y = int(np.ceil(round(min_y / y_pixel_size, DECIMALS))) * y_pixel_size\n max_x = int(round(max_x / x_pixel_size, DECIMALS)) * x_pixel_size\n max_y = int(np.ceil(round(max_y / y_pixel_size, DECIMALS))) * y_pixel_size\n\n return min_x, min_y, max_x, max_y", "def get_center_location(self):\n latitude = 0\n longitude = 0\n for centroid in self.centroids:\n latitude += centroid[0]\n longitude += centroid[1]\n return [latitude / len(self.centroids), longitude / len(self.centroids)]", "def ComputeCentroid(self, vtkPoints, int_tuple, p_float=..., p_float=..., p_float=...):\n ...", "def extract_poi(max_lat=None, max_lng=None, min_lat=None, min_lng=None, lat=None, lng=None, l=50.0, h=50.0,\n api_key=None):\n\n # Generate bounding box coordinates\n if (max_lat is None) & (max_lng is None) & (min_lat is None) & (min_lng is None) & (lat is None) & (lng is None):\n raise ValueError('Please either provide a bounding box defined by its edges (i.e. 
maximum latitude, maximum longitude, minimum latitude, minimum longitude) or a single latitude, longitude pair')\n\n elif (lat is not None) & (lng is not None):\n max_lat, max_lng, min_lat, min_lng = translate_coordinate(lat, lng, l, h)\n\n elif (max_lat is not None) & (max_lng is not None) & (min_lat is not None) & (min_lng is not None):\n lat, lng = identify_centroid(max_lat=max_lat, max_lng=max_lng, min_lat=min_lat, min_lng=min_lng)\n\n else:\n pass\n\n if (max_lat is None) | (max_lng is None) | (min_lat is None) | (min_lng is None) | (lat is None) | (lng is None):\n raise ValueError('Please either provide a bounding box defined by its edges (i.e. maximum latitude, maximum longitude, minimum latitude, minimum longitude) or a single latitude, longitude pair')\n\n radius = calculate_circle_radius(max_lat, max_lng, lat, lng)\n\n # Pass query into Google Places API\n geocode_url = 'https://maps.googleapis.com/maps/api/place/nearbysearch/json'\n params = dict(key=api_key,\n location=str(lat)+','+str(lng),\n radius=str(radius))\n\n query_result = requests.get(url=geocode_url, params=params)\n\n return query_result.json()", "def _get_voronoi_centroid_array(lsm_lat_array, lsm_lon_array, extent):\n YMin = extent[2]\n YMax = extent[3]\n XMin = extent[0]\n XMax = extent[1]\n\n ptList = []\n if (lsm_lat_array.ndim == 2) and (lsm_lon_array.ndim == 2):\n # generate point list with 2D lat lon lists\n if extent:\n # exctract subset within extent\n lsm_dx = np.max(np.absolute(np.diff(lsm_lon_array)))\n lsm_dy = np.max(np.absolute(np.diff(lsm_lat_array, axis=0)))\n\n # remove values with NaN\n lsm_lat_array = np.ma.filled(lsm_lat_array, fill_value=-9999)\n lsm_lon_array = np.ma.filled(lsm_lon_array, fill_value=-9999)\n\n lsm_lat_indices_from_lat, lsm_lon_indices_from_lat = \\\n np.where((lsm_lat_array >= (YMin - 2*lsm_dy)) &\n (lsm_lat_array <= (YMax + 2*lsm_dy)))\n lsm_lat_indices_from_lon, lsm_lon_indices_from_lon = \\\n np.where((lsm_lon_array >= (XMin - 2*lsm_dx)) &\n (lsm_lon_array <= (XMax + 2*lsm_dx)))\n\n lsm_lat_indices = np.intersect1d(lsm_lat_indices_from_lat,\n lsm_lat_indices_from_lon)\n lsm_lon_indices = np.intersect1d(lsm_lon_indices_from_lat,\n lsm_lon_indices_from_lon)\n\n lsm_lat_list = \\\n lsm_lat_array[lsm_lat_indices, :][:, lsm_lon_indices]\n lsm_lon_list = \\\n lsm_lon_array[lsm_lat_indices, :][:, lsm_lon_indices]\n # Create a list of geographic coordinate pairs\n for i in range(len(lsm_lat_indices)):\n for j in range(len(lsm_lon_indices)):\n ptList.append([lsm_lon_list[i][j], lsm_lat_list[i][j]])\n\n elif lsm_lat_array.ndim == 1 and lsm_lon_array.ndim == 1:\n # generate point list with 1D lat lon lists\n if extent:\n Ybuffer = 2 * abs(lsm_lat_array[0]-lsm_lat_array[1])\n Xbuffer = 2 * abs(lsm_lon_array[0]-lsm_lon_array[1])\n # Extract the lat and lon within buffered extent\n # (buffer with 2* interval degree)\n lsm_lat_list = lsm_lat_array[(lsm_lat_array >= (YMin - Ybuffer)) &\n (lsm_lat_array <= (YMax + Ybuffer))]\n lsm_lon_list = lsm_lon_array[(lsm_lon_array >= (XMin - Xbuffer)) &\n (lsm_lon_array <= (XMax + Xbuffer))]\n\n # Create a list of geographic coordinate pairs\n for ptX in lsm_lon_list:\n for ptY in lsm_lat_list:\n ptList.append([ptX, ptY])\n else:\n raise IndexError(\"Lat/Lon lists have invalid dimensions. 
\"\n \"Only 1D or 2D arrays allowed ...\")\n\n if len(ptList) <= 0:\n raise IndexError(\"The watershed is outside of the bounds of the\"\n \" land surface model grid ...\")\n\n return np.array(ptList) # set-up for input to Delaunay", "def as_centroid_feature(self) -> Feature:\n return Feature(geometry=self.centroid, attributes=self._get_feature_attributes())", "def calcCentroid(self):\n size = len(self.vectors)\n # zip all features together\n zipped = zip(*self.vectors)\n # Calculate the mean for each feature/column\n centroid = [math.fsum(column)/size for column in zipped]\n \n return centroid", "def get_center(self):\n lon, lat = self.coordinates\n\n dimx = lon.shape[0]\n dimy = lon.shape[1]\n \n return (lon[dimx/2][dimy/2],lat[dimx/2][dimy/2])", "def getCentroid(self) -> Vec3:\n return self.centroid()", "def get_center_point(bbox):\n x_middle = 42\n y_middle = 42\n\n # HINT: bbox.xmin, bbox,xmax, bbox.ymin, bbox.ymax\n return (x_middle, y_middle)", "def project_nxpos(G, map_center = False):\n lats = nx.get_node_attributes(G, 'x')\n lons = nx.get_node_attributes(G, 'y')\n pos = {nid:(lat,-lon) for (nid,lat,lon) in common_entries(lats,lons)}\n if map_center:\n loncenter = map_center[0]\n latcenter = map_center[1]\n else:\n loncenter = np.mean(list(lats.values()))\n latcenter = -1* np.mean(list(lons.values()))\n local_azimuthal_projection = \"+proj=aeqd +R=6371000 +units=m +lat_0={} +lon_0={}\".format(latcenter, loncenter)\n # Use transformer: https://gis.stackexchange.com/questions/127427/transforming-shapely-polygon-and-multipolygon-objects\n wgs84_to_aeqd = pyproj.Transformer.from_proj(\n pyproj.Proj(\"+proj=longlat +datum=WGS84 +no_defs\"),\n pyproj.Proj(local_azimuthal_projection))\n pos_transformed = {nid:list(ops.transform(wgs84_to_aeqd.transform, Point(latlon)).coords)[0] for nid, latlon in pos.items()}\n return pos_transformed, (loncenter,latcenter)", "def get_centerlines_from_geom(\n geometry,\n segmentize_maxlen=0.5,\n max_points=3000,\n simplification=0.05,\n smooth_sigma=5,\n debug=False\n ):\n\n if geometry.geom_type not in [\"MultiPolygon\", \"Polygon\"]:\n raise TypeError(\n \"Geometry type must be Polygon or MultiPolygon, not %s\" %(\n geometry.geom_type\n )\n )\n\n if geometry.geom_type == \"MultiPolygon\":\n out_centerlines = MultiLineString([\n get_centerlines_from_geom(subgeom, segmentize_maxlen)\n for subgeom in geometry\n if get_centerlines_from_geom(subgeom, segmentize_maxlen) != None\n ])\n return out_centerlines\n else:\n\n # Convert Polygon to Linestring.\n if len(geometry.interiors) > 0:\n boundary = geometry.exterior\n else:\n boundary = geometry.boundary\n\n # print list(boundary.coords)\n if debug:\n debug_output['original_points'] = MultiPoint([\n point\n for point in list(boundary.coords)\n ])\n\n # Convert to OGR object and segmentize.\n ogr_boundary = ogr.CreateGeometryFromWkb(boundary.wkb)\n ogr_boundary.Segmentize(segmentize_maxlen)\n segmentized = loads(ogr_boundary.ExportToWkt())\n\n # Get points.\n points = segmentized.coords\n\n # Simplify segmentized geometry if necessary. 
This step is required\n # as huge geometries slow down the centerline extraction significantly.\n tolerance = simplification\n while len(points) > max_points:\n # If geometry is too large, apply simplification until geometry\n # is simplified enough (indicated by the \"max_points\" value)\n tolerance += simplification\n simplified = boundary.simplify(tolerance)\n points = simplified.coords\n if debug:\n debug_output['segmentized_points'] = MultiPoint([\n point\n for point in points\n ])\n\n # Calculate Voronoi diagram.\n vor = Voronoi(points)\n if debug:\n debug_output['voronoi'] = multilinestring_from_voronoi(\n vor,\n geometry\n )\n\n # The next three steps are the most processing intensive and probably\n # not the most efficient method to get the skeleton centerline. If you\n # have any recommendations, I would be very happy to know.\n\n # Convert to networkx graph.\n graph = graph_from_voronoi(vor, geometry)\n\n # Get end nodes from graph.\n end_nodes = get_end_nodes(graph)\n\n if len(end_nodes) < 2:\n return None\n\n # Get longest path.\n longest_paths = get_longest_paths(\n end_nodes,\n graph\n )\n\n # get least curved path.\n best_path = get_least_curved_path(longest_paths[:5], vor.vertices)\n\n #print (best_path == longest_paths[0])\n\n #best_path = longest_paths[0]\n\n centerline = LineString(vor.vertices[best_path])\n if debug:\n debug_output['centerline'] = centerline\n\n # Simplify again to reduce number of points.\n # simplified = centerline.simplify(tolerance)\n # centerline = simplified\n\n\n # Smooth out geometry.\n centerline_smoothed = smooth_linestring(centerline, smooth_sigma)\n\n out_centerline = centerline_smoothed\n\n return out_centerline", "def _identify_centroids( locations, medians ):\n log.info(\"Identifying the centroid of each amplicon\")\n min_pos = min([s for s, e in locations.itervalues()])\n max_pos = max([e for s, e in locations.itervalues()])\n mid_pos = (min_pos + max_pos) / 2\n five_prime, three_prime = _split_medians( medians, mid_pos )\n #five_prime_center = _calculate_centroid( five_prime )\n #three_prime_center = _calculate_centroid( three_prime )\n five_prime_center = (min_pos + mid_pos) / 2\n three_prime_center = (max_pos + mid_pos) / 2\n return (five_prime_center, three_prime_center)", "def _find_centroid_sphere(ch_pos, group_names):\n cartesian_positions = np.array([ch_pos[ch_name] for ch_name in group_names])\n sphere_positions = _cart_to_sph(cartesian_positions)\n cartesian_pos_centroid = np.average(cartesian_positions, axis=0)\n sphere_pos_centroid = _cart_to_sph(cartesian_pos_centroid)\n # average the radius and overwrite it\n avg_radius = np.average(sphere_positions, axis=0)[0]\n sphere_pos_centroid[0, 0] = avg_radius\n # convert back to cartesian\n pos_centroid = _sph_to_cart(sphere_pos_centroid)[0, :]\n return pos_centroid", "def calculate_celestial_pole(native_reference_x, native_reference_cos_lat,\n native_reference_sin_lat,\n reference_x, reference_y,\n reference_cos_lat, reference_sin_lat,\n native_pole_x, native_pole_y,\n select_solution): # pragma: no cover\n right_angle = np.pi / 2\n d_phi = native_pole_x - native_reference_x\n sin_d_phi = np.sin(d_phi)\n cos_d_phi = np.cos(d_phi)\n delta_p1 = np.arctan2(\n native_reference_sin_lat, native_reference_cos_lat * cos_d_phi)\n cs = native_reference_cos_lat * sin_d_phi\n\n delta_p2 = acos(reference_sin_lat / np.sqrt(1 - (cs ** 2)))\n celestial_y = 0.0\n\n delta_n = delta_p1 + delta_p2\n delta_s = delta_p1 - delta_p2\n if delta_n > delta_s:\n temp = delta_s\n delta_s = delta_n\n delta_n 
= temp\n\n solutions = 0\n if np.abs(delta_n) <= right_angle:\n celestial_y = delta_n\n solutions += 1\n\n if np.abs(delta_s) <= right_angle:\n solutions += 1\n if solutions == 1:\n celestial_y = delta_s\n elif select_solution == -1:\n celestial_y = delta_s\n elif select_solution == 0:\n if np.abs(delta_s - native_pole_y) < np.abs(\n delta_n - native_pole_y):\n celestial_y = delta_s\n\n if solutions == 0: # pragma: no cover (shouldn't happen)\n return np.nan, np.nan\n\n if equal_angles(np.abs(reference_y), right_angle):\n celestial_x = reference_x\n elif equal_angles(np.abs(celestial_y), right_angle):\n celestial_x = reference_x\n if celestial_y > 0:\n celestial_x += native_pole_x - native_reference_x - np.pi\n else:\n celestial_x += native_reference_x - native_pole_x\n else:\n cl = np.cos(celestial_y)\n sl = np.sin(celestial_y)\n\n sin_d_lon = sin_d_phi * native_reference_cos_lat / reference_cos_lat\n cos_d_lon = native_reference_sin_lat - (sl * reference_sin_lat)\n cos_d_lon /= cl * reference_cos_lat\n celestial_x = reference_x - np.arctan2(sin_d_lon, cos_d_lon)\n\n return celestial_x, celestial_y", "def centroid2d(points):\n number_of_points = 0\n centroid = [0, 0]\n\n for p in points:\n centroid = (centroid[0] + p[0], centroid[1] + p[1])\n number_of_points += 1\n\n centroid = [centroid[0] / number_of_points, centroid[1] /\n number_of_points] if number_of_points else None\n\n return centroid", "def hull_centroid(points):\n dim = [np.unique(points[:, i]).size != 1 for i in range(3)]\n hull = ConvexHull(points[:, dim])\n centroid = points.mean(axis=0)\n centroid[dim] = hull.points[hull.vertices].mean(axis=0)\n\n return centroid", "def long_centroid(graph):\n all_proteins = []\n \n G = nx.read_gml(graph)\n for node in G._node:\n y = G._node[node]\n seq = y[\"protein\"].split(\";\")\n for x in seq:\n all_proteins.append(\">\" + y[\"name\"] + \"\\n\" + x)\n \n all_proteins = \"\\n\".join(all_proteins)\n return all_proteins", "def get_centroids_trip(self, trip):\n\n self.trip_centroids = trip.merge(self.df_centroids,\n how='inner',\n left_on='pro_com',\n right_on='pro_com')", "def get_region_centroid(mask, region):\n coords = np.column_stack(np.where(mask == region))\n coords = np.apply_along_axis(np.mean, 0, coords).round()\n coords = np.uint8(coords)\n return(coords)", "def get_centroids(self):\n if not self._trained:\n raise ValueError(\"SOM not trained yet\")\n return self._centroid_grid", "def get_centroids(self):\n if not self._trained:\n raise ValueError(\"SOM not trained yet\")\n return self._centroid_grid", "def point2wgs84(self, crsop):\n latr = math.radians(self.lat)\n lngr = math.radians(self.lng)\n \n # a is the semi-major axis of given datum.\n a = crsop.SemimajorAxisInM\n \n # f is the flattening of given datum\n f = 1.0/crsop.InverseFlattening\n dx = crsop.dx\n dy = crsop.dy\n dz = crsop.dz\n \n # da is the difference between the WGS84 and source ellipsoid semi-major axes.\n da = 6378137.0 - a\n \n # df is the difference between the WGS84 and source CRS flattenings.\n df = 1.0/298.257223563 - f\n \n e_squared = f*(2-f)\n rho = a*(1-e_squared)/math.pow((1-e_squared*sqr(math.sin(latr))),1.5)\n nu = a/math.pow((1-e_squared*sqr(math.sin(latr))),0.5)\n dlat = (1/rho)*(-dx*math.sin(latr)*math.cos(lngr) - \\\n dy*math.sin(latr)*math.sin(lngr) + \\\n dz*math.cos(latr) + (f*da + a*df)*math.sin(2*latr))\n dlng = (-dx*math.sin(lngr) + dy*math.cos(lngr))/(nu*math.cos(latr))\n newlng = lng180(math.degrees(lngr + dlng))\n newlat = math.degrees(latr + dlat)\n return 
Point(float(truncate(newlng,DEGREE_DIGITS)), \\\n float(truncate(newlat,DEGREE_DIGITS)))", "def centroid_1D(image, xpeak, xhw, debug=False):\n \n # Collapse input image unto x axis\n vector = np.sum(image, axis=0)\n \n c_sum = 0.0\n xcen = 0.0\n \n for ii in xrange(int(xpeak - xhw - 1), int(xpeak + xhw - 1)):\n c_sum = c_sum + vector[ii]\n xloc = ii + 1\n xcen += xloc * vector[ii]\n \n print('(centroid_1D): Sum = ', c_sum)\n \n \n if c_sum == 0:\n print('(centroid_1D): ERROR - divide by zero')\n else:\n xcen /= c_sum\n \n print('(centroid_1D): Centroid = ', xcen-1)\n \n # -1 on both axes, as Python is 0 major \n return xcen-1, c_sum", "def test_centroids_mask():\n data = np.ones((2, 2)).astype(np.float)\n mask = [[False, False], [True, True]]\n centroid = centroid_com(data, mask=None)\n centroid_mask = centroid_com(data, mask=mask)\n assert_allclose([0.5, 0.5], centroid, rtol=0, atol=1.e-6)\n assert_allclose([0.5, 0.0], centroid_mask, rtol=0, atol=1.e-6)", "def recenter_polygon(vertices: List[Tuple[float, float]]) -> List[Tuple[float, float]]:\n centroid = convex_centroid(vertices)\n new_verts = []\n for v in vertices:\n v = np.array(v)\n new_verts.append(v - centroid)\n return new_verts", "def getShortestCoordinate (analyzer,startLat, startLon, endLat, endLon):\n estacionOrigen=model.getCloserStation (analyzer, startLat, startLon)\n estacionDestino=model.getCloserStation (analyzer, endLat, endLon)\n ruta,tiempo=model.getShortestCoordinate(analyzer,estacionOrigen, estacionDestino)\n return (estacionOrigen,estacionDestino,ruta,tiempo)", "def getCentroid(cluster):\n try:\n return np.mean(cluster, axis = 0)\n except:\n return None", "def getProjectedShapes(shapes, xmin, xmax, ymin, ymax):\n latmiddle = ymin + (ymax-ymin)/2.0\n lonmiddle = xmin + (xmax-xmin)/2.0\n projstr = '+proj=ortho +datum=WGS84 +lat_0=%.4f +lon_0=%.4f +x_0=0.0 +y_0=0.0' % (latmiddle, lonmiddle)\n proj = pyproj.Proj(projparams=projstr)\n project = partial(\n pyproj.transform,\n pyproj.Proj(proj='latlong', datum='WGS84'),\n proj)\n\n pshapes = []\n for tshape in shapes:\n if tshape['geometry']['type'] == 'Polygon':\n pshapegeo = shape(tshape['geometry'])\n else:\n pshapegeo = shape(tshape['geometry'])\n pshape = transform(project, pshapegeo)\n pshapes.append(pshape) # assuming here that these are simple polygons\n\n return (pshapes, proj)", "def compute_voronoi_centroid_volume(vertices):\n from scipy.spatial import Delaunay, ConvexHull\n\n tess = Delaunay(vertices)\n dimension = np.shape(vertices)[1]\n\n w = np.zeros((tess.nsimplex, 1))\n cent = np.zeros((tess.nsimplex, dimension))\n for i in range(tess.nsimplex):\n # pylint: disable=E1136\n ch = ConvexHull(tess.points[tess.simplices[i]])\n w[i] = ch.volume\n cent[i, :] = np.mean(tess.points[tess.simplices[i]], axis=0)\n\n volume = np.sum(w)\n centroid = np.matmul(np.divide(w, volume).T, cent)\n\n return centroid, volume", "def getYesPoints(pshapes, proj, dx, nmax, touch_center=True):\n\n mxmin = 9e10\n mxmax = -9e10\n mymin = 9e10\n mymax = -9e10\n for pshape in pshapes:\n pxmin, pymin, pxmax, pymax = pshape.bounds\n if pxmin < mxmin:\n mxmin = pxmin\n if pxmax > mxmax:\n mxmax = pxmax\n if pymin < mymin:\n mymin = pymin\n if pymax > mymax:\n mymax = pymax\n\n if not touch_center:\n geodict = GeoDict.createDictFromBox(mxmin, mxmax, mymin, mymax, dx, dx)\n img = rasterizeShapes(pshapes, geodict)\n #now get the numpy array of x/y coordinates where covgrid == 1\n idx = np.where(img == 1)[0]\n x, y = np.unravel_index(idx, (geodict.ny, geodict.nx))\n yespoints = 
list(zip(x.flatten(), y.flatten()))\n nrows = geodict.ny\n ncols = geodict.nx\n xvar = np.arange(geodict.xmin, geodict.xmax+geodict.dx, geodict.dx)\n yvar = np.arange(geodict.ymin, geodict.ymax+geodict.dy, geodict.dy)\n else:\n xvar = np.arange(mxmin, mxmax+dx, dx)\n yvar = np.arange(mymin, mymax+dx, dx)\n ncols = len(xvar)\n nrows = len(yvar)\n if nmax is not None:\n if ncols*nrows > nmax:\n aspect = ncols/nrows\n ncols = np.sqrt(nmax*aspect)\n nrows = nmax/ncols\n ncols = int(ncols)\n nrows = int(nrows)\n #re-calculate dx here...\n tdx = (mxmax-mxmin)/ncols\n tdy = (mymax-mymin)/nrows\n dx = np.max(tdx, tdy)\n xvar = np.arange(mxmin, mxmax+dx, dx)\n yvar = np.arange(mymin, mymax+dx, dx)\n\n #Get the \"yes\" points to sample from\n yespoints = []\n idx = []\n shapeidx = 0\n if pshapes[0].type == 'Polygon':\n #loop over shapes, projecting each one, then get the sample points\n for pshape in pshapes:\n if not shapeidx % 1000:\n print('Searching polygon %i of %i' % (shapeidx, len(pshapes)))\n shapeidx += 1\n pxmin, pymin, pxmax, pymax = pshape.bounds\n leftcol = np.where((pxmin - xvar) >= 0)[0].argmax()\n rightcol = np.where((xvar - pxmax) >= 0)[0][0]\n bottomrow = np.where((pymin - yvar) >= 0)[0].argmax()\n toprow = np.where((yvar - pymax) >= 0)[0][0]\n xp = np.arange(xvar[leftcol], xvar[rightcol]+dx, dx)\n yp = np.arange(yvar[bottomrow], yvar[toprow]+dx, dx)\n xmesh, ymesh = np.meshgrid(xp, yp)\n xy = list(zip(xmesh.flatten(), ymesh.flatten()))\n for point in xy:\n ix = np.where(xvar == point[0])[0][0]\n iy = np.where(yvar == point[1])[0][0]\n if pshape.contains(Point(point)):\n yespoints.append(point)\n idx.append(np.ravel_multi_index((iy, ix), (nrows, ncols), mode='raise', order='C'))\n else:\n yespoints = []\n for pshape in pshapes:\n yespoints.append(pshape.coords[0])\n\n return (np.array(yespoints), nrows, ncols, xvar, yvar, idx)", "def find_center_of_coordinates(selection='(all)', state=-1):\n # find middle x, y, z coordinate of the selection\n state = utils.int_to_state(state)\n minc, maxc = cmd.get_extent(selection, state=state)\n coc = [float(l + (u - l) / 2.0) for l, u in zip(minc, maxc)]\n return coc", "def get_center(self):\n return center_points(np.expand_dims(self.best_box, axis=0))[0]", "def _get_x_center_pts(halfway_x, halfway_y):\n return reduce(iconcat, _get_pt_tuple(range(1, halfway_x),\n range(1, halfway_y)))", "def get_element_centroids(self):\n if self.centroids is None:\n self.centroids = np.vstack((\n np.mean(self.grid['x'], axis=1),\n np.mean(self.grid['z'], axis=1)\n )).T\n\n return self.centroids", "def center_of_mass_polygon(polygon):\n L = 0\n cx = 0\n cy = 0\n cz = 0\n p = len(polygon)\n for i in range(-1, p - 1):\n p1 = polygon[i]\n p2 = polygon[i + 1]\n d = distance_point_point(p1, p2)\n cx += 0.5 * d * (p1[0] + p2[0])\n cy += 0.5 * d * (p1[1] + p2[1])\n cz += 0.5 * d * (p1[2] + p2[2])\n L += d\n cx = cx / L\n cy = cy / L\n cz = cz / L\n return cx, cy, cz", "def Get_epsg(g, extension = 'tiff'):\n try:\n if extension == 'tiff':\n # Get info of the dataset that is used for transforming\n try:\n dest = gdal.Open(g)\n except:\n dest = g\n g_proj = dest.GetProjection()\n Projection=g_proj.split('EPSG\",\"')\n epsg_to=int((str(Projection[-1]).split(']')[0])[0:-1])\n \n if extension == 'GEOGCS':\n Projection = g\n epsg_to=int((str(Projection).split('\"EPSG\",\"')[-1].split('\"')[0:-1])[0])\n\n except:\n epsg_to=4326\n #print 'Was not able to get the projection, so WGS84 is assumed'\n \n return(epsg_to)", "def get_center_position( peg, position_on_peg):\n x = 
Hanoi.FIRST_TOWER_X + peg * Hanoi.DISTANCE_BETWEEN_TOWER\n y = position_on_peg * Hanoi.DISK_HEIGHT + 0.5 * Hanoi.DISK_HEIGHT\n return (x, y)" ]
[ "0.6208906", "0.60699695", "0.5872511", "0.5834941", "0.5801932", "0.5733032", "0.56430256", "0.5637194", "0.5616599", "0.5605853", "0.55601245", "0.5548161", "0.55228645", "0.5449253", "0.5448886", "0.5407162", "0.5400743", "0.5362452", "0.526248", "0.52553886", "0.5202955", "0.5177474", "0.5116135", "0.5102598", "0.5094321", "0.5070557", "0.5069573", "0.5043621", "0.5037388", "0.5036169", "0.50352794", "0.50206876", "0.5012893", "0.49869817", "0.49820432", "0.49742642", "0.4966296", "0.49565333", "0.49493033", "0.49424514", "0.49413967", "0.490964", "0.48999417", "0.489286", "0.48700997", "0.48623216", "0.4860875", "0.4846137", "0.48311406", "0.48266274", "0.48059058", "0.4805123", "0.4804255", "0.47947878", "0.47873208", "0.47778022", "0.47770974", "0.47702527", "0.47699904", "0.47699904", "0.4737092", "0.47296348", "0.4716966", "0.46763444", "0.4673815", "0.46723253", "0.4671518", "0.46634433", "0.46549463", "0.4652279", "0.46489406", "0.4639578", "0.4638339", "0.46345004", "0.46261397", "0.46134856", "0.46038166", "0.46034914", "0.46028104", "0.4601225", "0.4568341", "0.45597938", "0.45571855", "0.45571855", "0.45564216", "0.45469752", "0.45379016", "0.45339167", "0.4530845", "0.45295417", "0.45259234", "0.4520588", "0.45092085", "0.44977963", "0.4494405", "0.44937554", "0.44935876", "0.44869438", "0.44841477", "0.44780126" ]
0.7754564
0
Return a copy of the geometry. OGR does not provide a good method for doing this.
def copyGeom(geom):
    geomJson = geom.ExportToJson()
    newGeom = ogr.CreateGeometryFromJson(geomJson)
    return newGeom
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _prepare_with_copy(geometry):\n geometry = pygeos.apply(geometry, lambda x: x) # makes a copy\n pygeos.prepare(geometry)\n return geometry", "def getGeometry(self):\n return self.geometry", "def getGeometry(self):\n return self.geometry", "def geometry(self):\n return self._geometry", "def geometry(self):\n return self._geometry", "def get_obj_geo_copy(node_path):\n # Create a new hou.Geometry object.\n geo = hou.Geometry()\n\n # Get the geometry object's geo.\n source_geo = get_obj_geo(node_path)\n\n # Merge the geo to copy it.\n geo.merge(source_geo)\n\n return geo", "def geometry(self):\n return self[0].geometry", "def geometry():\n return Geometry()", "def get_geometry(self):\n geometry = self._geometry\n for geo in self._holes:\n geometry = geometry.difference(geo) \n return geometry", "def copy(self):\n return type(self)(self._geojson.copy(), **self._attrs)", "def geometry(self) -> Polyline:\n return self._geometry", "def get_geometry(self):\n\n return rs.ObjectsByLayer(self.name)", "def copy(self):\n return vertex(self.x, self.y, self.z)", "def _get_geometry(self, val):\n g = OGRGeometry(val)\n return json.loads(g.json)", "def _get_geometry(self):\r\n if self._geometry_column_name not in self.columns:\r\n raise AttributeError(\"Geometry Column Not Present: %s\" % self._geometry_column_name)\r\n return self[self._geometry_column_name]", "def copy(self):\n return _coordsys.coordsys_copy(self)", "def get_geometry(self, selection_name):", "def copy(self):\n return Point(self.data, name=self.name+\"'\")", "def getSourceGeometry(self, *args):\n return _osgAnimation.RigGeometry_getSourceGeometry(self, *args)", "def clone(self):\n return _libsbml.Point_clone(self)", "def get_geometry(self, region=None) -> List[Geometry2D]:\n return self.geometry_list", "def __copy__(self):\n return Region(list_poly=self.list_poly[:],\n props=self.props.copy())", "def restore_geometry(self):\n return stools.SETTINGS.get(\"waveformWidget/geometry\")", "def get_default_geometry(self):", "def clone(self, *args):\n return _osgAnimation.RigGeometry_clone(self, *args)", "def get_validation_geometry(self, world) -> ValidationGeometry:\n return create_validation_geometry(self.regions)", "def get_validation_geometry(self, world) -> ValidationGeometry:\n return create_validation_geometry(self.regions)", "def copy(self):\r\n return BasicMesh(self.gl_lists, list(self.pos),\r\n list(self.rotation), list(self.verts),\r\n self.scale, list(self.colorize))", "def copyFrom(self, *args):\n return _osgAnimation.RigGeometry_copyFrom(self, *args)", "def copy(self):\n return Vector(self.x, self.y)", "def give_geom(self):\n sgeom = self.get_smesh().GetShapeToMesh()\n if sgeom:\n node = self.node\n gnode = Node(node.get_std(), node.get_bld(), sgeom.GetStudyEntry())\n return Geom(gnode)", "def gdf(self) -> gpd.GeoDataFrame:\n return self.just_geometry_gdf.join(self.df)", "def clone(self):\n return _libsbml.BoundingBox_clone(self)", "def clone(self):\n return Point(self.x, self.y)", "def clone(self):\n return Point(self.x, self.y)", "def warp_geometry(geom, src_crs, dst_crs):\n return shapely.geometry.shape(rasterio.warp.transform_geom(src_crs, dst_crs, shapely.geometry.mapping(geom)))", "def copy(self):\n cdef StdVectorFst result = StdVectorFst.__new__(StdVectorFst)\n if self.isyms is not None:\n result.isyms = self.isyms.copy()\n if self.osyms is not None:\n result.osyms = (result.isyms if (self.isyms is self.osyms)\n else self.osyms.copy())\n result.fst = <openfst.StdVectorFst*> self.fst.Copy()\n return result", "def 
load_geometry(self, data):\n cdata = create_string_buffer(data)\n print(len(data))\n geo_ptr = c_void_p()\n ckresult(\n _dll.FMOD_System_LoadGeometry(self._ptr, cdata, len(data), byref(geo_ptr))\n )\n return get_class(\"Geometry\")(geo_ptr)", "def projection(self, point):\n return gs.copy(point)", "def geometry(self, objectId):\n\n objectId = GeometryReference(objectId, self)\n req = urllib2.Request(self.baseUri + 'geometry/%d' % objectId.id)\n r = urllib2.urlopen(req)\n\n data = json.load(r)\n r.close()\n return data", "def get_geospatial(self):\n self.unimpl_base_class()", "def copy(self):\n return type(self)(self.lat_lon[0], self.lat_lon[1], **self._attrs)", "def get_geometry(self, output='array'):\n \n geo_util = geo.Geo()\n \n if self.geometry[output] is None:\n geometry = self.metadata['geometry']\n \n if isinstance(geometry, str):\n geometry = json.loads(geometry.replace(\"'\", '\"'))\n \n coords = geometry['coordinates']\n self.geometry[output] = geo_util.convert_imageGeom(coords, output)\n \n return self.geometry[output]", "def just_geometry_gdf(self) -> gpd.GeoDataFrame:\n if not self.loa:\n raise TypeError(\n f\"Dataset {self.name} was instantiated without the loa \"\n f\"argument. Can't get geometry if don't know loa\"\n )\n geom: Union[GeomCountry, GeomPriogrid]\n if self.loa == \"cm\":\n geom = GeomCountry()\n elif self.loa == \"pgm\":\n geom = GeomPriogrid()\n elif self.loa == \"am\":\n raise NotImplementedError(\"Actor month geometries not implemented\")\n return geom.gdf", "def copy(self):\n new = copy.copy(self)\n new._surf = self._surf.copy()\n return new", "def copy(self):\n new = copy.copy(self)\n new._surf = self._surf.copy()\n return new", "def __init__(self, geom):\n self.geom = deepcopy(geom)", "def copy(self):\n newterrain=Terrain(size=self.size)\n for point in self.size.upto():\n newterrain[point]=self[point]\n return newterrain", "def __getGeometry(self, geom):\n if \"POLYGON\" in self.geometryType:\n rings = geom['rings']\n polygon = getMultiGeometry(rings)\n polyGeom = arcpy.Polygon(polygon, self.sr)\n return polyGeom\n elif \"POLYLINE\" in self.geometryType:\n paths = geom['paths']\n polyline = getMultiGeometry(paths)\n lineGeom = arcpy.Polyline(polyline, self.sr)\n return lineGeom\n elif \"POINT\" in self.geometryType:\n try:\n point = arcpy.Point(float(geom['x']), float(geom['y']))\n except:\n raise NullGeometryError(\"Point geometry is invalid or null\")\n pointGeom = arcpy.Geometry(\"point\", point, self.sr)\n return pointGeom", "def copy(self):\n return self.__copy__()", "def copy(self):\n return self.__copy__()", "def copy(self):\n return self.__copy__()", "def copy(self):\n return self.__copy__()", "def copy(self):\n return Struct(self)", "def _copy_(self):\n return copy.copy(self)", "def get_prop_geometry(self, name_building):\n return self._prop_geometry.loc[name_building].to_dict()", "def copy(self):\n return copy.copy(self)", "def copy(self):\n return copy.copy(self)", "def copy(self):\n return copy.copy(self)", "def cloneType(self):\n return _osgAnimation.RigGeometry_cloneType(self)", "def envelope(self): # -> BaseGeometry:\n ...", "def copy(self):\r\n return copy.copy(self)", "def copy(self):\n newVertices = [v.copy() for v in self.vertices]\n return face(newVertices)", "def __geo_interface__(self):\r\n if HASARCPY:\r\n template = {\r\n \"type\": \"FeatureCollection\",\r\n \"features\": []\r\n }\r\n geom_type = self.geometry_type\r\n if geom_type.lower() == \"point\":\r\n geom_type = \"Point\"\r\n elif geom_type.lower() == \"polyline\":\r\n 
geom_type = \"LineString\"\r\n elif geom_type.lower() == \"polygon\":\r\n geom_type = \"Polygon\"\r\n df_copy = self.copy(deep=True)\r\n df_copy['geom_json'] = self.geometry.JSON\r\n df_copy['SHAPE'] = df_copy['geom_json']\r\n del df_copy['geom_json']\r\n for index, row in df_copy.iterrows():\r\n geom = row['SHAPE']\r\n del row['SHAPE']\r\n template['features'].append(\r\n {\"type\" : geom_type,\r\n \"geometry\" : pd.io.json.loads(geom),\r\n \"attributes\":row}\r\n )\r\n return pd.io.json.dumps(template)", "def erase(self, other, inplace=False):\r\n if inplace:\r\n df = self\r\n else:\r\n df = self.copy()\r\n if isinstance(other, Geometry):\r\n df.geometry = self.geometry.symmetricDifference(other)\r\n return df\r\n else:\r\n raise ValueError(\"Input must be of type arcpy.Geometry, not %s\" % type(other))", "def copy(self):\n\n return ArrayCoordinates1d(self.coordinates, **self.properties)", "def clone(self):\n return _libsbml.GraphicalObject_clone(self)", "def as_feature(self) -> Feature:\n return Feature(\n geometry=self._geom,\n attributes={key: vars(self)[key] for key in vars(self).keys()\n if key != '_geometry' and not key.startswith('_')}\n )", "def _multigeometry(self, ogr_geometry):\n\n geo_type = ogr_geometry.GetGeometryType()\n\n if geo_type == ogr.wkbPolygon:\n return ogr.ForceToMultiPolygon(ogr_geometry)\n elif geo_type == ogr.wkbPoint:\n return ogr.ForceToMultiPoint(ogr_geometry)\n elif geo_type in [ogr.wkbLineString, ogr.wkbLinearRing]:\n return ogr.ForceToMultiLineString(ogr_geometry)\n else:\n return ogr_geometry", "def copy(self, deep=True):\r\n data = self._data\r\n if deep:\r\n data = data.copy()\r\n return SpatialDataFrame(data, sr=self.sr).__finalize__(self)", "def copy(self):\n \n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return copy(self)", "def copy(self):\n return copy(self)", "def copy(self):\n return self.__class__(\n self.xs.copy(), self.ys.copy(),\n self.gauge_length,\n self.sample_width,\n self.sample_thickness,\n self.name\n )", "def copy(self):\n return PathPoint(self.species.new_species(), deepcopy(self.constraints))", "def copy (self):\n import copy\n return copy.copy(self)", "def set_geometry(self, selection_name, geometry):", "def get_geometry(self):\n rows, cols = self.get_gridspec().get_geometry()\n return rows, cols, self.num1, self.num2", "def get_obj_geo(node_path):\n return OBJ.node(node_path).displayNode().geometry()", "def __copy__(self):\n return self.copy()", "def get_copy_of_graph(self):\r\n return deepcopy(self)", "def geometry(self):\n if hasattr(self._selection_artist, 'get_verts'):\n xfm = self.ax.transData.inverted()\n y, x = xfm.transform(self._selection_artist.get_verts()).T\n return np.array([x, y])\n else:\n return np.array(self._selection_artist.get_data())", "def copy(self):\n copy = self.__class__()\n copy.a = self.a\n copy.b = self.b\n copy.peak = self.peak\n copy.orientation = self.orientation\n 
copy.i = self.i\n copy.coords = self.coords.copy()\n return copy", "def simplify(self, tolerance, preserve_topology=...): # -> BaseGeometry:\n ...", "def copy_with(self):\n return self.copy()" ]
[ "0.7537405", "0.73882526", "0.73882526", "0.7358205", "0.7358205", "0.7251656", "0.71966004", "0.71324015", "0.7120845", "0.6842374", "0.6735027", "0.6728724", "0.67227095", "0.6678602", "0.6662164", "0.6536648", "0.65043837", "0.6306422", "0.6269604", "0.6267922", "0.6248901", "0.61758465", "0.61700416", "0.61588854", "0.6107103", "0.6100887", "0.6100887", "0.6071272", "0.60687643", "0.60523075", "0.60474867", "0.6010969", "0.6000665", "0.59975064", "0.59975064", "0.5990024", "0.59593195", "0.59211224", "0.58985186", "0.5896957", "0.5893468", "0.5873349", "0.5823745", "0.5809357", "0.58013546", "0.58013546", "0.57840973", "0.5765035", "0.5764821", "0.5735601", "0.5735601", "0.5735601", "0.5735601", "0.5732894", "0.57298744", "0.5729167", "0.5707103", "0.5707103", "0.5707103", "0.57031053", "0.5698867", "0.5691702", "0.5679932", "0.5665655", "0.56453925", "0.56423193", "0.56357235", "0.56338304", "0.5618427", "0.5609883", "0.5604701", "0.5599874", "0.5599874", "0.5599874", "0.5599874", "0.5599874", "0.5599874", "0.5599874", "0.5599874", "0.5599874", "0.5599874", "0.5599874", "0.5599874", "0.5599874", "0.5599874", "0.5599874", "0.55989635", "0.55989635", "0.5582737", "0.55799013", "0.557458", "0.55733556", "0.55719215", "0.5555208", "0.55534226", "0.5552542", "0.555132", "0.5546716", "0.5533589", "0.5522664" ]
0.74882305
1
Return the coordinates of the given OGR geometry. Assumes that this is a single polygon, and returns a numpy array of the x, y coords, of shape (numPts, 2). If the polygon has holes, they will be discarded this is just the outer polygon. If the geometry is a MultiPoint geom, also return a 2d array of coords.
def getCoords(geom):
    geomDict = eval(geom.ExportToJson())
    coords = geomDict['coordinates']
    if geomDict['type'] == 'Polygon':
        coordsArray = numpy.array(coords[0])
    elif geomDict['type'] == 'MultiPoint':
        coordsArray = numpy.array(coords)
    else:
        coordsArray = None
    return coordsArray
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def geomFromInteriorPoints(coords):\n if isinstance(coords, numpy.ndarray):\n coords = coords.tolist()\n geomDict = {'type':'MultiPoint', 'coordinates':coords}\n geomPoints = ogr.CreateGeometryFromJson(repr(geomDict))\n return geomPoints", "def point_coords(geom):\n # Return a tuple with the x/y point coordinate for a GeoDataFrame geometry\n return list(geom.coords)[0] # Just get first tuple in list, since it's a point", "def getGeometryCoords(row, geom, coord_type, shape_type):\r\n \r\n # Parse the exterior of the coordinate\r\n if shape_type == 'polygon':\r\n exterior = row[geom].exterior\r\n if coord_type == 'x':\r\n # Get the x coordinates of the exterior\r\n return list( exterior.coords.xy[0] ) \r\n \r\n elif coord_type == 'y':\r\n # Get the y coordinates of the exterior\r\n return list( exterior.coords.xy[1] )\r\n\r\n elif shape_type == 'point':\r\n exterior = row[geom]\r\n \r\n if coord_type == 'x':\r\n # Get the x coordinates of the exterior\r\n return exterior.coords.xy[0][0] \r\n\r\n elif coord_type == 'y':\r\n # Get the y coordinates of the exterior\r\n return exterior.coords.xy[1][0]", "def _get_poly_coords(self, geometry, coord_type):\n ext = geometry.exterior\n return self._get_xy_coords(ext, coord_type)", "def get_polygon_coordinates(self) -> Tuple[List, List]:\n\n polygon_query = f\"https://nominatim.openstreetmap.org/\" \\\n f\"search?city={self.location.replace(' ', '+')}&polygon_geojson=1&format=json\"\n r = requests.get(polygon_query)\n js = ast.literal_eval(r.text)\n\n self.monitor.info(\"-> Downloaded area polygon data points.\")\n clean_polygon_coords = js[0]['geojson']['coordinates'][0]\n\n polygon_lats = [float(i[1]) for i in clean_polygon_coords]\n polygon_longs = [float(i[0]) for i in clean_polygon_coords]\n\n self.monitor.info(\"-> Created lat/long vectors.\")\n return polygon_lats, polygon_longs", "def getMultiGeometry(geometry):\n geom = arcpy.Array()\n for feature in geometry:\n array = arcpy.Array()\n for point in feature:\n point = arcpy.Point(float(point[0]), float(point[1]))\n array.add(point)\n geom.add(array)\n return geom", "def get_coords(self, srid=4326):\n return self.geom.transform(srid, clone=True).coords", "def getPolyCoords(row, geom, coord_type):\n # Parse the exterior of the coordinate\n exterior = row[geom].exterior\n if coord_type == 'x':\n # Get the x coordinates of the exterior\n return list( exterior.coords.xy[0])\n elif coord_type == 'y':\n # Get the y coordinates of the exterior\n return list( exterior.coords.xy[1])", "def get_coordinates(self):\n\n metadata = self.client.contents[self.layer]\n\n # coordinates\n bbox = metadata.boundingBoxWGS84\n crs = \"EPSG:4326\"\n logging.debug(\"WCS available boundingboxes: {}\".format(metadata.boundingboxes))\n for bboxes in metadata.boundingboxes:\n if bboxes[\"nativeSrs\"] == self.crs:\n bbox = bboxes[\"bbox\"]\n crs = self.crs\n break\n\n low = metadata.grid.lowlimits\n high = metadata.grid.highlimits\n xsize = int(high[0]) - int(low[0])\n ysize = int(high[1]) - int(low[1])\n\n # Based on https://www.ctps.org/geoserver/web/wicket/bookmarkable/org.geoserver.wcs.web.demo.WCSRequestBuilder;jsessionid=9E2AA99F95410C694D05BA609F25527C?0\n # The above link points to a geoserver implementation, which is the reference implementation.\n # WCS version 1.0.0 always has order lon/lat while version 1.1.1 actually follows the CRS\n if self.version == \"1.0.0\":\n rbbox = {\"lat\": [bbox[1], bbox[3], ysize], \"lon\": [bbox[0], bbox[2], xsize]}\n else:\n rbbox = resolve_bbox_order(bbox, crs, (xsize, 
ysize))\n\n coords = []\n coords.append(UniformCoordinates1d(rbbox[\"lat\"][0], rbbox[\"lat\"][1], size=rbbox[\"lat\"][2], name=\"lat\"))\n coords.append(UniformCoordinates1d(rbbox[\"lon\"][0], rbbox[\"lon\"][1], size=rbbox[\"lon\"][2], name=\"lon\"))\n\n if metadata.timepositions:\n coords.append(ArrayCoordinates1d(metadata.timepositions, name=\"time\"))\n\n if metadata.timelimits:\n raise NotImplementedError(\"TODO\")\n\n return Coordinates(coords, crs=crs)", "def shape_to_np(shape):\r\n\tcoords = np.zeros((shape.num_parts, 2), dtype = 'int')\r\n\r\n\tfor i in range(shape.num_parts):\r\n\t\tcoords[i] = (shape.part(i).x, shape.part(i).y)\r\n\r\n\treturn coords", "def array_coords(shape=(1, 1)):\n indexes = np.arange(np.product(shape))\n coords = [indexes % shape[1], indexes // shape[1]]\n return np.transpose(coords).astype('float64')", "def process_geometry(self, molecule_info):\n # Initalise variables\n atom_coords = []\n\n # Pull coordinates from molecule info.\n for line in molecule_info:\n xyz = np.asarray([\n float(line.split()[i+4])\n for i in range(3)\n ])\n atom_coords.append(xyz)\n\n return np.asarray(atom_coords)", "def coordinates(self):\n return np.array([self.x, self.y])", "def coordinates(self):\n return np.array([[f.x, f.y] for f in self])", "def get_geometry(self):\n rows, cols = self.get_gridspec().get_geometry()\n return rows, cols, self.num1, self.num2", "def get_coordinates_geo(self):\n if not self.rotated:\n lon_arr_geo = self.lon_arr\n lat_arr_geo = self.lat_arr\n else:\n lon_arr_geo, lat_arr_geo = self.transform().get_coordinates() \n\n return lon_arr_geo, lat_arr_geo", "def coordinates(self):\n logging.debug('Get coordinates from text')\n result = []\n blocks = self.del_comm(blocks=True)\n coor = re.compile('[FXYZ][+-]?[0-9]+(\\.[0-9]+)?')\n for line in blocks:\n coord_line = False\n comm = line.split()\n temp = []\n for c in comm:\n if c == 'G1':\n coord_line = True\n if coord_line and coor.match(c):\n temp.append(c)\n if temp:\n result.append(temp)\n return result", "def get_MultiPolyLists_xy(mpoly):\n # Get the x or y coordinates\n x = []\n y = []\n if isinstance(mpoly,Polygon):\n mpoly = [mpoly]\n for poly in mpoly: # the polygon objects return arrays, it's important they be lists or Bokeh fails\n exterior_coords_x = poly.exterior.coords.xy[0].tolist();\n interior_coords_x = []\n exterior_coords_y = poly.exterior.coords.xy[1].tolist();\n interior_coords_y = []\n\n for interior in poly.interiors:\n if isinstance(interior.coords.xy[0],list):\n interior_coords_x += [interior.coords.xy[0]];\n interior_coords_y += [interior.coords.xy[1]];\n else:\n interior_coords_x += [interior.coords.xy[0].tolist()];\n interior_coords_y += [interior.coords.xy[1].tolist()];\n x.append([exterior_coords_x, *interior_coords_x])\n y.append([exterior_coords_y, *interior_coords_y])\n return (x,y)", "def get_coords_enu(self, include_core=True):\n if not self.layouts:\n raise RuntimeError('No layout defined!')\n x, y, z = np.array([]), np.array([]), np.array([])\n for name in self.layouts:\n if not include_core and name == 'ska1_v5':\n continue\n layout = self.layouts[name]\n x = np.hstack((x, layout['x']))\n y = np.hstack((y, layout['y']))\n if 'z' in layout:\n z = np.hstack((z, layout['z']))\n else:\n z = np.hstack((z, np.zeros_like(layout['x'])))\n if z.size != x.size:\n raise RuntimeError('ENU coordinates dimension mismatch!')\n return x, y, z", "def xy_coordinates(self):\n\n return np.meshgrid(self.x_coord, self.y_coord)", "def get_coordinates(self):\n return np.array([(n.x, n.y) for 
n in self.nodes])", "def _get_coords(self, row, geom_col, coord_type):\n geom = row[geom_col]\n gtype = geom.geom_type\n\n if gtype == \"Polygon\":\n return list(self._get_poly_coords(geom, coord_type))\n elif gtype == \"MultiPolygon\":\n return list(self._multi_poly_handler(geom, coord_type))\n else:\n err_msg = \"Geometry type (\", gtype, \") not suppert by function\"\n raise TypeError(err_msg)", "def geom_to_array(geom, index=None, multi=False, geom_type=None):\n from spatialpandas.geometry import (\n Point, Polygon, Line, Ring, MultiPolygon, MultiPoint\n )\n if isinstance(geom, Point):\n if index is None:\n return np.array([[geom.x, geom.y]])\n arrays = [np.array([geom.y if index else geom.x])]\n elif isinstance(geom, (Polygon, Line, Ring)):\n exterior = geom.data[0] if isinstance(geom, Polygon) else geom.data\n arr = np.array(exterior.as_py()).reshape(-1, 2)\n if isinstance(geom, (Polygon, Ring)):\n arr = ensure_ring(arr)\n arrays = [arr if index is None else arr[:, index]]\n elif isinstance(geom, MultiPoint):\n if index is None:\n arrays = [np.array(geom.buffer_values).reshape(-1, 2)]\n else:\n arrays = [np.array(geom.buffer_values[index::2])]\n else:\n arrays = []\n for g in geom.data:\n exterior = g[0] if isinstance(geom, MultiPolygon) else g\n arr = np.array(exterior.as_py()).reshape(-1, 2)\n if isinstance(geom, MultiPolygon):\n arr = ensure_ring(arr)\n arrays.append(arr if index is None else arr[:, index])\n if geom_type != 'Point':\n arrays.append([[np.nan, np.nan]] if index is None else [np.nan])\n if geom_type != 'Point':\n arrays = arrays[:-1]\n if multi:\n return arrays\n elif len(arrays) == 1:\n return arrays[0]\n else:\n return np.concatenate(arrays)", "def get_outer_vertices(feature):\n return [\n point\n for part in Geometry.get_multipolygon(feature)\n for point in part[0][0:-1]\n ]", "def get_vertices(self, crs=None):\n if (crs is None) or (crs is self.crs):\n return np.array(self.vertices)\n else:\n vertices = [_reproject(v[:2], self.crs, crs)\n for v in self.vertices]\n return np.array(vertices)", "def geometry(self):\n if hasattr(self._selection_artist, 'get_verts'):\n xfm = self.ax.transData.inverted()\n y, x = xfm.transform(self._selection_artist.get_verts()).T\n return np.array([x, y])\n else:\n return np.array(self._selection_artist.get_data())", "def get_coordinate_lists(self, crs=None):\n x, y = self.vertices.vectors()[:2]\n if crs is not None and (crs != self.crs):\n x, y = _reproject((x,y), self.crs, crs)\n return x, y", "def xstagg_xy_coordinates(self):\n\n x_s = self.corner_grid.x0 + np.arange(self.nx+1) * self.dx\n y = self.center_grid.y0 + np.arange(self.ny) * self.dy\n return np.meshgrid(x_s, y)", "def storing_coordinates (self, geojson):\n polygons = []\n for feature in geojson['features']:\n polygons.append(feature['geometry'])\n print (\"Yass! 
Coordinates successfully stored as polygons.\")\n return polygons", "def extractCoords(self):\n if not self.rank:\n logging.info('Extracting atomic poitions')\n\n # Extract coordinates from liggghts\n self.lmp.command('variable x atom x')\n x = Rxn.lmp.extract_variable(\"x\", \"group1\", 1)\n\n self.lmp.command('variable y atom y')\n y = Rxn.lmp.extract_variable(\"y\", \"group1\", 1)\n\n self.lmp.command('variable z atom z')\n z = Rxn.lmp.extract_variable(\"z\", \"group1\", 1)\n\n coords = np.zeros((self.lmp.get_natoms(),3))\n\n for i in range(self.lmp.get_natoms()):\n coords[i,:] = x[i], y[i], z[i]\n\n self.lmp.command('variable x delete')\n self.lmp.command('variable y delete')\n self.lmp.command('variable z delete')\n\n return coords", "def to2D(geometry):\n\n return LineString(np.column_stack(geometry.xy))", "def read_gdal_coordinates(dataset, mode=\"center\"):\n coordinates_pixel = _pixel_coordinates(\n dataset.RasterXSize, dataset.RasterYSize, mode\n )\n\n geotransform = dataset.GetGeoTransform()\n coordinates = _pixel_to_map(coordinates_pixel, geotransform)\n\n return coordinates", "def getPointCoords(row, geom, coord_type):\r\n if coord_type == 'x':\r\n return row[geom].x\r\n elif coord_type == 'y':\r\n return row[geom].y", "def get_coordinates(geotags) -> Tuple[float, float]:\n lat = get_decimal_from_dms(\n geotags['GPSLatitude'],\n geotags['GPSLatitudeRef'],\n )\n lon = get_decimal_from_dms(\n geotags['GPSLongitude'],\n geotags['GPSLongitudeRef'],\n )\n\n return lat, lon", "def convert_coords(self):\n if self.coordsys in ['image', 'physical']:\n coords = self._convert_pix_coords()\n else:\n coords = self._convert_sky_coords()\n\n if self.region_type == 'line':\n coords = [coords[0][0], coords[0][1]]\n\n if self.region_type == 'text':\n coords.append(self.meta['text'])\n\n return coords", "def _convert_coords_to_coordinates(self, vect) -> np.ndarray:\n # create empty vector of necessary shape\n # one coordinate for each endpoint of the vector\n pos = np.empty((2 * len(vect), 2), dtype=np.float32)\n\n # create pairs of points\n pos[0::2, 0] = vect[:, 0]\n pos[1::2, 0] = vect[:, 0]\n pos[0::2, 1] = vect[:, 1]\n pos[1::2, 1] = vect[:, 1]\n\n # adjust second of each pair according to x-y projection\n pos[1::2, 0] += vect[:, 2]\n pos[1::2, 1] += vect[:, 3]\n\n return pos", "def map_coordinates(self,geometry):\n\t\tg = self.geomatrix\n\t\tdef project_coord(x,y,z=None):\n\t\t\tx = g[0] + g[1] * x + g[2] * y\n\t\t\ty = g[3] + g[4] * x + g[5] * y\n\t\t\tif z is None:\n\t\t\t\treturn x,y\n\t\t\telse:\n\t\t\t\treturn x,y,z\n\t\treturn transform(project_coord, geometry)", "def geom_as_list(geometry):\n if geometry.geom_type == \"Polygon\":\n return [geometry]\n elif geometry.geom_type == \"MultiPolygon\":\n return geometry.geoms", "def coordinates(self, xy_pairs=False):\n\n if xy_pairs:\n return tuple(zip(self._coordinates[:-1:2], self._coordinates[1::2]))\n else:\n return tuple(self._coordinates)", "def coordinates(self, xy_pairs=False):\n\n if xy_pairs:\n return tuple(zip(self._coordinates[:-1:2], self._coordinates[1::2]))\n else:\n return tuple(self._coordinates)", "def find_coordinates(self):\n\n raise AttributeError(\n \"Cannot get available coordinates for OGR datasources. 
\"\n \"The source data is a vector-based shapefile without native coordinates.\"\n )", "def getPointCoords(row, geom, coord_type):\n if coord_type == 'x':\n return row[geom].x\n elif coord_type == 'y':\n return row[geom].y", "def _parse_coords(self):\n\n coords = []\n\n while True:\n try:\n _, x, y = self._lines.current.split()\n coords.append((float(x), float(y)))\n except ValueError:\n break\n\n try:\n next(self._lines)\n except StopIteration:\n break\n\n return coords", "def _dense_point_array(self, geoms, distance, index):\n # interpolate lines to represent them as points for Voronoi\n points = []\n ids = []\n\n if pygeos.get_type_id(geoms[0]) not in [1, 2, 5]:\n lines = pygeos.boundary(geoms)\n else:\n lines = geoms\n lengths = pygeos.length(lines)\n for ix, line, length in zip(index, lines, lengths):\n if length > distance: # some polygons might have collapsed\n pts = pygeos.line_interpolate_point(\n line,\n np.linspace(0.1, length - 0.1, num=int((length - 0.1) // distance)),\n ) # .1 offset to keep a gap between two segments\n points.append(pygeos.get_coordinates(pts))\n ids += [ix] * len(pts)\n\n points = np.vstack(points)\n\n return points, ids\n\n # here we might also want to append original coordinates of each line\n # to get a higher precision on the corners", "def coords2D(self):\n return (self.x, self.y)", "def spatial(self):\n return self.spatial_x, self.spatial_y, self.spatial_data", "def getDataCoordinates(self):\n coord = np.zeros((self.dataset.shape[0], 2))\n for i in range(len(self.dataset)):\n coord[i, 0] = self.dataset[i][0]\n coord[i, 1] = self.dataset[i][1]\n return coord", "def getCoords(self):\r\n \r\n return self.coords", "def get_vertices(self, crs=None):\n if crs is None:\n vertices = []\n for poly_vertices in self.vertices:\n vertices.append([np.array(v) for v in poly_vertices])\n return vertices\n else:\n vertices = []\n for poly_vertices in self.vertices:\n poly = []\n for ring_vertices in poly_vertices:\n poly.append(np.array([_reproject(v[:2], self.crs, crs)\n for v in ring_vertices]))\n vertices.append(poly)\n return vertices", "def coord_list(self, part_name: str) -> List:\n part = self.parts[part_name]\n if isinstance(part, Polygon):\n # Note that in shapely, the first coord is repeated at the end, which we\n # trim off:\n return list(np.array(part.exterior.coords.xy).T)[:-1]\n elif isinstance(part, LineString):\n return list(np.array(part.coords.xy).T)[:]", "def getCoordinates(p):\n if p[0] == 'p': # minimum bounding rectangle for point\n return (int(p[1]), int(p[2]), int(p[1]), int(p[2]))\n elif p[0] == 'c': # minimum bounding rectangle for circle\n x = int(p[1])\n y = int(p[2])\n r = int(p[3])\n return (x - r, y - r, x + r, y + r)\n elif p[0] == 'l': # minimum bounding rectangle for line segment\n x1 = int(p[1])\n y1 = int(p[2])\n x2 = int(p[3])\n y2 = int(p[4])\n if y2 > y1:\n if x1 < x2:\n return (x1, y1, x2, y2)\n else:\n return (x2, y1, x1, y2)\n else:\n if x1 < x2:\n return (x1, y2, x2, y1)\n else:\n return (x2, y2, x1, y1)", "def getPointCoords(self, row, geom, coord_type):\n if coord_type == 'x':\n return row[geom].x\n elif coord_type == 'y':\n return row[geom].y", "def get_polygon(self):\n ret_array = []\n if self.parcel_polygon_string_list:\n cur_array = []\n for num in str(self.parcel_polygon_string_list).split(','):\n cur_array.append(float(num))\n if len(cur_array) == 2:\n ret_array.append(cur_array.copy())\n cur_array = []\n return ret_array", "def geomFromOutlineCoords(coords):\n if isinstance(coords, numpy.ndarray):\n coords = 
coords.tolist()\n geomDict = {'type':'Polygon', 'coordinates':[coords]}\n geom = ogr.CreateGeometryFromJson(repr(geomDict))\n return geom", "def get_all_coordinates(self):\n coordinates = []\n\n for relative_coordinate in self.shape:\n co = [self.coordinate[0] + relative_coordinate[0], self.coordinate[1] + relative_coordinate[1]]\n coordinates.append(co)\n return coordinates", "def feature_coords(features):\n coords_list = []\n for feature in features:\n coord_start = feature.location.nofuzzy_start\n coord_end = feature.location.nofuzzy_end\n coord_pair = (coord_start, coord_end)\n coords_list.append(coord_pair)\n ## consider adding some info to the log\n return coords_list", "def obtain_points(self):\n # Swapaxes makes the output a column rather than a row\n return np.swapaxes(np.array([np.ndarray.flatten(self.data[\"Base\"][\"Zone1\"][\"GridCoordinates\"][\"CoordinateX\"][\" data\"][:, :, :]),\n np.ndarray.flatten(self.data[\"Base\"][\"Zone1\"][\"GridCoordinates\"][\"CoordinateY\"][\" data\"][:, :, :]),\n np.ndarray.flatten(self.data[\"Base\"][\"Zone1\"][\"GridCoordinates\"][\"CoordinateZ\"][\" data\"][:, :, :])]), 0, 1)", "def find_intersecting_geometries(\n geo: Polygon,\n geo_idx: int,\n df: pd.DataFrame,\n ) -> list[int] | float:\n o = df.apply(\n lambda row:\n np.nan if not row['geometry'].is_valid else\n np.nan if not geo.intersects(row['geometry']) or row.name == geo_idx\n else row.name,\n axis=1,\n ).dropna()\n\n if o.shape[0] == 0:\n return np.nan\n else:\n return o.to_numpy(dtype=np.int64).tolist()", "def coords(self):\n return np.column_stack((self.x_coord_list, self.y_coord_list, self.z_coord_list))", "def make_xarray_coords(y, x, crs):\n if crs.is_geographic:\n y_attrs, x_attrs = COORD_DEFS['latitude'], COORD_DEFS['longitude']\n elif crs.is_projected:\n crs_osr = crs2osr(crs)\n units = crs_osr.GetLinearUnitsName()\n y_attrs, x_attrs = COORD_DEFS['y'], COORD_DEFS['x']\n y_attrs['units'], x_attrs['units'] = units, units\n\n y = xr.Variable(('y', ), y, attrs=y_attrs)\n x = xr.Variable(('x', ), x, attrs=x_attrs)\n\n return y, x", "def _point_array(self, objects, unique_id):\n points = []\n ids = []\n for idx, row in tqdm(objects.iterrows(), total=objects.shape[0]):\n if row[\"geometry\"].type in [\"Polygon\", \"MultiPolygon\"]:\n poly_ext = row[\"geometry\"].boundary\n else:\n poly_ext = row[\"geometry\"]\n if poly_ext is not None:\n if poly_ext.type == \"MultiLineString\":\n for line in poly_ext:\n point_coords = line.coords\n row_array = np.array(point_coords[:-1]).tolist()\n for i, a in enumerate(row_array):\n points.append(row_array[i])\n ids.append(row[unique_id])\n elif poly_ext.type == \"LineString\":\n point_coords = poly_ext.coords\n row_array = np.array(point_coords[:-1]).tolist()\n for i, a in enumerate(row_array):\n points.append(row_array[i])\n ids.append(row[unique_id])\n else:\n raise Exception(\"Boundary type is {}\".format(poly_ext.type))\n return points, ids", "def coord(self, i):\n assert(i >= 0 and i < self.nAtoms())\n assert(self._c_structure is not NULL)\n cdef const double *coord = freesasa_structure_coord_array(self._c_structure)\n return [coord[3*i], coord[3*i+1], coord[3*i+2]]", "def get_boundary_as_polygon(self, do_geo=True):\n xhor, yhor = self.get_coordinates()\n dimensions = xhor.shape\n xbottom = xhor[0, :]\n xright = xhor[:, dimensions[1]-1]\n xtop = xhor[dimensions[0]-1, :][::-1]\n xleft = xhor[:, 0][::-1]\n\n ybottom = yhor[0, :]\n yright = yhor[:, dimensions[1]-1]\n ytop = yhor[dimensions[0]-1, :][::-1]\n yleft = yhor[:, 0][::-1]\n\n lon_square 
= np.concatenate((xbottom, xright, xtop, xleft))\n lat_square = np.concatenate((ybottom, yright, ytop, yleft))\n\n return lon_square, lat_square", "def GetParametricCoords(self):\n ...", "def getLineCoords(row, geom, coord_type):\n if coord_type == 'x':\n return list( row[geom].coords.xy[0])\n elif coord_type == 'y':\n return list( row[geom].coords.xy[1])", "def find_coordinates(self):\n\n raise NotImplementedError", "def find_coordinates(self):\n\n raise NotImplementedError", "def point_to_coords(self, point: Sequence[float]) -> np.ndarray:\n point = np.asarray(point)\n result = np.asarray([axis.point_to_number(point) for axis in self.get_axes()])\n if point.ndim == 2:\n return result.T\n return result", "def get_vertices_list(feature):\n return [\n point\n for part in Geometry.get_multipolygon(feature)\n for ring in part\n for point in ring[0:-1]\n ]", "def get_coords(self):\n return self.coords", "def get_coordinates(bounding_box, bounding_box_type=''):\n coordinates = []\n if bounding_box_type == \"box\":\n coordinates.append([bounding_box[1], bounding_box[0]])\n coordinates.append([bounding_box[1], bounding_box[2]])\n coordinates.append([bounding_box[3], bounding_box[2]])\n coordinates.append([bounding_box[3], bounding_box[0]])\n coordinates.append([bounding_box[1], bounding_box[0]])\n return coordinates", "def _multi_poly_handler(self, multi_polygon, coord_type):\n for i, part in enumerate(multi_polygon):\n if i == 0:\n coord_arrays = np.append(self._get_poly_coords(part, coord_type), np.nan)\n else:\n coord_arrays = np.concatenate([coord_arrays,\n np.append(self._get_poly_coords(\n part, coord_type), np.nan)])\n # Return the coordinates\n return coord_arrays", "def positions_to_coords(self, positions):\n return [self.to_coords(px, py) for (px, py) in positions]", "def xy(self):\n xcol = self.xx.reshape(-1, 1)\n ycol = self.yy.reshape(-1, 1)\n return np.column_stack([xcol, ycol])", "def polygon_to_points(coords, z=None):\n\n bounds = array(coords).astype('int')\n\n bmax = bounds.max(0)\n bmin = bounds.min(0)\n\n path = Path(bounds)\n\n grid = meshgrid(range(bmin[0], bmax[0]+1), range(bmin[1], bmax[1]+1))\n\n grid_flat = zip(grid[0].ravel(), grid[1].ravel())\n\n points = path.contains_points(grid_flat).reshape(grid[0].shape).astype('int')\n points = where(points)\n points = (vstack([points[0], points[1]]).T + bmin[-1::-1]).tolist()\n if z is not None:\n points = map(lambda p: [p[0], p[1], z], points)\n\n return points", "def coords_to_point(\n self, *coords: float | Sequence[float] | Sequence[Sequence[float]] | np.ndarray\n ) -> np.ndarray:\n coords = np.asarray(coords)\n origin = self.x_axis.number_to_point(\n self._origin_shift([self.x_axis.x_min, self.x_axis.x_max]),\n )\n\n # Is coords in the format ([[x1 y1 z1] [x2 y2 z2] ...])? (True)\n # Or is coords in the format (x, y, z) or ([x1 x2 ...], [y1 y2 ...], [z1 z2 ...])? (False)\n # The latter is preferred.\n are_coordinates_transposed = False\n\n # If coords is in the format ([[x1 y1 z1] [x2 y2 z2] ...]):\n if coords.ndim == 3:\n # Extract from original tuple: now coords looks like [[x y z]] or [[x1 y1 z1] [x2 y2 z2] ...].\n coords = coords[0]\n # If there's a single coord (coords = [[x y z]]), extract it so that\n # coords = [x y z] and coords_to_point returns a single point.\n if coords.shape[0] == 1:\n coords = coords[0]\n # Else, if coords looks more like [[x1 y1 z1] [x2 y2 z2] ...], transform them (by\n # transposing) into the format [[x1 x2 ...] [y1 y2 ...] 
[z1 z2 ...]] for later processing.\n else:\n coords = coords.T\n are_coordinates_transposed = True\n # Otherwise, coords already looked like (x, y, z) or ([x1 x2 ...], [y1 y2 ...], [z1 z2 ...]),\n # so no further processing is needed.\n\n # Now coords should either look like [x y z] or [[x1 x2 ...] [y1 y2 ...] [z1 z2 ...]],\n # so it can be iterated directly. Each element is either a float representing a single\n # coordinate, or a float ndarray of coordinates corresponding to a single axis.\n # Although \"points\" and \"nums\" are in plural, there might be a single point or number.\n points = self.x_axis.number_to_point(coords[0])\n other_axes = self.axes.submobjects[1:]\n for axis, nums in zip(other_axes, coords[1:]):\n points += axis.number_to_point(nums) - origin\n\n # Return points as is, except if coords originally looked like\n # ([x1 x2 ...], [y1 y2 ...], [z1 z2 ...]), which is determined by the conditions below. In\n # that case, the current implementation requires that the results have to be transposed.\n if are_coordinates_transposed or points.ndim == 1:\n return points\n return points.T", "def __getGeometry(self, geom):\n if \"POLYGON\" in self.geometryType:\n rings = geom['rings']\n polygon = getMultiGeometry(rings)\n polyGeom = arcpy.Polygon(polygon, self.sr)\n return polyGeom\n elif \"POLYLINE\" in self.geometryType:\n paths = geom['paths']\n polyline = getMultiGeometry(paths)\n lineGeom = arcpy.Polyline(polyline, self.sr)\n return lineGeom\n elif \"POINT\" in self.geometryType:\n try:\n point = arcpy.Point(float(geom['x']), float(geom['y']))\n except:\n raise NullGeometryError(\"Point geometry is invalid or null\")\n pointGeom = arcpy.Geometry(\"point\", point, self.sr)\n return pointGeom", "def get_coords(h, w):\n coords = np.empty((h, w, 2), dtype = np.int)\n coords[..., 0] = np.arange(h)[:, None]\n coords[..., 1] = np.arange(w)\n\n return coords", "def extract(self, geometry,strategy=strategies.nearest):\n\t\tpixels = self.extract_pixels(self.pixel_coordinates(geometry))\n\t\treturn self.map_coordinates(pixels)", "def geojson_x_y(geojson_path):\n \n with open(geojson_path) as f:\n geo = geojson.load(f)\n \n coord_list = list(geojson.utils.coords(geo))\n \n ## Adapted from https://gis.stackexchange.com/a/313023\n box = []\n for i in (0,1):\n res = sorted(coord_list, key=lambda x:x[i])\n box.append((res[0][i],res[-1][i]))\n \n return box", "def get_coordinates(self):\n return self.coordinates", "def pose_coords_as_rows(pack_or_pose, selection=list(), atom_names=list()):\n import pyrosetta.distributed.packed_pose as packed_pose\n import numpy as np\n\n wpose = packed_pose.to_pose(pack_or_pose)\n\n # default to all residues\n if not selection:\n selection = range(1, wpose.total_residue() + 1)\n\n coords = np.array(\n [\n res.xyz(atom)\n for res in [wpose.residues[s] for s in selection]\n for atom in range(1, res.natoms() + 1)\n if (not atom_names or res.atom_name(atom).strip() in atom_names)\n ]\n )\n return coords", "def coordinates(self):\n if hasattr(self, '_coordinates'):\n return self._coordinates\n else:\n return self._points", "def grid_coordinates(points: np.array, dtype = np.uint16) -> np.array:\n xmin = np.min(points[:, 0])\n xmax = np.max(points[:, 0]) + 1\n ymin = np.min(points[:, 1])\n ymax = np.max(points[:, 1]) + 1\n return np.asarray([(x, y) for y in range(ymin, ymax)\n for x in range(xmin, xmax)], dtype = dtype)", "def extent(self):\n\n x = np.array([0, self.nx]) * self.dx + self.corner_grid.x0\n ypoint = [0, self.ny] if self.origin == 'lower-left' else 
[self.ny, 0]\n y = np.array(ypoint) * self.dy + self.corner_grid.y0\n\n return [x[0], x[1], y[0], y[1]]", "def _get_polygon(areasrc):\n\n str = areasrc.geometry.wkt\n str = re.sub('POLYGON\\(\\(', '', str)\n str = re.sub('\\)\\)', '', str)\n aa = re.split('\\,', str)\n lons = []\n lats = []\n for str in aa:\n bb = re.split('\\s+', re.sub('^\\s+', '', str))\n lons.append(float(bb[0]))\n lats.append(float(bb[1]))\n return lons, lats", "def ecef_coords(lats: ndarray, lons: ndarray) -> Tuple[ndarray, ndarray, ndarray]:\n # Cartopy Geodetic and Geocentric both default to the WGS84 datum\n spherical_latlon_crs = Geodetic()\n ecef_crs = Geocentric()\n xyz = ecef_crs.transform_points(\n spherical_latlon_crs, np.array(lons), np.array(lats)\n )\n return xyz[..., 0], xyz[..., 1], xyz[..., 2]", "def coordinates(self):\n return self.xy", "def ij_coordinates(self):\n\n x = np.arange(self.nx)\n y = np.arange(self.ny)\n return np.meshgrid(x, y)", "def get_vertices(self, crs=None):\n if crs is None:\n return [np.array(v) for v in self.vertices]\n else:\n vertices = []\n for line in self.vertices:\n line_vertices = [_reproject(v[:2], self.crs, crs) for v in line]\n vertices.append(np.array(line_vertices))\n return vertices", "def _generate_coords(self):\n coords = np.dstack([self.X.ravel(), self.Y.ravel()])[0]\n return coords, spatial.cKDTree(coords)", "def geometry(self):\n return self._geometry", "def geometry(self):\n return self._geometry", "def getCoords(self):\n if self._ra == \"\" or self._dec == \"\":\n raise ValueError('Object named ' + self._name +' has no coordinates in database.')\n ra = self._ra.split(\":\")\n dec = self._dec.split(\":\")\n raTuple = (int(ra[0]), int(ra[1]), float(ra[2]))\n decTuple = (dec[0][0], int(dec[0][1:]), int(dec[1]), float(dec[2]))\n return raTuple, decTuple", "def proj_coords(coords, proj_in, proj_out): \n return [proj_coord(coord, proj_in, proj_out) for coord in coords]", "def test_polygon_to_vertex_arrays(self):\n\n these_vertex_x_coords, these_vertex_y_coords = (\n skeleton_lines._polygon_to_vertex_arrays(POLYGON_OBJECT_XY))\n\n self.assertTrue(numpy.allclose(\n these_vertex_x_coords, VERTEX_X_COORDS, atol=TOLERANCE))\n self.assertTrue(numpy.allclose(\n these_vertex_y_coords, VERTEX_Y_COORDS, atol=TOLERANCE))", "def _get_coords(p_coords):\n l_ret = CoordinateInformation()\n if isinstance(p_coords, list):\n l_list = p_coords\n else:\n l_list = p_coords.strip('\\[\\]')\n l_list = l_list.split(',')\n try:\n l_ret.X_Easting = float(l_list[0])\n l_ret.Y_Northing = float(l_list[1])\n l_ret.Z_Height = float(l_list[2])\n except Exception as e_err:\n print('Error {}'.format(e_err))\n l_ret.X_Easting = 0.0\n l_ret.Y_Northing = 0.0\n l_ret.Z_Height = 0.0\n return l_ret", "def coords_in(self, frame_size=None, shape=None, img=None):\n w, h = _to_frame_size(frame_size=frame_size, shape=shape, img=img)\n return [(int(round(x * w)), int(round(y * h))) for x, y in self.points]", "def dump_coords(geom): # -> Any | list[Unknown]:\n ...", "def get_cartesian_coords(self):\n r = 1\n dec = self.dec + 90\n x = r * math.sin(np.deg2rad(dec)) * math.cos(np.deg2rad(self.ra))\n y = r * math.sin(np.deg2rad(dec)) * math.sin(np.deg2rad(self.ra))\n z = r * math.cos(np.deg2rad(dec))\n\n return [x, y, z]" ]
[ "0.6378374", "0.6318399", "0.6230891", "0.6092643", "0.598644", "0.59413916", "0.5929878", "0.5911129", "0.5879037", "0.5745938", "0.56728566", "0.5662459", "0.5643619", "0.56382567", "0.560755", "0.5599817", "0.55802184", "0.5575683", "0.557035", "0.55689305", "0.55629367", "0.55563664", "0.5554617", "0.5526887", "0.5526454", "0.55196726", "0.5507142", "0.54808164", "0.54733217", "0.54628074", "0.54600275", "0.54480976", "0.54409754", "0.54397607", "0.54097146", "0.5404788", "0.539331", "0.539011", "0.5388613", "0.5388613", "0.53830546", "0.5376676", "0.5365992", "0.5364857", "0.5351104", "0.53453857", "0.5333729", "0.5326178", "0.52989405", "0.529608", "0.5286596", "0.52834827", "0.527745", "0.5260315", "0.52471405", "0.5241693", "0.52380764", "0.52270615", "0.5226124", "0.52247614", "0.5196433", "0.5191757", "0.5191174", "0.5188216", "0.5184726", "0.5184297", "0.5184297", "0.51833534", "0.5175974", "0.5174574", "0.5171589", "0.5168623", "0.5159413", "0.5154024", "0.51527196", "0.5145125", "0.5140841", "0.5137277", "0.5136015", "0.5133559", "0.5124698", "0.51173633", "0.5115133", "0.5110997", "0.5110176", "0.5107352", "0.5107112", "0.5092974", "0.5086692", "0.50741524", "0.5073374", "0.5073241", "0.5073241", "0.5071624", "0.50705266", "0.5061675", "0.50615364", "0.5051397", "0.5051117", "0.5041867" ]
0.72319037
0
The given list of pairs (or 2d numpy array) is the (x, y) coords of the polygon outline. Return a Polygon ogr.Geometry object.
def geomFromOutlineCoords(coords):
    if isinstance(coords, numpy.ndarray):
        coords = coords.tolist()
    geomDict = {'type':'Polygon', 'coordinates':[coords]}
    geom = ogr.CreateGeometryFromJson(repr(geomDict))
    return geom
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_polygon(*coords):\n global GEOMETRY_SURF, POLYGONS,col\n if len(coords) < 3:\n print(\"Warning: Invalid polygon passed, ignoring...\")\n return\n start = coords[0]\n prev = coords[0]\n for coord in coords:\n POLYGONS |= {coord}\n line = Boundary(prev[0],prev[1],coord[0],coord[1]) #add segment to WALL list\n prev = coord\n line = Boundary(prev[0], prev[1],start[0],start[1])\n #now draw poly\n pygame.draw.polygon(GEOMETRY_SURF,col[\"SHAPECOL\"], coords)\n return", "def polygon(self, pointlist, cls=None, style=None, attrs=None):\n payload = self._meta.make_payload(cls, style, attrs)\n pts_str = ' '.join('%s,%s' % (x, y) for x, y in pointlist)\n self.elements.append(\"\"\"<polygon points=\"%s\" %s/>\"\"\" % (pts_str, payload))\n return self", "def polygon(self, pointlist, cls=None, style=None, attrs=None):\n payload = self._meta.make_payload(cls, style, attrs)\n pts_str = ' '.join('%s,%s' % (x, y) for x, y in pointlist)\n self.elements.append(\"\"\"<polygon points=\"%s\" %s/>\"\"\" % (pts_str, payload))\n return self", "def array_to_poly(array):\n array = np.asarray(array)\n size = np.shape(array)\n if size[1] != 2:\n raise ValueError('Array is not the proper size.')\n return\n geom_array = np.append(array, [array[0]], axis = 0).tolist()\n geom = {\"type\": \"Polygon\", \"coordinates\": [geom_array]}\n poly = ogr.CreateGeometryFromJson(json.dumps(geom))\n return poly", "def to_shapely_polygon(self):\n # load shapely lazily, which makes the dependency more optional\n import shapely.geometry\n return shapely.geometry.Polygon([(point[0], point[1]) for point in self.exterior])", "def polygonFromInteriorPoints(geom, preferredEpsg):\n (projTr, llTr) = makeTransformations(4326, preferredEpsg)\n\n geomProj = copyGeom(geom)\n geomProj.Transform(projTr)\n geomOutline = geomProj.ConvexHull()\n geomOutline.Transform(llTr)\n return geomOutline", "def give_polygon(vertices, points):\n polygon = np.zeros((len(vertices), 2))\n for i, vertex in enumerate(vertices):\n polygon[i] = points[vertex]\n # End point of a polygon equals to start point\n polygon = polygon.tolist()\n if polygon[-1] != polygon[0]:\n polygon.append(polygon[0])\n return polygon", "def geomFromInteriorPoints(coords):\n if isinstance(coords, numpy.ndarray):\n coords = coords.tolist()\n geomDict = {'type':'MultiPoint', 'coordinates':coords}\n geomPoints = ogr.CreateGeometryFromJson(repr(geomDict))\n return geomPoints", "def get_polygon(self):\n ret_array = []\n if self.parcel_polygon_string_list:\n cur_array = []\n for num in str(self.parcel_polygon_string_list).split(','):\n cur_array.append(float(num))\n if len(cur_array) == 2:\n ret_array.append(cur_array.copy())\n cur_array = []\n return ret_array", "def _proc_polygon(self, tokens, filled):\n\n pts = [(p[\"x\"], p[\"y\"]) for p in tokens[\"points\"]]\n component = Polygon(pen=self.pen, points=pts, filled=filled)\n\n return component", "def getMultiGeometry(geometry):\n geom = arcpy.Array()\n for feature in geometry:\n array = arcpy.Array()\n for point in feature:\n point = arcpy.Point(float(point[0]), float(point[1]))\n array.add(point)\n geom.add(array)\n return geom", "def decompose_polygon(points):\n N, _ = points.shape\n\n for i in range(1, N - 1):\n yield numpy.array([points[0], points[i], points[i + 1]])", "def generate_polygon(x,y,N):\r\n # Add the first point to the end of the list and convert to array if needed\r\n if type(x) == list:\r\n x = np.array(x + [x[0]])\r\n y = np.array(y + [y[0]])\r\n else:\r\n x = np.append(x,x[0])\r\n y = np.append(y,y[0])\r\n \r\n # 
Parameterize the arrays and interpolate\r\n d = [get_distance((x[i],y[i]),(x[i+1],y[i+1])) for i in range(len(x)-1)]\r\n d = np.cumsum([0]+d)\r\n t = np.linspace(0,d[-1],N)\r\n fx = interp1d(d,x)\r\n fy = interp1d(d,y)\r\n x = fx(t)\r\n y = fy(t)\r\n \r\n return x,y", "def read_polygon_shapefile(filename):\n result = cpp_read_polygon_shapefile(filename)\n f_pos = Series(result[0], name=\"f_pos\")\n r_pos = Series(result[1], name=\"r_pos\")\n return (\n f_pos,\n r_pos,\n DataFrame({\"x\": result[2], \"y\": result[3]}),\n )", "def construct_polygon(self, polygon_longs: List, polygon_lats: List) -> gpd.GeoDataFrame:\n\n polygon_geom = Polygon(zip(polygon_longs, polygon_lats))\n\n crs = {'init': 'epsg:4326'}\n polygon = gpd.GeoDataFrame(index=[0], crs=crs, geometry=[polygon_geom])\n\n polygon.to_file(filename=f'{self.polygon_path}/polygon_{self.postfix}.geojson', driver='GeoJSON')\n polygon.to_file(filename=f'{self.polygon_path}/polygon_{self.postfix}.shp', driver=\"ESRI Shapefile\")\n\n self.monitor.info(\"-> Created area polygon.\")\n return polygon", "def hex_to_polygon(hexid):\n list_of_coords_list=h3.h3_to_geo_boundary(hexid,geo_json=True)\n return Polygon([tuple(i) for i in list_of_coords_list])", "def get_boundary_as_polygon(self, do_geo=True):\n xhor, yhor = self.get_coordinates()\n dimensions = xhor.shape\n xbottom = xhor[0, :]\n xright = xhor[:, dimensions[1]-1]\n xtop = xhor[dimensions[0]-1, :][::-1]\n xleft = xhor[:, 0][::-1]\n\n ybottom = yhor[0, :]\n yright = yhor[:, dimensions[1]-1]\n ytop = yhor[dimensions[0]-1, :][::-1]\n yleft = yhor[:, 0][::-1]\n\n lon_square = np.concatenate((xbottom, xright, xtop, xleft))\n lat_square = np.concatenate((ybottom, yright, ytop, yleft))\n\n return lon_square, lat_square", "def get_MultiPolyLists_xy(mpoly):\n # Get the x or y coordinates\n x = []\n y = []\n if isinstance(mpoly,Polygon):\n mpoly = [mpoly]\n for poly in mpoly: # the polygon objects return arrays, it's important they be lists or Bokeh fails\n exterior_coords_x = poly.exterior.coords.xy[0].tolist();\n interior_coords_x = []\n exterior_coords_y = poly.exterior.coords.xy[1].tolist();\n interior_coords_y = []\n\n for interior in poly.interiors:\n if isinstance(interior.coords.xy[0],list):\n interior_coords_x += [interior.coords.xy[0]];\n interior_coords_y += [interior.coords.xy[1]];\n else:\n interior_coords_x += [interior.coords.xy[0].tolist()];\n interior_coords_y += [interior.coords.xy[1].tolist()];\n x.append([exterior_coords_x, *interior_coords_x])\n y.append([exterior_coords_y, *interior_coords_y])\n return (x,y)", "def generate_mesh(\n poly_coords: np.ndarray,\n hole_coords: Optional[List[np.ndarray]] = None,\n min_points: Optional[int] = None,\n max_edge_length: Optional[float] = None,\n convex_hull: bool = False,\n boundary: Optional[np.ndarray] = None,\n preserve_boundary: bool = False,\n min_angle: float = 32.5,\n **kwargs,\n) -> Tuple[np.ndarray, np.ndarray]:\n poly_coords = ensure_unique(poly_coords)\n if hole_coords is None:\n hole_coords = []\n hole_coords = [ensure_unique(coords) for coords in hole_coords]\n # Facets is a shape (m, 2) array of edge indices.\n # coords[facets] is a shape (m, 2, 2) array of edge coordinates:\n # [(x0, y0), (x1, y1)]\n coords = np.concatenate([poly_coords] + hole_coords, axis=0)\n xmin = coords[:, 0].min()\n dx = np.ptp(coords[:, 0])\n ymin = coords[:, 1].min()\n dy = np.ptp(coords[:, 1])\n r0 = np.array([[xmin, ymin]]) + np.array([[dx, dy]]) / 2\n # Center the coordinates at (0, 0) to avoid floating point issues.\n coords = coords - r0\n 
indices = np.arange(len(poly_coords), dtype=int)\n if convex_hull:\n if boundary is not None:\n raise ValueError(\n \"Cannot have both boundary is not None and convex_hull = True.\"\n )\n facets = spatial.ConvexHull(coords).simplices\n else:\n if boundary is not None:\n boundary = list(map(tuple, ensure_unique(boundary - r0)))\n indices = [i for i in indices if tuple(coords[i]) in boundary]\n facets = np.array([indices, np.roll(indices, -1)]).T\n # Create facets for the holes.\n for hole in hole_coords:\n hole_indices = np.arange(\n indices[-1] + 1, indices[-1] + 1 + len(hole), dtype=int\n )\n hole_facets = np.array([hole_indices, np.roll(hole_indices, -1)]).T\n indices = np.concatenate([indices, hole_indices], axis=0)\n facets = np.concatenate([facets, hole_facets], axis=0)\n\n mesh_info = triangle.MeshInfo()\n mesh_info.set_points(coords)\n mesh_info.set_facets(facets)\n if hole_coords:\n # Triangle allows you to set holes by specifying a single point\n # that lies in each hole. Here we use the centroid of the hole.\n holes = [\n np.array(Polygon(hole).centroid.coords[0]) - r0.squeeze()\n for hole in hole_coords\n ]\n mesh_info.set_holes(holes)\n\n kwargs = kwargs.copy()\n kwargs[\"allow_boundary_steiner\"] = not preserve_boundary\n if \"min_angle\" not in kwargs:\n kwargs[\"min_angle\"] = min_angle\n\n mesh = triangle.build(mesh_info=mesh_info, **kwargs)\n points = np.array(mesh.points) + r0\n triangles = np.array(mesh.elements)\n if min_points is None and (max_edge_length is None or max_edge_length <= 0):\n return points, triangles\n\n kwargs[\"max_volume\"] = dx * dy / 100\n i = 1\n if min_points is None:\n min_points = 0\n if max_edge_length is None or max_edge_length <= 0:\n max_edge_length = np.inf\n max_length = get_edge_lengths(points, triangles).max()\n while (len(points) < min_points) or (max_length > max_edge_length):\n mesh = triangle.build(mesh_info=mesh_info, **kwargs)\n points = np.array(mesh.points) + r0\n triangles = np.array(mesh.elements)\n edges, is_boundary = get_edges(triangles)\n if preserve_boundary:\n # Only constrain the length of interior edges, i.e. edges not on the boundary.\n edges = edges[~is_boundary]\n edge_lengths = np.linalg.norm(np.diff(points[edges], axis=1), axis=2)\n max_length = edge_lengths.max()\n logger.debug(\n f\"Iteration {i}: Made mesh with {len(points)} points and \"\n f\"{len(triangles)} triangles with maximum interior edge length: \"\n f\"{max_length:.2e}. 
Target maximum edge length: {max_edge_length:.2e}.\"\n )\n if np.isfinite(max_edge_length):\n kwargs[\"max_volume\"] *= min(0.98, np.sqrt(max_edge_length / max_length))\n else:\n kwargs[\"max_volume\"] *= 0.98\n i += 1\n return points, triangles", "def generatePolygons():", "def shape_to_polygons(lines):\n from itertools import tee, izip\n def pairwise(iterable):\n a,b = tee(iterable)\n next(b, None)\n return izip(a, b)\n polygons = [[tuple(lines[0]['shape'])]]\n for a, b in pairwise(lines):\n if a['fid'] != b['fid']:\n polygons.append([])\n polygons[-1].append(tuple(b['shape']))\n return polygons", "def produce_polygon(polygon_ordered_coordinates: List, zoom: int, plot_polygon: bool = False) -> Path:\n polygon_tile_points = []\n for item in polygon_ordered_coordinates:\n polygon_tile_points += [Utility.get_tile(*item, zoom)]\n polygon_tile_points += [polygon_tile_points[0]]\n polygon = Path(polygon_tile_points)\n if plot_polygon:\n fig = plt.figure()\n ax = fig.add_subplot(111)\n patch = patches.PathPatch(polygon, facecolor='orange', lw=2)\n ax.add_patch(patch)\n ax.set_xlim(min(polygon_tile_points, key = lambda item: item[0])[0], max(polygon_tile_points, key = lambda item: item[0])[0])\n ax.set_ylim(min(polygon_tile_points, key = lambda item: item[1])[1], max(polygon_tile_points, key = lambda item: item[1])[1])\n plt.show()\n return polygon", "def polygon_to_multipolygon(geom):\n if geom.__class__.__name__ == 'Polygon':\n g = OGRGeometry(OGRGeomType('MultiPolygon'))\n g.add(geom)\n return g\n elif geom.__class__.__name__ == 'MultiPolygon':\n return geom\n else:\n raise ValueError('Geom is neither Polygon nor MultiPolygon.')", "def get_polygon_coordinates(self) -> Tuple[List, List]:\n\n polygon_query = f\"https://nominatim.openstreetmap.org/\" \\\n f\"search?city={self.location.replace(' ', '+')}&polygon_geojson=1&format=json\"\n r = requests.get(polygon_query)\n js = ast.literal_eval(r.text)\n\n self.monitor.info(\"-> Downloaded area polygon data points.\")\n clean_polygon_coords = js[0]['geojson']['coordinates'][0]\n\n polygon_lats = [float(i[1]) for i in clean_polygon_coords]\n polygon_longs = [float(i[0]) for i in clean_polygon_coords]\n\n self.monitor.info(\"-> Created lat/long vectors.\")\n return polygon_lats, polygon_longs", "def create_polygon(self, vertices, style=None, parent=None):\n d = 'M %f %f L' % (vertices[0].x, vertices[0].y)\n for p in vertices[1:]:\n d = d + ' %f,%f' % (p.x, p.y)\n if vertices[0] != vertices[-1]:\n d = d + ' %f,%f' % (vertices[0].x, vertices[0].y)\n attrs = {'d': d}\n return self.create_path(attrs, style, parent)", "def get_multipolygon(feature_or_geometry):\n if isinstance(feature_or_geometry, QgsFeature):\n geom = feature_or_geometry.geometry()\n else:\n geom = feature_or_geometry\n if geom.wkbType() == WKBPolygon:\n return [geom.asPolygon()]\n return geom.asMultiPolygon()", "def point_to_polygon_geojson(g):\n point_coordinates = g['geometry']['coordinates']\n polygon_geojson = {\n 'type': 'Feature',\n 'properties': g['properties'],\n 'geometry': {\n 'type': 'Polygon',\n 'coordinates': [\n [point_coordinates, point_coordinates, point_coordinates, point_coordinates]\n ]\n }\n }\n return polygon_geojson", "def _get_polygon(areasrc):\n\n str = areasrc.geometry.wkt\n str = re.sub('POLYGON\\(\\(', '', str)\n str = re.sub('\\)\\)', '', str)\n aa = re.split('\\,', str)\n lons = []\n lats = []\n for str in aa:\n bb = re.split('\\s+', re.sub('^\\s+', '', str))\n lons.append(float(bb[0]))\n lats.append(float(bb[1]))\n return lons, lats", "def to_geom(self):\n 
return [self.create_poly(bbox) for bbox in self.tree_bounds]", "def boundary_polygon(self):\n try:\n return self.boundary_polygon_by_edges()\n except Exception as exc:\n self.log.warning('Warning, boundary_polygon() failed using edges! Trying polygon union method')\n self.log.warning(exc,exc_info=True)\n return self.boundary_polygon_by_union()", "def _get_polygon(element):\n polygon = element.find('%s/%s/%s/%s/%s' %\n (NRML04_AREA_GEOMETRY, gml.GML_POLYGON,\n gml.GML_EXTERIOR, gml.GML_LINEAR_RING,\n gml.GML_POS_LIST)).text\n\n polygon = gml._get_polygon_from_2DLinestring(polygon)\n\n return polygon", "def create_poly(self, bounds):\n\n left, bottom, right, top = bounds\n\n return Polygon(\n [\n (left, bottom),\n (left, top),\n (right, top),\n (right, bottom),\n (left, bottom),\n ]\n )", "def read_polygon_shapefile(filename):\n result = cpp_read_polygon_shapefile(filename)\n return (\n DataFrame({\"f_pos\": result[0], \"r_pos\": result[1]}),\n DataFrame({\"x\": result[2], \"y\": result[3]}),\n )", "def boundary_polygon_by_edges(self):\n lines=self.boundary_linestrings()\n polys=join_features.lines_to_polygons(lines,close_arc=False)\n if len(polys)>1:\n raise GridException(\"somehow there are multiple boundary polygons\")\n return polys[0]", "def buildMultiPolygon(self,polygonList):\r\n geomlist=[]\r\n for geom in polygonList:\r\n # Cut 'MULTIPOLYGON(*) if we got one'\r\n if geom.exportToWkt()[:12]==\"MULTIPOLYGON\":\r\n geomWkt=geom.exportToWkt()[13:len(geom.exportToWkt())-1]\r\n else:\r\n # Cut 'POLYGON' if we got one\r\n geomWkt=geom.exportToWkt()[7:]\r\n geomlist.append(str(geomWkt))\r\n multiGeomWKT=\"MULTIPOLYGON(\"\r\n multiGeomWKT +=\",\".join(geomlist)\r\n multiGeomWKT+=\")\"\r\n #if self.debug: print multiGeomWKT\r\n multiGeom=QgsGeometry.fromWkt(multiGeomWKT)\r\n return multiGeom", "def wdraw_polygon(self, wcoords, fill, outline):\r\n dpoints = []\r\n for i in range(0, len(wcoords), 2):\r\n dpoints += self.w_to_d(wcoords[i], wcoords[i+1])\r\n self.canvas.create_polygon(dpoints, fill=fill, outline=outline)", "def PolygonGeometry(Coords):\n area = 0.0\n peri = 0.0\n for i in range(np.shape(Coords)[0]-1):\n area = area + Coords[i,0]*Coords[i+1,1] - Coords[i+1,0]*Coords[i,1]\n peri = peri + ( (Coords[i+1,0] - Coords[i,0])**2 + (Coords[i+1,1]-Coords[i,1])**2 )**0.5\n area = area + Coords[np.shape(Coords)[0]-1,0] * Coords[0,1] - Coords[0,0] * Coords[np.shape(Coords)[0]-1,1]\n area = area*0.5\n\n return area, peri", "def polygon_descriptors(corners):\n n_points = corners.shape[1]\n p, a, cx, cy = 0, 0, 0, 0\n for i in xrange(n_points):\n j = (i+1) % n_points\n dot = corners[0,i]*corners[1,j] - corners[0,j]*corners[1,i]\n a += dot\n cx += (corners[0,i] + corners[0,j]) * dot\n cy += (corners[1,i] + corners[1,j]) * dot\n p += np.linalg.norm(corners[:,i] - corners[:,j])\n a /= 2\n cx /= 6*a\n cy /= 6*a\n a = abs(a)\n return (p, a, (cx,cy))", "def polyfillaa(px, py, xrange=None, yrange=None, start_indices=None,\n area=False):\n if start_indices is None:\n if hasattr(px[0], '__len__'):\n single = False\n poly_ind = [0]\n count = 0\n ox, oy = px, py\n px, py = [], []\n for i in range(len(ox)):\n count += len(ox[i])\n poly_ind.append(count)\n px.extend(ox[i])\n py.extend(oy[i])\n poly_ind = np.array(poly_ind)\n else:\n single = True\n poly_ind = np.array([0, len(px)])\n else:\n poly_ind = np.array(start_indices, dtype=int)\n poly_ind = np.append(poly_ind, px.size)\n single = False\n\n if not isinstance(px, np.ndarray):\n px = np.array(px, dtype=float)\n py = np.array(py, dtype=float)\n\n if 
px.shape != py.shape:\n raise ValueError(\"px and py must be the same shape\")\n elif px.ndim != 1:\n raise ValueError(\"polygons must be flat arrays\")\n\n npoly = poly_ind[1:] - poly_ind[:-1]\n n = npoly.size\n minpoly = np.min(npoly)\n nbins = np.max(npoly) - minpoly + 1\n binned = (npoly - minpoly).astype(int)\n npoly_ind = np.arange(n)\n csr = csr_matrix(\n (npoly_ind, [binned, np.arange(n)]), shape=(nbins, n))\n\n areas = {} if area else None\n result = {}\n\n for i, put in enumerate(np.split(csr.data, csr.indptr[1:-1])):\n\n # number of vertices for each polygon in this group\n nvert = i + minpoly\n nshapes = put.size # number of nvert sided shapes in polygon list\n\n # take holds indices of vertices in px and py for each polygon\n take = np.repeat([poly_ind[put]], nvert, axis=0).T\n take += np.arange(nvert)\n\n # store the left most and lowest pixel covered by each polygon\n left = np.floor(np.min(px[take], axis=1)).astype(int)\n bottom = np.floor(np.min(py[take], axis=1)).astype(int)\n\n # nx and ny are the span of pixels covered in x/y directions\n nx = np.floor(np.max(px[take], axis=1)).astype(int) - left + 1\n ny = np.floor(np.max(py[take], axis=1)).astype(int) - bottom + 1\n\n # create cell grids\n ngy, ngx = ny.max(), nx.max()\n gy, gx = np.mgrid[:ngy, :ngx]\n gy, gx = gy.ravel(), gx.ravel()\n ng = gx.size\n\n # indices for raveled arrays\n inds = tuple(ind.ravel() for ind in np.indices((nshapes, nvert, ng)))\n\n # polygon vertices minus the lowest left pixel so we can\n # use gx, gy to perform faster vector operations.\n vx = px[take] - left[:, None]\n vy = py[take] - bottom[:, None]\n\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', RuntimeWarning)\n ux, uy = shift1(vx), shift1(vy)\n dx, dy = vx - ux, vy - uy\n mx, my = dy / dx, dx / dy\n\n minx = np.min([ux, vx], axis=0)[..., None]\n maxx = np.max([ux, vx], axis=0)[..., None]\n miny = np.min([uy, vy], axis=0)[..., None]\n maxy = np.max([uy, vy], axis=0)[..., None]\n\n # y coordinates at x grid lines (left edge of cell)\n cross_left_y = gx[None, None] - ux[..., None]\n cross_left_y *= mx[..., None]\n cross_left_y += uy[..., None]\n\n # x coordinates at y grid lines (bottom edge of cell)\n cross_bottom_x = gy[None, None] - uy[..., None]\n cross_bottom_x *= my[..., None]\n cross_bottom_x += ux[..., None]\n\n parallel_x = (dy == 0)[..., None] \\\n & (uy[..., None] == gy[None, None])\n if parallel_x.any():\n parallel_x &= (minx <= gx[None, None])\n parallel_x &= (gx[None, None] <= maxx)\n cross_bottom_x[parallel_x] = gx[inds[2][parallel_x.ravel()]]\n\n parallel_y = (dx == 0)[..., None] \\\n & (ux[..., None] == gx[None, None])\n if parallel_y.any():\n parallel_y &= (miny <= gy[None, None])\n parallel_y &= (gy[None, None] <= maxy)\n cross_left_y[parallel_y] = gy[inds[2][parallel_y.ravel()]]\n\n # Lines crossing bottom of cell (u -> v)\n valid_b_cross = gy[None, None] >= miny\n valid_b_cross &= gy[None, None] < maxy\n valid_x_cross = cross_bottom_x >= gx[None, None]\n valid_x_cross &= cross_bottom_x < gx[None, None] + 1\n valid_b_cross &= valid_x_cross\n\n # Lines crossing left of cell (u -> v)\n valid_l_cross = gx[None, None] >= minx\n valid_l_cross &= gx[None, None] < maxx\n valid_y_cross = cross_left_y >= gy[None, None]\n valid_y_cross &= cross_left_y < gy[None, None] + 1\n valid_l_cross &= valid_y_cross\n\n corner = cross_bottom_x == gx[None, None]\n corner &= cross_left_y == gy[None, None]\n corner |= ((gx[None, None] == ux[..., None])\n & (gy[None, None] == uy[..., None]))\n\n # valid_b_cross |= corner\n # 
valid_l_cross |= corner\n\n # Add any grid points inside polygon, not intersected by lines\n xlines = valid_b_cross | corner\n xlines = xlines.reshape(nshapes, nvert, ngy, ngx)\n grid_points = np.sum(xlines, axis=1)\n grid_points = np.roll(np.cumsum(grid_points, axis=2), 1, axis=2)\n grid_points %= 2\n grid_points[:, :, 0] = 0\n grid_points = grid_points.astype(bool).reshape(nshapes, ng)\n grid_points |= corner.any(axis=1)\n\n # Now all grid points (in or on the polygon) have been determined,\n # they should be distinguished from edges to avoid duplication.\n # Inside grid points cannot coincide with intersections, so we only\n # need to examine corners.\n valid_b_cross &= ~corner\n valid_l_cross &= ~corner\n\n # Finally, vertices located inside cell\n vertex_inside = vx[..., None] > gx[None, None]\n vertex_inside &= vx[..., None] < (gx[None, None] + 1)\n vertex_inside &= vy[..., None] > gy[None, None]\n vertex_inside &= vy[..., None] < (gy[None, None] + 1)\n\n # okay, so we now have everything we need:\n # - edges (bottom, left)\n # - inside points\n # - grid points\n\n # populate\n counter = np.zeros((nshapes, ng), dtype=int)\n sout = nshapes, (nvert * 4), ng\n polx = np.full(sout, np.nan) # maximum size\n poly = np.full(sout, np.nan) # maximum size\n\n # populate inside vertices\n if vertex_inside.any():\n ri = vertex_inside.ravel()\n itake = inds[0][ri], inds[1][ri], inds[2][ri]\n n_inside = np.cumsum(vertex_inside, axis=1) - 1\n vput = counter[itake[0], itake[2]]\n vput += n_inside[itake]\n polx[itake[0], vput, itake[2]] = vx[itake[0], itake[1]]\n poly[itake[0], vput, itake[2]] = vy[itake[0], itake[1]]\n counter[itake[0], itake[2]] += n_inside[itake] + 1\n\n # Grid points are so far calculated as the bottom-left of a cell.\n # This needs to be shared by neighbors to the west, south, and\n # south-west.\n if grid_points.any():\n # ri = np.repeat(gp_inside[:, None], nvert, axis=1).ravel()\n # itake = inds[0][ri], inds[1][ri], inds[2][ri]\n for dpx, dpy in itertools.product([0, 1], [0, 1]):\n if dpx == dpy == 0:\n valid = grid_points\n else:\n valid = grid_points & (gx[None] >= dpx) & (gy[None] >= dpy)\n if not valid.any(): # pragma: no cover\n continue\n\n idx = np.nonzero(valid)\n gp_ind = idx[1] - (dpy * ngx + dpx)\n vput = counter[idx[0], gp_ind]\n\n polx[idx[0], vput, gp_ind] = gx[idx[1]]\n poly[idx[0], vput, gp_ind] = gy[idx[1]]\n vput += 1\n counter[idx[0], gp_ind] = vput\n\n # Left edge crossings: shared by neighbor to left on it's right edge\n if valid_l_cross.any():\n for dpx in [0, 1]:\n if dpx == 1:\n valid = valid_l_cross & (gx[None, None] >= dpx)\n else:\n valid = valid_l_cross\n ncross = valid.cumsum(axis=1) - 1\n ri = valid.ravel()\n itake = inds[0][ri], inds[1][ri], inds[2][ri]\n gp_ind = itake[2] - dpx\n vput = counter[itake[0], gp_ind] + ncross[itake]\n polx[itake[0], vput, gp_ind] = gx[itake[2]]\n poly[itake[0], vput, gp_ind] = cross_left_y[itake]\n vput += 1\n counter[itake[0], gp_ind] = vput\n\n # Bottom edge crossings: shared by neighbor below on it's top edge\n if valid_b_cross.any():\n for dpy in [0, 1]:\n if dpy == 1:\n valid = valid_b_cross & (gy[None, None] >= dpy)\n else:\n valid = valid_b_cross\n ncross = valid.cumsum(axis=1) - 1\n ri = valid.ravel()\n itake = inds[0][ri], inds[1][ri], inds[2][ri]\n gp_ind = itake[2] - (dpy * ngx)\n vput = counter[itake[0], gp_ind] + ncross[itake]\n polx[itake[0], vput, gp_ind] = cross_bottom_x[itake]\n poly[itake[0], vput, gp_ind] = gy[itake[2]]\n vput += 1\n counter[itake[0], gp_ind] = vput\n\n # print(\"populate: %f\" % 
(t4 - t3))\n\n # Trim down the array as necessary and move coordinates off\n # the shared grid\n maxv = counter.max()\n polx, poly = polx[:, :maxv], poly[:, :maxv]\n polx += left[:, None, None]\n poly += bottom[:, None, None]\n gxout = left[..., None] + gx[None]\n gyout = bottom[..., None] + gy[None]\n\n keep = np.isfinite(polx)\n if xrange is not None:\n keep = np.logical_and(\n keep, np.greater_equal(gxout[:, None], xrange[0]), out=keep)\n keep = np.logical_and(\n keep, np.less(gxout[:, None], xrange[1]), out=keep)\n if yrange is not None:\n keep = np.logical_and(\n keep, np.greater_equal(gyout[:, None], yrange[0]), out=keep)\n keep = np.logical_and(\n keep, np.less(gyout[:, None], yrange[1]), out=keep)\n\n # print(\"normalize: %f\" % (t5 - t4))\n\n # note that COM needs to be done before filling in NaNs\n # We also do this to kill any bad values (usually repeated), that\n # managed to find there way to this stage.\n comx = bottleneck.nanmean(polx, axis=1)\n comy = bottleneck.nanmean(poly, axis=1)\n polx = bottleneck.push(polx, axis=1)\n poly = bottleneck.push(poly, axis=1)\n np.subtract(polx, comx[:, None], out=polx)\n np.subtract(poly, comy[:, None], out=poly)\n angle = np.arctan2(poly, polx)\n sorti = np.argsort(angle, axis=1)\n og = np.ogrid[:nshapes, :maxv, :ng]\n polx = polx[og[0], sorti, og[2]]\n poly = poly[og[0], sorti, og[2]]\n\n pixareas = (0.5 * np.abs(bottleneck.nansum(\n (polx * np.roll(poly, -1, axis=1))\n - (poly * np.roll(polx, -1, axis=1)), axis=1)))\n\n keep &= pixareas[:, None] != 0\n\n # print(\"areas: %f, %f\" % (t6 - t5, t6 - t1))\n\n mask = np.any(keep, axis=1)\n npixels = mask.sum(axis=1)\n minpix, maxpix = np.min(npixels), np.max(npixels) + 1\n npixbins = maxpix - minpix\n pixbins = (npixels - minpix).astype(int)\n pixind = np.arange(npixels.size)\n spix = csr_matrix((pixind, [pixbins, np.arange(npixels.size)]),\n shape=(npixbins, npixels.size))\n\n for pixi, putpix in enumerate(np.split(spix.data, spix.indptr[1:-1])):\n npix = pixi + minpix\n if npix == 0 or len(putpix) == 0: # pragma: no cover\n continue\n npolys = putpix.size\n takepix = mask[putpix]\n cellx = np.reshape(gxout[putpix][takepix], (npolys, npix))\n celly = np.reshape(gyout[putpix][takepix], (npolys, npix))\n cellxy = np.append(celly[:, :, None], cellx[:, :, None], axis=2)\n # this gives the cells overlapped by each polygon\n for polyind, cxy in zip(putpix, cellxy):\n result[put[polyind]] = cxy\n\n if area:\n aselect = np.reshape(pixareas[putpix][takepix], (npolys, npix))\n for polyind, pixarea in zip(putpix, aselect):\n areas[put[polyind]] = pixarea\n\n # print(\"storing results: %f, %f\" % (t7 - t6, t7 - t1))\n\n if single:\n if len(result) != 0:\n result = result[0]\n if area:\n areas = areas[0]\n else:\n result = np.empty((0, 2))\n if area:\n areas = np.empty(0)\n\n if not area:\n return result\n else:\n return result, areas", "def draw_polygon(self, *points, color=DEFAULT.color):", "def Polygon(self, polyline = False):\n\n from geographiclib.polygonarea import PolygonArea\n return PolygonArea(self, polyline)", "def lineToPolygon(geom):\n assert(geom[\"type\"] == \"LineString\")\n # LineString is only the exterior line of a polygon (no holes possible)\n return geojson.Polygon(coordinates=[geom[\"coordinates\"]], validate=True)", "def create_new_polygon(self, coords, **options):\n\n if 'outline' not in options:\n options['outline'] = self.variables.foreground_color\n if 'width' not in options:\n options['width'] = self.variables.poly_border_width\n if 'fill' not in options:\n options['fill'] = 
''\n\n shape_id = self.create_polygon(*coords, **options)\n self.variables.vector_objects[str(shape_id)] = VectorObject(SHAPE_TYPES.POLYGON, options)\n self.variables.shape_ids.append(shape_id)\n self.set_shape_pixel_coords_from_canvas_coords(shape_id, coords)\n self.variables.current_shape_id = shape_id\n return shape_id", "def _DrawPolygonList(*args, **kwargs):\n return _gdi_.DC__DrawPolygonList(*args, **kwargs)", "def _getshapepoly(self, polygon, compound=False):\n if self._resizemode == \"user\" or compound:\n t11, t12, t21, t22 = self._shapetrafo\n elif self._resizemode == \"auto\":\n l = max(1, self._pensize/5.0)\n t11, t12, t21, t22 = l, 0, 0, l\n elif self._resizemode == \"noresize\":\n return polygon\n return tuple((t11*x + t12*y, t21*x + t22*y) for (x, y) in polygon)", "def make_simple_poly(origin):\r\n # Create a rectangular ring\r\n lon, lat = origin[0], origin[1]\r\n width = 100\r\n ring = ogr.Geometry(ogr.wkbLinearRing)\r\n ring.AddPoint(lon, lat)\r\n ring.AddPoint(lon + width, lat)\r\n ring.AddPoint(lon + width, lat - width / 2.0)\r\n ring.AddPoint(lon, lat - width / 2.0)\r\n ring.AddPoint(lon, lat)\r\n\r\n # Create polygon geometry\r\n poly = ogr.Geometry(ogr.wkbPolygon)\r\n poly.AddGeometry(ring)\r\n return poly", "def polygonpts(nSides, radius=1.0):\n\treturn [[cos(theta)*radius, sin(theta)*radius] for theta in frange(0, twopi, nSides+1)[:-1] ]", "def shapePolyToShapely(p: pcbnew.SHAPE_POLY_SET) \\\n -> Union[shapely.geometry.Polygon, shapely.geometry.MultiPolygon]:\n polygons = []\n for pIdx in range(p.OutlineCount()):\n kOutline = p.Outline(pIdx)\n assert kOutline.IsClosed()\n outline = shapeLinechainToList(kOutline)\n holes = []\n for hIdx in range(p.HoleCount(pIdx)):\n kHole = p.Hole(hIdx)\n assert kHole.isClosed()\n holes.append(shapeLinechainToList(kHole))\n polygons.append(Polygon(outline, holes=holes))\n if len(polygons) == 1:\n return polygons[0]\n return MultiPolygon(polygons=polygons)", "def geos_geom_from_py(ob, create_func=...): # -> tuple[Any | Unknown, Unknown]:\n ...", "def parse_poly_osm_file(lines):\n in_ring = False\n coords = []\n ring = []\n\n for (index, line) in enumerate(lines):\n if index == 0:\n # first line is junk.\n continue\n\n elif in_ring and line.strip() == 'END':\n # we are at the end of a ring, perhaps with more to come.\n in_ring = False\n\n elif in_ring:\n # we are in a ring and picking up new coordinates.\n ring.append(list(map(float, line.split())))\n\n elif not in_ring and line.strip() == 'END':\n # we are at the end of the whole polygon.\n break\n\n elif not in_ring and line.startswith('!'):\n # we are at the start of a polygon part hole.\n coords[-1].append([])\n ring = coords[-1][-1]\n in_ring = True\n\n elif not in_ring:\n # we are at the start of a polygon part.\n coords.append([[]])\n ring = coords[-1][0]\n in_ring = True\n\n return MultiPolygon(*(Polygon(*polycoords) for polycoords in coords))", "def gml_to_polygon(footprint):\n footprint = footprint.replace('\\n', '').strip()\n coords_poly = []\n #\n # Sentinel-1\n # (http://www.opengis.net/gml/srs/epsg.xml#4326)\n #\n if ',' in footprint:\n coords_gml = footprint.split()\n for coord_pair in coords_gml:\n lat, lon = [float(_) for _ in coord_pair.split(',')]\n if lon < -180.0:\n lon = -180.0\n if lon > 180.0:\n lon = 180.0\n if lat < -90.0:\n lat = -90.0\n if lat > 90.0:\n lat = 90.0\n coords_poly.append('{lon:.4f} {lat:.4f}'.format(lon=lon, lat=lat))\n\n #\n # Sentinel-3 and Sentinel-2\n # (http://www.opengis.net/def/crs/EPSG/0/4326)\n #\n else:\n coords_gml = 
footprint.split()\n for i in range(len(coords_gml)//2):\n lat = float(coords_gml[2*i])\n lon = float(coords_gml[2*i+1])\n if lon < -180.0:\n lon = -180.0\n if lon > 180.0:\n lon = 180.0\n if lat < -90.0:\n lat = -90.0\n if lat > 90.0:\n lat = 90.0\n coords_poly.append('{lon:.4f} {lat:.4f}'.format(lon=lon, lat=lat))\n\n #\n # Make sure the polygon is a closed line string.\n #\n if coords_poly[0] != coords_poly[-1]:\n coords_poly.append(coords_poly[0])\n\n wkt = 'POLYGON (({}))'.format(','.join(coords_poly))\n return wkt", "def polygon_path(x, y=None):\n\n if y is None:\n y = x\n\n return np.vstack([\n np.vstack([x, np.full_like(x, y[0])]).T,\n np.vstack([np.full_like(y, x[-1]), y]).T[1:],\n np.vstack([x, np.full_like(x, y[-1])]).T[::-1][1:],\n np.vstack([np.full_like(y, x[0]), y]).T[::-1][1:]]).T", "def get_region(geom):\n polygons = []\n coordinates = geom.get('coordinates')\n polygons.append(ee.Geometry.Polygon(coordinates))\n return ee.FeatureCollection(polygons)", "def create_ogr_linestring_from_list(geom: list) -> ogr.Geometry:\n return ogr.CreateGeometryFromJson(json.dumps({\"type\": 'LineString', 'coordinates': geom}))", "def convert_shapely_points_to_tuples(list_of_points) -> list:\n return [(p.x, p.y) for p in list_of_points]", "def polygon_from_str(line):\n # remove possible utf-8 BOM\n if line.startswith('\\xef\\xbb\\xbf'):\n line = line[3:]\n polygon_points = [float(o) for o in line.split(',')[:8]]\n polygon_points = np.array(polygon_points).reshape(4, 2)\n polygon = Polygon(polygon_points).convex_hull\n return polygon", "def polygon_from_envelope(cls, min_x, min_y, max_x, max_y, crs=None):\n crs = _validate_crs(crs)\n return cls(arctern.ST_PolygonFromEnvelope(min_x, min_y, max_x, max_y), crs=crs)", "def poly(*args):\n if len(args) == 0 or len(args) == 2:\n raise ValueError('bad number of arguments {} passed to poly()'.format(len(args)))\n if len(args) == 1:\n if ispoly(args[0]):\n return deepcopy(args[0])\n else:\n raise VauleError('non-poly list passed to poly()')\n # args is of length 3 or greater. 
Check to see if args are points\n a = list(args)\n b = list(filter(lambda x: not ispoint(x),a))\n if len(b) > 0:\n raise ValueError('non-point arguments to poly(): {} '.format(b))\n return deepcopy(a)", "def PolygonPath(polygon):\n\n def coding(ob):\n # The codes will be all \"LINETO\" commands, except for \"MOVETO\"s at the\n # beginning of each subpath\n n = len(getattr(ob, 'coords', None) or ob)\n vals = ones(n, dtype=Path.code_type) * Path.LINETO\n vals[0] = Path.MOVETO\n return vals\n\n if hasattr(polygon, 'geom_type'): # Shapely\n ptype = polygon.geom_type\n if ptype == 'Polygon':\n polygon = [Polygon(polygon)]\n elif ptype == 'MultiPolygon':\n polygon = [Polygon(p) for p in polygon]\n else:\n raise ValueError(\n \"A polygon or multi-polygon representation is required\")\n\n else: # GeoJSON\n polygon = getattr(polygon, '__geo_interface__', polygon)\n ptype = polygon[\"type\"]\n if ptype == 'Polygon':\n polygon = [Polygon(polygon)]\n elif ptype == 'MultiPolygon':\n polygon = [Polygon(p) for p in polygon['coordinates']]\n else:\n raise ValueError(\n \"A polygon or multi-polygon representation is required\")\n\n vertices = concatenate([\n concatenate([asarray(t.exterior)[:, :2]] +\n [asarray(r)[:, :2] for r in t.interiors])\n for t in polygon])\n codes = concatenate([\n concatenate([coding(t.exterior)] +\n [coding(r) for r in t.interiors]) for t in polygon])\n\n return Path(vertices, codes)", "def toFillPolygon(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads\r\n return QPolygonF", "def polygon_weights(polygon, xrange=None, yrange=None,\n center=True): # pragma: no cover\n poly = np.array(polygon)\n if poly.ndim != 2 or poly.shape[-1] != 2 or poly.shape[0] < 3:\n log.warning(\"invalid polygon shape\")\n return []\n\n xlims = [poly[:, 1].min(), poly[:, 1].max()]\n ylims = [poly[:, 0].min(), poly[:, 0].max()]\n\n if xrange is not None:\n xlims[0] = np.nanmax((xlims[0], np.nanmin(xrange)))\n xlims[1] = np.nanmin((xlims[1], np.nanmax(xrange)))\n if yrange is not None:\n ylims[0] = np.nanmax((ylims[0], np.nanmin(yrange)))\n ylims[1] = np.nanmin((ylims[1], np.nanmax(yrange)))\n\n if xlims[0] >= xlims[1] or ylims[0] >= ylims[1]:\n log.debug(\"out of bounds\")\n return []\n\n xlims = [int(np.floor(xlims[0])), int(np.ceil(xlims[1]))]\n ylims = [int(np.floor(ylims[0])), int(np.ceil(ylims[1]))]\n\n if center:\n dx = -0.5, 0.5\n dy = -0.5, 0.5\n else:\n dx = 0, 1\n dy = 0, 1\n\n gy, gx = np.mgrid[ylims[0]:ylims[1] + 1, xlims[0]:xlims[1] + 1]\n p = path.Path(poly)\n result = []\n for ycen, xcen in zip(gy.ravel(), gx.ravel()):\n bbox = Bbox([[ycen + dy[0], xcen + dx[0]],\n [ycen + dy[1], xcen + dy[1]]])\n area = polygon_area(p.clip_to_bbox(bbox))\n if area != 0:\n result.append(((ycen, xcen), area))\n\n return result", "def drawPolygon(xlist,ylist,ucoords=1):\n if ucoords:\n dislin.rlarea(x,y,len(xlist))\n else:\n dislin.areaf(x,y,len(ylist))", "def get_mesh_boundary(triangles):\n # Create edges and sort each vertices on each edge.\n edge0 = triangles[:,0:2]\n edge1 = triangles[:,1:3]\n edge2 = triangles.take((0,2), axis=1)\n edges = np.concatenate((edge0, edge1, edge2), axis=0)\n edge_sort = np.sort(edges, axis=1)\n\n # Get unique edges that are only present once.\n (uniq, uniq_ids, counts) = np.unique(edge_sort, axis=0, return_index=True, return_counts=True)\n edge_inds = np.arange(edge_sort.shape[0], dtype=int)\n outer_edge_ids = edge_inds[np.in1d(edge_inds, uniq_ids[counts==1])]\n outer_edges = edge_sort[outer_edge_ids,:]\n num_outer_edges = 
outer_edges.shape[0]\n\n # Assume we need to close the polygon.\n num_outer_verts = num_outer_edges + 1\n\n # Loop over outer edges and use traversal method to get ordered vertices.\n v_start = outer_edges[0,0]\n v_end = outer_edges[0,1]\n vert_inds = -1*np.ones(num_outer_verts, dtype=int)\n vert_inds[0] = v_start\n vert_inds[1] = v_end\n vert_num = 2\n outer_edges[0,:] = -1\n for edge_num in range(1,num_outer_edges):\n edge_inds_next = np.where(outer_edges == v_end)\n if (edge_inds_next[0].shape[0] < 1):\n msg = \"Next edge not found for vertex %d\" % v_end\n raise ValueError(msg)\n edge_ind_next = edge_inds_next[0][0]\n vert_ind_next = 0\n if (edge_inds_next[1][0] == 0):\n vert_ind_next = 1\n vert_inds[vert_num] = outer_edges[edge_ind_next, vert_ind_next]\n outer_edges[edge_ind_next, :] = -1\n v_end = vert_inds[vert_num]\n vert_num += 1\n\n return vert_inds", "def polygon_array(cls, polygon, num, space, space_series, n_series, origin, subsampling=1):\n if (subsampling == 0) or (subsampling == -1):\n raise ValueError('Subsampling cannot be 0 or -1') \n \n # take care of subsampling\n n_series_np = np.arange(0,n_series)\n if subsampling>0:\n num_np = [x for x in range(num) if np.mod(x, subsampling)==0]\n else:\n num_np = [x for x in range(num) if np.mod(x, subsampling)!=0]\n\n # create arrays with combinations of objects and series positions \n m1, m2 = np.meshgrid(n_series_np, num_np, indexing='ij')\n\n # compute all x locations\n all_coords = np.ravel(origin[0] + m2*space+m1*(space*num+space_series))\n num_obj_after_sampling = len(all_coords)\n \n # combine x with y locations\n all_coords = np.stack([all_coords, origin[1]*np.ones_like(all_coords)])\n\n # concatenate all polygons and keep their length in memory\n poly_len = [len(p) for p in polygon.coord]\n poly_concat = np.concatenate(polygon.coord)\n\n # compute final coordinates using broadcasting\n # num_poly_edges x 2 x 1\n # x 2 x num_new_coords\n # num_poly_edges x 2 x num_new_coords\n complete = np.moveaxis(poly_concat[:,:, np.newaxis] + all_coords, 2,0)\n\n # reshape as long 2d list of length num_new_coords * num_poly_edges\n commplete_reshaped = np.reshape(complete, (complete.shape[0]*complete.shape[1], 2))\n\n # split into correct polygon lists\n split_pos=np.cumsum(num_obj_after_sampling * poly_len)\n pg_array = np.split(commplete_reshaped, split_pos[:-1])\n\n pg_array_obj = cls()\n pg_array_obj.coord = pg_array\n pg_array_obj.params = {'num':num, 'space':space, 'space_series':space_series, 'n_series':n_series, 'origin':origin, 'subsampling':subsampling}\n \n return pg_array_obj", "def get_polygon_envelope(polygon, x_pixel_size, y_pixel_size):\n # retrieve polygon points\n poly_pts = list(polygon.exterior.coords)\n # split tuple points into x and y coordinates and convert them to numpy arrays\n xs, ys = [np.array(coords) for coords in zip(*poly_pts)]\n # compute bounding box\n min_x, min_y, max_x, max_y = min(xs), min(ys), max(xs), max(ys)\n # round boundary coordinates to upper-left corner\n min_x = int(round(min_x / x_pixel_size, DECIMALS)) * x_pixel_size\n min_y = int(np.ceil(round(min_y / y_pixel_size, DECIMALS))) * y_pixel_size\n max_x = int(round(max_x / x_pixel_size, DECIMALS)) * x_pixel_size\n max_y = int(np.ceil(round(max_y / y_pixel_size, DECIMALS))) * y_pixel_size\n\n return min_x, min_y, max_x, max_y", "def read_shp(filename):\n sr = shapefile.Reader(filename)\n \n if sr.shapeType == shapefile.POLYGON:\n shapes = sr.shapes()\n geometries = [Polygon(shape.points) for shape in shapes]\n \n fields = sr.fields[:]\n if 
fields[0][0] == 'DeletionFlag':\n fields.pop(0)\n fields = [field[0] for field in fields] # extract field name only\n \n records = []\n for record in sr.records():\n for i, value in enumerate(record):\n try:\n record[i] = float(value) # convert record values to numeric...\n except ValueError:\n pass # ... if possible\n \n records.append(record)\n \n return (geometries, records, fields)\n \n elif sr.shapeType == shapefile.POLYLINE:\n shapes = sr.shapes()\n geometries = [LineString(shape.points) for shape in shapes]\n \n fields = sr.fields[:] # [:] = duplicate field list\n if fields[0][0] == 'DeletionFlag':\n fields.pop(0)\n fields = [field[0] for field in fields] # extract field name only\n \n records = []\n for record in sr.records():\n for i, value in enumerate(record):\n try:\n record[i] = float(value) # convert record values to numeric...\n except ValueError:\n pass # ... if possible\n \n records.append(record)\n \n return (geometries, records, fields)\n \n \n elif sr.shapeType == shapefile.MULTIPOINT:\n raise NotImplementedError\n \n else:\n raise NotImplementedError", "def create_polygon(meshcode):\r\n lat1,lon1 = ju.to_meshpoint(meshcode,0,0)\r\n lat2,lon2 = ju.to_meshpoint(meshcode,1,1)\r\n poly_text = 'POLYGON (('+str(lon1)+' '+str(lat1)+','+str(lon1)+' '+str(lat2)+','+str(lon2)+' '+str(lat2)+','+str(lon2)+' '+str(lat1)+','+str(lon1)+' '+str(lat1)+'))'\r\n return poly_text", "def getPolyCoords(row, geom, coord_type):\n # Parse the exterior of the coordinate\n exterior = row[geom].exterior\n if coord_type == 'x':\n # Get the x coordinates of the exterior\n return list( exterior.coords.xy[0])\n elif coord_type == 'y':\n # Get the y coordinates of the exterior\n return list( exterior.coords.xy[1])", "def _createpoly(self):\n return self.cv.create_polygon((0, 0, 0, 0, 0, 0), fill=\"\", outline=\"\")", "def polygon_from_str(line):\r\n polygon_points = [float(o) for o in line.split(',')[:8]]\r\n polygon_points = np.array(polygon_points).reshape(4, 2)\r\n polygon = Polygon(polygon_points).convex_hull\r\n return polygon", "def polygon_to_points(coords, z=None):\n\n bounds = array(coords).astype('int')\n\n bmax = bounds.max(0)\n bmin = bounds.min(0)\n\n path = Path(bounds)\n\n grid = meshgrid(range(bmin[0], bmax[0]+1), range(bmin[1], bmax[1]+1))\n\n grid_flat = zip(grid[0].ravel(), grid[1].ravel())\n\n points = path.contains_points(grid_flat).reshape(grid[0].shape).astype('int')\n points = where(points)\n points = (vstack([points[0], points[1]]).T + bmin[-1::-1]).tolist()\n if z is not None:\n points = map(lambda p: [p[0], p[1], z], points)\n\n return points", "def polygon(self):\n return self._polygon", "def polygonal(resolution, in_vertices, out_vertices_list = None):\n in_vertices = [Point(in_vertices[k,0],in_vertices[k,1]) for k in range(in_vertices.shape[0])] \n\n domain = mshr.Polygon(in_vertices) # https://bitbucket.org/fenics-project/mshr/wiki/API/Polygon\n # Create polygon defined by the given vertices. 
Vertices must be in counter-clockwise order and free of self-intersections.\n \n if(out_vertices_list is not None):\n for out_vertices in out_vertices_list:\n out_vertices = [Point(out_vertices[k,0],out_vertices[k,1]) for k in range(out_vertices.shape[0])]\n domain -= mshr.Polygon(out_vertices)\n \n mesh=mshr.generate_mesh(domain, resolution)\n\n # TODO : add refined mesh\n # if(refine_mesh):\n # d = mesh.topology().dim()\n \n # class To_refine(SubDomain):\n # def inside(self, x, on_boundary):\n # return x[1]<=0 and x[1]>= -l_mot/2-h_grid-l_vacuum/4\n\n # to_refine = To_refine()\n # marker = MeshFunction(\"bool\", mesh, d, False)\n # to_refine.mark(marker, True)\n # mesh = refine(mesh,marker)\n\n return mesh", "def _get_voronoi_poly_points(vert_index_list, voronoi_vertices,\n voronoi_centroid):\n voronoi_poly_points = []\n if -1 not in vert_index_list and len(vert_index_list) > 3:\n voronoi_poly_points = voronoi_vertices[vert_index_list]\n elif vert_index_list.size > 0:\n # ASSUME RECTANGLE\n vert_index_list = vert_index_list[vert_index_list >= 0]\n voronoi_poly_points = voronoi_vertices[vert_index_list]\n # CASE 1: 2 valid voronoi vertices\n if vert_index_list.size == 2:\n center_lon = voronoi_centroid[0]\n center_lat = voronoi_centroid[1]\n corner_lon1 = voronoi_poly_points[0][0]\n corner_lat1 = voronoi_poly_points[0][1]\n corner_lon2 = voronoi_poly_points[1][0]\n corner_lat2 = voronoi_poly_points[1][1]\n\n # check if need to add points in lon or lat\n if abs(corner_lon1-corner_lon2) > abs(corner_lat1-corner_lat2):\n dLat = center_lat - corner_lat1\n # append the corners in order\n voronoi_poly_points = np.array([\n [corner_lon1, corner_lat1],\n [corner_lon2, corner_lat2],\n [corner_lon2, center_lat + dLat],\n [corner_lon1, center_lat + dLat]\n ])\n else:\n dLon = center_lon - corner_lon1\n # append the corners in order\n voronoi_poly_points = np.array([\n [corner_lon1, corner_lat1],\n [corner_lon2, corner_lat2],\n [center_lon + dLon, corner_lat2],\n [center_lon + dLon, corner_lat1]\n ])\n # CASE 2: 1 valid voronoi vertex\n elif vert_index_list.size == 1:\n center_lon = voronoi_centroid[0]\n center_lat = voronoi_centroid[1]\n corner_lon = voronoi_poly_points[0][0]\n corner_lat = voronoi_poly_points[0][1]\n dLat = center_lat - corner_lat\n dLon = center_lon - corner_lon\n # append the corners in order\n voronoi_poly_points = np.array([\n [corner_lon, corner_lat],\n [center_lon + dLon, corner_lat],\n [center_lon + dLon, center_lat + dLat],\n [corner_lon, center_lat + dLat]\n ])\n\n return voronoi_poly_points", "def json2polygon(geojson_str):\n geojson_object = geojson.loads(geojson_str)\n return geometry.shape(geojson_object)", "def to2D(geometry):\n\n return LineString(np.column_stack(geometry.xy))", "def to_geom(self):\n return [\n self.create_poly(bbox)\n for group_idx, indices, bbox in self.sindex.leaves()\n ]", "def draw_polygon(self, coords, holes=[], **options):\n options = self._check_options(options)\n \n path = aggdraw.Path()\n \n if not hasattr(coords[0], \"__iter__\"):\n coords = _grouper(coords, 2)\n else: coords = (point for point in coords)\n\n def traverse_ring(coords):\n # begin\n startx,starty = next(coords)\n path.moveto(startx, starty)\n \n # connect to each successive point\n for nextx,nexty in coords:\n path.lineto(nextx, nexty)\n path.close()\n\n # first exterior\n traverse_ring(coords)\n\n # then holes\n for hole in holes:\n # !!! 
need to test for ring direction !!!\n if not hasattr(hole[0], \"__iter__\"):\n hole = _grouper(hole, 2)\n else: hole = (point for point in hole)\n traverse_ring(hole)\n\n # options \n args = []\n if options[\"fillcolor\"]:\n fillbrush = aggdraw.Brush(options[\"fillcolor\"])\n args.append(fillbrush)\n if options[\"outlinecolor\"]:\n outlinepen = aggdraw.Pen(options[\"outlinecolor\"], options[\"outlinewidth\"])\n args.append(outlinepen)\n \n self.drawer.path((0,0), path, *args)", "def from_polyfile(name):\n\n from anuga.utilities.numerical_tools import anglediff\n from math import pi\n import os.path\n root, ext = os.path.splitext(name)\n\n if ext == 'poly':\n filename = name\n else:\n filename = name + '.poly'\n\n\n fid = open(filename)\n\n points = [] #x, y\n values = [] #z\n ##vertex_values = [] #Repeated z\n triangles = [] #v0, v1, v2\n\n lines = fid.readlines()\n\n keyword = lines[0].strip()\n msg = 'First line in .poly file must contain the keyword: POINTS'\n assert keyword == 'POINTS', msg\n\n offending = 0\n i = 1\n while keyword == 'POINTS':\n line = lines[i].strip()\n i += 1\n\n if line == 'POLYS':\n keyword = line\n break\n\n fields = line.split(':')\n assert int(fields[0]) == i-1, 'Point indices not consecutive'\n\n #Split the three floats\n xyz = fields[1].split()\n\n x = float(xyz[0])\n y = float(xyz[1])\n z = float(xyz[2])\n\n points.append([x, y])\n values.append(z)\n\n\n k = i\n while keyword == 'POLYS':\n line = lines[i].strip()\n i += 1\n\n if line == 'END':\n keyword = line\n break\n\n\n fields = line.split(':')\n assert int(fields[0]) == i-k, 'Poly indices not consecutive'\n\n #Split the three indices\n vvv = fields[1].split()\n\n i0 = int(vvv[0])-1\n i1 = int(vvv[1])-1\n i2 = int(vvv[2])-1\n\n #Check for and exclude degenerate areas\n x0 = points[i0][0]\n y0 = points[i0][1]\n x1 = points[i1][0]\n y1 = points[i1][1]\n x2 = points[i2][0]\n y2 = points[i2][1]\n\n area = abs((x1*y0-x0*y1)+(x2*y1-x1*y2)+(x0*y2-x2*y0))/2\n if area > 0:\n\n #Ensure that points are arranged in counter clock-wise order\n v0 = [x1-x0, y1-y0]\n v1 = [x2-x1, y2-y1]\n v2 = [x0-x2, y0-y2]\n\n a0 = anglediff(v1, v0)\n a1 = anglediff(v2, v1)\n a2 = anglediff(v0, v2)\n\n\n if a0 < pi and a1 < pi and a2 < pi:\n #all is well\n j0 = i0\n j1 = i1\n j2 = i2\n else:\n #Swap two vertices\n j0 = i1\n j1 = i0\n j2 = i2\n\n triangles.append([j0, j1, j2])\n ##vertex_values.append([values[j0], values[j1], values[j2]])\n else:\n offending +=1\n\n log.critical('Removed %d offending triangles out of %d'\n % (offending, len(lines)))\n return points, triangles, values", "def SplitIntoPolygons(shape):\n ret = []\n this_polygon = []\n restart_indices = set(shape.parts)\n for idx, point in enumerate(shape.points):\n if idx in restart_indices:\n if this_polygon:\n ret.append(this_polygon)\n this_polygon = [[point[0], point[1]]]\n else:\n this_polygon.append([point[0], point[1]])\n if this_polygon:\n ret.append(this_polygon)\n return ret", "def polygon(self):\n radius = self._get_max_rupture_projection_radius()\n return self.location.to_polygon(radius)", "def extent_geom(extent):\n ring = ogr.Geometry(ogr.wkbLinearRing)\n ring.AddPoint(extent[0], extent[3])\n ring.AddPoint(extent[2], extent[3])\n ring.AddPoint(extent[2], extent[1])\n ring.AddPoint(extent[0], extent[1])\n ring.CloseRings()\n polygon = ogr.Geometry(ogr.wkbPolygon)\n polygon.AddGeometry(ring)\n return polygon", "def convert_list_to_wkt(self, geom):\n if geom['type'] == \"Polygon\":\n coords = [f\"{coord[0]} {coord[1]}\" for coord in geom['coordinates'][0]]\n 
return f\"POLYGON (( {', '.join(coords)} ))\"\n else:\n raise Exception(f\"Unknown type of Geometry in GeoJSON of {geom['type']}\")", "def multi2poly(returned_vector_pred, layer_name=None):\n try: # Try to convert multipolygon to polygon\n df = gpd.read_file(returned_vector_pred, layer=layer_name)\n if 'MultiPolygon' in df['geometry'].geom_type.values:\n logging.info(\"\\nConverting multiPolygon to Polygon...\")\n gdf_exploded = df.explode(index_parts=True, ignore_index=True)\n gdf_exploded.to_file(returned_vector_pred, layer=layer_name) # overwrite the layer readed\n except Exception as e:\n logging.error(f\"\\nSomething went wrong during the conversion of Polygon. \\nError {type(e)}: {e}\")", "def __getGeometry(self, geom):\n if \"POLYGON\" in self.geometryType:\n rings = geom['rings']\n polygon = getMultiGeometry(rings)\n polyGeom = arcpy.Polygon(polygon, self.sr)\n return polyGeom\n elif \"POLYLINE\" in self.geometryType:\n paths = geom['paths']\n polyline = getMultiGeometry(paths)\n lineGeom = arcpy.Polyline(polyline, self.sr)\n return lineGeom\n elif \"POINT\" in self.geometryType:\n try:\n point = arcpy.Point(float(geom['x']), float(geom['y']))\n except:\n raise NullGeometryError(\"Point geometry is invalid or null\")\n pointGeom = arcpy.Geometry(\"point\", point, self.sr)\n return pointGeom", "def draw_polygon(self, coords, holes=[], **options):\n path = aggdraw.Path()\n\n def traverse_ring(coords):\n # begin\n coords = grouper(coords, 2)\n startx,starty = next(coords)\n path.moveto(startx, starty)\n \n # connect to each successive point\n for nextx,nexty in coords:\n path.lineto(nextx, nexty)\n path.close()\n\n # first exterior\n traverse_ring(coords)\n\n # then holes\n for hole in holes:\n # !!! need to test for ring direction !!!\n hole = (xory for point in reversed(tuple(grouper(hole, 2))) for xory in point)\n traverse_ring(hole)\n\n # options\n args = []\n if options[\"fillcolor\"]:\n fillbrush = aggdraw.Brush(options[\"fillcolor\"])\n args.append(fillbrush)\n if options[\"outlinecolor\"]:\n outlinepen = aggdraw.Pen(options[\"outlinecolor\"], options[\"outlinewidth\"])\n args.append(outlinepen)\n \n self.drawer.path((0,0), path, *args)", "def optimal_polygon(y, w=0.5, debug=False):\n # Make sure that we use numpy array\n y = np.array(y)\n x = np.arange(len(y))\n\n # Initialization\n y = np.round(y, 6)\n p_plus = (x[0], y[0] + w)\n l_plus = (x[0], y[0] + w)\n r_plus = (x[1], y[1] + w)\n s_plus = {(x[0], y[0] + w): (x[1], y[1] + w)}\n t_plus = {(x[1], y[1] + w): (x[0], y[0] + w)}\n p_minus = (x[0], y[0] - w)\n l_minus = (x[0], y[0] - w)\n r_minus = (x[1], y[1] - w)\n s_minus = {(x[0], y[0] - w): (x[1], y[1] - w)}\n t_minus = {(x[1], y[1] - w): (x[0], y[0] - w)}\n q = []\n i = 2\n\n while i < len(y):\n # Updating CH_plus (convex hull) and CH_minus\n p = (x[i - 1], y[i - 1] + w)\n p_i_plus = (x[i], y[i] + w)\n while (p != p_plus) and _angle(p_i_plus, p, t_plus[p], '+') > np.pi:\n p = t_plus[p]\n s_plus[p] = p_i_plus\n t_plus[p_i_plus] = p\n\n p = (x[i - 1], y[i - 1] - w)\n p_i_minus = (x[i], y[i] - w)\n while (p != p_minus) and _angle(p_i_minus, p, t_minus[p], '-') > np.pi:\n p = t_minus[p]\n s_minus[p] = p_i_minus\n t_minus[p_i_minus] = p\n\n # Check if CH_plus and CH_minus intersect\n if _angle(p_i_plus, l_plus, r_minus, '+') < np.pi:\n q.append((_intersect(l_plus, r_minus, p_plus, p_minus), l_plus, r_minus, p_plus, p_minus))\n p_minus = r_minus\n p_plus = _intersect(l_plus, r_minus, (x[i - 1], y[i - 1] + w), p_i_plus)\n s_plus[p_plus] = p_i_plus\n t_plus[p_i_plus] = p_plus\n 
r_plus = p_i_plus\n r_minus = p_i_minus\n l_plus = p_plus\n l_minus = p_minus\n while _angle(l_minus, r_plus, s_minus[l_minus], '-') < np.pi:\n l_minus = s_minus[l_minus]\n elif _angle(p_i_minus, l_minus, r_plus, '-') < np.pi:\n q.append((_intersect(l_minus, r_plus, p_minus, p_plus), l_minus, r_plus, p_minus, p_plus))\n p_plus = r_plus\n p_minus = _intersect(l_minus, r_plus, (x[i - 1], y[i - 1] - w), p_i_minus)\n s_minus[p_minus] = p_i_minus\n t_minus[p_i_minus] = p_minus\n r_minus = p_i_minus\n r_plus = p_i_plus\n l_minus = p_minus\n l_plus = p_plus\n while _angle(l_plus, r_minus, s_plus[l_plus], '+') < np.pi:\n l_plus = s_plus[l_plus]\n else:\n # Updating the two seperating and supporting lines\n if _angle(p_i_plus, l_minus, r_plus, '+') < np.pi:\n r_plus = p_i_plus\n while _angle(p_i_plus, l_minus, s_minus[l_minus], '+') < np.pi:\n l_minus = s_minus[l_minus]\n\n if _angle(p_i_minus, l_plus, r_minus, '-') < np.pi:\n r_minus = p_i_minus\n while _angle(p_i_minus, l_plus, s_plus[l_plus], '-') < np.pi:\n l_plus = s_plus[l_plus]\n i += 1\n\n # Add last change point\n a = _intersect(l_plus, r_minus, p_plus, p_minus)\n b = _intersect(l_minus, r_plus, p_minus, p_plus)\n p = ((a[0] + b[0]) / 2, (a[1] + b[1]) / 2)\n q.append((p, r_minus, r_plus, p_minus, p_plus))\n\n end_a = _intersect(p, r_plus, p_i_minus, p_i_plus)\n end_b = _intersect(p, r_minus, p_i_minus, p_i_plus)\n end = ((end_a[0] + end_b[0]) / 2, (end_a[1] + end_b[1]) / 2)\n q.append((end, (None, None), (None, None), p_i_minus, p_i_plus))\n\n if debug:\n return np.array(q)\n else:\n return np.array([o[0] for o in q])", "def poly_to_list_with_overlap(self, polygon):\n added = 0\n polygon_item = polygon.polygon()\n polygon_item.translate(polygon.x(), polygon.y())\n\n # Comparator to determine which x value of two points is the highest\n def compare_x(item1, item2):\n if item1.x() < item2.x():\n return -1\n elif item1.x() > item2.x():\n return 1\n else:\n return 0\n\n # Comparator to determine which y value of two points is the highest\n def compare_y(item1, item2):\n if item1.y() < item2.y():\n return -1\n elif item1.y() > item2.y():\n return 1\n else:\n return 0\n\n # Create two lists, one sorted by ascending x-values, one by ascending y-values\n x_list = sorted(self.potential_edge_splitters, key=cmp_to_key(compare_x))\n y_list = sorted(self.potential_edge_splitters, key=cmp_to_key(compare_y))\n\n # Loop over all children to the polygon\n for item in polygon.childItems():\n # Look only at edges (overlapping of points is handled elsewhere)\n if isinstance(item, PyQt5.QtWidgets.QGraphicsLineItem):\n edge = item\n\n p1 = edge.line().p1()\n p2 = edge.line().p2()\n added_this = 0\n\n # Choose the direction with the largest disparity (to avoid scenario of straight lines)\n # then use the sorted list for that direction\n if abs(p1.x() - p2.x()) > abs(p1.y() - p2.y()):\n mode = \"X\"\n circ_list = x_list\n else:\n mode = \"Y\"\n circ_list = y_list\n\n for circ in circ_list:\n poly = circ.parentItem()\n p = circ.scenePos()\n\n # temp_p needed since edge.contains does not account for the edge being moved in the canvas\n temp_p = circ.scenePos()\n temp_p.setX(temp_p.x() - edge.scenePos().x())\n temp_p.setY(temp_p.y() - edge.scenePos().y())\n\n # Find the edges to split which contain temp_p, if the edge contains decide the orientation (in x-\n # or y-direction decided earlier) of p1 and p2, based on this insert the new point in the polygon\n # in the correct position\n if edge.contains(temp_p):\n if edge in poly.childItems():\n pass # Ignore if the 
edge is in the same polygon as the point\n else:\n if temp_p == p1 or temp_p == p2:\n pass # Don't compare if it contains an edge point, instead handled later by the overlapping points\n elif mode == \"Y\":\n if p1.y() < p2.y(): # Left to right\n index = abs(edge.localIndex)\n polygon_item.insert(index + added, p)\n added += 1\n elif p1.y() > p2.y(): # Right to left\n index = abs(edge.localIndex)\n polygon_item.insert(index + added - added_this, p)\n added_this += 1\n added += 1\n else:\n if p1.x() < p2.x(): # Left to right\n index = abs(edge.localIndex)\n polygon_item.insert(index + added, p)\n added += 1\n elif p1.x() > p2.x(): # Right to left\n index = abs(edge.localIndex)\n polygon_item.insert(index + added - added_this, p)\n added_this += 1\n added += 1\n\n return self.poly_to_list(polygon_item, \"Global\")", "def get_MultiPolyLists(mpoly,coord_type='x'):\n if coord_type == 'x':\n i=0\n elif coord_type == 'y':\n i=1\n\n # Get the x or y coordinates\n c = []\n if isinstance(mpoly,Polygon):\n mpoly = [mpoly]\n for poly in mpoly: # the polygon objects return arrays, it's important they be lists or Bokeh fails\n exterior_coords = poly.exterior.coords.xy[i].tolist();\n interior_coords = []\n for interior in poly.interiors:\n if isinstance(interior.coords.xy[i],list):\n interior_coords += [interior.coords.xy[i]];\n else:\n interior_coords += [interior.coords.xy[i].tolist()];\n c.append([exterior_coords, *interior_coords])\n return c", "def boundary_polygon_by_union(self):\n cell_geoms = [None]*self.Ncells()\n\n for i in self.valid_cell_iter():\n xy = self.nodes['x'][self.cell_to_nodes(i)]\n cell_geoms[i] = geometry.Polygon(xy)\n return ops.cascaded_union(cell_geoms)", "def extent_as_polygon(self, crs=wgs84):\n from shapely.geometry import Polygon\n\n # this is not so trivial\n # for optimisation we will transform the boundaries only\n _i = np.hstack([np.arange(self.nx+1),\n np.ones(self.ny+1)*self.nx,\n np.arange(self.nx+1)[::-1],\n np.zeros(self.ny+1)]).flatten()\n _j = np.hstack([np.zeros(self.nx+1),\n np.arange(self.ny+1),\n np.ones(self.nx+1)*self.ny,\n np.arange(self.ny+1)[::-1]]).flatten()\n _i, _j = self.corner_grid.ij_to_crs(_i, _j, crs=crs)\n return Polygon(zip(_i, _j))", "def read_geojson_polygon(geojson_polygon: str) -> List:\n geojson_polygon_dict = json.loads(geojson_polygon)\n polygon_coordinates = geojson_polygon_dict['features'][0]['geometry']['coordinates'][0]\n polygon = []\n for item in polygon_coordinates:\n polygon += [[item[1], item[0]]]\n return polygon", "def getCoords(geom):\n geomDict = eval(geom.ExportToJson())\n coords = geomDict['coordinates']\n if geomDict['type'] == 'Polygon':\n coordsArray = numpy.array(coords[0])\n elif geomDict['type'] == 'MultiPoint':\n coordsArray = numpy.array(coords)\n else:\n coordsArray = None\n return coordsArray", "def toShapely(ring, geometryList):\n outline = []\n for idxA, idxB in zip(ring, ring[1:] + ring[:1]):\n shape = geometryList[idxA].GetShape()\n if shape in [STROKE_T.S_ARC, STROKE_T.S_CIRCLE]:\n outline += approximateArc(geometryList[idxA],\n commonEndPoint(geometryList[idxA], geometryList[idxB]))\n elif shape in [STROKE_T.S_CURVE]:\n outline += approximateBezier(geometryList[idxA],\n commonEndPoint(geometryList[idxA], geometryList[idxB]))\n elif shape in [STROKE_T.S_RECT]:\n assert idxA == idxB\n outline += geometryList[idxA].GetRectCorners()\n elif shape in [STROKE_T.S_POLYGON]:\n # Polygons are always closed, so they should appear as stand-alone\n assert len(ring) in [1, 2]\n return 
shapePolyToShapely(geometryList[idxA].GetPolyShape())\n elif shape in [STROKE_T.S_SEGMENT]:\n outline.append(commonEndPoint(geometryList[idxA], geometryList[idxB]))\n else:\n raise RuntimeError(f\"Unsupported shape {shape} in outline\")\n return Polygon(outline)", "def draw_polygon(left_x, right_x, left_y, right_y, img_):\n pts_left = np.array([np.flipud(np.transpose(np.vstack([left_x, left_y])))])\n pts_right = np.array([np.transpose(np.vstack([right_x, right_y]))])\n pts = np.hstack((pts_left, pts_right))\n img_ = cv2.polylines(img_, np.int_([pts]), isClosed=False, color=(60, 200, 60), thickness=10, lineType=cv2.LINE_AA)\n img_ = cv2.fillPoly(img_, np.int_(pts), (50, 90, 50))\n return img_", "def zip_geom():\r\n engine = get_sql_engine()\r\n zipgeom = text(\r\n \"\"\"\r\n SELECT zip_code, geom\r\n FROM philly_zipcode\r\n \"\"\"\r\n )\r\n zipgeom = gpd.read_postgis(zipgeom, con=engine)\r\n return zipgeom", "def draw_collection(collection):\n args = [FrameArtist._get_args(primitive) for primitive in collection]\n points, lines = zip(*args)\n lines = itertools.chain(*lines)\n geometry = [None, None]\n geometry[0] = compas_ghpython.draw_points(points)\n geometry[1] = compas_ghpython.draw_lines(lines)\n return geometry", "def get_vertices_list(feature):\n return [\n point\n for part in Geometry.get_multipolygon(feature)\n for ring in part\n for point in ring[0:-1]\n ]", "def render_fill_2d(self, **kwds):\n poly = [polygon2d(self.coordinates_of(p), **kwds) \n for p in self.polygons]\n return sum(poly)", "def draw_polygon(\n i, sx, tx, sy, ty, xmin, xmax, ymin, ymax,\n offsets, values, xs, ys, yincreasing, eligible,\n *aggs_and_cols\n ):\n # Initialize values of pre-allocated buffers\n xs.fill(np.nan)\n ys.fill(np.nan)\n yincreasing.fill(0)\n eligible.fill(1)\n\n # First pass, compute bounding box of polygon vertices in data coordinates\n start_index = offsets[0]\n stop_index = offsets[-1]\n # num_edges = stop_index - start_index - 2\n poly_xmin = np.min(values[start_index:stop_index:2])\n poly_ymin = np.min(values[start_index + 1:stop_index:2])\n poly_xmax = np.max(values[start_index:stop_index:2])\n poly_ymax = np.max(values[start_index + 1:stop_index:2])\n\n # skip polygon if outside viewport\n if (poly_xmax < xmin or poly_xmin > xmax\n or poly_ymax < ymin or poly_ymin > ymax):\n return\n\n # Compute pixel bounds for polygon\n startxi, startyi = map_onto_pixel(\n sx, tx, sy, ty, xmin, xmax, ymin, ymax,\n max(poly_xmin, xmin), max(poly_ymin, ymin)\n )\n stopxi, stopyi = map_onto_pixel(\n sx, tx, sy, ty, xmin, xmax, ymin, ymax,\n min(poly_xmax, xmax), min(poly_ymax, ymax)\n )\n stopxi += 1\n stopyi += 1\n\n # Handle subpixel polygons (pixel width and/or height of polygon is 1)\n if (stopxi - startxi) == 1 and (stopyi - startyi) == 1:\n append(i, startxi, startyi, *aggs_and_cols)\n return\n elif (stopxi - startxi) == 1:\n for yi in range(min(startyi, stopyi) + 1, max(startyi, stopyi)):\n append(i, startxi, yi, *aggs_and_cols)\n return\n elif (stopyi - startyi) == 1:\n for xi in range(min(startxi, stopxi) + 1, max(startxi, stopxi)):\n append(i, xi, startyi, *aggs_and_cols)\n return\n\n # Build arrays of edges in canvas coordinates\n ei = 0\n for j in range(len(offsets) - 1):\n start = offsets[j]\n stop = offsets[j + 1]\n for k in range(start, stop - 2, 2):\n x0 = values[k]\n y0 = values[k + 1]\n x1 = values[k + 2]\n y1 = values[k + 3]\n\n # Map to canvas coordinates without rounding\n x0c = x_mapper(x0) * sx + tx - 0.5\n y0c = y_mapper(y0) * sy + ty - 0.5\n x1c = x_mapper(x1) * sx + tx - 0.5\n 
y1c = y_mapper(y1) * sy + ty - 0.5\n\n if y1c > y0c:\n xs[ei, 0] = x0c\n ys[ei, 0] = y0c\n xs[ei, 1] = x1c\n ys[ei, 1] = y1c\n yincreasing[ei] = 1\n elif y1c < y0c:\n xs[ei, 1] = x0c\n ys[ei, 1] = y0c\n xs[ei, 0] = x1c\n ys[ei, 0] = y1c\n yincreasing[ei] = -1\n else:\n # Skip horizontal edges\n continue\n\n ei += 1\n\n # Perform scan-line algorithm\n num_edges = ei\n for yi in range(startyi, stopyi):\n # All edges eligible at start of new row\n eligible.fill(1)\n for xi in range(startxi, stopxi):\n # Init winding number\n winding_number = 0\n for ei in range(num_edges):\n if eligible[ei] == 0:\n # We've already determined that edge is above, below, or left\n # of edge for the current pixel\n continue\n\n # Get edge coordinates.\n # Note: y1c > y0c due to how xs/ys were populated\n x0c = xs[ei, 0]\n x1c = xs[ei, 1]\n y0c = ys[ei, 0]\n y1c = ys[ei, 1]\n\n # Reject edges that are above, below, or left of current pixel.\n # Note: Edge skipped if lower vertex overlaps,\n # but is kept if upper vertex overlaps\n if (y0c >= yi or y1c < yi\n or (x0c < xi and x1c < xi)\n ):\n # Edge not eligible for any remaining pixel in this row\n eligible[ei] = 0\n continue\n\n if xi <= x0c and xi <= x1c:\n # Edge is fully to the right of the pixel, so we know ray to the\n # the right of pixel intersects edge.\n winding_number += yincreasing[ei]\n else:\n # Now check if edge is to the right of pixel using cross product\n # A is vector from pixel to first vertex\n ax = x0c - xi\n ay = y0c - yi\n\n # B is vector from pixel to second vertex\n bx = x1c - xi\n by = y1c - yi\n\n # Compute cross product of B and A\n bxa = (bx * ay - by * ax)\n\n if bxa < 0 or (bxa == 0 and yincreasing[ei]):\n # Edge to the right\n winding_number += yincreasing[ei]\n else:\n # Edge to left, not eligible for any remaining pixel in row\n eligible[ei] = 0\n continue\n\n if winding_number != 0:\n # If winding number is not zero, point\n # is inside polygon\n append(i, xi, yi, *aggs_and_cols)" ]
[ "0.7088763", "0.6743673", "0.6743673", "0.65676457", "0.65600157", "0.6526626", "0.64702845", "0.64233387", "0.6330234", "0.6271293", "0.6227688", "0.6217067", "0.6208036", "0.6129953", "0.610706", "0.6104611", "0.6093786", "0.60640717", "0.6050751", "0.60261005", "0.6024823", "0.6015104", "0.60010046", "0.596467", "0.5961635", "0.5947548", "0.59293056", "0.5921264", "0.5881323", "0.5869994", "0.58698225", "0.58649766", "0.58546835", "0.5835548", "0.5835072", "0.5820017", "0.5803684", "0.5786424", "0.5784071", "0.5774783", "0.5767852", "0.5757366", "0.574903", "0.5742481", "0.57114", "0.57005", "0.56718165", "0.5658645", "0.5645327", "0.56281376", "0.5619985", "0.5616246", "0.5613524", "0.5600841", "0.559552", "0.55809563", "0.55717427", "0.55715555", "0.5570921", "0.55648947", "0.5555096", "0.55548847", "0.55543804", "0.555274", "0.55511904", "0.55493915", "0.5549256", "0.5541474", "0.5539135", "0.5531021", "0.55270886", "0.55235016", "0.5519957", "0.5509386", "0.55031633", "0.5492142", "0.5490013", "0.5488092", "0.5483822", "0.5478961", "0.5478933", "0.54704833", "0.54576355", "0.545709", "0.5456432", "0.5450242", "0.5446065", "0.54367226", "0.54303616", "0.54275876", "0.54252595", "0.54186773", "0.54136956", "0.540783", "0.53953344", "0.5392418", "0.5390297", "0.5384268", "0.5383818", "0.538131" ]
0.63739526
8
The given list of pairs (or 2d numpy array) is the (x, y) coords of a set of internal points inside a polygon. Returns a MultiPoint Geometry.
def geomFromInteriorPoints(coords):
    if isinstance(coords, numpy.ndarray):
        coords = coords.tolist()
    geomDict = {'type':'MultiPoint', 'coordinates':coords}
    geomPoints = ogr.CreateGeometryFromJson(repr(geomDict))
    return geomPoints
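A minimal usage sketch for the geomFromInteriorPoints document above, assuming the osgeo.ogr and numpy modules it relies on are importable; the coordinate values and variable names below are illustrative only, not part of the dataset.

from osgeo import ogr
import numpy

# Two hypothetical interior points given as an (n, 2) array of (x, y) pairs.
pts = numpy.array([[145.2, -37.8], [145.3, -37.9]])
geom = geomFromInteriorPoints(pts)
# Expected to print a MultiPoint WKT such as: MULTIPOINT (145.2 -37.8,145.3 -37.9)
print(geom.ExportToWkt())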
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_MultiPolyLists_xy(mpoly):\n # Get the x or y coordinates\n x = []\n y = []\n if isinstance(mpoly,Polygon):\n mpoly = [mpoly]\n for poly in mpoly: # the polygon objects return arrays, it's important they be lists or Bokeh fails\n exterior_coords_x = poly.exterior.coords.xy[0].tolist();\n interior_coords_x = []\n exterior_coords_y = poly.exterior.coords.xy[1].tolist();\n interior_coords_y = []\n\n for interior in poly.interiors:\n if isinstance(interior.coords.xy[0],list):\n interior_coords_x += [interior.coords.xy[0]];\n interior_coords_y += [interior.coords.xy[1]];\n else:\n interior_coords_x += [interior.coords.xy[0].tolist()];\n interior_coords_y += [interior.coords.xy[1].tolist()];\n x.append([exterior_coords_x, *interior_coords_x])\n y.append([exterior_coords_y, *interior_coords_y])\n return (x,y)", "def getMultiGeometry(geometry):\n geom = arcpy.Array()\n for feature in geometry:\n array = arcpy.Array()\n for point in feature:\n point = arcpy.Point(float(point[0]), float(point[1]))\n array.add(point)\n geom.add(array)\n return geom", "def get_MultiPolyLists(mpoly,coord_type='x'):\n if coord_type == 'x':\n i=0\n elif coord_type == 'y':\n i=1\n\n # Get the x or y coordinates\n c = []\n if isinstance(mpoly,Polygon):\n mpoly = [mpoly]\n for poly in mpoly: # the polygon objects return arrays, it's important they be lists or Bokeh fails\n exterior_coords = poly.exterior.coords.xy[i].tolist();\n interior_coords = []\n for interior in poly.interiors:\n if isinstance(interior.coords.xy[i],list):\n interior_coords += [interior.coords.xy[i]];\n else:\n interior_coords += [interior.coords.xy[i].tolist()];\n c.append([exterior_coords, *interior_coords])\n return c", "def make_bounding_box_vectors(coord_pairs):\n\n # indexing multi-d arrays by multi-d arrays is a bit tricky\n # need to be explicit\n # this was helpful: http://bit.ly/1BcSm5y\n coord_pair_grid = np.mgrid[[slice(x) for x in coord_pairs.shape]]\n i0 = coord_pair_grid[0]\n # order by the 2nd axis of the input coord pairs\n # (i.e. 
compare lon to lon, lat to lat)\n i1 = np.argsort(coord_pairs, axis=1)\n i2 = coord_pair_grid[2]\n\n return coord_pairs[i0, i1, i2]", "def buildMultiPolygon(self,polygonList):\r\n geomlist=[]\r\n for geom in polygonList:\r\n # Cut 'MULTIPOLYGON(*) if we got one'\r\n if geom.exportToWkt()[:12]==\"MULTIPOLYGON\":\r\n geomWkt=geom.exportToWkt()[13:len(geom.exportToWkt())-1]\r\n else:\r\n # Cut 'POLYGON' if we got one\r\n geomWkt=geom.exportToWkt()[7:]\r\n geomlist.append(str(geomWkt))\r\n multiGeomWKT=\"MULTIPOLYGON(\"\r\n multiGeomWKT +=\",\".join(geomlist)\r\n multiGeomWKT+=\")\"\r\n #if self.debug: print multiGeomWKT\r\n multiGeom=QgsGeometry.fromWkt(multiGeomWKT)\r\n return multiGeom", "def convert_shapely_points_to_tuples(list_of_points) -> list:\n return [(p.x, p.y) for p in list_of_points]", "def get_multipolygon(request, location):\n geometries = request.data.get('FeatureCollection', None)\n if geometries is not None:\n geometry_list = []\n for g in geometries['features']:\n if g['geometry']['type'] == 'Point':\n g = point_to_polygon_geojson(g)\n geometry_list.append(GEOSGeometry(json.dumps(g['geometry'])))\n lng, lat = location['Longitude']['Value'], location['Latitude']['Value']\n if lat is not None and lng is not None:\n loc_point = {'type': 'Feature', 'properties': {}, 'geometry': {'type': 'Point', 'coordinates': [lng, lat]}}\n loc_polygon = point_to_polygon_geojson(loc_point)\n geometry_list.append(GEOSGeometry(json.dumps(loc_polygon['geometry'])))\n return MultiPolygon(geometry_list)\n return None", "def _multi_poly_handler(self, multi_polygon, coord_type):\n for i, part in enumerate(multi_polygon):\n if i == 0:\n coord_arrays = np.append(self._get_poly_coords(part, coord_type), np.nan)\n else:\n coord_arrays = np.concatenate([coord_arrays,\n np.append(self._get_poly_coords(\n part, coord_type), np.nan)])\n # Return the coordinates\n return coord_arrays", "def split_bygeom(self, iterable, geom_getter=lambda x: x.geom):\n points, linestrings, multipoints, multilinestrings = [], [], [], []\n\n for x in iterable:\n geom = geom_getter(x)\n if geom is None:\n pass\n elif isinstance(geom, GeometryCollection):\n # Duplicate object, shapefile do not support geometry collections !\n subpoints, sublines, pp, ll = self.split_bygeom(geom, geom_getter=lambda geom: geom)\n if subpoints:\n clone = x.__class__.objects.get(pk=x.pk)\n clone.geom = MultiPoint(subpoints, srid=geom.srid)\n multipoints.append(clone)\n if sublines:\n clone = x.__class__.objects.get(pk=x.pk)\n clone.geom = MultiLineString(sublines, srid=geom.srid)\n multilinestrings.append(clone)\n elif isinstance(geom, Point):\n points.append(x)\n elif isinstance(geom, LineString):\n linestrings.append(x)\n else:\n raise ValueError(\"Only LineString and Point geom should be here. 
Got %s for pk %d\" % (geom, x.pk))\n return points, linestrings, multipoints, multilinestrings", "def polygon(self, pointlist, cls=None, style=None, attrs=None):\n payload = self._meta.make_payload(cls, style, attrs)\n pts_str = ' '.join('%s,%s' % (x, y) for x, y in pointlist)\n self.elements.append(\"\"\"<polygon points=\"%s\" %s/>\"\"\" % (pts_str, payload))\n return self", "def polygon(self, pointlist, cls=None, style=None, attrs=None):\n payload = self._meta.make_payload(cls, style, attrs)\n pts_str = ' '.join('%s,%s' % (x, y) for x, y in pointlist)\n self.elements.append(\"\"\"<polygon points=\"%s\" %s/>\"\"\" % (pts_str, payload))\n return self", "def decompose_polygon(points):\n N, _ = points.shape\n\n for i in range(1, N - 1):\n yield numpy.array([points[0], points[i], points[i + 1]])", "def coordinates(self, xy_pairs=False):\n\n if xy_pairs:\n return tuple(zip(self._coordinates[:-1:2], self._coordinates[1::2]))\n else:\n return tuple(self._coordinates)", "def coordinates(self, xy_pairs=False):\n\n if xy_pairs:\n return tuple(zip(self._coordinates[:-1:2], self._coordinates[1::2]))\n else:\n return tuple(self._coordinates)", "def afficher_points_2D(set_points):\n X, Y = [p[0][0] for p in set_points], [p[0][1] for p in set_points]\n return(X, Y)", "def polygon_to_multipolygon(geom):\n if geom.__class__.__name__ == 'Polygon':\n g = OGRGeometry(OGRGeomType('MultiPolygon'))\n g.add(geom)\n return g\n elif geom.__class__.__name__ == 'MultiPolygon':\n return geom\n else:\n raise ValueError('Geom is neither Polygon nor MultiPolygon.')", "def cartesian_to_geographical(coordinate_triples):\n if len(coordinate_triples.shape) == 1:\n x = coordinate_triples[0]\n y = coordinate_triples[1]\n z = coordinate_triples[2]\n elif len(coordinate_triples.shape) == 2:\n assert coordinate_triples.shape[1] == 3\n x = coordinate_triples[:, 0]\n y = coordinate_triples[:, 1]\n z = coordinate_triples[:, 2]\n radius = np.sqrt(x**2 + y**2 + z**2)\n longitudes = np.arctan2(y, x)\n latitudes = np.arcsin(z/radius)\n return (latitudes, longitudes)", "def _pointset_from_tuples(self, *tuples):\n newset = set()\n for t in tuples:\n newset.add(Point(*t))\n return newset", "def coordinate_pairs(lat_axis, lon_axis):\n \n lon_mesh, lat_mesh = numpy.meshgrid(lon_axis, lat_axis) # This is the correct order\n \n return lat_mesh.flatten(), lon_mesh.flatten()", "def point_from_lists(x: Iterable[float], y: Iterable[float],\n z: Iterable[float]) -> List['Point']:\n return [Point(x[i], y[i], z[i]) for i in range(len(x))]", "def projectBack(points, proj):\n\n mpoints = MultiPoint(points)\n project = partial(\n pyproj.transform,\n proj,\n pyproj.Proj(proj='latlong', datum='WGS84'))\n gmpoints = transform(project, mpoints)\n coords = []\n for point in gmpoints.geoms:\n x, y = point.coords[0]\n coords.append((x, y))\n coords = np.array(coords)\n return coords", "def get_polygon(self):\n ret_array = []\n if self.parcel_polygon_string_list:\n cur_array = []\n for num in str(self.parcel_polygon_string_list).split(','):\n cur_array.append(float(num))\n if len(cur_array) == 2:\n ret_array.append(cur_array.copy())\n cur_array = []\n return ret_array", "def get_polygon_coordinates(self) -> Tuple[List, List]:\n\n polygon_query = f\"https://nominatim.openstreetmap.org/\" \\\n f\"search?city={self.location.replace(' ', '+')}&polygon_geojson=1&format=json\"\n r = requests.get(polygon_query)\n js = ast.literal_eval(r.text)\n\n self.monitor.info(\"-> Downloaded area polygon data points.\")\n clean_polygon_coords = 
js[0]['geojson']['coordinates'][0]\n\n polygon_lats = [float(i[1]) for i in clean_polygon_coords]\n polygon_longs = [float(i[0]) for i in clean_polygon_coords]\n\n self.monitor.info(\"-> Created lat/long vectors.\")\n return polygon_lats, polygon_longs", "def _build_point_grid(n_per_side: int) -> np.ndarray:\n offset = 1 / (2 * n_per_side)\n points_one_side = np.linspace(offset, 1 - offset, n_per_side)\n points_x = np.tile(points_one_side[None, :], (n_per_side, 1))\n points_y = np.tile(points_one_side[:, None], (1, n_per_side))\n points = np.stack([points_x, points_y], axis=-1).reshape(-1, 2)\n return points", "def is_multipoint_on_polygon(feature_1: Sequence, feature_2: Sequence) -> bool:\n points_on_poly = False\n\n points_on_poly = all(\n boolean_point_in_polygon(coords_1, feature_2[1]) for coords_1 in feature_1[1]\n )\n\n if not points_on_poly:\n return points_on_poly\n\n points_on_poly = any(\n boolean_point_in_polygon(coords_1, feature_2[1], {\"ignoreBoundary\": True})\n for coords_1 in feature_1[1]\n )\n\n return points_on_poly", "def shapePolyToShapely(p: pcbnew.SHAPE_POLY_SET) \\\n -> Union[shapely.geometry.Polygon, shapely.geometry.MultiPolygon]:\n polygons = []\n for pIdx in range(p.OutlineCount()):\n kOutline = p.Outline(pIdx)\n assert kOutline.IsClosed()\n outline = shapeLinechainToList(kOutline)\n holes = []\n for hIdx in range(p.HoleCount(pIdx)):\n kHole = p.Hole(hIdx)\n assert kHole.isClosed()\n holes.append(shapeLinechainToList(kHole))\n polygons.append(Polygon(outline, holes=holes))\n if len(polygons) == 1:\n return polygons[0]\n return MultiPolygon(polygons=polygons)", "def proj_coords(coords, proj_in, proj_out): \n return [proj_coord(coord, proj_in, proj_out) for coord in coords]", "def createCoordTuples(data):\n data['xy'] = None\n for i, row in data.iterrows():\n data['xy'][i] = [np.round(row['geometry'].x, decimals=5), np.round(row['geometry'].y, decimals=5)]\n return data", "def parse_poly_osm_file(lines):\n in_ring = False\n coords = []\n ring = []\n\n for (index, line) in enumerate(lines):\n if index == 0:\n # first line is junk.\n continue\n\n elif in_ring and line.strip() == 'END':\n # we are at the end of a ring, perhaps with more to come.\n in_ring = False\n\n elif in_ring:\n # we are in a ring and picking up new coordinates.\n ring.append(list(map(float, line.split())))\n\n elif not in_ring and line.strip() == 'END':\n # we are at the end of the whole polygon.\n break\n\n elif not in_ring and line.startswith('!'):\n # we are at the start of a polygon part hole.\n coords[-1].append([])\n ring = coords[-1][-1]\n in_ring = True\n\n elif not in_ring:\n # we are at the start of a polygon part.\n coords.append([[]])\n ring = coords[-1][0]\n in_ring = True\n\n return MultiPolygon(*(Polygon(*polycoords) for polycoords in coords))", "def get_multipolygon(feature_or_geometry):\n if isinstance(feature_or_geometry, QgsFeature):\n geom = feature_or_geometry.geometry()\n else:\n geom = feature_or_geometry\n if geom.wkbType() == WKBPolygon:\n return [geom.asPolygon()]\n return geom.asMultiPolygon()", "def shp2Mpol(fn, return_coordinate_system = False, print_coordinate_system = False):\n\n # load shapefile with the ogr toolbox of osgeo\n file = ogr.Open(fn)\n shape = file.GetLayer(0)\n\n\n epsg = int(shape.GetSpatialRef().ExportToPrettyWkt().splitlines()[-1].split('\"')[3])\n crs = CRS.from_epsg(epsg)\n if print_coordinate_system:\n print(\"The EPSG code of the coordinate system is: %d\" % (crs.to_epsg()))\n # get number of polygons in shapefile\n n_features = 
shape.GetFeatureCount()\n\n # initialize new polygon list\n pols = []\n\n # loop over all polygons\n for i in range(n_features):\n # get feature object\n feature = shape.GetFeature(i)\n print(dir(feature))\n # export to JS objects\n feature_JSON = feature.ExportToJson()\n # loads as JS object array\n feature_JSON = json.loads(feature_JSON)\n\n\n # extract coordinate attribute from JS object\n # coor is a list of all rings, first one is the outer ring, further elements are coordinate pair lists of the inner rings\n coor = feature_JSON['geometry']['coordinates']\n ex = coor[0]; inner = coor[1:]\n\n # create a shapely polygon\n if len(inner) > 0: pol = shPol(ex, inner)\n else: pol = shPol(ex)\n pols.append(pol)\n\n # create a shapely MultiPolygon\n mpol = shMPol(pols)\n\n if return_coordinate_system:\n if len(pols)==1:\n return pol, crs.to_epsg()\n else:\n return mpol, crs.to_epsg()\n else:\n if len(pols)==1:\n return pol\n else:\n return mpol", "def generatePolygons():", "def cvt2array(tuples):\n rc = []\n for t in tuples:\n rc.append(point3d(np.float32(t[X]), np.float32(t[Y]), np.float32(t[Z])))\n return rc", "def _point_array(self, objects, unique_id):\n points = []\n ids = []\n for idx, row in tqdm(objects.iterrows(), total=objects.shape[0]):\n if row[\"geometry\"].type in [\"Polygon\", \"MultiPolygon\"]:\n poly_ext = row[\"geometry\"].boundary\n else:\n poly_ext = row[\"geometry\"]\n if poly_ext is not None:\n if poly_ext.type == \"MultiLineString\":\n for line in poly_ext:\n point_coords = line.coords\n row_array = np.array(point_coords[:-1]).tolist()\n for i, a in enumerate(row_array):\n points.append(row_array[i])\n ids.append(row[unique_id])\n elif poly_ext.type == \"LineString\":\n point_coords = poly_ext.coords\n row_array = np.array(point_coords[:-1]).tolist()\n for i, a in enumerate(row_array):\n points.append(row_array[i])\n ids.append(row[unique_id])\n else:\n raise Exception(\"Boundary type is {}\".format(poly_ext.type))\n return points, ids", "def multi(*args):\n xs, ys, interpolations = util.multi_parseargs(args)\n return core.Multi(xs, ys, interpolations)", "def polygon_array(cls, polygon, num, space, space_series, n_series, origin, subsampling=1):\n if (subsampling == 0) or (subsampling == -1):\n raise ValueError('Subsampling cannot be 0 or -1') \n \n # take care of subsampling\n n_series_np = np.arange(0,n_series)\n if subsampling>0:\n num_np = [x for x in range(num) if np.mod(x, subsampling)==0]\n else:\n num_np = [x for x in range(num) if np.mod(x, subsampling)!=0]\n\n # create arrays with combinations of objects and series positions \n m1, m2 = np.meshgrid(n_series_np, num_np, indexing='ij')\n\n # compute all x locations\n all_coords = np.ravel(origin[0] + m2*space+m1*(space*num+space_series))\n num_obj_after_sampling = len(all_coords)\n \n # combine x with y locations\n all_coords = np.stack([all_coords, origin[1]*np.ones_like(all_coords)])\n\n # concatenate all polygons and keep their length in memory\n poly_len = [len(p) for p in polygon.coord]\n poly_concat = np.concatenate(polygon.coord)\n\n # compute final coordinates using broadcasting\n # num_poly_edges x 2 x 1\n # x 2 x num_new_coords\n # num_poly_edges x 2 x num_new_coords\n complete = np.moveaxis(poly_concat[:,:, np.newaxis] + all_coords, 2,0)\n\n # reshape as long 2d list of length num_new_coords * num_poly_edges\n commplete_reshaped = np.reshape(complete, (complete.shape[0]*complete.shape[1], 2))\n\n # split into correct polygon lists\n split_pos=np.cumsum(num_obj_after_sampling * poly_len)\n pg_array = 
np.split(commplete_reshaped, split_pos[:-1])\n\n pg_array_obj = cls()\n pg_array_obj.coord = pg_array\n pg_array_obj.params = {'num':num, 'space':space, 'space_series':space_series, 'n_series':n_series, 'origin':origin, 'subsampling':subsampling}\n \n return pg_array_obj", "def bounding_box_xy(points):\n x, y = islice(zip(*points), 2)\n min_x = min(x)\n max_x = max(x)\n min_y = min(y)\n max_y = max(y)\n return [\n [min_x, min_y, 0.0],\n [max_x, min_y, 0.0],\n [max_x, max_y, 0.0],\n [min_x, max_y, 0.0],\n ]", "def to_mesh(\n self,\n lims_x: array_like = (-1, 1),\n lims_y: array_like = (-1, 1),\n ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n a, b, c, d = self.cartesian()\n x_center, y_center = self.point[:2]\n\n values_x = x_center + lims_x\n values_y = y_center + lims_y\n\n X, Y = np.meshgrid(values_x, values_y)\n\n if c != 0:\n Z = -(a * X + b * Y + d) / c\n\n elif b != 0:\n Z = -(a * X + c * Y + d) / b\n X, Y, Z = X, Z, Y\n\n else:\n Z = -(b * X + c * Y + d) / a\n X, Y, Z = Z, X, Y\n\n return X, Y, Z", "def get_city_points(city):\n for item in coordinate_list:\n if item[0] == city:\n return (item[1], item[2])", "def get_array_of_matching_points(size, triangles_list,\n intermediate_triangles_list):\n\n # initiate the 2D array\n matching_points_list = [[] for _ in range(size[1])]\n # run over each coordinate in the 2D array by the given size\n\n for i in range(0, size[1]):\n for j in range(0, size[0]):\n # search for the right intermediate triangle\n for k in range(0,len(intermediate_triangles_list)):\n # the current intermediate triangle\n inter_tri = intermediate_triangles_list[k]\n # the tuple containing if the point is inside and its\n # coefficients\n point_tuple = is_point_inside_triangle((j, i), inter_tri[0],\n inter_tri[1],\n inter_tri[2])\n if point_tuple[0]:\n # if the current point is inside the current intermediate\n # triangle append the converted point to the list\n matching_points_list[i].\\\n append(convert_point(triangles_list[k],point_tuple[1]))\n break\n return matching_points_list", "def get_quad_points():\n points = np.array(\n [[0.333333333333333333333333333333, 0.333333333333333333333333333333],\n [0.950275662924105565450352089520, 0.024862168537947217274823955239],\n [0.024862168537947217274823955239, 0.950275662924105565450352089520],\n [0.024862168537947217274823955239, 0.024862168537947217274823955239],\n [0.171614914923835347556304795551, 0.414192542538082326221847602214],\n [0.414192542538082326221847602214, 0.171614914923835347556304795551],\n [0.414192542538082326221847602214, 0.414192542538082326221847602214],\n [0.539412243677190440263092985511, 0.230293878161404779868453507244],\n [0.230293878161404779868453507244, 0.539412243677190440263092985511],\n [0.230293878161404779868453507244, 0.230293878161404779868453507244],\n [0.772160036676532561750285570113, 0.113919981661733719124857214943],\n [0.113919981661733719124857214943, 0.772160036676532561750285570113],\n [0.113919981661733719124857214943, 0.113919981661733719124857214943],\n [0.009085399949835353883572964740, 0.495457300025082323058213517632],\n [0.495457300025082323058213517632, 0.009085399949835353883572964740],\n [0.495457300025082323058213517632, 0.495457300025082323058213517632],\n [0.062277290305886993497083640527, 0.468861354847056503251458179727],\n [0.468861354847056503251458179727, 0.062277290305886993497083640527],\n [0.468861354847056503251458179727, 0.468861354847056503251458179727],\n [0.022076289653624405142446876931, 0.851306504174348550389457672223],\n 
[0.022076289653624405142446876931, 0.126617206172027096933163647918],\n [0.851306504174348550389457672223, 0.022076289653624405142446876931],\n [0.851306504174348550389457672223, 0.126617206172027096933163647918],\n [0.126617206172027096933163647918, 0.022076289653624405142446876931],\n [0.126617206172027096933163647918, 0.851306504174348550389457672223],\n [0.018620522802520968955913511549, 0.689441970728591295496647976487],\n [0.018620522802520968955913511549, 0.291937506468887771754472382212],\n [0.689441970728591295496647976487, 0.018620522802520968955913511549],\n [0.689441970728591295496647976487, 0.291937506468887771754472382212],\n [0.291937506468887771754472382212, 0.018620522802520968955913511549],\n [0.291937506468887771754472382212, 0.689441970728591295496647976487],\n [0.096506481292159228736516560903, 0.635867859433872768286976979827],\n [0.096506481292159228736516560903, 0.267625659273967961282458816185],\n [0.635867859433872768286976979827, 0.096506481292159228736516560903],\n [0.635867859433872768286976979827, 0.267625659273967961282458816185],\n [0.267625659273967961282458816185, 0.096506481292159228736516560903],\n [0.267625659273967961282458816185, 0.635867859433872768286976979827]]);\n\n w = np.array(\n [0.051739766065744133555179145422,\n 0.008007799555564801597804123460,\n 0.008007799555564801597804123460,\n 0.008007799555564801597804123460,\n 0.046868898981821644823226732071,\n 0.046868898981821644823226732071,\n 0.046868898981821644823226732071,\n 0.046590940183976487960361770070,\n 0.046590940183976487960361770070,\n 0.046590940183976487960361770070,\n 0.031016943313796381407646220131,\n 0.031016943313796381407646220131,\n 0.031016943313796381407646220131,\n 0.010791612736631273623178240136,\n 0.010791612736631273623178240136,\n 0.010791612736631273623178240136,\n 0.032195534242431618819414482205,\n 0.032195534242431618819414482205,\n 0.032195534242431618819414482205,\n 0.015445834210701583817692900053,\n 0.015445834210701583817692900053,\n 0.015445834210701583817692900053,\n 0.015445834210701583817692900053,\n 0.015445834210701583817692900053,\n 0.015445834210701583817692900053,\n 0.017822989923178661888748319485,\n 0.017822989923178661888748319485,\n 0.017822989923178661888748319485,\n 0.017822989923178661888748319485,\n 0.017822989923178661888748319485,\n 0.017822989923178661888748319485,\n 0.037038683681384627918546472190,\n 0.037038683681384627918546472190,\n 0.037038683681384627918546472190,\n 0.037038683681384627918546472190,\n 0.037038683681384627918546472190,\n 0.037038683681384627918546472190])*0.5;\n quad_x = np.copy(points[:,0])\n quad_y = np.copy(points[:,1])\n return (quad_x, quad_y, w)", "def polygon_to_points(coords, z=None):\n\n bounds = array(coords).astype('int')\n\n bmax = bounds.max(0)\n bmin = bounds.min(0)\n\n path = Path(bounds)\n\n grid = meshgrid(range(bmin[0], bmax[0]+1), range(bmin[1], bmax[1]+1))\n\n grid_flat = zip(grid[0].ravel(), grid[1].ravel())\n\n points = path.contains_points(grid_flat).reshape(grid[0].shape).astype('int')\n points = where(points)\n points = (vstack([points[0], points[1]]).T + bmin[-1::-1]).tolist()\n if z is not None:\n points = map(lambda p: [p[0], p[1], z], points)\n\n return points", "def morphPointSet(v_1, v_2, warp_frac):\n v = []\n for y in range(len(v_1)):\n v.append([(v_1[y][0] * (1 - warp_frac) + v_2[y][0] * warp_frac) , (v_1[y][1] * (1 - warp_frac) + v_2[y][1] * warp_frac)])\n return ginput_to_array(v)", "def getCoordinates(p):\n if p[0] == 'p': # minimum bounding rectangle for point\n return 
(int(p[1]), int(p[2]), int(p[1]), int(p[2]))\n elif p[0] == 'c': # minimum bounding rectangle for circle\n x = int(p[1])\n y = int(p[2])\n r = int(p[3])\n return (x - r, y - r, x + r, y + r)\n elif p[0] == 'l': # minimum bounding rectangle for line segment\n x1 = int(p[1])\n y1 = int(p[2])\n x2 = int(p[3])\n y2 = int(p[4])\n if y2 > y1:\n if x1 < x2:\n return (x1, y1, x2, y2)\n else:\n return (x2, y1, x1, y2)\n else:\n if x1 < x2:\n return (x1, y2, x2, y1)\n else:\n return (x2, y2, x1, y1)", "def jiggle_on_grid_points(data, grid_polys):\n new_data = []\n centroids = np.array([t.centroid.coords for t in grid_polys])\n for t in data:\n x = t[1]\n y = t[2]\n idx = np.where(np.sum(centroids == t[1:], axis=1) == 2)[0]\n if idx:\n assert len(idx) == 1, \"Overlapping polygons are not supported\"\n idx = idx[0]\n this_datum = (t[0],) + random_points_within_poly(grid_polys[idx], 1)\n else:\n this_datum = t\n new_data.append(this_datum)\n\n return np.array(new_data)", "def basicGetPointsGeodesic(self):\n\n # more geodesic, distance=2 (complicated because random)\n data = numpy.array([[0, 1, 1, 1, 2, 2, 2, 0],\n [0, 1, 1, 1, 2, 2, 2, 0],\n [0, 1, 1, 1, 2, 2, 2, 0]])\n labels = Labels(data=data)\n result = labels.getPoints(ids=[1], mode='geodesic', distance=2, \n connectivity=1)\n result = result.tolist()\n if len(result) == 5:\n desired = [[0, 1], [0, 3], [1, 2], [2, 1], [2, 3]]\n elif len(result) == 4:\n desired = [[0, 2], [1, 1], [1, 3], [2, 2]]\n elif len(result) == 3:\n if [1, 2] in result:\n if [0, 1] in result:\n desired = [[0, 1], [1, 2], [2, 3]] \n elif [0, 3] in result:\n desired = [[0, 3], [1, 2], [2, 1]]\n elif [0, 1] in result:\n if [0, 3] in result:\n desired = [[0, 1], [0, 3], [2, 2]]\n elif [2, 1] in result:\n desired = [[0, 1], [2, 1], [1, 3]]\n else:\n desired = [[0, 1], [1, 3], [2, 2]]\n elif [2, 3] in result:\n if [0, 3] in result:\n desired = [[0, 3], [1, 1], [2, 3]]\n elif [2, 1] in result:\n desired = [[0, 2], [2, 1], [2, 3]]\n else:\n desired = [[2, 3], [1, 1], [0, 2]] \n elif [0, 3] in result:\n desired = [[0, 3], [1, 1], [2, 2]]\n elif [2, 1] in result:\n desired = [[2, 1], [1, 3], [0, 2]]\n for des in desired:\n np_test.assert_equal(des in result, True)\n for res in result:\n np_test.assert_equal(res in desired, True)\n\n # mode geodesic, distance=3, inset\n labels = Labels(data=data[1:3, 2:8])\n labels.setInset([slice(1, 3), slice(2, 8)])\n result = labels.getPoints(ids=[2], mode='geodesic', distance=3, \n connectivity=1)\n result = result.tolist()\n if len(result) == 1:\n np_test.assert_equal(result[0][1], 5)\n elif len(result) == 2:\n desired = []\n if [1, 4] in result:\n desired = [[1, 4], [2, 6]]\n elif [2, 4] in result:\n desired = [[2, 4], [1, 6]]\n for des in desired: \n np_test.assert_equal(des in result, True)\n for res in result:\n np_test.assert_equal(res in desired, True)", "def ipset_x_2d():\n x1 = np.linspace(0, 10, 11)\n x2 = np.linspace(-3, 3, 11)\n return IPSet(x=np.stack((x1, x2), axis=1), y=np.linspace(-1, 1, 11), x_new=np.stack((x1[1:-1], x2[1:-1]), axis=1))", "def simplices2polytopes(points, triangles):\n polytopes = []\n for triangle in triangles:\n logger.debug('Triangle: ' + str(triangle))\n triangle_vertices = points[triangle, :]\n logger.debug('\\t triangle points: ' +\n str(triangle_vertices))\n poly = qhull(triangle_vertices)\n logger.debug('\\n Polytope:\\n:' + str(poly))\n polytopes += [poly]\n return polytopes", "def polybbox(a):\n if len(a) == 0:\n return False\n elif len(a) == 1:\n return pointbbox(a[0])\n else:\n minx = maxx = a[0][0]\n 
miny = maxy = a[0][1]\n for i in range(1,len(a)):\n x=a[i][0]\n y=a[i][1]\n if x < minx:\n minx =x\n elif x > maxx:\n maxx = x\n if y < miny:\n miny = y\n elif y > maxy:\n maxy = y\n return [ point(minx,miny),point(maxx,maxy)]", "def read_polygon_shapefile(filename):\n result = cpp_read_polygon_shapefile(filename)\n f_pos = Series(result[0], name=\"f_pos\")\n r_pos = Series(result[1], name=\"r_pos\")\n return (\n f_pos,\n r_pos,\n DataFrame({\"x\": result[2], \"y\": result[3]}),\n )", "def generate_polygon(x,y,N):\r\n # Add the first point to the end of the list and convert to array if needed\r\n if type(x) == list:\r\n x = np.array(x + [x[0]])\r\n y = np.array(y + [y[0]])\r\n else:\r\n x = np.append(x,x[0])\r\n y = np.append(y,y[0])\r\n \r\n # Parameterize the arrays and interpolate\r\n d = [get_distance((x[i],y[i]),(x[i+1],y[i+1])) for i in range(len(x)-1)]\r\n d = np.cumsum([0]+d)\r\n t = np.linspace(0,d[-1],N)\r\n fx = interp1d(d,x)\r\n fy = interp1d(d,y)\r\n x = fx(t)\r\n y = fy(t)\r\n \r\n return x,y", "def convert_input(coords: Union[npt.NDArray, List]) -> Tuple[npt.NDArray, ...]:\n if isinstance(coords, np.ndarray):\n assert coords.ndim == 2, \"'coords' must be a (n, 2) array\"\n assert coords.shape[1] == 2, \"'coords' must be a (n, 2) array\"\n x = coords[:, 0]\n y = coords[:, 1]\n elif isinstance(coords, list):\n x = np.array([point[0] for point in coords])\n y = np.array([point[1] for point in coords])\n else:\n raise Exception(\"Parameter 'coords' is an unsupported type: \" + str(type(coords)))\n return x, y", "def _build_points(self, xs, ys):\n return np.hstack([xs, ys])", "def _proc_polygon(self, tokens, filled):\n\n pts = [(p[\"x\"], p[\"y\"]) for p in tokens[\"points\"]]\n component = Polygon(pen=self.pen, points=pts, filled=filled)\n\n return component", "def get_outer_vertices(feature):\n return [\n point\n for part in Geometry.get_multipolygon(feature)\n for point in part[0][0:-1]\n ]", "def generate_interpolated_points(point1, point2):\n points = connect(np.array([point2, point1]))\n return set(map(tuple, points))", "def _multigeometry(self, ogr_geometry):\n\n geo_type = ogr_geometry.GetGeometryType()\n\n if geo_type == ogr.wkbPolygon:\n return ogr.ForceToMultiPolygon(ogr_geometry)\n elif geo_type == ogr.wkbPoint:\n return ogr.ForceToMultiPoint(ogr_geometry)\n elif geo_type in [ogr.wkbLineString, ogr.wkbLinearRing]:\n return ogr.ForceToMultiLineString(ogr_geometry)\n else:\n return ogr_geometry", "def mesh_generation(coordinates):\n # Get the minimum and maximum for the latitudes\n min_latitude = np.min(coordinates[:, 0])\n max_latitude = np.max(coordinates[:, 0])\n # Get the minimum and maximum for the longitudes\n min_longitude = np.min(coordinates[:, 1])\n max_longitude = np.max(coordinates[:, 1])\n # Get the number of provided coordinates\n size = int(np.min([1e5, np.max([5e4, len(coordinates)])]))\n # Create an array of uniform-random points as a mesh\n mesh_1 = np.random.uniform(min_latitude, max_latitude, size)\n mesh_2 = np.random.uniform(min_longitude, max_longitude, size)\n mesh = np.vstack((mesh_1.flatten(), mesh_2.flatten())).T\n # Return the evenly-spaced mesh for the coordinates\n return mesh", "def get_polygon_points(starting_point, vector_seq):\n \n res=[[]]\n res[0] = starting_point\n curr_point = starting_point\n\n i=1\n\n while i<len(vector_seq):\n if are_parallel(vector_seq[i],vector_seq[i-1]):\n tmp = vector_seq[i]\n vector_seq[i-1][0]=vector_seq[i-1][0]+ tmp[0]\n vector_seq[i-1][1]=vector_seq[i-1][1]+ tmp[1]\n vector_seq.pop(i)\n else:\n i=i+1\n \n 
for x in vector_seq:\n x_coord = curr_point[0]+x[0]\n y_coord = curr_point[1]+x[1]\n curr_point=[x_coord, y_coord]\n res.append(curr_point)\n\n return res", "def polyfillaa(px, py, xrange=None, yrange=None, start_indices=None,\n area=False):\n if start_indices is None:\n if hasattr(px[0], '__len__'):\n single = False\n poly_ind = [0]\n count = 0\n ox, oy = px, py\n px, py = [], []\n for i in range(len(ox)):\n count += len(ox[i])\n poly_ind.append(count)\n px.extend(ox[i])\n py.extend(oy[i])\n poly_ind = np.array(poly_ind)\n else:\n single = True\n poly_ind = np.array([0, len(px)])\n else:\n poly_ind = np.array(start_indices, dtype=int)\n poly_ind = np.append(poly_ind, px.size)\n single = False\n\n if not isinstance(px, np.ndarray):\n px = np.array(px, dtype=float)\n py = np.array(py, dtype=float)\n\n if px.shape != py.shape:\n raise ValueError(\"px and py must be the same shape\")\n elif px.ndim != 1:\n raise ValueError(\"polygons must be flat arrays\")\n\n npoly = poly_ind[1:] - poly_ind[:-1]\n n = npoly.size\n minpoly = np.min(npoly)\n nbins = np.max(npoly) - minpoly + 1\n binned = (npoly - minpoly).astype(int)\n npoly_ind = np.arange(n)\n csr = csr_matrix(\n (npoly_ind, [binned, np.arange(n)]), shape=(nbins, n))\n\n areas = {} if area else None\n result = {}\n\n for i, put in enumerate(np.split(csr.data, csr.indptr[1:-1])):\n\n # number of vertices for each polygon in this group\n nvert = i + minpoly\n nshapes = put.size # number of nvert sided shapes in polygon list\n\n # take holds indices of vertices in px and py for each polygon\n take = np.repeat([poly_ind[put]], nvert, axis=0).T\n take += np.arange(nvert)\n\n # store the left most and lowest pixel covered by each polygon\n left = np.floor(np.min(px[take], axis=1)).astype(int)\n bottom = np.floor(np.min(py[take], axis=1)).astype(int)\n\n # nx and ny are the span of pixels covered in x/y directions\n nx = np.floor(np.max(px[take], axis=1)).astype(int) - left + 1\n ny = np.floor(np.max(py[take], axis=1)).astype(int) - bottom + 1\n\n # create cell grids\n ngy, ngx = ny.max(), nx.max()\n gy, gx = np.mgrid[:ngy, :ngx]\n gy, gx = gy.ravel(), gx.ravel()\n ng = gx.size\n\n # indices for raveled arrays\n inds = tuple(ind.ravel() for ind in np.indices((nshapes, nvert, ng)))\n\n # polygon vertices minus the lowest left pixel so we can\n # use gx, gy to perform faster vector operations.\n vx = px[take] - left[:, None]\n vy = py[take] - bottom[:, None]\n\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', RuntimeWarning)\n ux, uy = shift1(vx), shift1(vy)\n dx, dy = vx - ux, vy - uy\n mx, my = dy / dx, dx / dy\n\n minx = np.min([ux, vx], axis=0)[..., None]\n maxx = np.max([ux, vx], axis=0)[..., None]\n miny = np.min([uy, vy], axis=0)[..., None]\n maxy = np.max([uy, vy], axis=0)[..., None]\n\n # y coordinates at x grid lines (left edge of cell)\n cross_left_y = gx[None, None] - ux[..., None]\n cross_left_y *= mx[..., None]\n cross_left_y += uy[..., None]\n\n # x coordinates at y grid lines (bottom edge of cell)\n cross_bottom_x = gy[None, None] - uy[..., None]\n cross_bottom_x *= my[..., None]\n cross_bottom_x += ux[..., None]\n\n parallel_x = (dy == 0)[..., None] \\\n & (uy[..., None] == gy[None, None])\n if parallel_x.any():\n parallel_x &= (minx <= gx[None, None])\n parallel_x &= (gx[None, None] <= maxx)\n cross_bottom_x[parallel_x] = gx[inds[2][parallel_x.ravel()]]\n\n parallel_y = (dx == 0)[..., None] \\\n & (ux[..., None] == gx[None, None])\n if parallel_y.any():\n parallel_y &= (miny <= gy[None, None])\n parallel_y &= 
(gy[None, None] <= maxy)\n cross_left_y[parallel_y] = gy[inds[2][parallel_y.ravel()]]\n\n # Lines crossing bottom of cell (u -> v)\n valid_b_cross = gy[None, None] >= miny\n valid_b_cross &= gy[None, None] < maxy\n valid_x_cross = cross_bottom_x >= gx[None, None]\n valid_x_cross &= cross_bottom_x < gx[None, None] + 1\n valid_b_cross &= valid_x_cross\n\n # Lines crossing left of cell (u -> v)\n valid_l_cross = gx[None, None] >= minx\n valid_l_cross &= gx[None, None] < maxx\n valid_y_cross = cross_left_y >= gy[None, None]\n valid_y_cross &= cross_left_y < gy[None, None] + 1\n valid_l_cross &= valid_y_cross\n\n corner = cross_bottom_x == gx[None, None]\n corner &= cross_left_y == gy[None, None]\n corner |= ((gx[None, None] == ux[..., None])\n & (gy[None, None] == uy[..., None]))\n\n # valid_b_cross |= corner\n # valid_l_cross |= corner\n\n # Add any grid points inside polygon, not intersected by lines\n xlines = valid_b_cross | corner\n xlines = xlines.reshape(nshapes, nvert, ngy, ngx)\n grid_points = np.sum(xlines, axis=1)\n grid_points = np.roll(np.cumsum(grid_points, axis=2), 1, axis=2)\n grid_points %= 2\n grid_points[:, :, 0] = 0\n grid_points = grid_points.astype(bool).reshape(nshapes, ng)\n grid_points |= corner.any(axis=1)\n\n # Now all grid points (in or on the polygon) have been determined,\n # they should be distinguished from edges to avoid duplication.\n # Inside grid points cannot coincide with intersections, so we only\n # need to examine corners.\n valid_b_cross &= ~corner\n valid_l_cross &= ~corner\n\n # Finally, vertices located inside cell\n vertex_inside = vx[..., None] > gx[None, None]\n vertex_inside &= vx[..., None] < (gx[None, None] + 1)\n vertex_inside &= vy[..., None] > gy[None, None]\n vertex_inside &= vy[..., None] < (gy[None, None] + 1)\n\n # okay, so we now have everything we need:\n # - edges (bottom, left)\n # - inside points\n # - grid points\n\n # populate\n counter = np.zeros((nshapes, ng), dtype=int)\n sout = nshapes, (nvert * 4), ng\n polx = np.full(sout, np.nan) # maximum size\n poly = np.full(sout, np.nan) # maximum size\n\n # populate inside vertices\n if vertex_inside.any():\n ri = vertex_inside.ravel()\n itake = inds[0][ri], inds[1][ri], inds[2][ri]\n n_inside = np.cumsum(vertex_inside, axis=1) - 1\n vput = counter[itake[0], itake[2]]\n vput += n_inside[itake]\n polx[itake[0], vput, itake[2]] = vx[itake[0], itake[1]]\n poly[itake[0], vput, itake[2]] = vy[itake[0], itake[1]]\n counter[itake[0], itake[2]] += n_inside[itake] + 1\n\n # Grid points are so far calculated as the bottom-left of a cell.\n # This needs to be shared by neighbors to the west, south, and\n # south-west.\n if grid_points.any():\n # ri = np.repeat(gp_inside[:, None], nvert, axis=1).ravel()\n # itake = inds[0][ri], inds[1][ri], inds[2][ri]\n for dpx, dpy in itertools.product([0, 1], [0, 1]):\n if dpx == dpy == 0:\n valid = grid_points\n else:\n valid = grid_points & (gx[None] >= dpx) & (gy[None] >= dpy)\n if not valid.any(): # pragma: no cover\n continue\n\n idx = np.nonzero(valid)\n gp_ind = idx[1] - (dpy * ngx + dpx)\n vput = counter[idx[0], gp_ind]\n\n polx[idx[0], vput, gp_ind] = gx[idx[1]]\n poly[idx[0], vput, gp_ind] = gy[idx[1]]\n vput += 1\n counter[idx[0], gp_ind] = vput\n\n # Left edge crossings: shared by neighbor to left on it's right edge\n if valid_l_cross.any():\n for dpx in [0, 1]:\n if dpx == 1:\n valid = valid_l_cross & (gx[None, None] >= dpx)\n else:\n valid = valid_l_cross\n ncross = valid.cumsum(axis=1) - 1\n ri = valid.ravel()\n itake = inds[0][ri], inds[1][ri], 
inds[2][ri]\n gp_ind = itake[2] - dpx\n vput = counter[itake[0], gp_ind] + ncross[itake]\n polx[itake[0], vput, gp_ind] = gx[itake[2]]\n poly[itake[0], vput, gp_ind] = cross_left_y[itake]\n vput += 1\n counter[itake[0], gp_ind] = vput\n\n # Bottom edge crossings: shared by neighbor below on it's top edge\n if valid_b_cross.any():\n for dpy in [0, 1]:\n if dpy == 1:\n valid = valid_b_cross & (gy[None, None] >= dpy)\n else:\n valid = valid_b_cross\n ncross = valid.cumsum(axis=1) - 1\n ri = valid.ravel()\n itake = inds[0][ri], inds[1][ri], inds[2][ri]\n gp_ind = itake[2] - (dpy * ngx)\n vput = counter[itake[0], gp_ind] + ncross[itake]\n polx[itake[0], vput, gp_ind] = cross_bottom_x[itake]\n poly[itake[0], vput, gp_ind] = gy[itake[2]]\n vput += 1\n counter[itake[0], gp_ind] = vput\n\n # print(\"populate: %f\" % (t4 - t3))\n\n # Trim down the array as necessary and move coordinates off\n # the shared grid\n maxv = counter.max()\n polx, poly = polx[:, :maxv], poly[:, :maxv]\n polx += left[:, None, None]\n poly += bottom[:, None, None]\n gxout = left[..., None] + gx[None]\n gyout = bottom[..., None] + gy[None]\n\n keep = np.isfinite(polx)\n if xrange is not None:\n keep = np.logical_and(\n keep, np.greater_equal(gxout[:, None], xrange[0]), out=keep)\n keep = np.logical_and(\n keep, np.less(gxout[:, None], xrange[1]), out=keep)\n if yrange is not None:\n keep = np.logical_and(\n keep, np.greater_equal(gyout[:, None], yrange[0]), out=keep)\n keep = np.logical_and(\n keep, np.less(gyout[:, None], yrange[1]), out=keep)\n\n # print(\"normalize: %f\" % (t5 - t4))\n\n # note that COM needs to be done before filling in NaNs\n # We also do this to kill any bad values (usually repeated), that\n # managed to find there way to this stage.\n comx = bottleneck.nanmean(polx, axis=1)\n comy = bottleneck.nanmean(poly, axis=1)\n polx = bottleneck.push(polx, axis=1)\n poly = bottleneck.push(poly, axis=1)\n np.subtract(polx, comx[:, None], out=polx)\n np.subtract(poly, comy[:, None], out=poly)\n angle = np.arctan2(poly, polx)\n sorti = np.argsort(angle, axis=1)\n og = np.ogrid[:nshapes, :maxv, :ng]\n polx = polx[og[0], sorti, og[2]]\n poly = poly[og[0], sorti, og[2]]\n\n pixareas = (0.5 * np.abs(bottleneck.nansum(\n (polx * np.roll(poly, -1, axis=1))\n - (poly * np.roll(polx, -1, axis=1)), axis=1)))\n\n keep &= pixareas[:, None] != 0\n\n # print(\"areas: %f, %f\" % (t6 - t5, t6 - t1))\n\n mask = np.any(keep, axis=1)\n npixels = mask.sum(axis=1)\n minpix, maxpix = np.min(npixels), np.max(npixels) + 1\n npixbins = maxpix - minpix\n pixbins = (npixels - minpix).astype(int)\n pixind = np.arange(npixels.size)\n spix = csr_matrix((pixind, [pixbins, np.arange(npixels.size)]),\n shape=(npixbins, npixels.size))\n\n for pixi, putpix in enumerate(np.split(spix.data, spix.indptr[1:-1])):\n npix = pixi + minpix\n if npix == 0 or len(putpix) == 0: # pragma: no cover\n continue\n npolys = putpix.size\n takepix = mask[putpix]\n cellx = np.reshape(gxout[putpix][takepix], (npolys, npix))\n celly = np.reshape(gyout[putpix][takepix], (npolys, npix))\n cellxy = np.append(celly[:, :, None], cellx[:, :, None], axis=2)\n # this gives the cells overlapped by each polygon\n for polyind, cxy in zip(putpix, cellxy):\n result[put[polyind]] = cxy\n\n if area:\n aselect = np.reshape(pixareas[putpix][takepix], (npolys, npix))\n for polyind, pixarea in zip(putpix, aselect):\n areas[put[polyind]] = pixarea\n\n # print(\"storing results: %f, %f\" % (t7 - t6, t7 - t1))\n\n if single:\n if len(result) != 0:\n result = result[0]\n if area:\n areas = 
areas[0]\n else:\n result = np.empty((0, 2))\n if area:\n areas = np.empty(0)\n\n if not area:\n return result\n else:\n return result, areas", "def RegularPolygonPoints(n,c):\n coord = []\n for i in range(n):\n x = m.cos(2*m.pi*i/n)+c[0]\n y = m.sin(2*m.pi*i/n)+c[1]\n coord.append([x,y])\n return(coord)", "def fromVertices(cls,\n xp0, yp0, zp0, xp1, yp1, zp1,\n xp2, yp2, zp2, xp3, yp3, zp3,\n origin,\n group_index=None,\n reference=None):\n if len(xp0) == len(yp0) == len(zp0) == len(xp1) == len(yp1) == \\\n len(zp1) == len(xp2) == len(yp2) == len(zp2) == len(xp3) == \\\n len(yp3) == len(zp3):\n pass\n else:\n raise ShakeLibException('All vectors specifying quadrilateral '\n 'vertices must have the same length.')\n\n nq = len(xp0)\n if group_index is not None:\n if len(group_index) != nq:\n raise Exception(\n \"group_index must have same length as vertices.\")\n else:\n group_index = np.array(range(nq))\n\n xp0 = np.array(xp0, dtype='d')\n yp0 = np.array(yp0, dtype='d')\n zp0 = np.array(zp0, dtype='d')\n xp1 = np.array(xp1, dtype='d')\n yp1 = np.array(yp1, dtype='d')\n zp1 = np.array(zp1, dtype='d')\n xp2 = np.array(xp2, dtype='d')\n yp2 = np.array(yp2, dtype='d')\n zp2 = np.array(zp2, dtype='d')\n xp3 = np.array(xp3, dtype='d')\n yp3 = np.array(yp3, dtype='d')\n zp3 = np.array(zp3, dtype='d')\n\n #----------------------------------------------------------------------\n # Create GeoJSON object\n #----------------------------------------------------------------------\n\n coords = []\n u_groups = np.unique(group_index)\n n_groups = len(u_groups)\n for i in range(n_groups):\n ind = np.where(u_groups[i] == group_index)[0]\n lons = np.concatenate(\n [xp0[ind[0]].reshape((1,)),\n xp1[ind],\n xp2[ind][::-1],\n xp3[ind][::-1][-1].reshape((1,)),\n xp0[ind[0]].reshape((1,))\n ])\n lats = np.concatenate(\n [yp0[ind[0]].reshape((1,)),\n yp1[ind],\n yp2[ind][::-1],\n yp3[ind][::-1][-1].reshape((1,)),\n yp0[ind[0]].reshape((1,))\n ])\n deps = np.concatenate(\n [zp0[ind[0]].reshape((1,)),\n zp1[ind],\n zp2[ind][::-1],\n zp3[ind][::-1][-1].reshape((1,)),\n zp0[ind[0]].reshape((1,))\n ])\n\n poly = []\n for lon, lat, dep in zip(lons, lats, deps):\n poly.append([lon, lat, dep])\n coords.append(poly)\n\n d = {\"type\": \"FeatureCollection\",\n \"metadata\": {},\n \"features\": [{\n \"type\": \"Feature\",\n \"properties\": {\n \"rupture type\": \"rupture extent\",\n \"reference\": reference,\n },\n \"geometry\": {\n \"type\": \"MultiPolygon\",\n \"coordinates\": [coords]\n }\n }]}\n\n # Add origin information to metadata\n odict = origin.__dict__\n for k, v in odict.items():\n if isinstance(v, HistoricTime):\n d['metadata'][k] = v.strftime('%Y-%m-%dT%H:%M:%SZ')\n else:\n d['metadata'][k] = v\n if hasattr(origin, 'id'):\n d['metadata']['eventid'] = origin.id\n\n return cls(d, origin)", "def _dense_point_array(self, geoms, distance, index):\n # interpolate lines to represent them as points for Voronoi\n points = []\n ids = []\n\n if pygeos.get_type_id(geoms[0]) not in [1, 2, 5]:\n lines = pygeos.boundary(geoms)\n else:\n lines = geoms\n lengths = pygeos.length(lines)\n for ix, line, length in zip(index, lines, lengths):\n if length > distance: # some polygons might have collapsed\n pts = pygeos.line_interpolate_point(\n line,\n np.linspace(0.1, length - 0.1, num=int((length - 0.1) // distance)),\n ) # .1 offset to keep a gap between two segments\n points.append(pygeos.get_coordinates(pts))\n ids += [ix] * len(pts)\n\n points = np.vstack(points)\n\n return points, ids\n\n # here we might also want to append 
original coordinates of each line\n # to get a higher precision on the corners", "def make_polygon(*coords):\n global GEOMETRY_SURF, POLYGONS,col\n if len(coords) < 3:\n print(\"Warning: Invalid polygon passed, ignoring...\")\n return\n start = coords[0]\n prev = coords[0]\n for coord in coords:\n POLYGONS |= {coord}\n line = Boundary(prev[0],prev[1],coord[0],coord[1]) #add segment to WALL list\n prev = coord\n line = Boundary(prev[0], prev[1],start[0],start[1])\n #now draw poly\n pygame.draw.polygon(GEOMETRY_SURF,col[\"SHAPECOL\"], coords)\n return", "def SplitIntoPolygons(shape):\n ret = []\n this_polygon = []\n restart_indices = set(shape.parts)\n for idx, point in enumerate(shape.points):\n if idx in restart_indices:\n if this_polygon:\n ret.append(this_polygon)\n this_polygon = [[point[0], point[1]]]\n else:\n this_polygon.append([point[0], point[1]])\n if this_polygon:\n ret.append(this_polygon)\n return ret", "def projectPoints(self, points):\n return [self.projectPoint(point) for point in points]", "def containsManyPos(self, aerial_pos_list):\n # Get boundary points\n ordered_pts = self.boundary_pts.order_by('order')\n path_pts = [[wpt.position.gps_position.latitude,\n wpt.position.gps_position.longitude]\n for wpt in ordered_pts]\n # First check enough points to define a polygon\n if len(path_pts) < 3:\n return [False] * len(aerial_pos_list)\n\n # Create path to use for testing polygon inclusion\n path_pts.append(path_pts[0])\n path = mplpath.Path(np.array(path_pts))\n\n # Test each aerial position for altitude\n results = list()\n for aerial_pos in aerial_pos_list:\n # Check altitude bounds\n alt = aerial_pos.altitude_msl\n altitude_check = (alt <= self.altitude_msl_max\n and alt >= self.altitude_msl_min)\n results.append(altitude_check)\n\n # Create a list of positions to test whether inside polygon\n polygon_test_point_ids = [cur_id\n for cur_id in range(len(aerial_pos_list))\n if results[cur_id]]\n if len(polygon_test_point_ids) == 0:\n return results\n polygon_test_points = [[aerial_pos_list[cur_id].gps_position.latitude,\n aerial_pos_list[cur_id].gps_position.longitude]\n for cur_id in polygon_test_point_ids]\n\n # Test each point for inside polygon\n polygon_test_results = path.contains_points(\n np.array(polygon_test_points))\n for test_id in range(len(polygon_test_point_ids)):\n cur_id = polygon_test_point_ids[test_id]\n results[cur_id] = (polygon_test_results[test_id] == True)\n\n return results", "def get_polys(m_data: List[Dict[str, Any]], poly_tag: str) -> List[FpPoly]:\n polys = get_all_dicts_by_key(m_data, poly_tag)\n res_polys: List[FpPoly] = list()\n if polys:\n for poly in polys:\n poly_data = poly[poly_tag]\n layer: Layer = convert_to_layers(get_dict_by_key(poly_data, 'layer')['layer'])[0]\n width: str = get_dict_by_key(poly_data, 'width')['width']\n pts_data: List[Dict[str, Any]] = get_dict_by_key(poly_data, 'pts')['pts']\n points: List[Coords] = list()\n for p in pts_data:\n point = [p['xy'][0], str(-1*float(p['xy'][1]))]\n points.append(point)\n res_polys.append(FpPoly(layer=layer, width=width, points=points))\n return res_polys", "def _get_unstructured_collection(vlon, vlat, xm, vmin, vmax, basemap_object=None):\n\n #init\n Path = mpath.Path\n patches = []\n pdata = xm[0, :] * 1. 
# full list of data\n vmsk = np.ones_like(pdata).astype('bool') # mask to indicate which cells contain valid data\n\n for i in xrange(x.ncell):\n if np.any(vlon[i, :]) > 180.: # todo fix this properly !!!\n vmsk[i] = False\n continue\n if basemap_object is None:\n xv = vlon[i, :]\n yv = vlat[i, :]\n else:\n xv, yv = basemap_object(vlon[i, :], vlat[i, :]) # todo: how to properly deal with boundary problem ????\n if (vlon[i, :].min() < -100.) & (vlon[i, :].max() > 100.): # todo\n #... triangles across the boundaries of the projection are a problem\n # ... solution: generate two triangles ! TODO\n vmsk[i] = False\n continue\n\n verts = np.asarray([xv, yv]).T\n\n #--- specify how vertices are interconnected (here simple connection by lines)\n codes = [Path.MOVETO, Path.LINETO, Path.LINETO]\n\n #--- construct object and append to library of objects ---\n path = mpath.Path(verts, codes, closed=True)\n patches.append(mpatches.PathPatch(path))\n\n pdata = np.asarray(pdata)\n\n if vmin is None:\n vmin = pdata.min()\n if vmax is None:\n vmax = pdata.max()\n\n norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)\n collection = PatchCollection(patches, cmap=cmap, norm=norm, alpha=1., match_original=False, edgecolors='grey') # construct library of all objects\n collection.set_array(pdata[vmsk]) # assign data values here\n\n return collection", "def CreateReleasePoints(points_on_longitude, points_on_latitude, grids):\n \n ReleasePointsLon = []\n ReleasePointsLat = []\n \n GridsCW_array = np.asarray(grids[['min_lon', 'min_lat', 'max_lon', 'max_lat']])\n \n for i in range(len(GridsCW_array)):\n \n lon_space = np.linspace(GridsCW_array[i,0], GridsCW_array[i,2], num = points_on_longitude+2 )\n lat_space = np.linspace(GridsCW_array[i,1], GridsCW_array[i,3], num = points_on_latitude+2 )\n \n \n lon_space_cor = lon_space[1:-1]\n lat_space_cor = lat_space[1:-1]\n \n for j in lon_space_cor:\n for k in lat_space_cor:\n \n ReleasePointsLon.append(j)\n ReleasePointsLat.append(k)\n \n return ReleasePointsLon, ReleasePointsLat", "def sampleFromMultiGrid(multigrid, layer, xypoints):\n\n if layer not in multigrid.getLayerNames():\n raise Exception('Layer %s not found in grid' % layer)\n hazgrid = multigrid.getLayer(layer)\n return sampleFromGrid(hazgrid, xypoints)", "def get_vertices_list(feature):\n return [\n point\n for part in Geometry.get_multipolygon(feature)\n for ring in part\n for point in ring[0:-1]\n ]", "def m1_make_middle_shape_points_list(tx, ty, m1_info, SEN_info):\n \"\"\"\n 1 Get information from m1_info & SEN_info\n \"\"\"\n x_m1 = m1_info[0]\n y_m1 = m1_info[1]\n z_m = m1_info[2]\n\n m1_points = m1_info[3]\n\n m1_p0 = m1_points[0]\n m1_p1 = m1_points[1]\n m1_p2 = m1_points[2]\n m1_p3 = m1_points[3]\n\n w_sen = SEN_info[0]\n n_w_sen = SEN_info[1]\n h_sen = SEN_info[2]\n t_sen = SEN_info[3]\n u_n = SEN_info[4]\n l_n = SEN_info[5]\n set = SEN_info[6]\n u_offset = SEN_info[7]\n l_offset = SEN_info[8]\n\n \"\"\"\n 2 Make lists.\n middle_shape_upper_left_row list\n middle_shape_upper_right_row list\n\n middle_shape_lower_left_row list\n middle_shape_lower_right_row list\n \"\"\"\n # upper side\n middle_shape_upper_left_row = []\n middle_shape_upper_right_row = []\n\n for i in range(u_n):\n # left row\n ix = tx + t_sen\n iy = ty + (i * u_offset + set) + 10\n\n p0, p1, p2, p3, p4 = Y_middle_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n left_points = [p2, p1, p4, p3]\n middle_shape_upper_left_row.extend((left_points))\n\n for i in range(u_n - 1, -1, -1):\n # right row\n ix = tx + (x_m1 - t_sen)\n iy = ty + (i * 
u_offset + set) + 10\n\n p0, p1, p2, p3, p4 = Y_middle_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n right_points = [p4, p3, p2, p1]\n middle_shape_upper_right_row.extend(right_points)\n\n # lower side\n middle_shape_lower_left_row = []\n middle_shape_lower_right_row = []\n\n for i in range(l_n - 1, -1, -1):\n # left row\n ix = tx + t_sen\n iy = ty - (i * l_offset + set) - 10\n\n p0, p1, p2, p3, p4 = Y_middle_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n left_points = [p2, p1, p4, p3]\n middle_shape_lower_left_row.extend((left_points))\n\n for i in range(l_n):\n # right row\n ix = tx + (x_m1 - t_sen)\n iy = ty - (i * l_offset + set) - 10\n\n p0, p1, p2, p3, p4 = Y_middle_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n right_points = [p4, p3, p2, p1]\n middle_shape_lower_right_row.extend(right_points)\n\n middle_shape_upper = [middle_shape_upper_left_row, middle_shape_upper_right_row]\n middle_shape_lower = [middle_shape_lower_left_row, middle_shape_lower_right_row]\n\n return middle_shape_upper, middle_shape_lower", "def generate_mesh(\n poly_coords: np.ndarray,\n hole_coords: Optional[List[np.ndarray]] = None,\n min_points: Optional[int] = None,\n max_edge_length: Optional[float] = None,\n convex_hull: bool = False,\n boundary: Optional[np.ndarray] = None,\n preserve_boundary: bool = False,\n min_angle: float = 32.5,\n **kwargs,\n) -> Tuple[np.ndarray, np.ndarray]:\n poly_coords = ensure_unique(poly_coords)\n if hole_coords is None:\n hole_coords = []\n hole_coords = [ensure_unique(coords) for coords in hole_coords]\n # Facets is a shape (m, 2) array of edge indices.\n # coords[facets] is a shape (m, 2, 2) array of edge coordinates:\n # [(x0, y0), (x1, y1)]\n coords = np.concatenate([poly_coords] + hole_coords, axis=0)\n xmin = coords[:, 0].min()\n dx = np.ptp(coords[:, 0])\n ymin = coords[:, 1].min()\n dy = np.ptp(coords[:, 1])\n r0 = np.array([[xmin, ymin]]) + np.array([[dx, dy]]) / 2\n # Center the coordinates at (0, 0) to avoid floating point issues.\n coords = coords - r0\n indices = np.arange(len(poly_coords), dtype=int)\n if convex_hull:\n if boundary is not None:\n raise ValueError(\n \"Cannot have both boundary is not None and convex_hull = True.\"\n )\n facets = spatial.ConvexHull(coords).simplices\n else:\n if boundary is not None:\n boundary = list(map(tuple, ensure_unique(boundary - r0)))\n indices = [i for i in indices if tuple(coords[i]) in boundary]\n facets = np.array([indices, np.roll(indices, -1)]).T\n # Create facets for the holes.\n for hole in hole_coords:\n hole_indices = np.arange(\n indices[-1] + 1, indices[-1] + 1 + len(hole), dtype=int\n )\n hole_facets = np.array([hole_indices, np.roll(hole_indices, -1)]).T\n indices = np.concatenate([indices, hole_indices], axis=0)\n facets = np.concatenate([facets, hole_facets], axis=0)\n\n mesh_info = triangle.MeshInfo()\n mesh_info.set_points(coords)\n mesh_info.set_facets(facets)\n if hole_coords:\n # Triangle allows you to set holes by specifying a single point\n # that lies in each hole. 
Here we use the centroid of the hole.\n holes = [\n np.array(Polygon(hole).centroid.coords[0]) - r0.squeeze()\n for hole in hole_coords\n ]\n mesh_info.set_holes(holes)\n\n kwargs = kwargs.copy()\n kwargs[\"allow_boundary_steiner\"] = not preserve_boundary\n if \"min_angle\" not in kwargs:\n kwargs[\"min_angle\"] = min_angle\n\n mesh = triangle.build(mesh_info=mesh_info, **kwargs)\n points = np.array(mesh.points) + r0\n triangles = np.array(mesh.elements)\n if min_points is None and (max_edge_length is None or max_edge_length <= 0):\n return points, triangles\n\n kwargs[\"max_volume\"] = dx * dy / 100\n i = 1\n if min_points is None:\n min_points = 0\n if max_edge_length is None or max_edge_length <= 0:\n max_edge_length = np.inf\n max_length = get_edge_lengths(points, triangles).max()\n while (len(points) < min_points) or (max_length > max_edge_length):\n mesh = triangle.build(mesh_info=mesh_info, **kwargs)\n points = np.array(mesh.points) + r0\n triangles = np.array(mesh.elements)\n edges, is_boundary = get_edges(triangles)\n if preserve_boundary:\n # Only constrain the length of interior edges, i.e. edges not on the boundary.\n edges = edges[~is_boundary]\n edge_lengths = np.linalg.norm(np.diff(points[edges], axis=1), axis=2)\n max_length = edge_lengths.max()\n logger.debug(\n f\"Iteration {i}: Made mesh with {len(points)} points and \"\n f\"{len(triangles)} triangles with maximum interior edge length: \"\n f\"{max_length:.2e}. Target maximum edge length: {max_edge_length:.2e}.\"\n )\n if np.isfinite(max_edge_length):\n kwargs[\"max_volume\"] *= min(0.98, np.sqrt(max_edge_length / max_length))\n else:\n kwargs[\"max_volume\"] *= 0.98\n i += 1\n return points, triangles", "def give_polygon(vertices, points):\n polygon = np.zeros((len(vertices), 2))\n for i, vertex in enumerate(vertices):\n polygon[i] = points[vertex]\n # End point of a polygon equals to start point\n polygon = polygon.tolist()\n if polygon[-1] != polygon[0]:\n polygon.append(polygon[0])\n return polygon", "def sampleShapeFile(shapefile, xypoints, attribute):\n\n xmin = np.min(xypoints[:, 0])\n xmax = np.max(xypoints[:, 0])\n ymin = np.min(xypoints[:, 1])\n ymax = np.max(xypoints[:, 1])\n #xypoints should be projected back to lat/lon\n f = fiona.collection(shapefile, 'r')\n tshapes = list(f.items(bbox=(xmin, ymin, xmax, ymax)))\n shapes = []\n for fid, shape1 in tshapes:\n shapes.append(shape1)\n f.close()\n return sampleShapes(shapes, xypoints, attribute)", "def polygons(self):\n if self.type == 'Polygon':\n polygons = [self._geojson['geometry']['coordinates']]\n elif self.type == 'MultiPolygon':\n polygons = self._geojson['geometry']['coordinates']\n return [ [ [_lat_lons_from_geojson(s) for\n s in ring ] for\n ring in polygon] for\n polygon in polygons]", "def point_location(tri, p): \n simplex_index = tri.find_simplex(p)\n bc = []\n for id_, point in zip(simplex_index, p):\n # Calculate the two first barycentric coordinates for the relevant\n # simplex\n b = tri.transform[id_, :2].dot(point-tri.transform[id_, 2])\n bc.append(np.c_[np.atleast_2d(b), 1-b.sum()])\n # Create the full array and squeeze the shit out of it\n bc = np.array(bc).squeeze()\n return simplex_index, bc", "def multisegment_in_polygon(multisegment: _Multisegment,\n polygon: _Polygon,\n *,\n context: _Optional[_Context] = None) -> _Relation:\n return _polygon.relate_multisegment(\n polygon, multisegment,\n _get_context() if context is None else context\n )", "def pinp_multiple_crossing(points, edges, include_edges = True):\n crossing_number = []\n 
initialized = False\n for edge in edges:\n d_y, d_x, b = line_equation(edge)\n index = -1\n for point in points:\n index += 1\n if not initialized:\n crossing_number.append([0, False])\n elif crossing_number[index][1]:\n continue\n if include_edges and point_on_line(point, edge, d_y, d_x, b):\n # If the point is on the edge, then we know it is in the polygon.\n crossing_number[index] = [1, True]\n continue\n if is_horizontal(edge):\n # We ignore horizontal edges (unless points are on them, as above).\n continue\n if intersects_right(point, edge, d_y, d_x, b, positive_slope(edge), include_edges):\n crossing_number[index][0] += 1\n initialized = True\n index = 0\n for point in points:\n if crossing_number[index] % 2 == 1:\n yield point", "def polygonpts(nSides, radius=1.0):\n\treturn [[cos(theta)*radius, sin(theta)*radius] for theta in frange(0, twopi, nSides+1)[:-1] ]", "def m1_make_upper_shape_points_list(tx, ty, m1_info, SEN_info):\n \"\"\"\n 1 Get information from m1_info & SEN_info\n \"\"\"\n x_m1 = m1_info[0]\n y_m1 = m1_info[1]\n z_m = m1_info[2]\n\n m1_points = m1_info[3]\n\n m1_p0 = m1_points[0]\n m1_p1 = m1_points[1]\n m1_p2 = m1_points[2]\n m1_p3 = m1_points[3]\n\n w_sen = SEN_info[0]\n n_w_sen = SEN_info[1]\n h_sen = SEN_info[2]\n t_sen = SEN_info[3]\n u_n = SEN_info[4]\n l_n = SEN_info[5]\n set = SEN_info[6]\n u_offset = SEN_info[7]\n l_offset = SEN_info[8]\n\n \"\"\"\n 2 Make lists.\n upper_shape_upper_left_row list\n upper_shape_upper_right_row list\n\n upper_shape_lower_left_row list\n upper_shape_lower_right_row list\n \"\"\"\n # upper side\n upper_shape_upper_left_row = []\n upper_shape_upper_right_row = []\n\n for i in range(u_n):\n # left row\n ix = tx + t_sen\n iy = ty + (i * u_offset + set) + 10 # have to \"+\" something now its magic number\n\n p0, p1, p2, p3, p4, p5, p6, p7, p8 = Y_upper_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n left_points = [p1, p2, p3, p4, p5, p6, p7, p8]\n upper_shape_upper_left_row.extend((left_points))\n\n for i in range(u_n - 1, -1, -1):\n # right row\n ix = tx + (x_m1 - t_sen)\n iy = ty + (i * u_offset + set) + 10\n\n p0, p1, p2, p3, p4, p5, p6, p7, p8 = Y_upper_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n right_points = [p5, p6, p7, p8, p1, p2, p3, p4]\n upper_shape_upper_right_row.extend(right_points)\n\n # lower side\n upper_shape_lower_left_row = []\n upper_shape_lower_right_row = []\n\n for i in range(l_n -1, -1, -1):\n # left row\n ix = tx + t_sen\n iy = ty - (i * l_offset + set) - 10 # have to \"-\" something now its magic number\n\n p0, p1, p2, p3, p4, p5, p6, p7, p8 = Y_upper_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n left_points = [p1, p2, p3, p4, p5, p6, p7, p8]\n upper_shape_lower_left_row.extend((left_points))\n\n for i in range(l_n):\n # right row\n ix = tx + (x_m1 - t_sen)\n iy = ty - (i * l_offset + set) - 10\n\n p0, p1, p2, p3, p4, p5, p6, p7, p8 = Y_upper_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n right_points = [p5, p6, p7, p8, p1, p2, p3, p4]\n upper_shape_lower_right_row.extend(right_points)\n\n upper_shape_upper = [upper_shape_upper_left_row, upper_shape_upper_right_row]\n upper_shape_lower = [upper_shape_lower_left_row, upper_shape_lower_right_row]\n\n return upper_shape_upper, upper_shape_lower", "def getTilingSplitCoordsMP(args):\n (metadata, index) = args\n return getTilingSplitCoordsTuple(*metadata, index)", "def points(self):\n p = []\n for v in self.iter():\n p.append((v.x, v.y))\n return p", "def feature_coords(features):\n coords_list = []\n for feature in features:\n coord_start = 
feature.location.nofuzzy_start\n coord_end = feature.location.nofuzzy_end\n coord_pair = (coord_start, coord_end)\n coords_list.append(coord_pair)\n ## consider adding some info to the log\n return coords_list", "def m4_make_middle_shape_points_list(tx, ty, m4_info, SEN_info):\n \"\"\"\n 1 Get information from m1_info & SEN_info\n \"\"\"\n x_m4 = m4_info[0]\n y_m4 = m4_info[1]\n z_m = m4_info[2]\n\n m4_points = m4_info[3]\n\n m4_p0 = m4_points[0]\n m4_p1 = m4_points[1]\n m4_p2 = m4_points[2]\n m4_p3 = m4_points[3]\n\n w_sen = SEN_info[0]\n n_w_sen = SEN_info[1]\n h_sen = SEN_info[2]\n t_sen = SEN_info[3]\n u_n = SEN_info[4]\n l_n = SEN_info[5]\n set = SEN_info[6]\n u_offset = SEN_info[7]\n l_offset = SEN_info[8]\n\n \"\"\"\n 2 Make lists.\n middle_shape_upper_left_row list\n middle_shape_upper_right_row list\n\n middle_shape_lower_left_row list\n middle_shape_lower_right_row list\n \"\"\"\n # upper side\n middle_shape_upper_left_row = []\n middle_shape_upper_right_row = []\n\n for i in range(u_n - 1, -1, -1):\n # left row\n ix = tx - (x_m4 - t_sen)\n iy = ty + (i * u_offset + set) + 10\n\n p0, p1, p2, p3, p4 = Y_middle_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n left_points = [p3, p4, p1, p2]\n middle_shape_upper_left_row.extend((left_points))\n\n for i in range(u_n):\n # right row\n ix = tx - t_sen\n iy = ty + (i * u_offset + set) + 10\n\n p0, p1, p2, p3, p4 = Y_middle_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n right_points = [p1, p2, p3, p4]\n middle_shape_upper_right_row.extend(right_points)\n\n # lower side\n middle_shape_lower_left_row = []\n middle_shape_lower_right_row = []\n\n for i in range(l_n):\n # left row\n ix = tx - (x_m4 - t_sen)\n iy = ty - (i * l_offset + set) - 10\n\n p0, p1, p2, p3, p4 = Y_middle_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n left_points = [p3, p4, p1, p2]\n middle_shape_lower_left_row.extend((left_points))\n\n for i in range(l_n - 1, -1, -1):\n # right row\n ix = tx - t_sen\n iy = ty - (i * l_offset + set) - 10\n\n p0, p1, p2, p3, p4 = Y_middle_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n right_points = [p1, p2, p3, p4]\n middle_shape_lower_right_row.extend(right_points)\n\n middle_shape_upper = [middle_shape_upper_left_row, middle_shape_upper_right_row]\n middle_shape_lower = [middle_shape_lower_left_row, middle_shape_lower_right_row]\n\n return middle_shape_upper, middle_shape_lower", "def pristine_coords_to_objects(list_of_coords):\n list_of_objects = []\n for element in range(len(list_of_coords)):\n list_of_objects.append(Atom(element, \"CX\", \"GGG\", element, list_of_coords[element][0], list_of_coords[element][1], list_of_coords[element][2]))\n return list_of_objects", "def read_polygon_shapefile(filename):\n result = cpp_read_polygon_shapefile(filename)\n return (\n DataFrame({\"f_pos\": result[0], \"r_pos\": result[1]}),\n DataFrame({\"x\": result[2], \"y\": result[3]}),\n )", "def boundary_vertices(points: np.ndarray, triangles: np.ndarray) -> np.ndarray:\n tri = Triangulation(points[:, 0], points[:, 1], triangles)\n boundary_edges = set()\n for i, neighbors in enumerate(tri.neighbors):\n for k in range(3):\n if neighbors[k] == -1:\n boundary_edges.add((triangles[i, k], triangles[i, (k + 1) % 3]))\n edges = MultiLineString([points[edge, :] for edge in boundary_edges])\n polygons = list(polygonize(edges))\n assert len(polygons) == 1, polygons\n polygon = orient(polygons[0])\n points_list = [tuple(xy) for xy in points]\n indices = np.array([points_list.index(xy) for xy in polygon.exterior.coords])\n return indices[:-1]", "def 
get_intersect(pair1, pair2):\n # calculate the homogeneous coords\n tmp = np.vstack((pair1, pair2))\n h = np.hstack((tmp, np.ones((4, 1))))\n\n # line through each pair of points\n l1 = np.cross(h[0], h[1])\n l2 = np.cross(h[2], h[3])\n\n # get the intersect\n x, y, z = np.cross(l1, l2)\n x /= z\n y /= z\n return x, y", "def getProjectedShapes(shapes, xmin, xmax, ymin, ymax):\n latmiddle = ymin + (ymax-ymin)/2.0\n lonmiddle = xmin + (xmax-xmin)/2.0\n projstr = '+proj=ortho +datum=WGS84 +lat_0=%.4f +lon_0=%.4f +x_0=0.0 +y_0=0.0' % (latmiddle, lonmiddle)\n proj = pyproj.Proj(projparams=projstr)\n project = partial(\n pyproj.transform,\n pyproj.Proj(proj='latlong', datum='WGS84'),\n proj)\n\n pshapes = []\n for tshape in shapes:\n if tshape['geometry']['type'] == 'Polygon':\n pshapegeo = shape(tshape['geometry'])\n else:\n pshapegeo = shape(tshape['geometry'])\n pshape = transform(project, pshapegeo)\n pshapes.append(pshape) # assuming here that these are simple polygons\n\n return (pshapes, proj)", "def xyz_points(packet):\n if not isinstance(packet, tuple):\n packet = unpack(packet)\n\n x = []\n y = []\n z = []\n\n for b in range(AZIMUTH_BLOCK_COUNT):\n block = azimuth_block(b, packet)\n\n if not azimuth_valid(block):\n continue\n\n for c in range(CHANNEL_BLOCK_COUNT):\n point = xyz_point(c, block)\n x.append(point[0])\n y.append(point[1])\n z.append(point[2])\n return x, y, z", "def grid_coordinates(points: np.array, dtype = np.uint16) -> np.array:\n xmin = np.min(points[:, 0])\n xmax = np.max(points[:, 0]) + 1\n ymin = np.min(points[:, 1])\n ymax = np.max(points[:, 1]) + 1\n return np.asarray([(x, y) for y in range(ymin, ymax)\n for x in range(xmin, xmax)], dtype = dtype)", "def validate_coordinates_input(points: tuple) -> None:\n\n for coordinate in points:\n if not isinstance(coordinate, tuple):\n raise InvalidGroundValueError(\n f\"Object must be a tuple\"\n f\" with format like (1, 2), not {coordinate}\"\n )", "def create_inner_tri(point, v1, v2, v3):\n return [(point, v1, v2), (point, v1, v3), (point, v2, v3)]", "def make_coordinate_combinations(lat=None, lon=None, alt=None, time=None):\n\n # make the 1D coordinates\n if lat is None:\n lat = ArrayCoordinates1d([0, 1, 2], name=\"lat\")\n if lon is None:\n lon = ArrayCoordinates1d([2, 4, 6], name=\"lon\")\n if alt is None:\n alt = ArrayCoordinates1d([6, 9, 12], name=\"alt\")\n if time is None:\n time = ArrayCoordinates1d([\"2018-01-01\", \"2018-02-01\", \"2018-03-01\"], name=\"time\")\n\n d = dict([(\"lat\", lat), (\"lon\", lon), (\"alt\", alt), (\"tim\", time)])\n\n dims_list = get_dims_list()\n\n # make the stacked coordinates\n for dim in [dim for dims in dims_list for dim in dims if \"_\" in dim]:\n cs = [d[k] for k in dim.split(\"_\")]\n if any(c.size != cs[0].size for c in cs):\n continue # can't stack these\n d[dim] = StackedCoordinates(cs)\n\n # make the ND coordinates\n coord_collection = OrderedDict()\n for dims in dims_list:\n if any(dim not in d for dim in dims):\n continue\n coord_collection[dims] = Coordinates([d[dim] for dim in dims])\n return coord_collection", "def recombine(uniques, intersections):\n polygons = []\n for i,u in enumerate(uniques):\n try:\n segs = [seg for seg in u]\n except TypeError: # single seg\n if u is None:\n segs = []\n else:\n segs = [u,]\n\n \n segs.extend([p for p in intersections[i] if p is not None])\n segs.extend([p[i] for p in intersections if p[i] is not None]) # transpose, get the lower triangle\n merged = shapely.ops.linemerge(segs)\n print(\"Merging poly %i with %s 
segments\"%(i,len(segs)))\n if type(merged) is not shapely.geometry.LineString:\n for seg in segs:\n plt.plot(seg.xy[0], seg.xy[1])\n plt.show()\n \n assert type(merged) is shapely.geometry.LineString\n polygons.append(shapely.geometry.Polygon(merged))\n return polygons", "def get_spec_points(spectre_file):\n observation_id = list(spectre_file['element_data.vol'].keys())[0]\n coords_dict = dict(spectre_file['element_data.vol'][observation_id])\n\n components = ['InertialCoordinates_x', 'InertialCoordinates_y', 'InertialCoordinates_z']\n dim = len(components)\n\n coords = [[], [], []]\n\n for i,component in enumerate(components):\n coords[i] = np.array(coords_dict[component])\n coords = np.asarray(coords)\n return np.transpose(coords)\n # return np.transpose(np.array([np.concatenate(x) for x in coords]))", "def create_coords_medium(ph):\n # Min: 8, max 12\n for start_row in xrange(ph.pizza.shape[0]):\n for start_col in xrange(ph.pizza.shape[1]-2*ph.min_ing_per_slice+1):\n # First scenario\n for i in xrange(ph.min_ing_per_slice*2, ph.max_cells_per_slice+1):\n end_row = start_row + 1\n end_col = start_col + i\n yield (start_row, start_col, end_row, end_col)\n yield (start_row, start_col, end_col, end_row)\n\n for start_row in xrange(ph.pizza.shape[0]-1):\n for start_col in xrange(ph.pizza.shape[1]-3):\n # Second scenario\n for i in xrange(ph.min_ing_per_slice, ph.min_ing_per_slice+3):\n end_row = start_row + 2\n end_col = start_col + i\n yield (start_row, start_col, end_row, end_col)\n yield (start_row, start_col, end_col, end_row)\n\n for start_row in xrange(ph.pizza.shape[0] - 2):\n for start_col in xrange(ph.pizza.shape[1] - 2):\n # Third scenario\n for i in xrange(3, 5):\n end_row = start_row + 3\n end_col = start_col + i\n yield (start_row, start_col, end_row, end_col)\n yield (start_row, start_col, end_col, end_row)", "def create_points(number): \n\n # generate x and y coordinates:\n x = np.random.permutation(2*number)[:number] - number\n y = np.random.permutation(2*number)[:number] - number\n\n points = [ { 0 : float(x[i]), 1 : float(y[i]), \"index\" : i} for i in range(len(x)) ]\n\n return points\n\n # generate points as coordinate pairs of floats.\n # return zip(map(float,x),map(float,y))" ]
[ "0.68716544", "0.65765846", "0.6421751", "0.6229348", "0.6228022", "0.62237096", "0.61734176", "0.6153338", "0.59607905", "0.58115107", "0.58115107", "0.57220775", "0.5716397", "0.5716397", "0.5684545", "0.5646158", "0.56197923", "0.5619396", "0.55598253", "0.55252755", "0.5508807", "0.548365", "0.54707944", "0.5456796", "0.5414509", "0.54103684", "0.54082996", "0.5380867", "0.53775364", "0.53670734", "0.53451717", "0.5336111", "0.5319866", "0.5319457", "0.53095436", "0.53089046", "0.5294391", "0.52910286", "0.52823544", "0.52756834", "0.5273975", "0.52607626", "0.525161", "0.5242859", "0.52417916", "0.5235459", "0.522361", "0.5221518", "0.5215482", "0.5210684", "0.52097964", "0.51976943", "0.51948214", "0.51876855", "0.51848114", "0.51672834", "0.5155106", "0.5150907", "0.5149581", "0.514528", "0.51151794", "0.5100447", "0.509871", "0.50975263", "0.50941175", "0.5088574", "0.50862724", "0.50806975", "0.50735044", "0.50596017", "0.50556445", "0.50528026", "0.50476205", "0.50476164", "0.50433445", "0.50379527", "0.5030001", "0.5028564", "0.50274086", "0.50151134", "0.50109947", "0.50061744", "0.5005382", "0.4985987", "0.49799535", "0.49794492", "0.4978705", "0.49736124", "0.4967608", "0.49654594", "0.4964541", "0.4958735", "0.49550277", "0.49459034", "0.49421552", "0.49418733", "0.4935277", "0.49347517", "0.49282822", "0.49261823" ]
0.63562393
3
Given a MultiPoint geometry object in lat/long, create a polygon of the convex hull of these points. First project the lat/long points into the preferred EPSG, so that when we find the convex hull, we are not crossing any discontinuities such as the international date line. Return a single polygon geometry in lat/long.
def polygonFromInteriorPoints(geom, preferredEpsg): (projTr, llTr) = makeTransformations(4326, preferredEpsg) geomProj = copyGeom(geom) geomProj.Transform(projTr) geomOutline = geomProj.ConvexHull() geomOutline.Transform(llTr) return geomOutline
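Note on the document snippet above: it calls two helpers, makeTransformations and copyGeom, that are not defined in the row itself. As a rough illustrative sketch only (not part of the dataset row, and assuming the GDAL/OGR Python bindings that the snippet's Transform and ConvexHull calls imply), those helpers could look like the following; the helper names are taken from the snippet, everything else is an assumption.

    # Minimal sketch of the helpers assumed by polygonFromInteriorPoints,
    # using the osgeo (GDAL/OGR) Python bindings.
    from osgeo import ogr, osr

    def makeTransformations(epsgSrc, epsgDst):
        # Build a forward and an inverse coordinate transformation between
        # the two EPSG codes, e.g. (4326 -> projected, projected -> 4326).
        srSrc = osr.SpatialReference()
        srSrc.ImportFromEPSG(epsgSrc)
        srDst = osr.SpatialReference()
        srDst.ImportFromEPSG(epsgDst)
        forwardTr = osr.CoordinateTransformation(srSrc, srDst)
        inverseTr = osr.CoordinateTransformation(srDst, srSrc)
        return (forwardTr, inverseTr)

    def copyGeom(geom):
        # Return an independent copy so the caller's geometry is not
        # modified in place by the later Transform() calls.
        return geom.Clone()

The returned pair (forwardTr, inverseTr) corresponds to the (projTr, llTr) order used by polygonFromInteriorPoints above: project to the preferred EPSG, take the convex hull there, then transform the outline back to lat/long.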
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convex_hull(self):\n if isinstance(self.crs, GeographicalCRS):\n raise CRSError(\"not implemented for geographical coordinate \"\n \"systems. Project to a projected coordinate system.\")\n\n points = [pt for pt in self]\n\n # Find the lowermost (left?) point\n pt0 = points[0]\n idx = 0\n for i, pt in enumerate(points[1:]):\n if (pt.y < pt0.y) or ((pt.y == pt0.y) and (pt.x < pt0.x)):\n pt0 = pt\n idx = i+1\n points.pop(idx)\n\n # Sort CCW relative to pt0, and drop all but farthest of any duplicates\n points.sort(key=lambda pt: pt0.distance(pt))\n points.sort(key=lambda pt: _cvectorgeo.polarangle(pt0.vertex, pt.vertex))\n alpha = -1\n drop = []\n for i,pt in enumerate(points):\n a = _cvectorgeo.polarangle(pt0.vertex, pt.vertex)\n if a == alpha:\n drop.append(i)\n else:\n alpha = a\n\n if len(drop) != 0:\n for i in drop[::-1]:\n points.pop(i)\n\n # initialize convex hull\n if len(points) == 2:\n return Polygon([pt0, points[0], points[1]])\n elif len(points) == 1:\n raise GeometryError(\"convex polygon not defined for two points\")\n else:\n\n S = [pt0, points[0], points[1]]\n for pt in points[2:]:\n while not _cvectorgeo.isleft(S[-2].vertex, S[-1].vertex, pt.vertex):\n S.pop()\n S.append(pt)\n\n return Polygon(S, crs=self.crs)", "def _convex_hull(points):\n\n # Sort the points lexicographically (tuples are compared lexicographically).\n # Remove duplicates to detect the case we have just one unique point.\n points = sorted(set(points))\n\n # Boring case: no points or a single point, possibly repeated multiple times.\n if len(points) <= 1:\n return points\n\n # 2D cross product of OA and OB vectors, i.e. z-component of their 3D cross product.\n # Returns a positive value, if OAB makes a counter-clockwise turn,\n # negative for clockwise turn, and zero if the points are collinear.\n def cross(o, a, b):\n return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])\n\n # Build lower hull\n lower = []\n for p in points:\n while len(lower) >= 2 and cross(lower[-2], lower[-1], p) <= 0:\n lower.pop()\n lower.append(p)\n\n # Build upper hull\n upper = []\n for p in reversed(points):\n while len(upper) >= 2 and cross(upper[-2], upper[-1], p) <= 0:\n upper.pop()\n upper.append(p)\n\n # Concatenation of the lower and upper hulls gives the convex hull.\n # Last point of each list is omitted because it is repeated at the beginning of the other list.\n return lower[:-1] + upper[:-1]", "def convex_hull(points):\n\n # Sort the points lexicographically (tuples are compared lexicographically).\n # Remove duplicates to detect the case we have just one unique point.\n points = sorted(set(points))\n\n # Boring case: no points or a single point, possibly repeated multiple times.\n if len(points) <= 1:\n return points\n\n # 2D cross product of OA and OB vectors, i.e. 
z-component of their 3D cross product.\n # Returns a positive value, if OAB makes a counter-clockwise turn,\n # negative for clockwise turn, and zero if the points are collinear.\n def cross(o, a, b):\n return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])\n\n # Build lower hull \n lower = []\n for p in points:\n while len(lower) >= 2 and cross(lower[-2], lower[-1], p) <= 0:\n lower.pop()\n lower.append(p)\n\n # Build upper hull\n upper = []\n for p in reversed(points):\n while len(upper) >= 2 and cross(upper[-2], upper[-1], p) <= 0:\n upper.pop()\n upper.append(p)\n\n # Concatenation of the lower and upper hulls gives the convex hull.\n # Last point of each list is omitted because it is repeated at the beginning of the other list. \n return lower[:-1] + upper[:-1]", "def convex_hull(points):\n pointList = ExtendedTupleList(points)\n complete_ranges = pointList.range_within(0, 1)\n # Filters for four quadrants\n filters = [\n ((0, complete_ranges[1][\"max\"][2], \">=\"), (1, complete_ranges[0][\"max\"][2], \">=\")), #Q1\n ((0, complete_ranges[1][\"max\"][1], \"<=\"), (1, complete_ranges[0][\"min\"][2], \">=\")), #Q2\n ((0, complete_ranges[1][\"min\"][1], \"<=\"), (1, complete_ranges[0][\"min\"][1], \"<=\")), #Q3\n ((0, complete_ranges[1][\"min\"][2], \">=\"), (1, complete_ranges[0][\"max\"][1], \"<=\")) #Q4\n ]\n # Sorting reversals (True means Desc sort, False means Asc sort. Y sort given first)\n sorts = [\n (True, True),\n (True, False),\n (False, False),\n (False, True),\n ]\n hull = ExtendedTupleList([])\n # In CW order of quadrants...\n for index in [0, 3, 2, 1]:\n # Find all the relevant points\n quad_points = ExtendedTupleList([point for point in pointList.filter(filters[index])])\n # Sort them properly\n quad_points.double_sort(1, 0, reverse_outside=sorts[index][0], reverse_inside=sorts[index][1])\n # Build a convex line segment\n line_segment = convex_line_segment(quad_points, sorts[index][0], sorts[index][1])\n # Reverse it, if we need to\n if index % 2 == 1:\n line_segment.reverse()\n # Add all the points in, avoiding repeated points.\n hull.extend(line_segment, avoid_repeats=True)\n return hull", "def to_shapely_polygon(self):\n # load shapely lazily, which makes the dependency more optional\n import shapely.geometry\n return shapely.geometry.Polygon([(point[0], point[1]) for point in self.exterior])", "def convex_hull(points):\n\n # Sort the points lexicographically (tuples are compared lexicographically).\n # Remove duplicates to detect the case we have just one unique point.\n points = sorted(set(points))\n\n # Boring case: no points or a single point, possibly repeated multiple times.\n if len(points) <= 1:\n return points\n\n # 2D cross product of OA and OB vectors, i.e. z-component of their 3D cross\n # product. 
Returns a positive value, if OAB makes a counter-clockwise turn,\n # negative for clockwise turn, and zero if the points are collinear.\n def cross(o, a, b):\n return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])\n\n # Build lower hull\n lower = []\n for p in points:\n while len(lower) >= 2 and cross(lower[-2], lower[-1], p) <= 0:\n lower.pop()\n lower.append(p)\n\n # Build upper hull\n upper = []\n for p in reversed(points):\n while len(upper) >= 2 and cross(upper[-2], upper[-1], p) <= 0:\n upper.pop()\n upper.append(p)\n\n return lower, upper", "def convex_hull(points):\n\n # Sort the points lexicographically (tuples are compared lexicographically).\n # Remove duplicates to detect the case we have just one unique point.\n points = sorted(set(points))\n\n # Boring case: no points or a single point, possibly repeated multiple times.\n if len(points) <= 1:\n return points\n\n # 2D cross product of OA and OB vectors, i.e. z-component of their 3D cross product.\n # Returns a positive value, if OAB makes a counter-clockwise turn,\n # negative for clockwise turn, and zero if the points are collinear.\n def cross(o, a, b):\n return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])\n\n # Build lower hull\n lower = []\n for p in points:\n cont = 1\n while len(lower) >= 2 and cross(lower[-2], lower[-1], p) <= 0:\n print(\"antes \"), print(cont), print(lower)\n lower.pop()\n print(\"despues \"),print(lower)\n cont += 1\n lower.append(p)\n xlower ,ylower = getlists(lower)\n plt.plot(xlower,ylower,color=\"yellow\")\n # Build upper hull\n upper = []\n for p in reversed(points):\n while len(upper) >= 2 and cross(upper[-2], upper[-1], p) <= 0:\n upper.pop()\n upper.append(p)\n print(upper)\n print(\"hello2 \")\n print(cross((2,0),(2,4),(2.5,3)))\n\n xupper ,yupper = getlists(upper)\n plt.plot(xupper,yupper,color=\"blue\")\n\n\n return lower[:-1] + upper[:-1]", "def convex(points):\r\n if isinstance(points, np.ndarray):\r\n points = np.unique(points, axis=0)\r\n else:\r\n pts = []\r\n points = [pts.append(i) for i in points if i not in pts] # Remove duplicates\r\n del pts\r\n if len(points) <= 1:\r\n return points\r\n # Build lower hull\r\n lower = []\r\n for p in points:\r\n while len(lower) >= 2 and cross(lower[-2], lower[-1], p) <= 0:\r\n lower.pop()\r\n lower.append(p)\r\n # Build upper hull\r\n upper = []\r\n for p in reversed(points):\r\n while len(upper) >= 2 and cross(upper[-2], upper[-1], p) <= 0:\r\n upper.pop()\r\n upper.append(p)\r\n #print(\"lower\\n{}\\nupper\\n{}\".format(lower, upper))\r\n return np.array(lower[:-1] + upper) # upper[:-1]) # for open loop\r", "def convex_hull(*args):\n from point import Point\n from line import Segment\n from polygon import Polygon\n\n def uniquify(a):\n # not order preserving\n return list(set(a))\n\n p = args[0]\n if isinstance(p, Point):\n p = uniquify(args)\n\n if len(p) == 1:\n return p[0]\n elif len(p) == 2:\n return Segment(p[0], p[1])\n\n def orientation(p, q, r):\n '''Return positive if p-q-r are clockwise, neg if ccw, zero if\n collinear.'''\n return (q[1] - p[1])*(r[0] - p[0]) - (q[0] - p[0])*(r[1] - p[1])\n\n # scan to find upper and lower convex hulls of a set of 2d points.\n U = []\n L = []\n p.sort()\n for p_i in p:\n while len(U) > 1 and orientation(U[-2], U[-1], p_i) <= 0:\n U.pop()\n while len(L) > 1 and orientation(L[-2], L[-1], p_i) >= 0:\n L.pop()\n U.append(p_i)\n L.append(p_i)\n U.reverse()\n convexHull = tuple(L + U[1:-1])\n\n if len(convexHull) == 2:\n return Segment(convexHull[0], convexHull[1])\n return 
Polygon(convexHull)", "def concave_hull(hull:list, points:list, max_iterations:int=None, min_length_fraction:float=0, min_angle:float=90)->list:\n tweet.info(\"Creating concave hull; minimum side length {}% of average, minimum_angle {}\".format(min_length_fraction * 100, min_angle))\n test_points = set(points)\n ignore_points = []\n avg_sqr_distance = 0\n for k in range(0, len(hull)-1):\n avg_sqr_distance += point_sqr_distance(hull[k], hull[k+1])\n test_points.remove(hull[k])\n avg_sqr_distance /= len(hull) - 1\n min_sqr_length = avg_sqr_distance * (min_length_fraction ** 2) # since we get sqr_length, we square the fraction\n min_cosine = math.cos(math.radians(min_angle))\n \n while (max_iterations is None or max_iterations > 0) and test_points:\n selection, edge = select_longest_edge(hull, ignore_points, min_sqr_length)\n tweet.info(\"Considering edge {}; {} points left\".format(edge, len(test_points)))\n if selection is None:\n break\n selected_point = select_candidate_point(edge, test_points, hull, min_cosine)\n if selected_point is None:\n # This edge has no more candidate points, so we ignore it in the next pass\n ignore_points.append(edge[0])\n tweet.debug(\"No candidate point found.\")\n continue\n tweet.debug(\"Found point {}, inserting new edge.\".format(selected_point))\n if not max_iterations is None:\n max_iterations -= 1\n # We add the point into the concave hull\n hull.insert(selection + 1, selected_point)\n test_points.remove(selected_point)\n return hull", "def convex_hull(self):\n return self._geomgen(capi.geom_convex_hull)", "def give_polygon(vertices, points):\n polygon = np.zeros((len(vertices), 2))\n for i, vertex in enumerate(vertices):\n polygon[i] = points[vertex]\n # End point of a polygon equals to start point\n polygon = polygon.tolist()\n if polygon[-1] != polygon[0]:\n polygon.append(polygon[0])\n return polygon", "def construct_convex_hull(vertices: Sequence[Point]) -> Polyhedron:\n coords = np.zeros((len(vertices),3))\n for i,vertex in enumerate(vertices):\n coords[i,:] = vertex.coordinates\n hull = qconvex(\"i\", coords)\n n_facets = int(hull[0])\n facets = []\n for facet_vertices_str in hull[1:]:\n facet_vertices_idx = [int(x) for x in facet_vertices_str.split(' ')]\n facet_vertices = [vertices[i] for i in facet_vertices_idx]\n facet = Facet([Contour.from_vertices(facet_vertices)])\n facets.append(facet)\n polyhedron = Polyhedron(facets)\n return polyhedron", "def convex_hull(self):\n return _property_geo(arctern.ST_ConvexHull, self)", "def polygon_from_str(line):\n # remove possible utf-8 BOM\n if line.startswith('\\xef\\xbb\\xbf'):\n line = line[3:]\n polygon_points = [float(o) for o in line.split(',')[:8]]\n polygon_points = np.array(polygon_points).reshape(4, 2)\n polygon = Polygon(polygon_points).convex_hull\n return polygon", "def make_polygon(*coords):\n global GEOMETRY_SURF, POLYGONS,col\n if len(coords) < 3:\n print(\"Warning: Invalid polygon passed, ignoring...\")\n return\n start = coords[0]\n prev = coords[0]\n for coord in coords:\n POLYGONS |= {coord}\n line = Boundary(prev[0],prev[1],coord[0],coord[1]) #add segment to WALL list\n prev = coord\n line = Boundary(prev[0], prev[1],start[0],start[1])\n #now draw poly\n pygame.draw.polygon(GEOMETRY_SURF,col[\"SHAPECOL\"], coords)\n return", "def shapePolyToShapely(p: pcbnew.SHAPE_POLY_SET) \\\n -> Union[shapely.geometry.Polygon, shapely.geometry.MultiPolygon]:\n polygons = []\n for pIdx in range(p.OutlineCount()):\n kOutline = p.Outline(pIdx)\n assert kOutline.IsClosed()\n outline = 
shapeLinechainToList(kOutline)\n holes = []\n for hIdx in range(p.HoleCount(pIdx)):\n kHole = p.Hole(hIdx)\n assert kHole.isClosed()\n holes.append(shapeLinechainToList(kHole))\n polygons.append(Polygon(outline, holes=holes))\n if len(polygons) == 1:\n return polygons[0]\n return MultiPolygon(polygons=polygons)", "def polygonal(resolution, in_vertices, out_vertices_list = None):\n in_vertices = [Point(in_vertices[k,0],in_vertices[k,1]) for k in range(in_vertices.shape[0])] \n\n domain = mshr.Polygon(in_vertices) # https://bitbucket.org/fenics-project/mshr/wiki/API/Polygon\n # Create polygon defined by the given vertices. Vertices must be in counter-clockwise order and free of self-intersections.\n \n if(out_vertices_list is not None):\n for out_vertices in out_vertices_list:\n out_vertices = [Point(out_vertices[k,0],out_vertices[k,1]) for k in range(out_vertices.shape[0])]\n domain -= mshr.Polygon(out_vertices)\n \n mesh=mshr.generate_mesh(domain, resolution)\n\n # TODO : add refined mesh\n # if(refine_mesh):\n # d = mesh.topology().dim()\n \n # class To_refine(SubDomain):\n # def inside(self, x, on_boundary):\n # return x[1]<=0 and x[1]>= -l_mot/2-h_grid-l_vacuum/4\n\n # to_refine = To_refine()\n # marker = MeshFunction(\"bool\", mesh, d, False)\n # to_refine.mark(marker, True)\n # mesh = refine(mesh,marker)\n\n return mesh", "def convex_hull(points):\n points = np.array(points)\n hull = ConvexHull(points)\n return points[hull.vertices, :]", "def construct_polygon(self, polygon_longs: List, polygon_lats: List) -> gpd.GeoDataFrame:\n\n polygon_geom = Polygon(zip(polygon_longs, polygon_lats))\n\n crs = {'init': 'epsg:4326'}\n polygon = gpd.GeoDataFrame(index=[0], crs=crs, geometry=[polygon_geom])\n\n polygon.to_file(filename=f'{self.polygon_path}/polygon_{self.postfix}.geojson', driver='GeoJSON')\n polygon.to_file(filename=f'{self.polygon_path}/polygon_{self.postfix}.shp', driver=\"ESRI Shapefile\")\n\n self.monitor.info(\"-> Created area polygon.\")\n return polygon", "def polygon_from_str(line):\r\n polygon_points = [float(o) for o in line.split(',')[:8]]\r\n polygon_points = np.array(polygon_points).reshape(4, 2)\r\n polygon = Polygon(polygon_points).convex_hull\r\n return polygon", "def generate_mesh(\n poly_coords: np.ndarray,\n hole_coords: Optional[List[np.ndarray]] = None,\n min_points: Optional[int] = None,\n max_edge_length: Optional[float] = None,\n convex_hull: bool = False,\n boundary: Optional[np.ndarray] = None,\n preserve_boundary: bool = False,\n min_angle: float = 32.5,\n **kwargs,\n) -> Tuple[np.ndarray, np.ndarray]:\n poly_coords = ensure_unique(poly_coords)\n if hole_coords is None:\n hole_coords = []\n hole_coords = [ensure_unique(coords) for coords in hole_coords]\n # Facets is a shape (m, 2) array of edge indices.\n # coords[facets] is a shape (m, 2, 2) array of edge coordinates:\n # [(x0, y0), (x1, y1)]\n coords = np.concatenate([poly_coords] + hole_coords, axis=0)\n xmin = coords[:, 0].min()\n dx = np.ptp(coords[:, 0])\n ymin = coords[:, 1].min()\n dy = np.ptp(coords[:, 1])\n r0 = np.array([[xmin, ymin]]) + np.array([[dx, dy]]) / 2\n # Center the coordinates at (0, 0) to avoid floating point issues.\n coords = coords - r0\n indices = np.arange(len(poly_coords), dtype=int)\n if convex_hull:\n if boundary is not None:\n raise ValueError(\n \"Cannot have both boundary is not None and convex_hull = True.\"\n )\n facets = spatial.ConvexHull(coords).simplices\n else:\n if boundary is not None:\n boundary = list(map(tuple, ensure_unique(boundary - r0)))\n indices = [i for 
i in indices if tuple(coords[i]) in boundary]\n facets = np.array([indices, np.roll(indices, -1)]).T\n # Create facets for the holes.\n for hole in hole_coords:\n hole_indices = np.arange(\n indices[-1] + 1, indices[-1] + 1 + len(hole), dtype=int\n )\n hole_facets = np.array([hole_indices, np.roll(hole_indices, -1)]).T\n indices = np.concatenate([indices, hole_indices], axis=0)\n facets = np.concatenate([facets, hole_facets], axis=0)\n\n mesh_info = triangle.MeshInfo()\n mesh_info.set_points(coords)\n mesh_info.set_facets(facets)\n if hole_coords:\n # Triangle allows you to set holes by specifying a single point\n # that lies in each hole. Here we use the centroid of the hole.\n holes = [\n np.array(Polygon(hole).centroid.coords[0]) - r0.squeeze()\n for hole in hole_coords\n ]\n mesh_info.set_holes(holes)\n\n kwargs = kwargs.copy()\n kwargs[\"allow_boundary_steiner\"] = not preserve_boundary\n if \"min_angle\" not in kwargs:\n kwargs[\"min_angle\"] = min_angle\n\n mesh = triangle.build(mesh_info=mesh_info, **kwargs)\n points = np.array(mesh.points) + r0\n triangles = np.array(mesh.elements)\n if min_points is None and (max_edge_length is None or max_edge_length <= 0):\n return points, triangles\n\n kwargs[\"max_volume\"] = dx * dy / 100\n i = 1\n if min_points is None:\n min_points = 0\n if max_edge_length is None or max_edge_length <= 0:\n max_edge_length = np.inf\n max_length = get_edge_lengths(points, triangles).max()\n while (len(points) < min_points) or (max_length > max_edge_length):\n mesh = triangle.build(mesh_info=mesh_info, **kwargs)\n points = np.array(mesh.points) + r0\n triangles = np.array(mesh.elements)\n edges, is_boundary = get_edges(triangles)\n if preserve_boundary:\n # Only constrain the length of interior edges, i.e. edges not on the boundary.\n edges = edges[~is_boundary]\n edge_lengths = np.linalg.norm(np.diff(points[edges], axis=1), axis=2)\n max_length = edge_lengths.max()\n logger.debug(\n f\"Iteration {i}: Made mesh with {len(points)} points and \"\n f\"{len(triangles)} triangles with maximum interior edge length: \"\n f\"{max_length:.2e}. 
Target maximum edge length: {max_edge_length:.2e}.\"\n )\n if np.isfinite(max_edge_length):\n kwargs[\"max_volume\"] *= min(0.98, np.sqrt(max_edge_length / max_length))\n else:\n kwargs[\"max_volume\"] *= 0.98\n i += 1\n return points, triangles", "def point_to_polygon_geojson(g):\n point_coordinates = g['geometry']['coordinates']\n polygon_geojson = {\n 'type': 'Feature',\n 'properties': g['properties'],\n 'geometry': {\n 'type': 'Polygon',\n 'coordinates': [\n [point_coordinates, point_coordinates, point_coordinates, point_coordinates]\n ]\n }\n }\n return polygon_geojson", "def projection_exthull(poly1, new_dim):\n vert = extreme(poly1)\n if vert is None:\n # qhull failed\n return Polytope(fulldim=False, minrep=True)\n return reduce(qhull(vert[:, new_dim]))", "def gml_to_polygon(footprint):\n footprint = footprint.replace('\\n', '').strip()\n coords_poly = []\n #\n # Sentinel-1\n # (http://www.opengis.net/gml/srs/epsg.xml#4326)\n #\n if ',' in footprint:\n coords_gml = footprint.split()\n for coord_pair in coords_gml:\n lat, lon = [float(_) for _ in coord_pair.split(',')]\n if lon < -180.0:\n lon = -180.0\n if lon > 180.0:\n lon = 180.0\n if lat < -90.0:\n lat = -90.0\n if lat > 90.0:\n lat = 90.0\n coords_poly.append('{lon:.4f} {lat:.4f}'.format(lon=lon, lat=lat))\n\n #\n # Sentinel-3 and Sentinel-2\n # (http://www.opengis.net/def/crs/EPSG/0/4326)\n #\n else:\n coords_gml = footprint.split()\n for i in range(len(coords_gml)//2):\n lat = float(coords_gml[2*i])\n lon = float(coords_gml[2*i+1])\n if lon < -180.0:\n lon = -180.0\n if lon > 180.0:\n lon = 180.0\n if lat < -90.0:\n lat = -90.0\n if lat > 90.0:\n lat = 90.0\n coords_poly.append('{lon:.4f} {lat:.4f}'.format(lon=lon, lat=lat))\n\n #\n # Make sure the polygon is a closed line string.\n #\n if coords_poly[0] != coords_poly[-1]:\n coords_poly.append(coords_poly[0])\n\n wkt = 'POLYGON (({}))'.format(','.join(coords_poly))\n return wkt", "def _get_voronoi_poly_points(vert_index_list, voronoi_vertices,\n voronoi_centroid):\n voronoi_poly_points = []\n if -1 not in vert_index_list and len(vert_index_list) > 3:\n voronoi_poly_points = voronoi_vertices[vert_index_list]\n elif vert_index_list.size > 0:\n # ASSUME RECTANGLE\n vert_index_list = vert_index_list[vert_index_list >= 0]\n voronoi_poly_points = voronoi_vertices[vert_index_list]\n # CASE 1: 2 valid voronoi vertices\n if vert_index_list.size == 2:\n center_lon = voronoi_centroid[0]\n center_lat = voronoi_centroid[1]\n corner_lon1 = voronoi_poly_points[0][0]\n corner_lat1 = voronoi_poly_points[0][1]\n corner_lon2 = voronoi_poly_points[1][0]\n corner_lat2 = voronoi_poly_points[1][1]\n\n # check if need to add points in lon or lat\n if abs(corner_lon1-corner_lon2) > abs(corner_lat1-corner_lat2):\n dLat = center_lat - corner_lat1\n # append the corners in order\n voronoi_poly_points = np.array([\n [corner_lon1, corner_lat1],\n [corner_lon2, corner_lat2],\n [corner_lon2, center_lat + dLat],\n [corner_lon1, center_lat + dLat]\n ])\n else:\n dLon = center_lon - corner_lon1\n # append the corners in order\n voronoi_poly_points = np.array([\n [corner_lon1, corner_lat1],\n [corner_lon2, corner_lat2],\n [center_lon + dLon, corner_lat2],\n [center_lon + dLon, corner_lat1]\n ])\n # CASE 2: 1 valid voronoi vertex\n elif vert_index_list.size == 1:\n center_lon = voronoi_centroid[0]\n center_lat = voronoi_centroid[1]\n corner_lon = voronoi_poly_points[0][0]\n corner_lat = voronoi_poly_points[0][1]\n dLat = center_lat - corner_lat\n dLon = center_lon - corner_lon\n # append the corners in order\n 
voronoi_poly_points = np.array([\n [corner_lon, corner_lat],\n [center_lon + dLon, corner_lat],\n [center_lon + dLon, center_lat + dLat],\n [corner_lon, center_lat + dLat]\n ])\n\n return voronoi_poly_points", "def buildMultiPolygon(self,polygonList):\r\n geomlist=[]\r\n for geom in polygonList:\r\n # Cut 'MULTIPOLYGON(*) if we got one'\r\n if geom.exportToWkt()[:12]==\"MULTIPOLYGON\":\r\n geomWkt=geom.exportToWkt()[13:len(geom.exportToWkt())-1]\r\n else:\r\n # Cut 'POLYGON' if we got one\r\n geomWkt=geom.exportToWkt()[7:]\r\n geomlist.append(str(geomWkt))\r\n multiGeomWKT=\"MULTIPOLYGON(\"\r\n multiGeomWKT +=\",\".join(geomlist)\r\n multiGeomWKT+=\")\"\r\n #if self.debug: print multiGeomWKT\r\n multiGeom=QgsGeometry.fromWkt(multiGeomWKT)\r\n return multiGeom", "def hull_convex(ob, me, selected_only, precision = 0.1):\n # find convex hull\n vertices, triangles = pyffi.utils.quickhull.qhull3d(\n [tuple(v.co) for v in me.verts if v.sel or not selected_only],\n precision = precision)\n # create convex mesh\n box = Blender.Mesh.New('convexpoly')\n for vert in vertices:\n box.verts.extend(*vert)\n for triangle in triangles:\n box.faces.extend(triangle)\n # link mesh to scene and set transform\n scn = Blender.Scene.GetCurrent()\n boxob = scn.objects.new(box, 'convexpoly')\n boxob.setMatrix(ob.getMatrix('worldspace'))\n # set bounds type\n boxob.drawType = Blender.Object.DrawTypes['BOUNDBOX']\n boxob.rbShapeBoundType = 5 # convex hull shape not in blender Python API; Blender.Object.RBShapes['CONVEXHULL']?\n boxob.drawMode = Blender.Object.DrawModes['WIRE']", "def get_voronoi_polygons(input_pts, bbox=None):\n if not isinstance(input_pts, np.ndarray):\n input_pts = np.array(input_pts)\n\n if bbox is None:\n x_min = input_pts[:, 0].min()\n x_max = input_pts[:, 0].max()\n y_min = input_pts[:, 1].min()\n y_max = input_pts[:, 1].max()\n x_range = (x_max - x_min) * const.BBOX_MODIFIER\n y_range = (y_max - y_min) * const.BBOX_MODIFIER\n bbox = (x_min - x_range, y_min - y_range,\n x_max + x_range, y_max + y_range)\n\n # Constructing Delaunay triangulation, consisting of points and triangles.\n # (triangles are arrays of indexes of points)\n triangulation = matplotlib.tri.Triangulation(input_pts[:, 0],\n input_pts[:, 1])\n triangles = triangulation.triangles\n triangles_count = triangles.shape[0]\n\n # input_pts[triangles] = array of triangles: [[pt1], ..., ...] 
-- triangle.\n circle_centers = get_circles_centers(input_pts[triangles])\n\n segments = []\n for i in range(triangles_count):\n for j in range(3):\n neighbor = triangulation.neighbors[i][j]\n\n if neighbor != -1: # Trying to connect circle centers\n # Fitting centers to bbox.\n start, end = circle_centers[i], circle_centers[neighbor]\n\n if not check_inside(start, bbox):\n start = move_point(start, end, bbox)\n if start is None:\n continue\n\n if not check_inside(end, bbox):\n end = move_point(end, start, bbox)\n if end is None:\n continue\n\n segments.append([start, end])\n\n else: # Trying to create line leading to the bbox.\n # Ignore center outside of bbox\n if not check_inside(circle_centers[i], bbox):\n continue\n\n first, second, third = (input_pts[triangles[i, j]],\n input_pts[triangles[i, (j+1) % 3]],\n input_pts[triangles[i, (j+2) % 3]])\n\n edge = np.array([first, second])\n vector = np.array([[0, 1], [-1, 0]]).dot(edge[1] - edge[0])\n\n def line(pt):\n return ((pt[0] - first[0]) * (second[1] - first[1]) /\n (second[0] - first[0]) - pt[1] + first[1])\n\n orientation = (np.sign(line(third)) *\n np.sign(line(first + vector)))\n if orientation > 0:\n vector = -orientation * vector\n shift = calculate_shift(circle_centers[i], vector, bbox)\n if shift is not None:\n segments.append([circle_centers[i],\n circle_centers[i] + shift * vector])\n\n return segments", "def geomFromInteriorPoints(coords):\n if isinstance(coords, numpy.ndarray):\n coords = coords.tolist()\n geomDict = {'type':'MultiPoint', 'coordinates':coords}\n geomPoints = ogr.CreateGeometryFromJson(repr(geomDict))\n return geomPoints", "def convexHull(points):\n points = np.append(points, [[0, 0, 0]], axis=0) # All points plus origin\n hull = ConvexHull(points) # Visible points plus possible origin. 
Use its vertices property.\n\n return hull", "def produce_polygon(polygon_ordered_coordinates: List, zoom: int, plot_polygon: bool = False) -> Path:\n polygon_tile_points = []\n for item in polygon_ordered_coordinates:\n polygon_tile_points += [Utility.get_tile(*item, zoom)]\n polygon_tile_points += [polygon_tile_points[0]]\n polygon = Path(polygon_tile_points)\n if plot_polygon:\n fig = plt.figure()\n ax = fig.add_subplot(111)\n patch = patches.PathPatch(polygon, facecolor='orange', lw=2)\n ax.add_patch(patch)\n ax.set_xlim(min(polygon_tile_points, key = lambda item: item[0])[0], max(polygon_tile_points, key = lambda item: item[0])[0])\n ax.set_ylim(min(polygon_tile_points, key = lambda item: item[1])[1], max(polygon_tile_points, key = lambda item: item[1])[1])\n plt.show()\n return polygon", "def Polygon(self, polyline = False):\n\n from geographiclib.polygonarea import PolygonArea\n return PolygonArea(self, polyline)", "def get_boundary_as_polygon(self, do_geo=True):\n xhor, yhor = self.get_coordinates()\n dimensions = xhor.shape\n xbottom = xhor[0, :]\n xright = xhor[:, dimensions[1]-1]\n xtop = xhor[dimensions[0]-1, :][::-1]\n xleft = xhor[:, 0][::-1]\n\n ybottom = yhor[0, :]\n yright = yhor[:, dimensions[1]-1]\n ytop = yhor[dimensions[0]-1, :][::-1]\n yleft = yhor[:, 0][::-1]\n\n lon_square = np.concatenate((xbottom, xright, xtop, xleft))\n lat_square = np.concatenate((ybottom, yright, ytop, yleft))\n\n return lon_square, lat_square", "def give_convex_hull(rand_points):\n return ConvexHull(rand_points)", "def polygon_from_envelope(cls, min_x, min_y, max_x, max_y, crs=None):\n crs = _validate_crs(crs)\n return cls(arctern.ST_PolygonFromEnvelope(min_x, min_y, max_x, max_y), crs=crs)", "def create_hull(vertices):\n dt = np.dtype([('vertex', np.float64, (2,)),\n ('length', np.float64),\n ('is_processed', bool)])\n\n hull = np.empty(len(vertices), dtype=dt)\n for i, v in enumerate(vertices):\n j = 0 if i == len(vertices)-1 else i+1\n hull[i] = (v, dist(v, vertices[j]), False)\n\n return np.rec.array(hull)", "def get_mesh_boundary(triangles):\n # Create edges and sort each vertices on each edge.\n edge0 = triangles[:,0:2]\n edge1 = triangles[:,1:3]\n edge2 = triangles.take((0,2), axis=1)\n edges = np.concatenate((edge0, edge1, edge2), axis=0)\n edge_sort = np.sort(edges, axis=1)\n\n # Get unique edges that are only present once.\n (uniq, uniq_ids, counts) = np.unique(edge_sort, axis=0, return_index=True, return_counts=True)\n edge_inds = np.arange(edge_sort.shape[0], dtype=int)\n outer_edge_ids = edge_inds[np.in1d(edge_inds, uniq_ids[counts==1])]\n outer_edges = edge_sort[outer_edge_ids,:]\n num_outer_edges = outer_edges.shape[0]\n\n # Assume we need to close the polygon.\n num_outer_verts = num_outer_edges + 1\n\n # Loop over outer edges and use traversal method to get ordered vertices.\n v_start = outer_edges[0,0]\n v_end = outer_edges[0,1]\n vert_inds = -1*np.ones(num_outer_verts, dtype=int)\n vert_inds[0] = v_start\n vert_inds[1] = v_end\n vert_num = 2\n outer_edges[0,:] = -1\n for edge_num in range(1,num_outer_edges):\n edge_inds_next = np.where(outer_edges == v_end)\n if (edge_inds_next[0].shape[0] < 1):\n msg = \"Next edge not found for vertex %d\" % v_end\n raise ValueError(msg)\n edge_ind_next = edge_inds_next[0][0]\n vert_ind_next = 0\n if (edge_inds_next[1][0] == 0):\n vert_ind_next = 1\n vert_inds[vert_num] = outer_edges[edge_ind_next, vert_ind_next]\n outer_edges[edge_ind_next, :] = -1\n v_end = vert_inds[vert_num]\n vert_num += 1\n\n return vert_inds", "def 
cutPoly(self,geom,startPt,endPt,debug=False):\r\n #if we have disjoint Multi geometry as geom to split we need to iterate over its parts\r\n splittedGeoms=[]\r\n leftFragments=[]\r\n rightFragments=[]\r\n #if self.debug: print \"Number of geoms when slicing: \",str(len(geom.asGeometryCollection()))\r\n for geomPart in geom.asGeometryCollection():\r\n #split the actual part by cut line defined by startPt,endPt\r\n (res,splittedGeomsPart,topo)=geomPart.splitGeometry([startPt,endPt],False)\r\n splittedGeoms+=splittedGeomsPart\r\n #Add the remaining geomPart to the rightFragments or letfFragments\r\n #depending on distance\r\n d=self.signedDistCentroidFromLine(geomPart,startPt,endPt)\r\n if d>0:\r\n rightFragments.append(geomPart)\r\n else:\r\n leftFragments.append(geomPart)\r\n #if self.debug: print j,splittedGeoms\r\n\r\n for fragment in splittedGeoms:\r\n \"\"\"\r\n calculate signed distance of centroid of fragment and the splitline\r\n if signed distance is below zero, the point is to the left of the line\r\n if above zero the point is to the right of the line\r\n \"\"\"\r\n d=self.signedDistCentroidFromLine(fragment,startPt,endPt)\r\n #if debug==True:\r\n #if self.debug: print d\r\n\r\n if d>0:\r\n rightFragments.append(fragment)\r\n else:\r\n leftFragments.append(fragment)\r\n\r\n #if self.debug: print \"Left frags:\",len(leftFragments),\"Right frags:\",len(rightFragments)\r\n leftGeom=self.buildMultiPolygon(leftFragments)\r\n rightGeom=self.buildMultiPolygon(rightFragments)\r\n return leftGeom,rightGeom", "def _quickhull(self, pt1, pt2, point_list):\n if not point_list:\n return []\n pt3 = max(point_list, key=lambda p: oriented_area(pt1, p, pt2))\n # Nie trzeba dzielic przez abs(pt2-pt1).\n list1 = self._points_on_the_right(pt1, pt3, point_list)\n list2 = self._points_on_the_right(pt3, pt2, point_list)\n return (self._quickhull(pt1, pt3, list1) + [pt3]\n + self._quickhull(pt3, pt2, list2))", "def extract_hull_from_shapefile ( logger, shape_file ) :\n try :\n logger.info ( \"Extract hull from shapefile \" + str(shape_file) ) \n fIn = ogr.Open ( str(shape_file) )\n layer = fIn.GetLayer(0)\n feature = layer.GetNextFeature() \n geom = feature.GetGeometryRef()\n hull_wkt = str(geom.ExportToWkt())\n return hull_wkt\n except Exception, err:\n logger.critical(\"Extract hull from shapefile failed: ERROR: %s\\n\" % str(err))\n raise", "def getHull(x_data, y_data):\n xhull = []\n yhull = []\n if len(x_data) == 0 or len(y_data) == 0:\n return xhull, yhull\n xhull.append(x_data[0])\n yhull.append(y_data[0])\n\n lasthullindex = 0\n\n points = len(y_data)\n while lasthullindex < points - 1:\n slope = (y_data[lasthullindex + 1] - y_data[lasthullindex]) / (\n x_data[lasthullindex + 1] - x_data[lasthullindex])\n currenthullindex = lasthullindex + 1\n currenthully = y_data[lasthullindex]\n\n for i in range(currenthullindex + 1, points):\n extrapolation = currenthully + slope * (x_data[i] - x_data[lasthullindex])\n if y_data[i] < extrapolation:\n slope = ((y_data[i] - y_data[lasthullindex]) / (x_data[i] - x_data[lasthullindex]))\n currenthullindex = i\n\n # Store the hull points to be used for a spline fit\n xhull.append(x_data[currenthullindex])\n yhull.append(y_data[currenthullindex])\n lasthullindex = currenthullindex\n\n return xhull, yhull", "def convex_hull(L):\r\n CH=list()\r\n if L != []:\r\n P = list(L)\r\n # find the starting point of the algorithm and add it to the convex hull:\r\n ind0 = find_start(P)\r\n CH.append(P.pop(ind0))\r\n # find the next point and add it to the convex hull list 
CH:\r\n if P != []:\r\n ind1 = next_in_hull(CH[0], np.array([1,0]), P)\r\n CH.append(P.pop(ind1))\r\n # use the hyperplane criterion as function side_points to complete CH:\r\n while P != []:\r\n p = CH[-2]\r\n q = CH[-1]\r\n v = q - p \r\n P = side_points(CH[0], CH[-1] - CH[0], P)\r\n ind = next_in_hull(q, v, P)\r\n if P != []:\r\n CH.append(P.pop(ind))\r\n return CH", "def get_multipolygon(request, location):\n geometries = request.data.get('FeatureCollection', None)\n if geometries is not None:\n geometry_list = []\n for g in geometries['features']:\n if g['geometry']['type'] == 'Point':\n g = point_to_polygon_geojson(g)\n geometry_list.append(GEOSGeometry(json.dumps(g['geometry'])))\n lng, lat = location['Longitude']['Value'], location['Latitude']['Value']\n if lat is not None and lng is not None:\n loc_point = {'type': 'Feature', 'properties': {}, 'geometry': {'type': 'Point', 'coordinates': [lng, lat]}}\n loc_polygon = point_to_polygon_geojson(loc_point)\n geometry_list.append(GEOSGeometry(json.dumps(loc_polygon['geometry'])))\n return MultiPolygon(geometry_list)\n return None", "def pointToSmallPolygon(point, width=0.1):\n offset = width * 0.5\n x, y = point\n return [(x - offset, y - offset),\n (x - offset, y + offset),\n (x + offset, y + offset),\n (x + offset, y - offset),]", "def convex_hull(l):\n\tpass", "def polygon(self, pointlist, cls=None, style=None, attrs=None):\n payload = self._meta.make_payload(cls, style, attrs)\n pts_str = ' '.join('%s,%s' % (x, y) for x, y in pointlist)\n self.elements.append(\"\"\"<polygon points=\"%s\" %s/>\"\"\" % (pts_str, payload))\n return self", "def polygon(self, pointlist, cls=None, style=None, attrs=None):\n payload = self._meta.make_payload(cls, style, attrs)\n pts_str = ' '.join('%s,%s' % (x, y) for x, y in pointlist)\n self.elements.append(\"\"\"<polygon points=\"%s\" %s/>\"\"\" % (pts_str, payload))\n return self", "def polygon(self):\n radius = self._get_max_rupture_projection_radius()\n return self.location.to_polygon(radius)", "def optimal_polygon(y, w=0.5, debug=False):\n # Make sure that we use numpy array\n y = np.array(y)\n x = np.arange(len(y))\n\n # Initialization\n y = np.round(y, 6)\n p_plus = (x[0], y[0] + w)\n l_plus = (x[0], y[0] + w)\n r_plus = (x[1], y[1] + w)\n s_plus = {(x[0], y[0] + w): (x[1], y[1] + w)}\n t_plus = {(x[1], y[1] + w): (x[0], y[0] + w)}\n p_minus = (x[0], y[0] - w)\n l_minus = (x[0], y[0] - w)\n r_minus = (x[1], y[1] - w)\n s_minus = {(x[0], y[0] - w): (x[1], y[1] - w)}\n t_minus = {(x[1], y[1] - w): (x[0], y[0] - w)}\n q = []\n i = 2\n\n while i < len(y):\n # Updating CH_plus (convex hull) and CH_minus\n p = (x[i - 1], y[i - 1] + w)\n p_i_plus = (x[i], y[i] + w)\n while (p != p_plus) and _angle(p_i_plus, p, t_plus[p], '+') > np.pi:\n p = t_plus[p]\n s_plus[p] = p_i_plus\n t_plus[p_i_plus] = p\n\n p = (x[i - 1], y[i - 1] - w)\n p_i_minus = (x[i], y[i] - w)\n while (p != p_minus) and _angle(p_i_minus, p, t_minus[p], '-') > np.pi:\n p = t_minus[p]\n s_minus[p] = p_i_minus\n t_minus[p_i_minus] = p\n\n # Check if CH_plus and CH_minus intersect\n if _angle(p_i_plus, l_plus, r_minus, '+') < np.pi:\n q.append((_intersect(l_plus, r_minus, p_plus, p_minus), l_plus, r_minus, p_plus, p_minus))\n p_minus = r_minus\n p_plus = _intersect(l_plus, r_minus, (x[i - 1], y[i - 1] + w), p_i_plus)\n s_plus[p_plus] = p_i_plus\n t_plus[p_i_plus] = p_plus\n r_plus = p_i_plus\n r_minus = p_i_minus\n l_plus = p_plus\n l_minus = p_minus\n while _angle(l_minus, r_plus, s_minus[l_minus], '-') < np.pi:\n l_minus = s_minus[l_minus]\n 
elif _angle(p_i_minus, l_minus, r_plus, '-') < np.pi:\n q.append((_intersect(l_minus, r_plus, p_minus, p_plus), l_minus, r_plus, p_minus, p_plus))\n p_plus = r_plus\n p_minus = _intersect(l_minus, r_plus, (x[i - 1], y[i - 1] - w), p_i_minus)\n s_minus[p_minus] = p_i_minus\n t_minus[p_i_minus] = p_minus\n r_minus = p_i_minus\n r_plus = p_i_plus\n l_minus = p_minus\n l_plus = p_plus\n while _angle(l_plus, r_minus, s_plus[l_plus], '+') < np.pi:\n l_plus = s_plus[l_plus]\n else:\n # Updating the two seperating and supporting lines\n if _angle(p_i_plus, l_minus, r_plus, '+') < np.pi:\n r_plus = p_i_plus\n while _angle(p_i_plus, l_minus, s_minus[l_minus], '+') < np.pi:\n l_minus = s_minus[l_minus]\n\n if _angle(p_i_minus, l_plus, r_minus, '-') < np.pi:\n r_minus = p_i_minus\n while _angle(p_i_minus, l_plus, s_plus[l_plus], '-') < np.pi:\n l_plus = s_plus[l_plus]\n i += 1\n\n # Add last change point\n a = _intersect(l_plus, r_minus, p_plus, p_minus)\n b = _intersect(l_minus, r_plus, p_minus, p_plus)\n p = ((a[0] + b[0]) / 2, (a[1] + b[1]) / 2)\n q.append((p, r_minus, r_plus, p_minus, p_plus))\n\n end_a = _intersect(p, r_plus, p_i_minus, p_i_plus)\n end_b = _intersect(p, r_minus, p_i_minus, p_i_plus)\n end = ((end_a[0] + end_b[0]) / 2, (end_a[1] + end_b[1]) / 2)\n q.append((end, (None, None), (None, None), p_i_minus, p_i_plus))\n\n if debug:\n return np.array(q)\n else:\n return np.array([o[0] for o in q])", "def extent_as_polygon(self, crs=wgs84):\n from shapely.geometry import Polygon\n\n # this is not so trivial\n # for optimisation we will transform the boundaries only\n _i = np.hstack([np.arange(self.nx+1),\n np.ones(self.ny+1)*self.nx,\n np.arange(self.nx+1)[::-1],\n np.zeros(self.ny+1)]).flatten()\n _j = np.hstack([np.zeros(self.nx+1),\n np.arange(self.ny+1),\n np.ones(self.nx+1)*self.ny,\n np.arange(self.ny+1)[::-1]]).flatten()\n _i, _j = self.corner_grid.ij_to_crs(_i, _j, crs=crs)\n return Polygon(zip(_i, _j))", "def projection_iterhull(poly1, new_dim, max_iter=1000,\n verbose=0, abs_tol=ABS_TOL):\n r, xc = cheby_ball(poly1)\n org_dim = poly1.A.shape[1]\n logger.debug(\"Starting iterhull projection from dim \" +\n str(org_dim) + \" to dim \" + str(len(new_dim)))\n if len(new_dim) == 1:\n f1 = np.zeros(poly1.A.shape[1])\n f1[new_dim] = 1\n sol = lpsolve(f1, poly1.A, poly1.b)\n if sol['status'] == 0:\n vert1 = sol['x']\n else:\n raise RuntimeError((\n '`polytope.solvers.lpsolve` returned: {v}\\n'\n 'its docstring describes return values'\n ).format(\n v=sol))\n sol = lpsolve(np.negative(f1), poly1.A, poly1.b)\n if sol['status'] == 0:\n vert2 = sol['x']\n else:\n raise RuntimeError((\n '`polytope.solvers.lpsolve` returned: {v}\\n'\n 'its docstring describes return values'\n ).format(\n v=sol))\n vert = np.vstack([vert1, vert2])\n return qhull(vert, abs_tol=abs_tol)\n else:\n OK = False\n cnt = 0\n Vert = None\n while not OK:\n # Maximizing in random directions\n # to find a starting simplex\n cnt += 1\n if cnt > max_iter:\n raise Exception(\"iterative_hull: \"\n \"could not find starting simplex\")\n f1 = np.random.rand(len(new_dim)).flatten() - 0.5\n f = np.zeros(org_dim)\n f[new_dim] = f1\n sol = lpsolve(np.negative(f), poly1.A, poly1.b)\n xopt = np.array(sol['x']).flatten()\n if Vert is None:\n Vert = xopt.reshape(1, xopt.size)\n else:\n k = np.nonzero(Vert[:, new_dim[0]] == xopt[new_dim[0]])[0]\n for j in new_dim[range(1, len(new_dim))]:\n ii = np.nonzero(Vert[k, j] == xopt[j])[0]\n k = k[ii]\n if k.size == 0:\n break\n if k.size == 0:\n Vert = np.vstack([Vert, xopt])\n if Vert.shape[0] 
> len(new_dim):\n u, s, v = np.linalg.svd(\n np.transpose(Vert[:, new_dim] - Vert[0, new_dim]))\n rank = np.sum(s > abs_tol * 10)\n if rank == len(new_dim):\n # If rank full we have found a starting simplex\n OK = True\n logger.debug(\"Found starting simplex after \" +\n str(cnt) + \" iterations\")\n cnt = 0\n P1 = qhull(Vert[:, new_dim], abs_tol=abs_tol)\n HP = None\n while True:\n # Iteration:\n # Maximaze in direction of each facet\n # Take convex hull of all vertices\n cnt += 1\n if cnt > max_iter:\n raise Exception(\"iterative_hull: \"\n \"maximum number of iterations reached\")\n logger.debug(\"Iteration number \" + str(cnt))\n for ind in range(P1.A.shape[0]):\n f1 = np.round(P1.A[ind, :] / abs_tol) * abs_tol\n f2 = np.hstack([np.round(P1.A[ind, :] / abs_tol) * abs_tol,\n np.round(P1.b[ind] / abs_tol) * abs_tol])\n # See if already stored\n k = np.array([])\n if HP is not None:\n k = np.nonzero(HP[:, 0] == f2[0])[0]\n for j in range(1, np.shape(P1.A)[1] + 1):\n ii = np.nonzero(HP[k, j] == f2[j])[0]\n k = k[ii]\n if k.size == 0:\n break\n if k.size == 1:\n # Already stored\n xopt = HP[\n k,\n range(\n np.shape(P1.A)[1] + 1,\n np.shape(P1.A)[1] + np.shape(Vert)[1] + 1)\n ]\n else:\n # Solving optimization to find new vertex\n f = np.zeros(poly1.A.shape[1])\n f[new_dim] = f1\n sol = lpsolve(np.negative(f), poly1.A, poly1.b)\n if sol['status'] != 0:\n logger.error(\"iterhull: LP failure\")\n continue\n xopt = np.array(sol['x']).flatten()\n add = np.hstack([f2, np.round(xopt / abs_tol) * abs_tol])\n # Add new half plane information\n # HP format: [ P1.Ai P1.bi xopt]\n if HP is None:\n HP = add.reshape(1, add.size)\n else:\n HP = np.vstack([HP, add])\n Vert = np.vstack([Vert, xopt])\n logger.debug(\"Taking convex hull of new points\")\n P2 = qhull(Vert[:, new_dim], abs_tol=abs_tol)\n logger.debug(\"Checking if new points are inside convex hull\")\n OK = 1\n for i in range(np.shape(Vert)[0]):\n if not P1.contains(np.transpose([Vert[i, new_dim]]),\n abs_tol=abs_tol):\n # If all new points are inside\n # old polytope -> Finished\n OK = 0\n break\n if OK == 1:\n logger.debug(\"Returning projection after \" +\n str(cnt) + \" iterations\\n\")\n return P2\n else:\n # Iterate\n P1 = P2", "def create_polygon(meshcode):\r\n lat1,lon1 = ju.to_meshpoint(meshcode,0,0)\r\n lat2,lon2 = ju.to_meshpoint(meshcode,1,1)\r\n poly_text = 'POLYGON (('+str(lon1)+' '+str(lat1)+','+str(lon1)+' '+str(lat2)+','+str(lon2)+' '+str(lat2)+','+str(lon2)+' '+str(lat1)+','+str(lon1)+' '+str(lat1)+'))'\r\n return poly_text", "def process_latlon(self):\n data = self.unixtext.replace(\"\\n\", \" \")\n search = LAT_LON_PREFIX.search(data)\n if search is None:\n return None\n pos = search.start()\n newdata = data[pos+9:]\n # Go find our next non-digit, non-space character, if we find it, we\n # should truncate our string, this could be improved, I suspect\n search = re.search(r\"[^\\s0-9]\", newdata)\n if search is not None:\n pos2 = search.start()\n newdata = newdata[:pos2]\n\n poly = str2polygon(newdata)\n if poly is None:\n return None\n\n # check 0, PGUM polygons are east longitude akrherz/pyIEM#74\n if self.tp.source == 'PGUM':\n newpts = [[0 - pt[0], pt[1]] for pt in poly.exterior.coords]\n poly = Polygon(newpts)\n\n # check 1, is the polygon valid?\n if not poly.is_valid:\n self.tp.warnings.append(\n (\"LAT...LON polygon is invalid!\\n%s\") % (poly.exterior.xy,))\n return\n # check 2, is the exterior ring of the polygon clockwise?\n if poly.exterior.is_ccw:\n self.tp.warnings.append(\n (\"LAT...LON polygon exterior is 
CCW, reversing\\n%s\"\n ) % (poly.exterior.xy,))\n poly = Polygon(zip(poly.exterior.xy[0][::-1],\n poly.exterior.xy[1][::-1]))\n self.giswkt = 'SRID=4326;%s' % (dumps(MultiPolygon([poly]),\n rounding_precision=6),)\n return poly", "def construct_convex_hull_from_coords(coords: np.ndarray) -> Polyhedron:\n dim = coords.shape[1]\n assert dim <= 3, 'We cannot visualise anything with more than three\\\n dimensions.'\n vertices = []\n for i in range(coords.shape[0]):\n if dim == 2:\n vertices.append(Point(np.array([coords[i,0],coords[i,1],0])))\n else:\n vertices.append(Point(coords[i,:]))\n if dim == 3:\n hull = qconvex(\"i\", coords)\n n_facets = int(hull[0])\n facets = []\n for facet_vertices_str in hull[1:]:\n facet_vertices_idx = [int(x) for x in facet_vertices_str.split(' ')]\n facet_vertices = [vertices[i] for i in facet_vertices_idx]\n facet = Facet([Contour.from_vertices(facet_vertices)])\n facets.append(facet)\n polyhedron = Polyhedron(facets)\n elif dim == 2:\n hull = qconvex(\"Fx\", coords)\n n_hull_vertices = int(hull[0])\n hull_vertices = []\n for vertex_str in hull[1:]:\n vertex_idx = int(vertex_str)\n hull_vertices.append(vertices[vertex_idx])\n contour = Contour.from_vertices(hull_vertices)\n polyhedron = Polyhedron([Facet([contour])])\n return polyhedron", "def getContourRep(self):\n\t\tvertex1 = [[self.startX, self.startY]]\n\t\tvertex2 = [[self.startX, self.endY]]\n\t\tvertex3 = [[self.endX, self.startY]]\n\t\tvertex4 = [[self.endX, self.endY]]\n\t\tvertices = [vertex1, vertex2, vertex3, vertex4]\n\t\treturn convexHull(np.asarray(vertices, dtype = np.int32))", "def polygon_to_multipolygon(geom):\n if geom.__class__.__name__ == 'Polygon':\n g = OGRGeometry(OGRGeomType('MultiPolygon'))\n g.add(geom)\n return g\n elif geom.__class__.__name__ == 'MultiPolygon':\n return geom\n else:\n raise ValueError('Geom is neither Polygon nor MultiPolygon.')", "def boundary_polygon(self):\n try:\n return self.boundary_polygon_by_edges()\n except Exception as exc:\n self.log.warning('Warning, boundary_polygon() failed using edges! Trying polygon union method')\n self.log.warning(exc,exc_info=True)\n return self.boundary_polygon_by_union()", "def multi2poly(returned_vector_pred, layer_name=None):\n try: # Try to convert multipolygon to polygon\n df = gpd.read_file(returned_vector_pred, layer=layer_name)\n if 'MultiPolygon' in df['geometry'].geom_type.values:\n logging.info(\"\\nConverting multiPolygon to Polygon...\")\n gdf_exploded = df.explode(index_parts=True, ignore_index=True)\n gdf_exploded.to_file(returned_vector_pred, layer=layer_name) # overwrite the layer readed\n except Exception as e:\n logging.error(f\"\\nSomething went wrong during the conversion of Polygon. 
\\nError {type(e)}: {e}\")", "def lineToPolygon(geom):\n assert(geom[\"type\"] == \"LineString\")\n # LineString is only the exterior line of a polygon (no holes possible)\n return geojson.Polygon(coordinates=[geom[\"coordinates\"]], validate=True)", "def SplitIntoPolygons(shape):\n ret = []\n this_polygon = []\n restart_indices = set(shape.parts)\n for idx, point in enumerate(shape.points):\n if idx in restart_indices:\n if this_polygon:\n ret.append(this_polygon)\n this_polygon = [[point[0], point[1]]]\n else:\n this_polygon.append([point[0], point[1]])\n if this_polygon:\n ret.append(this_polygon)\n return ret", "def get_polygon_coordinates(self) -> Tuple[List, List]:\n\n polygon_query = f\"https://nominatim.openstreetmap.org/\" \\\n f\"search?city={self.location.replace(' ', '+')}&polygon_geojson=1&format=json\"\n r = requests.get(polygon_query)\n js = ast.literal_eval(r.text)\n\n self.monitor.info(\"-> Downloaded area polygon data points.\")\n clean_polygon_coords = js[0]['geojson']['coordinates'][0]\n\n polygon_lats = [float(i[1]) for i in clean_polygon_coords]\n polygon_longs = [float(i[0]) for i in clean_polygon_coords]\n\n self.monitor.info(\"-> Created lat/long vectors.\")\n return polygon_lats, polygon_longs", "def _createpoly(self):\n return self.cv.create_polygon((0, 0, 0, 0, 0, 0), fill=\"\", outline=\"\")", "def extent_geom(extent):\n ring = ogr.Geometry(ogr.wkbLinearRing)\n ring.AddPoint(extent[0], extent[3])\n ring.AddPoint(extent[2], extent[3])\n ring.AddPoint(extent[2], extent[1])\n ring.AddPoint(extent[0], extent[1])\n ring.CloseRings()\n polygon = ogr.Geometry(ogr.wkbPolygon)\n polygon.AddGeometry(ring)\n return polygon", "def create_geofence(self):\n\t\tring = ogr.Geometry(ogr.wkbLinearRing)\n\t\tring.AddPoint(*self.north_coords)\n\t\tring.AddPoint(*self.northeast_coords)\n\t\tring.AddPoint(*self.east_coords)\n\t\tring.AddPoint(*self.southeast_coords)\n\t\tring.AddPoint(*self.south_coords)\n\t\tring.AddPoint(*self.southwest_coords)\n\t\tring.AddPoint(*self.west_coords)\n\t\tring.AddPoint(*self.northwest_coords)\n\t\tring.AddPoint(*self.north_coords)\n\t\tself.polygon.AddGeometry(ring)", "def shrink_vertex(hull_vertices, inside, shrinking_threshold):\n hull = create_hull(hull_vertices)\n hull, max_edge_length = sort_hull(hull)\n avg_edge_length = average_distance(inside)\n\n if max_edge_length < avg_edge_length:\n # mark current hull as released, compute new hull from remaining points\n new_hull_vertices, inside = convex_hull(inside)\n new_hull_vertices, released = shrink_vertex(new_hull_vertices, inside)\n return new_hull_vertices, np.append(released, hull_vertices, axis=0)\n\n all_points = np.append(inside, hull_vertices, axis=0)\n\n if len(all_points) < 3:\n # nothing to shrink\n return hull.vertex, np.zeros((0, 2))\n\n while max_edge_length >= shrinking_threshold * avg_edge_length:\n V1 = hull[0].vertex\n V2 = hull[1].vertex\n V21 = V2 - V1\n V21dot = np.dot(V21, V21)\n\n edges = list(\n zip(hull.vertex[1:],\n np.append(hull.vertex[2:], [hull.vertex[0]], axis=0)))\n\n candidates = []\n for P in all_points:\n # find closest point from x to the line between V1 and V2:\n # 1) its projection falls between V1 and V2\n # 2) it resides on the left of V1 and V2\n # 3) the perpendicular line from P to the line between V1 and V2\n # doesn't have an intersection with other edges between vertices\n\n PV1 = P - V1\n u = np.dot(PV1, V21) / V21dot\n\n if not (0-eps <= u <= 1+eps):\n # 1) failed\n continue\n\n M = np.vstack((np.array([V1, V2, P]).T,[1,1,1]))\n if np.linalg.det(M) 
<= 0+eps:\n # 2) failed\n continue\n\n # get projected point\n PP = V1 + u*V21\n PPP = PP - P\n\n num_intersections = 0\n for i, edge in enumerate(edges):\n\n if array_equal(P, edge[0]) or array_equal(P, edge[1]):\n # a point always intersects with its own edge\n continue\n\n has_intersec = seg_intersect(P, PPP, edge[0], edge[1]-edge[0])\n if not has_intersec:\n # no intersection with this edge\n continue\n\n # we found an intersection. These are only allowed if the\n # candidate vertex is either the V_last or V3...\n if array_equal(P, hull[-1].vertex) or \\\n array_equal(P, hull[2].vertex):\n continue\n\n # otherwise this is an invalid intersection\n num_intersections += 1\n if num_intersections > 1:\n # only one intersection is allowed at max\n # (see condition below)\n break\n\n if num_intersections == 0 or \\\n num_intersections == 1 and (0-eps <= u <= 0+eps or\n 1-eps <= u <= 1+eps):\n # add point if it has no intersection or the only intersection\n # is at V1 or V2. This happens if u == 0 or u == 1.\n candidates.append((P, dist(P, PP)))\n\n if len(candidates) == 0:\n # no candidate for shrinking found\n hull[0].is_processed = True\n\n if all(hull.is_processed):\n # finished search\n break\n else:\n # add closest point to hull between V1 and V2\n Q = min(candidates, key = lambda t: t[1])[0]\n # update edge length\n hull[0].length = dist(V1, Q)\n hull = np.insert(hull, 1, (Q, dist(Q, V2), False), axis=0)\n\n hull, max_edge_length = sort_hull(hull)\n\n # the original releasing has not been implemented -> return an empty array\n return hull.vertex, np.zeros((0, 2))", "def _convex_hull_side(image, start, end):\n\n convex_points = [start]\n\n x_start, y_start = start\n x_end, y_end = end\n\n side = (x_start <= x_end, y_start <= y_end)\n\n\n ranges = {\n (True, True): [\n [x_start + 1, x_end + 1],\n [y_start, y_end + 1],\n False\n ],\n (False, True): [\n [y_start + 1, y_end + 1],\n [x_start, x_end - 1, -1],\n True\n ],\n (False, False): [\n [x_start - 1, x_end - 1, -1],\n [y_start, y_end - 1, -1],\n False\n ],\n (True, False): [\n [y_start - 1, y_end - 1, -1],\n [x_start, x_end + 1],\n True\n ]\n }\n\n prev = 0\n\n for outer in range(*ranges[side][0]):\n\n curr_pixel = None\n\n for inner in range(*ranges[side][1]):\n if ranges[side][2] and image[outer, inner] == 0:\n curr_pixel = (inner, outer)\n break\n elif not ranges[side][2] and image[inner, outer] == 0:\n curr_pixel = (outer, inner)\n break\n\n if curr_pixel is None:\n continue\n\n while True:\n # slope infinite for first point\n prev_slope = (\n float(\"-inf\") if prev == 0\n else slope(\n convex_points[prev - 1],\n convex_points[prev],\n ranges[side][2]))\n\n # remove previous point if it yields concave segment\n if prev_slope > slope(\n convex_points[prev],\n curr_pixel,\n ranges[side][2]\n ):\n convex_points.pop(prev)\n prev -= 1\n # add point to hull if it yields convex segment\n else:\n convex_points.append(curr_pixel)\n prev += 1\n break\n\n return convex_points[1:]", "def _get_polygon(areasrc):\n\n str = areasrc.geometry.wkt\n str = re.sub('POLYGON\\(\\(', '', str)\n str = re.sub('\\)\\)', '', str)\n aa = re.split('\\,', str)\n lons = []\n lats = []\n for str in aa:\n bb = re.split('\\s+', re.sub('^\\s+', '', str))\n lons.append(float(bb[0]))\n lats.append(float(bb[1]))\n return lons, lats", "def _get_polygon(element):\n polygon = element.find('%s/%s/%s/%s/%s' %\n (NRML04_AREA_GEOMETRY, gml.GML_POLYGON,\n gml.GML_EXTERIOR, gml.GML_LINEAR_RING,\n gml.GML_POS_LIST)).text\n\n polygon = 
gml._get_polygon_from_2DLinestring(polygon)\n\n return polygon", "def boundary_polygon_by_edges(self):\n lines=self.boundary_linestrings()\n polys=join_features.lines_to_polygons(lines,close_arc=False)\n if len(polys)>1:\n raise GridException(\"somehow there are multiple boundary polygons\")\n return polys[0]", "def splitAtDateline(geom, preferredEpsg):\n crosses = crossesDateline(geom, preferredEpsg)\n if crosses:\n (projTr, llTr) = makeTransformations(4326, preferredEpsg)\n coords = getCoords(geom)\n (x, y) = (coords[:, 0], coords[:, 1])\n (yMin, yMax) = (y.min(), y.max())\n xMinPositive = None\n xMaxNegative = None\n xGe0 = (x >= 0)\n xLt0 = (x < 0)\n if xGe0.any() > 0 and xLt0.any() > 0:\n xMaxNegative = x[xLt0].max()\n xMinPositive = x[xGe0].min()\n \n # Create rectangles for the east and west hemispheres, constrained by the \n # extent of this polygon. Note that this assumes that we do not\n # cross both the date line, and also the prime (zero) meridian. This may not\n # always be true, notably when we are close to the pole. \n eastHemiRectCoords = [[xMinPositive, yMax], [xMinPositive, yMin], [180, yMin], \n [180, yMax], [xMinPositive, yMax]]\n eastHemiRectJson = repr({'type':'Polygon', 'coordinates':[eastHemiRectCoords]})\n westHemiRectCoords = [[-180, yMax], [-180, yMin], [xMaxNegative, yMin], \n [xMaxNegative, yMax], [-180, yMax]]\n westHemiRectJson = repr({'type':'Polygon', 'coordinates':[westHemiRectCoords]})\n eastHemiRect = ogr.CreateGeometryFromJson(eastHemiRectJson)\n westHemiRect = ogr.CreateGeometryFromJson(westHemiRectJson)\n\n geomProj = copyGeom(geom)\n geomProj.Transform(projTr)\n eastHemiRect.Transform(projTr)\n westHemiRect.Transform(projTr)\n\n eastHemiPart = geomProj.Intersection(eastHemiRect)\n westHemiPart = geomProj.Intersection(westHemiRect)\n eastHemiPart.Transform(llTr)\n westHemiPart.Transform(llTr)\n \n # Put these together as a single multipolygon\n eastPartCoords = getCoords(eastHemiPart)\n westPartCoords = getCoords(westHemiPart)\n # Discard any vertices which are still no the wrong side of the 180 line. I\n # do not understand what is going on here, but I have invested far more of \n # my valuable time than I should, and this kludge will be a reasonable approximation. \n eastPartCoords = eastPartCoords[eastPartCoords[:, 0] > 0, :]\n westPartCoords = westPartCoords[westPartCoords[:, 0] < 0, :]\n \n # Convert to lists\n eastPartCoords = eastPartCoords.tolist()\n westPartCoords = westPartCoords.tolist()\n # Discard anything left with only 2 points\n if len(eastPartCoords) < 3:\n eastPartCoords = []\n if len(westPartCoords) < 3:\n westPartCoords = []\n # Close polygons. What a kludge.....\n if len(eastPartCoords) > 2:\n if eastPartCoords[-1][0] != eastPartCoords[0][0] or eastPartCoords[-1][1] != eastPartCoords[0][1]:\n eastPartCoords.append(eastPartCoords[0])\n if len(westPartCoords) > 2:\n if westPartCoords[-1][0] != westPartCoords[0][0] or westPartCoords[-1][1] != westPartCoords[0][1]:\n westPartCoords.append(westPartCoords[0])\n \n # Make a multi-polygon from the two parts\n coordsMulti = [[eastPartCoords], [westPartCoords]]\n jsonStr = repr({'type':'MultiPolygon', 'coordinates':coordsMulti})\n newGeom = ogr.CreateGeometryFromJson(jsonStr)\n else:\n # It didn't really cross the date line, but seems to due to rounding\n # error in crossesDateline(). 
\n newGeom = copyGeom(geom)\n else:\n newGeom = copyGeom(geom)\n return newGeom", "def test_to_wkt_list_complex_polygon(self):\n from pykml.util import to_wkt_list\n\n # create a polygon\n poly = KML.Polygon(\n KML.extrude('1'),\n KML.altitudeMode('relativeToGround'),\n KML.outerBoundaryIs(\n KML.LinearRing(\n KML.coordinates(\n '-122.366278,37.818844,30 '\n '-122.365248,37.819267,30 '\n '-122.365640,37.819861,30 '\n '-122.366669,37.819429,30 '\n '-122.366278,37.818844,30 '\n ),\n ),\n ),\n KML.innerBoundaryIs(\n KML.LinearRing(\n KML.coordinates(\n '-122.366212,37.818977,30 '\n '-122.365424,37.819294,30 '\n '-122.365704,37.819731,30 '\n '-122.366212,37.818977,30 '\n ),\n ),\n ),\n KML.innerBoundaryIs(\n KML.LinearRing(\n KML.coordinates(\n '-122.366212,37.818977,30 '\n '-122.365704,37.819731,30 '\n '-122.366488,37.819402,30 '\n '-122.366212,37.818977,30 '\n ),\n ),\n ),\n )\n\n poly_wkt_list = to_wkt_list(poly)\n\n self.assertEqual(len(poly_wkt_list), 1)\n self.assertEqual(\n poly_wkt_list[0],\n ('POLYGON ((-122.366278 37.818844 30, '\n '-122.365248 37.819267 30, '\n '-122.365640 37.819861 30, '\n '-122.366669 37.819429 30, '\n '-122.366278 37.818844 30), '\n '(-122.366212 37.818977 30, '\n '-122.365424 37.819294 30, '\n '-122.365704 37.819731 30, '\n '-122.366212 37.818977 30), '\n '(-122.366212 37.818977 30, '\n '-122.365704 37.819731 30, '\n '-122.366488 37.819402 30, '\n '-122.366212 37.818977 30))')\n )", "def hex_to_polygon(hexid):\n list_of_coords_list=h3.h3_to_geo_boundary(hexid,geo_json=True)\n return Polygon([tuple(i) for i in list_of_coords_list])", "def __push_polygons(self, graph, u, v, vert_dict):\n # Here we assume that the edge of the wall is not in the graph, otherwise there is no adjustment needed\n # Find the shortest path from start to end\n # Use this path to check which polygons are on the side of the path away from the origin\n path = bfs_path(graph, u, v)\n if len(path) <= 2:\n return\n # Vector representing the edge of the wall\n wall_vector = Point(v.get_x() - u.get_x(), v.get_y() - u.get_y())\n # Find the midpoint of the edge\n push_polygons = set()\n edge_start = path.pop()\n while len(path) > 0:\n edge_end = path.pop()\n # The midpoint will be treated as the endpoint of a vector from the origin and used to determine which\n # normal vector is pointing away from the origin\n midpoint = Point((edge_start.get_x() + edge_end.get_x()) / 2,\n (edge_start.get_y() + edge_end.get_y()) / 2)\n # Create <edge_start, edge_end> as an edge vector, normalize it\n edge_vector = Point(edge_end.get_x() - edge_start.get_x(),\n edge_end.get_y() - edge_start.get_y())\n norm = edge_vector.simple_distance(edge_start)\n edge_vector.set(edge_vector.get_x(), edge_vector.get_y())\n # Find the normalized normal vector in relation to edge_vector\n normal = Point(-edge_vector.get_y() / norm, edge_vector.get_x() / norm)\n del norm\n # Find the dot product between the midpoint vector and the normal vector\n dot_product = midpoint.get_x() * normal.get_x() + midpoint.get_y() * normal.get_y()\n if dot_product > 0:\n # This is the point going away from the origin, find the polygon containing this midpoint + normal\n point = Point(midpoint.get_x() + normal.get_x(), midpoint.get_y() + normal.get_y())\n elif dot_product < 0:\n # Flip the direction\n point = Point(midpoint.get_x() - normal.get_x(), midpoint.get_y() - normal.get_y())\n else:\n continue\n point_edge_vector = Point(edge_end.get_x() - u.get_x(), edge_end.get_y() - u.get_y())\n new_vertex = u + project_vector(point_edge_vector, 
wall_vector)\n for r in self.regions:\n if r.is_contained(point):\n push_polygons.add(r)\n move_vertex(r, edge_end, new_vertex, vert_dict)\n break\n # The end of this edge is by definition the start of the next edge\n edge_start = new_vertex", "def create_pressure_vessel_geometry():\r\n\r\n # configure sigmoid function\r\n bounds_upper = [3, 6]\r\n h = 5\r\n w = 6\r\n\r\n sigmoid_function = lambda x: (1 / (1 + np.exp(-1 * h * x + w))) + 1\r\n\r\n sigmoid_function_reverse = lambda x: 1 / (1 + np.exp(h * x - w - 18)) + 1\r\n\r\n funcs_upper = [sigmoid_function, sigmoid_function_reverse]\r\n\r\n bounds_lower = None\r\n funcs_lower = 0\r\n\r\n x_max = 6\r\n x_min = 0\r\n resolution = 10000\r\n\r\n pressure_vessel = Geometry(x_max, x_min, resolution,\r\n bounds_upper, funcs_upper,\r\n bounds_lower, funcs_lower)\r\n\r\n return pressure_vessel", "def create_poly(self, bounds):\n\n left, bottom, right, top = bounds\n\n return Polygon(\n [\n (left, bottom),\n (left, top),\n (right, top),\n (right, bottom),\n (left, bottom),\n ]\n )", "def _FindHull(s: List[sg.Point2], p: sg.Point2, q: sg.Point2, hull_points: List[sg.Point2]):\n if len(s) == 0:\n return\n seg = sg.Segment2(p, q)\n c = max(s, key=lambda point: sg.squared_distance(seg, point))\n hull_points.insert(hull_points.index(p) + 1, c)\n s.remove(c)\n s1, s2 = split_points_triangle(s, (p, q, c))\n _FindHull(s1, p, c, hull_points)\n _FindHull(s2, c, q, hull_points)", "def _preprocess_polygon(polygon):\n\n # Could catch ValueErrors for unsuitable inputs\n polygon = numpy.array(polygon)\n\n if len(polygon.shape) == 1:\n if len(polygon) % 2:\n raise ValueError('Number of values for polygon not divisible by two.'\n 'Coordinates need an x and y coordinate: '.format(polygon))\n polygon = polygon.reshape((-1, 2))\n\n if not len(polygon.shape) == 2 or polygon.shape[1] != 2:\n raise ValueError('polygon of wrong dimensions. It should be of shape. '\n 'Should be: (num_points, 2). Input: {}'.format(polygon))\n\n polygon = Polygon(numpy.array(polygon))\n\n # Mainly for self-intersection\n if not polygon.is_valid:\n raise ValueError('polygon is invalid, likely self-intersecting: {}'.\n format(polygon))\n\n return polygon", "def find_hull_vertices(points: np.ndarray) -> np.ndarray:\n M = 3\n N = points.shape[0]\n for i in range(4, N):\n while ccw(points[M], points[M - 1], points[i]) >= 0:\n M -= 1\n\n M += 1\n swap(points, M, i)\n\n return points[1:M + 1]", "def build_polygon(self, \n dataset_metadata_dict, \n bounding_box, \n visibility=True, \n parent_folder=None, \n polygon_name=None):\n if parent_folder is None:\n parent_folder=self.dataset_type_folder\n if polygon_name is None:\n polygon_name = str(dataset_metadata_dict['dataset_title'])\n\n try:\n if dataset_metadata_dict['convex_hull_polygon']:\n polygon_bounds = [[float(ordinate)\n for ordinate in coord_pair.strip().split(' ')\n ]\n for coord_pair in\n re.search('POLYGON\\(\\((.*)\\)\\)',\n dataset_metadata_dict['convex_hull_polygon']\n ).group(1).split(',')\n ]\n # build the polygon based on the bounds. Also set the polygon name. 
It is inserted into the self.dataset_type_folder.\n polygon_kml = parent_folder.newpolygon(name=polygon_name,\n outerboundaryis=polygon_bounds, visibility=visibility)\n \n polygon_kml.style = self.polygon_style\n \n # Always set timestamps on polygons\n self.set_timestamps(polygon_kml, dataset_metadata_dict)\n\n # build the polygon description\n description_string = '<![CDATA['\n description_string = description_string + '<p><b>{0}: </b>{1}</p>'.format('Survey Name',\n str(dataset_metadata_dict['dataset_title']))\n description_string = description_string + '<p><b>{0}: </b>{1}</p>'.format('Survey ID', str(dataset_metadata_dict['ga_survey_id']))\n description_string = description_string + '<p><b>{0}: </b>{1}</p>'.format('Survey Start Date',\n str(dataset_metadata_dict['start_date']))\n description_string = description_string + '<p><b>{0}: </b>{1}</p>'.format('Survey End Date',\n str(dataset_metadata_dict['end_date']))\n if dataset_metadata_dict['dataset_link']:\n description_string = description_string + '<p><b>{0}: </b>{1}</p>'.format('Link to dataset', str(\n dataset_metadata_dict['dataset_link']))\n description_string = description_string + ']]>'\n polygon_kml.description = description_string\n \n return polygon_kml\n \n except Exception as e:\n logger.debug('Unable to display polygon \"{}\": {}'.format(dataset_metadata_dict['convex_hull_polygon'], e))", "def create_polygon(self, vertices, style=None, parent=None):\n d = 'M %f %f L' % (vertices[0].x, vertices[0].y)\n for p in vertices[1:]:\n d = d + ' %f,%f' % (p.x, p.y)\n if vertices[0] != vertices[-1]:\n d = d + ' %f,%f' % (vertices[0].x, vertices[0].y)\n attrs = {'d': d}\n return self.create_path(attrs, style, parent)", "def get_outer_vertices(feature):\n return [\n point\n for part in Geometry.get_multipolygon(feature)\n for point in part[0][0:-1]\n ]", "def polyCreateFacet(*args, constructionHistory: bool=True, hole: Union[bool, List[bool]]=True,\n name: AnyStr=\"\", point: Union[List[float, float, float], List[List[float,\n float, float]]]=None, subdivision: Union[int, bool]=1, texture: Union[int,\n bool]=0, q=True, query=True, e=True, edit=True,\n **kwargs)->Union[List[AnyStr], Any]:\n pass", "def convex_hull(self):\n nodes = self._datacontroller.get_data('nodes')\n scale = self._datacontroller.get_data('scale')\n hull = tsputil.convex_hull_helper(nodes)\n if hull:\n result = construct_step(hull, 'Most Top Left Node', 'Clockwise', nodes, scale)\n self._datacontroller.commit_change('path', result)", "def json2polygon(geojson_str):\n geojson_object = geojson.loads(geojson_str)\n return geometry.shape(geojson_object)", "def PolygonPatch(polygon, **kwargs):\n return PathPatch(PolygonPath(polygon), **kwargs)", "def pointsToVoronoiGridShapefile(lat, lon, vor_shp_path, extent=None):\n voronoi_centroids = _get_voronoi_centroid_array(lat, lon, extent)\n\n # set-up output polygon shp\n log(\"Creating output polygon shp {0}\"\n .format(os.path.basename(vor_shp_path)))\n if os.path.exists(vor_shp_path):\n os.remove(vor_shp_path)\n drv = ogr.GetDriverByName('ESRI Shapefile')\n outShp = drv.CreateDataSource(vor_shp_path)\n osr_geographic_proj = osr.SpatialReference()\n osr_geographic_proj.ImportFromEPSG(4326)\n layer = outShp.CreateLayer('', osr_geographic_proj, ogr.wkbPolygon)\n layer.CreateField(ogr.FieldDefn('GRID_LAT', ogr.OFTReal))\n layer.CreateField(ogr.FieldDefn('GRID_LON', ogr.OFTReal))\n layerDefn = layer.GetLayerDefn()\n\n # find nodes surrounding polygon centroid\n # sort nodes in counterclockwise order\n # create polygon perimeter 
through nodes\n log(\"Building Voronoi polygons...\")\n # compute voronoi\n voronoi_manager = Voronoi(voronoi_centroids)\n voronoi_vertices = voronoi_manager.vertices\n voronoi_regions = voronoi_manager.regions\n for point_id, region_index in enumerate(voronoi_manager.point_region):\n vert_index_list = np.array(voronoi_regions[region_index])\n voronoi_centroid = voronoi_centroids[point_id]\n voronoi_poly_points = _get_voronoi_poly_points(vert_index_list,\n voronoi_vertices,\n voronoi_centroid)\n if len(voronoi_poly_points) == 4:\n poly = ogr.Geometry(ogr.wkbPolygon)\n ring = ogr.Geometry(ogr.wkbLinearRing)\n for node in voronoi_poly_points:\n ring.AddPoint(node[0], node[1])\n\n # grab first node to close ring\n ring.AddPoint(voronoi_poly_points[0][0], voronoi_poly_points[0][1])\n\n poly.AddGeometry(ring)\n feat = ogr.Feature(layerDefn)\n feat.SetField('GRID_LON', float(voronoi_centroid[0]))\n feat.SetField('GRID_LAT', float(voronoi_centroid[1]))\n feat.SetGeometry(poly)\n layer.CreateFeature(feat)", "def QHull(points: List[sg.Point2]) -> List[sg.Segment2]:\n point_list = copy.copy(points)\n hull_points = []\n points.sort(key=lambda point: point.x())\n mn = points[0]\n mx = points[-1]\n hull_points.append(mn)\n hull_points.append(mx)\n point_list.remove(mn)\n point_list.remove(mx)\n seg = sg.Segment2(mn, mx)\n # a line between the left most and right most point\n s1, s2 = split_points(point_list, seg)\n _FindHull(s1, mn, mx, hull_points)\n _FindHull(s2, mx, mn, hull_points)\n return points_to_segment(hull_points)", "def _get_patch(poly1, **kwargs):\n import matplotlib as mpl\n V = extreme(poly1)\n rc, xc = cheby_ball(poly1)\n x = V[:, 1] - xc[1]\n y = V[:, 0] - xc[0]\n mult = np.sqrt(x**2 + y**2)\n x = x / mult\n angle = np.arccos(x)\n corr = np.ones(y.size) - 2 * (y < 0)\n angle = angle * corr\n ind = np.argsort(angle)\n # create patch\n patch = mpl.patches.Polygon(V[ind, :], True, **kwargs)\n patch.set_zorder(0)\n return patch", "def to_geom(self):\n return [self.create_poly(bbox) for bbox in self.tree_bounds]", "def _generate_geometry_from_points(self, geometry_type, points):\n if geometry_type == 'line':\n # Only x and y coordinates are considered for line\n geometry = LineString([(x[0], x[1]) for x in points])\n elif geometry_type == 'area':\n # Only x and y coordinates are considered for polygon area\n geometry = Polygon([(x[0], x[1]) for x in points])\n else:\n raise NotImplementedError()\n return geometry", "def poly(*args):\n if len(args) == 0 or len(args) == 2:\n raise ValueError('bad number of arguments {} passed to poly()'.format(len(args)))\n if len(args) == 1:\n if ispoly(args[0]):\n return deepcopy(args[0])\n else:\n raise VauleError('non-poly list passed to poly()')\n # args is of length 3 or greater. 
Check to see if args are points\n a = list(args)\n b = list(filter(lambda x: not ispoint(x),a))\n if len(b) > 0:\n raise ValueError('non-point arguments to poly(): {} '.format(b))\n return deepcopy(a)", "def polyhedra_from_xyz(xyz_file: str,\n try_convex_hull: bool = True)\\\n -> Tuple[List[Polyhedron],\\\n List[VertexCollection],\\\n List[Union[Polyhedron,VertexCollection]]]:\n\n object_coordinates_dict: Dict[str, List[List[float]]] = {}\n polyhedron_list: List[Polyhedron] = []\n vertex_collection_list: List[VertexCollection] = []\n object_list: List[Union[Polyhedron,VertexCollection]] = []\n type_order: List[str] = []\n with open(xyz_file, 'r') as f:\n lines = f.readlines()\n for i in range(len(lines)):\n line = lines[i].strip()\n if i == 0:\n l = re.search(\"\\d+$\", line)\n assert l is not None\n n_points = int(l.group())\n elif i == 1:\n l = re.search(\"\\d+$\", line)\n assert l is not None\n dim = int(l.group())\n assert dim <= 3, 'We cannot visualise the fourth dimension and\\\n above.'\n else:\n if line == '':\n continue\n l = re.search(\"([A-Za-z]+[0-9]*)[\\s\\t]+\", line)\n assert l is not None\n point_type = l.group(1)\n l2 = re.findall(\"[+-]?\\d+\\.\\d*\", line)\n point_coordinates = []\n for coordinate in l2:\n point_coordinates.append(float(coordinate))\n assert len(point_coordinates) == dim\n if point_type not in object_coordinates_dict:\n object_coordinates_dict[point_type] = []\n object_coordinates_dict[point_type].append(point_coordinates)\n if point_type not in type_order:\n type_order.append(point_type)\n\n for point_type in type_order:\n object_coordinates = np.array(object_coordinates_dict[point_type])\n if try_convex_hull:\n try:\n print(\"Attempting to construct a convex hull for {}...\"\\\n .format(point_type))\n polyhedron = construct_convex_hull_from_coords\\\n (object_coordinates)\n polyhedron_list.append(polyhedron)\n object_list.append(polyhedron) \n except:\n print(\"Failed to construct a convex hull for {}.\"\\\n .format(point_type))\n print(\"Falling back to vertex collection for {}...\"\\\n .format(point_type))\n vertex_collection = construct_vertex_collection_from_coords\\\n (object_coordinates, 2)\n vertex_collection_list.append(vertex_collection)\n object_list.append(vertex_collection) \n else:\n print(\"Constructing a vertex collection for {}...\"\\\n .format(point_type))\n vertex_collection = construct_vertex_collection_from_coords\\\n (object_coordinates, 2)\n vertex_collection_list.append(vertex_collection)\n object_list.append(vertex_collection) \n\n return polyhedron_list,vertex_collection_list,object_list", "def plot_polygon(points, **kwargs):\n plt.gca().add_collection(\n PatchCollection(\n [Polygon(points, True)],\n **kwargs)\n )", "def from_shapely(polygon_shapely, label=None):\n # load shapely lazily, which makes the dependency more optional\n import shapely.geometry\n\n assert isinstance(polygon_shapely, shapely.geometry.Polygon)\n\n # polygon_shapely.exterior can be None if the polygon was instantiated without points\n if polygon_shapely.exterior is None or len(polygon_shapely.exterior.coords) == 0:\n return Polygon([], label=label)\n exterior = np.float32([[x, y] for (x, y) in polygon_shapely.exterior.coords])\n return Polygon(exterior, label=label)", "def convex_hull_intersection(p1, p2):\n inter_p = polygon_clip(p1,p2)\n if inter_p is not None:\n hull_inter = ConvexHull(inter_p)\n return inter_p, hull_inter.volume\n else:\n return None, 0.0", "def convex_hull_intersection(p1, p2):\n inter_p = polygon_clip(p1,p2)\n if inter_p is not 
None:\n hull_inter = ConvexHull(inter_p)\n return inter_p, hull_inter.volume\n else:\n return None, 0.0", "def make_convex_hull(self):\n hull_points_d = []\n try:\n print \"self.V_bar_list_d******************\", self.V_bar_list_d\n hull = ConvexHull(self.V_bar_list_d)\n hull_vertices = hull.vertices\n\n for i in hull_vertices:\n hull_points_d.append(self.V_bar_list_d[i])\n\n except scipy.spatial.qhull.QhullError:\n hull_points_d = self.V_bar_list_d\n\n return hull_points_d", "def create_new_polygon(self, coords, **options):\n\n if 'outline' not in options:\n options['outline'] = self.variables.foreground_color\n if 'width' not in options:\n options['width'] = self.variables.poly_border_width\n if 'fill' not in options:\n options['fill'] = ''\n\n shape_id = self.create_polygon(*coords, **options)\n self.variables.vector_objects[str(shape_id)] = VectorObject(SHAPE_TYPES.POLYGON, options)\n self.variables.shape_ids.append(shape_id)\n self.set_shape_pixel_coords_from_canvas_coords(shape_id, coords)\n self.variables.current_shape_id = shape_id\n return shape_id", "def convex_hull_intersection(p1, p2):\n inter_p = polygon_clip(p1, p2)\n if inter_p is not None:\n hull_inter = ConvexHull(inter_p)\n return inter_p, hull_inter.volume\n else:\n return None, 0.0" ]
[ "0.7075242", "0.6431152", "0.64101326", "0.63916564", "0.63589376", "0.627315", "0.61870193", "0.6043088", "0.6030819", "0.59753376", "0.5926664", "0.59115064", "0.5896636", "0.589403", "0.5877571", "0.5867895", "0.58546466", "0.5851499", "0.5834695", "0.58292884", "0.5810926", "0.58076584", "0.57584", "0.57107246", "0.5700266", "0.56935036", "0.5680749", "0.56681496", "0.56553435", "0.56336844", "0.56022006", "0.5585583", "0.557263", "0.55637103", "0.5563552", "0.55566305", "0.55538887", "0.54748136", "0.5466508", "0.546235", "0.5454565", "0.5454026", "0.54488313", "0.5436728", "0.5433662", "0.5420552", "0.5411192", "0.5411192", "0.5391111", "0.5375532", "0.53663486", "0.53638566", "0.53531384", "0.534743", "0.53331316", "0.53294766", "0.5320924", "0.53156394", "0.53037155", "0.5297256", "0.52789795", "0.523314", "0.52292544", "0.5224376", "0.52101624", "0.520773", "0.5198513", "0.5186042", "0.518344", "0.51557714", "0.5152067", "0.5135814", "0.513488", "0.51333624", "0.51313025", "0.51241475", "0.5120733", "0.511818", "0.51130754", "0.5109557", "0.5108633", "0.51068753", "0.51068354", "0.5105137", "0.5100846", "0.50949556", "0.50939035", "0.50909233", "0.50876224", "0.5084727", "0.5051036", "0.5035002", "0.50348365", "0.5028013", "0.5026461", "0.50260687", "0.50260687", "0.50231194", "0.5021587", "0.50182456" ]
0.7046996
1
Given a Polygon Geometry object in lat/long coordinates, detect whether it crosses the dateline. Do the test in the projection of the preferred EPSG, so that we remove (or at least reduce) the ambiguity about inside/outside.
def crossesDateline(geom, preferredEpsg): (xMin, xMax, yMin, yMax) = geom.GetEnvelope() (projTr, llTr) = makeTransformations(4326, preferredEpsg) geomProj = copyGeom(geom) geomProj.Transform(projTr) dateLineGeom = ogr.Geometry(wkt='LINESTRING(180 {}, 180 {})'.format(yMin, yMax)) try: dateLineGeom.Transform(projTr) crosses = geomProj.Intersects(dateLineGeom) except Exception: # If we can't transform into the preferred EPSG, then it seems likely that # the geom is nowhere near the date line. crosses = False return crosses
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def point_inside_polygon(xxx_todo_changeme,poly):\n (x,y) = xxx_todo_changeme\n n = len(poly)\n inside = False\n\n p1x, p1y = poly[0]\n for i in range(n + 1):\n p2x, p2y = poly[i % n]\n if y > min(p1y, p2y):\n if y <= max(p1y, p2y):\n if x <= max(p1x, p2x):\n if p1y != p2y:\n xinters = (y - p1y) * (p2x - p1x) / (p2y - p1y) + p1x\n if p1x == p2x or x <= xinters:\n inside = not inside\n p1x, p1y = p2x, p2y\n\n return inside", "def ispolygon(a):\n return ispoly(a) and dist(a[0],a[-1]) < epsilon", "def isPointInPolygon(xPolygon, yPolygon, xPt, yPt):\n \n # How to tell if a point is inside a polygon:\n # Determine the change in angle made by the point and the vertices\n # of the polygon. Add up the delta(angle)'s from the first (include\n # the first point again at the end). If the point is inside the\n # polygon, then the total angle will be +/-360 deg. If the point is\n # outside, then the total angle will be 0 deg. Points on the edge will\n # outside.\n # This is called the Winding Algorithm\n # http://geomalgorithms.com/a03-_inclusion.html\n\n n = len(xPolygon)\n # Array for the angles\n angle = np.zeros(n)\n\n # add first vertex to the end\n xPolygon1 = np.append( xPolygon, xPolygon[0] )\n yPolygon1 = np.append( yPolygon, yPolygon[0] )\n\n wn = 0 # winding number counter\n\n # Loop through the edges of the polygon\n for i in range(n):\n # if edge crosses upward (includes its starting endpoint, and excludes its final endpoint)\n if yPolygon1[i] <= yPt and yPolygon1[i+1] > yPt:\n # if (P is strictly left of E[i]) // Rule #4\n if isLeft(xPolygon1[i], yPolygon1[i], xPolygon1[i+1], yPolygon1[i+1], xPt, yPt) > 0: \n wn += 1 # a valid up intersect right of P.x\n\n # if edge crosses downward (excludes its starting endpoint, and includes its final endpoint)\n if yPolygon1[i] > yPt and yPolygon1[i+1] <= yPt:\n # if (P is strictly right of E[i]) // Rule #4\n if isLeft(xPolygon1[i], yPolygon1[i], xPolygon1[i+1], yPolygon1[i+1], xPt, yPt) < 0: \n wn -= 1 # a valid up intersect right of P.x\n\n # wn = 0 only when P is outside the polygon\n if wn == 0:\n return False\n else:\n return True", "def is_dateline(vertices):\n vertices = np.asarray(vertices, dtype=\"d\")\n longitudes = vertices[:, 0]\n return np.abs(longitudes.min(axis=0) - longitudes.max(axis=0)) > 180", "def crosses_dateline(self):\n if not isinstance(self.crs, GeographicalCRS):\n raise CRSError(\"Dateline detection only defined for geographical \"\n \"coordinates\")\n\n return any(self._seg_crosses_dateline(seg) for seg in self.segments)", "def in_polygon(point, poly):\n x, y = point\n n = len(poly)\n inside = False\n\n p1x, p1y = poly[0]\n for i in range(n+1):\n p2x, p2y = poly[i % n]\n if y > min(p1y, p2y):\n if y <= max(p1y, p2y):\n if x <= max(p1x, p2x):\n if p1y != p2y:\n xinters = (y-p1y)*(p2x-p1x)/(p2y-p1y)+p1x\n if p1x == p2x or x <= xinters:\n inside = not inside\n p1x, p1y = p2x, p2y\n return inside", "def testLocation(point, polygon):\n # begin\n if polygon.first.y == point.y and polygon.first.x == point.x:\n return \"on\" # vertex\n w =0\n for v in polygon.iter():\n if v.next.y == point.y:\n if v.next.x == point.x:\n return \"on\" # vertex\n else:\n if v.y == point.y and (v.next.x > point.x) == (v.x < point.x):\n return \"on\" # edge\n # if crossing horizontal line\n if (v.y < point.y and v.next.y >= point.y)\\\n or (v.y >= point.y and v.next.y < point.y):\n if v.x >= point.x:\n if v.next.x > point.x:\n # modify w\n if v.next.y > v.y: w += 1\n else: w -= 1\n else:\n det = (v.x - point.x) * (v.next.y - point.y) \\\n - (v.next.x 
- point.x) * (v.y - point.y)\n if det == 0: return \"on\" # edge\n # if right crossing\n if (det > 0 and v.next.y > v.y)\\\n or (det < 0 and v.next.y < v.y):\n # modify w\n if v.next.y > v.y: w += 1\n else: w -= 1\n else:\n if v.next.x > point.x:\n det = (v.x - point.x) * (v.next.y - point.y) \\\n - (v.next.x - point.x) * (v.y - point.y)\n if det == 0: return \"on\" # edge\n # if right crossing\n if (det > 0 and v.next.y > v.y)\\\n or (det < 0 and v.next.y < v.y):\n # modify w\n if v.next.y > v.y: w += 1\n else: w -= 1\n if (w % 2) != 0:\n return \"in\"\n else:\n return \"out\"", "def point_in_poly(x_point: float, y_point: float) -> bool:\n\n # Semi-F47 extended states all devices should be able to ride out a sag of up to 1 cycle.\n if x_point <= 1:\n return False\n\n point = shapely.geometry.Point(x_point, y_point)\n return POLYGON.contains(point) or POLYGON.intersects(point)", "def is_in_polygon(self, point):\n reference_point = self.get_reference_point()\n \n reference_segment = DirectedEdge(point, reference_point)\n\n num_crossings = 0\n \n left_idx = 0\n while left_idx != len(self):\n right_idx = (left_idx + 1) % len(self)\n \n are_crossing = False\n \n if reference_segment.contains_point(self[left_idx]):\n while reference_segment.contains_point(self[right_idx]):\n right_idx = (right_idx + 1) % len(self)\n \n if right_idx == left_idx:\n break\n \n if left_idx == right_idx:\n left_idx = len(self)\n continue\n \n left_endpoint = self[(left_idx - 1) % len(self)]\n right_endpoint = self[(right_idx + 1) % len(self)]\n \n are_crossing = reference_segment.orientation(left_endpoint) != reference_segment.orientation(right_endpoint)\n left_idx = (right_idx + 1) % len(self) if left_idx < (right_idx + 1) % len(self) else len(self) \n elif reference_segment.contains_point(self[right_idx]):\n left_idx += 1\n continue\n else:\n edge = DirectedEdge(self[left_idx], self[right_idx])\n are_crossing = reference_segment.is_intersecting(edge)\n left_idx += 1\n \n num_crossings = num_crossings + 1 if are_crossing else num_crossings\n \n return num_crossings % 2 == 1", "def inside_polygon(point, polygon):\n x = point[0]\n y = point[1]\n n = len(polygon)\n inside = False\n p1x, p1y = polygon[0]\n for i in range(1, n + 1):\n p2x, p2y = polygon[i % n]\n if y > min(p1y, p2y):\n if y <= max(p1y, p2y):\n if x <= max(p1x, p2x):\n if p1y != p2y:\n xinters = (y - p1y) * (p2x - p1x) / (p2y - p1y) + p1x\n if p1x == p2x or x <= xinters:\n inside = not inside\n p1x, p1y = p2x, p2y\n return inside", "def point_in_polygon(x, y, poly):\n n = len(poly)\n inside = False\n\n p1x, p1y = poly[0]\n for i in range(n + 1):\n p2x, p2y = poly[i % n]\n if y > min(p1y, p2y):\n if y <= max(p1y, p2y):\n if x <= max(p1x, p2x):\n if p1y != p2y:\n xinters = (y - p1y) * (p2x - p1x) / (p2y - p1y) + p1x\n if p1x == p2x or x <= xinters:\n inside = not inside\n p1x, p1y = p2x, p2y\n return inside", "def ispolygonXY(a):\n return ispolygon(a) and isXYPlanar(a)", "def pnpoly(test_point, polygon):\r\n is_inside = False\r\n minX = polygon[0][0]\r\n maxX = polygon[0][0]\r\n minY = polygon[0][1]\r\n maxY = polygon[0][1]\r\n for p in polygon:\r\n minX = min(p[0], minX)\r\n maxX = max(p[0], maxX)\r\n minY = min(p[1], minY)\r\n maxY = max(p[1], maxY)\r\n if test_point[0] < minX or test_point[0] > maxX or test_point[1] < minY or test_point[1] > maxY:\r\n return False\r\n j = len(polygon) - 1\r\n for i in range(len(polygon)):\r\n if ((polygon[i][1] > test_point[1]) != (polygon[j][1] > test_point[1]) and (\r\n test_point[0] < (polygon[j][0] - polygon[i][0]) 
* (test_point[1] - polygon[i][1]) / (\r\n polygon[j][1] - polygon[i][1]) + polygon[i][0])):\r\n is_inside = not is_inside\r\n j = i\r\n return is_inside", "def splitAtDateline(geom, preferredEpsg):\n crosses = crossesDateline(geom, preferredEpsg)\n if crosses:\n (projTr, llTr) = makeTransformations(4326, preferredEpsg)\n coords = getCoords(geom)\n (x, y) = (coords[:, 0], coords[:, 1])\n (yMin, yMax) = (y.min(), y.max())\n xMinPositive = None\n xMaxNegative = None\n xGe0 = (x >= 0)\n xLt0 = (x < 0)\n if xGe0.any() > 0 and xLt0.any() > 0:\n xMaxNegative = x[xLt0].max()\n xMinPositive = x[xGe0].min()\n \n # Create rectangles for the east and west hemispheres, constrained by the \n # extent of this polygon. Note that this assumes that we do not\n # cross both the date line, and also the prime (zero) meridian. This may not\n # always be true, notably when we are close to the pole. \n eastHemiRectCoords = [[xMinPositive, yMax], [xMinPositive, yMin], [180, yMin], \n [180, yMax], [xMinPositive, yMax]]\n eastHemiRectJson = repr({'type':'Polygon', 'coordinates':[eastHemiRectCoords]})\n westHemiRectCoords = [[-180, yMax], [-180, yMin], [xMaxNegative, yMin], \n [xMaxNegative, yMax], [-180, yMax]]\n westHemiRectJson = repr({'type':'Polygon', 'coordinates':[westHemiRectCoords]})\n eastHemiRect = ogr.CreateGeometryFromJson(eastHemiRectJson)\n westHemiRect = ogr.CreateGeometryFromJson(westHemiRectJson)\n\n geomProj = copyGeom(geom)\n geomProj.Transform(projTr)\n eastHemiRect.Transform(projTr)\n westHemiRect.Transform(projTr)\n\n eastHemiPart = geomProj.Intersection(eastHemiRect)\n westHemiPart = geomProj.Intersection(westHemiRect)\n eastHemiPart.Transform(llTr)\n westHemiPart.Transform(llTr)\n \n # Put these together as a single multipolygon\n eastPartCoords = getCoords(eastHemiPart)\n westPartCoords = getCoords(westHemiPart)\n # Discard any vertices which are still no the wrong side of the 180 line. I\n # do not understand what is going on here, but I have invested far more of \n # my valuable time than I should, and this kludge will be a reasonable approximation. \n eastPartCoords = eastPartCoords[eastPartCoords[:, 0] > 0, :]\n westPartCoords = westPartCoords[westPartCoords[:, 0] < 0, :]\n \n # Convert to lists\n eastPartCoords = eastPartCoords.tolist()\n westPartCoords = westPartCoords.tolist()\n # Discard anything left with only 2 points\n if len(eastPartCoords) < 3:\n eastPartCoords = []\n if len(westPartCoords) < 3:\n westPartCoords = []\n # Close polygons. What a kludge.....\n if len(eastPartCoords) > 2:\n if eastPartCoords[-1][0] != eastPartCoords[0][0] or eastPartCoords[-1][1] != eastPartCoords[0][1]:\n eastPartCoords.append(eastPartCoords[0])\n if len(westPartCoords) > 2:\n if westPartCoords[-1][0] != westPartCoords[0][0] or westPartCoords[-1][1] != westPartCoords[0][1]:\n westPartCoords.append(westPartCoords[0])\n \n # Make a multi-polygon from the two parts\n coordsMulti = [[eastPartCoords], [westPartCoords]]\n jsonStr = repr({'type':'MultiPolygon', 'coordinates':coordsMulti})\n newGeom = ogr.CreateGeometryFromJson(jsonStr)\n else:\n # It didn't really cross the date line, but seems to due to rounding\n # error in crossesDateline(). 
\n newGeom = copyGeom(geom)\n else:\n newGeom = copyGeom(geom)\n return newGeom", "def point_in_polygon(point, poly):\n if len(poly) < 3: return False\n\n inside = False\n for i in range(len(poly)):\n j = (i+1) % len(poly)\n if (((poly[i][1] > point[1]) != (poly[j][1] > point[1])) and\n (point[0] < (poly[j][0] - poly[i][0]) * (point[1] - poly[i][1]) /\n (poly[j][1] - poly[i][1]) + poly[i][0])):\n inside = not inside\n return inside", "def check_cross_polygon(polygons_dict, region):\n result_poly_name = ''\n start_len = len(polygons_dict)\n poly_names = []\n poly_region_default_area = area(geojson.Feature(geometry=region, properties={}).geometry)\n for main_el in polygons_dict:\n for child_el in polygons_dict:\n intersection_region_area = 0\n main_poly = shapely.geometry.asShape(main_el['geometry'])\n child_poly = shapely.geometry.asShape(child_el['geometry'])\n intersection_polygon = main_poly.intersection(child_poly)\n control_area = area(\n geojson.Feature(geometry=child_poly, properties={}).geometry)\n if not intersection_polygon.is_empty and area(\n geojson.Feature(geometry=intersection_polygon, properties={}).geometry) < control_area:\n intersection_region = region.intersection(intersection_polygon)\n if not intersection_region.is_empty:\n intersection_region_area = area(\n geojson.Feature(geometry=intersection_region, properties={}).geometry)\n if float(\"{0:.2f}\".format(intersection_region_area)) == float(\n \"{0:.2f}\".format(poly_region_default_area)):\n poly_names.append(main_el[\"properties\"][\"Name\"])\n poly_names.append(child_el[\"properties\"][\"Name\"])\n if poly_names:\n result_poly_name = sorted(set(poly_names))[0]\n idx = 0\n iteration_range = len(polygons_dict)\n while idx < iteration_range:\n if polygons_dict[idx][\"properties\"][\"Name\"] != result_poly_name:\n del polygons_dict[idx]\n iteration_range -= 1\n else:\n idx += 1\n if len(polygons_dict) != start_len:\n return polygons_dict\n else:\n return None", "def check_point(point: Point, polygon: List[Point]):\n intersections = 0\n n = len(polygon)\n \n p1 = polygon[-1]\n # Checking every edge of the polygon inside for loop\n for i in range(n):\n p2 = polygon[i]\n # if point higher or lower than edge we immediately go to the next edge\n if min(p1.y, p2.y) <= point.y <= max(p1.y, p2.y):\n # Here goes the check for horizontal edge case\n if abs(point.y - p1.y) <= ATOL:\n if p1.y < p2.y:\n intersections += 1\n \n # Here goes the check for vertical edge case, if point to the left - it's an intersection\n elif abs(p2.x - p1.x) <= ATOL:\n if point.x < p2.x:\n intersections += 1\n # Here goes a \"regular\" edge case\n # Line equation is used to figure out does ray intersect the edge or not\n else:\n k = (p2.y - p1.y) / (p2.x - p1.x) # y = k*x + b -- line equation\n b = p1.y - k * p1.x\n x_of_point_y = (point.y - b) / k\n \n if point.x <= x_of_point_y:\n intersections += 1\n \n p1 = p2\n \n return intersections % 2 == 1", "def inside_polygon(x, y, points):\n n = len(points)\n inside = False\n p1x, p1y = points[0]\n for i in range(1, n + 1):\n p2x, p2y = points[i % n]\n if y > min(p1y, p2y):\n if y <= max(p1y, p2y):\n if x <= max(p1x, p2x):\n if p1y != p2y:\n xinters = (y - p1y) * (p2x - p1x) / (p2y - p1y) + p1x\n if p1x == p2x or x <= xinters:\n inside = not inside\n p1x, p1y = p2x, p2y\n return inside", "def in_geofence(self, coordinates):\n\t\tcoords_transformed = ogr.Geometry(ogr.wkbPoint)\n\t\tcoords_transformed.AddPoint(*coordinates)\n\t\treturn self.polygon.Contains(coords_transformed)", "def 
_pointInsidePolygon(self,point,polygon):\n # try:\n # import cv2\n # except:\n # logger.warning(\"Unable to import cv2\")\n # return False\n\n if( len(polygon) < 3 ):\n logger.warning(\"feature._pointInsidePolygon - this is not a valid polygon\")\n return False\n\n if( not isinstance(polygon,list)):\n logger.warning(\"feature._pointInsidePolygon - this is not a valid polygon\")\n return False\n\n #if( not isinstance(point,tuple) ):\n #if( len(point) == 2 ):\n # point = tuple(point)\n #else:\n # logger.warning(\"feature._pointInsidePolygon - this is not a valid point\")\n # return False\n #if( cv2.__version__ == '$Rev:4557'):\n counter = 0\n retVal = True\n p1 = None\n #print \"point: \" + str(point)\n poly = copy.deepcopy(polygon)\n poly.append(polygon[0])\n #for p2 in poly:\n N = len(poly)\n p1 = poly[0]\n for i in range(1,N+1):\n p2 = poly[i%N]\n if( point[1] > np.min((p1[1],p2[1])) ):\n if( point[1] <= np.max((p1[1],p2[1])) ):\n if( point[0] <= np.max((p1[0],p2[0])) ):\n if( p1[1] != p2[1] ):\n test = float((point[1]-p1[1])*(p2[0]-p1[0]))/float(((p2[1]-p1[1])+p1[0]))\n if( p1[0] == p2[0] or point[0] <= test ):\n counter = counter + 1\n p1 = p2\n\n if( counter % 2 == 0 ):\n retVal = False\n return retVal\n return retVal\n #else:\n # result = cv2.pointPolygonTest(np.array(polygon,dtype='float32'),point,0)\n # return result > 0 ", "def point_in_polygon(pnt, poly): # pnt_in_poly(pnt, poly): #\r\n x, y = pnt\r\n N = len(poly)\r\n for i in range(N):\r\n x0, y0, xy = [poly[i][0], poly[i][1], poly[(i + 1) % N]]\r\n c_min = min([x0, xy[0]])\r\n c_max = max([x0, xy[0]])\r\n if c_min < x <= c_max:\r\n p = y0 - xy[1]\r\n q = x0 - xy[0]\r\n y_cal = (x - x0) * p / q + y0\r\n if y_cal < y:\r\n return True\r\n return False", "def inside_polygon(x, y, points):\r\n n = len(points)\r\n inside = False\r\n p1x, p1y = points[0]\r\n for i in range(1, n + 1):\r\n p2x, p2y = points[i % n]\r\n if y > min(p1y, p2y):\r\n if y <= max(p1y, p2y):\r\n if x <= max(p1x, p2x):\r\n if p1y != p2y:\r\n xinters = (y - p1y) * (p2x - p1x) / (p2y - p1y) + p1x\r\n if p1x == p2x or x <= xinters:\r\n inside = not inside\r\n p1x, p1y = p2x, p2y\r\n return inside", "def is_in_polygon(x, y, xs, ys):\n\n xs = [xi - x for xi in xs]\n n = len(xs)\n assert n > 2\n xs.append(xs[0])\n ys = [yi - y for yi in ys]\n assert len(ys) == n\n ys.append(ys[0])\n th = 0\n for i in range(n):\n x1 = xs[i]\n y1 = ys[i]\n x2 = xs[i + 1]\n y2 = ys[i + 1]\n th += sign(x1 * y2 - y1 * x2) * _acos(\n (x1 * x2 + y1 * y2) / (hypot(x1, y1) * hypot(x2, y2))\n )\n return abs(th) > 1", "def inside_polygon(self, x, y, points):\n n = len(points)\n inside = False\n p1x, p1y = points[0]\n for i in range(1, n + 1):\n p2x, p2y = points[i % n]\n if y > min(p1y, p2y):\n if y <= max(p1y, p2y):\n if x <= max(p1x, p2x):\n if p1y != p2y:\n xinters = (y - p1y) * (p2x - p1x) / \\\n (p2y - p1y) + p1x\n if p1x == p2x or x <= xinters:\n inside = not inside\n p1x, p1y = p2x, p2y\n return inside", "def point_in_poly(x, y, poly): \n n = len(poly)\n inside = False\n\n p1x,p1y = poly[0]\n for i in range(n+1):\n p2x,p2y = poly[i % n]\n if y > min(p1y,p2y):\n if y <= max(p1y,p2y):\n if x <= max(p1x,p2x):\n if p1y != p2y:\n xints = (y-p1y)*(p2x-p1x)/(p2y-p1y)+p1x\n if p1x == p2x or x <= xints:\n inside = not inside\n p1x,p1y = p2x,p2y\n\n return inside", "def _is_polyline_closed(latitudes_deg, longitudes_deg):\n\n absolute_lat_diff_deg = numpy.absolute(latitudes_deg[0] - latitudes_deg[-1])\n absolute_lng_diff_deg = numpy.absolute(\n longitudes_deg[0] - longitudes_deg[-1])\n\n return 
(absolute_lat_diff_deg < TOLERANCE_DEG and\n absolute_lng_diff_deg < TOLERANCE_DEG)", "def is_polygon_convex(polygon):\n c = center_of_mass_polygon(polygon)\n for i in range(-1, len(polygon) - 1):\n p0 = polygon[i]\n p1 = polygon[i - 1]\n p2 = polygon[i + 1]\n v0 = subtract_vectors(c, p0)\n v1 = subtract_vectors(p1, p0)\n v2 = subtract_vectors(p2, p0)\n a1 = angle_smallest_vectors(v1, v0)\n a2 = angle_smallest_vectors(v0, v2)\n if a1 + a2 > pi:\n return False\n return True", "def convex_polygon_intersect_test(polygon1, polygon2):\n\n # Find the polygon that has fewer sides so that we can do fewer checks\n polygon_a = polygon1 if len(polygon1) <= len(polygon2) else polygon2\n polygon_b = polygon2 if len(polygon1) > len(polygon2) else polygon1\n\n # Perform Separating Axis Test\n intersect = True\n edge_index = 0\n edges = polygon_a.edges() + polygon_b.edges()\n\n # Loop through the edges of polygonA searching for a separating axis\n while intersect and edge_index < len(edges):\n\n # Get an axis normal to the current edge\n edge = edges[edge_index]\n edge_vector = linalg.sub(edge[1], edge[0])\n projection_axis = linalg.lnormal(edge_vector)\n\n # Get the projection ranges for each polygon onto the projection axis\n min_a, max_a = range_project_polygon(projection_axis, polygon_a)\n min_b, max_b = range_project_polygon(projection_axis, polygon_b)\n\n # test if projections overlap\n if min_a > max_b or max_a < min_b:\n intersect = False\n edge_index += 1\n\n return intersect", "def is_inner(point,poly):\n npts = len(poly)\n p1 = [0.,0.]\n p2 = point\n inner = True\n k = 0\n while inner==True and k < npts:\n p3 = poly[k]\n if k == npts - 1:\n p4 = poly[0]\n else:\n p4 = poly[k+1]\n (intercept,flag) = line_intercept(p1,p2,p3,p4)\n if flag == 1 :\n inner = False\n k = k+1\n return inner", "def poly_to_list_with_overlap(self, polygon):\n added = 0\n polygon_item = polygon.polygon()\n polygon_item.translate(polygon.x(), polygon.y())\n\n # Comparator to determine which x value of two points is the highest\n def compare_x(item1, item2):\n if item1.x() < item2.x():\n return -1\n elif item1.x() > item2.x():\n return 1\n else:\n return 0\n\n # Comparator to determine which y value of two points is the highest\n def compare_y(item1, item2):\n if item1.y() < item2.y():\n return -1\n elif item1.y() > item2.y():\n return 1\n else:\n return 0\n\n # Create two lists, one sorted by ascending x-values, one by ascending y-values\n x_list = sorted(self.potential_edge_splitters, key=cmp_to_key(compare_x))\n y_list = sorted(self.potential_edge_splitters, key=cmp_to_key(compare_y))\n\n # Loop over all children to the polygon\n for item in polygon.childItems():\n # Look only at edges (overlapping of points is handled elsewhere)\n if isinstance(item, PyQt5.QtWidgets.QGraphicsLineItem):\n edge = item\n\n p1 = edge.line().p1()\n p2 = edge.line().p2()\n added_this = 0\n\n # Choose the direction with the largest disparity (to avoid scenario of straight lines)\n # then use the sorted list for that direction\n if abs(p1.x() - p2.x()) > abs(p1.y() - p2.y()):\n mode = \"X\"\n circ_list = x_list\n else:\n mode = \"Y\"\n circ_list = y_list\n\n for circ in circ_list:\n poly = circ.parentItem()\n p = circ.scenePos()\n\n # temp_p needed since edge.contains does not account for the edge being moved in the canvas\n temp_p = circ.scenePos()\n temp_p.setX(temp_p.x() - edge.scenePos().x())\n temp_p.setY(temp_p.y() - edge.scenePos().y())\n\n # Find the edges to split which contain temp_p, if the edge contains decide the orientation (in 
x-\n # or y-direction decided earlier) of p1 and p2, based on this insert the new point in the polygon\n # in the correct position\n if edge.contains(temp_p):\n if edge in poly.childItems():\n pass # Ignore if the edge is in the same polygon as the point\n else:\n if temp_p == p1 or temp_p == p2:\n pass # Don't compare if it contains an edge point, instead handled later by the overlapping points\n elif mode == \"Y\":\n if p1.y() < p2.y(): # Left to right\n index = abs(edge.localIndex)\n polygon_item.insert(index + added, p)\n added += 1\n elif p1.y() > p2.y(): # Right to left\n index = abs(edge.localIndex)\n polygon_item.insert(index + added - added_this, p)\n added_this += 1\n added += 1\n else:\n if p1.x() < p2.x(): # Left to right\n index = abs(edge.localIndex)\n polygon_item.insert(index + added, p)\n added += 1\n elif p1.x() > p2.x(): # Right to left\n index = abs(edge.localIndex)\n polygon_item.insert(index + added - added_this, p)\n added_this += 1\n added += 1\n\n return self.poly_to_list(polygon_item, \"Global\")", "def inner_polygon(poly1,poly2):\n npts1 = len(poly1)\n npts2 = len(poly2)\n if npts1 < 3 or npts2 < 3: return None\n (poly1,angles1) = sort_points(*poly1)\n (poly2,angles2) = sort_points(*poly2)\n # loop through all possible line combinations \n # looking for valid line intersections \n intercepts = []\n for j in range(npts1):\n p1 = poly1[j]\n if j == npts1 - 1:\n p2 = poly1[0]\n else:\n p2 = poly1[j+1]\n for k in range(npts2):\n p3 = poly2[k]\n if k == npts2 - 1:\n p4 = poly2[0]\n else:\n p4 = poly2[k+1]\n (intercept,flag) = line_intercept(p1,p2,p3,p4)\n if flag > 0:\n intercepts.append(intercept)\n #############\n # now determine which points we can get to from\n # the origin without crossing any poly lines, \n # ie the inner set of points\n points = []\n for p in poly1: points.append(p)\n for p in poly2: points.append(p)\n for p in intercepts: points.append(p)\n (points,angles) = sort_points(*points)\n inner_points = []\n for p in points:\n # check against poly1\n inner = is_inner(p,poly1)\n # check against poly2\n if inner == True:\n inner = is_inner(p,poly2)\n if inner == True:\n inner_points.append(p)\n # sort the inner points\n (inner_points,angles) = sort_points(*inner_points)\n return inner_points", "def check_nearness(polygon1, polygon2):\n\n c1, r1 = polygon1.bounding_circle\n c2, r2 = polygon2.bounding_circle\n return c1.distance(c2) <= r1 + r2", "def is_multipoint_on_polygon(feature_1: Sequence, feature_2: Sequence) -> bool:\n points_on_poly = False\n\n points_on_poly = all(\n boolean_point_in_polygon(coords_1, feature_2[1]) for coords_1 in feature_1[1]\n )\n\n if not points_on_poly:\n return points_on_poly\n\n points_on_poly = any(\n boolean_point_in_polygon(coords_1, feature_2[1], {\"ignoreBoundary\": True})\n for coords_1 in feature_1[1]\n )\n\n return points_on_poly", "def _check_curve(layer: ogr.Layer) -> None:\n # Check if the feature geometry is polygonal:\n feature_defn = layer.GetLayerDefn()\n layer.ResetReading()\n feature = layer.GetNextFeature()\n while feature is not None:\n geom = feature.GetGeometryRef()\n name_wkt = geom.ExportToWkt()\n\n # Approximate a curvature by a polygon geometry:\n if 'curv' in name_wkt.lower():\n linear_geom = geom.GetLinearGeometry()\n new_feature = ogr.Feature(feature_defn)\n new_feature.SetGeometryDirectly(linear_geom)\n layer.CreateFeature(new_feature)\n layer.DeleteFeature(feature.GetFID())\n\n feature = layer.GetNextFeature()", "def __polygon_collision(self, polygon):\n raise Exception('--- This methods have 
not been implemented yet! Use circle_collider instead ---')", "def points_inside_poly(points, all_verts):\n return Path(all_verts, close=True).contains_points(points)", "def compare_polygon_to_point(df_point, df_polygon): \n\n #check for each polygon which points are overlaying with it \n df_polygon['geometry'] = pygeos.buffer(df_polygon.geometry,0) #avoid intersection\n spat_tree = pygeos.STRtree(df_point.geometry) # https://pygeos.readthedocs.io/en/latest/strtree.html\n for polygon_row in df_polygon.itertuples():\n df_point_overlap = (df_point.loc[spat_tree.query(polygon_row.geometry,predicate='intersects').tolist()]).sort_index(ascending=True) #get point that overlaps with polygon\n if not df_point_overlap.empty:\n if polygon_row.asset in df_point_overlap['asset'].tolist():\n df_polygon = df_polygon.drop(polygon_row.Index)\n \n return df_polygon.reset_index(drop=True)", "def polygon_overlaps_other_polygon(self, outer_poly):\n contain_list = []\n for inner_poly in self.poly_list:\n if outer_poly == inner_poly:\n pass\n elif all(self.polygon_contains(outer_poly, inner_poly)):\n pass\n elif any(self.polygon_contains(outer_poly, inner_poly)):\n contain_list.append(inner_poly)\n return contain_list", "def is_poly_in_poly(feature_1: Sequence, feature_2: Sequence) -> bool:\n poly_bbox_1 = bbox(feature_1)\n poly_bbox_2 = bbox(feature_2)\n\n if not bbox_overlap(poly_bbox_2, poly_bbox_1):\n return False\n\n feature_1 = polygon_to_line(polygon(feature_1))\n line_coords = get_coords_from_features(feature_1)\n\n for coords in line_coords:\n\n if not boolean_point_in_polygon(coords, feature_2):\n return False\n\n return True", "def is_line_in_poly(feature_1: Sequence, feature_2: Sequence) -> bool:\n line_in_poly = False\n\n line_bbox = bbox(feature_1)\n poly_bbox = bbox(feature_2)\n\n if not bbox_overlap(poly_bbox, line_bbox):\n return False\n\n for i in range(len(feature_1) - 1):\n\n if not boolean_point_in_polygon(feature_1[i], feature_2):\n return False\n\n if not line_in_poly:\n line_in_poly = boolean_point_in_polygon(\n feature_1[i], feature_2, {\"ignoreBoundary\": True}\n )\n\n if not line_in_poly:\n mid = midpoint(point(feature_1[i]), point(feature_1[i + 1]))\n line_in_poly = boolean_point_in_polygon(\n mid, feature_2, {\"ignoreBoundary\": True}\n )\n\n return line_in_poly", "def contains(polygon, point):\n in_hole = functools.reduce(\n lambda P, Q: P and Q,\n [interior.covers(point) for interior in polygon.interiors]\n ) if polygon.interiors else False\n return polygon.covers(point) and not in_hole", "def isinsidepolyXY(a,p):\n closed=False\n\n if len(a) > 2 and dist(a[0],a[-1]) < epsilon:\n closed = True\n\n ## if not closed, use \"unsample\" test to determine if ``p`` lies\n ## on the polyline\n if not closed:\n if unsamplepoly(a,p) == False:\n return False\n else:\n return True\n ## poly is closed polygon\n bb = polybbox(a)\n ## do quick bounding box check\n if not isinsidebbox(bb,p):\n return False\n ## inside the bounding box, do intersection testing\n p2 = add([1,1,0,1],bb[1])\n if vclose(p2,p): ## did we randomly pick an outside point near the\n ## test point?\n p2 = sub(bb[0],[1,1,0,1])\n l = line(p,p2)\n\n pp = intersectSimplePolyXY(l,a)\n if pp == False:\n return False\n return len(pp) % 2 == 1", "def lefton(hedge, point):\r\n\r\n return area2(hedge, point) >= 0", "def points_in_polygon(polygon, points, buffer=0.):\n mpath = Path( polygon )\n return mpath.contains_points(points, radius=-buffer)", "def address_in_service_area(x, y, polygon_list=None, subsectie=None):\n if 
polygon_list is None:\n polygon_list = load_geodata_containers(subsectie=subsectie)\n point = shapely.geometry.Point(float(x), float(y))\n for polygon in polygon_list:\n if polygon.contains(point):\n return True\n return False", "def test_polygon_with_duplicate_nodes_is_valid():\n geom = query_row(db_conf, 'osm_landusages', 30005)['geometry']\n assert geom.is_valid\n assert len(geom.exterior.coords) == 4", "def collision_detection(p, poly):\r\n _eps = 0.00001\r\n _huge = sys.float_info.max\r\n _tiny = sys.float_info.min\r\n \r\n def rayintersectseg(p, edge):\r\n ''' takes a point p=Pt() and an edge of two endpoints a,b=Pt() of a line segment returns boolean\r\n '''\r\n a,b = edge\r\n if a.y > b.y:\r\n a,b = b,a\r\n if p.y == a.y or p.y == b.y:\r\n p = Pt(p.x, p.y + _eps)\r\n \r\n intersect = False\r\n \r\n if (p.y > b.y or p.y < a.y) or (\r\n p.x > max(a.x, b.x)):\r\n return False\r\n \r\n if p.x < min(a.x, b.x):\r\n intersect = True\r\n else:\r\n if abs(a.x - b.x) > _tiny:\r\n m_red = (b.y - a.y) / float(b.x - a.x)\r\n else:\r\n m_red = _huge\r\n if abs(a.x - p.x) > _tiny:\r\n m_blue = (p.y - a.y) / float(p.x - a.x)\r\n else:\r\n m_blue = _huge\r\n intersect = m_blue >= m_red\r\n return intersect\r\n \r\n def _odd(x): return x%2 == 1\r\n \r\n def ispointinside(p, poly):\r\n\r\n return _odd(sum(rayintersectseg(p, edge)\r\n for edge in poly.edges ))\r\n \r\n detection = ispointinside(p,poly)\r\n return detection", "def isSelfIntersecting(poly):\n # For possible improvements, see:\n # http://en.wikipedia.org/wiki/Bentley%E2%80%93Ottmann_algorithm\n # http://geomalgorithms.com/a09-_intersect-3.html\n\n # We start be removing duplicate points and points on straight lines\n polyPruned = Polygon.Utils.prunePoints(poly)\n\n # Polygon must not have any self-intersection for each contour, but also between contours\n # This version only manages single contour polygons, without holes, because we don't need them yet\n # We assume at least, and at most, one contour.\n _checkPoly1Contour(polyPruned)\n\n # Get edges\n edges = _polyEdges(polyPruned)\n\n # Look for intersections\n for i in range(len(edges)):\n e1 = edges[i]\n for j in range(i+1, len(edges)):\n e2 = edges[j]\n if _intersect(e1, e2):\n # logger.error(\"Intersection: e1= %s ; e2= %s\", e1, e2)\n return True\n\n return False", "def ispoly(a):\n return isinstance(a,list) and len(a) > 2 and \\\n len(list(filter(lambda x: not ispoint(x),a))) == 0", "def get_bounding_rect(polygon):\n x1, y1, x2, y2 = float('inf'), float('inf'), float('-inf'), float('-inf')\n for x, y in polygon:\n if x < x1:\n x1 = x\n if y < y1:\n y1 = y\n if x > x2:\n x2 = x\n if y > y2:\n y2 = y\n return x1, y1, x2, y2", "def is_inside(inner_path, outer_path):\r\n if not hasattr(inner_path, 'bounding_box'):\r\n inner_path.bounding_box = CutPlanner.bounding_box(inner_path)\r\n if not hasattr(outer_path, 'bounding_box'):\r\n outer_path.bounding_box = CutPlanner.bounding_box(outer_path)\r\n if outer_path.bounding_box[0] > inner_path.bounding_box[0]:\r\n # outer minx > inner minx (is not contained)\r\n return False\r\n if outer_path.bounding_box[1] > inner_path.bounding_box[1]:\r\n # outer miny > inner miny (is not contained)\r\n return False\r\n if outer_path.bounding_box[2] < inner_path.bounding_box[2]:\r\n # outer maxx < inner maxx (is not contained)\r\n return False\r\n if outer_path.bounding_box[3] < inner_path.bounding_box[3]:\r\n # outer maxy < inner maxy (is not contained)\r\n return False\r\n if outer_path.bounding_box == inner_path.bounding_box:\r\n if outer_path == 
inner_path: # This is the same object.\r\n return False\r\n if not hasattr(outer_path, 'vm'):\r\n outer_path = Polygon([outer_path.point(i / 100.0, error=1e4) for i in range(101)])\r\n vm = VectorMontonizer()\r\n vm.add_cluster(outer_path)\r\n outer_path.vm = vm\r\n for i in range(101):\r\n p = inner_path.point(i / 100.0, error=1e4)\r\n if not outer_path.vm.is_point_inside(p.x, p.y):\r\n return False\r\n return True", "def is_closed(geometry, **kwargs):\n return lib.is_closed(geometry, **kwargs)", "def polygonFromInteriorPoints(geom, preferredEpsg):\n (projTr, llTr) = makeTransformations(4326, preferredEpsg)\n\n geomProj = copyGeom(geom)\n geomProj.Transform(projTr)\n geomOutline = geomProj.ConvexHull()\n geomOutline.Transform(llTr)\n return geomOutline", "def are_polygons_intersecting(a: Vector, b: Vector,\n displacement_a: Vector, displacement_b: Vector) \\\n -> Tuple[bool, bool, Optional[np.ndarray]]:\n intersecting = will_intersect = True\n min_distance = np.inf\n translation, translation_axis = None, None\n for polygon in [a, b]:\n for p1, p2 in zip(polygon, polygon[1:]):\n normal = np.array([-p2[1] + p1[1], p2[0] - p1[0]])\n normal /= np.linalg.norm(normal)\n min_a, max_a = project_polygon(a, normal)\n min_b, max_b = project_polygon(b, normal)\n\n if interval_distance(min_a, max_a, min_b, max_b) > 0:\n intersecting = False\n\n velocity_projection = normal.dot(displacement_a - displacement_b)\n if velocity_projection < 0:\n min_a += velocity_projection\n else:\n max_a += velocity_projection\n\n distance = interval_distance(min_a, max_a, min_b, max_b)\n if distance > 0:\n will_intersect = False\n if not intersecting and not will_intersect:\n break\n if abs(distance) < min_distance:\n min_distance = abs(distance)\n d = a[:-1].mean(axis=0) - b[:-1].mean(axis=0) # center difference\n translation_axis = normal if d.dot(normal) > 0 else -normal\n\n if will_intersect:\n translation = min_distance * translation_axis\n return intersecting, will_intersect, translation", "def point_outside_conus(pt):\n return not pt.within(CONUS[\"poly\"]) and pt.distance(CONUS[\"poly\"]) > 0.001", "def inside(self, x, on_boundary):\n return bool((near(x[0], xmin) or near(x[1], ymin)) and \\\n (not ((near(x[0], xmin) and near(x[1], ymax)) \\\n or (near(x[0], xmax) and near(x[1], ymin)))) \\\n and on_boundary)", "def polygon_contains_other_polygon(self, outer_poly):\n contain_list = []\n for inner_poly in self.poly_list:\n if outer_poly == inner_poly:\n pass\n elif all(self.polygon_contains(outer_poly, inner_poly)):\n contain_list.append(inner_poly)\n return contain_list", "def checkGeom(geodataframe):\n for geometry in geodataframe.geometry:\n if explain_validity(geometry) != 'Valid Geometry':\n print(explain_validity(geometry))", "def find_enclosing_polygon_attributes(self, lon, lat):\n fldvals = {}\n try:\n lon = float(lon)\n lat = float(lat)\n except ValueError:\n raise ValueError(f\"Longitude {lon} or latitude {lat} is not a number\")\n\n start = time.time()\n # Construct point\n pt = ogr.Geometry(ogr.wkbPoint)\n pt.AddPoint(lon, lat)\n # Intersect with spatial index to get ID (fid) of intersecting features,\n # buffer if necessary\n try:\n fldvals = self._find_intersecting_feature_values(pt)\n except ValueError:\n raise\n except GeoException as e:\n self._log.error(f\"No polygon found: {e}\")\n for fn in self.bison_spatial_fields:\n fldvals[fn] = None\n\n # Elapsed time\n ogr_seconds = time.time()-start\n if ogr_seconds > 0.75:\n self._log.log(\n f\"Intersect point {lon}, {lat}; OGR time {ogr_seconds}\",\n 
refname=self.__class__.__name__, log_level=DEBUG)\n\n return fldvals", "def is_vertex_inside(self, point):\n return Geometry.polygon_point_intersection(self.get_point_list(), point)", "def merge_adjacent_polygons(feature):\n if feature.geometry().wkbType() != WKBMultiPolygon:\n return False\n mp = Geometry.get_multipolygon(feature)\n if len(mp) < 2:\n return False\n else:\n geom = None\n for p in mp:\n g = Geometry.fromPolygonXY(p)\n ng = g if geom is None else geom.combine(g)\n if ng.isGeosValid():\n geom = ng\n if geom is not None:\n feature.setGeometry(geom)\n return geom.isGeosValid()", "def test_clip_points_by_polygons_with_holes0(self):\n\n # Define an outer ring\n outer_ring = numpy.array([[106.79, -6.233],\n [106.80, -6.24],\n [106.78, -6.23],\n [106.77, -6.21],\n [106.79, -6.233]])\n\n # Define inner rings\n inner_rings = [numpy.array([[106.77827, -6.2252],\n [106.77775, -6.22378],\n [106.78, -6.22311],\n [106.78017, -6.22530],\n [106.77827, -6.2252]])[::-1],\n numpy.array([[106.78652, -6.23215],\n [106.78642, -6.23075],\n [106.78746, -6.23143],\n [106.78831, -6.23307],\n [106.78652, -6.23215]])[::-1]]\n\n v = Vector(geometry=[Polygon(outer_ring=outer_ring,\n inner_rings=inner_rings)])\n assert v.is_polygon_data\n\n # Write it to file\n tmp_filename = unique_filename(suffix='.shp')\n v.write_to_file(tmp_filename)\n\n # Read polygon it back\n L = read_layer(tmp_filename)\n P = L.get_geometry(as_geometry_objects=True)[0]\n\n outer_ring = P.outer_ring\n inner_ring0 = P.inner_rings[0]\n inner_ring1 = P.inner_rings[1]\n\n # Make some test points\n points = generate_random_points_in_bbox(outer_ring, 1000, seed=13)\n\n # Clip to outer ring, excluding holes\n indices = inside_polygon(points, P.outer_ring, holes=P.inner_rings)\n\n # Sanity\n for point in points[indices, :]:\n # Must be inside outer ring\n assert is_inside_polygon(point, outer_ring)\n\n # But not in any of the inner rings\n assert not is_inside_polygon(point, inner_ring0)\n assert not is_inside_polygon(point, inner_ring1)\n\n if False:\n # Store for visual check\n pol = Vector(geometry=[P])\n tmp_filename = unique_filename(suffix='.shp')\n pol.write_to_file(tmp_filename)\n print 'Polygon with holes written to %s' % tmp_filename\n\n pts = Vector(geometry=points[indices, :])\n tmp_filename = unique_filename(suffix='.shp')\n pts.write_to_file(tmp_filename)\n print 'Clipped points written to %s' % tmp_filename", "def planar_intersection_polygon(area_corners, segment_corners):\n # First test each \n lons = np.array([])\n lats = np.array([])\n for segment_corner in segment_corners:\n if planar_point_inside(segment_corner,area_corners):\n currlon = segment_corner.lon\n # MLS use wrap_longitudes?\n if currlon < 0:\n currlon += 2*math.pi\n lons = np.concatenate((lons,[currlon]))\n lats = np.concatenate((lats,[segment_corner.lat]))\n log.info('Adding intersection from segment '+str(segment_corner))\n for area_corner in area_corners:\n if planar_point_inside(area_corner,segment_corners):\n currlon = area_corner.lon\n # MLS use wrap_longitudes?\n if currlon < 0:\n currlon += 2*math.pi\n lons = np.concatenate((lons,[currlon]))\n lats = np.concatenate((lats,[area_corner.lat]))\n log.info('Adding intersection from area '+str(area_corner))\n\n area_line1 = Line(area_corners[0], area_corners[1])\n area_line2 = Line(area_corners[1], area_corners[2])\n area_line3 = Line(area_corners[2], area_corners[3])\n area_line4 = Line(area_corners[3], area_corners[0])\n\n segment_line1 = Line(segment_corners[0], segment_corners[1])\n segment_line2 = 
Line(segment_corners[1], segment_corners[2])\n segment_line3 = Line(segment_corners[2], segment_corners[3])\n segment_line4 = Line(segment_corners[3], segment_corners[0])\n\n for i in (area_line1, area_line2, area_line3, area_line4):\n for j in (segment_line1, segment_line2, segment_line3, segment_line4):\n intersect = i.intersection(j)\n if intersect:\n log.info('Adding actual intersection '+str(intersect))\n currlon = intersect.lon\n # MLS use wrap_longitudes?\n if intersect.lon < 0:\n currlon += 2*math.pi\n lons = np.concatenate((lons,[currlon]))\n lats = np.concatenate((lats,[intersect.lat]))\n\n minlon = math.degrees(lons.min())\n maxlon = math.degrees(lons.max())\n minlat = math.degrees(lats.min())\n maxlat = math.degrees(lats.max())\n # Coordinate MUST be between -180 and 180\n # MLS use wrap_longitudes?\n if minlon > 180:\n minlon -= 180\n if maxlon > 180:\n maxlon -= 180\n from pyresample.spherical_geometry import Coordinate\n return [Coordinate(minlon,maxlat),\n Coordinate(maxlon,maxlat),\n Coordinate(maxlon,minlat),\n Coordinate(minlon,minlat)]", "def point_in_polygon_2d(polygon, point):\r\n return geometry.gmPointInPolygon2D(polygon, point)", "def check_borders_of_traffic_event_document(self, traffic_event_document):\n included_in_borders = True\n traffic_event_point_document = traffic_event_document.get('point')\n traffic_event_longitude = traffic_event_point_document.get('longitude')\n traffic_event_latitude = traffic_event_point_document.get('latitude')\n\n if (traffic_event_longitude < self.minimum_longitude or traffic_event_longitude > self.maximum_longitude or\n traffic_event_latitude < self.minimum_latitude or traffic_event_latitude > self.maximum_latitude):\n included_in_borders = False\n\n return included_in_borders", "def polygon_clip(subjectPolygon, clipPolygon):\n\n def inside(p):\n return (cp2[0] - cp1[0]) * (p[1] - cp1[1]) > (cp2[1] - cp1[1]) * (p[0] - cp1[0])\n\n def computeIntersection():\n dc = [cp1[0] - cp2[0], cp1[1] - cp2[1]]\n dp = [s[0] - e[0], s[1] - e[1]]\n n1 = cp1[0] * cp2[1] - cp1[1] * cp2[0]\n n2 = s[0] * e[1] - s[1] * e[0]\n n3 = 1.0 / (dc[0] * dp[1] - dc[1] * dp[0])\n return [(n1 * dp[0] - n2 * dc[0]) * n3, (n1 * dp[1] - n2 * dc[1]) * n3]\n\n outputList = subjectPolygon\n cp1 = clipPolygon[-1]\n\n for clipVertex in clipPolygon:\n cp2 = clipVertex\n inputList = outputList\n outputList = []\n s = inputList[-1]\n\n for subjectVertex in inputList:\n e = subjectVertex\n if inside(e):\n if not inside(s):\n outputList.append(computeIntersection())\n outputList.append(e)\n elif inside(s):\n outputList.append(computeIntersection())\n s = e\n cp1 = cp2\n if len(outputList) == 0:\n return None\n return outputList", "def point_inside_polygon(self, location, points):\n # Simplification: if the point is above the mean altitude of all the \n # points, then do not consider it to be inside the polygon. 
We could \n # also perform interesting calculations here, but we won't have that \n # many objects of differing altitude anyway.\n avg_alt = float(sum([point.alt for point in points]))/len(points)\n if avg_alt < location.alt - self.altitude_margin:\n return False\n\n edges = get_point_edges(points)\n num = sum(ray_intersects_segment(location, e[0], e[1]) for e in edges)\n return num % 2 == 1", "def ray_trace(x, y, poly):\n\n @vectorize([bool_(float64, float64)])\n def ray(x, y):\n # where xy is a coordinate\n n = len(poly)\n inside = False\n p2x = 0.0\n p2y = 0.0\n xints = 0.0\n p1x, p1y = poly[0]\n for i in range(n + 1):\n p2x, p2y = poly[i % n]\n if y > min(p1y, p2y):\n if y <= max(p1y, p2y):\n if x <= max(p1x, p2x):\n if p1y != p2y:\n xints = (y - p1y) * (p2x - p1x) / (p2y - p1y) + p1x\n if p1x == p2x or x <= xints:\n inside = not inside\n p1x, p1y = p2x, p2y\n return inside\n\n return ray(x, y)", "def test_create_polygon_with_bad_poly_data_fails(self, api_client):\n url = self.base_url + \"/polygons/\"\n prov = baker.make(Provider)\n data = {\n \"name\": \"NRB\",\n \"price\": 930,\n \"provider\": prov.id,\n \"poly\": \"-98.503358 -29.335668, -98.503086 29.335668, -98.503086 29.335423, -98.503358 29.335423, -98.503358 29.335668\" # noqa\n }\n response = api_client().post(url, data)\n assert response.status_code == 400\n assert 'Points of LinearRing do not form a closed linestring.' in response.data['poly']", "def ispolycw(x, y):\n\n l = len(x)\n if l < 3:\n raise ValueError(\"ispolycw::X length is less than 3\")\n\n if len(y) < 3:\n raise ValueError(\"ispolycw::Y length is less than 3\")\n\n if l != len(y):\n raise ValueError(\"ispolycw::Non-equal sized arrays\")\n\n s = 0.0\n for k in range(0, l):\n kn = (k + 1) % l\n\n s += (x[kn] - x[k]) * (y[kn] + y[k])\n\n return (s > 0.0, 0.5*s) # CW flag, signed area of the polygon", "def Polygon(self, polyline = False):\n\n from geographiclib.polygonarea import PolygonArea\n return PolygonArea(self, polyline)", "def point_inside_polygon(plan, (x,y,theta)):\n count = 0\n ray = line_equation_ap(theta, (x,y))\n for (a, b) in plan:\n (ax, ay) = a\n (bx, by) = b\n wall = line_equation_pp(a, b)\n if not is_parallel(wall, ray):\n (xi, yi) = intersection(wall, ray)\n #print \"Intersection candidate:\", xi, yi\n if abs(xi) < eps:\n xi = 0.0\n if abs(yi) < eps:\n yi = 0.0\n if xi > min(ax,bx)-eps and xi < max(ax,bx)+eps and yi > min(ay,by)-eps and yi < max(ay,by)+eps:\n if sign(xi-x) == sign(cos(theta)) and sign(yi-y) == sign(sin(theta)):\n count += 1\n # IN if count is odd OUT if count is even\n return count % 2 is not 0", "def _confidence_interval_to_polygon(\n x_coords_bottom, y_coords_bottom, x_coords_top, y_coords_top,\n for_performance_diagram=False):\n\n nan_flags_top = numpy.logical_or(\n numpy.isnan(x_coords_top), numpy.isnan(y_coords_top))\n real_indices_top = numpy.where(numpy.invert(nan_flags_top))[0]\n\n nan_flags_bottom = numpy.logical_or(\n numpy.isnan(x_coords_bottom), numpy.isnan(y_coords_bottom))\n real_indices_bottom = numpy.where(numpy.invert(nan_flags_bottom))[0]\n\n if for_performance_diagram:\n y_coords_top = y_coords_top[real_indices_top]\n sort_indices_top = numpy.argsort(y_coords_top)\n y_coords_top = y_coords_top[sort_indices_top]\n x_coords_top = x_coords_top[real_indices_top][sort_indices_top]\n\n y_coords_bottom = y_coords_bottom[real_indices_bottom]\n sort_indices_bottom = numpy.argsort(-y_coords_bottom)\n y_coords_bottom = y_coords_bottom[sort_indices_bottom]\n x_coords_bottom = x_coords_bottom[real_indices_bottom][\n 
sort_indices_bottom]\n else:\n x_coords_top = x_coords_top[real_indices_top]\n sort_indices_top = numpy.argsort(-x_coords_top)\n x_coords_top = x_coords_top[sort_indices_top]\n y_coords_top = y_coords_top[real_indices_top][sort_indices_top]\n\n x_coords_bottom = x_coords_bottom[real_indices_bottom]\n sort_indices_bottom = numpy.argsort(x_coords_bottom)\n x_coords_bottom = x_coords_bottom[sort_indices_bottom]\n y_coords_bottom = y_coords_bottom[real_indices_bottom][\n sort_indices_bottom]\n\n polygon_x_coords = numpy.concatenate((\n x_coords_top, x_coords_bottom, numpy.array([x_coords_top[0]])))\n polygon_y_coords = numpy.concatenate((\n y_coords_top, y_coords_bottom, numpy.array([y_coords_top[0]])))\n\n return polygons.vertex_arrays_to_polygon_object(\n polygon_x_coords, polygon_y_coords)", "def getPolygonBoundaries(self, polygon: Polygon):\n polygon_df = gpd.GeoDataFrame([polygon], columns=['geometry'])\n\n polygon_df.set_crs(epsg=self.output_epsg, inplace=True)\n polygon_df['geometry'] = polygon_df['geometry'].to_crs(epsg=self.input_epsg)\n minx, miny, maxx, maxy = polygon_df['geometry'][0].bounds\n\n polygon_input = 'POLYGON(('\n xcords, ycords = polygon_df['geometry'][0].exterior.coords.xy\n for x, y in zip(list(xcords), list(ycords)):\n polygon_input += f'{x} {y}, '\n polygon_input = polygon_input[:-2]\n polygon_input += '))'\n\n return f\"({[minx, maxx]},{[miny,maxy]})\", polygon_input", "def polygon_contains(self, poly_outer, poly_inner):\n inner_list = self.poly_to_list(poly_inner, \"Global\")\n contain_list = []\n\n # Loop over all points in the inner polygon to see if they are contained by the outer polygon\n for point in inner_list:\n # Points are defined in local coordinates, move them so they are both in the local coordinates\n # of the outer polygon\n p_x = point.x() - poly_outer.x()\n p_y = point.y() - poly_outer.y()\n point.setX(p_x)\n point.setY(p_y)\n\n # Check if the outer polygon contains none, some, or all of the points\n if poly_outer.contains(point):\n true_contain = []\n # check a square area around the point to see if the whole square is contained, else the point\n # is on a edge and should not be included\n for i, j in itertools.product(range(-1, 2), range(-1, 2)):\n point.setX(p_x + i)\n point.setY(p_y + j)\n if poly_outer.contains(point):\n true_contain.append(True)\n else:\n true_contain.append(False)\n # Add to contain_list if the whole square area is inside the outer polygon\n if all(true_contain):\n contain_list.append(True)\n else:\n contain_list.append(False)\n else:\n contain_list.append(False)\n return contain_list", "def _get_polygon(areasrc):\n\n str = areasrc.geometry.wkt\n str = re.sub('POLYGON\\(\\(', '', str)\n str = re.sub('\\)\\)', '', str)\n aa = re.split('\\,', str)\n lons = []\n lats = []\n for str in aa:\n bb = re.split('\\s+', re.sub('^\\s+', '', str))\n lons.append(float(bb[0]))\n lats.append(float(bb[1]))\n return lons, lats", "def polygon_clip(subjectPolygon, clipPolygon):\n def inside(p):\n return(cp2[0]-cp1[0])*(p[1]-cp1[1]) > (cp2[1]-cp1[1])*(p[0]-cp1[0])\n \n def computeIntersection():\n dc = [ cp1[0] - cp2[0], cp1[1] - cp2[1] ]\n dp = [ s[0] - e[0], s[1] - e[1] ]\n n1 = cp1[0] * cp2[1] - cp1[1] * cp2[0]\n n2 = s[0] * e[1] - s[1] * e[0] \n n3 = 1.0 / (dc[0] * dp[1] - dc[1] * dp[0])\n return [(n1*dp[0] - n2*dc[0]) * n3, (n1*dp[1] - n2*dc[1]) * n3]\n\n outputList = subjectPolygon\n cp1 = clipPolygon[-1]\n\n for clipVertex in clipPolygon:\n cp2 = clipVertex\n inputList = outputList\n outputList = []\n s = inputList[-1]\n\n for 
subjectVertex in inputList:\n e = subjectVertex\n if inside(e):\n if not inside(s):\n outputList.append(computeIntersection())\n outputList.append(e)\n elif inside(s):\n outputList.append(computeIntersection())\n s = e\n cp1 = cp2\n if len(outputList) == 0:\n return None\n return(outputList)", "def test_nested_exception(testing_polygon):\n with pytest.raises(\n ValueError,\n match=\"No Shapely geometry can be created from null value\",\n ):\n geometry_union = shapely.ops.unary_union(\n [ele.footprint_geometry for ele in testing_polygon]\n )\n with pytest.raises(\n ValueError,\n match=\"No Shapely geometry can be created from null value\",\n ):\n geometry_union = shapely.ops.unary_union(\n [ele.footprint_geometry.buffer(0.00) for ele in testing_polygon]\n )\n\n polygonlist = _polygon_chain(testing_polygon)\n assert type(polygonlist) is list\n assert len(polygonlist) == 262\n filtered_geom = _filter_geom(polygonlist)\n assert len(filtered_geom) == 199\n geometry_union = shapely.ops.unary_union(filtered_geom)\n\n assert geometry_union.is_valid", "def test_generalized_banana_polygon_is_valid():\n park = query_row(db_conf, 'osm_landusages', 7101)\n # geometry is not valid\n assert not park['geometry'].is_valid, park\n park = query_row(db_conf, 'osm_landusages_gen0', 7101)\n # but simplified geometies are valid\n assert park['geometry'].is_valid, park\n park = query_row(db_conf, 'osm_landusages_gen1', 7101)\n assert park['geometry'].is_valid, park", "def get_boundary_as_polygon(self, do_geo=True):\n xhor, yhor = self.get_coordinates()\n dimensions = xhor.shape\n xbottom = xhor[0, :]\n xright = xhor[:, dimensions[1]-1]\n xtop = xhor[dimensions[0]-1, :][::-1]\n xleft = xhor[:, 0][::-1]\n\n ybottom = yhor[0, :]\n yright = yhor[:, dimensions[1]-1]\n ytop = yhor[dimensions[0]-1, :][::-1]\n yleft = yhor[:, 0][::-1]\n\n lon_square = np.concatenate((xbottom, xright, xtop, xleft))\n lat_square = np.concatenate((ybottom, yright, ytop, yleft))\n\n return lon_square, lat_square", "def removeIslands(self):\n if isinstance(self.substrates, Polygon):\n return\n mainland = []\n for i, substrate in enumerate(self.substrates.geoms):\n ismainland = True\n for j, otherSubstrate in enumerate(self.substrates.geoms):\n if j == i:\n continue\n if Polygon(otherSubstrate.exterior.coords).contains(substrate):\n ismainland = False\n break\n if ismainland:\n mainland.append(substrate)\n self.substrates = shapely.geometry.collection.GeometryCollection(mainland)\n self.oriented = False", "def edge_not_in_component(edge, component):\n x_start = edge[0]\n x_stop = edge[0] + edge[2]\n y_start = edge[1]\n y_stop = edge[1] + edge[3]\n if x_start >= component[1].start and x_stop <= component[1].stop and y_start >= component[0].start and y_stop <= \\\n component[0].stop:\n return False\n else:\n return True", "def polygon_clip(subjectPolygon, clipPolygon):\n def inside(p):\n return (cp2[0] - cp1[0]) * (p[1] - cp1[1]) > (cp2[1] - cp1[1]) * (p[0] - cp1[0])\n\n def computeIntersection():\n dc = [cp1[0] - cp2[0], cp1[1] - cp2[1]]\n dp = [s[0] - e[0], s[1] - e[1]]\n n1 = cp1[0] * cp2[1] - cp1[1] * cp2[0]\n n2 = s[0] * e[1] - s[1] * e[0]\n n3 = 1.0 / (dc[0] * dp[1] - dc[1] * dp[0])\n return [(n1 * dp[0] - n2 * dc[0]) * n3, (n1 * dp[1] - n2 * dc[1]) * n3]\n\n outputList = subjectPolygon\n cp1 = clipPolygon[-1]\n\n for clipVertex in clipPolygon:\n cp2 = clipVertex\n inputList = outputList\n outputList = []\n s = inputList[-1]\n\n for subjectVertex in inputList:\n e = subjectVertex\n if inside(e):\n if not inside(s):\n 
outputList.append(computeIntersection())\n outputList.append(e)\n elif inside(s):\n outputList.append(computeIntersection())\n s = e\n cp1 = cp2\n if len(outputList) == 0:\n return None\n return (outputList)", "def PolygonPath(polygon):\n\n def coding(ob):\n # The codes will be all \"LINETO\" commands, except for \"MOVETO\"s at the\n # beginning of each subpath\n n = len(getattr(ob, 'coords', None) or ob)\n vals = ones(n, dtype=Path.code_type) * Path.LINETO\n vals[0] = Path.MOVETO\n return vals\n\n if hasattr(polygon, 'geom_type'): # Shapely\n ptype = polygon.geom_type\n if ptype == 'Polygon':\n polygon = [Polygon(polygon)]\n elif ptype == 'MultiPolygon':\n polygon = [Polygon(p) for p in polygon]\n else:\n raise ValueError(\n \"A polygon or multi-polygon representation is required\")\n\n else: # GeoJSON\n polygon = getattr(polygon, '__geo_interface__', polygon)\n ptype = polygon[\"type\"]\n if ptype == 'Polygon':\n polygon = [Polygon(polygon)]\n elif ptype == 'MultiPolygon':\n polygon = [Polygon(p) for p in polygon['coordinates']]\n else:\n raise ValueError(\n \"A polygon or multi-polygon representation is required\")\n\n vertices = concatenate([\n concatenate([asarray(t.exterior)[:, :2]] +\n [asarray(r)[:, :2] for r in t.interiors])\n for t in polygon])\n codes = concatenate([\n concatenate([coding(t.exterior)] +\n [coding(r) for r in t.interiors]) for t in polygon])\n\n return Path(vertices, codes)", "def lineToPolygon(geom):\n assert(geom[\"type\"] == \"LineString\")\n # LineString is only the exterior line of a polygon (no holes possible)\n return geojson.Polygon(coordinates=[geom[\"coordinates\"]], validate=True)", "def are_vertices_clockwise(self,line):\r\n \r\n import numpy as np\r\n \r\n signed_area = 0\r\n for idx in range(line.shape[0]):\r\n \r\n x1 = line[idx,0]\r\n y1 = line[idx,1]\r\n if idx == line.shape[0]-1:\r\n x2 = line[0,0]\r\n y2 = line[0,1]\r\n else:\r\n x2 = line[idx+1,0]\r\n y2 = line[idx+1,1]\r\n \r\n signed_area += (x1 * y2 - x2 * y1)\r\n \r\n return (np.sign(signed_area) == -1.)", "def are_vertices_clockwise(self,line):\r\n \r\n import numpy as np\r\n \r\n signed_area = 0\r\n for idx in range(line.shape[0]):\r\n \r\n x1 = line[idx,0]\r\n y1 = line[idx,1]\r\n if idx == line.shape[0]-1:\r\n x2 = line[0,0]\r\n y2 = line[0,1]\r\n else:\r\n x2 = line[idx+1,0]\r\n y2 = line[idx+1,1]\r\n \r\n signed_area += (x1 * y2 - x2 * y1)\r\n \r\n return (np.sign(signed_area) == -1.)", "def collisionDetecter(shipPoints,asteroidPoints):\n\n # Getting ship's corners\n sP1 = shipPoints[0]\n sP2 = shipPoints[1]\n sP3 = shipPoints[2]\n\n # Here, we check for every possible combination of line intersections\n # If one of them is crossing, then we have a crossing\n # If none of them are crossing, then we don't have a crossing\n for i in range(len(asteroidPoints)-1):\n\n intersectTest = intersect(sP1,sP2,asteroidPoints[i],asteroidPoints[i+1])\n if intersectTest == True:\n return True\n\n intersectTest = intersect(sP1,sP3,asteroidPoints[i],asteroidPoints[i+1])\n if intersectTest == True:\n return True\n\n intersectTest = intersect(sP2,sP3,asteroidPoints[i],asteroidPoints[i+1])\n if intersectTest == True:\n return True\n\n # These specific cases check for the line between last point and first point\n intersectTest = intersect(sP1,sP2,asteroidPoints[-1],asteroidPoints[0])\n if intersectTest == True:\n return True\n\n intersectTest = intersect(sP1,sP3,asteroidPoints[-1],asteroidPoints[0])\n if intersectTest == True:\n return True\n\n intersectTest = 
intersect(sP2,sP3,asteroidPoints[-1],asteroidPoints[0])\n if intersectTest == True:\n return True\n\n return False", "def isoutside(coords, shape):\n # Label external pores for trimming below\n if len(shape) == 1: # Spherical\n # Find external points\n r = np.sqrt(np.sum(coords**2, axis=1))\n Ps = r > shape[0]\n elif len(shape) == 2: # Cylindrical\n # Find external pores outside radius\n r = np.sqrt(np.sum(coords[:, [0, 1]]**2, axis=1))\n Ps = r > shape[0]\n # Find external pores above and below cylinder\n if shape[1] > 0:\n Ps = Ps + (coords[:, 2] > shape[1])\n Ps = Ps + (coords[:, 2] < 0)\n else:\n pass\n elif len(shape) == 3: # Rectilinear\n shape = np.array(shape, dtype=float)\n try:\n lo_lim = shape[:, 0]\n hi_lim = shape[:, 1]\n except IndexError:\n lo_lim = np.array([0, 0, 0])\n hi_lim = shape\n Ps1 = np.any(coords > hi_lim, axis=1)\n Ps2 = np.any(coords < lo_lim, axis=1)\n Ps = Ps1 + Ps2\n return Ps", "def is_valid(self):\n if len(self.exterior) < 3:\n return False\n return self.to_shapely_polygon().is_valid", "def is_line_in_multipoly(feature_1: Sequence, feature_2: Sequence) -> bool:\n\n return any(is_line_in_poly(feature_1, coords_2) for coords_2 in feature_2)", "def points_in_convex_polygon_jit(points, polygon, clockwise=True):\n # first convert polygon to directed lines\n num_points_of_polygon = polygon.shape[1]\n num_points = points.shape[0]\n num_polygons = polygon.shape[0]\n vec1 = polygon[:, [num_points_of_polygon - 1] + list(range(num_points_of_polygon - 1)), :]\n if clockwise:\n vec1 = polygon - vec1\n else:\n vec1 = vec1 - polygon\n # vec1: [num_polygon, num_points_of_polygon, 2]\n ret = np.zeros((num_points, num_polygons), dtype=np.bool_)\n for i in range(num_points):\n for j in range(num_polygons):\n success = True\n for k in range(num_points_of_polygon):\n cross = vec1[j, k, 1] * (polygon[j, k, 0] - points[i, 0])\n cross -= vec1[j, k, 0] * (polygon[j, k, 1] - points[i, 1])\n if cross >= 0:\n success = False\n break\n ret[i, j] = success\n return ret", "def create_geofence(self):\n\t\tring = ogr.Geometry(ogr.wkbLinearRing)\n\t\tring.AddPoint(*self.north_coords)\n\t\tring.AddPoint(*self.northeast_coords)\n\t\tring.AddPoint(*self.east_coords)\n\t\tring.AddPoint(*self.southeast_coords)\n\t\tring.AddPoint(*self.south_coords)\n\t\tring.AddPoint(*self.southwest_coords)\n\t\tring.AddPoint(*self.west_coords)\n\t\tring.AddPoint(*self.northwest_coords)\n\t\tring.AddPoint(*self.north_coords)\n\t\tself.polygon.AddGeometry(ring)", "def check_infinite(coord, sides, coordinates):\n return is_border(coord, sides)\\\n and coord not in coordinates\\\n and (\\\n (coord[0]+1, coord[1]) not in coordinates and (coord[0]-1, coord[1]) not in coordinates\\\n or (coord[0], coord[1]+1) not in coordinates and (coord[0], coord[1]-1) not in coordinates)", "def within(point: tuple, box: tuple) -> bool:\r\n \r\n return box[0] < point[0] < box[2] and box[1] < point[1] < box[3]", "def inside(point, rectangle):\n\n ll = rectangle.getP1() # assume p1 is ll (lower left)\n ur = rectangle.getP2() # assume p2 is ur (upper right)\n\n return ll.getX() < point.getX() < ur.getX() and ll.getY() < point.getY() < ur.getY()", "def inside(points, polygons, short_circuit='any', precision=0.001):\n poly = []\n if isinstance(polygons, PolygonSet):\n poly.extend(polygons.polygons)\n elif isinstance(polygons, CellReference) or isinstance(\n polygons, CellArray):\n poly.extend(polygons.get_polygons())\n else:\n for obj in polygons:\n if isinstance(obj, PolygonSet):\n poly.extend(obj.polygons)\n elif isinstance(obj, 
CellReference) or isinstance(obj, CellArray):\n poly.extend(obj.get_polygons())\n else:\n poly.append(obj)\n if hasattr(points[0][0], '__iter__'):\n pts = points\n sc = 1 if short_circuit == 'any' else -1\n else:\n pts = (points, )\n sc = 0\n return clipper.inside(pts, poly, sc, 1 / precision)", "def buildings_in_area(self, polygon):\n return [b for b in self.buildings if polygon.contains(b.geometry.convex_hull)]", "def ispolar(self, pole=None):\n\n if not isinstance(self.crs, GeographicalCRS):\n raise CRSError(\"ispolar defined only for geographical CRS\")\n\n if pole is None:\n pole = Point((0, 90), crs=SphericalEarth)\n\n lon0 = geodesy.reduce_deg(self[-1].vertex[0])\n sum_angle = 0.0\n for vertex in self.vertices:\n lon1 = geodesy.reduce_deg(vertex[0])\n if _cdateline.crosses_dateline(lon0, lon1):\n sum_angle += 360.0 + lon1 - lon0\n else:\n sum_angle += lon1 - lon0\n lon0 = lon1\n\n return True if abs(sum_angle) > 1e-4 else False", "def remove_excess_polygon(polygons_dict, region):\n start_len = len(polygons_dict)\n poly_region_default_area = area(\n geojson.Feature(geometry=region, properties={}).geometry)\n idx = 0\n iteration_range = start_len\n while idx < iteration_range:\n intersection_polygon_area = 0\n poly_list = []\n poly_copy = copy.deepcopy(polygons_dict)\n del poly_copy[idx]\n for el in poly_copy:\n el_poly = shapely.geometry.asShape(el['geometry'])\n poly_list.append(el_poly)\n union_poly = cascaded_union(poly_list)\n intersection_polygon = union_poly.intersection(region)\n if not (intersection_polygon.is_empty and union_poly.is_empty):\n intersection_polygon_area = area(geojson.Feature(geometry=intersection_polygon, properties={}).geometry)\n else:\n break\n if float(\"{0:.2f}\".format(poly_region_default_area)) == float(\"{0:.2f}\".format(intersection_polygon_area)):\n del polygons_dict[idx]\n iteration_range -= 1\n else:\n idx += 1\n if len(polygons_dict) > 0 and (len(polygons_dict) != start_len):\n return polygons_dict\n else:\n return None" ]
[ "0.69540954", "0.6897939", "0.6799872", "0.6788985", "0.67285305", "0.66589004", "0.6657012", "0.6512406", "0.64687806", "0.64495367", "0.64015037", "0.63911974", "0.6390966", "0.6381956", "0.6351574", "0.62924826", "0.62201375", "0.61754906", "0.611618", "0.6106009", "0.6104662", "0.609203", "0.6080931", "0.60282606", "0.60035056", "0.5960359", "0.5951692", "0.59430593", "0.59157705", "0.59030616", "0.59023666", "0.58923376", "0.587286", "0.5855798", "0.5850758", "0.5833058", "0.58010954", "0.5768794", "0.5728938", "0.57127947", "0.57077616", "0.56931317", "0.56892717", "0.5666108", "0.56550115", "0.5643677", "0.56426054", "0.56195265", "0.5618481", "0.5600495", "0.559353", "0.5585914", "0.55587894", "0.5536677", "0.5533122", "0.5518268", "0.5492577", "0.54778975", "0.5470581", "0.5465384", "0.54649734", "0.54606897", "0.5453566", "0.5452695", "0.54280317", "0.54173034", "0.54157025", "0.5411074", "0.5409021", "0.539875", "0.5397899", "0.53974736", "0.5396619", "0.5375168", "0.53732294", "0.5364831", "0.53631616", "0.53587896", "0.5355858", "0.53335077", "0.53284585", "0.5327285", "0.5318432", "0.5316597", "0.53063905", "0.5303823", "0.5303823", "0.529589", "0.529428", "0.52917486", "0.52826965", "0.5279768", "0.52790743", "0.5277244", "0.52764845", "0.52659035", "0.5250186", "0.5248101", "0.52431655", "0.5235472" ]
0.7383287
0
Given a Polygon Geometry object in lat/long, determine whether it crosses the date line, and if so, split it into a multipolygon with a part on either side. Use the given preferred EPSG to perform calculations. Return a new Geometry in lat/long.
def splitAtDateline(geom, preferredEpsg):
    crosses = crossesDateline(geom, preferredEpsg)
    if crosses:
        (projTr, llTr) = makeTransformations(4326, preferredEpsg)
        coords = getCoords(geom)
        (x, y) = (coords[:, 0], coords[:, 1])
        (yMin, yMax) = (y.min(), y.max())
        xMinPositive = None
        xMaxNegative = None
        xGe0 = (x >= 0)
        xLt0 = (x < 0)
        if xGe0.any() > 0 and xLt0.any() > 0:
            xMaxNegative = x[xLt0].max()
            xMinPositive = x[xGe0].min()

            # Create rectangles for the east and west hemispheres, constrained by the
            # extent of this polygon. Note that this assumes that we do not
            # cross both the date line, and also the prime (zero) meridian. This may not
            # always be true, notably when we are close to the pole.
            eastHemiRectCoords = [[xMinPositive, yMax], [xMinPositive, yMin], [180, yMin],
                [180, yMax], [xMinPositive, yMax]]
            eastHemiRectJson = repr({'type':'Polygon', 'coordinates':[eastHemiRectCoords]})
            westHemiRectCoords = [[-180, yMax], [-180, yMin], [xMaxNegative, yMin],
                [xMaxNegative, yMax], [-180, yMax]]
            westHemiRectJson = repr({'type':'Polygon', 'coordinates':[westHemiRectCoords]})
            eastHemiRect = ogr.CreateGeometryFromJson(eastHemiRectJson)
            westHemiRect = ogr.CreateGeometryFromJson(westHemiRectJson)

            geomProj = copyGeom(geom)
            geomProj.Transform(projTr)
            eastHemiRect.Transform(projTr)
            westHemiRect.Transform(projTr)

            eastHemiPart = geomProj.Intersection(eastHemiRect)
            westHemiPart = geomProj.Intersection(westHemiRect)
            eastHemiPart.Transform(llTr)
            westHemiPart.Transform(llTr)

            # Put these together as a single multipolygon
            eastPartCoords = getCoords(eastHemiPart)
            westPartCoords = getCoords(westHemiPart)

            # Discard any vertices which are still on the wrong side of the 180 line. I
            # do not understand what is going on here, but I have invested far more of
            # my valuable time than I should, and this kludge will be a reasonable approximation.
            eastPartCoords = eastPartCoords[eastPartCoords[:, 0] > 0, :]
            westPartCoords = westPartCoords[westPartCoords[:, 0] < 0, :]

            # Convert to lists
            eastPartCoords = eastPartCoords.tolist()
            westPartCoords = westPartCoords.tolist()

            # Discard anything left with only 2 points
            if len(eastPartCoords) < 3:
                eastPartCoords = []
            if len(westPartCoords) < 3:
                westPartCoords = []

            # Close polygons. What a kludge.....
            if len(eastPartCoords) > 2:
                if eastPartCoords[-1][0] != eastPartCoords[0][0] or eastPartCoords[-1][1] != eastPartCoords[0][1]:
                    eastPartCoords.append(eastPartCoords[0])
            if len(westPartCoords) > 2:
                if westPartCoords[-1][0] != westPartCoords[0][0] or westPartCoords[-1][1] != westPartCoords[0][1]:
                    westPartCoords.append(westPartCoords[0])

            # Make a multi-polygon from the two parts
            coordsMulti = [[eastPartCoords], [westPartCoords]]
            jsonStr = repr({'type':'MultiPolygon', 'coordinates':coordsMulti})
            newGeom = ogr.CreateGeometryFromJson(jsonStr)
        else:
            # It didn't really cross the date line, but seems to, due to rounding
            # error in crossesDateline().
            newGeom = copyGeom(geom)
    else:
        newGeom = copyGeom(geom)
    return newGeom
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def crossesDateline(geom, preferredEpsg):\n (xMin, xMax, yMin, yMax) = geom.GetEnvelope()\n (projTr, llTr) = makeTransformations(4326, preferredEpsg)\n\n geomProj = copyGeom(geom)\n geomProj.Transform(projTr)\n dateLineGeom = ogr.Geometry(wkt='LINESTRING(180 {}, 180 {})'.format(yMin, yMax))\n try:\n dateLineGeom.Transform(projTr)\n crosses = geomProj.Intersects(dateLineGeom)\n except Exception:\n # If we can't transform into the preferred EPSG, then it seems likely that\n # the geom is nowhere near the date line. \n crosses = False\n return crosses", "def polygonFromInteriorPoints(geom, preferredEpsg):\n (projTr, llTr) = makeTransformations(4326, preferredEpsg)\n\n geomProj = copyGeom(geom)\n geomProj.Transform(projTr)\n geomOutline = geomProj.ConvexHull()\n geomOutline.Transform(llTr)\n return geomOutline", "def cutPoly(self,geom,startPt,endPt,debug=False):\r\n #if we have disjoint Multi geometry as geom to split we need to iterate over its parts\r\n splittedGeoms=[]\r\n leftFragments=[]\r\n rightFragments=[]\r\n #if self.debug: print \"Number of geoms when slicing: \",str(len(geom.asGeometryCollection()))\r\n for geomPart in geom.asGeometryCollection():\r\n #split the actual part by cut line defined by startPt,endPt\r\n (res,splittedGeomsPart,topo)=geomPart.splitGeometry([startPt,endPt],False)\r\n splittedGeoms+=splittedGeomsPart\r\n #Add the remaining geomPart to the rightFragments or letfFragments\r\n #depending on distance\r\n d=self.signedDistCentroidFromLine(geomPart,startPt,endPt)\r\n if d>0:\r\n rightFragments.append(geomPart)\r\n else:\r\n leftFragments.append(geomPart)\r\n #if self.debug: print j,splittedGeoms\r\n\r\n for fragment in splittedGeoms:\r\n \"\"\"\r\n calculate signed distance of centroid of fragment and the splitline\r\n if signed distance is below zero, the point is to the left of the line\r\n if above zero the point is to the right of the line\r\n \"\"\"\r\n d=self.signedDistCentroidFromLine(fragment,startPt,endPt)\r\n #if debug==True:\r\n #if self.debug: print d\r\n\r\n if d>0:\r\n rightFragments.append(fragment)\r\n else:\r\n leftFragments.append(fragment)\r\n\r\n #if self.debug: print \"Left frags:\",len(leftFragments),\"Right frags:\",len(rightFragments)\r\n leftGeom=self.buildMultiPolygon(leftFragments)\r\n rightGeom=self.buildMultiPolygon(rightFragments)\r\n return leftGeom,rightGeom", "def findSensibleProjection(geom):\n coords = getCoords(geom)\n y = coords[:, 1]\n x = coords[:, 0]\n yMin = y.min()\n yMax = y.max()\n if (yMax - yMin) > 90:\n # We are crossing a lot of latitude, which suggests that we have a \n # long strip> In this case, we don't even bother to suggest an EPSG. \n epsg = None\n elif yMin < -80:\n # We are nearing the south pole, so go with UPS south\n epsg = 32761\n elif yMax > 80:\n # Nearing north pole, so UPS North\n epsg = 32661\n else:\n # Work out a UTM zone. 
Note that we use the median value to get a rough \n # idea of the centre, rather than the mean, because the mean is subject to all \n # sorts of problems when crossing the date line\n xMedian = numpy.median(x)\n yMedian = numpy.median(y)\n zone = int((xMedian + 180)/6) % 60 + 1\n if yMedian < 0:\n epsgBase = 32700\n else:\n epsgBase = 32600\n epsg = epsgBase + zone\n return epsg", "def process_latlon(self):\n data = self.unixtext.replace(\"\\n\", \" \")\n search = LAT_LON_PREFIX.search(data)\n if search is None:\n return None\n pos = search.start()\n newdata = data[pos+9:]\n # Go find our next non-digit, non-space character, if we find it, we\n # should truncate our string, this could be improved, I suspect\n search = re.search(r\"[^\\s0-9]\", newdata)\n if search is not None:\n pos2 = search.start()\n newdata = newdata[:pos2]\n\n poly = str2polygon(newdata)\n if poly is None:\n return None\n\n # check 0, PGUM polygons are east longitude akrherz/pyIEM#74\n if self.tp.source == 'PGUM':\n newpts = [[0 - pt[0], pt[1]] for pt in poly.exterior.coords]\n poly = Polygon(newpts)\n\n # check 1, is the polygon valid?\n if not poly.is_valid:\n self.tp.warnings.append(\n (\"LAT...LON polygon is invalid!\\n%s\") % (poly.exterior.xy,))\n return\n # check 2, is the exterior ring of the polygon clockwise?\n if poly.exterior.is_ccw:\n self.tp.warnings.append(\n (\"LAT...LON polygon exterior is CCW, reversing\\n%s\"\n ) % (poly.exterior.xy,))\n poly = Polygon(zip(poly.exterior.xy[0][::-1],\n poly.exterior.xy[1][::-1]))\n self.giswkt = 'SRID=4326;%s' % (dumps(MultiPolygon([poly]),\n rounding_precision=6),)\n return poly", "def ispolygon(a):\n return ispoly(a) and dist(a[0],a[-1]) < epsilon", "def SplitIntoPolygons(shape):\n ret = []\n this_polygon = []\n restart_indices = set(shape.parts)\n for idx, point in enumerate(shape.points):\n if idx in restart_indices:\n if this_polygon:\n ret.append(this_polygon)\n this_polygon = [[point[0], point[1]]]\n else:\n this_polygon.append([point[0], point[1]])\n if this_polygon:\n ret.append(this_polygon)\n return ret", "def polygon_to_multipolygon(geom):\n if geom.__class__.__name__ == 'Polygon':\n g = OGRGeometry(OGRGeomType('MultiPolygon'))\n g.add(geom)\n return g\n elif geom.__class__.__name__ == 'MultiPolygon':\n return geom\n else:\n raise ValueError('Geom is neither Polygon nor MultiPolygon.')", "def poly_to_list_with_overlap(self, polygon):\n added = 0\n polygon_item = polygon.polygon()\n polygon_item.translate(polygon.x(), polygon.y())\n\n # Comparator to determine which x value of two points is the highest\n def compare_x(item1, item2):\n if item1.x() < item2.x():\n return -1\n elif item1.x() > item2.x():\n return 1\n else:\n return 0\n\n # Comparator to determine which y value of two points is the highest\n def compare_y(item1, item2):\n if item1.y() < item2.y():\n return -1\n elif item1.y() > item2.y():\n return 1\n else:\n return 0\n\n # Create two lists, one sorted by ascending x-values, one by ascending y-values\n x_list = sorted(self.potential_edge_splitters, key=cmp_to_key(compare_x))\n y_list = sorted(self.potential_edge_splitters, key=cmp_to_key(compare_y))\n\n # Loop over all children to the polygon\n for item in polygon.childItems():\n # Look only at edges (overlapping of points is handled elsewhere)\n if isinstance(item, PyQt5.QtWidgets.QGraphicsLineItem):\n edge = item\n\n p1 = edge.line().p1()\n p2 = edge.line().p2()\n added_this = 0\n\n # Choose the direction with the largest disparity (to avoid scenario of straight lines)\n # then use the sorted 
list for that direction\n if abs(p1.x() - p2.x()) > abs(p1.y() - p2.y()):\n mode = \"X\"\n circ_list = x_list\n else:\n mode = \"Y\"\n circ_list = y_list\n\n for circ in circ_list:\n poly = circ.parentItem()\n p = circ.scenePos()\n\n # temp_p needed since edge.contains does not account for the edge being moved in the canvas\n temp_p = circ.scenePos()\n temp_p.setX(temp_p.x() - edge.scenePos().x())\n temp_p.setY(temp_p.y() - edge.scenePos().y())\n\n # Find the edges to split which contain temp_p, if the edge contains decide the orientation (in x-\n # or y-direction decided earlier) of p1 and p2, based on this insert the new point in the polygon\n # in the correct position\n if edge.contains(temp_p):\n if edge in poly.childItems():\n pass # Ignore if the edge is in the same polygon as the point\n else:\n if temp_p == p1 or temp_p == p2:\n pass # Don't compare if it contains an edge point, instead handled later by the overlapping points\n elif mode == \"Y\":\n if p1.y() < p2.y(): # Left to right\n index = abs(edge.localIndex)\n polygon_item.insert(index + added, p)\n added += 1\n elif p1.y() > p2.y(): # Right to left\n index = abs(edge.localIndex)\n polygon_item.insert(index + added - added_this, p)\n added_this += 1\n added += 1\n else:\n if p1.x() < p2.x(): # Left to right\n index = abs(edge.localIndex)\n polygon_item.insert(index + added, p)\n added += 1\n elif p1.x() > p2.x(): # Right to left\n index = abs(edge.localIndex)\n polygon_item.insert(index + added - added_this, p)\n added_this += 1\n added += 1\n\n return self.poly_to_list(polygon_item, \"Global\")", "def isPointInPolygon(xPolygon, yPolygon, xPt, yPt):\n \n # How to tell if a point is inside a polygon:\n # Determine the change in angle made by the point and the vertices\n # of the polygon. Add up the delta(angle)'s from the first (include\n # the first point again at the end). If the point is inside the\n # polygon, then the total angle will be +/-360 deg. If the point is\n # outside, then the total angle will be 0 deg. 
Points on the edge will\n # outside.\n # This is called the Winding Algorithm\n # http://geomalgorithms.com/a03-_inclusion.html\n\n n = len(xPolygon)\n # Array for the angles\n angle = np.zeros(n)\n\n # add first vertex to the end\n xPolygon1 = np.append( xPolygon, xPolygon[0] )\n yPolygon1 = np.append( yPolygon, yPolygon[0] )\n\n wn = 0 # winding number counter\n\n # Loop through the edges of the polygon\n for i in range(n):\n # if edge crosses upward (includes its starting endpoint, and excludes its final endpoint)\n if yPolygon1[i] <= yPt and yPolygon1[i+1] > yPt:\n # if (P is strictly left of E[i]) // Rule #4\n if isLeft(xPolygon1[i], yPolygon1[i], xPolygon1[i+1], yPolygon1[i+1], xPt, yPt) > 0: \n wn += 1 # a valid up intersect right of P.x\n\n # if edge crosses downward (excludes its starting endpoint, and includes its final endpoint)\n if yPolygon1[i] > yPt and yPolygon1[i+1] <= yPt:\n # if (P is strictly right of E[i]) // Rule #4\n if isLeft(xPolygon1[i], yPolygon1[i], xPolygon1[i+1], yPolygon1[i+1], xPt, yPt) < 0: \n wn -= 1 # a valid up intersect right of P.x\n\n # wn = 0 only when P is outside the polygon\n if wn == 0:\n return False\n else:\n return True", "def merge_adjacent_polygons(feature):\n if feature.geometry().wkbType() != WKBMultiPolygon:\n return False\n mp = Geometry.get_multipolygon(feature)\n if len(mp) < 2:\n return False\n else:\n geom = None\n for p in mp:\n g = Geometry.fromPolygonXY(p)\n ng = g if geom is None else geom.combine(g)\n if ng.isGeosValid():\n geom = ng\n if geom is not None:\n feature.setGeometry(geom)\n return geom.isGeosValid()", "def split_PolygonSegments(lon, lat, lon_split=180., epsilon=0.000001):\n lon = np.asarray(lon)\n lat = np.asarray(lat)\n mask = lon <= lon_split + epsilon\n lon0 = lon[mask]\n lat0 = lat[mask]\n\n lon1 = lon[~mask]\n lat1 = lat[~mask]\n return list(x for x in ((lon0, lat0), (lon1, lat1)))", "def construct_polygon(self, polygon_longs: List, polygon_lats: List) -> gpd.GeoDataFrame:\n\n polygon_geom = Polygon(zip(polygon_longs, polygon_lats))\n\n crs = {'init': 'epsg:4326'}\n polygon = gpd.GeoDataFrame(index=[0], crs=crs, geometry=[polygon_geom])\n\n polygon.to_file(filename=f'{self.polygon_path}/polygon_{self.postfix}.geojson', driver='GeoJSON')\n polygon.to_file(filename=f'{self.polygon_path}/polygon_{self.postfix}.shp', driver=\"ESRI Shapefile\")\n\n self.monitor.info(\"-> Created area polygon.\")\n return polygon", "def from_gca_polygon(gca_obj, name_header, folder_name, folder_description='',\n altitude_mode=\"ctg\", style_to_use=None, poly_hidden=False,\n poly_follow_terrain=True, poly_extrude_to_ground=False, folder_collapsed=True):\n\n name_col = gca_obj.headers.index(name_header)\n\n polygons = list()\n\n for feature in gca_obj.features:\n name = feature[0][name_col]\n coords = feature[1]\n attrs = feature[0]\n headers = gca_obj.headers\n\n poly = kml_base.polygon(coords, name, headers, attrs, altitude_mode, style_to_use, poly_hidden, poly_follow_terrain, poly_extrude_to_ground)\n polygons.append(poly)\n\n poly_folder = kml_base.folder(folder_name, polygons, folder_description, folder_collapsed)\n\n return poly_folder", "def grid_in_shape(up_lat,low_lat,left_long,right_long,shape,\n lat_resolution=15,long_resolution=60): \n \n longitudes = np.linspace(left_long,right_long,60)\n latitudes = np.linspace(low_lat,up_lat,15)\n prods = list(itertools.product(longitudes,latitudes))\n points = [shapely.geometry.Point(point) for point in prods]\n points_within = [point for point in points if shape.contains(point)]\n 
points_gdf = gpd.GeoDataFrame(geometry = points_within)\n \n return points_gdf", "def point_inside_polygon(xxx_todo_changeme,poly):\n (x,y) = xxx_todo_changeme\n n = len(poly)\n inside = False\n\n p1x, p1y = poly[0]\n for i in range(n + 1):\n p2x, p2y = poly[i % n]\n if y > min(p1y, p2y):\n if y <= max(p1y, p2y):\n if x <= max(p1x, p2x):\n if p1y != p2y:\n xinters = (y - p1y) * (p2x - p1x) / (p2y - p1y) + p1x\n if p1x == p2x or x <= xinters:\n inside = not inside\n p1x, p1y = p2x, p2y\n\n return inside", "def split_polygon(polygon):\n\n if len(polygon) < 3:\n raise ValueError(\n 'At least 3 lat/lon float value pairs must be provided')\n\n polygon_string = ''\n\n for poly in polygon:\n polygon_string += ' '.join(map(str, poly))\n polygon_string += ' '\n\n return polygon_string.strip()", "def extent_geom(extent):\n ring = ogr.Geometry(ogr.wkbLinearRing)\n ring.AddPoint(extent[0], extent[3])\n ring.AddPoint(extent[2], extent[3])\n ring.AddPoint(extent[2], extent[1])\n ring.AddPoint(extent[0], extent[1])\n ring.CloseRings()\n polygon = ogr.Geometry(ogr.wkbPolygon)\n polygon.AddGeometry(ring)\n return polygon", "def get_boundary_as_polygon(self, do_geo=True):\n xhor, yhor = self.get_coordinates()\n dimensions = xhor.shape\n xbottom = xhor[0, :]\n xright = xhor[:, dimensions[1]-1]\n xtop = xhor[dimensions[0]-1, :][::-1]\n xleft = xhor[:, 0][::-1]\n\n ybottom = yhor[0, :]\n yright = yhor[:, dimensions[1]-1]\n ytop = yhor[dimensions[0]-1, :][::-1]\n yleft = yhor[:, 0][::-1]\n\n lon_square = np.concatenate((xbottom, xright, xtop, xleft))\n lat_square = np.concatenate((ybottom, yright, ytop, yleft))\n\n return lon_square, lat_square", "def gml_to_polygon(footprint):\n footprint = footprint.replace('\\n', '').strip()\n coords_poly = []\n #\n # Sentinel-1\n # (http://www.opengis.net/gml/srs/epsg.xml#4326)\n #\n if ',' in footprint:\n coords_gml = footprint.split()\n for coord_pair in coords_gml:\n lat, lon = [float(_) for _ in coord_pair.split(',')]\n if lon < -180.0:\n lon = -180.0\n if lon > 180.0:\n lon = 180.0\n if lat < -90.0:\n lat = -90.0\n if lat > 90.0:\n lat = 90.0\n coords_poly.append('{lon:.4f} {lat:.4f}'.format(lon=lon, lat=lat))\n\n #\n # Sentinel-3 and Sentinel-2\n # (http://www.opengis.net/def/crs/EPSG/0/4326)\n #\n else:\n coords_gml = footprint.split()\n for i in range(len(coords_gml)//2):\n lat = float(coords_gml[2*i])\n lon = float(coords_gml[2*i+1])\n if lon < -180.0:\n lon = -180.0\n if lon > 180.0:\n lon = 180.0\n if lat < -90.0:\n lat = -90.0\n if lat > 90.0:\n lat = 90.0\n coords_poly.append('{lon:.4f} {lat:.4f}'.format(lon=lon, lat=lat))\n\n #\n # Make sure the polygon is a closed line string.\n #\n if coords_poly[0] != coords_poly[-1]:\n coords_poly.append(coords_poly[0])\n\n wkt = 'POLYGON (({}))'.format(','.join(coords_poly))\n return wkt", "def to_shapely_polygon(self):\n # load shapely lazily, which makes the dependency more optional\n import shapely.geometry\n return shapely.geometry.Polygon([(point[0], point[1]) for point in self.exterior])", "def produce_polygon(polygon_ordered_coordinates: List, zoom: int, plot_polygon: bool = False) -> Path:\n polygon_tile_points = []\n for item in polygon_ordered_coordinates:\n polygon_tile_points += [Utility.get_tile(*item, zoom)]\n polygon_tile_points += [polygon_tile_points[0]]\n polygon = Path(polygon_tile_points)\n if plot_polygon:\n fig = plt.figure()\n ax = fig.add_subplot(111)\n patch = patches.PathPatch(polygon, facecolor='orange', lw=2)\n ax.add_patch(patch)\n ax.set_xlim(min(polygon_tile_points, key = lambda item: 
item[0])[0], max(polygon_tile_points, key = lambda item: item[0])[0])\n ax.set_ylim(min(polygon_tile_points, key = lambda item: item[1])[1], max(polygon_tile_points, key = lambda item: item[1])[1])\n plt.show()\n return polygon", "def create_partition(mesh,polygons,enforce_exact=False):", "def _split_lines(polygon, distance):\n list_points = []\n current_dist = distance # set the current distance to place the point\n\n boundary = polygon.boundary # make shapely MultiLineString object\n if boundary.type == \"LineString\":\n line_length = boundary.length # get the total length of the line\n while (\n current_dist < line_length\n ): # while the current cumulative distance is less than the total length of the line\n list_points.append(\n boundary.interpolate(current_dist)\n ) # use interpolate and increase the current distance\n current_dist += distance\n elif boundary.type == \"MultiLineString\":\n for ls in boundary:\n line_length = ls.length # get the total length of the line\n while (\n current_dist < line_length\n ): # while the current cumulative distance is less than the total length of the line\n list_points.append(\n ls.interpolate(current_dist)\n ) # use interpolate and increase the current distance\n current_dist += distance\n\n cutted = shapely.ops.split(\n boundary, shapely.geometry.MultiPoint(list_points).buffer(0.001)\n )\n return cutted", "def make_polygon(*coords):\n global GEOMETRY_SURF, POLYGONS,col\n if len(coords) < 3:\n print(\"Warning: Invalid polygon passed, ignoring...\")\n return\n start = coords[0]\n prev = coords[0]\n for coord in coords:\n POLYGONS |= {coord}\n line = Boundary(prev[0],prev[1],coord[0],coord[1]) #add segment to WALL list\n prev = coord\n line = Boundary(prev[0], prev[1],start[0],start[1])\n #now draw poly\n pygame.draw.polygon(GEOMETRY_SURF,col[\"SHAPECOL\"], coords)\n return", "def pnpoly(test_point, polygon):\r\n is_inside = False\r\n minX = polygon[0][0]\r\n maxX = polygon[0][0]\r\n minY = polygon[0][1]\r\n maxY = polygon[0][1]\r\n for p in polygon:\r\n minX = min(p[0], minX)\r\n maxX = max(p[0], maxX)\r\n minY = min(p[1], minY)\r\n maxY = max(p[1], maxY)\r\n if test_point[0] < minX or test_point[0] > maxX or test_point[1] < minY or test_point[1] > maxY:\r\n return False\r\n j = len(polygon) - 1\r\n for i in range(len(polygon)):\r\n if ((polygon[i][1] > test_point[1]) != (polygon[j][1] > test_point[1]) and (\r\n test_point[0] < (polygon[j][0] - polygon[i][0]) * (test_point[1] - polygon[i][1]) / (\r\n polygon[j][1] - polygon[i][1]) + polygon[i][0])):\r\n is_inside = not is_inside\r\n j = i\r\n return is_inside", "def lineToPolygon(geom):\n assert(geom[\"type\"] == \"LineString\")\n # LineString is only the exterior line of a polygon (no holes possible)\n return geojson.Polygon(coordinates=[geom[\"coordinates\"]], validate=True)", "def _preprocess_polygon(polygon):\n\n # Could catch ValueErrors for unsuitable inputs\n polygon = numpy.array(polygon)\n\n if len(polygon.shape) == 1:\n if len(polygon) % 2:\n raise ValueError('Number of values for polygon not divisible by two.'\n 'Coordinates need an x and y coordinate: '.format(polygon))\n polygon = polygon.reshape((-1, 2))\n\n if not len(polygon.shape) == 2 or polygon.shape[1] != 2:\n raise ValueError('polygon of wrong dimensions. It should be of shape. '\n 'Should be: (num_points, 2). 
Input: {}'.format(polygon))\n\n polygon = Polygon(numpy.array(polygon))\n\n # Mainly for self-intersection\n if not polygon.is_valid:\n raise ValueError('polygon is invalid, likely self-intersecting: {}'.\n format(polygon))\n\n return polygon", "def _densify(self, geom, segment):\n # temporary solution for readthedocs fail. - cannot mock osgeo\n try:\n from osgeo import ogr\n except ModuleNotFoundError:\n import warnings\n\n warnings.warn(\"OGR (GDAL) is required.\")\n\n poly = geom\n wkt = geom.wkt # shapely Polygon to wkt\n geom = ogr.CreateGeometryFromWkt(wkt) # create ogr geometry\n geom.Segmentize(segment) # densify geometry by 2 metres\n geom.CloseRings() # fix for GDAL 2.4.1 bug\n wkt2 = geom.ExportToWkt() # ogr geometry to wkt\n try:\n new = loads(wkt2) # wkt to shapely Polygon\n return new\n except Exception:\n return poly", "def compare_polygon_to_point(df_point, df_polygon): \n\n #check for each polygon which points are overlaying with it \n df_polygon['geometry'] = pygeos.buffer(df_polygon.geometry,0) #avoid intersection\n spat_tree = pygeos.STRtree(df_point.geometry) # https://pygeos.readthedocs.io/en/latest/strtree.html\n for polygon_row in df_polygon.itertuples():\n df_point_overlap = (df_point.loc[spat_tree.query(polygon_row.geometry,predicate='intersects').tolist()]).sort_index(ascending=True) #get point that overlaps with polygon\n if not df_point_overlap.empty:\n if polygon_row.asset in df_point_overlap['asset'].tolist():\n df_polygon = df_polygon.drop(polygon_row.Index)\n \n return df_polygon.reset_index(drop=True)", "def _confidence_interval_to_polygon(\n x_coords_bottom, y_coords_bottom, x_coords_top, y_coords_top,\n for_performance_diagram=False):\n\n nan_flags_top = numpy.logical_or(\n numpy.isnan(x_coords_top), numpy.isnan(y_coords_top))\n real_indices_top = numpy.where(numpy.invert(nan_flags_top))[0]\n\n nan_flags_bottom = numpy.logical_or(\n numpy.isnan(x_coords_bottom), numpy.isnan(y_coords_bottom))\n real_indices_bottom = numpy.where(numpy.invert(nan_flags_bottom))[0]\n\n if for_performance_diagram:\n y_coords_top = y_coords_top[real_indices_top]\n sort_indices_top = numpy.argsort(y_coords_top)\n y_coords_top = y_coords_top[sort_indices_top]\n x_coords_top = x_coords_top[real_indices_top][sort_indices_top]\n\n y_coords_bottom = y_coords_bottom[real_indices_bottom]\n sort_indices_bottom = numpy.argsort(-y_coords_bottom)\n y_coords_bottom = y_coords_bottom[sort_indices_bottom]\n x_coords_bottom = x_coords_bottom[real_indices_bottom][\n sort_indices_bottom]\n else:\n x_coords_top = x_coords_top[real_indices_top]\n sort_indices_top = numpy.argsort(-x_coords_top)\n x_coords_top = x_coords_top[sort_indices_top]\n y_coords_top = y_coords_top[real_indices_top][sort_indices_top]\n\n x_coords_bottom = x_coords_bottom[real_indices_bottom]\n sort_indices_bottom = numpy.argsort(x_coords_bottom)\n x_coords_bottom = x_coords_bottom[sort_indices_bottom]\n y_coords_bottom = y_coords_bottom[real_indices_bottom][\n sort_indices_bottom]\n\n polygon_x_coords = numpy.concatenate((\n x_coords_top, x_coords_bottom, numpy.array([x_coords_top[0]])))\n polygon_y_coords = numpy.concatenate((\n y_coords_top, y_coords_bottom, numpy.array([y_coords_top[0]])))\n\n return polygons.vertex_arrays_to_polygon_object(\n polygon_x_coords, polygon_y_coords)", "def geodjango_to_shapely(geos_obj):\n assert HAS_GEODJANGO, \"Requires Geodjango\"\n\n geodjango_poly_to_shapely = lambda t: geometry.Polygon(shell=t.coords[0], holes=t.coords[1:])\n\n converters = {\n geos.Point: lambda t: 
geometry.Point(t.coords),\n geos.LineString: lambda t: geometry.LineString(t.coords),\n geos.Polygon: lambda t: geodjango_poly_to_shapely(t),\n geos.MultiPolygon: lambda t: geometry.MultiPolygon([geodjango_poly_to_shapely(x) for x in t])\n }\n\n if not issubclass(geos_obj.__class__, geos.GEOSGeometry):\n raise TypeError(\"Require object that inherits from geos.GEOSGeometry\")\n\n return converters[type(geos_obj)](geos_obj) # FIXME: why is PyCharm complaining about this line?!", "def str2polygon(strdata):\n pts = []\n partial = None\n\n # We have two potential formats, one with 4 or 5 places and one\n # with eight!\n vals = re.findall(LAT_LON, strdata)\n for val in vals:\n if len(val) == 8:\n lat = float(val[:4]) / 100.00\n lon = float(val[4:]) / 100.00\n if lon < 40:\n lon += 100.\n lon = 0 - lon\n pts.append(checker(lon, lat, strdata))\n else:\n fval = float(val) / 100.00\n if partial is None: # we have lat\n partial = fval\n continue\n # we have a lon\n if fval < 40:\n fval += 100.\n fval = 0 - fval\n pts.append(checker(fval, partial, strdata))\n partial = None\n\n if not pts:\n return None\n if pts[0][0] != pts[-1][0] and pts[0][1] != pts[-1][1]:\n pts.append(pts[0])\n return Polygon(pts)", "def point_to_polygon_geojson(g):\n point_coordinates = g['geometry']['coordinates']\n polygon_geojson = {\n 'type': 'Feature',\n 'properties': g['properties'],\n 'geometry': {\n 'type': 'Polygon',\n 'coordinates': [\n [point_coordinates, point_coordinates, point_coordinates, point_coordinates]\n ]\n }\n }\n return polygon_geojson", "def Polygon(self, polyline = False):\n\n from geographiclib.polygonarea import PolygonArea\n return PolygonArea(self, polyline)", "def test_split_outer_multipolygon_way_1():\n park_15001 = query_row(db_conf, 'osm_landusages', -15001)\n assert park_15001['type'] == 'park'\n assert_almost_equal(park_15001['geometry'].area, 9816216452, -1)\n assert query_row(db_conf, 'osm_roads', 15002) == None", "def create_geofence(self):\n\t\tring = ogr.Geometry(ogr.wkbLinearRing)\n\t\tring.AddPoint(*self.north_coords)\n\t\tring.AddPoint(*self.northeast_coords)\n\t\tring.AddPoint(*self.east_coords)\n\t\tring.AddPoint(*self.southeast_coords)\n\t\tring.AddPoint(*self.south_coords)\n\t\tring.AddPoint(*self.southwest_coords)\n\t\tring.AddPoint(*self.west_coords)\n\t\tring.AddPoint(*self.northwest_coords)\n\t\tring.AddPoint(*self.north_coords)\n\t\tself.polygon.AddGeometry(ring)", "def test_polygon_with_duplicate_nodes_is_valid():\n geom = query_row(db_conf, 'osm_landusages', 30005)['geometry']\n assert geom.is_valid\n assert len(geom.exterior.coords) == 4", "def test_nested_exception(testing_polygon):\n with pytest.raises(\n ValueError,\n match=\"No Shapely geometry can be created from null value\",\n ):\n geometry_union = shapely.ops.unary_union(\n [ele.footprint_geometry for ele in testing_polygon]\n )\n with pytest.raises(\n ValueError,\n match=\"No Shapely geometry can be created from null value\",\n ):\n geometry_union = shapely.ops.unary_union(\n [ele.footprint_geometry.buffer(0.00) for ele in testing_polygon]\n )\n\n polygonlist = _polygon_chain(testing_polygon)\n assert type(polygonlist) is list\n assert len(polygonlist) == 262\n filtered_geom = _filter_geom(polygonlist)\n assert len(filtered_geom) == 199\n geometry_union = shapely.ops.unary_union(filtered_geom)\n\n assert geometry_union.is_valid", "def exclude_small_shapes(x,regionalized=False):\n # if its a single polygon, just return the polygon geometry\n if x.geometry.geom_type == 'Polygon':\n return x.geometry\n\n # if its a 
multipolygon, we start trying to simplify and\n # remove shapes if its too big.\n elif x.geometry.geom_type == 'MultiPolygon':\n\n if regionalized == False:\n area1 = 0.1\n area2 = 250\n\n elif regionalized == True:\n area1 = 0.01\n area2 = 50\n\n # dont remove shapes if total area is already very small\n if x.geometry.area < area1:\n return x.geometry\n # remove bigger shapes if country is really big\n\n if x['GID_0'] in ['CHL','IDN']:\n threshold = 0.01\n elif x['GID_0'] in ['RUS','GRL','CAN','USA']:\n if regionalized == True:\n threshold = 0.01\n else:\n threshold = 0.01\n\n elif x.geometry.area > area2:\n threshold = 0.1\n else:\n threshold = 0.001\n\n # save remaining polygons as new multipolygon for the\n # specific country\n new_geom = []\n for y in x.geometry:\n if y.area > threshold:\n new_geom.append(y)\n\n return MultiPolygon(new_geom)", "def inner_polygon(poly1,poly2):\n npts1 = len(poly1)\n npts2 = len(poly2)\n if npts1 < 3 or npts2 < 3: return None\n (poly1,angles1) = sort_points(*poly1)\n (poly2,angles2) = sort_points(*poly2)\n # loop through all possible line combinations \n # looking for valid line intersections \n intercepts = []\n for j in range(npts1):\n p1 = poly1[j]\n if j == npts1 - 1:\n p2 = poly1[0]\n else:\n p2 = poly1[j+1]\n for k in range(npts2):\n p3 = poly2[k]\n if k == npts2 - 1:\n p4 = poly2[0]\n else:\n p4 = poly2[k+1]\n (intercept,flag) = line_intercept(p1,p2,p3,p4)\n if flag > 0:\n intercepts.append(intercept)\n #############\n # now determine which points we can get to from\n # the origin without crossing any poly lines, \n # ie the inner set of points\n points = []\n for p in poly1: points.append(p)\n for p in poly2: points.append(p)\n for p in intercepts: points.append(p)\n (points,angles) = sort_points(*points)\n inner_points = []\n for p in points:\n # check against poly1\n inner = is_inner(p,poly1)\n # check against poly2\n if inner == True:\n inner = is_inner(p,poly2)\n if inner == True:\n inner_points.append(p)\n # sort the inner points\n (inner_points,angles) = sort_points(*inner_points)\n return inner_points", "def __draw_polygon(self, event, klick):\n global creating_polygon\n curX = self.canvas.canvasx(event.x)\n curY = self.canvas.canvasy(event.y)\n if not klick and len(self.polygon_points) >= 2:\n c_r_x, c_r_y = self.get_canvas_relative_coords((self.polygon_points[-2], self.polygon_points[-1]))\n distanceX = curX - c_r_x\n distanceY = curY - c_r_y\n if pow(pow(distanceX, 2) + pow(distanceY, 2), 1 / 2) <= 15:\n return\n image_relative_x, image_relative_y = self.get_image_relative_coords((curX, curY))\n self.polygon_points.extend((image_relative_x, image_relative_y))\n self.polygon_groundstructure.append(self.canvas.create_rectangle(curX - 2, curY - 2, curX + 2, curY + 2,\n outline='magenta', width=1,\n activewidth=2))\n if not creating_polygon: # start with a new polygon\n creating_polygon = True\n return\n else: # draw a line between the last points\n c_r_x1, c_r_y1 = self.get_canvas_relative_coords((self.polygon_points[-4], self.polygon_points[-3]))\n c_r_x2, c_r_y2 = self.get_canvas_relative_coords((self.polygon_points[-2], self.polygon_points[-1]))\n self.polygon_groundstructure.append(self.canvas.create_line([c_r_x1, c_r_y1, c_r_x2, c_r_y2],\n fill='magenta', width=2))", "def _proc_polygon(self, tokens, filled):\n\n pts = [(p[\"x\"], p[\"y\"]) for p in tokens[\"points\"]]\n component = Polygon(pen=self.pen, points=pts, filled=filled)\n\n return component", "def boundary_polygon_by_edges(self):\n lines=self.boundary_linestrings()\n 
polys=join_features.lines_to_polygons(lines,close_arc=False)\n if len(polys)>1:\n raise GridException(\"somehow there are multiple boundary polygons\")\n return polys[0]", "def merge_adjacent_features(group):\n geom = False\n for p in group:\n g = p.geometry()\n if g.isGeosValid():\n geom = geom.combine(g) if geom else g\n else:\n msg = _(\"The geometry of zone '%s' is not valid\") % p[\"label\"]\n log.warning(msg)\n report.warnings.append(msg)\n return geom", "def clip_polygon(subject, clipper, operation = 'difference'):\n Subject = Polygon()\n Clipper = Polygon()\n\n for s in subject:\n Subject.add(Vertex(s))\n\n for c in clipper:\n Clipper.add(Vertex(c))\n\n clipped = Clipper.difference(Subject)\\\n if operation == 'reversed-diff'\\\n else Subject.__getattribute__(operation)(Clipper)\n\n clipped = [(ext.points,[hole.points for hole in holes]) for ext,holes in clipped]\n return clipped", "def _get_polygon(areasrc):\n\n str = areasrc.geometry.wkt\n str = re.sub('POLYGON\\(\\(', '', str)\n str = re.sub('\\)\\)', '', str)\n aa = re.split('\\,', str)\n lons = []\n lats = []\n for str in aa:\n bb = re.split('\\s+', re.sub('^\\s+', '', str))\n lons.append(float(bb[0]))\n lats.append(float(bb[1]))\n return lons, lats", "def testLocation(point, polygon):\n # begin\n if polygon.first.y == point.y and polygon.first.x == point.x:\n return \"on\" # vertex\n w =0\n for v in polygon.iter():\n if v.next.y == point.y:\n if v.next.x == point.x:\n return \"on\" # vertex\n else:\n if v.y == point.y and (v.next.x > point.x) == (v.x < point.x):\n return \"on\" # edge\n # if crossing horizontal line\n if (v.y < point.y and v.next.y >= point.y)\\\n or (v.y >= point.y and v.next.y < point.y):\n if v.x >= point.x:\n if v.next.x > point.x:\n # modify w\n if v.next.y > v.y: w += 1\n else: w -= 1\n else:\n det = (v.x - point.x) * (v.next.y - point.y) \\\n - (v.next.x - point.x) * (v.y - point.y)\n if det == 0: return \"on\" # edge\n # if right crossing\n if (det > 0 and v.next.y > v.y)\\\n or (det < 0 and v.next.y < v.y):\n # modify w\n if v.next.y > v.y: w += 1\n else: w -= 1\n else:\n if v.next.x > point.x:\n det = (v.x - point.x) * (v.next.y - point.y) \\\n - (v.next.x - point.x) * (v.y - point.y)\n if det == 0: return \"on\" # edge\n # if right crossing\n if (det > 0 and v.next.y > v.y)\\\n or (det < 0 and v.next.y < v.y):\n # modify w\n if v.next.y > v.y: w += 1\n else: w -= 1\n if (w % 2) != 0:\n return \"in\"\n else:\n return \"out\"", "def check_cross_polygon(polygons_dict, region):\n result_poly_name = ''\n start_len = len(polygons_dict)\n poly_names = []\n poly_region_default_area = area(geojson.Feature(geometry=region, properties={}).geometry)\n for main_el in polygons_dict:\n for child_el in polygons_dict:\n intersection_region_area = 0\n main_poly = shapely.geometry.asShape(main_el['geometry'])\n child_poly = shapely.geometry.asShape(child_el['geometry'])\n intersection_polygon = main_poly.intersection(child_poly)\n control_area = area(\n geojson.Feature(geometry=child_poly, properties={}).geometry)\n if not intersection_polygon.is_empty and area(\n geojson.Feature(geometry=intersection_polygon, properties={}).geometry) < control_area:\n intersection_region = region.intersection(intersection_polygon)\n if not intersection_region.is_empty:\n intersection_region_area = area(\n geojson.Feature(geometry=intersection_region, properties={}).geometry)\n if float(\"{0:.2f}\".format(intersection_region_area)) == float(\n \"{0:.2f}\".format(poly_region_default_area)):\n 
poly_names.append(main_el[\"properties\"][\"Name\"])\n poly_names.append(child_el[\"properties\"][\"Name\"])\n if poly_names:\n result_poly_name = sorted(set(poly_names))[0]\n idx = 0\n iteration_range = len(polygons_dict)\n while idx < iteration_range:\n if polygons_dict[idx][\"properties\"][\"Name\"] != result_poly_name:\n del polygons_dict[idx]\n iteration_range -= 1\n else:\n idx += 1\n if len(polygons_dict) != start_len:\n return polygons_dict\n else:\n return None", "def any_geom2ogr_geom(geom, osr_sref):\n\n if isinstance(geom, (tuple, list)) and (not isinstance(geom[0], (tuple, list))) and \\\n (len(geom) == 4) and osr_sref:\n geom_ogr = geometry.bbox2polygon(geom, osr_sref)\n geom_ogr = swap_axis(geom_ogr) # ensure lon lat order\n elif isinstance(geom, (tuple, list)) and (isinstance(geom[0], (tuple, list))) and \\\n (len(geom) == 2) and osr_sref:\n edge = ogr.Geometry(ogr.wkbLinearRing)\n geom = [geom[0], (geom[0][0], geom[1][1]), geom[1], (geom[1][0], geom[0][1])]\n for point in geom:\n if len(point) == 2:\n edge.AddPoint(float(point[0]), float(point[1]))\n edge.CloseRings()\n geom_ogr = ogr.Geometry(ogr.wkbPolygon)\n geom_ogr.AddGeometry(edge)\n geom_ogr.AssignSpatialReference(osr_sref)\n geom_ogr = force_axis_mapping(geom_ogr)\n elif isinstance(geom, (tuple, list)) and isinstance(geom[0], (tuple, list)) and osr_sref:\n edge = ogr.Geometry(ogr.wkbLinearRing)\n for point in geom:\n if len(point) == 2:\n edge.AddPoint(float(point[0]), float(point[1]))\n edge.CloseRings()\n geom_ogr = ogr.Geometry(ogr.wkbPolygon)\n geom_ogr.AddGeometry(edge)\n geom_ogr.AssignSpatialReference(osr_sref)\n geom_ogr = force_axis_mapping(geom_ogr)\n elif isinstance(geom, shapely.geometry.Polygon):\n geom_ogr = ogr.CreateGeometryFromWkt(geom.wkt)\n geom_ogr.AssignSpatialReference(osr_sref)\n geom_ogr = swap_axis(geom_ogr) # ensure lon lat order\n elif isinstance(geom, ogr.Geometry):\n geom_sref = geom.GetSpatialReference()\n if geom_sref is None:\n geom.AssignSpatialReference(osr_sref)\n geom_ogr = geom\n geom_ogr = swap_axis(geom_ogr) # ensure lon lat order\n else:\n raise GeometryUnkown(geom)\n\n return geom_ogr", "def _grid_cell_to_polygon(\n grid_point_x_metres, grid_point_y_metres, x_spacing_metres,\n y_spacing_metres):\n\n x_min_metres = grid_point_x_metres - x_spacing_metres / 2\n x_max_metres = grid_point_x_metres + x_spacing_metres / 2\n y_min_metres = grid_point_y_metres - y_spacing_metres / 2\n y_max_metres = grid_point_y_metres + y_spacing_metres / 2\n\n vertex_x_coords_metres = numpy.array(\n [x_min_metres, x_max_metres, x_max_metres, x_min_metres, x_min_metres])\n vertex_y_coords_metres = numpy.array(\n [y_min_metres, y_min_metres, y_max_metres, y_max_metres, y_min_metres])\n\n return polygons.vertex_arrays_to_polygon_object(\n exterior_x_coords=vertex_x_coords_metres,\n exterior_y_coords=vertex_y_coords_metres)", "def test_create_polygon_with_bad_poly_data_fails(self, api_client):\n url = self.base_url + \"/polygons/\"\n prov = baker.make(Provider)\n data = {\n \"name\": \"NRB\",\n \"price\": 930,\n \"provider\": prov.id,\n \"poly\": \"-98.503358 -29.335668, -98.503086 29.335668, -98.503086 29.335423, -98.503358 29.335423, -98.503358 29.335668\" # noqa\n }\n response = api_client().post(url, data)\n assert response.status_code == 400\n assert 'Points of LinearRing do not form a closed linestring.' 
in response.data['poly']", "def shapePolyToShapely(p: pcbnew.SHAPE_POLY_SET) \\\n -> Union[shapely.geometry.Polygon, shapely.geometry.MultiPolygon]:\n polygons = []\n for pIdx in range(p.OutlineCount()):\n kOutline = p.Outline(pIdx)\n assert kOutline.IsClosed()\n outline = shapeLinechainToList(kOutline)\n holes = []\n for hIdx in range(p.HoleCount(pIdx)):\n kHole = p.Hole(hIdx)\n assert kHole.isClosed()\n holes.append(shapeLinechainToList(kHole))\n polygons.append(Polygon(outline, holes=holes))\n if len(polygons) == 1:\n return polygons[0]\n return MultiPolygon(polygons=polygons)", "def geojson_to_polygons_groundtruth(js_):\n\n burnt_polys = []\n building_polys = []\n for i, feat in enumerate(js_['features']):\n o = {\n \"coordinates\": feat['geometry']['coordinates'],\n \"type\": feat['geometry']['type']\n }\n s = json.dumps(o)\n\n # convert to geojson.geometry.Polygon\n g1 = geojson.loads(s)\n\n # covert to shapely.geometry.polygon.Polygon\n g2 = shape(g1)\n\n if feat['properties']['color'] == 'red': # red for the burnt region\n burnt_polys.append(g2)\n else: # for the building poly\n if feat['properties']['Burnt_Label']:\n building_polys.append([g2, [feat['properties']['BuildingID'], 'blue',\n True]]) # mark building polygons as 'blue' for burnt for now\n else:\n building_polys.append([g2, [feat['properties']['BuildingID'], 'yellow',\n False]]) # mark building polygons as 'yellow' for non-burnt for now\n return burnt_polys, building_polys", "def _get_polygon(element):\n polygon = element.find('%s/%s/%s/%s/%s' %\n (NRML04_AREA_GEOMETRY, gml.GML_POLYGON,\n gml.GML_EXTERIOR, gml.GML_LINEAR_RING,\n gml.GML_POS_LIST)).text\n\n polygon = gml._get_polygon_from_2DLinestring(polygon)\n\n return polygon", "def coarse_dataframe(geodf, side_square):\n\n # initialise the categories\n\n geodf['category'] = -1\n\n # do calculations on the first date, then extrapolate to the rest\n data_df = geodf[geodf['date'] == np.unique(geodf['date'])[0]]\n\n data_df = data_df.sort_values(by=['longitude', 'latitude'])\n\n n_grids = int(math.sqrt(data_df.shape[0]))\n\n category = 0\n\n for n in range(data_df.shape[0]):\n\n # only process lat,long point that do not have a category\n if data_df['category'].iloc[n] == -1:\n\n # get the side_square^2 nearest indexes to the point.\n indexes = []\n for i in range(side_square):\n for j in range(side_square):\n\n if n + n_grids * i + j < n_grids * n_grids and data_df['category'].iloc[n + n_grids * i + j] == -1:\n indexes.append(n + n_grids * i + j)\n\n # assing them all to the same categorty\n data_df['category'].iloc[indexes] = str(category)\n\n # get the geometry points of that catery\n cat_geometry = data_df[data_df['category'] == str(category)]['geometry']\n\n # get indexes of each point belonging to the category\n indexes_all = []\n for point in cat_geometry:\n indexes_all.append(geodf[geodf['geometry'] == point].index.tolist())\n\n indexes_all_flat = [item for sublist in indexes_all for item in sublist]\n\n geodf['category'].iloc[indexes_all_flat] = str(category)\n\n category = category + 1\n\n geodf['category'] = (geodf['category'].astype(str)).str.cat(geodf['date'], sep=\"_\")\n\n geodf = geodf.dissolve(by=['category', 'date'], aggfunc='mean')\n\n # re-assing the date because we are losing it\n geodf['date'] = [i[1] for i in geodf.index]\n\n geodf['category'] = [i[0] for i in geodf.index]\n\n return geodf", "def in_polygon(point, poly):\n x, y = point\n n = len(poly)\n inside = False\n\n p1x, p1y = poly[0]\n for i in range(n+1):\n p2x, p2y = poly[i % n]\n if y > 
min(p1y, p2y):\n if y <= max(p1y, p2y):\n if x <= max(p1x, p2x):\n if p1y != p2y:\n xinters = (y-p1y)*(p2x-p1x)/(p2y-p1y)+p1x\n if p1x == p2x or x <= xinters:\n inside = not inside\n p1x, p1y = p2x, p2y\n return inside", "def PolygonPath(polygon):\n\n def coding(ob):\n # The codes will be all \"LINETO\" commands, except for \"MOVETO\"s at the\n # beginning of each subpath\n n = len(getattr(ob, 'coords', None) or ob)\n vals = ones(n, dtype=Path.code_type) * Path.LINETO\n vals[0] = Path.MOVETO\n return vals\n\n if hasattr(polygon, 'geom_type'): # Shapely\n ptype = polygon.geom_type\n if ptype == 'Polygon':\n polygon = [Polygon(polygon)]\n elif ptype == 'MultiPolygon':\n polygon = [Polygon(p) for p in polygon]\n else:\n raise ValueError(\n \"A polygon or multi-polygon representation is required\")\n\n else: # GeoJSON\n polygon = getattr(polygon, '__geo_interface__', polygon)\n ptype = polygon[\"type\"]\n if ptype == 'Polygon':\n polygon = [Polygon(polygon)]\n elif ptype == 'MultiPolygon':\n polygon = [Polygon(p) for p in polygon['coordinates']]\n else:\n raise ValueError(\n \"A polygon or multi-polygon representation is required\")\n\n vertices = concatenate([\n concatenate([asarray(t.exterior)[:, :2]] +\n [asarray(r)[:, :2] for r in t.interiors])\n for t in polygon])\n codes = concatenate([\n concatenate([coding(t.exterior)] +\n [coding(r) for r in t.interiors]) for t in polygon])\n\n return Path(vertices, codes)", "def social_infrastructure_combined(osm_path): \n df_point = social_infrastructure_point(osm_path)\n df_polygon = social_infrastructure_polygon(osm_path)\n \n df_polygon_filtered = compare_polygon_to_point(df_point, df_polygon) #remove duplicates polygon and point data \n df_polygon_filtered['geometry'] = pygeos.centroid(df_polygon_filtered.geometry) #transform to pointdata\n \n return (df_point.append(df_polygon_filtered)).reset_index(drop=True)", "def bbox_to_geom(bbox: Tuple[float, float, float, float]) -> Dict:\n # TODO: Handle dateline crossing geometry\n return {\n \"type\": \"Polygon\",\n \"coordinates\": [\n [\n [bbox[0], bbox[3]],\n [bbox[0], bbox[1]],\n [bbox[2], bbox[1]],\n [bbox[2], bbox[3]],\n [bbox[0], bbox[3]],\n ]\n ],\n }", "def gbox_boundary(gbox, pts_per_side=16):\n H, W = gbox.shape[:2]\n xx = np.linspace(0, W, pts_per_side, dtype='float32')\n yy = np.linspace(0, H, pts_per_side, dtype='float32')\n\n return polygon_path(xx, yy).T[:-1]", "def geom_to_holes(geom):\n from spatialpandas.geometry import Polygon, MultiPolygon\n if isinstance(geom, Polygon):\n holes = []\n for i, hole in enumerate(geom.data):\n if i == 0:\n continue\n hole = ensure_ring(np.array(hole.as_py()).reshape(-1, 2))\n holes.append(hole)\n return [holes]\n elif isinstance(geom, MultiPolygon):\n holes = []\n for poly in geom.data:\n poly_holes = []\n for i, hole in enumerate(poly):\n if i == 0:\n continue\n arr = ensure_ring(np.array(hole.as_py()).reshape(-1, 2))\n poly_holes.append(arr)\n holes.append(poly_holes)\n return holes\n elif 'Multi' in type(geom).__name__:\n return [[]]*len(geom)\n else:\n return [[]]", "def buildMultiPolygon(self,polygonList):\r\n geomlist=[]\r\n for geom in polygonList:\r\n # Cut 'MULTIPOLYGON(*) if we got one'\r\n if geom.exportToWkt()[:12]==\"MULTIPOLYGON\":\r\n geomWkt=geom.exportToWkt()[13:len(geom.exportToWkt())-1]\r\n else:\r\n # Cut 'POLYGON' if we got one\r\n geomWkt=geom.exportToWkt()[7:]\r\n geomlist.append(str(geomWkt))\r\n multiGeomWKT=\"MULTIPOLYGON(\"\r\n multiGeomWKT +=\",\".join(geomlist)\r\n multiGeomWKT+=\")\"\r\n #if self.debug: print 
multiGeomWKT\r\n multiGeom=QgsGeometry.fromWkt(multiGeomWKT)\r\n return multiGeom", "def get_airport_start_end(result, geo_airport_cities):\n crs={'init': 'epsg:4326'}\n geometry_st = [Point(xy) for xy in zip(result.start_lon, result.start_lat)]\n geometry_end = [Point(xy) for xy in zip(result.end_lon, result.end_lat)]\n geo_st = gpd.GeoDataFrame(geometry_st, crs=crs, geometry=geometry_st)[['geometry']]\n geo_end = gpd.GeoDataFrame(geometry_end, crs=crs, geometry=geometry_end)[['geometry']]\n geo_st.crs = crs\n geo_end.crs = crs\n st_airport = pd.DataFrame(geo_st.within(geo_airport_cities['geometry'].unary_union.buffer(0.1)))\n st_airport.index=result.index\n result['geometry_st'] = st_airport\n end_airport = pd.DataFrame(geo_end.within(geo_airport_cities['geometry'].unary_union.buffer(0.1)))\n end_airport.index=result.index\n result['geometry_end'] = end_airport\n st_florence = pd.DataFrame(geo_st.within(geo_airport_cities['geometry'].loc[1].buffer(0.1)))\n st_florence.index=result.index\n result['geometry_st_fl'] = st_florence\n end_florence = pd.DataFrame(geo_end.within(geo_airport_cities['geometry'].loc[1].buffer(0.1)))\n end_florence.index=result.index\n result['geometry_end_fl'] = end_florence\n st_pisa = pd.DataFrame(geo_st.within(geo_airport_cities['geometry'].loc[0].buffer(0.1)))\n st_pisa.index=result.index\n result['geometry_st_pisa'] = st_pisa\n end_pisa = pd.DataFrame(geo_end.within(geo_airport_cities['geometry'].loc[0].buffer(0.1)))\n end_pisa.index=result.index\n result['geometry_end_pisa'] = end_pisa\n return result", "def polygonal(resolution, in_vertices, out_vertices_list = None):\n in_vertices = [Point(in_vertices[k,0],in_vertices[k,1]) for k in range(in_vertices.shape[0])] \n\n domain = mshr.Polygon(in_vertices) # https://bitbucket.org/fenics-project/mshr/wiki/API/Polygon\n # Create polygon defined by the given vertices. 
Vertices must be in counter-clockwise order and free of self-intersections.\n \n if(out_vertices_list is not None):\n for out_vertices in out_vertices_list:\n out_vertices = [Point(out_vertices[k,0],out_vertices[k,1]) for k in range(out_vertices.shape[0])]\n domain -= mshr.Polygon(out_vertices)\n \n mesh=mshr.generate_mesh(domain, resolution)\n\n # TODO : add refined mesh\n # if(refine_mesh):\n # d = mesh.topology().dim()\n \n # class To_refine(SubDomain):\n # def inside(self, x, on_boundary):\n # return x[1]<=0 and x[1]>= -l_mot/2-h_grid-l_vacuum/4\n\n # to_refine = To_refine()\n # marker = MeshFunction(\"bool\", mesh, d, False)\n # to_refine.mark(marker, True)\n # mesh = refine(mesh,marker)\n\n return mesh", "def test_to_wkt_list_complex_polygon(self):\n from pykml.util import to_wkt_list\n\n # create a polygon\n poly = KML.Polygon(\n KML.extrude('1'),\n KML.altitudeMode('relativeToGround'),\n KML.outerBoundaryIs(\n KML.LinearRing(\n KML.coordinates(\n '-122.366278,37.818844,30 '\n '-122.365248,37.819267,30 '\n '-122.365640,37.819861,30 '\n '-122.366669,37.819429,30 '\n '-122.366278,37.818844,30 '\n ),\n ),\n ),\n KML.innerBoundaryIs(\n KML.LinearRing(\n KML.coordinates(\n '-122.366212,37.818977,30 '\n '-122.365424,37.819294,30 '\n '-122.365704,37.819731,30 '\n '-122.366212,37.818977,30 '\n ),\n ),\n ),\n KML.innerBoundaryIs(\n KML.LinearRing(\n KML.coordinates(\n '-122.366212,37.818977,30 '\n '-122.365704,37.819731,30 '\n '-122.366488,37.819402,30 '\n '-122.366212,37.818977,30 '\n ),\n ),\n ),\n )\n\n poly_wkt_list = to_wkt_list(poly)\n\n self.assertEqual(len(poly_wkt_list), 1)\n self.assertEqual(\n poly_wkt_list[0],\n ('POLYGON ((-122.366278 37.818844 30, '\n '-122.365248 37.819267 30, '\n '-122.365640 37.819861 30, '\n '-122.366669 37.819429 30, '\n '-122.366278 37.818844 30), '\n '(-122.366212 37.818977 30, '\n '-122.365424 37.819294 30, '\n '-122.365704 37.819731 30, '\n '-122.366212 37.818977 30), '\n '(-122.366212 37.818977 30, '\n '-122.365704 37.819731 30, '\n '-122.366488 37.819402 30, '\n '-122.366212 37.818977 30))')\n )", "def polygon_clip(subjectPolygon, clipPolygon):\n\n def inside(p):\n return (cp2[0] - cp1[0]) * (p[1] - cp1[1]) > (cp2[1] - cp1[1]) * (p[0] - cp1[0])\n\n def computeIntersection():\n dc = [cp1[0] - cp2[0], cp1[1] - cp2[1]]\n dp = [s[0] - e[0], s[1] - e[1]]\n n1 = cp1[0] * cp2[1] - cp1[1] * cp2[0]\n n2 = s[0] * e[1] - s[1] * e[0]\n n3 = 1.0 / (dc[0] * dp[1] - dc[1] * dp[0])\n return [(n1 * dp[0] - n2 * dc[0]) * n3, (n1 * dp[1] - n2 * dc[1]) * n3]\n\n outputList = subjectPolygon\n cp1 = clipPolygon[-1]\n\n for clipVertex in clipPolygon:\n cp2 = clipVertex\n inputList = outputList\n outputList = []\n s = inputList[-1]\n\n for subjectVertex in inputList:\n e = subjectVertex\n if inside(e):\n if not inside(s):\n outputList.append(computeIntersection())\n outputList.append(e)\n elif inside(s):\n outputList.append(computeIntersection())\n s = e\n cp1 = cp2\n if len(outputList) == 0:\n return None\n return outputList", "def prune_and_polygon(cls, ground_truths, detections):\n\t\tif not hasattr(ground_truths[0], 'intersection'):\n\t\t\tground_truths = [cls.Polygon(value) for value in ground_truths]\n\t\tif not hasattr(detections[0], 'intersection'):\n\t\t\tdetections = [cls.Polygon(value) for value in detections]\n\t\tground_truths = [value for value in ground_truths if value.length > 0.]\n\t\treturn (ground_truths, detections)", "def is_dateline(vertices):\n vertices = np.asarray(vertices, dtype=\"d\")\n longitudes = vertices[:, 0]\n return 
np.abs(longitudes.min(axis=0) - longitudes.max(axis=0)) > 180", "def multi2poly(returned_vector_pred, layer_name=None):\n try: # Try to convert multipolygon to polygon\n df = gpd.read_file(returned_vector_pred, layer=layer_name)\n if 'MultiPolygon' in df['geometry'].geom_type.values:\n logging.info(\"\\nConverting multiPolygon to Polygon...\")\n gdf_exploded = df.explode(index_parts=True, ignore_index=True)\n gdf_exploded.to_file(returned_vector_pred, layer=layer_name) # overwrite the layer readed\n except Exception as e:\n logging.error(f\"\\nSomething went wrong during the conversion of Polygon. \\nError {type(e)}: {e}\")", "def get_mesh_boundary(triangles):\n # Create edges and sort each vertices on each edge.\n edge0 = triangles[:,0:2]\n edge1 = triangles[:,1:3]\n edge2 = triangles.take((0,2), axis=1)\n edges = np.concatenate((edge0, edge1, edge2), axis=0)\n edge_sort = np.sort(edges, axis=1)\n\n # Get unique edges that are only present once.\n (uniq, uniq_ids, counts) = np.unique(edge_sort, axis=0, return_index=True, return_counts=True)\n edge_inds = np.arange(edge_sort.shape[0], dtype=int)\n outer_edge_ids = edge_inds[np.in1d(edge_inds, uniq_ids[counts==1])]\n outer_edges = edge_sort[outer_edge_ids,:]\n num_outer_edges = outer_edges.shape[0]\n\n # Assume we need to close the polygon.\n num_outer_verts = num_outer_edges + 1\n\n # Loop over outer edges and use traversal method to get ordered vertices.\n v_start = outer_edges[0,0]\n v_end = outer_edges[0,1]\n vert_inds = -1*np.ones(num_outer_verts, dtype=int)\n vert_inds[0] = v_start\n vert_inds[1] = v_end\n vert_num = 2\n outer_edges[0,:] = -1\n for edge_num in range(1,num_outer_edges):\n edge_inds_next = np.where(outer_edges == v_end)\n if (edge_inds_next[0].shape[0] < 1):\n msg = \"Next edge not found for vertex %d\" % v_end\n raise ValueError(msg)\n edge_ind_next = edge_inds_next[0][0]\n vert_ind_next = 0\n if (edge_inds_next[1][0] == 0):\n vert_ind_next = 1\n vert_inds[vert_num] = outer_edges[edge_ind_next, vert_ind_next]\n outer_edges[edge_ind_next, :] = -1\n v_end = vert_inds[vert_num]\n vert_num += 1\n\n return vert_inds", "def inside_polygon(point, polygon):\n x = point[0]\n y = point[1]\n n = len(polygon)\n inside = False\n p1x, p1y = polygon[0]\n for i in range(1, n + 1):\n p2x, p2y = polygon[i % n]\n if y > min(p1y, p2y):\n if y <= max(p1y, p2y):\n if x <= max(p1x, p2x):\n if p1y != p2y:\n xinters = (y - p1y) * (p2x - p1x) / (p2y - p1y) + p1x\n if p1x == p2x or x <= xinters:\n inside = not inside\n p1x, p1y = p2x, p2y\n return inside", "def test_polygonize():\n # A collection with one non-zero-area Polygon is returned as a Polygon.\n geom1 = GeometryCollection([POLY, ZERO_POLY])\n result1 = polygonize(geom1)\n assert result1.geom_type == \"Polygon\"\n assert result1.area == 1.0\n\n # A collection with multiple non-zero-area polygons is returned as a MultiPolygon.\n geom2 = GeometryCollection([POLY, POLY])\n result2 = polygonize(geom2)\n assert result2.geom_type == \"MultiPolygon\"\n assert result2.area == 2.0\n\n # Zero-area geometries are not permitted.\n with pytest.raises(ValueError) as err:\n _ = polygonize(ZERO_POLY)\n assert err.match(\"Geometry has zero area\")", "def round_geom(geom, precision=None):\n if geom['type'] == 'Point':\n x, y = geom['coordinates']\n xp, yp = [x], [y]\n if precision is not None:\n xp = [round(v, precision) for v in xp]\n yp = [round(v, precision) for v in yp]\n new_coords = tuple(zip(xp, yp))[0]\n if geom['type'] in ['LineString', 'MultiPoint']:\n xp, yp = zip(*geom['coordinates'])\n if 
precision is not None:\n xp = [round(v, precision) for v in xp]\n yp = [round(v, precision) for v in yp]\n new_coords = tuple(zip(xp, yp))\n elif geom['type'] in ['Polygon', 'MultiLineString']:\n new_coords = []\n for piece in geom['coordinates']:\n xp, yp = zip(*piece)\n if precision is not None:\n xp = [round(v, precision) for v in xp]\n yp = [round(v, precision) for v in yp]\n new_coords.append(tuple(zip(xp, yp)))\n elif geom['type'] == 'MultiPolygon':\n parts = geom['coordinates']\n new_coords = []\n for part in parts:\n inner_coords = []\n for ring in part:\n xp, yp = zip(*ring)\n if precision is not None:\n xp = [round(v, precision) for v in xp]\n yp = [round(v, precision) for v in yp]\n inner_coords.append(tuple(zip(xp, yp)))\n new_coords.append(inner_coords)\n return {'type': geom['type'], 'coordinates': new_coords}", "def _pointInsidePolygon(self,point,polygon):\n # try:\n # import cv2\n # except:\n # logger.warning(\"Unable to import cv2\")\n # return False\n\n if( len(polygon) < 3 ):\n logger.warning(\"feature._pointInsidePolygon - this is not a valid polygon\")\n return False\n\n if( not isinstance(polygon,list)):\n logger.warning(\"feature._pointInsidePolygon - this is not a valid polygon\")\n return False\n\n #if( not isinstance(point,tuple) ):\n #if( len(point) == 2 ):\n # point = tuple(point)\n #else:\n # logger.warning(\"feature._pointInsidePolygon - this is not a valid point\")\n # return False\n #if( cv2.__version__ == '$Rev:4557'):\n counter = 0\n retVal = True\n p1 = None\n #print \"point: \" + str(point)\n poly = copy.deepcopy(polygon)\n poly.append(polygon[0])\n #for p2 in poly:\n N = len(poly)\n p1 = poly[0]\n for i in range(1,N+1):\n p2 = poly[i%N]\n if( point[1] > np.min((p1[1],p2[1])) ):\n if( point[1] <= np.max((p1[1],p2[1])) ):\n if( point[0] <= np.max((p1[0],p2[0])) ):\n if( p1[1] != p2[1] ):\n test = float((point[1]-p1[1])*(p2[0]-p1[0]))/float(((p2[1]-p1[1])+p1[0]))\n if( p1[0] == p2[0] or point[0] <= test ):\n counter = counter + 1\n p1 = p2\n\n if( counter % 2 == 0 ):\n retVal = False\n return retVal\n return retVal\n #else:\n # result = cv2.pointPolygonTest(np.array(polygon,dtype='float32'),point,0)\n # return result > 0 ", "def __getGeometry(self, geom):\n if \"POLYGON\" in self.geometryType:\n rings = geom['rings']\n polygon = getMultiGeometry(rings)\n polyGeom = arcpy.Polygon(polygon, self.sr)\n return polyGeom\n elif \"POLYLINE\" in self.geometryType:\n paths = geom['paths']\n polyline = getMultiGeometry(paths)\n lineGeom = arcpy.Polyline(polyline, self.sr)\n return lineGeom\n elif \"POINT\" in self.geometryType:\n try:\n point = arcpy.Point(float(geom['x']), float(geom['y']))\n except:\n raise NullGeometryError(\"Point geometry is invalid or null\")\n pointGeom = arcpy.Geometry(\"point\", point, self.sr)\n return pointGeom", "def build_polygon(self, \n dataset_metadata_dict, \n bounding_box, \n visibility=True, \n parent_folder=None, \n polygon_name=None):\n if parent_folder is None:\n parent_folder=self.dataset_type_folder\n if polygon_name is None:\n polygon_name = str(dataset_metadata_dict['dataset_title'])\n\n try:\n if dataset_metadata_dict['convex_hull_polygon']:\n polygon_bounds = [[float(ordinate)\n for ordinate in coord_pair.strip().split(' ')\n ]\n for coord_pair in\n re.search('POLYGON\\(\\((.*)\\)\\)',\n dataset_metadata_dict['convex_hull_polygon']\n ).group(1).split(',')\n ]\n # build the polygon based on the bounds. Also set the polygon name. 
It is inserted into the self.dataset_type_folder.\n polygon_kml = parent_folder.newpolygon(name=polygon_name,\n outerboundaryis=polygon_bounds, visibility=visibility)\n \n polygon_kml.style = self.polygon_style\n \n # Always set timestamps on polygons\n self.set_timestamps(polygon_kml, dataset_metadata_dict)\n\n # build the polygon description\n description_string = '<![CDATA['\n description_string = description_string + '<p><b>{0}: </b>{1}</p>'.format('Survey Name',\n str(dataset_metadata_dict['dataset_title']))\n description_string = description_string + '<p><b>{0}: </b>{1}</p>'.format('Survey ID', str(dataset_metadata_dict['ga_survey_id']))\n description_string = description_string + '<p><b>{0}: </b>{1}</p>'.format('Survey Start Date',\n str(dataset_metadata_dict['start_date']))\n description_string = description_string + '<p><b>{0}: </b>{1}</p>'.format('Survey End Date',\n str(dataset_metadata_dict['end_date']))\n if dataset_metadata_dict['dataset_link']:\n description_string = description_string + '<p><b>{0}: </b>{1}</p>'.format('Link to dataset', str(\n dataset_metadata_dict['dataset_link']))\n description_string = description_string + ']]>'\n polygon_kml.description = description_string\n \n return polygon_kml\n \n except Exception as e:\n logger.debug('Unable to display polygon \"{}\": {}'.format(dataset_metadata_dict['convex_hull_polygon'], e))", "def _grow_polygon_points(pts,growth):\n \n min_x1 = 0 # Initialize smallest x to 1st point\n min_x2 = 1 # Initialize second smallest x to 2nd point\n if pts[min_x2][0] < pts[min_x1][0]: # Swap if assumption was incorrect\n min_x1 = 1\n min_x2 = 0\n min_y1 = 0 # Initialize smallest y to 1st point\n min_y2 = 1 # Initialize second smallest y to 2nd point\n if pts[min_y2][1] < pts[min_y1][1]: # Swap if assumption was incorrect\n min_y1 = 1\n min_y2 = 0\n for i in range(2,len(pts)): # For other 2 points\n if pts[i][0] < pts[min_x2][0]: # Point is less than 2nd smallest\n if pts[i][0] < pts[min_x1][0]: # Point is also less than 1st smallest\n min_x2 = min_x1\n min_x1 = i\n else:\n min_x2 = i\n if pts[i][1] < pts[min_y2][1]:\n if pts[i][1] < pts[min_y1][1]:\n min_y2 = min_y1\n min_y1 = i\n else:\n min_y2 = i\n print\n print pts\n print \"min_x1: \" + str(min_x1) \n print \"min_x2: \" + str(min_x2) \n print \"min_y1: \" + str(min_y1) \n print \"min_y2: \" + str(min_y2) \n for i in range(len(pts)): # For each point\n if i == min_x1 or i == min_x2: # x is minimum, shrink\n pts[i] = (pts[i][0] - growth, pts[i][1])\n else: # x is maximum, grow\n pts[i] = (pts[i][0] + growth, pts[i][1])\n if i == min_y1 or i == min_y2: # y is minimum, shrink\n pts[i] = (pts[i][0], pts[i][1] - growth) \n else: # y is maximum, grow\n pts[i] = (pts[i][0], pts[i][1] + growth)", "def crosses_dateline(self):\n if not isinstance(self.crs, GeographicalCRS):\n raise CRSError(\"Dateline detection only defined for geographical \"\n \"coordinates\")\n\n return any(self._seg_crosses_dateline(seg) for seg in self.segments)", "def test_merge_stitches_together_a_polygon_surrounding_another_polygon(self):\n topology = {\n \"type\": \"Topology\",\n \"objects\": {\n \"collection\": {\n \"type\": \"GeometryCollection\",\n \"geometries\": [\n {\"type\": \"Polygon\", \"arcs\": [[0], [1]]},\n {\"type\": \"Polygon\", \"arcs\": [[-2]]},\n ],\n }\n },\n \"arcs\": [\n [[0, 0], [0, 3], [3, 3], [3, 0], [0, 0]],\n [[1, 1], [2, 1], [2, 2], [1, 2], [1, 1]],\n ],\n }\n self.assertDictEqual(\n {\n \"type\": \"MultiPolygon\",\n \"coordinates\": [[[[0, 0], [0, 3], [3, 3], [3, 0], [0, 0]]]],\n },\n 
self.merge(topology, topology[\"objects\"][\"collection\"][\"geometries\"]),\n )", "def convex_polygon_intersect_test(polygon1, polygon2):\n\n # Find the polygon that has fewer sides so that we can do fewer checks\n polygon_a = polygon1 if len(polygon1) <= len(polygon2) else polygon2\n polygon_b = polygon2 if len(polygon1) > len(polygon2) else polygon1\n\n # Perform Separating Axis Test\n intersect = True\n edge_index = 0\n edges = polygon_a.edges() + polygon_b.edges()\n\n # Loop through the edges of polygonA searching for a separating axis\n while intersect and edge_index < len(edges):\n\n # Get an axis normal to the current edge\n edge = edges[edge_index]\n edge_vector = linalg.sub(edge[1], edge[0])\n projection_axis = linalg.lnormal(edge_vector)\n\n # Get the projection ranges for each polygon onto the projection axis\n min_a, max_a = range_project_polygon(projection_axis, polygon_a)\n min_b, max_b = range_project_polygon(projection_axis, polygon_b)\n\n # test if projections overlap\n if min_a > max_b or max_a < min_b:\n intersect = False\n edge_index += 1\n\n return intersect", "def remove_excess_polygon(polygons_dict, region):\n start_len = len(polygons_dict)\n poly_region_default_area = area(\n geojson.Feature(geometry=region, properties={}).geometry)\n idx = 0\n iteration_range = start_len\n while idx < iteration_range:\n intersection_polygon_area = 0\n poly_list = []\n poly_copy = copy.deepcopy(polygons_dict)\n del poly_copy[idx]\n for el in poly_copy:\n el_poly = shapely.geometry.asShape(el['geometry'])\n poly_list.append(el_poly)\n union_poly = cascaded_union(poly_list)\n intersection_polygon = union_poly.intersection(region)\n if not (intersection_polygon.is_empty and union_poly.is_empty):\n intersection_polygon_area = area(geojson.Feature(geometry=intersection_polygon, properties={}).geometry)\n else:\n break\n if float(\"{0:.2f}\".format(poly_region_default_area)) == float(\"{0:.2f}\".format(intersection_polygon_area)):\n del polygons_dict[idx]\n iteration_range -= 1\n else:\n idx += 1\n if len(polygons_dict) > 0 and (len(polygons_dict) != start_len):\n return polygons_dict\n else:\n return None", "def test_merge_does_not_stitch_together_a_polygon_and_its_hole(self):\n topology = {\n \"type\": \"Topology\",\n \"objects\": {\n \"collection\": {\n \"type\": \"GeometryCollection\",\n \"geometries\": [{\"type\": \"Polygon\", \"arcs\": [[0], [1]]}],\n }\n },\n \"arcs\": [\n [[0, 0], [0, 3], [3, 3], [3, 0], [0, 0]],\n [[1, 1], [2, 1], [2, 2], [1, 2], [1, 1]],\n ],\n }\n self.assertDictEqual(\n {\n \"type\": \"MultiPolygon\",\n \"coordinates\": [\n [\n [[0, 0], [0, 3], [3, 3], [3, 0], [0, 0]],\n [[1, 1], [2, 1], [2, 2], [1, 2], [1, 1]],\n ]\n ],\n },\n self.merge(topology, topology[\"objects\"][\"collection\"][\"geometries\"]),\n )", "def geojson_to_polygons(js_):\n\n burnt_polys = []\n building_polys = []\n for i, feat in enumerate(js_['features']):\n o = {\n \"coordinates\": feat['geometry']['coordinates'],\n \"type\": feat['geometry']['type']\n }\n s = json.dumps(o)\n\n # convert to geojson.geometry.Polygon\n g1 = geojson.loads(s)\n\n # covert to shapely.geometry.polygon.Polygon\n g2 = shape(g1)\n\n if feat['properties']['color'] == 'red': # red for the burnt region\n burnt_polys.append(g2)\n else: # for the building poly\n building_polys.append([g2, [feat['properties']['BuildingID'], 'yellow',\n False]]) # mark building polygons as 'yellow' for non-burnt for now\n return burnt_polys, building_polys", "def extent_as_polygon(self, crs=wgs84):\n from shapely.geometry import 
Polygon\n\n # this is not so trivial\n # for optimisation we will transform the boundaries only\n _i = np.hstack([np.arange(self.nx+1),\n np.ones(self.ny+1)*self.nx,\n np.arange(self.nx+1)[::-1],\n np.zeros(self.ny+1)]).flatten()\n _j = np.hstack([np.zeros(self.nx+1),\n np.arange(self.ny+1),\n np.ones(self.nx+1)*self.ny,\n np.arange(self.ny+1)[::-1]]).flatten()\n _i, _j = self.corner_grid.ij_to_crs(_i, _j, crs=crs)\n return Polygon(zip(_i, _j))", "def polygon_clip(subjectPolygon, clipPolygon):\n def inside(p):\n return (cp2[0] - cp1[0]) * (p[1] - cp1[1]) > (cp2[1] - cp1[1]) * (p[0] - cp1[0])\n\n def computeIntersection():\n dc = [cp1[0] - cp2[0], cp1[1] - cp2[1]]\n dp = [s[0] - e[0], s[1] - e[1]]\n n1 = cp1[0] * cp2[1] - cp1[1] * cp2[0]\n n2 = s[0] * e[1] - s[1] * e[0]\n n3 = 1.0 / (dc[0] * dp[1] - dc[1] * dp[0])\n return [(n1 * dp[0] - n2 * dc[0]) * n3, (n1 * dp[1] - n2 * dc[1]) * n3]\n\n outputList = subjectPolygon\n cp1 = clipPolygon[-1]\n\n for clipVertex in clipPolygon:\n cp2 = clipVertex\n inputList = outputList\n outputList = []\n s = inputList[-1]\n\n for subjectVertex in inputList:\n e = subjectVertex\n if inside(e):\n if not inside(s):\n outputList.append(computeIntersection())\n outputList.append(e)\n elif inside(s):\n outputList.append(computeIntersection())\n s = e\n cp1 = cp2\n if len(outputList) == 0:\n return None\n return (outputList)", "def F_interp_geos_mat(sounding_lon,sounding_lat,sounding_datenum,\\\n geos_dir='/mnt/Data2/GEOS/s5p_interp/',\\\n interp_fields=['TROPPT']):\n from scipy.io import loadmat\n from scipy.interpolate import RegularGridInterpolator\n \n start_datenum = np.amin(sounding_datenum)\n end_datenum = np.amax(sounding_datenum)\n start_datetime = datedev_py(start_datenum)\n start_year = start_datetime.year\n start_month = start_datetime.month\n start_day = start_datetime.day\n start_hour = start_datetime.hour\n \n end_datetime = datedev_py(end_datenum)\n end_year = end_datetime.year\n end_month = end_datetime.month\n end_day = end_datetime.day\n end_hour = end_datetime.hour\n end_minute = end_datetime.minute\n end_second = end_datetime.second\n \n step_hour = 3 # geos fp data are 3-hourly\n \n geos_start_hour = start_hour-start_hour%step_hour\n geos_start_datetime = datetime.datetime(year=start_year,month=start_month,day=start_day,hour=geos_start_hour)\n if end_hour > 24-step_hour or (end_hour == 24-step_hour and (end_minute > 0 or end_second > 0)):\n geos_end_hour = 0\n geos_end_datetime = datetime.datetime(year=end_year,month=end_month,day=end_day,hour=geos_end_hour) +datetime.timedelta(days=1)\n elif end_hour%step_hour == 0 and end_minute == 0 and end_second == 0:\n geos_end_hour = end_hour\n geos_end_datetime = datetime.datetime(year=end_year,month=end_month,day=end_day,hour=geos_end_hour)\n else:\n geos_end_hour = (step_hour-(end_hour+1)%step_hour)%step_hour+end_hour+1\n geos_end_datetime = datetime.datetime(year=end_year,month=end_month,day=end_day,hour=geos_end_hour)\n \n nstep = (geos_end_datetime-geos_start_datetime).total_seconds()/3600/step_hour+1\n nstep = int(nstep)\n \n geos_data = {}\n # load narr data\n for istep in range(nstep):\n file_datetime = geos_start_datetime+datetime.timedelta(hours=step_hour*istep)\n file_dir = os.path.join(geos_dir,file_datetime.strftime('Y%Y'),\\\n file_datetime.strftime('M%m'),\\\n file_datetime.strftime('D%d'))\n file_path = os.path.join(file_dir,'subset_'+file_datetime.strftime('%Y%m%d_%H')+'.mat')\n if not geos_data:\n mat_data = 
loadmat(file_path,variable_names=np.concatenate((['lat','lon'],interp_fields)))\n geos_data['lon'] = mat_data['lon'].flatten()\n geos_data['lat'] = mat_data['lat'].flatten()\n geos_data['datenum'] = np.zeros((nstep),dtype=np.float64)\n for fn in interp_fields:\n geos_data[fn] = np.zeros((len(geos_data['lon']),len(geos_data['lat']),nstep))\n geos_data[fn][...,istep] = mat_data[fn]\n else:\n mat_data = loadmat(file_path,variable_names=interp_fields)\n for fn in interp_fields:\n geos_data[fn][...,istep] = mat_data[fn]\n \n geos_data['datenum'][istep] = (file_datetime.toordinal()\\\n +file_datetime.hour/24.\\\n +file_datetime.minute/1440.\\\n +file_datetime.second/86400.+366.)\n # interpolate\n sounding_interp = {}\n for fn in interp_fields:\n my_interpolating_function = \\\n RegularGridInterpolator((geos_data['lon'],geos_data['lat'],geos_data['datenum']),\\\n geos_data[fn],bounds_error=False,fill_value=np.nan)\n sounding_interp[fn] = my_interpolating_function((sounding_lon,sounding_lat,sounding_datenum))\n return sounding_interp", "def test_ordering_polygon_vertices(self):\n\n # So far the admissible classes are Point, Line and Polygon\n tmp_filename = unique_filename(suffix='.shp')\n\n # Simple polygon (in clock wise order)\n P = numpy.array([[106.79, -6.23],\n [106.80, -6.24],\n [106.78, -6.23],\n [106.77, -6.21]])\n\n v_ref = Vector(geometry=[P], geometry_type='polygon')\n v_ref.write_to_file(tmp_filename)\n v_file = read_layer(tmp_filename)\n for i in range(len(v_ref)):\n x = v_ref.get_geometry()[i]\n y = v_file.get_geometry()[i]\n msg = 'Read geometry %s, but expected %s' % (y, x)\n assert numpy.allclose(x, y), msg\n\n assert v_file == v_ref\n assert v_ref == v_file\n assert v_file.is_polygon_data\n assert v_file.geometry_type == 3\n\n # Reversed order (OGR will swap back to clockwise)\n P = numpy.array([[106.77, -6.21],\n [106.78, -6.23],\n [106.80, -6.24],\n [106.79, -6.23]])\n\n v_ref = Vector(geometry=[P], geometry_type='polygon')\n v_ref.write_to_file(tmp_filename)\n v_file = read_layer(tmp_filename)\n for i in range(len(v_ref)):\n x = v_ref.get_geometry()[i]\n x = x[::-1, :] # Flip Up-Down to get order clockwise\n y = v_file.get_geometry()[i]\n msg = 'Read geometry %s, but expected %s' % (y, x)\n assert numpy.allclose(x, y), msg\n assert v_file == v_ref\n assert v_ref == v_file\n assert v_ref.is_polygon_data\n assert v_ref.geometry_type == 3\n\n # Self intersecting polygon (in this case order will be flipped)\n P = numpy.array([[106.79, -6.23],\n [106.80, -6.24],\n [106.78, -6.23],\n [106.79, -6.22],\n [106.77, -6.21]])\n v_ref = Vector(geometry=[P], geometry_type='polygon')\n v_ref.write_to_file(tmp_filename)\n v_file = read_layer(tmp_filename)\n for i in range(len(v_ref)):\n x = v_ref.get_geometry()[i]\n x = x[::-1, :] # Flip Up-Down to get order clockwise\n y = v_file.get_geometry()[i]\n msg = 'Read geometry %s, but expected %s' % (y, x)\n assert numpy.allclose(x, y), msg\n\n assert v_file == v_ref\n assert v_ref == v_file\n assert v_file.is_polygon_data\n assert v_file.geometry_type == 3", "def polygon_clip(subjectPolygon, clipPolygon):\n def inside(p):\n return(cp2[0]-cp1[0])*(p[1]-cp1[1]) > (cp2[1]-cp1[1])*(p[0]-cp1[0])\n \n def computeIntersection():\n dc = [ cp1[0] - cp2[0], cp1[1] - cp2[1] ]\n dp = [ s[0] - e[0], s[1] - e[1] ]\n n1 = cp1[0] * cp2[1] - cp1[1] * cp2[0]\n n2 = s[0] * e[1] - s[1] * e[0] \n n3 = 1.0 / (dc[0] * dp[1] - dc[1] * dp[0])\n return [(n1*dp[0] - n2*dc[0]) * n3, (n1*dp[1] - n2*dc[1]) * n3]\n\n outputList = subjectPolygon\n cp1 = clipPolygon[-1]\n\n for 
clipVertex in clipPolygon:\n cp2 = clipVertex\n inputList = outputList\n outputList = []\n s = inputList[-1]\n\n for subjectVertex in inputList:\n e = subjectVertex\n if inside(e):\n if not inside(s):\n outputList.append(computeIntersection())\n outputList.append(e)\n elif inside(s):\n outputList.append(computeIntersection())\n s = e\n cp1 = cp2\n if len(outputList) == 0:\n return None\n return(outputList)", "def getPolygonBoundaries(self, polygon: Polygon):\n polygon_df = gpd.GeoDataFrame([polygon], columns=['geometry'])\n\n polygon_df.set_crs(epsg=self.output_epsg, inplace=True)\n polygon_df['geometry'] = polygon_df['geometry'].to_crs(epsg=self.input_epsg)\n minx, miny, maxx, maxy = polygon_df['geometry'][0].bounds\n\n polygon_input = 'POLYGON(('\n xcords, ycords = polygon_df['geometry'][0].exterior.coords.xy\n for x, y in zip(list(xcords), list(ycords)):\n polygon_input += f'{x} {y}, '\n polygon_input = polygon_input[:-2]\n polygon_input += '))'\n\n return f\"({[minx, maxx]},{[miny,maxy]})\", polygon_input", "def compute_layers(prod):\n # 1. Do polygons overlap for the same outlook\n LOG.warning(\"==== Running Geometry differences\")\n for day in prod.outlook_collections:\n prod.outlook_collections[day].difference_geometries()", "def trapezoid_decomposition_linear(polygons):\n # Enumerate all the edges and iteratively build up the set of trapezoids\n # Add a vertical line for each point in the polygon\n all_polygons = np.concatenate(polygons, axis=0)\n vertical_lines = SortedDict({x[0]: [x[1], 1000000, 0] for x in all_polygons})\n\n # Loop over Polygons to determine end-points\n for polygon in polygons:\n start_vertex = polygon[0]\n for vertex in polygon[1:]:\n # find the lines in front of the smaller \n x_start = start_vertex[0]\n x_curr = vertex[0]\n start_idx = vertical_lines.bisect_right(min(x_start, x_curr))\n end_idx = vertical_lines.bisect_left(max(x_start, x_curr))\n x_vals = vertical_lines.keys()\n for i in range(start_idx, end_idx):\n x = x_vals[i]\n if x < min(x_start, x_curr) or x > max(x_start, x_curr):\n continue\n y, top, bottom = vertical_lines[x]\n y_val = linear_interpolation(start_vertex, vertex, x)\n if y_val > y and y_val < top:\n vertical_lines[x][1] = y_val\n elif y_val < y and y_val > bottom:\n vertical_lines[x][2] = y_val\n start_vertex = vertex\n return vertical_lines", "def point_in_polygon(x, y, poly):\n n = len(poly)\n inside = False\n\n p1x, p1y = poly[0]\n for i in range(n + 1):\n p2x, p2y = poly[i % n]\n if y > min(p1y, p2y):\n if y <= max(p1y, p2y):\n if x <= max(p1x, p2x):\n if p1y != p2y:\n xinters = (y - p1y) * (p2x - p1x) / (p2y - p1y) + p1x\n if p1x == p2x or x <= xinters:\n inside = not inside\n p1x, p1y = p2x, p2y\n return inside", "def split_bygeom(self, iterable, geom_getter=lambda x: x.geom):\n points, linestrings, multipoints, multilinestrings = [], [], [], []\n\n for x in iterable:\n geom = geom_getter(x)\n if geom is None:\n pass\n elif isinstance(geom, GeometryCollection):\n # Duplicate object, shapefile do not support geometry collections !\n subpoints, sublines, pp, ll = self.split_bygeom(geom, geom_getter=lambda geom: geom)\n if subpoints:\n clone = x.__class__.objects.get(pk=x.pk)\n clone.geom = MultiPoint(subpoints, srid=geom.srid)\n multipoints.append(clone)\n if sublines:\n clone = x.__class__.objects.get(pk=x.pk)\n clone.geom = MultiLineString(sublines, srid=geom.srid)\n multilinestrings.append(clone)\n elif isinstance(geom, Point):\n points.append(x)\n elif isinstance(geom, LineString):\n linestrings.append(x)\n else:\n raise 
ValueError(\"Only LineString and Point geom should be here. Got %s for pk %d\" % (geom, x.pk))\n return points, linestrings, multipoints, multilinestrings", "def test_clip_points_by_polygons_with_holes0(self):\n\n # Define an outer ring\n outer_ring = numpy.array([[106.79, -6.233],\n [106.80, -6.24],\n [106.78, -6.23],\n [106.77, -6.21],\n [106.79, -6.233]])\n\n # Define inner rings\n inner_rings = [numpy.array([[106.77827, -6.2252],\n [106.77775, -6.22378],\n [106.78, -6.22311],\n [106.78017, -6.22530],\n [106.77827, -6.2252]])[::-1],\n numpy.array([[106.78652, -6.23215],\n [106.78642, -6.23075],\n [106.78746, -6.23143],\n [106.78831, -6.23307],\n [106.78652, -6.23215]])[::-1]]\n\n v = Vector(geometry=[Polygon(outer_ring=outer_ring,\n inner_rings=inner_rings)])\n assert v.is_polygon_data\n\n # Write it to file\n tmp_filename = unique_filename(suffix='.shp')\n v.write_to_file(tmp_filename)\n\n # Read polygon it back\n L = read_layer(tmp_filename)\n P = L.get_geometry(as_geometry_objects=True)[0]\n\n outer_ring = P.outer_ring\n inner_ring0 = P.inner_rings[0]\n inner_ring1 = P.inner_rings[1]\n\n # Make some test points\n points = generate_random_points_in_bbox(outer_ring, 1000, seed=13)\n\n # Clip to outer ring, excluding holes\n indices = inside_polygon(points, P.outer_ring, holes=P.inner_rings)\n\n # Sanity\n for point in points[indices, :]:\n # Must be inside outer ring\n assert is_inside_polygon(point, outer_ring)\n\n # But not in any of the inner rings\n assert not is_inside_polygon(point, inner_ring0)\n assert not is_inside_polygon(point, inner_ring1)\n\n if False:\n # Store for visual check\n pol = Vector(geometry=[P])\n tmp_filename = unique_filename(suffix='.shp')\n pol.write_to_file(tmp_filename)\n print 'Polygon with holes written to %s' % tmp_filename\n\n pts = Vector(geometry=points[indices, :])\n tmp_filename = unique_filename(suffix='.shp')\n pts.write_to_file(tmp_filename)\n print 'Clipped points written to %s' % tmp_filename", "def point_in_polygon(pnt, poly): # pnt_in_poly(pnt, poly): #\r\n x, y = pnt\r\n N = len(poly)\r\n for i in range(N):\r\n x0, y0, xy = [poly[i][0], poly[i][1], poly[(i + 1) % N]]\r\n c_min = min([x0, xy[0]])\r\n c_max = max([x0, xy[0]])\r\n if c_min < x <= c_max:\r\n p = y0 - xy[1]\r\n q = x0 - xy[0]\r\n y_cal = (x - x0) * p / q + y0\r\n if y_cal < y:\r\n return True\r\n return False", "def boundary_polygon(self):\n try:\n return self.boundary_polygon_by_edges()\n except Exception as exc:\n self.log.warning('Warning, boundary_polygon() failed using edges! 
Trying polygon union method')\n self.log.warning(exc,exc_info=True)\n return self.boundary_polygon_by_union()", "def shrink_polygon(self,polygon, offset = 1):\r\n \r\n import numpy as np\r\n import copy\r\n import math\r\n \r\n def angle(x1, y1, x2, y2):\r\n numer = (x1*x2 + y1*y2)\r\n denom = np.sqrt((x1**2 + y1**2) * (x2**2 + y2**2))\r\n print(numer)\r\n print(denom)\r\n print( math.acos(numer/denom) )\r\n return math.acos(numer/denom) \r\n \r\n def cross_sign(x1, y1, x2, y2):\r\n return x1*y2 > x2*y1\r\n \r\n # If the polygon is closed, un-close it\r\n closed = False\r\n if np.linalg.norm(polygon[0,:]-polygon[-1,:]) < 1E-10:\r\n polygon = polygon[:-1,:]\r\n closed = True\r\n \r\n # Make sure polygon is counter-clockwise\r\n if self.are_vertices_clockwise(np.row_stack((polygon,polygon[0,:]))):\r\n polygon = np.flipud(polygon)\r\n \r\n polygon_shrinked = copy.copy(polygon)\r\n \r\n for idx in range(polygon.shape[0]):\r\n \r\n if idx == polygon.shape[0]-1:\r\n vtx_before = idx-1\r\n vtx_center = idx\r\n vtx_after = 0\r\n else:\r\n vtx_before = idx-1\r\n vtx_center = idx\r\n vtx_after = idx+1\r\n \r\n side_before = polygon[vtx_center,:] - polygon[vtx_before,:]\r\n side_after = polygon[vtx_after,:] - polygon[vtx_center,:]\r\n \r\n side_before /= np.linalg.norm(side_before)\r\n side_after /= np.linalg.norm(side_after)\r\n \r\n nvec_before = np.asarray([-side_before[1], side_before[0]])\r\n nvec_after = np.asarray([-side_after[1], side_after[0]])\r\n \r\n vtx1_before = polygon[vtx_before,:] + nvec_before*offset\r\n vtx2_before = polygon[vtx_center,:] + nvec_before*offset\r\n \r\n vtx1_after = polygon[vtx_center,:] + nvec_after*offset\r\n vtx2_after = polygon[vtx_after,:] + nvec_after*offset\r\n \r\n p = vtx1_before\r\n r = (vtx2_before-vtx1_before)\r\n \r\n q = vtx1_after\r\n s = (vtx2_after-vtx1_after)\r\n \r\n if np.cross(r,s) == 0:\r\n \r\n # Lines are collinear\r\n polygon_shrinked[idx,:] = vtx2_before\r\n \r\n else:\r\n \r\n # Lines are not collinear\r\n t = np.cross(q - p,s)/(np.cross(r,s))\r\n \r\n # This is the intersection point\r\n polygon_shrinked[idx,:] = p + t*r\r\n \r\n if closed:\r\n polygon_shrinked = np.row_stack((\r\n polygon_shrinked,\r\n polygon_shrinked[0,:]))\r\n \r\n return polygon_shrinked", "def shrink_polygon(self,polygon, offset = 1):\r\n \r\n import numpy as np\r\n import copy\r\n import math\r\n \r\n def angle(x1, y1, x2, y2):\r\n numer = (x1*x2 + y1*y2)\r\n denom = np.sqrt((x1**2 + y1**2) * (x2**2 + y2**2))\r\n print(numer)\r\n print(denom)\r\n print( math.acos(numer/denom) )\r\n return math.acos(numer/denom) \r\n \r\n def cross_sign(x1, y1, x2, y2):\r\n return x1*y2 > x2*y1\r\n \r\n # If the polygon is closed, un-close it\r\n closed = False\r\n if np.linalg.norm(polygon[0,:]-polygon[-1,:]) < 1E-10:\r\n polygon = polygon[:-1,:]\r\n closed = True\r\n \r\n # Make sure polygon is counter-clockwise\r\n if self.are_vertices_clockwise(np.row_stack((polygon,polygon[0,:]))):\r\n polygon = np.flipud(polygon)\r\n \r\n polygon_shrinked = copy.copy(polygon)\r\n \r\n for idx in range(polygon.shape[0]):\r\n \r\n if idx == polygon.shape[0]-1:\r\n vtx_before = idx-1\r\n vtx_center = idx\r\n vtx_after = 0\r\n else:\r\n vtx_before = idx-1\r\n vtx_center = idx\r\n vtx_after = idx+1\r\n \r\n side_before = polygon[vtx_center,:] - polygon[vtx_before,:]\r\n side_after = polygon[vtx_after,:] - polygon[vtx_center,:]\r\n \r\n side_before /= np.linalg.norm(side_before)\r\n side_after /= np.linalg.norm(side_after)\r\n \r\n nvec_before = np.asarray([-side_before[1], side_before[0]])\r\n 
nvec_after = np.asarray([-side_after[1], side_after[0]])\r\n \r\n vtx1_before = polygon[vtx_before,:] + nvec_before*offset\r\n vtx2_before = polygon[vtx_center,:] + nvec_before*offset\r\n \r\n vtx1_after = polygon[vtx_center,:] + nvec_after*offset\r\n vtx2_after = polygon[vtx_after,:] + nvec_after*offset\r\n \r\n p = vtx1_before\r\n r = (vtx2_before-vtx1_before)\r\n \r\n q = vtx1_after\r\n s = (vtx2_after-vtx1_after)\r\n \r\n if np.cross(r,s) == 0:\r\n \r\n # Lines are collinear\r\n polygon_shrinked[idx,:] = vtx2_before\r\n \r\n else:\r\n \r\n # Lines are not collinear\r\n t = np.cross(q - p,s)/(np.cross(r,s))\r\n \r\n # This is the intersection point\r\n polygon_shrinked[idx,:] = p + t*r\r\n \r\n if closed:\r\n polygon_shrinked = np.row_stack((\r\n polygon_shrinked,\r\n polygon_shrinked[0,:]))\r\n \r\n return polygon_shrinked", "def toShapely(ring, geometryList):\n outline = []\n for idxA, idxB in zip(ring, ring[1:] + ring[:1]):\n shape = geometryList[idxA].GetShape()\n if shape in [STROKE_T.S_ARC, STROKE_T.S_CIRCLE]:\n outline += approximateArc(geometryList[idxA],\n commonEndPoint(geometryList[idxA], geometryList[idxB]))\n elif shape in [STROKE_T.S_CURVE]:\n outline += approximateBezier(geometryList[idxA],\n commonEndPoint(geometryList[idxA], geometryList[idxB]))\n elif shape in [STROKE_T.S_RECT]:\n assert idxA == idxB\n outline += geometryList[idxA].GetRectCorners()\n elif shape in [STROKE_T.S_POLYGON]:\n # Polygons are always closed, so they should appear as stand-alone\n assert len(ring) in [1, 2]\n return shapePolyToShapely(geometryList[idxA].GetPolyShape())\n elif shape in [STROKE_T.S_SEGMENT]:\n outline.append(commonEndPoint(geometryList[idxA], geometryList[idxB]))\n else:\n raise RuntimeError(f\"Unsupported shape {shape} in outline\")\n return Polygon(outline)" ]
[ "0.67136943", "0.63109124", "0.5897794", "0.56662834", "0.5617019", "0.5594572", "0.5547453", "0.55368793", "0.5471673", "0.5448653", "0.54439175", "0.54206467", "0.5395042", "0.53801703", "0.5376376", "0.5372196", "0.5371982", "0.5348414", "0.53426564", "0.53149515", "0.53107405", "0.5295455", "0.5234663", "0.52319884", "0.5222803", "0.52158165", "0.5212296", "0.5205853", "0.5149984", "0.5137602", "0.5129526", "0.5115809", "0.51119345", "0.5107609", "0.51028097", "0.5102198", "0.5094649", "0.50942576", "0.50850886", "0.5076834", "0.50629747", "0.5058436", "0.50535536", "0.50533956", "0.5047325", "0.5044213", "0.50429887", "0.5040226", "0.50217843", "0.5016959", "0.50154316", "0.5008021", "0.5007327", "0.50042546", "0.5003134", "0.49964735", "0.49917972", "0.4985286", "0.497912", "0.4964861", "0.49551243", "0.49433887", "0.49389452", "0.49305883", "0.49271667", "0.49200556", "0.49193895", "0.49174654", "0.4910628", "0.48994756", "0.4896574", "0.48918927", "0.48893997", "0.48893982", "0.488712", "0.48839626", "0.4869141", "0.48562992", "0.48504195", "0.4844107", "0.48358464", "0.48327285", "0.48308647", "0.4829525", "0.48265457", "0.48229182", "0.48225105", "0.48219746", "0.48217085", "0.48139554", "0.48104233", "0.48039222", "0.4797334", "0.4797006", "0.47871912", "0.47689468", "0.47653916", "0.4761991", "0.4761991", "0.4759664" ]
0.76047856
0
Test that invalid tokens work
def test_invalid_tokens(self): self.assertTrue(1 + 1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_unused_token_is_valid(self):\n assert self.token.is_valid()", "def test_regex_invalid_tokens(self):\n tokens = (\n \"\",\n \"lemon wins\",\n \"..\",\n \"x.y\",\n \"x.y.\",\n \".y.z\",\n \".y.\",\n \"..z\",\n \"x..z\",\n \" . . \",\n \"\\n.\\n.\\n\",\n \"hellö.world.bye\",\n \"base64.nötbåse64.morebase64\",\n \"19jd3J.dfkm3d.€víł§tüff\",\n )\n\n for token in tokens:\n with self.subTest(token=token):\n results = token_remover.TOKEN_RE.findall(token)\n self.assertEqual(len(results), 0)", "def test_rejects_invalid_tokens(self):\n config.set(xsrf_token_key='abcdef')\n tool = utils.XsrfTool()\n self.assertFalse(tool.verify_token(\n 'ThisTokenDoesNotEvenHaveASlash', 12345, 'test_action'))\n timestamp = utils.get_timestamp(XsrfToolTests.TEST_NOW)\n self.assertFalse(\n tool.verify_token('NotTheRightDigest/%f' % timestamp, 12345,\n 'test_action'))", "def test_missing_multiple_tokens(self):\n self.helper_test_evaluate_raises(\n 'A or (B and (C and not D))',\n expected_exc_type=MissingSymbolError,\n A=0,\n D=1)", "def test_missing_all_tokens(self):\n self.helper_test_evaluate_raises(\n '(A nand B) and not D',\n expected_exc_type=MissingSymbolError)", "def testIsValidToken(self):\n self.assertTrue(TokenResource.isValidToken('aValidToken'),\n msg='Expected isValidToken to accept a valid token.')\n self.assertTrue(TokenResource.isValidToken(TokenResource.VALID_TOKEN_CHARS),\n msg='Expected isValidToken to accept a valid token.')\n self.assertFalse(TokenResource.isValidToken('Token!'),\n msg='Expected isValidToken to accept an invalid token.')\n self.assertFalse(TokenResource.isValidToken('an invalid Token'),\n msg='Expected isValidToken to accept an invalid token.')", "def test_unclosed(self):\n nt = NewickTokenizer(newick='(a,(b,c)')\n self.assertRaises(ValueError, nt.tokens)", "def test_open_closed(self):\n nt = NewickTokenizer(newick='(a,(),(d,e));')\n self.assertRaises(ValueError, nt.tokens)", "def test_regex_valid_tokens(self):\n # Don't worry, these tokens have been invalidated.\n tokens = (\n \"NDcyMjY1OTQzMDYy_DEzMz-y.XsyRkw.VXmErH7j511turNpfURmb0rVNm8\",\n \"NDcyMjY1OTQzMDYyNDEzMzMy.Xrim9Q.Ysnu2wacjaKs7qnoo46S8Dm2us8\",\n \"NDc1MDczNjI5Mzk5NTQ3OTA0.XsyR-w.sJf6omBPORBPju3WJEIAcwW9Zds\",\n \"NDY3MjIzMjMwNjUwNzc3NjQx.XsySD_.s45jqDV_Iisn-symw0yDRrk_jf4\",\n )\n\n for token in tokens:\n with self.subTest(token=token):\n results = token_remover.TOKEN_RE.fullmatch(token)\n self.assertIsNotNone(results, f\"{token} was not matched by the regex\")", "def test_no_brackets_in_words():\n raise SkipTest\n assert_raises(ParseError, grammar['word'].parse, ']')", "def test_lexing_error():\n with pytest.raises(SyntaxError):\n lex._lexer(None, None)._load_text(\"TEST\")._throw_lexing_error()", "def test_extra_closed(self):\n nt = NewickTokenizer(newick='(a,(b,c)));')\n self.assertRaises(ValueError, nt.tokens)", "def test_validate_input_rejection_invalid_symbol(self):\n with nose.assert_raises(exceptions.RejectionError):\n self.dtm1.validate_input('02')", "def _check_tokens(number_token=None, name_token=None, gpe_token=None):\n assert number_token is None or number_token == number_token.lower(), \\\n \"Tokens need to be lowercase: %s\" % number_token\n assert name_token is None or name_token == name_token.lower(), \\\n \"Tokens need to be lowercase: %s\" % name_token\n assert gpe_token is None or gpe_token == gpe_token.lower(), \\\n \"Tokens need to be lowercase: %s\" % gpe_token", "def test_missing_single_token(self):\n self.helper_test_evaluate_raises(\n 'A or (B and (C and not D))',\n 
expected_exc_type=MissingSymbolError,\n A=0,\n B=1,\n D=1)", "def test_bad_token(self):\n user = self.create_user()\n\n token_generator = EmailActivationTokenGenerator()\n bad_activation_keys = (\n 'emailactivationtokengenerator',\n 'emailactivation-tokengenerator',\n '3rd-bademailactivationkey'\n )\n for key in bad_activation_keys:\n self.assertFalse(token_generator.check_token(user, key))", "def test_no_start_open_parens(self):\n self.assertRaises(ValueError, NewickTokenizer, newick='hi')", "def test_all_extra_tokens(self):\n self.helper_test_evaluate_raises(\n '1 or 0',\n expected_exc_type=ExtraSymbolError,\n A=1,\n B=1,\n C=1)", "def test_issue401(EN, text, i):\n tokens = EN(text)\n assert tokens[i].lemma_ != \"'\"", "async def validate_token(self, token):", "def test_odd_quotes(self):\n content = \"((h_ ,'p)h p,g()[],:_)hpg;\"\n tok = NewickTokenizer(StringIO(content))\n self.assertRaises(Exception, tok.tokens)\n content = \"((h_ ,'p')h p,'g()[]',:_')hpg;\"\n tok = NewickTokenizer(StringIO(content))\n self.assertRaises(Exception, tok.tokens)", "def testBadKeyToToken(self):\n key = 5\n self.assertRaises(ValueError, dataToToken, key, data='hey')", "def test_bad_symbol():\n token = None\n\n with pytest.raises(LythSyntaxError) as err:\n token = Token(\";\", TokenInfo(\"<stdin>\", 0, 1, \";\"))\n\n assert token is None\n assert err.value.msg is LythError.INVALID_CHARACTER\n assert err.value.filename == \"<stdin>\"\n assert err.value.lineno == 0\n assert err.value.offset == 1\n assert err.value.line == \";\"\n\n token = Token(\"+\", TokenInfo(\"<stdin>\", 0, 1, \"+;\"))\n assert token.info.offset == 1\n assert token.info.filename == \"<stdin>\"\n assert token.lexeme == \"+\"\n assert token.info.lineno == 0\n assert token.symbol == Symbol.ADD\n assert token.info.line == \"+;\"\n\n with pytest.raises(LythSyntaxError) as err:\n token += \";\"\n\n assert token.info.offset == 1\n assert token.info.filename == \"<stdin>\"\n assert token.lexeme == \"+\"\n assert token.info.lineno == 0\n assert token.symbol == Symbol.ADD\n assert token.info.line == \"+;\"\n\n assert err.value.msg is LythError.SYNTAX_ERROR\n assert err.value.filename == \"<stdin>\"\n assert err.value.lineno == 0\n assert err.value.offset == 1\n assert err.value.line == \"+;\"\n\n token = Token(\"6\", TokenInfo(\"<stdin>\", 0, 1, \"6;\"))\n assert token.info.offset == 1\n assert token.info.filename == \"<stdin>\"\n assert token.lexeme == \"6\"\n assert token.info.lineno == 0\n assert token.symbol == Literal.VALUE\n assert token.info.line == \"6;\"\n\n with pytest.raises(LythSyntaxError) as err:\n token += \";\"\n\n assert token.info.offset == 1\n assert token.info.filename == \"<stdin>\"\n assert token.lexeme == \"6\"\n assert token.info.lineno == 0\n assert token.symbol == Literal.VALUE\n assert token.info.line == \"6;\"\n\n assert err.value.msg is LythError.SYNTAX_ERROR\n assert err.value.filename == \"<stdin>\"\n assert err.value.lineno == 0\n assert err.value.offset == 1\n assert err.value.line == \"6;\"\n\n token = token()\n assert token.info.offset == 1\n assert token.info.filename == \"<stdin>\"\n assert token.lexeme == 6\n assert token.info.lineno == 0\n assert token.symbol == Literal.VALUE\n assert token.info.line == \"6;\"", "def test_disallowed_unknown_token(self):\n MALICIOUS0 = token_id_encode(\"MALICIOUS0\")\n MALICIOUS1 = token_id_encode(\"MALICIOUS1\")\n id1 = Identity.create_random_identity()\n acc1 = Address.create_from_identity(id1, full_shard_key=0)\n\n env = get_test_env(\n genesis_account=acc1,\n 
genesis_minor_token_balances={self.GENESIS_TOKEN: 10000000},\n )\n state = create_default_shard_state(env=env)\n tx = create_transfer_transaction(\n shard_state=state,\n key=id1.get_key(),\n from_address=acc1,\n to_address=acc1,\n value=0,\n gas=opcodes.GTXCOST,\n gas_token_id=self.genesis_token,\n transfer_token_id=MALICIOUS0,\n )\n self.assertFalse(state.add_tx(tx))\n\n tx1 = create_transfer_transaction(\n shard_state=state,\n key=id1.get_key(),\n from_address=acc1,\n to_address=acc1,\n value=0,\n gas=opcodes.GTXCOST,\n gas_token_id=MALICIOUS1,\n transfer_token_id=self.genesis_token,\n )\n self.assertFalse(state.add_tx(tx1))", "def test_missing_space_before_symbol():\n token = Token(\"5\", TokenInfo(\"<stdin>\", 0, 1, \"5+\"))\n assert token.info.offset == 1\n assert token.info.filename == \"<stdin>\"\n assert token.lexeme == \"5\"\n assert token.info.lineno == 0\n assert token.symbol == Literal.VALUE\n assert token.info.line == \"5+\"\n\n with pytest.raises(LythSyntaxError) as err:\n token += \"+\"\n\n assert token.lexeme == \"5\"\n assert err.value.msg is LythError.MISSING_SPACE_BEFORE_OPERATOR\n assert err.value.filename == \"<stdin>\"\n assert err.value.lineno == 0\n assert err.value.offset == 1\n assert err.value.line == \"5+\"", "def test_no_arg(self):\n self.assertRaises(ValueError, NewickTokenizer)", "def test_team_reg_bad_value(self):\n for val in self.bad_values:\n self.assertRaises(line_format_errors.FieldParseError,\n lambda: self.line._parse_team_reg(val))", "def testBadDataToToken(self):\n key = createKey()\n self.assertRaises(ValueError, dataToToken, key, data=self)", "def test_invalid_pseudo_open(self):\n\n with self.assertRaises(SyntaxError):\n sv.compile(':is(div')", "def test_unparse_invalid_examples(self):\n for description, example in INVALID_EXAMPLES.items():\n for mode in MODES:\n if example['trees'][mode] is None:\n continue\n with self.assertRaises(SyntaxError, msg=(description, mode)) as raised:\n typed_astunparse.unparse(example['trees'][mode])\n self.assertIn('PEP 526', str(raised.exception), msg=(description, mode))\n\n with self.assertRaises(SyntaxError, msg=(description, mode)):\n typed_ast.ast3.parse(source=example['code'], mode=mode)", "def test_xfailed_not_mentioned_exception():\n assert False", "def test_syntax_errors(self):\r\n bad_math_list = [\r\n '11+',\r\n '11*',\r\n 'f((x)',\r\n 'sqrt(x^)',\r\n '3f(x)', # Not 3*f(x)\r\n '3|4',\r\n '3|||4'\r\n ]\r\n bad_exceptions = {}\r\n for math in bad_math_list:\r\n try:\r\n preview.latex_preview(math)\r\n except pyparsing.ParseException:\r\n pass # This is what we were expecting. 
(not excepting :P)\r\n except Exception as error: # pragma: no cover\r\n bad_exceptions[math] = error\r\n else: # pragma: no cover\r\n # If there is no exception thrown, this is a problem\r\n bad_exceptions[math] = None\r\n\r\n self.assertEquals({}, bad_exceptions)", "def test_invalid_tag(self):\n\n with self.assertRaises(SyntaxError):\n sv.compile(':is(div)p')", "def test_parse_tags_invalid(self):\n tagstring = \"tag1, tag2, tag3!\"\n\n with self.assertRaisesRegexp(Exception, \"invalid tag 'tag3!': only numbers, letters, and commas are \"\n \"allowed in tags\"):\n api.parse_tags(tagstring)", "def test_live_thread_token_is_valid(self):\n assert self.token.is_valid()", "def test_unexpected_comma(self):\n nt = NewickTokenizer(newick='(a,(b,c),,(d,e));')\n self.assertRaises(ValueError, nt.tokens)", "def test_label(self):\n nt = NewickTokenizer(newick=\"(a\\n'b',(b,c),(d,e));\")\n self.assertRaises(ValueError, nt.tokens)", "def test_wrong_input(self):\n\n test_float = 2954.02\n test_list = [\"anagram\", \"gramana\"]\n with pytest.raises(AttributeError) as exc_info:\n is_anagram(test_float, test_list)\n expected_error_msg = \"Words must be strings!\"\n assert exc_info.match(expected_error_msg)", "def test_incompatible_rules():\n\n grammar = \"\"\"\n A: B | C;\n B: 'enumeration';\n C: value=INT;\n \"\"\"\n with pytest.raises(TextXSyntaxError):\n metamodel_from_str(grammar)", "def test_lexing_error_evaluate_1():\n with pytest.raises(SyntaxError):\n lex._lexer([lex_premades.float], [])._load_text(\"TEST\").evaluate()", "def test_several_extra_tokens(self):\n self.helper_test_evaluate_raises(\n 'A or B or C',\n expected_exc_type=ExtraSymbolError,\n A=0,\n B=0,\n C=0,\n D=0,\n E=0)", "def test_garbage_token(self):\n token = 'ffnnsdifsdjofjfosdjfodsjfosdjofj'\n result = self._token_checker.valid_token_to_id(token)\n self.assertEqual(result, None)", "def t_error(t):\n print(\"Illegal character '%s'\" % repr(t.value[0]))\n t.lexer.skip(1)", "def test_decode_token_invalid(token):\n payload = User.decode_auth_token(f'{token}1337')\n assert isinstance(payload, User) is False\n assert 'Invalid token' in payload", "def _check_tokens_are_valid(format_string, message):\n named_tokens = re.findall(r\"{(\\w*)}\", format_string)\n invalid_tokens = [x for x in named_tokens if x.lower() not in _valid_tokens]\n if invalid_tokens:\n msg = message\n msg += \" [{0}]. \".format(\", \".join(invalid_tokens))\n msg += 'Did you check your \"modules.yaml\" configuration?'\n raise RuntimeError(msg)", "def test_is_valid_timestamp_invalid(self):\n timestamps = (\n (\"B4Yffw\", \"DISCORD_EPOCH - TOKEN_EPOCH - 1\"),\n (\"ew\", \"123\"),\n (\"AoIKgA\", \"42076800\"),\n (\"{hello}[world]&(bye!)\", \"ASCII invalid Base64\"),\n (\"Þíß-ï§-ňøẗ-våłìÐ\", \"Unicode invalid Base64\"),\n )\n\n for timestamp, msg in timestamps:\n with self.subTest(msg=msg):\n result = TokenRemover.is_valid_timestamp(timestamp)\n self.assertFalse(result)", "def test_bad_values(self):\n self.assertOK([60])\n self.assertRaisesInternalError([59.9])\n self.assertRaisesInternalError([''])\n self.assertRaisesInternalError([';'])\n self.assertRaisesInternalError(['('])\n self.assertRaisesInternalError([None])", "def test_single_extra_token(self):\n self.helper_test_evaluate_raises(\n 'A and not B',\n expected_exc_type=ExtraSymbolError,\n A=1,\n B=1,\n C=0)", "def test_unclosed_comment(self):\n nt = NewickTokenizer(newick='(a,(b,c),[(d,e));')\n self.assertRaises(ValueError, nt.tokens)", "def test_words_with_numbers(self):\n\n test_string = \"1. 
FC Köln\"\n test_anagram = \"anagram\"\n with pytest.raises(ValueError) as exc_info:\n is_anagram(test_string, test_anagram)\n expected_error_msg = \"should only contain letters!\"\n assert exc_info.match(expected_error_msg)", "def _check_special_token_identifier(key):\n if not (key.endswith('_token') and key != '_token'):\n raise ValueError('Each key needs to have the form \"name_token\".'\n ' Received {}'.format(key))", "def __valid_token_format(self, token):\n if len(token) != self.TOKEN_LENGTH * 2:\n return False\n for c in token:\n if c not in '01234567890abcdef':\n return False\n return True", "def test_should_raise_in_case_of_wrong_characters(self):\n validator = CharCombinationValidator()\n\n regex = re.compile(r'[\\(\\[\\{]\\)\\]\\}')\n forbidden_chars = regex.sub('', punctuation)\n for char in forbidden_chars:\n with self.assertRaises(FormulaValidationError):\n validator('Fe(O)2%s' % char)", "def testTokenToDataWithBadKey(self):\n key = createKey()\n data = {u'user': u'aliafshar'}\n token = dataToToken(key, data)\n self.assertRaises(ValueError, tokenToData, createKey(), token=token)", "def test_lexing_error_evaluate_2():\n with pytest.raises(SyntaxError):\n lex._lexer([lex_premades.float], [], \"[[LINE]] [[TEXT]]\")._load_text(\n \"TEST\"\n ).evaluate()", "def test_false_lower(self):\n self.assertRaises(ParseException, self.flag.parseString, 'n')", "def test_malformedModes(self):\n self.assertRaises(irc.IRCBadModes, irc.parseModes, \"foo\", [])\n self.assertRaises(irc.IRCBadModes, irc.parseModes, \"%\", [])", "def test_get_non_valid_token(self):\r\n\r\n user = UserFactory.create_batch(2)[1]\r\n res = self.app.get('/api/token/non-valid?api_key=' + user.api_key)\r\n error = json.loads(res.data)\r\n\r\n assert res.status_code == 404, error\r\n assert error['status'] == 'failed', error\r\n assert error['action'] == 'GET', error\r\n assert error['target'] == 'token', error\r\n assert error['exception_cls'] == 'NotFound', error", "def test_lexing_error_evaluate_3():\n with pytest.raises(SyntaxError):\n lex._lexer([lex_premades.float], [], example_function())._load_text(\n \"TEST\"\n ).evaluate()", "def test_extra_suffix(self):\n nt = NewickTokenizer(newick='(a,(b,c));suffix')\n self.assertRaises(ValueError, nt.tokens)", "def test_no_delimiter_error(self):\n val = DwcaValidator(yaml.load(self.yaml_delimited5, Loader=yaml.FullLoader),\n error_handler=WhipErrorHandler)\n document = {'sex': 'male | female'}\n with self.assertRaises(ValueError):\n val.validate(document)", "def _expect_empty(self):\n\n item = self._lexer.get_token()\n if item:\n line_no, token = item\n raise ParseError(u\"Unexpected token '{0}' on line {1}\"\n .format(common.from_utf8(token.strip()), line_no))", "def test_errors(self):\n response = self.client.get(reverse('users:email_confirmation', kwargs={\n 'token': 'invalidEmailVerification31231323',\n }))\n\n soup = BeautifulSoup(response.content, 'html.parser')\n error_msg = soup.find('p', 'invalid-token')\n\n self.assertEqual(error_msg.text, 'Invalid token. 
Make sure your token is valid and not deleted.')", "def test_get_tokens():\n pass", "def test05(self):\n\n s = \"a\"\n with self.assertRaises(ParserException):\n t = parse_newick(s)", "def test_validate_token_returns_false_for_invalid_token(self, demo_app):\n demo_app.config.get.return_value = self.jwt_key\n token = jwt.encode({}, self.jwt_key_2, algorithm='HS256')\n\n self.assertFalse(\n validate_token(token)[0],\n 'Failed to recognise invalidate token.'\n )", "def test_wrong_token(self):\n wrong_token = \"1\"*64\n expected_result = \"You have to give your token\"\n resp = SearchTest.client.get('/api/search/',{\"token\":wrong_token,\"search_string\":\"Umut\"})\n self.assertEqual(json.loads(resp.content),expected_result,\"Token Control is not True\")", "def test_invalid_regref(self, parse_input_mocked_metadata):\n with pytest.raises(BlackbirdSyntaxError, match=\"reserved for register references\"):\n parse_input_mocked_metadata(\"float q0 = 5\")\n\n with pytest.raises(BlackbirdSyntaxError, match=\"reserved for register references\"):\n parse_input_mocked_metadata(\"float array q4 =\\n\\t-0.1, 0.2\")", "def test_invalid_username():\n expect_error(edit, InputError, \"aaa\", 1, True, None, None)", "def test_readbadformat(self):\n\n self.assertRaises(ParseError, self.hw, self.badfile)", "def test_sans_comma(self):\n nt = NewickTokenizer(newick='(a,(b,c)(d,e));')\n self.assertRaises(ValueError, nt.tokens)", "def test_fail_token(client, request):\n res = client.get('/token?uid=1')\n\n assert res.status_code == 400\n assert 'User does not exist' in res.data.decode('utf-8')", "def test_parser(test_input, expected):\n tokens = list(sp.tokenize(test_input))\n assert tokens == expected", "def test_empty(self):\n\n tokens = list(Lexer(\"\").generate_tokens())\n self.assertEqual(tokens, [])", "def _check_token(self, token):\n token = token.lower()\n check = re.sub(r'((^|\\')([a-z]+))+$', '', token)\n if check == '':\n return True\n return False", "def test_uss_num_bad_values(self):\n for val in self.bad_values:\n self.assertRaises(line_format_errors.FieldParseError,\n lambda: self.line._parse_uss_num(val))", "def _expect_token(self, expected):\n\n item = self._lexer.get_token()\n\n if not item:\n raise ParseError(u'Unexpected end of file')\n\n else:\n line_no, token = item\n\n if token != expected:\n raise ParseError(u\"Unexpected token '{0}', \"\n u\"expecting '{1}' on line {2}\"\n .format(common.from_utf8(token.strip()), expected,\n line_no))", "def test_failure(self):\n\n bad_examples = \"\"\"\n[test_date] > date(\"1 day from now\") ->\n\nCan't convert '1 day from now' to a date.\n===\n[test_date] between date(\"2020-01-01\") and 7 ->\nWhen using between, the column (date) and between values (date, num) must be the same data type.\n\n[test_date] between date(\"2020-01-01\") and 7\n ^\n===\n[test_date] between \"potato\" and date(\"2020-01-01\") ->\nCan't convert 'potato' to a date.\n\"\"\"\n\n for field, expected_error in self.bad_examples(bad_examples):\n with self.assertRaises(Exception) as e:\n self.builder.parse(field, debug=True)\n if str(e.exception).strip() != expected_error.strip():\n print(\"===\" * 10)\n print(str(e.exception))\n print(\"vs\")\n print(expected_error)\n print(\"===\" * 10)\n self.assertEqual(str(e.exception).strip(), expected_error.strip())", "def test_invalid_pseudo_close(self):\n\n with self.assertRaises(SyntaxError):\n sv.compile('div)')\n\n with self.assertRaises(SyntaxError):\n sv.compile(':is(div,)')", "def test_is_valid_user_id_invalid(self):\n ids = (\n 
(\"SGVsbG8gd29ybGQ\", \"non-digit ASCII\"),\n (\"0J_RgNC40LLQtdGCINC80LjRgA\", \"cyrillic text\"),\n (\"4pO14p6L4p6C4pG34p264pGl8J-EiOKSj-KCieKBsA\", \"Unicode digits\"),\n (\"4oaA4oaB4oWh4oWi4Lyz4Lyq4Lyr4LG9\", \"Unicode numerals\"),\n (\"8J2fjvCdn5nwnZ-k8J2fr_Cdn7rgravvvJngr6c\", \"Unicode decimals\"),\n (\"{hello}[world]&(bye!)\", \"ASCII invalid Base64\"),\n (\"Þíß-ï§-ňøẗ-våłìÐ\", \"Unicode invalid Base64\"),\n )\n\n for user_id, msg in ids:\n with self.subTest(msg=msg):\n result = TokenRemover.is_valid_user_id(user_id)\n self.assertFalse(result)", "def test_bad_token(self):\n db.session.add(self.user, self.user2)\n db.session.commit()\n self.assertIsNone(self.\n user.verify_auth_token('jdjdje230920093944334j'))", "def test_validate_input_rejection(self):\n with nose.assert_raises(exceptions.RejectionError):\n self.dtm1.validate_input('000011')", "def test_wrong_type_error(self, parse_input_mocked_metadata):\n with pytest.raises(ValueError, match=\"invalid value\"):\n bb = parse_input_mocked_metadata(\n \"for int m in [1, 4.2, 9]\\n\\tMZgate(0, 1) | [0, 1]\"\n )", "def _not_valid_(s) :\n return not s.valid()", "def test_integer():\n token = Token(\"1\", TokenInfo(\"<stdin>\", 0, 1, \" 12+\"))\n assert token.info.offset == 1\n assert token.info.filename == \"<stdin>\"\n assert token.lexeme == \"1\"\n assert token.info.lineno == 0\n assert token.symbol == Literal.VALUE\n assert token.info.line == \" 12+\"\n\n token += \"2\"\n assert token.info.offset == 1\n assert token.info.filename == \"<stdin>\"\n assert token.lexeme == \"12\"\n assert token.info.lineno == 0\n assert token.symbol == Literal.VALUE\n assert token.info.line == \" 12+\"\n\n with pytest.raises(LythSyntaxError) as err:\n token += \"+\"\n\n assert token.info.offset == 1\n assert token.info.filename == \"<stdin>\"\n assert token.lexeme == \"12\"\n assert token.info.lineno == 0\n assert token.symbol == Literal.VALUE\n assert token.info.line == \" 12+\"\n\n assert err.value.msg is LythError.MISSING_SPACE_BEFORE_OPERATOR\n assert err.value.filename == \"<stdin>\"\n assert err.value.lineno == 0\n assert err.value.offset == 1\n assert err.value.line == \" 12+\"\n\n token = token()\n assert token.info.offset == 1\n assert token.info.filename == \"<stdin>\"\n assert token.lexeme == 12\n assert token.info.lineno == 0\n assert token.symbol == Literal.VALUE\n assert token.info.line == \" 12+\"\n\n assert repr(token) == \"Token(VALUE, 12, 0, 1)\"\n assert str(token) == \"VALUE: 12\"", "def test_markdown_normal_token_error():\n\n # Arrange\n scanner = MarkdownScanner()\n with copy_to_temp_file(\n os.path.join(\"test\", \"resources\", \"rules\", \"md047\", \"end_with_blank_line.md\")\n ) as temp_source_path:\n plugin_path = os.path.join(\n \"test\", \"resources\", \"plugins\", \"bad\", \"bad_end_tokens.py\"\n )\n supplied_arguments = [\n \"--add-plugin\",\n plugin_path,\n \"scan\",\n temp_source_path,\n ]\n\n expected_return_code = 1\n expected_output = \"\"\"{path}:0:0: MDE044: Plugin that triggers on end_tokens. (bad-end-tokens)\n{path}:0:0: MDE044: Plugin that triggers on end_tokens. (bad-end-tokens)\n{path}:0:0: MDE044: Plugin that triggers on end_tokens. 
(bad-end-tokens)\"\"\".replace(\n \"{path}\", temp_source_path\n )\n expected_error = \"\"\n\n # Act\n execute_results = scanner.invoke_main(arguments=supplied_arguments)\n\n # Assert\n execute_results.assert_results(\n expected_output,\n expected_error,\n expected_return_code,\n )", "def test_invalid_pseudo(self):\n\n with self.assertRaises(NotImplementedError):\n sv.compile(':before')\n\n with self.assertRaises(SyntaxError):\n sv.compile(':nth-child(a)')", "def test_empty_username():\n expect_error(register, InputError, \"\", \"abcdef\", \"A\", \"A\", \"A\")", "def test_parse_simple_nonmember(self):\n lexed = [\n Token(\n value=\"qet\",\n token_type=KT.UNKNOWN,\n line_number=0,\n ),\n Token(\n value=\"be'\",\n token_type=KT.NOUN,\n line_number=0,\n ),\n ]\n self.assertFalse(parse(SimpleKlingonGrammar, lexed))", "def test_staff_inputs_bad_syntax(self, mock_log):\r\n staff_ans = \"clearly bad syntax )[+1e\"\r\n problem = self.build_problem(answer=staff_ans, tolerance=1e-3)\r\n\r\n msg = \"There was a problem with the staff answer to this problem\"\r\n with self.assertRaisesRegexp(StudentInputError, msg):\r\n self.assert_grade(problem, '1+j', 'correct')\r\n\r\n mock_log.debug.assert_called_once_with(\r\n \"Content error--answer '%s' is not a valid number\", staff_ans\r\n )", "def test_invalid_event(bot):\n expect_error(edit, InputError, bot.username, 1, False, None, None)", "def test_gender_bad_value(self):\n for val in self.bad_values:\n self.assertRaises(line_format_errors.FieldParseError,\n lambda: self.line._parse_gender(val))", "def test_valid_name_invalid():\n assert not valid_name(\"\")\n assert not valid_name(\"a\"*21)", "def test_noun_chunks_is_parsed_fr(fr_tokenizer):\n doc = fr_tokenizer(\"trouver des travaux antérieurs\")\n with pytest.raises(ValueError):\n list(doc.noun_chunks)", "def test_in_word(self):\n with self.assertRaises(ValueError):\n term, rmd = util.parse_date(\"notasearch1902foradatepartial\")", "def validate(data, badchars):\n assert(all(b not in data for b in badchars))", "def test_whitespace(self):\n self.assertRaises(ParseException, self.flag.parseString, ' ')", "def test_check_x_ValueError(self):\n self.assertRaisesRegex(\n ValueError,\n 'x must be >= 0',\n Rectangle,\n 4, 2, -1, 0, 12\n )", "def test_unknown_lower(self):\n self.assertRaises(ParseException, self.flag.parseString, 'u')", "def test_errors_on_bad_argument(self):\n self.assertRaises(Exception, Scope, 'foo')\n self.assertRaises(Exception, Scope, 1)\n self.assertRaises(Exception, Scope, [])\n self.assertRaises(Exception, Scope, tuple())" ]
[ "0.8261625", "0.7524087", "0.74162", "0.72980505", "0.7251938", "0.7226254", "0.714365", "0.71125466", "0.70883507", "0.70597327", "0.7019325", "0.7010936", "0.7009519", "0.6961416", "0.6934247", "0.68621945", "0.6833543", "0.6818326", "0.68157536", "0.6811885", "0.6800639", "0.6795364", "0.67951626", "0.6779176", "0.6777553", "0.67458403", "0.6727804", "0.67243856", "0.6709411", "0.66823506", "0.66730183", "0.6646089", "0.6640793", "0.662709", "0.6617271", "0.6598198", "0.6588749", "0.6571073", "0.6564801", "0.6559812", "0.6557551", "0.65498304", "0.65317494", "0.6526326", "0.6524543", "0.6502084", "0.6500504", "0.649458", "0.6482949", "0.646976", "0.6467633", "0.6467432", "0.64287156", "0.64176667", "0.64131874", "0.64086616", "0.6385538", "0.6384905", "0.636661", "0.63609093", "0.63491", "0.63467443", "0.6343797", "0.6335295", "0.633258", "0.63319427", "0.63199043", "0.6299182", "0.62937444", "0.6284952", "0.62813014", "0.62644464", "0.62521636", "0.6248338", "0.62476665", "0.62453055", "0.6242697", "0.6233423", "0.6226816", "0.62259364", "0.62101835", "0.6206545", "0.6200776", "0.6199406", "0.61932296", "0.6191652", "0.61903805", "0.6177204", "0.6176489", "0.6175882", "0.61709774", "0.61632353", "0.61591166", "0.61575675", "0.6156939", "0.6153474", "0.6148389", "0.613584", "0.61354464", "0.61306494" ]
0.87907505
0
This function prints and plots the confusion matrix. Normalization can be applied by setting `normalize=True`.
def plot_confusion_matrix(cm, classes, normalize=False, title=None, cmap=plt.cm.Blues, image_save=False, image_save_dir="/home/temp/moriz/validation/", save_suffix=None, save_format="pdf" ): if not title: title = 'Confusion matrix' if normalize: cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] fig, ax = plt.subplots() im = ax.imshow(cm, interpolation='nearest', cmap=cmap) ax.figure.colorbar(im, ax=ax) # We want to show all ticks... ax.set(xticks=np.arange(cm.shape[1]), yticks=np.arange(cm.shape[0]), # ... and label them with the respective list entries xticklabels=classes, yticklabels=classes, title=title, ylabel='True label', xlabel='Predicted label') # Rotate the tick labels and set their alignment. plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor") # Loop over data dimensions and create text annotations. fmt = '.2f' if normalize else 'd' thresh = cm.max() / 2. for i in range(cm.shape[0]): for j in range(cm.shape[1]): ax.text(j, i, format(cm[i, j], fmt), ha="center", va="center", color="white" if cm[i, j] > thresh else "black") fig.tight_layout() if image_save: if save_suffix is None: save_name = "/confusion_matrix." + save_format else: save_name = "/confusion_matrix_" + save_suffix + "." + save_format plt.savefig(image_save_dir + save_name, dpi='figure', format=save_format) plt.show() plt.close(fig)
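A minimal usage sketch for the function above (assumptions, not part of this row: scikit-learn is available, and y_true / y_pred are hypothetical label arrays standing in for real model output); it builds the matrix with sklearn's confusion_matrix and passes it in with normalize=True.

# Usage sketch (assumed names: y_true, y_pred; requires numpy, matplotlib, scikit-learn).
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix

y_true = np.array([0, 1, 1, 0, 2, 1])   # hypothetical ground-truth labels
y_pred = np.array([0, 1, 0, 0, 2, 2])   # hypothetical model predictions
cm = confusion_matrix(y_true, y_pred)   # 3x3 integer count matrix

# Row-normalize and render; image_save stays False, so nothing is written to disk.
plot_confusion_matrix(cm,
                      classes=["class 0", "class 1", "class 2"],
                      normalize=True,
                      title="Normalized confusion matrix")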
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_confusion_matrix(cm, classes=[0,1], normalize=False, title='Confusion matrix', print_matrix=False):\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n if print_matrix:\n print(cm)", "def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix',saveas='cm', cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n \n print(cm)\n\n plt.figure() \n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n \n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n \n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n \n foo_fig = plt.gcf() # 'get current figure'\n# foo_fig.savefig('confusion_matrix.eps', format='eps', dpi=1000) \n foo_fig.savefig(saveas, dpi=1000, bbox_inches='tight')\n plt.show()", "def plot_confusion_matrix(cm, y_test, y_pred, class_names,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print('\\n')\n print(\"Normalized confusion matrix\")\n else:\n print('\\n')\n print('Confusion matrix, without normalization')\n print_cm(cm, class_names)\n text_labels = [['True Negative', 'False Positive'],\n ['False Negative', 'True Positive']]\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(class_names))\n plt.xticks(tick_marks, class_names, rotation=45)\n plt.yticks(tick_marks, class_names)\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i - 0.1, format(cm[i, j], fmt),\n verticalalignment='bottom',\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n plt.text(j, i + 0.1, text_labels[i][j],\n verticalalignment='top',\n horizontalalignment=\"center\",\n fontsize=12,\n color=\"white\" if cm[i, j] > thresh else \"black\")\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n\n # Print accuracy and precision\n print('Accuracy: ', accuracy_score(y_test, y_pred, normalize=True))\n print('Precision: ', precision_score(y_test, y_pred, average='macro'))\n print('Roc-Auc: ', roc_auc_score(y_test, y_pred))\n # Plot non-normalized confusion matrix", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n #cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n cm = cm.astype('float') / np.sum(cm.ravel())\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n fig=plt.figure\n plt.imshow(cm, interpolation='nearest', cmap=cmap )\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in 
itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.show()\n return fig", "def plot_confusion_matrix(\n y_true, y_pred, classes, normalize=True, title=\"Confusion matrix\", cmap=plt.cm.Blues\n):\n cm = confusion_matrix(y_true, y_pred)\n\n if normalize:\n cm = cm.astype(\"float\") / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print(\"Confusion matrix, without normalization\")\n\n plt.imshow(cm, interpolation=\"nearest\", cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = \".2f\" if normalize else \"d\"\n thresh = cm.max() / 2.0\n for i, j in product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(\n j,\n i,\n format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\",\n )\n\n plt.tight_layout()\n plt.ylabel(\"True label\")\n plt.xlabel(\"Predicted label\")", "def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n #print(\"Normalized confusion matrix\")\n #else:\n\n #print('Confusion matrix, without normalization')\n\n# print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n# for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n# plt.text(j, i, format(cm[i, j], fmt),\n# horizontalalignment=\"center\",\n# color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n print(cm)\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n cm = confusion_matrix(y_test,predictions)\n plt.figure()\n plot_confusion_matrix(cm, classes=[0,1,2], normalize=True,\n title='Confusion Matrix')", "def plot_confusion_matrix(y_test, y_pred, classes,\n normalize=True,\n title='Average accuracy \\n',\n cmap=plt.cm.Blues, verbose = 0, precision = 0):\n from sklearn.metrics import confusion_matrix\n import itertools\n \n cm = confusion_matrix(y_test, y_pred)\n accuracy = (np.sum(np.diag(cm)) / np.sum(cm)) * 100.0\n\n if normalize:\n cm = (cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]) * 100.0\n if verbose == 1:\n print(\"Normalized confusion matrix\")\n else:\n if verbose 
== 1:\n print('Confusion matrix, without normalization')\n \n if verbose == 1:\n print(cm)\n\n plt.figure(figsize=(18, 9))\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title.format_map({'acc':accuracy}), fontsize=25)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45, fontsize=20)\n plt.yticks(tick_marks, classes, fontsize=20)\n\n fmt = '{:.'+ '%d'%(precision) +'f} %' if normalize else '{:d}'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, fmt.format(cm[i, j]),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\", fontsize=16)\n plt.tight_layout()\n plt.ylabel('True label', fontsize=20)\n plt.xlabel('Predicted label', fontsize=20)", "def plot_confusion_matrix(self, cm, classes, normalize, cmap=plt.cm.Blues, title='confusin Matrix'):\r\n if normalize:\r\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\r\n print(\"Normalized confusion matrix\")\r\n else:\r\n print('Confusion matrix, without normalization')\r\n\r\n print(cm)\r\n\r\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\r\n plt.title(title)\r\n plt.colorbar()\r\n tick_marks = np.arange(len(classes))\r\n plt.xticks(tick_marks, classes, rotation=45)\r\n plt.yticks(tick_marks, classes)\r\n\r\n fmt = '.2f' if normalize else 'd'\r\n thresh = cm.max() / 2.\r\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\r\n plt.text(j, i, format(cm[i, j], fmt),\r\n horizontalalignment=\"center\",\r\n color=\"white\" if cm[i, j] > thresh else \"black\")\r\n\r\n tick_marks = np.arange(len(classes))\r\n\r\n self.subplt.set_xlabel(\"Predicted label\")\r\n self.subplt.set_ylabel(\"True Label\")\r\n self.subplt.set_title(\"Confusion Matrix\")\r\n self.subplt.set_xticks(tick_marks,classes)\r\n self.subplt.set_yticks(tick_marks,classes)\r\n\r\n self.canvas2.show()", "def showConfusionMatrix(self): \r\n sn.heatmap(self.conf_matrix, annot=True)\r\n plt.plot( label=\"Accuracy\")\r\n plt.plot( label=\"Error\")\r\n plt.figtext(0,0,'Accuracy: {}\\nError: {}\\nRecall: {}\\nPrecision: {}'.format(self.accuracy,\r\n self.error,\r\n self.recall,\r\n self.precision))\r\n plt.title('Confusion Matrix')\r\n plt.show()\r\n return None", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n \n print(cm)\n \n \n plt.title(title)\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n \n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n \n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.colorbar()\n plt.show()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, 
np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, \"%.2f\" % cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print('Normalized confusion matrix')\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment='center',\n color='white' if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n #based on http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n cmap=plt.cm.Blues\n \n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n np.set_printoptions(precision=2)\n \n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, '%1.2f' % cm[i, j],\n horizontalalignment=\"center\",\n fontsize =12,\n color=\"white\" if cm[i, j] > thresh else \"black\")\n #plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n \n print(cm)\n \n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes)) \n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n \n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 
2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n \n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.axis('auto')", "def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix',cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n print('Confusion matrix')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(confusion_matrix, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.imshow(confusion_matrix, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n confusion_matrix = confusion_matrix.astype(\n 'float') / confusion_matrix.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(confusion_matrix)\n\n thresh = confusion_matrix.max() / 2.\n for i, j in itertools.product(range(confusion_matrix.shape[0]), range(confusion_matrix.shape[1])):\n plt.text(j, i, confusion_matrix[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if confusion_matrix[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):\r\n if normalize:\r\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\r\n print(\"Normalized confusion matrix\")\r\n else:\r\n print('Confusion matrix, without normalization')\r\n # print(cm)\r\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\r\n plt.title(title)\r\n plt.colorbar()\r\n tick_marks = np.arange(len(classes))\r\n plt.xticks(tick_marks, classes, rotation=45)\r\n plt.yticks(tick_marks, classes)\r\n fmt = '.2f' if normalize else 'd'\r\n thresh = cm.max() / 2.\r\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\r\n plt.text(j, i, format(cm[i, j], fmt),\r\n horizontalalignment=\"center\",\r\n color=\"white\" if cm[i, j] > thresh else \"black\")\r\n plt.tight_layout()\r\n plt.ylabel('True label')\r\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):\n confusion_matrix_dir = './confusion_matrix_plots'\n if not os.path.exists(confusion_matrix_dir):\n os.mkdir(confusion_matrix_dir)\n\n plt.cla()\n plt.figure()\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), 
range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"#BFD1D4\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n\n if normalize:\n plt.savefig(os.path.join(confusion_matrix_dir, 'normalized.jpg'))\n else:\n plt.savefig(os.path.join(confusion_matrix_dir, 'without_normalization.jpg'))", "def plot_confusion_matrix(cm, classes, normalize=True, title='Confusion matrix', cmap=plt.cm.Blues):\n\n plt.figure(figsize=(10,10))\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n cm = np.around(cm, decimals=2)\n cm[np.isnan(cm)] = 0.0\n print(\"Normalized confusion matrix\")\n\n else:\n print('Confusion matrix, without normalization')\n\n thresh = cm.max() / 2.\n\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.show()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n t = \"(%.2f)\"%(cm[i, j])\n #print t\n# plt.text(j, i, t,\n# horizontalalignment=\"center\",\n# color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.savefig('IOB-Confusion-Matrix-SVM.png')", "def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):\n if normalize:\n # 1. find out how many samples per class have received their correct label\n # 计算真正类别为k的样本被预测成各个类别的比例\n # e.g. 有25个样本的 true label 是 6,其中10个样本被预测为类别7,那么在混淆矩阵中 true label = 6 并且 predicted label = 7 的一个格子中的值为 0.4\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n\n # 2. get the precision (fraction of class-k predictions that have ground truth label k)\n # 计算预测的准确率\n # e.g. 
预测为类别k的有12个,但其中只有9个的真正类别是k,那么准确率为 0.75\n # cm = cm.astype('float') / cm.sum(axis=0)[:, np.newaxis]\n \n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n # plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n # tick_marks = np.arange(len(classes))\n # plt.xticks(tick_marks, classes, rotation=45)\n # plt.yticks(tick_marks, classes)\n\n # fmt = '.2f' if normalize else 'd'\n # thresh = cm.max() / 2.\n # for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n # plt.text(j, i, format(cm[i, j], fmt),\n # horizontalalignment=\"center\",\n # color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n print(cm)\n \n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n \n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n \n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.savefig('confusion_matrix.png')", "def plotConfusionMatrix(self, cm, classes, normalize=True, title='Confusion matrix', cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()\n plt.savefig('confusion_matrix.png')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if 
normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.show()", "def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion Matrix', cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion Matrix, without normalization')\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title, weight='bold')\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n if i == 0:\n plt.text(j-0.1, i+0.3, format(cm[i, j], fmt), color=\"white\" if cm[i, j] > thresh else \"black\")\n if i == 1:\n plt.text(j-0.1, i-0.2, format(cm[i, j], fmt), color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True Label', weight='bold')\n plt.xlabel('Predicted Label', weight='bold')\n plt.show()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n #print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=90)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n #pdb.set_trace()\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def sam_plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n plots_dims = itertools.product(list(range(cm.shape[0])),\n list(range(cm.shape[1])))\n for i, j in plots_dims:\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion 
matrix', cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n \n print(a)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap, vmin=0.0, vmax=1.0)\n\n plt.title(title)\n\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.3f'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n # plt.title(title)\n # plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n # Tweak spacing to prevent clipping of tick-labels\n plt.subplots_adjust(bottom=0.2)", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n # print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.xlabel('Predicted label') \n plt.ylabel('True label') \n plt.tight_layout()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, 
interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label') \n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Purples):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n # plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n # plt.grid('off')\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=True,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n# print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()\n return plt.gcf()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='',\n cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title + \"Confusion matrix\")\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print(title + ' confusion matrix')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if 
normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=None):\n if normalize:\n # cm = cm.T\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n # cm = cm.T\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.figure(figsize=(4, 4))\n plt.imshow(cm, interpolation='nearest', cmap=cmap or plt.cm.Blues)\n plt.title(('Normalized ' if normalize else '') + title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(list(range(cm.shape[0])), list(range(cm.shape[1]))):\n plt.text(\n j,\n i,\n format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=0)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n #print(\"Normalized confusion matrix\")\n else:\n 1#print('Confusion matrix, without normalization')\n\n #print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.show()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n print(cm)\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title, fontsize=14)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title,fontsize=20)\n# plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, fontsize=15)\n plt.yticks(tick_marks, classes,rotation=30,fontsize=15)\n\n fmt = '.2f'\n 
thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\",fontsize=20)\n\n plt.tight_layout()\n plt.ylabel('True label',fontsize=20)\n plt.xlabel('Predicted label',fontsize=20)", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Confusion matrix\")\n else:\n print('Confusion matrix')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Greens):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, '%.02f'%cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else 
\"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"red\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n 
plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n 
print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.figure()\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.show()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n # plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n 
horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title.split('/')[-1])\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n # plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n if title:\n plt.savefig(title+'.png')\n\n plt.close()", "def plot_confusion_matrix(cm, classes=[],\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.figure()\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n #print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=90)\n plt.yticks(tick_marks, classes)\n\n fmt = '.1f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\r\n normalize=False,\r\n title='Confusion matrix',\r\n cmap=plt.cm.Blues):\r\n if normalize:\r\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\r\n print(\"Normalized confusion matrix\")\r\n else:\r\n print('Confusion matrix, without normalization')\r\n\r\n print(cm)\r\n\r\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\r\n plt.title(title)\r\n plt.colorbar()\r\n tick_marks = np.arange(len(classes))\r\n plt.xticks(tick_marks, classes, rotation=45)\r\n plt.yticks(tick_marks, classes)\r\n\r\n fmt = '.2f' if normalize else 'd'\r\n thresh = cm.max() / 2.\r\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\r\n plt.text(j, i, format(cm[i, j], fmt),\r\n horizontalalignment=\"center\",\r\n color=\"white\" if cm[i, j] > thresh else \"black\")\r\n\r\n plt.tight_layout()\r\n plt.ylabel('True label')\r\n plt.xlabel('Predicted label')\r\n plt.savefig('Logistik.png')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='',\n cmap=plt.cm.Blues, file_name='cm_plot'):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.rcParams[\"font.family\"] = \"Times New Roman\"\n plt.rcParams[\"font.size\"] = FONT_SIZE\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=0)\n plt.yticks(tick_marks, classes)\n\n fmt = '.6f' if 
normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label', fontsize=FONT_SIZE)\n plt.xlabel('Predicted label', fontsize=FONT_SIZE)\n plt.subplots_adjust(bottom=0.13)\n with PdfPages(file_name) as pdf:\n pdf.savefig()\n plt.close()", "def plot_confusion_matrix(self):\r\n interp = ClassificationInterpretation.from_learner(self.learn)\r\n interp.plot_confusion_matrix()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n 
plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, 
classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), 
range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > 
thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes=None, normalize=False,\n title='Confusion matrix', cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.figure()\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n\n if classes:\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()\n plt.show()", "def plot_confusion_matrix(cm,\r\n normalize=False,\r\n title='Confusion matrix',\r\n cmap=plt.cm.Blues):\r\n if normalize:\r\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\r\n print(\"Normalized confusion matrix\")\r\n else:\r\n print('Confusion matrix, without normalization')\r\n\r\n# print(cm)\r\n\r\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\r\n plt.title(title)\r\n plt.colorbar()\r\n\r\n fmt = '.2f' if normalize else 'd'\r\n thresh = cm.max() / 2.\r\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\r\n plt.text(j, i, format(cm[i, j], fmt),\r\n horizontalalignment=\"center\",\r\n color=\"white\" if cm[i, j] > thresh else \"black\")\r\n\r\n plt.tight_layout()\r\n plt.ylabel('True label')\r\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes, 
normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):\n plt.figure()\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(cm)\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j], horizontalalignment=\"center\", color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):\n plt.figure()\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(cm)\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j], horizontalalignment=\"center\", color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(y_true, y_pred, classes,\n normalize=False,\n title=None,\n cmap=plt.cm.Blues):\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n # Only use the labels that appear in the data\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n fig, ax = plt.subplots(figsize=(8, 8))\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n # ... 
and label them with the respective list entries\n xticklabels=classes, yticklabels=classes)\n ax.set_title(title,size = 20)\n ax.set_ylabel('True label',size = 20)\n ax.set_xlabel('Predicted label',size = 20)\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\",size = 18)\n plt.setp(ax.get_yticklabels(),size = 18)\n\n # Loop over data dimensions and create text annotations.\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n fig.tight_layout()\n name = OUTFOLDER + \"/confusion_matrix_batch%d_layers%d_epochs%d_f1%d\" % (BATCH_SIZE,LAYERS,EPOCHS,f1_mean_test*100)\n if normalize:\n name = name + \"_norm\"\n plt.savefig(name)\n plt.close()\n return ax", "def plot_confusion_matrix(cm, classes,\n\t\t\t\t\t\t normalize=False,\n\t\t\t\t\t\t title='Confusion matrix',\n\t\t\t\t\t\t cmap=plt.cm.Blues):\n\tif normalize:\n\t\tcm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n\t\tprint(\"Normalized confusion matrix\")\n\telse:\n\t\tprint('Confusion matrix, without normalization')\n\n\tprint(cm)\n\n\tplt.imshow(cm, interpolation='nearest', cmap=cmap)\n\tplt.title(title)\n\tplt.colorbar()\n\ttick_marks = np.arange(len(classes))\n\tplt.xticks(tick_marks, classes, rotation=45)\n\tplt.yticks(tick_marks, classes)\n\n\tfmt = '.2f' if normalize else 'd'\n\tthresh = cm.max() / 2.\n\tfor i, j in product(range(cm.shape[0]), range(cm.shape[1])):\n\t\tplt.text(j, i, format(cm[i, j], fmt),\n\t\t\t\t horizontalalignment=\"center\",\n\t\t\t\t color=\"white\" if cm[i, j] > thresh else \"black\")\n\n\tplt.tight_layout()\n\tplt.ylabel('True label')\n\tplt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.savefig('../results/conf_matr.png')\n\n return cm", "def plot_confusion_matrix(cm, classes,\n\t\t\t\t\t\t\t normalize=False,\n\t\t\t\t\t\t\t title='Confusion matrix',\n\t\t\t\t\t\t\t cmap=plt.cm.Blues):\n\t\tif normalize:\n\t\t\tcm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n\t\t\tprint(\"Normalized confusion matrix\")\n\t\telse:\n\t\t\tprint('Confusion matrix, without normalization')\n\n\t\tplt.imshow(cm, interpolation='nearest', cmap=cmap)\n\t\tplt.title(title)\n\t\tplt.colorbar()\n\t\ttick_marks = np.arange(len(classes))\n\t\tplt.xticks(tick_marks, classes, rotation=45)\n\t\tplt.yticks(tick_marks, classes)\n\n\t\tplt.tight_layout()\n\t\tplt.ylabel('True label')\n\t\tplt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / 
cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without 
normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n 
tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()", "def plot(self):\n plt.imshow(self.cm, interpolation='nearest', cmap=self.cmap)\n plt.title(self.title)\n plt.colorbar()\n tick_marks = np.arange(len(self.classes))\n plt.xticks(tick_marks, self.classes, rotation=45)\n plt.yticks(tick_marks, self.classes)\n \n if self.normalize:\n self.cm = self.cm.astype('float') / self.cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n \n print(self.cm)\n \n thresh = self.cm.max() / 2.\n for i, j in itertools.product(range(self.cm.shape[0]), range(self.cm.shape[1])):\n plt.text(j, i, self.cm[i, j], horizontalalignment=\"center\", color=\"white\" if self.cm[i, j] > thresh else \"black\")\n plt.tight_layout()\n plt.ylabel('True Label')\n plt.xlabel('Predicted label')" ]
[ "0.8194862", "0.80949175", "0.8029915", "0.8019153", "0.79941195", "0.7991258", "0.7980955", "0.7976606", "0.79610753", "0.79590565", "0.79378676", "0.7934962", "0.7934504", "0.79313844", "0.7926313", "0.7924577", "0.79241234", "0.7923211", "0.7923023", "0.7921931", "0.7917871", "0.7916092", "0.79083747", "0.7907475", "0.79068965", "0.7904398", "0.7900711", "0.7900422", "0.7896704", "0.7894559", "0.7893862", "0.7891639", "0.78906786", "0.78895235", "0.7886698", "0.7884568", "0.78841054", "0.78773123", "0.78745896", "0.7869866", "0.7860299", "0.78572506", "0.7856715", "0.7853253", "0.7852508", "0.78493565", "0.78482205", "0.7847642", "0.7845746", "0.78436774", "0.78436774", "0.78436774", "0.78436774", "0.78436774", "0.78436774", "0.78436774", "0.78436774", "0.7842821", "0.783704", "0.7836942", "0.7836734", "0.78358006", "0.78322923", "0.7831496", "0.78314656", "0.78289187", "0.78289187", "0.78289187", "0.78289187", "0.78289187", "0.78289187", "0.78289187", "0.78289187", "0.78289187", "0.78289187", "0.78289187", "0.78289187", "0.78289187", "0.78289187", "0.78289187", "0.78289187", "0.78289187", "0.78289187", "0.7822435", "0.7822236", "0.7820784", "0.7820784", "0.7820304", "0.7817516", "0.78159386", "0.78157204", "0.7814644", "0.7814644", "0.7814644", "0.7814644", "0.7814644", "0.7814644", "0.7814644", "0.7814644", "0.7813965", "0.7813563" ]
0.0
-1
Proxy to turn off Transcrypt when calling img.get/set methods
def image_proxy(img): def _set(*args): __pragma__("noalias", "set") value = img.set(*args) __pragma__("alias", "set", "py_set") return value def _get(*args): __pragma__("noalias", "get") value = img.get(*args) __pragma__("alias", "get", "py_get") return value img.set = _set img.get = _get return img
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __call__(self, img, *args, **kwargs):\n raise NotImplementedError", "def __call__(self, img, *args, **kwargs):\n raise NotImplementedError", "def __init__(self, img, settings):\r\n self.img_orig = img\r\n self.settings = settings", "def _apply_transform(self, img: np.ndarray): \n img = self.transform(image=img)[\"image\"]\n return img", "def _apply_transform(self, img: np.ndarray): \n img = self.transform(image=img)[\"image\"]\n return img", "def getimage(self):", "def __getitem__(self, index: int) -> Any:\n img_id, image = super().__getitem__(index)\n image[\"imageId\"] = img_id\n if self._transform is not None:\n return self._transform(image)\n return image", "def setImage(*args):", "def transform(self, previousimage):", "def __getattr__(self, name):\t\t\n\t\tif name == \"__deepcopy__\" or name == \"__setstate__\":\n\t\t\treturn object.__getattribute__(self, name)\t\t\t\n\t\t# if get data => get cur_tensor.data\n\t\telif name == \"data\":\n\t\t\treturn self.cur_tensor.data\t\t\n\t\t\n\t\telif hasattr(self.cur_tensor, name):\t\t\t\n\t\t\tdef wrapper(*args, **kwargs):\t\t\t\t\n\t\t\t\tfunc = self.cur_tensor.__getattribute__(name)\n\t\t\t\tout_tensor = func(*args, **kwargs)\n\n\t\t\t\tif not isinstance(out_tensor, torch.Tensor):\n\t\t\t\t\tout_logs = []\n\t\t\t\t\tfor t in out_tensor:\n\t\t\t\t\t\tout_log = copy.deepcopy(self)\n\t\t\t\t\t\tout_log.setTensor(t)\t\t\t\t\t\t\n\t\t\t\t\t\tout_logs.append(out_log)\n\t\t\t\t\t\t\n\t\t\t\t\treturn out_logs\n\t\t\t\telse:\t\t\t\t\t\t\n\t\t\t\t\tself.cur_tensor = out_tensor\n\t\t\t\t\tself.output_shape[self.cur_id] = out_tensor.size() \n\n\t\t\t\t\treturn self\n\t\t\t# print(wrapper)\n\t\t\treturn wrapper\n\t\t\t\n\t\t\t# return self\n\n\n\t\telse:\n\t\t\treturn object.__getattribute__(self, name)", "def __call__(self, image):\n\n image = np.array(image)\n image = self.transform(image=image)['image']\n return image", "def modify(self, *args, **kwargs):\n return _image.image_modify(self, *args, **kwargs)", "def __call__(self, img: torch.Tensor) -> torch.Tensor:\n return self._trafo(img)", "def __setstate__(self, dct):\n\t\tself.__dict__ = dct\n\t\tself.image = None\n\t\tself.draw() # Regenerate Image", "def set_img(self, img):\n self.img = img", "def transform(self):", "def __getitem__(self, key):\n assert 0 <= key <= 3\n if key == 3:\n return self.__real\n else:\n return self.__img[key]", "def op(self, img):\n raise NotImplementedError(\"'op' is an abstract method.\")", "def augment(self, image):\n pass", "def set_from_original(self):\n self.image = self.orig_image\n self.update_img()\n self.update_size()", "def process(self, image):", "def image(self):\n return self._image", "def _image_transform(self, img, source, title):\n conf = source.conf[title]\n \n xmin = conf.get('xmin', 0)\n ymin = conf.get('ymin', 0)\n\n xmax = img.shape[-1] + xmin\n ymax = img.shape[-2] + ymin\n if \"xmax\" in conf:\n if(conf['xmax'] <= xmin):\n logging.warning(\"xmax <= xmin for title %s on %s. Ignoring xmax\", title, source.name())\n else:\n xmax = conf['xmax']\n if \"ymax\" in conf:\n if(conf['ymax'] <= ymin):\n logging.warning(\"ymax <= ymin for title %s on %s. 
Ignoring xmax\", title, source.name())\n else:\n ymax = conf['ymax']\n\n \n translate_transform = QtGui.QTransform().translate(ymin, xmin)\n\n # The order of dimensions in the scale call is (y,x) as in the numpy\n # array the last dimension corresponds to the x.\n scale_transform = QtGui.QTransform().scale((ymax-ymin)/img.shape[-2],\n (xmax-xmin)/img.shape[-1])\n \n #rotate_transform = QtGui.QTransform()\n #if source.data_type[title] == 'image':\n # if \"angle\" in conf:\n # rotate_transform = QtGui.QTransform(numpy.cos(conf[\"angle\"]), numpy.sin(conf[\"angle\"]), -numpy.sin(conf[\"angle\"]), numpy.cos(conf[\"angle\"]), 0, 0)\n\n transpose_transform = QtGui.QTransform()\n if source.data_type[title] == 'image':\n transpose_transform *= QtGui.QTransform(0, 1, 0,\n 1, 0, 0,\n 0, 0, 1)\n if(self.settingsWidget.ui.transpose.currentText() == 'Yes' or\n (self.settingsWidget.ui.transpose.currentText() == 'Auto' \n and \"transpose\" in conf)):\n transpose_transform *= QtGui.QTransform(0, 1, 0,\n 1, 0, 0,\n 0, 0, 1)\n \n transform = scale_transform * translate_transform * transpose_transform\n #transform = scale_transform * translate_transform * rotate_transform * transpose_transform\n \n # print('|%f %f %f|' % (transform.m11(), transform.m12(), transform.m13()))\n # print('|%f %f %f|' % (transform.m21(), transform.m22(), transform.m23()))\n # print('|%f %f %f|' % (transform.m31(), transform.m32(), transform.m33()))\n return transform", "def __init__(self, img):\n self.img = img", "def get_image(self, image):\n self.original_image = image\n self.prepare_images()", "def __call__(self, image: Union[Tensor, Image]) -> Tensor:\n return self.transform(image)", "def __call__(self, image: Union[Tensor, Image]) -> Tensor:\n return self.transform(image)", "def __call__(self, image: Union[Tensor, Image]) -> Tensor:\n return self.transform(image)", "def update_img(self):\n self.img = np.array(self.image)", "def on_image(self, image):", "def __getitem__(self, index):\n if self.train:\n img, target = self.train_data[index], self.train_labels[index]\n else:\n img, target = self.test_data[index], self.test_labels[index]\n\n # doing this so that it is consistent with all other datasets\n # to return a PIL Image\n img = Image.fromarray(img)\n\n if self.transform is not None:\n img = self.transform(img)\n\n if self.target_transform is not None:\n target = self.target_transform(target)\n\n return img, target, index # only line changed", "def setImage(self, image):\n raise NotImplementedError", "def __getitem__(self, idx):\n image = Image.open(self.filenames[idx]) # PIL image\n image = self.transform(image)\n return image", "def adjust(self, image):\n ...", "def update_image(self):\n self.image = Image.fromarray(self.img)", "def __getitem__(self, index):\n img, _ = self.dataset.__getitem__(index)\n\n # Convert to numpy.ndarray\n if isinstance(img, Image.Image):\n img = np.array(img)\n elif isinstance(img, torch.Tensor):\n img = img.numpy()\n else:\n pass\n\n # Convert grayscale to color\n if len(img.shape) == 2:\n img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)\n\n # Perform image rotation\n if self.target_transform is not None:\n img, target = self.target_transform(img)\n else:\n # Assume that the initial rotation angle of all training/test images is 0\n target = 0\n\n # after rotate, make img become PIL Image\n img = Image.fromarray(img.astype(np.uint8))\n\n if self.transform is not None:\n img = self.transform(img)\n\n return img, target", "def modify_image(self, example, target_label):\n raise NotImplementedError()", 
"def get_image_data(self):\n raise NotImplementedError(str(type(self)) + 'does not'\n 'implement get_image.')", "def __translate(self, img):\n if not isinstance(img, Image):\n raise InvalidImageTypeException(\"display_images only accepts objects of type Image\")\n\n w = img.width()\n h = img.height()\n tkimg = Tkinter.PhotoImage(width=w, height=h)\n for x in range(w):\n for y in range(h):\n tkimg.put('#%02x%02x%02x' % img.get_rgb(x, y), (x, y))\n return tkimg", "def forward_test(self, img, img_metas, **kwargs):", "def transform(self, *args, **kwargs):\n raise NotImplementedError", "def set_image(self, image):\n\n # would be better if we only saved if it didn't exist\n if image.data:\n # save the images data\n self._set_image_data(image)\n\n # could be an update, could be new\n image = self._save_to_redis(image)\n\n # let the world know we have added a new image\n self.revent.fire('image_added',{\n 'source_page_url': image.source_page_url,\n 'source_url': image.source_url,\n 'shahash': image.shahash,\n 'vhash': image.vhash,\n 'xdim': image.xdim,\n 'ydim': image.ydim,\n })\n\n return image", "def set_proxy(self):", "def image_special_func(img):\n return myimg.image_special_func(img.tolist())", "def update_image(self, image):\n raise NotImplementedError()", "def __setitem__(self, *args):\n return _itkRGBAPixelPython.itkRGBAPixelUS___setitem__(self, *args)", "def _get_img_tensor(self, fname, internal_transform):\n transforms = list(self.base_transforms)\n if internal_transform:\n transforms.insert(1, internal_transform)\n\n return T.Compose(transforms)(Image.open(self.imgs_root / fname))", "def convert_img(self):\r\n self.img = self.img.convert('RGB')", "def __call__(self, results):\n # Image is bgr\n img = results['img'][..., ::-1]\n img = Image.fromarray(img)\n img = self.transform(img)\n img = np.asarray(img)\n img = img[..., ::-1]\n results['img'] = img\n return results", "def set_image(self, **kwargs):\n self.image = kwargs.get('url')", "def transform(self, X, copy=...):\n ...", "def transform(self, X, copy=...):\n ...", "def transform(self, X, copy=...):\n ...", "def __getitem__(self,index):\r\n target_path = self.target_path\r\n refer_path = self.refer_path\r\n vocab = self.vocab\r\n caption = self.caption\r\n '''\r\n here take the differences of target and refer_images\r\n '''\r\n target_image = Image.open(target_path[index])\r\n refer_image = Image.open(refer_path[index])\r\n '''\r\n ### another choice\r\n image1 = merge(target_image, refer_image)\r\n image2 = merge(refer_image, target_image)\r\n image = np.array(image1) - np.array(image2)\r\n image = Image.fromarray(image.astype('uint8')).convert('RGB')\r\n '''\r\n image = merge(refer_image, target_image)\r\n caption_ind = caption[index]\r\n\r\n if self.transform != None:\r\n # apply image preprocessing\r\n image = self.transform(image)\r\n # tokenize captions\r\n caption_str = str(caption_ind).lower()\r\n tokens = nltk.tokenize.word_tokenize(caption_str)\r\n caption_voc = torch.Tensor([vocab(vocab.start_token())] +\r\n [vocab(token) for token in tokens] +\r\n [vocab(vocab.end_token())])\r\n\r\n return image, caption_voc", "def __init__(self, image):\n self.image = image", "def setImages( self, event_key, images ):\n print \"event index\",event_key[0]\n self.run = event_key[1]\n self.subrun = event_key[2]\n self.event_num = event_key[3]\n print self.run,self.subrun,self.event_num\n self.images = images\n #print self.images.img_v\n #for img in self.images.img_v:\n # print img.shape\n self.labeltools.setImage( event_key[0], self.images 
)", "def __getitem__(self, index):\n dataset= self.dataset\n filename, label = dataset[index]\n \n path=os.path.join(self.image_dir, filename)\n if path not in self.img_cache:\n image = Image.open(path)\n image.load()\n self.img_cache[path]=image\n else:\n image=self.img_cache[path]\n \n \n encoded_lab=torch.zeros(len(self.domains), dtype=torch.float32)\n encoded_lab[label]=1\n #image=self.hsv_color_change(image,0.5)\n #im.save(self.image_dir+\"/testimg.jpg\")\n #image.save(self.image_dir+\"/testimg2.jpg\")\n return self.transform(image), encoded_lab", "def __setitem__(self, *args):\n return _itkRGBAPixelPython.itkRGBAPixelF___setitem__(self, *args)", "def process_image(self):\n pass", "def __call__(self, img: Image):\n if self.K <= 1:\n return self.transform(img)\n else:\n return [self.transform(img) for _ in range(self.K)]", "def _fcn_link_pic(self, name):\n def _get_pic_fcn():\n kwargs = self.cbqt.cbobjs._objs[name].to_kwargs()\n self.pic[name].update_from_dict(kwargs)\n self.pic[name]._pic.set_data(**kwargs)\n return _get_pic_fcn", "def imgCopy(img):\n return sitk.Image(img)", "def __getitem__(self, index):\n\n totensor = transforms.Compose(\n [transforms.Resize((224, 224)),\n transforms.ToTensor()\n ])\n\n assert (index < len(self.data))\n assert (index < self.len)\n images = self.data[index]\n # print(images)\n img = cv2.imread(os.path.join(self.dataset.directory, images))\n\n target = self.bbox[index]\n\n scale = np.array(img.shape) / 224\n\n # img = cv2.rectangle(img, (target[0]-10, target[1]-10), (target[2]+10, target[3]+10),\n # color=(255, 255, 0), thickness=10)\n\n # cv2.imwrite(os.path.join(\"res\", str(index)+\".jpg\"), draw)\n\n # print(img.shape, scale)\n img = cv2.resize(img, (224, 224))\n\n # print(target)\n\n target[0] = int(target[0] / scale[1] - 5)\n target[1] = int(target[1] / scale[0] - 5)\n target[2] = int(target[2] / scale[1] + 5)\n target[3] = int(target[3] / scale[0] + 5)\n\n # print(target)\n t = target\n if self.transform is not None:\n seq_det = self.transform.to_deterministic() # call this for each batch again, NOT only once at the start\n\n keypoints_on_images = []\n keypoints = []\n keypoints.append(ia.Keypoint(x=target[0], y=target[1]))\n keypoints.append(ia.Keypoint(x=target[2], y=target[3]))\n\n keypoints_on_images.append(ia.KeypointsOnImage(keypoints, shape=np.asarray(img).shape[:-1]))\n\n # augment keypoints and images\n img = seq_det.augment_image(np.asarray(img))\n after_aug = []\n\n target = seq_det.augment_keypoints(keypoints_on_images)\n for point in target[0].keypoints:\n # print(point)\n x_new, y_new = point.x, point.y\n after_aug.append(point.x)\n after_aug.append(point.y)\n target = after_aug\n # print(after_aug)\n newImg = Image.fromarray(img)\n reg_targets = np.float32(np.array(target))\n\n b=self.labels[index]\n\n #a = np.array(self.labels[index])\n #b = np.zeros((a.size, 2))\n #b[np.arange(a.size), a] = 1\n\n #print(\"B=\",b,self.labels[index])\n\n #print(targets)\n ##draw = cv2.rectangle(cv2.resize(np.array(newImg), (224, 224)), (t[1], t[0]), (t[3], t[2]), color=(0, 0, 0),\n # thickness=6)\n\n #draw = cv2.rectangle(cv2.resize(np.array(draw), (224, 224)), (targets[0], targets[1]), (targets[2], targets[3]),\n # color=(0, 255, 0), thickness=3)\n\n #cv2.imwrite(os.path.join(\"res\", str(index) + \".jpg\"), draw)\n #print(reg_targets)\n\n return totensor(newImg), reg_targets,b ,index", "def transact(self):", "def transact(self):", "def _process_img(self, img):\n\n # ==\n # Construct transforms\n trans_list = 
[transforms.Resize(self.img_size)]\n if self.grayscale:\n trans_list += [transforms.Grayscale(num_output_channels=1)]\n\n img_transforms = transforms.Compose(trans_list)\n\n # ==\n # Transform and output\n img = img_transforms(img)\n obs = np.array(img, dtype=np.float32)\n\n # Ensure channel is in first dimension (torch conv standard)\n if len(np.shape(obs)) == 2:\n obs = np.expand_dims(obs, axis=0)\n elif len(np.shape(obs)) == 3:\n # PIL have channel on dim 2, swap with dim 0\n obs = np.swapaxes(obs, 2, 0)\n pass\n else:\n raise RuntimeError\n\n # Scale values to [0, 1]\n if self.scale_observation:\n obs = obs / 255.0\n\n # (Optinal) Flatten to vector\n if self.flatten_obs:\n obs = obs.flatten()\n\n return obs", "def __getitem__(self, i):\n image = Image.open(self.images[i]).convert('RGB')\n target = Image.open(self.targets[i]).convert('L')\n if self.transform is not None:\n image, target = self.transform(image, target)\n return image, target", "def test_replace_image(self):\n pass", "def save_image(self):\n self.save()", "def original(self) -> Any:\n raise NotImplementedError", "def setImage(self, *args, **kwargs):\n super().setImage(*args, **kwargs)\n self.view.disableAutoRange(axis = 0)\n self.view.enableAutoRange(axis = 1)", "def __getitem__(self, idx):\n img = self.images[idx]\n label = self.labels[idx].split(\" \")[-1]\n img = Image.open(img)\n img = img.convert('RGB')\n img = self.transform(img)\n return(img, label[:-1])", "def any_image(self, index):\n self.__accessed_image[index] = True\n return self.__image(index)", "def on_image_change(self, value):\n self.current_image.setImage( self._model.image )", "def transform(self, x):", "def encode_decode(self, img, img_metas):\n pass", "def __setattr__(self, name, value):\n try:\n orig = object.__getattribute__(self, 'orig')\n except AttributeError:\n object.__setattr__(self, name, value)\n else:\n object.__setattr__(orig, name, value)", "def _TransformHidden(self, _):\n raise NotImplementedError()", "def __getattribute__(self,attr):\n if attr in super(BaseTransformer,self).__getattribute__('_overrides'):\n return super(BaseTransformer,self).__getattribute__('_'+attr)\n return super(BaseTransformer,self).__getattribute__(attr)", "def _set(self, thumbnail_name, thumbnail):\n raise NotImplementedError", "def transform(self, X):\n ...", "def transform(self, X):\n ...", "def transform(self, X):\n ...", "def transform(self, X):\n ...", "def transform(self, X):\n ...", "def transform(self, X):\n ...", "def transform(self, X):\n ...", "def transform(self, value):\n\n self._transform = value\n if self.is_attached:\n self[\"transform\"] = self._transform", "def transform(img_path):\n\n img = self.loader(img_path)\n return self.transform(img)", "def __call__(self, images, targets):\n pass", "def observation(self, img):\r\n img = img.transpose(1, 2, 0)\r\n return img", "def __getstate__(self):\n\t\tdct = self.__dict__.copy()\n\t\t# Can't pickle ImageCore objects - convert to string\n\t\tdel dct['image']\n\t\treturn dct", "def __getitem__(self, key):\n # Get images and apply transforms\n image_file = self.data[self.data.columns[0]][key]\n image = Image.open(os.path.join(self.path, 'data', image_file))\n if self.is_transform:\n image = self.transform(image)\n\n if not torch.is_tensor(image):\n image = transforms.ToTensor()(image)\n\n # Get caption embeddings\n caption = self.data[self.data.columns[-1]][key]\n if self.embeddings is not None:\n caption = [self.embeddings[word] for word in caption]\n\n if self.tier != self.TIERS[-1]:\n labels = 
self.data[self.data.columns[1]][key]\n return image_file, image, caption, labels\n\n return image_file, image, caption", "def __setitem__(self, *args):\n return _itkRGBAPixelPython.itkRGBAPixelUC___setitem__(self, *args)", "def __getitem__(self, idx):\n records = copy.deepcopy(self._get_imganno(idx))\n records['image'] = cv2.imread(records['image_file'])\n records['image'] = cv2.cvtColor(records['image'], cv2.COLOR_BGR2RGB)\n if 'mask' in records:\n records['mask'] = (records['mask'] + 0).astype('uint8')\n records = self.transform(records)\n return records", "def setImage(self, img):\n self.image = img\n self.repaint()", "def setThumbnailImage(*args):", "def __getitem__(self, index):\n\n \"\"\"\n step1. Get the image path from 'self.img_name' and load it.\n hint : path = root + self.img_name[index] + '.jpeg'\n \n step2. Get the ground truth label from self.label\n \n step3. Transform the .jpeg rgb images during the training phase, such as rssh [email protected] -p porttc. But at the beginning, I suggest you follow the hints. \n \n In the testing phase, if you have a normalization process during the training phase, you only need \n to normalize the data. \n \n hints : Convert the pixel value to [0, 1]\n Transpose the image shape from [H, W, C] to [C, H, W]\n \n step4. Return processed image and label\n \"\"\"\n ##step1\n path = self.root + self.img_name[index] + '.jpeg'\n img = Image.open(path)\n \n ##step2\n GroundTruth = self.label[index]\n \n ##step3\n img_np = np.asarray(img)/255\n img_np = np.transpose(img_np, (2,0,1))\n img_ten = torch.from_numpy(img_np)\n \n ##step4\n return img_ten, GroundTruth", "def _augment(img):\r\n return flip(img, axis=2)", "def __getitem__(self, idx: int) -> Tuple[torch.Tensor, torch.LongTensor]:\n _class, _img = self.dataset[idx]\n\n _img = Image.open(_img)\n _img = self.ops(_img)\n\n return _img, _class" ]
[ "0.6246118", "0.6246118", "0.62181085", "0.6194361", "0.6194361", "0.60827804", "0.606501", "0.6059099", "0.59579223", "0.59201604", "0.5910337", "0.58504593", "0.5800288", "0.5793381", "0.5784016", "0.5776674", "0.5700956", "0.56607634", "0.5656199", "0.56306195", "0.56052715", "0.5596977", "0.5584456", "0.55816317", "0.558088", "0.5532997", "0.5532997", "0.5532997", "0.5513728", "0.55103856", "0.548925", "0.54614156", "0.544237", "0.54421365", "0.54407156", "0.5434632", "0.54270583", "0.54267913", "0.537618", "0.5366111", "0.5362312", "0.5359978", "0.5346501", "0.53464395", "0.53278565", "0.5315721", "0.5313587", "0.52913415", "0.52775276", "0.5274344", "0.52703136", "0.52703136", "0.52703136", "0.52684826", "0.5268091", "0.5265264", "0.52627146", "0.5254381", "0.52528924", "0.5239507", "0.5232313", "0.52235836", "0.52120227", "0.52098036", "0.52098036", "0.5207218", "0.52070296", "0.52014714", "0.51861656", "0.51730245", "0.5168616", "0.5161868", "0.51579195", "0.5139867", "0.5120394", "0.5119289", "0.51183105", "0.511627", "0.51133573", "0.5104906", "0.51020104", "0.51020104", "0.51020104", "0.51020104", "0.51020104", "0.51020104", "0.51020104", "0.50982904", "0.50933754", "0.50913215", "0.5067552", "0.5065916", "0.5065831", "0.50657207", "0.50630534", "0.50619376", "0.5061376", "0.5061307", "0.5059995", "0.504887" ]
0.7732635
0
We need to run this before the actual draw to insert and update p5 env variables
def pre_draw(p5_instance, draw_func):
    global _CTX_MIDDLE, _DEFAULT_FILL, _DEFAULT_LEADMULT, _DEFAULT_STROKE, _DEFAULT_TEXT_FILL
    global ADD, ALT, ARROW, AUTO, AUDIO, AXES, BACKSPACE, BASELINE, BEVEL, BEZIER, BLEND, BLUR, BOLD, BOLDITALIC
    global BOTTOM, BURN, CENTER, CHORD, CLAMP, CLOSE, CONTROL, CORNER, CORNERS, CROSS, CURVE, DARKEST
    global DEG_TO_RAD, DEGREES, DELETE, DIFFERENCE, DILATE, DODGE, DOWN_ARROW, ENTER, ERODE, ESCAPE, EXCLUSION
    global FILL, GRAY, GRID, HALF_PI, HAND, HARD_LIGHT, HSB, HSL, IMAGE, IMMEDIATE, INVERT, ITALIC, LANDSCAPE
    global LEFT, LEFT_ARROW, LIGHTEST, LINE_LOOP, LINE_STRIP, LINEAR, LINES, MIRROR, MITER, MOVE, MULTIPLY, NEAREST
    global NORMAL, OPAQUE, OPEN, OPTION, OVERLAY, P2D, PI, PIE, POINTS, PORTRAIT, POSTERIZE, PROJECT, QUAD_STRIP, QUADRATIC
    global QUADS, QUARTER_PI, RAD_TO_DEG, RADIANS, RADIUS, REPEAT, REPLACE, RETURN, RGB, RIGHT, RIGHT_ARROW
    global ROUND, SCREEN, SHIFT, SOFT_LIGHT, SQUARE, STROKE, SUBTRACT, TAB, TAU, TEXT, TEXTURE, THRESHOLD, TOP
    global TRIANGLE_FAN, TRIANGLE_STRIP, TRIANGLES, TWO_PI, UP_ARROW, VIDEO, WAIT, WEBGL
    global frameCount, focused, displayWidth, displayHeight, windowWidth, windowHeight, width, height
    global disableFriendlyErrors, deviceOrientation, accelerationX, accelerationY, accelerationZ
    global pAccelerationX, pAccelerationY, pAccelerationZ, rotationX, rotationY, rotationZ
    global pRotationX, pRotationY, pRotationZ, turnAxis, keyIsPressed, key, keyCode, mouseX, mouseY, pmouseX, pmouseY
    global winMouseX, winMouseY, pwinMouseX, pwinMouseY, mouseButton, mouseIsPressed, touches, pixels

    _CTX_MIDDLE = p5_instance._CTX_MIDDLE
    _DEFAULT_FILL = p5_instance._DEFAULT_FILL
    _DEFAULT_LEADMULT = p5_instance._DEFAULT_LEADMULT
    _DEFAULT_STROKE = p5_instance._DEFAULT_STROKE
    _DEFAULT_TEXT_FILL = p5_instance._DEFAULT_TEXT_FILL

    ADD = p5_instance.ADD
    ALT = p5_instance.ALT
    ARROW = p5_instance.ARROW
    AUDIO = p5_instance.AUDIO
    AUTO = p5_instance.AUTO
    AXES = p5_instance.AXES
    BACKSPACE = p5_instance.BACKSPACE
    BASELINE = p5_instance.BASELINE
    BEVEL = p5_instance.BEVEL
    BEZIER = p5_instance.BEZIER
    BLEND = p5_instance.BLEND
    BLUR = p5_instance.BLUR
    BOLD = p5_instance.BOLD
    BOLDITALIC = p5_instance.BOLDITALIC
    BOTTOM = p5_instance.BOTTOM
    BURN = p5_instance.BURN
    CENTER = p5_instance.CENTER
    CHORD = p5_instance.CHORD
    CLAMP = p5_instance.CLAMP
    CLOSE = p5_instance.CLOSE
    CONTROL = p5_instance.CONTROL
    CORNER = p5_instance.CORNER
    CORNERS = p5_instance.CORNERS
    CROSS = p5_instance.CROSS
    CURVE = p5_instance.CURVE
    DARKEST = p5_instance.DARKEST
    DEG_TO_RAD = p5_instance.DEG_TO_RAD
    DEGREES = p5_instance.DEGREES
    DELETE = p5_instance.DELETE
    DIFFERENCE = p5_instance.DIFFERENCE
    DILATE = p5_instance.DILATE
    DODGE = p5_instance.DODGE
    DOWN_ARROW = p5_instance.DOWN_ARROW
    ENTER = p5_instance.ENTER
    ERODE = p5_instance.ERODE
    ESCAPE = p5_instance.ESCAPE
    EXCLUSION = p5_instance.EXCLUSION
    FILL = p5_instance.FILL
    GRAY = p5_instance.GRAY
    GRID = p5_instance.GRID
    HALF_PI = p5_instance.HALF_PI
    HAND = p5_instance.HAND
    HARD_LIGHT = p5_instance.HARD_LIGHT
    HSB = p5_instance.HSB
    HSL = p5_instance.HSL
    IMAGE = p5_instance.IMAGE
    IMMEDIATE = p5_instance.IMMEDIATE
    INVERT = p5_instance.INVERT
    ITALIC = p5_instance.ITALIC
    LANDSCAPE = p5_instance.LANDSCAPE
    LEFT = p5_instance.LEFT
    LEFT_ARROW = p5_instance.LEFT_ARROW
    LIGHTEST = p5_instance.LIGHTEST
    LINE_LOOP = p5_instance.LINE_LOOP
    LINE_STRIP = p5_instance.LINE_STRIP
    LINEAR = p5_instance.LINEAR
    LINES = p5_instance.LINES
    MIRROR = p5_instance.MIRROR
    MITER = p5_instance.MITER
    MOVE = p5_instance.MOVE
    MULTIPLY = p5_instance.MULTIPLY
    NEAREST = p5_instance.NEAREST
    NORMAL = p5_instance.NORMAL
    OPAQUE = p5_instance.OPAQUE
    OPEN = p5_instance.OPEN
    OPTION = p5_instance.OPTION
    OVERLAY = p5_instance.OVERLAY
    P2D = p5_instance.P2D
    P3D = p5_instance.WEBGL
    PI = p5_instance.PI
    PIE = p5_instance.PIE
    POINTS = p5_instance.POINTS
    PORTRAIT = p5_instance.PORTRAIT
    POSTERIZE = p5_instance.POSTERIZE
    PROJECT = p5_instance.PROJECT
    QUAD_STRIP = p5_instance.QUAD_STRIP
    QUADRATIC = p5_instance.QUADRATIC
    QUADS = p5_instance.QUADS
    QUARTER_PI = p5_instance.QUARTER_PI
    RAD_TO_DEG = p5_instance.RAD_TO_DEG
    RADIANS = p5_instance.RADIANS
    RADIUS = p5_instance.RADIUS
    REPEAT = p5_instance.REPEAT
    REPLACE = p5_instance.REPLACE
    RETURN = p5_instance.RETURN
    RGB = p5_instance.RGB
    RIGHT = p5_instance.RIGHT
    RIGHT_ARROW = p5_instance.RIGHT_ARROW
    ROUND = p5_instance.ROUND
    SCREEN = p5_instance.SCREEN
    SHIFT = p5_instance.SHIFT
    SOFT_LIGHT = p5_instance.SOFT_LIGHT
    SQUARE = p5_instance.SQUARE
    STROKE = p5_instance.STROKE
    SUBTRACT = p5_instance.SUBTRACT
    TAB = p5_instance.TAB
    TAU = p5_instance.TAU
    TEXT = p5_instance.TEXT
    TEXTURE = p5_instance.TEXTURE
    THRESHOLD = p5_instance.THRESHOLD
    TOP = p5_instance.TOP
    TRIANGLE_FAN = p5_instance.TRIANGLE_FAN
    TRIANGLE_STRIP = p5_instance.TRIANGLE_STRIP
    TRIANGLES = p5_instance.TRIANGLES
    TWO_PI = p5_instance.TWO_PI
    UP_ARROW = p5_instance.UP_ARROW
    VIDEO = p5_instance.VIDEO
    WAIT = p5_instance.WAIT
    WEBGL = p5_instance.WEBGL

    frameCount = p5_instance.frameCount
    focused = p5_instance.focused
    displayWidth = p5_instance.displayWidth
    displayHeight = p5_instance.displayHeight
    windowWidth = p5_instance.windowWidth
    windowHeight = p5_instance.windowHeight
    width = p5_instance.width
    height = p5_instance.height
    disableFriendlyErrors = p5_instance.disableFriendlyErrors
    deviceOrientation = p5_instance.deviceOrientation
    accelerationX = p5_instance.accelerationX
    accelerationY = p5_instance.accelerationY
    accelerationZ = p5_instance.accelerationZ
    pAccelerationX = p5_instance.pAccelerationX
    pAccelerationY = p5_instance.pAccelerationY
    pAccelerationZ = p5_instance.pAccelerationZ
    rotationX = p5_instance.rotationX
    rotationY = p5_instance.rotationY
    rotationZ = p5_instance.rotationZ
    pRotationX = p5_instance.pRotationX
    pRotationY = p5_instance.pRotationY
    pRotationZ = p5_instance.pRotationZ
    turnAxis = p5_instance.turnAxis
    keyIsPressed = p5_instance.keyIsPressed
    key = p5_instance.key
    keyCode = p5_instance.keyCode
    mouseX = p5_instance.mouseX
    mouseY = p5_instance.mouseY
    pmouseX = p5_instance.pmouseX
    pmouseY = p5_instance.pmouseY
    winMouseX = p5_instance.winMouseX
    winMouseY = p5_instance.winMouseY
    pwinMouseX = p5_instance.pwinMouseX
    pwinMouseY = p5_instance.pwinMouseY
    mouseButton = p5_instance.mouseButton
    mouseIsPressed = p5_instance.mouseIsPressed
    touches = p5_instance.touches
    pixels = p5_instance.pixels

    return draw_func()
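For context on how a shim like this is typically consumed, the sketch below is illustrative only and not the library's documented API: it assumes a host runtime (e.g. a pyodide/p5.js bridge) that owns a single p5 instance and invokes a per-frame callback, so pre_draw refreshes the module-level globals (width, mouseX, frameCount, ...) just before the user's draw() runs. The names draw and frame_callback are hypothetical.

# Minimal usage sketch, assuming pre_draw (above) is defined in this module
# and that the host runtime supplies the real p5 instance each frame.

def draw():
    # User sketch code; reads globals that pre_draw copied from the instance.
    print("frame", frameCount, "mouse at", mouseX, mouseY)

def frame_callback(p5_instance):
    # Per-frame hook: refresh the p5 env variables, then run the sketch.
    return pre_draw(p5_instance, draw)

# The hosting runtime would register frame_callback against its live p5
# instance; no instance is constructed here, so nothing executes on import.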
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup_draw(self):\n pass", "def pointsSetUp(self):\r\n self.background.draw(self.surface)\r\n for i in range(len(self.points)):\r\n self.points[i].organize()\r\n self.points[i].update()\r\n self.points[i].addNumber(i)\r\n self.points[i].setActiveTurn()", "def _init_env_variables(self):\n #raw_input(\"TakeOFF PRESS\")\n # We TakeOff before sending any movement commands\n self.takeoff()\n\n # For Info Purposes\n self.cumulated_reward = 0.0\n # We get the initial pose to mesure the distance from the desired point.\n gt_pose = self.get_gt_pose()\n self.previous_distance_from_des_point = self.get_distance_from_desired_point(\n gt_pose.position)", "def __init__(self, parent):\n super(P5, self).__init__(parent)\n self.shapes = []\n self.scenes = []\n self.current_scene = 0\n self.objects = []\n self.lighting = True\n self.draw_axes = True", "def pre_draw(self):", "def visualize_environment(self,env_state):\n fig=plt.figure(figsize=self.figsize)\n ax=plt.subplot(111)\n #Plot the targets\n plt.plot([i[0] for i in self.coordinates__targets],\\\n [i[1] for i in self.coordinates__targets],\\\n marker='x',markersize=15,linestyle='None',color='k',label='Target')\n plot_target_values = True\n if plot_target_values:\n for i ,t in enumerate(self.coordinates__targets):\n plt.text(t[0],t[1],self.target_values[i])\n #Plot the towers\n tower_colors = ['r','b','g']\n for tk in xrange(self.N_tower_kinds):\n plt.plot([i[0] for i in self.coordinates__tower_sites[tk]],\\\n [i[1] for i in self.coordinates__tower_sites[tk]],\\\n marker='o',markersize=10,linestyle='None',color=tower_colors[tk],alpha=.5,label='Tower {} Sites'.format(tk+1))\n if env_state == 'solved':\n for tk in xrange(self.N_tower_kinds):\n plt.plot([i[0] for i in self.coordinates__solved_towers[tk]],\\\n [i[1] for i in self.coordinates__solved_towers[tk]],\\\n marker='^',markersize=20,linestyle='None',color=tower_colors[tk],label='Tower {} Placed'.format(tk+1))\n for x,y,w,h in self.coordinates__obstacles:\n r = plt.Rectangle((x,y),w,h,fc='c')\n ax.add_patch(r)\n plt.xlim(0,self.map_dimensions[1])\n plt.ylim(0,self.map_dimensions[0])\n plt.legend(numpoints=1,loc='best')\n savename = 'SolvedMap.png' if env_state == 'solved' else 'InitialMap.png'\n plt.savefig(savename)", "def init_game_setting(self):\n np.random.seed(1) \n self.s_prev = np.zeros((80, 80, 1))\n print('loading trained model from {}'.format(self.model_path))\n self.sess = tf.InteractiveSession(graph=self.model)\n self.saver.restore(self.sess, self.model_path)", "def setup(self):\n # if not system.restore_snapshot():\n # self.log.debug(\"No snapshot to restore, if this is not expected please contact automation team\")\n crindsim.set_mode(\"manual\")\n pos.connect()\n pos.sign_on()", "def setup(self):\n # Initialize the drawing environment (create main windows, etc)\n glutInit(sys.argv)\n glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB | GLUT_DEPTH)\n glutInitWindowSize(WINDOW_WIDTH, WINDOW_HEIGHT)\n glutCreateWindow(name)\n\n glShadeModel(GL_SMOOTH)\n\n glClearDepth(1.0)\n glDepthFunc(GL_LESS) # The Type Of Depth Test To Do\n glEnable(GL_DEPTH_TEST) # Enables Depth Testing\n glShadeModel(GL_SMOOTH) # Enables Smooth Color Shading\n\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity() # Reset The Projection Matrix\n\n # Calculate The Aspect Ratio Of The Window\n gluPerspective(45.0, float(WINDOW_WIDTH)/float(WINDOW_HEIGHT), 0.1, 100.0)\n\n glMatrixMode(GL_MODELVIEW)\n\n # Set up keyboard listeners.\n glutKeyboardFunc(self.on_key)", "def draw_environment():\n rect(screen, LIGHT_GRAY, (0, 0, 
800, 450)) # grey sky\n rect(screen, WHITE, (0, 450, 800, 1000)) # white ground", "def _prepare_draw(self, view=None):\n pass", "def setup(self):\r\n m = re.match(r'\\[([0-9]+),([0-9]+)]',\r\n self.value.strip().replace(' ', ''))\r\n if m:\r\n # Note: we subtract 15 to compensate for the size of the dot on the screen.\r\n # (is a 30x30 image--lms/static/green-pointer.png).\r\n (self.gx, self.gy) = [int(x) - 15 for x in m.groups()]\r\n else:\r\n (self.gx, self.gy) = (0, 0)", "def _update_plot(self):\n\n self.T_ex[:-1] = self.T_ex[1:]\n self.T_ex[-1] = self.ensemble.T_ex\n self.plot_T_ex[0].set_ydata(self.T_ex)\n self.T_kin[:-1] = self.T_kin[1:]\n self.T_kin[-1] = self.ensemble.T_kin\n self.plot_T_kin[0].set_ydata(self.T_kin)\n self.canvas.draw()\n\n renderer = self.canvas.get_renderer()\n raw_data = renderer.tostring_rgb()\n surf = pygame.image.fromstring(raw_data,\n (self.plot_width, self.disp_height),\n \"RGB\")\n self.game_display.blit(surf, (self.disp_width, 0))", "def env_init(self, env_info={}):\n self.dealer_sticks = env_info['dealer_sticks']\n self.random = np.random.RandomState(env_info['seed'])\n self.current_state = None", "def setup():\n size(SPACE['w'], SPACE['h'])\n colorMode(RGB, 1)", "def main():\n save = False\n show = True\n\n #hd_parameter_plots = HDparameterPlots(save=save)\n #hd_parameter_plots.flow_parameter_distribution_for_non_lake_cells_for_current_HD_model()\n #hd_parameter_plots.flow_parameter_distribution_current_HD_model_for_current_HD_model_reprocessed_without_lakes_and_wetlands()\n #hd_parameter_plots.flow_parameter_distribution_ten_minute_data_from_virna_0k_ALG4_sinkless_no_true_sinks_oceans_lsmask_plus_upscale_rdirs()\n #hd_parameter_plots.flow_parameter_distribution_ten_minute_data_from_virna_0k_ALG4_sinkless_no_true_sinks_oceans_lsmask_plus_upscale_rdirs_no_tuning()\n #ice5g_comparison_plots = Ice5GComparisonPlots(save=save)\n #ice5g_comparison_plots.plotLine()\n #ice5g_comparison_plots.plotFilled()\n #ice5g_comparison_plots.plotCombined()\n #ice5g_comparison_plots.plotCombinedIncludingOceanFloors()\n #flowmapplot = FlowMapPlots(save)\n #flowmapplot.FourFlowMapSectionsFromDeglaciation()\n #flowmapplot.Etopo1FlowMap()\n #flowmapplot.ICE5G_data_all_points_0k()\n #flowmapplot.ICE5G_data_all_points_0k_no_sink_filling()\n #flowmapplot.ICE5G_data_all_points_0k_alg4_two_color()\n #flowmapplot.ICE5G_data_all_points_21k_alg4_two_color()\n #flowmapplot.Etopo1FlowMap_two_color()\n #flowmapplot.Etopo1FlowMap_two_color_directly_upscaled_fields()\n #flowmapplot.Corrected_HD_Rdirs_FlowMap_two_color()\n #flowmapplot.ICE5G_data_ALG4_true_sinks_21k_And_ICE5G_data_ALG4_true_sinks_0k_FlowMap_comparison()\n #flowmapplot.Corrected_HD_Rdirs_And_Etopo1_ALG4_sinkless_directly_upscaled_fields_FlowMap_comparison()\n #flowmapplot.Corrected_HD_Rdirs_And_Etopo1_ALG4_true_sinks_directly_upscaled_fields_FlowMap_comparison()\n #flowmapplot.Corrected_HD_Rdirs_And_ICE5G_data_ALG4_sinkless_0k_directly_upscaled_fields_FlowMap_comparison()\n #flowmapplot.Corrected_HD_Rdirs_And_ICE5G_data_ALG4_true_sinks_0k_directly_upscaled_fields_FlowMap_comparison()\n #flowmapplot.Corrected_HD_Rdirs_And_ICE5G_data_ALG4_corr_orog_0k_directly_upscaled_fields_FlowMap_comparison()\n #flowmapplot.Corrected_HD_Rdirs_And_ICE5G_data_ALG4_corr_orog_downscaled_ls_mask_0k_directly_upscaled_fields_FlowMap_comparison()\n #flowmapplot.Corrected_HD_Rdirs_And_ICE5G_data_ALG4_no_true_sinks_corr_orog_0k_directly_upscaled_fields_FlowMap_comparison()\n 
#flowmapplot.Corrected_HD_Rdirs_And_ICE5G_HD_as_data_ALG4_true_sinks_0k_directly_upscaled_fields_FlowMap_comparison()\n #flowmapplot.Upscaled_Rdirs_vs_Directly_Upscaled_fields_ICE5G_data_ALG4_corr_orog_downscaled_ls_mask_0k_FlowMap_comparison()\n #flowmapplot.Ten_Minute_Data_from_Virna_data_ALG4_corr_orog_downscaled_lsmask_no_sinks_21k_vs_0k_FlowMap_comparison()\n #flowmapplot.Upscaled_Rdirs_vs_Corrected_HD_Rdirs_ICE5G_data_ALG4_corr_orog_downscaled_ls_mask_0k_FlowMap_comparison()\n flowmapplotwithcatchment = FlowMapPlotsWithCatchments(save)\n #flowmapplotwithcatchment.Upscaled_Rdirs_vs_Corrected_HD_Rdirs_ICE5G_data_ALG4_corr_orog_downscaled_ls_mask_0k_FlowMap_comparison()\n #flowmapplotwithcatchment.compare_present_day_and_lgm_river_directions_with_catchments_virna_data_plus_tarasov_style_orog_corrs_for_both()\n #flowmapplotwithcatchment.compare_present_day_river_directions_with_catchments_virna_data_with_vs_without_tarasov_style_orog_corrs()\n #flowmapplotwithcatchment.compare_lgm_river_directions_with_catchments_virna_data_with_vs_without_tarasov_style_orog_corrs()\n #flowmapplotwithcatchment.Upscaled_Rdirs_vs_Corrected_HD_Rdirs_tarasov_upscaled_data_ALG4_corr_orog_downscaled_ls_mask_0k_FlowMap_comparison()\n #flowmapplotwithcatchment.upscaled_rdirs_with_and_without_tarasov_upscaled_data_ALG4_corr_orog_downscaled_ls_mask_0k_FlowMap_comparison()\n #flowmapplotwithcatchment.\\\n #upscaled_rdirs_with_and_without_tarasov_upscaled_north_america_only_data_ALG4_corr_orog_downscaled_ls_mask_0k_FlowMap_comparison()\n #flowmapplotwithcatchment.\\\n #Upscaled_Rdirs_vs_Corrected_HD_Rdirs_tarasov_upscaled_north_america_only_data_ALG4_corr_orog_downscaled_ls_mask_0k_FlowMap_comparison()\n #flowmapplotwithcatchment.\\\n #Upscaled_Rdirs_vs_Corrected_HD_Rdirs_tarasov_upscaled_north_america_only_data_ALG4_corr_orog_glcc_olson_lsmask_0k_FlowMap_comparison()\n #flowmapplotwithcatchment.compare_present_day_and_lgm_river_directions_with_catchments_ICE5G_plus_tarasov_style_orog_corrs_for_both()\n #flowmapplotwithcatchment.compare_present_day_and_lgm_river_directions_with_catchments_ICE6G_plus_tarasov_style_orog_corrs_for_both()\n #flowmapplotwithcatchment.compare_ICE5G_and_ICE6G_with_catchments_tarasov_style_orog_corrs_for_both()\n #flowmapplotwithcatchment.compare_river_directions_with_dynriver_corrs_and_MERIThydro_derived_corrs()\n #flowmapplotwithcatchment.compare_river_directions_with_dynriver_corrs_and_MERIThydro_derived_corrs_original_ts()\n flowmapplotwithcatchment.compare_river_directions_with_dynriver_corrs_and_MERIThydro_derived_corrs_new_ts_10min()\n outflowplots = OutflowPlots(save)\n #outflowplots.Compare_Upscaled_Rdirs_vs_Directly_Upscaled_fields_ICE5G_data_ALG4_corr_orog_downscaled_ls_mask_0k()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_ICE5G_as_HD_data_ALG4_sinkless_all_points_0k()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_ICE5G_as_HD_data_ALG4_true_sinks_all_points_0k()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_ICE5G_ALG4_sinkless_all_points_0k_directly_upscaled_fields()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_ICE5G_ALG4_true_sinks_all_points_0k_directly_upscaled_fields()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_ICE5G_ALG4_corr_orog_all_points_0k_directly_upscaled_fields()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_ICE5G_ALG4_corr_orog_downscaled_ls_mask_all_points_0k_directly_upscaled_fields()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_Etopo1_ALG4_sinkless_directly_upscaled_fields()\n 
#outflowplots.Compare_Corrected_HD_Rdirs_And_Etopo1_ALG4_true_sinks_directly_upscaled_fields()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_ICE5G_plus_tarasov_upscaled_srtm30_ALG4_corr_orog_0k_directly_upscaled_fields()\n #outflowplots.Compare_Original_Corrections_vs_Upscaled_MERIT_DEM_0k()\n outflowplots.Compare_Original_Corrections_vs_Upscaled_MERIT_DEM_0k_new_truesinks()\n #outflowplots.Compare_Original_Corrections_vs_Upscaled_MERIT_DEM_0k_new_truesinks_individual_rivers()\n #outflowplots.Compare_ICE5G_with_and_without_tarasov_upscaled_srtm30_ALG4_corr_orog_0k_directly_upscaled_fields()\n #hd_output_plots = HDOutputPlots()\n #hd_output_plots.check_water_balance_of_1978_for_constant_forcing_of_0_01()\n #hd_output_plots.plot_comparison_using_1990_rainfall_data()\n #hd_output_plots.plot_comparison_using_1990_rainfall_data_adding_back_to_discharge()\n #coupledrunoutputplots = CoupledRunOutputPlots(save=save)\n #coupledrunoutputplots.ice6g_rdirs_lgm_run_discharge_plot()\n #coupledrunoutputplots.extended_present_day_rdirs_lgm_run_discharge_plot()\n #coupledrunoutputplots.ocean_grid_extended_present_day_rdirs_vs_ice6g_rdirs_lgm_run_discharge_plot()\n #coupledrunoutputplots.extended_present_day_rdirs_vs_ice6g_rdirs_lgm_echam()\n #coupledrunoutputplots.extended_present_day_rdirs_vs_ice6g_rdirs_lgm_mpiom_pem()\n #lake_plots = LakePlots()\n #lake_plots.plotLakeDepths()\n #lake_plots.LakeAndRiverMap()\n #lake_plots.LakeAndRiverMaps()\n if show:\n plt.show()", "def reset(self):\n look = [0.4, 0.1, 0.54] \n distance = 1.5\n pitch = -90\n yaw = -90\n roll = 180\n pos_range = [0.45, 0.5, 0.0, 0.1]\n\n self._view_matrix = p.computeViewMatrixFromYawPitchRoll(look, distance, yaw, pitch, roll, 2)\n fov = 20. + self._cameraRandom * np.random.uniform(-2, 2)\n aspect = self._width / self._height\n near = 0.01\n far = 10\n self._proj_matrix = p.computeProjectionMatrixFOV(fov, aspect, near, far)\n\n self._attempted_grasp = False\n self._env_step = 0\n self.terminated = 0\n\n p.resetSimulation()\n p.setPhysicsEngineParameter(numSolverIterations=150)\n p.setTimeStep(self._timeStep)\n p.loadURDF(os.path.join(self._urdfRoot, \"plane.urdf\"), [0, 0, -1])\n\n self.tableUid = p.loadURDF(os.path.join(self._urdfRoot, \"table/table.urdf\"), 0.5000000, 0.00000, -.640000,\n 0.000000, 0.000000, 0.0, 1.0)\n\n p.setGravity(0, 0, -10)\n self._envStepCounter = 0\n p.stepSimulation()\n\n # place objs\n if RESUME_NUM is not None:\n self.model_paths = self.model_paths[RESUME_NUM*3:]\n self.img_save_cnt = RESUME_NUM\n self.N_DATA_TO_GENERATE -= RESUME_NUM\n for _ in tqdm(range(self.N_DATA_TO_GENERATE)):\n urdfList = []\n chosen_classes = random.sample(CLASS_NAME, k=3)\n for _c in chosen_classes:\n urdfList.append([self.model_paths[_c].pop(), _c])\n self._objectUids, self._objectClasses = self._randomly_place_objects(urdfList, pos_range)\n self._observation = self._get_observation()\n self.img_save_cnt += 1\n for uid in self._objectUids:\n p.removeBody(uid)\n\n # close-up view\n for idx, obj_path in enumerate(urdfList):\n self._objectUids, self._objectClasses = self._place_single_object([obj_path])\n observation = self._get_closeup(idx)\n for uid in self._objectUids:\n p.removeBody(uid)\n\n\n # terminate\n sys.exit('finish')\n\n return np.array(self._observation)", "def set_app_defaults(self):\n self.curve_render = 0\n self.image_render = 0\n self.image_height = 200\n self.image_data = []\n self.auto_scale = True\n\n self.create_actions()\n self.setup_signals()\n self.reset_graph()\n\n self.fps = utils.SimpleFPS()\n\n # Click the 
live button\n self.ui.actionContinue_Live_Updates.trigger()", "def _prepare_draw(self, view=None):\n return True", "def prepare(self):\n self.__plot_data = [[], []]\n self.final_residual = None\n self.time_value = None\n self.clear_folder_content(self.run_path())\n self.copy_folder_content(self.config_path('system'), self.run_path('system'), overwrite=True)\n self.copy_folder_content(self.config_path('constant'), self.run_path('constant'), overwrite=True)\n self.copy_folder_content(self.config_path('0'), self.run_path('0'), overwrite=True)\n return True", "def setup():\r\n #this happens just once\r\n size(width, height) #instead of create_canvas\r", "def setup(self):\n\n for name, infos in Rt.geom_dict.items():\n if name in Rt.optim_var_dict:\n self.add_input(name, val=infos[1][0])", "def init_env(self, env_name):\n self.env = gym.make(env_name)\n self.env.render()\n\n self.state_space = self.env.observation_space.n\n self.action_space = self.env.action_space.n\n\n self.q_table = np.zeros((self.state_space, self.action_space))", "def draw():", "def startup(self):\n self.prev_gray = None\n self.frame_idx = 1\n self.tracks = []\n self.fps = []\n self.vid_info = None\n self.track_new_points_count = 0", "def draw(self):\n\t\tpass", "def test_grasp_3D_location(self):\n self.taskRunner.callOnThread(self.grasp_3D_location)", "def save_env():\n global vis\n vis.save([vis.env])", "def graphics(env, fovea, objects, unit):\n plt.clf()\n\n env = environment.redraw(env, unit, objects)\n fovea_im = fovea.get_focus_image(env)\n\n plt.subplot(121)\n plt.title('Training environment')\n plt.xlim(0, unit)\n plt.ylim(0, unit)\n plt.imshow(env)\n\n # PLOT DESK EDGES\n plt.plot([0.2*unit, 0.2*unit, 0.8*unit, 0.8*unit, 0.2*unit],\n [0.2*unit, 0.8*unit, 0.8*unit, 0.2*unit, 0.2*unit], 'w-'\n )\n\n # PLOT FOVEA EDGES\n fov_indices = fovea.get_index_values()\n plt.plot([fov_indices[0][0], fov_indices[0][0], fov_indices[0][1],\n fov_indices[0][1], fov_indices[0][0]],\n [fov_indices[1][0], fov_indices[1][1], fov_indices[1][1],\n fov_indices[1][0], fov_indices[1][0]], 'w-'\n )\n\n plt.subplot(122)\n plt.title('Focus image')\n plt.imshow(fovea_im)\n\n plt.draw()\n plt.pause(0.01)", "def onResetParameters(self):\r\n # productive #button\r\n profprint()\r\n fileName = pathToScene = slicer.modules.needlefinder.path.replace(\"NeedleFinder.py\", \"Config/default.cfg\")\r\n self.logic.loadParameters(fileName)", "def init_game_setting(self):\n ##################\n # YOUR CODE HERE #\n ##################\n self.state = np.zeros((1, 80, 80))\n self.clear_action()", "def do_stuff(self):\n self.create_tourism_raster()", "def setup(config, session, pts_all):\n optic = config['Optic']\n general = config['General']\n\n numFrames_total_rough = session['frames_total']\n numVids = session['num_vids']\n spacing = optic['spacing']\n\n bbox_subframe_displacement = pts_all['bbox_subframe_displacement']\n pts_displacement = pts_all['pts_displacement']\n pts_x_displacement = pts_all['pts_x_displacement']\n pts_y_displacement = pts_all['pts_y_displacement']\n mask_frame_displacement = pts_all['mask_frame_displacement']\n\n ## Make point cloud\n pts_spaced = np.ones((np.int64(bbox_subframe_displacement[3] * bbox_subframe_displacement[2] / spacing),\n 2)) * np.nan ## preallocation\n cc = 0 ## set idx counter\n\n # make spaced out points\n for ii in range(len(pts_x_displacement)):\n if (pts_x_displacement[ii] % spacing == 0) and (pts_y_displacement[ii] % spacing == 0):\n pts_spaced[cc, 0] = pts_x_displacement[ii]\n pts_spaced[cc, 1] = 
pts_y_displacement[ii]\n cc = cc + 1\n\n pts_spaced = np.expand_dims(pts_spaced, 1).astype('single')\n pts_spaced = np.delete(pts_spaced, np.where(np.isnan(pts_spaced[:, 0, 0])), axis=0)\n print(f'number of points: {pts_spaced.shape[0]}')\n\n ## Define random colors for points in cloud\n color_tuples = list(np.arange(len(pts_x_displacement)))\n for ii in range(len(pts_x_displacement)):\n color_tuples[ii] = (np.random.rand(1)[0] * 255, np.random.rand(1)[0] * 255, np.random.rand(1)[0] * 255)\n\n ## Preallocate output variables\n\n # I add a bunch of NaNs to the end because the openCV estimate is usually less than the actual number of frames\n displacements = np.ones((pts_spaced.shape[0], 2, np.uint64(\n numFrames_total_rough + numFrames_total_rough * 0.1 + (numVids * 1000)))) * np.nan\n positions_recursive = np.ones((pts_spaced.shape[0], 2, np.uint64(\n numFrames_total_rough + numFrames_total_rough * 0.1 + (numVids * 1000)))) * np.nan\n\n ## Preset point tracking variables\n pointInds_toUse = copy.deepcopy(pts_spaced)\n pointInds_tracked = pointInds_toUse ## set the first frame to have point locations be positions in the point cloud\n pointInds_tracked_tuple = list(np.arange(pointInds_toUse.shape[0]))\n\n return pointInds_toUse, pointInds_tracked, pointInds_tracked_tuple, displacements, pts_spaced, color_tuples , positions_recursive", "def mainLogic(self):\n self.canvas.delete('all')\n n = int(self.nEntry.get())\n \n self.vert = vert = genVertList(n)\n self.drawPoints(vert)\n \n self.conMat = conMat = ranSymMatrix(n)\n self.calcConn(conMat)", "def setupRender():\n prefs = getPreferences()\n\n # Check of the built-in environment maps path can be located.\n # Discontinue if it cannot be found.\n envPath = prefs.path_value\n if not envPath:\n return {'WARNING'}, \"No environment images path defined\"\n\n # Discontinue if there is no output path defined.\n renderPath = outputPath()\n if not renderPath:\n return {'WARNING'}, \"The scene needs to be saved before rendering\"\n\n if prefs.image_value == 'NONE':\n return {'WARNING'}, \"No environment image defined\"\n\n setRenderSettings(os.path.join(renderPath, IMAGE_NAME))\n createCamera()\n createWorld(envPath)\n return renderPath", "def appStarted(self):\n self.color1=self.app.colorset[0]\n self.color2=self.app.colorset[1]\n self.color3=self.app.colorset[2]\n self.rows=6\n self.cols=6\n self.squaresize=800\n self.piecesize=int(self.squaresize/self.cols)\n self.square=([[0]*self.cols for row in range(self.rows)])\n self.side=[0]*self.cols\n \n self.doubleclick=None\n self.temp=None\n self.s=None\n self.imagesize=self.squaresize\n self.image=self.loadImage('level4.png')\n w,h=self.image.size\n scale=min(w,h)\n self.image=self.scaleImage(self.image,self.imagesize/scale)\n self.image=self.image.crop((0,0,self.squaresize,self.squaresize))\n self.imageW,self.imageH=self.image.size\n self.smol=self.scaleImage(self.image,300/scale)\n\n self.pieces=self.createPiece()\n self.pieces.shuffle()\n self.start=False\n self.timer=0\n self.timers=0\n self.timerm=0", "def display_preprocessed(env,frame):\n env.reset()\n\n #Plot the figure\n plt.figure()\n\n #Show the pre processed frame\n plt.imshow(preprocess_frame(env.reset(), (0, 0, 0, 0), 84), cmap=\"gray\")\n\n #Add title\n plt.title('Pre Processed image')\n\n #Show the plot\n plt.show()", "def setup(self):\n if not system.restore_snapshot():\n self.log.warning(\"Not able to restore snapshot\")\n\n pos.connect()\n\n pos.sign_on()", "def setup(self):\n self.board[(3, 3)] = -1\n self.board[(3, 4)] = -1\n 
self.board[(4, 3)] = 1\n self.board[(4, 4)] = 1\n\n self.stones_set = 4", "def onReset(self):\n #productive\n profprint()\n fileName = pathToScene = slicer.modules.needlefinder.path.replace(\"NeedleFinder.py\",\"Config/default.cfg\")\n self.logic.loadParameters(fileName)", "def init_env_variables(self):\n self.total_distance_moved = 0.0\n self.current_y_distance = self.get_y_dir_distance_from_start_point(self.start_point)\n self.cart_current_speed = rospy.get_param('/cart_pole_3d/init_cart_vel')", "def updateVariables(self) -> None:\n ...", "def setup():\n size(800, 600)\n stroke_weight(3)", "def setup(self):\n header_print(self.data['intro'])\n header_print(self.data['help'])\n random.shuffle(self.data['draw'])\n random.shuffle(self.data['locations'])\n random.shuffle(self.data['events'])\n random.shuffle(self.data['aces'])\n random.shuffle(self.data['personalities'])\n self.stats = {\n 'round': 0,\n 'powers': {\n 'MOONS': 6,\n 'SUNS': 6,\n 'WAVES': 6,\n 'LEAVES': 6,\n 'WYRMS': 6,\n 'KNOTS': 6,\n },\n 'hand': self.data['draw'][:],\n 'discard': [],\n 'active': [],\n 'opponent': {},\n }", "def draw(self, screen):", "def __init__(self, env_config, test_mode=False, render_mode='2d', verbose=False):\n\n self.test_mode = test_mode\n self.render_mode = render_mode\n self.verbose = verbose\n self.config = env_config\n\n # Setting dimension of observation vector\n self.n_observations = len(Vessel.NAVIGATION_FEATURES) + 3*self.config[\"n_sectors\"] + ColavRewarder.N_INSIGHTS\n\n self.episode = 0\n self.total_t_steps = 0\n self.t_step = 0\n self.history = []\n\n\n # Declaring attributes\n #self.obstacles = None\n self.main_vessel = None\n #self.agent = None\n\n #self.path = None\n\n self.reached_goal = None\n self.collision = None\n self.progress = None\n self.cumulative_reward = None\n self.last_reward = None\n self.last_episode = None\n self.rng = None\n self._tmp_storage = None\n\n self._action_space = gym.spaces.Box(\n low=np.array([-1, -1]),\n high=np.array([1, 1]),\n dtype=np.float32\n )\n self._observation_space = gym.spaces.Box(\n low=np.array([-1]*self.n_observations),\n high=np.array([1]*self.n_observations),\n dtype=np.float32\n )\n\n # Initializing rendering\n self._viewer2d = None\n self._viewer3d = None\n if self.render_mode == '2d' or self.render_mode == 'both':\n render2d.init_env_viewer(self)\n if self.render_mode == '3d' or self.render_mode == 'both':\n render3d.init_env_viewer(self, autocamera=self.config[\"autocamera3d\"])\n\n # self.agent = PPO2.load('C:/Users/amalih/Documents/gym-auv-master/logs/agents/MovingObstacles-v0/1589625657ppo/6547288.pkl')\n #self.agent = PPO2.load('C:/Users/amalih/Documents/gym-auv-master/logs/agents/MovingObstacles-v0/1590746004ppo/2927552.pkl')\n # self.agent = PPO2.load('C:/Users/amalih/Documents/gym-auv-master/logs/agents/MovingObstacles-v0/1590827849ppo/4070808.pkl')\n #'C:/Users/amalih/OneDrive - NTNU/github/logs/agents/MultiAgentPPO-v0/1064190.pkl'\n\n #self.agent = PPO2.load('C:/Users/amalih/Documents/gym-auv-master/logs/agents/MovingObstacles-v0/1590705511ppo/4425456.pkl')\n #self.agent = PPO2.load('C:/Users/amalih/Documents/gym-auv-master/gym-auv-master/logs/agents/MovingObstacles-v0/1589130704ppo/6916896.pkl')\n #self.agent = PPO2.load('C:/Users/amalih/Documents/gym-auv-master/gym-auv-master/logs/agents/MovingObstacles-v0/1589031909ppo/1760568.pkl')\n self.agent = PPO2.load('C:/Users/amalih/OneDrive - NTNU/github/logs/agents/MultiAgentPPO-v0/1591171914ppo/79288.pkl')\n\n self.rewarder_dict = {}\n\n self.reset()\n print('Init done')", "def 
setup_figure(self):\n \n # connect ui widgets to measurement/hardware settings or functions\n self.ui.start_pushButton.clicked.connect(self.start)\n self.ui.interrupt_pushButton.clicked.connect(self.interrupt)\n self.settings.save_h5.connect_to_widget(self.ui.save_h5_checkBox)\n self.settings.save_movie.connect_to_widget(self.ui.save_movie_checkBox)\n \n # Set up pyqtgraph graph_layout in the UI\n self.graph_layout=pg.GraphicsLayoutWidget()\n self.ui.plot_groupBox.layout().addWidget(self.graph_layout)\n \n self.aux_graph_layout=pg.GraphicsLayoutWidget()\n self.ui.aux_plot_groupBox.layout().addWidget(self.aux_graph_layout)\n \n self.camera_layout=pg.GraphicsLayoutWidget()\n self.ui.camera_groupBox.layout().addWidget(self.camera_layout)\n\n # Create PlotItem object (a set of axes) \n \n self.plot1 = self.graph_layout.addPlot(row=1,col=1,title=\"Lick\")\n self.plot2 = self.graph_layout.addPlot(row=2,col=1,title=\"breathing\")\n\n # Create PlotDataItem object ( a scatter plot on the axes )\n self.breathing_plot = self.plot2.plot([0])\n self.lick_plot_0 = self.plot1.plot([0])\n self.lick_plot_1 = self.plot1.plot([1]) \n \n self.lick_plot_0.setPen('y')\n self.lick_plot_1.setPen('g')\n \n self.T=np.linspace(0,10,10000)\n self.k=0\n \n self.camera_view=pg.ViewBox()\n self.camera_layout.addItem(self.camera_view)\n self.camera_image=pg.ImageItem()\n self.camera_view.addItem(self.camera_image)", "def _reset(self, env_id: np.ndarray) -> None:", "def setup_game(self):", "def draw(self):\n pass", "def draw(self):\n pass", "def draw(self):\n pass", "def draw(self):\n pass", "def postRun(self):\n pass", "def state_preview_enter(cfg, app, win):", "def state_processing_enter(cfg, app, win):", "def main():\n \n data_base = '/local/duman/SIMULATIONS/many_polymers_5/'\n save_base = '/usr/users/iff_th2/duman/RolfData/many_polymers_5'\n \n \n ## Index the data\n# density = [0.08, 0.2, 0.4]\n# xi_L = [0.05, 0.1, 0.2, 1.0, 2.5, 5.0, 8.0, 16.0]\n# Pe = [3.0, 7.0, 15.0, 50.0, 150.0, 500.0, 750.0, 1500.0, 4375.0, 8000.0, 10000.0]\n# kappa = [1.25, 2.5, 5.0, 25.0, 62.5, 125.0, 200.0, 400.0]\n# fp = [0.0, 0.0048, 0.0112, 0.024, 0.08, 0.24, 0.8, 1.2, 2.4, 7.0]\n \n density = [0.2]\n kappa = [1.25, 2.5, 5.0, 25.0, 62.5, 125.0, 200.0, 400.0]\n xi_L = [0.05, 0.1, 0.2, 1.0, 2.5, 5.0, 8.0, 16.0]\n fp = [0.0048, 0.0112, 0.024, 0.08, 0.24, 0.8, 1.2, 2.4, 7.0]\n Pe = [3.0, 7.0, 15.0, 50.0, 150.0, 500.0, 750.0, 1500.0, 4375.0]\n \n ## Create points\n points = []\n for i, x in enumerate(xi_L):\n for j, p in enumerate(Pe):\n points.append( Phase(x, p, kappa[i], fp[j], 'short') ) \n \n for point in points:\n point.analyse_type()\n point.set_plot_props()\n \n long_xil = [0.05, 0.2, 1.0, 2.5, 16.0]\n long_pe = [3.0, 150.0, 750.0, 8000.0, 10000.0]\n long_kappa = [5.0, 20.0, 100.0, 250.0, 1600.0]\n long_fp = [0.0003, 0.015, 0.075, 0.0, 0.0]\n long_points = []\n for i, x in enumerate(long_xil):\n for j, p in enumerate(long_pe):\n long_points.append( Phase(x, p, long_kappa[i], long_fp[j], 'long') ) \n \n for point in long_points:\n point.determine_type()\n point.set_plot_props()\n \n plot_data(points, long_points, save_base, xi_L, Pe)", "def prepare_for_use(self):\n self._random_state = check_random_state(self.random_state)\n self._offset = self.offset\n self._x_idx = 0", "def prepare_for_use(self):\n self._random_state = check_random_state(self.random_state)\n self._offset = self.offset\n self._x_idx = 0", "def prepare_for_use(self):\n self._random_state = check_random_state(self.random_state)\n self._offset = self.offset\n self._x_idx 
= 0", "def setup_mode():\n status_label.color = WHITE\n status_label.text = \"-SET-\"\n\n ave_label.color = BLACK # Turn off average label and value display\n ave_value.color = BLACK\n\n max_value.text = str(MAX_RANGE_F) # Display maximum range value\n min_value.text = str(MIN_RANGE_F) # Display minimum range value\n\n time.sleep(0.8) # Show SET status text before setting parameters\n status_label.text = \"\" # Clear status text\n\n param_index = 0 # Reset index of parameter to set\n\n setup_state = \"SETUP\" # Set initial state\n while setup_state == \"SETUP\":\n # Select parameter to set\n setup_state = \"SELECT_PARAM\" # Parameter selection state\n while setup_state == \"SELECT_PARAM\":\n param_index = max(0, min(2, param_index))\n status_label.text = SETUP_COLORS[param_index][0]\n image_group[param_index + 226].color = BLACK\n status_label.color = BLACK\n time.sleep(0.25)\n image_group[param_index + 226].color = SETUP_COLORS[param_index][1]\n status_label.color = WHITE\n time.sleep(0.25)\n\n param_index -= get_joystick()\n\n _buttons = panel.events.get()\n if _buttons and _buttons.pressed:\n if _buttons.key_number == BUTTON_UP: # HOLD button pressed\n param_index = param_index - 1\n if _buttons.key_number == BUTTON_DOWN: # SET button pressed\n param_index = param_index + 1\n if _buttons.key_number == BUTTON_HOLD: # HOLD button pressed\n play_tone(1319, 0.030) # Musical note E6\n setup_state = \"ADJUST_VALUE\" # Next state\n if _buttons.key_number == BUTTON_SET: # SET button pressed\n play_tone(1319, 0.030) # Musical note E6\n setup_state = \"EXIT\" # Next state\n\n # Adjust parameter value\n param_value = int(image_group[param_index + 230].text)\n\n while setup_state == \"ADJUST_VALUE\":\n param_value = max(32, min(157, param_value))\n image_group[param_index + 230].text = str(param_value)\n image_group[param_index + 230].color = BLACK\n status_label.color = BLACK\n time.sleep(0.05)\n image_group[param_index + 230].color = SETUP_COLORS[param_index][1]\n status_label.color = WHITE\n time.sleep(0.2)\n\n param_value += get_joystick()\n\n _buttons = panel.events.get()\n if _buttons and _buttons.pressed:\n if _buttons.key_number == BUTTON_UP: # HOLD button pressed\n param_value = param_value + 1\n if _buttons.key_number == BUTTON_DOWN: # SET button pressed\n param_value = param_value - 1\n if _buttons.key_number == BUTTON_HOLD: # HOLD button pressed\n play_tone(1319, 0.030) # Musical note E6\n setup_state = \"SETUP\" # Next state\n if _buttons.key_number == BUTTON_SET: # SET button pressed\n play_tone(1319, 0.030) # Musical note E6\n setup_state = \"EXIT\" # Next state\n\n # Exit setup process\n status_label.text = \"RESUME\"\n time.sleep(0.5)\n status_label.text = \"\"\n\n # Display average label and value\n ave_label.color = YELLOW\n ave_value.color = YELLOW\n return int(alarm_value.text), int(max_value.text), int(min_value.text)", "def _pre_draw_bge(self):\r\n self._pre_draw_common()\r\n # draw rays\r\n self._drawRays()", "def prepare_to_advance(self):\n\n self.capture_user_input()\n self.UI.reset_figure()\n # stopping the blocking event loop\n self.fig.canvas.stop_event_loop()", "def draw(self):\n self.draw_occupied_cells()\n self.draw_open_cells()\n self.draw_edges()\n plt.xlabel(\"Red\")\n plt.ylabel(\"Black\")\n plt.title('Hex')\n self.camera.snap()", "def pysweep_before_finish_init(self):\n self.displaycanvas = DisplayCanvas(self.pysweep.master, self.boardsize, self.lcounterlength, self.rcounterlength, self.images)\n self.displaycanvas.pack()\n\n 
self.pysweep.master.update_idletasks()\n self.displaycanvas.update_idletasks()\n # enode = self.arbitrary()\n # print('DisplayCanvas:', enode)", "def draw(self):", "def _env_setup(self, initial_qpos):\n raise NotImplementedError()", "def set_mode_point():\n global DRAW_MODE\n DRAW_MODE=\"point\"", "def setupWidget(self):\r\n self.generateCoordinates()\r\n self.modifyCoordinateLists()\r\n self.settings.movementMatrix = self.movementMatrix\r\n self.settings.ghostIntersectionList = self.ghostIntersectionList\r\n self.createBodies()\r\n print(\"GameW set\")", "def prepare_to_visualize(self):\n self.system.hold_structure_changes()\n for surface in self.inactive_surfaces:\n surface.activate_constraint()\n self.system.resume_structure_changes()", "def initialize_variables(self):\n self.sess.run(self.init)", "def __attrs_post_init__(self):\n self.path = (Path(CONFIG['conda_folder']) / 'envs' / self.name)", "def setup(self):\n #if not system.restore_snapshot():\n # self.log.debug(\"No snapshot to restore, if this is not expected please contact automation team\")\n if not pos.connect():\n self.tc_fail(\"Failed to connect to POS\")\n self.recover()", "def setup(self, gl_buffers, color_vbo, pos_vbo, partNumber):\n self.gl_objects = gl_buffers\n self.color_vbo, self.pos_vbo = color_vbo, pos_vbo\n self.partNumber = partNumber", "def setup(self):\n EventGenerator.setup(self)\n\n if self.egs5_dir is None:\n self.egs5_dir = self.get_install_dir()\n logger.debug(\"Using EGS5 from install dir: \" + self.egs5_dir)\n\n ## data directory\n self.egs5_data_dir = os.path.join(self.egs5_dir, \"data\")\n ## config directory\n self.egs5_config_dir = os.path.join(self.egs5_dir, \"config\")\n\n logger.debug(\"egs5_data_dir=%s\" % self.egs5_data_dir)\n logger.debug(\"egs5_config_dir=%s\" % self.egs5_config_dir)\n\n if os.path.exists(\"data\"):\n os.unlink(\"data\")\n os.symlink(self.egs5_data_dir, \"data\")\n\n if os.path.exists(\"pgs5job.pegs5inp\"):\n os.unlink(\"pgs5job.pegs5inp\")\n os.symlink(self.egs5_config_dir + \"/src/esa.inp\", \"pgs5job.pegs5inp\")\n\n logger.debug(\"Reading run parameters: {}\".format(self.run_params))\n ## run parameters\n self.run_param_data = RunParameters(self.run_params)\n\n # Set target thickness from job parameter or use the default from run parameters\n if self.target_thickness is not None:\n self.target_z = self.target_thickness\n logger.debug(\"Target thickness set from job param: {}\".format(self.target_z))\n else:\n self.target_z = self.run_param_data.get(\"target_z\")\n logger.debug(\"Target thickness set from run_params: {}\".format(self.target_z))\n\n ebeam = self.run_param_data.get(\"beam_energy\")\n electrons = self.run_param_data.get(\"num_electrons\") * self.bunches\n\n seed_data = \"%d %f %f %d\" % (self.seed, self.target_z, ebeam, electrons)\n logger.debug(\"Seed data (seed, target_z, ebeam, electrons): {}\".format(seed_data))\n seed_file = open(\"seed.dat\", 'w')\n seed_file.write(seed_data)\n seed_file.close()", "def __init__(self,\n env_name='blobble-world-v0'\n ):\n self._env_name = env_name\n\n # Take a timestamp. 
This will be used for any output files created in the output folder\n self._timestamp = datetime.datetime.now().strftime(\"%Y-%m-%d-%H-%M\")\n\n # Create training and evaluation environments\n self._train_py_env = suite_gym.load(self._env_name)\n self._eval_py_env = suite_gym.load(self._env_name)\n\n # Convert the training and test environments to Tensors\n self._train_env = tf_py_environment.TFPyEnvironment(self._train_py_env)\n self._eval_env = tf_py_environment.TFPyEnvironment(self._eval_py_env)\n print('=====================================================')\n print('Environments created for : ', self._env_name)\n print('Training Environment')\n print(' Observation Spec:')\n print(' ', self._train_env.time_step_spec().observation)\n print(' Reward Spec:')\n print(' ', self._train_env.time_step_spec().reward)\n print(' Action Spec:')\n print(' ', self._train_env.action_spec())\n print('Evaluation Environment')\n print(' Observation Spec:')\n print(' ', self._eval_env.time_step_spec().observation)\n print(' Reward Spec:')\n print(' ', self._eval_env.time_step_spec().reward)\n print(' Action Spec:')\n print(' ', self._eval_env.action_spec())\n print('=====================================================')\n\n self._config = BlobbleConfig('blobble_config.ini')\n self._config.print_config()\n\n # Get the demonstration parameters and output folder. We don't need these just yet but it's\n # good to do now in case there is an error in the config file (exception will be thrown)\n self._output_folder = (self._config.get_output_params()['output_folder'])\n\n self._num_demo_episodes = int(self._config.get_output_params()['num_demonstration_episodes'])\n demo_video = (self._config.get_output_params()['demonstration_video'])\n if demo_video == 'True':\n self._demo_video = True\n else:\n self._demo_video = False\n\n # Get and check the advanced learning parameters\n self._learning_rate = float(self._config.get_learning_adv_params()['learning_rate'])\n self._fc_layer_params = tuple(self._config.get_learning_adv_params()['fc_layer_params'].split(','))\n\n print('Create and train a neural network agent')\n self._neural_network_agent = create_neural_network_agent(self._train_env,\n self._learning_rate,\n self._fc_layer_params)\n\n learning_params = self._config.get_learning_params()\n train_neural_network(self._neural_network_agent,\n self._train_env,\n self._eval_env,\n num_train_iterations=learning_params['training_iterations'],\n log_interval=learning_params['training_log_interval'],\n eval_interval=learning_params['eval_interval'],\n num_eval_episodes=learning_params['num_eval_episodes'],\n replay_buffer_max_length=learning_params['replay_buffer_max_length'],\n collect_steps_per_iteration=learning_params['collect_steps_per_iteration'],\n output_folder=self._output_folder,\n timestamp=self._timestamp)", "def main():\n # background\n background = background_maker()\n\n # face\n face = face_maker()\n\n # eye\n eye_l = eye_maker()\n eye_r = eye_maker()\n\n # mouth\n mouth = mouth_maker()\n mouth_1 = GArc(60, 60, 290, 60)\n mouth_2 = GArc(60, 60, 190, 60)\n\n # nose\n nose = GOval(10, 10)\n nose.filled = True\n\n # ear\n ear_l = ear_maker()\n ear_r = ear_maker()\n ear_ll = ear2_maker()\n ear_rr = ear2_maker()\n\n # body\n body = body_maker()\n body2 = body2_maker()\n body3 = body3_maker()\n\n # label\n label = label_maker('Rilakkuma', 70)\n label2 = label_maker('Min', 10, font='Dialog')\n\n # arm\n arm_l = arm1_maker()\n arm_r = arm2_maker()\n\n # leg\n leg = leg_maker()\n leg2 = leg_maker()\n\n # show my 
draw\n window.add(background)\n window.add(leg, (window.width - leg.width) / 2 - body.width/3.7, (window.height - leg.height) / 2 + body.height*1.1)\n window.add(leg2, (window.width - leg2.width) / 2 + body.width / 3.7,\n (window.height - leg2.height) / 2 + body.height * 1.1)\n window.add(body, (window.width - body.width) / 2, (window.height - body.height) / 2 + face.height/1.4)\n window.add(body2, (window.width - body2.width) / 2,\n (window.height - body2.height) / 2 + face.height/1.4 + body.height/3.3)\n window.add(body3, (window.width - body3.width) / 2, (window.height - body3.height) / 2 + face.height/1.2)\n window.add(arm_l, (window.width - arm_l.width) / 2 - body.width / 2.9,\n (window.height - arm_l.height) / 2 + face.height / 1.5)\n window.add(arm_r, (window.width - arm_r.width) / 2 + body.width / 2.9,\n (window.height - arm_r.height) / 2 + face.height / 1.5)\n window.add(label, (window.width-label.width)/2, window.height/4)\n window.add(ear_l, (window.width - ear_l.width) / 2 - face.width / 2.25,\n (window.height - ear_l.height) / 2 - face.height / 3)\n window.add(ear_ll, (window.width - ear_ll.width) / 2 - face.width / 2.25,\n (window.height - ear_ll.height) / 2 - face.height / 3.5)\n window.add(ear_r, (window.width - ear_r.width) / 2 + face.width / 2.25,\n (window.height - ear_r.height) / 2 - face.height / 3)\n window.add(ear_rr, (window.width - ear_rr.width) / 2 + face.width / 2.25,\n (window.height - ear_rr.height) / 2 - face.height / 3.5)\n window.add(face, (window.width - face.width) / 2, (window.height - face.height) / 2)\n window.add(eye_l, (window.width - eye_l.width) / 2 - face.width / 5, (window.height - eye_l.height) / 2)\n window.add(eye_r, (window.width - eye_r.width) / 2 + face.width / 5, (window.height - eye_r.height) / 2)\n window.add(mouth, (window.width - mouth.width) / 2, (window.height - mouth.height) / 2 + face.height / 8)\n window.add(nose, (window.width - nose.width) / 2, (window.height - nose.height) / 2 + face.height / 12)\n window.add(mouth_1, (window.width - mouth_1.width) / 2 - face.width / 20,\n (window.height - mouth_1.height) / 2 + face.height / 11)\n window.add(mouth_2, (window.width - mouth_2.width) / 2 + face.width / 20,\n (window.height - mouth_2.height) / 2 + face.height / 11)\n window.add(label2, window.width-label2.width, window.height)\n\n # kuma2\n kuma2_color = '0xFFEEDD'\n face2 = face_maker(140, color=kuma2_color)\n\n eye2_l = eye_maker(size=15)\n eye2_r = eye_maker(size=15)\n\n mouth2 = mouth_maker(size=40)\n mouth2_1 = GArc(60, 60, 290, 60)\n mouth2_2 = GArc(60, 60, 190, 60)\n\n nose2 = GOval(8, 8)\n nose2.filled = True\n\n ear2_l = ear_maker(size=50, color=kuma2_color)\n ear2_r = ear_maker(size=50, color=kuma2_color)\n ear2_ll = ear2_maker(size=30, color='0xFFC1E0')\n ear2_rr = ear2_maker(size=30, color='0xFFC1E0')\n\n body_2 = body_maker(size=100, color=kuma2_color)\n body2_2 = body2_maker(size=85, color=kuma2_color)\n body3_2 = body3_maker(size=60)\n\n arm2_l = arm1_maker(size=40, color=kuma2_color)\n arm2_r = arm2_maker(size=40, color=kuma2_color)\n\n leg_2 = leg_maker(size=25, color=kuma2_color)\n leg2_2 = leg_maker(size=25, color=kuma2_color)\n\n buttons = GOval(15, 15)\n buttons.filled = True\n buttons.fill_color = 'red'\n\n window.add(leg_2, (window.width - leg_2.width) / 2 - face.width / 1.05 - body_2.width/3.3,\n (window.height - leg_2.height) / 2 + face.height / 1.4 + body2.height * 0.82)\n window.add(leg2_2, (window.width - leg2_2.width) / 2 - face.width / 1.05 + body_2.width/3.3,\n (window.height - leg2_2.height) / 2 + 
face.height / 1.4 + body2.height * 0.82)\n window.add(body_2, (window.width - body_2.width) / 2 - face.width/1.05,\n (window.height - body_2.height) / 2 + face.height / 1.4)\n window.add(body2_2, (window.width - body2_2.width) / 2 - face.width/1.05,\n (window.height - body2_2.height) / 2 + face.height / 1.4 + body_2.height / 3.3)\n window.add(body3_2, (window.width - body3_2.width) / 2 - face.width/1.05,\n (window.height - body3_2.height) / 2 + face.height / 1.2)\n window.add(arm2_l, (window.width - arm2_l.width) / 2 - face.width / 1.05 - body_2.width/2.9,\n (window.height - arm2_l.height) / 2 + face2.height / 1.06)\n window.add(arm2_r, (window.width - arm2_r.width) / 2 - face.width / 1.05 + body_2.width/2.9,\n (window.height - arm2_r.height) / 2 + face2.height / 1.06)\n window.add(ear2_l, (window.width - ear2_l.width) / 2 - face.width / 0.8,\n (window.height - ear2_l.height) / 2 - face2.height / 9)\n window.add(ear2_ll, (window.width - ear2_ll.width) / 2 - face.width / 0.8,\n (window.height - ear2_ll.height) / 2 - face2.height / 15)\n window.add(ear2_r, (window.width - ear2_r.width) / 2 - face.width / 1.5,\n (window.height - ear2_r.height) / 2 - face2.height / 9)\n window.add(ear2_rr, (window.width - ear2_rr.width) / 2 - face.width / 1.52,\n (window.height - ear2_rr.height) / 2 - face2.height / 15)\n window.add(face2, (window.width-face2.width)/2 - face.width/1.05, (window.height-face2.height)/2 + face2.height/4)\n window.add(eye2_l, (window.width - eye2_l.width) / 2 - face.width / 0.9,\n (window.height - eye2_l.height) / 2 + face2.height/4)\n window.add(eye2_r, (window.width - eye2_r.width) / 2 - face.width / 1.25,\n (window.height - eye2_r.height) / 2 + face2.height/4)\n window.add(mouth2, (window.width - mouth2.width) / 2 - face.width/1.05,\n (window.height - mouth2.height) / 2 + face2.height / 2.4)\n window.add(nose2, (window.width - nose2.width) / 2 - face.width/1.05,\n (window.height - nose2.height) / 2 + face2.height / 2.5)\n window.add(mouth2_1, (window.width - mouth2_1.width) / 2 - face.width / 1,\n (window.height - mouth2_1.height) / 2 + face2.height / 2.5)\n window.add(mouth2_2, (window.width - mouth2_2.width) / 2 - face.width / 1.1,\n (window.height - mouth2_2.height) / 2 + face2.height / 2.5)\n window.add(buttons, (window.width-buttons.width)/2 - face.width/1.05,\n (window.height-buttons.height)/2 + face.height/1.62)", "def _on_step(self) -> bool:\n # print(\"locals \", self.locals)\n # # what timestep you think\n # print(\"timestep \",CustomCallback.step)\n # # what timestep a2c or ppo2 learn() is on \n # print(\"a2c/ppo2 num timestep\",self.num_timesteps)\n \n # TODO: add flag to save screenshots or not\n subfolder = os.path.join(self.directory, 'screen/')\n filepath = os.path.join(subfolder)\n img_name = '_screenshot_' + str(self.num_timesteps)\n \n if(self.algo == \"A2C\" or self.algo == \"PPO2\"):\n # self.locals['obs'] gives black and white imgs\n obs = self.env.get_images()\n for i in range(self.num_envs):\n mpl.image.imsave(subfolder+\"env_\" + str(i) + img_name + \"_.png\", obs[i])\n elif (self.algo == \"DQN\"):\n self.env.ale.saveScreenPNG(subfolder+\"env_\" + str(0) + img_name + \"_.png\")\n\n step_stats = {self.num_timesteps: {\n 'num_timesteps': self.num_timesteps,\n 'state': self.num_timesteps/self.num_envs,\n }\n }\n # add step to dict\n CustomCallback.main_data_dict.update(step_stats)\n key = self.num_timesteps\n\n # collection of minimum data: action, reward, lives\n if(self.algo == \"DQN\"):\n CustomCallback.main_data_dict[key]['action_env_0'] = 
self.locals['action']\n CustomCallback.main_data_dict[key]['action_name_env_0'] = self.actions[self.locals['env_action']]\n if(self.game == \"Pong\"):\n CustomCallback.main_data_dict[key]['curr_score_env_0'] = self.locals['episode_rewards'][-1]\n else:\n CustomCallback.main_data_dict[key]['cumulative_life_reward'] = self.locals['episode_rewards'][-1]\n if(self.isLives == True):\n CustomCallback.main_data_dict[CustomCallback.step]['lives'] = self.locals['info']['ale.lives']\n else:\n for i in range(self.num_envs):\n CustomCallback.main_data_dict[key]['action_env_'+str(i)] = self.locals['actions'][i]\n CustomCallback.main_data_dict[key]['action_name_env_'+str(i)] = self.actions[self.locals['actions'][i]]\n CustomCallback.main_data_dict[key]['step_reward_env_'+str(i)] = self.locals['rewards'][i]\n if(self.isLives == True):\n if(CustomCallback.step == 1):\n CustomCallback.main_data_dict[key]['lives_env_'+str(i)] = 3\n if(CustomCallback.step >= 2):\n CustomCallback.main_data_dict[key]['lives_env_'+str(i)] = self.locals['infos'][i]['ale.lives']\n\n if(self.game == \"Pong\" and self.algo != \"DQN\"):\n # extra processing for Pong scores\n self.find_life_game_info_a2c_ppo2_pong()\n\n # at the last step, write data into csv files\n if(CustomCallback.step == (self.num_steps/self.num_envs)):\n self.make_dataframes(self.df_list)\n # save minimal data\n self.df_to_csv(\"df_og.csv\", self.df_list)\n self.df_to_parquet()\n CustomCallback.step = CustomCallback.step + 1\n return True", "def _setup():\n\n # Set random seeds\n tf.set_random_seed(CONFIG.seed)\n np.random.seed(CONFIG.seed)\n\n gs_env = tf.Variable(0, trainable=False, name='global_step_env')\n inc_gs = tf.assign_add(gs_env, 1)\n\n # Make the gym environment\n if CONFIG.env == 'mnist':\n env = MNIST()\n elif CONFIG.env == 'binary':\n env = BinaryClassifier(done_every=CONFIG.ep_len)\n else:\n raise ValueError('Do not recognize environment ', CONFIG.env)\n\n discrete = isinstance(env.action_space, gym.spaces.Discrete)\n\n # Observation and action sizes\n ob_dim = env.observation_space.shape[0]\n ac_dim = env.action_space.n if discrete else env.action_space.shape[0]\n\n policy_net = PolicyEstimator(ob_dim, ac_dim, gs_env)\n value_net = ValueEstimator(ob_dim)\n\n return env, inc_gs, policy_net, value_net", "def __init__(self, env, random_seed=None):\n self.env = env \n self.RandomState = np.random.RandomState(random_seed)", "def Initialize():\n # --- Set four-character run id, comment lines, user's name.\n top.pline2 = \"Example 3D beam in a FODO lattice\"\n top.pline1 = \"S-G cigar beam. 64x64x256\"\n top.runmaker = \"David P. 
Grote\"\n\n # --- Invoke setup routine - it is needed to created a cgm file for plots\n setup()\n\n # --- Create the beam species\n beam = Species(type=Potassium,charge_state=+1,name=\"Beam species\")\n\n # --- Set input parameters describing the beam, 72 to 17.\n beam.b0 = 15.358933450767e-3\n beam.a0 = 8.6379155933081e-3\n beam.x0 = 3.*mm\n beam.emit = 51.700897052724e-6\n beam.ap0 = 0.e0\n beam.bp0 = 0.e0\n beam.ibeam = 2.e-03\n beam.vbeam = 0.e0\n beam.ekin = 80.e3\n beam.aion = beam.type.A\n beam.zion = beam.charge_state\n top.lrelativ = false\n top.derivqty()\n beam.vthz = .5e0*beam.vbeam*beam.emit/sqrt(beam.a0*beam.b0) # Vthz ~ Vthperp\n\n # +++ Set up arrays describing lattice.\n # --- Set temp variables.\n hlp = 36.0e-2 # half lattice period length\n piperad = 3.445e-2 # pipe radius\n quadlen = 11.e-2 # quadrupole length\n gaplen = 4.*cm\n rodlen = quadlen + gaplen\n dbdx = .949/quadlen\n\n # --- Set general lattice variables.\n top.tunelen = 2.e0*hlp\n env.zl = -hlp*2\n env.zu = -env.zl\n env.dzenv = top.tunelen/100.e0\n\n # --- Set up quadrupoles\n addnewquad(zs= -quadlen/2.,\n ze= +quadlen/2.,\n db=-dbdx,ap=piperad)\n addnewquad(zs=hlp - quadlen/2.,\n ze=hlp + quadlen/2.,\n db=+dbdx,ap=piperad)\n addnewquad(zs=2.*hlp - quadlen/2.,\n ze=2.*hlp + quadlen/2.,\n db=-dbdx,ap=piperad)\n top.zlatstrt = 0.\n top.zlatperi = 2.e0*hlp\n\n # +++ Set input parameters describing the 3d simulation.\n w3d.nx = 64/2\n w3d.ny = 64/2\n w3d.nz = 256/2\n steps_p_perd = 50\n top.dt = (top.tunelen/steps_p_perd)/beam.vbeam\n\n # --- Set to finite beam.\n top.pbound0 = top.pboundnz = periodic\n top.pboundxy = absorb\n w3d.xmmin = -piperad\n w3d.xmmax = piperad\n w3d.ymmin = -piperad\n w3d.ymmax = piperad\n w3d.zmmin = -hlp*2\n w3d.zmmax = +hlp*2\n top.prwall = piperad\n\n # --- Set pulse length.\n beam.zimin = w3d.zmmin*.95/2.\n beam.zimax = w3d.zmmax*.95/2.\n\n # --- Load Semi-Gaussian cigar beam.\n top.npmax = 20000\n w3d.distrbtn = \"semigaus\"\n w3d.cigarld = true\n w3d.xrandom = \"digitrev\"\n w3d.vtrandom = \"digitrev\"\n w3d.vzrandom = \"digitrev\"\n w3d.ldprfile = \"polar\"\n w3d.cylinder = false\n top.straight = .8\n\n # --- set up field solver\n w3d.l4symtry = true\n w3d.bound0 = periodic\n w3d.boundnz = periodic\n w3d.boundxy = dirichlet\n\n solver = MultiGrid3D()\n registersolver(solver)\n\n pipe = ZCylinderOut(piperad,4.,voltage=0.)\n installconductors(pipe,dfill=largepos)\n\n # --- Run the envelope solver to provide data used to initialize particles.\n package(\"env\")\n generate()\n step()\n\n # --- Generate the PIC code (allocate storage, load ptcls, t=0 plots, etc.).\n package(\"w3d\")\n generate()\n return", "def game_draw(self):\n pass", "def start(self):\n self.start_pre()\n\n #scn = bpy.context.scene\n #bpy.ops.ed.undo_push() # push current state to undo\n\n #self.header_text_set(\"PointsPicker\")\n #self.cursor_modal_set(\"CROSSHAIR\")\n #self.manipulator_hide()\n #self.b_pts = list() # list of 'Point' objects (see /lib/point.py)\n self.b_pts = []\n \n \n self.points_shader = None\n self.points_batch = None \n default_keymap = {\n \"add\": {\"LEFTMOUSE\"},\n \"grab\": {\"LEFTMOUSE\"},\n \"remove\": {\"ALT+LEFTMOUSE\", \"RIGHTMOUSE\"},\n \"commit\": {\"RET\"},\n \"cancel\": {\"ESC\"},\n \"done\": {'ENTER', 'UP_ARROW'}\n }\n\n self.actions = ActionHandler(self.context, default_keymap)\n #self.reload_stylings()\n \n \n self.variable_1 = BoundFloat('''options['variable_1']''', min_value =0.5, max_value = 15.5)\n self.variable_2 = BoundInt('''self.variable_2_gs''', min_value = 0, 
max_value = 10)\n self.variable_3 = BoundBool('''options['variable_3']''')\n \n self.ui_setup()\n self.ui_setup_post()\n\n self.snap_type = \"SCENE\" #'SCENE' 'OBJECT'\n self.snap_ob = None #bpy.context.object\n \n self.started = False\n \n \n self.selected = -1\n self.hovered = [None, -1]\n\n self.grab_undo_loc = None\n self.grab_undo_no = None\n self.mouse = (None, None)\n\n self.xform = XForm(Matrix.Identity(4))\n\n self.d3_points_render = D3PointsRender(self, render_opts)\n \n self.start_post()\n self.update_ui()", "def setUp(self):\n self.location = [(0, 0), (0, 1)]\n self.hit = (0, 0)", "def run(self):\n self.simulate_test_data()\n self.pipeline_test_data()\n self.plot_jump_flags_image()\n self.plot_groupdq_flags(pixel=[884, 550])\n self.plot_ramps_pre_post_correction(pixel=[884, 550])", "def draw(self, key):\n # Screen size\n max_y, max_x = self.stdscr.getmaxyx()\n # Evaluate size chart\n size_x = [2, max_x - 10]\n size_y = [1, max_y * 2 // 3 - 1]\n # Draw the GPU chart\n if 'GR3D' in self.jetson.stats:\n self.chart_gpu.draw(self.stdscr, size_x, size_y)\n # Percent Gauge GPU\n gpu = self.jetson.stats.get('GR3D', {})\n linear_gauge(self.stdscr, offset=max_y * 2 // 3, start=2, size=max_x // 2,\n name='GPU',\n value=gpu.get('val', 0),\n label=label_freq(gpu),\n status='ON' if gpu else 'REQUIRE SUDO',\n color=curses.color_pair(6))\n # Temperature GPU\n if \"GPU\" in self.jetson.stats['TEMP']:\n temp_gpu = self.jetson.stats['TEMP']['GPU']\n plot_name_info(self.stdscr, max_y * 2 // 3, max_x // 2 + 4, \"GPU Temp\", str(temp_gpu) + \"C\")\n # Jetson clocks status\n jc = self.jetson.jetson_clocks\n if jc is not None:\n jc_status = jc.status\n if jc_status == \"active\":\n color = curses.color_pair(2) # Running (Green)\n elif jc_status == \"inactive\":\n color = curses.A_NORMAL # Normal (Grey)\n elif \"ing\" in jc_status:\n color = curses.color_pair(3) # Warning (Yellow)\n else:\n color = curses.color_pair(1) # Error (Red)\n # Show if JetsonClock is enabled or not\n if jc.enable:\n jc_status = \"[\" + jc_status + \"]\"\n plot_name_info(self.stdscr, max_y * 2 // 3 + 2, 2, \"Jetson Clocks\", jc_status, color)\n # NVP Model\n nvpmodel = self.jetson.nvpmodel\n if nvpmodel is not None:\n plot_name_info(self.stdscr, max_y * 2 // 3 + 3, 2, \"NV Power[\" + str(nvpmodel.num) + \"]\", nvpmodel.mode)", "def plot_config_3d(view, trace, marker_names):\n\n if view == \"3D Plot\":\n with st.expander(\"3D Plot Configuration\", expanded=True):\n col_plot_type, col_grid_res, col_fill, col_interp = st.columns(4)\n col_col_type, col_choice, col_preview, col_overlay = st.columns(4)\n trace[\"Chart_Type\"] = col_plot_type.selectbox(\"Plot Type\", [\"Contour\",\"3D Scatter\",\"Surface\",\"Heatmap\"], key = \"Chart_Type\")\n col_col_type.selectbox('Color Map Type', ['Sequential','Diverging'], key=\"Color_Set_Type\")\n\n if st.session_state[\"Color_Set_Type\"] == 'Sequential':\n color_map = list(sequential_color_dict().keys())\n else:\n color_map = list(diverging_color_dict().keys())\n\n color_set = col_choice.selectbox(\"Color Map\", color_map) \n if st.session_state[\"Color_Set_Type\"] == 'Sequential':\n st.session_state['Color_Palette'] = sequential_color_dict().get(color_set)\n else:\n st.session_state['Color_Palette'] = diverging_color_dict().get(color_set)\n\n colormap_preview = plot_color_set(st.session_state['Color_Palette'], color_set, view)\n col_preview.image(colormap_preview, use_column_width = True)\n\n if trace[\"Chart_Type\"] != '3D Scatter':\n trace[\"Grid_Res\"] = col_grid_res.number_input(\"Grid 
Resolution\", min_value=0.0, max_value=100000.0, value=50.0, step=0.5, key=\"Grid_Res\")\n trace[\"Fill_Value\"] = col_fill.selectbox(\"Fill Value\", [\"nan\",0], help=\"fill missing data with the selected value\", key = \"Fill_Value\")\n trace[\"Interp_Method\"] = col_interp.selectbox(\"Interpolation Method\", [\"linear\",\"nearest\",\"cubic\"], key = \"Interp_Method\")\n\n else:\n trace[\"Fill_Value\"] = None\n trace[\"Interp_Method\"] = None\n trace[\"Grid_Res\"] = None\n \n st.session_state[\"Overlay\"] = col_overlay.checkbox(\"Overlay Original Data\", help=\"Display scatter of original data overlayed on chart\")\n \n if st.session_state[\"Overlay\"] == True:\n st.subheader(\"Overlay\")\n col_overlay_alpha, col_overlay_marker, col_overlay_color = st.columns(3)\n overlay_alpha = col_overlay_alpha.slider(\"Opacity\",value=0.5,min_value=0.0, max_value=1.0, step=0.01, key = \"Overlay_Alpha\")\n overlay_marker = col_overlay_marker.selectbox(\"Style\", marker_names, help=\"https://plotly.com/python/marker-style/\", key = \"Overlay Marker\")\n overlay_color = col_overlay_color.color_picker('Pick a color ', '#000000', key = \"Overlay Color\")\n else:\n overlay_alpha = None\n overlay_marker = None\n overlay_color = None\n else:\n trace[\"Chart_Type\"] = None\n st.session_state['Color_Palette'] = None\n trace[\"Fill_Value\"] = None\n trace[\"Interp_Method\"] = None\n trace[\"Grid_Res\"] = None\n\n\n\n return trace[\"Chart_Type\"], trace[\"Fill_Value\"], trace[\"Interp_Method\"], trace[\"Grid_Res\"], st.session_state['Color_Palette'], st.session_state[\"Overlay\"], overlay_alpha, overlay_marker, overlay_color", "def InitEnvironment(self):\r\n\t\t\r\n\t\t# Turn antialiasing on\r\n\t\trender.setAntialias(AntialiasAttrib.MMultisample,1)\r\n\t\t\r\n\t\t# load the falcon model\r\n\t\tfalcon = loader.loadModel(\"Content/falcon/falcon.bam\")\r\n\t\tfalcon.setScale(30)\r\n\t\tfalcon.setPos(0, 0, 28.5)\r\n\t\tfalcon.reparentTo(render)", "def initialise(self):\n self.logger.debug(\" %s [GenerateNextPose::initialise()]\" % self.name)", "def draw_defaults(self):\n\n pass", "def update_plot():\n pass", "def init():\n \n # General parameters\n exp_path = '/home/laura/Documents/stacks tif/1705_regMovie.tif' # experimental tif stack (grayscale)\n bin_path = '/home/laura/Documents/stacks tif/1705/1705_binarizedMovie.tif' # binarized tif stack\n vect_path = '/home/laura/Documents/STAGE3/1705_NET/' # gpickle directory\n dest_path = '/home/laura/Documents/STAGE3/1705_NET/superposition' # output directory\n verbose = True\n debug = True\n invert = True \n main_params = [exp_path, bin_path, vect_path, dest_path, verbose, debug, invert]\n \n # Output options\n doImg = -1 # image index\n doStack = False \n doVideo = False \n compress = 3 # advice: no more than 5\n output_params = [doImg, doStack, doVideo, compress]\n \n # Drawing options (colors as BGR)\n line = True # edges drawing\n line_color = (0, 255, 0) # green \n line_size = 1 \n apex_color = (0, 0, 255) # red\n apex_size = 5\n node_color = (255, 0, 0) # blue\n node_size = 5\n body_color = (0, 255, 0) # green\n body_size = 3\n drawing_params = [line, line_color, line_size, apex_color, apex_size,\n node_color, node_size, body_color, body_size]\n \n return main_params, output_params, drawing_params", "def _setFig(self):\n self.p.background_fill_color = grey['light']\n self.p.xgrid.grid_line_color = None\n self.p.ygrid.grid_line_color = None\n self.p.ygrid.grid_line_dash = 'dotted'\n self.p.ygrid.grid_line_dash = 'dotted'\n\n self.p.xgrid.minor_grid_line_color = 
grey['median']\n self.p.ygrid.minor_grid_line_color = grey['median']\n self.p.xgrid.minor_grid_line_dash = 'dotted'\n self.p.ygrid.minor_grid_line_dash = 'dotted'\n\n self.p.xaxis.axis_label = \"tsne_feature_0\"\n self.p.yaxis.axis_label = \"tsne_feature_1\"", "def initialize_global_variables():\n\n # Screen Padding\n global BottomPadding, TopPadding, LeftPadding, RightPadding\n # Window Decoration\n global WinTitle, WinBorder\n # Grid Layout\n global CORNER_WIDTHS, CENTER_WIDTHS, Monitors, WidthAdjustment\n # Simple Layout\n global MwFactor\n # System Desktop and Screen Information\n global MaxWidth, MaxHeight, OrigX, OrigY, Desktop, WinList, OldWinList\n # Miscellaneous \n global TempFile, WindowFilter\n \n Config = initconfig()\n cfgSection=\"DEFAULT\"\n \n # use \"default\" for configurations written using the original stiler\n if Config.has_section(\"default\"):\n cfgSection=\"default\"\n\n BottomPadding = Config.getint(cfgSection,\"BottomPadding\")\n TopPadding = Config.getint(cfgSection,\"TopPadding\")\n LeftPadding = Config.getint(cfgSection,\"LeftPadding\")\n RightPadding = Config.getint(cfgSection,\"RightPadding\")\n WinTitle = Config.getint(cfgSection,\"WinTitle\")\n WinBorder = Config.getint(cfgSection,\"WinBorder\")\n MwFactor = Config.getfloat(cfgSection,\"MwFactor\")\n TempFile = Config.get(cfgSection,\"TempFile\")\n Monitors = Config.getint(cfgSection,\"Monitors\")\n WidthAdjustment = Config.getfloat(cfgSection,\"WidthAdjustment\")\n WindowFilter = Config.getboolean(cfgSection,\"WindowFilter\")\n CORNER_WIDTHS = map(lambda y:float(y),Config.get(cfgSection,\"GridWidths\").split(\",\"))\n\n # create the opposite section for each corner_width\n opposite_widths = []\n for width in CORNER_WIDTHS:\n opposite_widths.append(round(abs(1.0-width),2))\n\n # add the opposites\n CORNER_WIDTHS.extend(opposite_widths)\n\n CORNER_WIDTHS=list(set(CORNER_WIDTHS)) # filter out any duplicates\n CORNER_WIDTHS.sort()\n\n CENTER_WIDTHS = filter(lambda y: y < 0.5, CORNER_WIDTHS)\n CENTER_WIDTHS = map(lambda y:round(abs(y*2-1.0),2),CENTER_WIDTHS)\n CENTER_WIDTHS.append(1.0)\t\t\t\t # always allow max for centers\n CENTER_WIDTHS = list(set(CENTER_WIDTHS)) # filter dups\n CENTER_WIDTHS.sort()\n\n # Handle multiple monitors\n CORNER_WIDTHS=map(lambda y:round(y/Monitors,2)+WidthAdjustment,CORNER_WIDTHS)\n CENTER_WIDTHS=map(lambda y:round(y/Monitors,2)+WidthAdjustment,CENTER_WIDTHS)\n\n logging.debug(\"corner widths: %s\" % CORNER_WIDTHS)\n logging.debug(\"center widths: %s\" % CENTER_WIDTHS)\n\n (Desktop,OrigXstr,OrigYstr,MaxWidthStr,MaxHeightStr,WinList) = initialize()\n MaxWidth = int(MaxWidthStr) - LeftPadding - RightPadding\n MaxHeight = int(MaxHeightStr) - TopPadding - BottomPadding\n OrigX = int(OrigXstr) + LeftPadding\n OrigY = int(OrigYstr) + TopPadding \n OldWinList = retrieve(TempFile)", "def environment_created(self):\n\n pass", "def appGL(deltaT):#-------------------------------- OpenGL UPDATE\n pass # -> Delete this line if you do something here !", "def _child_init(self):\n self._create_init_gp()", "def on_draw(self):\n # Clearing the buffers\n self.clear()\n self.set3d()\n # Makes it so color can be added\n glColor3d(1, 1, 1)\n\n self.push(self.player.pos, self.player.rot)\n self.model.draw()\n glPopMatrix()\n self.model.process_queue_slowly()\n\n # Draws the crosshairs on the screen\n self.set2d()\n self.draw_position_label()\n self.draw_reticle()", "def draw():\n\n max_frames = 1_000\n\n # Normally this would be just in the setup but for some reason\n # python P5 doesn't like just 
setting the background in the setup\n # loop\n if frame_count == 1:\n background(51)\n\n # Instantiate a new generator for the colors of the line. The generator\n # picks a random starting point and then\n colors = color(5)\n\n # Set up the default length and angle\n length = height / 35\n angle = (width / height) * 0.25\n\n # Reset the matrix to the initial translation\n reset_matrix()\n translate(width / 2, height)\n\n # Loop through the collatz sequence for the current frame count.\n # Reversed so we can start at 1 and then go to n\n for collatz_number in reversed(list(collatz(frame_count))):\n # If the current number is even rotate to the right\n if collatz_number % 2 == 0:\n rotate(angle)\n\n # If it is odd then rotate to the left with some extra \"umph\"\n # because it looks better\n else:\n rotate(-angle * 1.5)\n\n # Set the stroke to the current color and add some alpha so that\n # overlapping paths have more color\n r, g, b = next(colors)\n stroke(r, g, b, 20)\n\n # Draw the line for the collatz sequence\n line((0, 0), (0, -length))\n\n # Translate to the end of the line\n translate(0, -length)\n\n # Check if we are at the end, if so stop looping\n if frame_count > max_frames:\n no_loop()", "def _init_env_variables(self):\n raise NotImplementedError()" ]
[ "0.6523757", "0.6017574", "0.5880544", "0.5814766", "0.57777816", "0.5713455", "0.5674024", "0.56632835", "0.56570673", "0.5616937", "0.56138486", "0.5583289", "0.5570645", "0.55119085", "0.5494234", "0.546833", "0.54369044", "0.54284686", "0.54177946", "0.5411796", "0.54018736", "0.5394893", "0.5387748", "0.53675354", "0.5353824", "0.5350876", "0.5336216", "0.53278697", "0.53173226", "0.5301016", "0.5300163", "0.5282256", "0.5281637", "0.5270738", "0.52700275", "0.5266221", "0.52625406", "0.525982", "0.5247953", "0.5241658", "0.5241248", "0.5232133", "0.52290857", "0.5222172", "0.5212846", "0.52068067", "0.52040124", "0.5187863", "0.5186182", "0.5185893", "0.5185893", "0.5185893", "0.5185893", "0.5182087", "0.5172873", "0.51712644", "0.51690286", "0.5166745", "0.5166745", "0.5166745", "0.5165213", "0.516428", "0.516148", "0.515769", "0.51524836", "0.5151243", "0.51488113", "0.5144251", "0.51388544", "0.51348376", "0.513356", "0.51325965", "0.5131754", "0.513002", "0.5127173", "0.5126891", "0.5125216", "0.5124526", "0.51243806", "0.51212287", "0.5116737", "0.5114894", "0.5114603", "0.5103273", "0.5099847", "0.50914186", "0.50868964", "0.50852364", "0.50799865", "0.50792336", "0.5074597", "0.50724995", "0.506667", "0.5066189", "0.5063909", "0.5060671", "0.5055335", "0.50549823", "0.5053878", "0.5051104" ]
0.6062119
1
Injects the p5js's sketch instance as a global variable to setup and draw functions
def global_p5_injection(p5_sketch): def decorator(f): def wrapper(): global _P5_INSTANCE _P5_INSTANCE = p5_sketch return pre_draw(_P5_INSTANCE, f) return wrapper return decorator
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup():\r\n #this happens just once\r\n size(width, height) #instead of create_canvas\r", "def __init__(self, parent):\n super(P5, self).__init__(parent)\n self.shapes = []\n self.scenes = []\n self.current_scene = 0\n self.objects = []\n self.lighting = True\n self.draw_axes = True", "def setup_draw(self):\n pass", "def setup(self):\n self.star_list = arcade.SpriteList()\n\n for i in range(50):\n # Create snowflake instance\n singlestar = Singlestar()\n # Add snowflake to snowflake list\n self.star_list.append(singlestar)\n\n # Don't show the mouse pointer\n self.set_mouse_visible(False)\n\n # Set the background color\n arcade.set_background_color(arcade.color.BLACK)", "def setup():\n size(800, 600)\n stroke_weight(3)", "def pre_draw(p5_instance, draw_func):\n global _CTX_MIDDLE, _DEFAULT_FILL, _DEFAULT_LEADMULT, _DEFAULT_STROKE, _DEFAULT_TEXT_FILL\n\n global ADD, ALT, ARROW, AUTO, AUDIO, AXES, BACKSPACE, BASELINE, BEVEL, BEZIER, BLEND, BLUR, BOLD, BOLDITALIC\n global BOTTOM, BURN, CENTER, CHORD, CLAMP, CLOSE, CONTROL, CORNER, CORNERS, CROSS, CURVE, DARKEST\n global DEG_TO_RAD, DEGREES, DELETE, DIFFERENCE, DILATE, DODGE, DOWN_ARROW, ENTER, ERODE, ESCAPE, EXCLUSION\n global FILL, GRAY, GRID, HALF_PI, HAND, HARD_LIGHT, HSB, HSL, IMAGE, IMMEDIATE, INVERT, ITALIC, LANDSCAPE\n global LEFT, LEFT_ARROW, LIGHTEST, LINE_LOOP, LINE_STRIP, LINEAR, LINES, MIRROR, MITER, MOVE, MULTIPLY, NEAREST\n global NORMAL, OPAQUE, OPEN, OPTION, OVERLAY, P2D, PI, PIE, POINTS, PORTRAIT, POSTERIZE, PROJECT, QUAD_STRIP, QUADRATIC\n global QUADS, QUARTER_PI, RAD_TO_DEG, RADIANS, RADIUS, REPEAT, REPLACE, RETURN, RGB, RIGHT, RIGHT_ARROW\n global ROUND, SCREEN, SHIFT, SOFT_LIGHT, SQUARE, STROKE, SUBTRACT, TAB, TAU, TEXT, TEXTURE, THRESHOLD, TOP\n global TRIANGLE_FAN, TRIANGLE_STRIP, TRIANGLES, TWO_PI, UP_ARROW, VIDEO, WAIT, WEBGL\n\n global frameCount, focused, displayWidth, displayHeight, windowWidth, windowHeight, width, height\n global disableFriendlyErrors, deviceOrientation, accelerationX, accelerationY, accelerationZ\n global pAccelerationX, pAccelerationY, pAccelerationZ, rotationX, rotationY, rotationZ\n global pRotationX, pRotationY, pRotationZ, turnAxis, keyIsPressed, key, keyCode, mouseX, mouseY, pmouseX, pmouseY\n global winMouseX, winMouseY, pwinMouseX, pwinMouseY, mouseButton, mouseIsPressed, touches, pixels\n\n _CTX_MIDDLE = p5_instance._CTX_MIDDLE\n _DEFAULT_FILL = p5_instance._DEFAULT_FILL\n _DEFAULT_LEADMULT = p5_instance._DEFAULT_LEADMULT\n _DEFAULT_STROKE = p5_instance._DEFAULT_STROKE\n _DEFAULT_TEXT_FILL = p5_instance._DEFAULT_TEXT_FILL\n\n ADD = p5_instance.ADD\n ALT = p5_instance.ALT\n ARROW = p5_instance.ARROW\n AUDIO = p5_instance.AUDIO\n AUTO = p5_instance.AUTO\n AXES = p5_instance.AXES\n BACKSPACE = p5_instance.BACKSPACE\n BASELINE = p5_instance.BASELINE\n BEVEL = p5_instance.BEVEL\n BEZIER = p5_instance.BEZIER\n BLEND = p5_instance.BLEND\n BLUR = p5_instance.BLUR\n BOLD = p5_instance.BOLD\n BOLDITALIC = p5_instance.BOLDITALIC\n BOTTOM = p5_instance.BOTTOM\n BURN = p5_instance.BURN\n CENTER = p5_instance.CENTER\n CHORD = p5_instance.CHORD\n CLAMP = p5_instance.CLAMP\n CLOSE = p5_instance.CLOSE\n CONTROL = p5_instance.CONTROL\n CORNER = p5_instance.CORNER\n CORNERS = p5_instance.CORNERS\n CROSS = p5_instance.CROSS\n CURVE = p5_instance.CURVE\n DARKEST = p5_instance.DARKEST\n DEG_TO_RAD = p5_instance.DEG_TO_RAD\n DEGREES = p5_instance.DEGREES\n DELETE = p5_instance.DELETE\n DIFFERENCE = p5_instance.DIFFERENCE\n DILATE = p5_instance.DILATE\n DODGE = p5_instance.DODGE\n DOWN_ARROW = 
p5_instance.DOWN_ARROW\n ENTER = p5_instance.ENTER\n ERODE = p5_instance.ERODE\n ESCAPE = p5_instance.ESCAPE\n EXCLUSION = p5_instance.EXCLUSION\n FILL = p5_instance.FILL\n GRAY = p5_instance.GRAY\n GRID = p5_instance.GRID\n HALF_PI = p5_instance.HALF_PI\n HAND = p5_instance.HAND\n HARD_LIGHT = p5_instance.HARD_LIGHT\n HSB = p5_instance.HSB\n HSL = p5_instance.HSL\n IMAGE = p5_instance.IMAGE\n IMMEDIATE = p5_instance.IMMEDIATE\n INVERT = p5_instance.INVERT\n ITALIC = p5_instance.ITALIC\n LANDSCAPE = p5_instance.LANDSCAPE\n LEFT = p5_instance.LEFT\n LEFT_ARROW = p5_instance.LEFT_ARROW\n LIGHTEST = p5_instance.LIGHTEST\n LINE_LOOP = p5_instance.LINE_LOOP\n LINE_STRIP = p5_instance.LINE_STRIP\n LINEAR = p5_instance.LINEAR\n LINES = p5_instance.LINES\n MIRROR = p5_instance.MIRROR\n MITER = p5_instance.MITER\n MOVE = p5_instance.MOVE\n MULTIPLY = p5_instance.MULTIPLY\n NEAREST = p5_instance.NEAREST\n NORMAL = p5_instance.NORMAL\n OPAQUE = p5_instance.OPAQUE\n OPEN = p5_instance.OPEN\n OPTION = p5_instance.OPTION\n OVERLAY = p5_instance.OVERLAY\n P2D = p5_instance.P2D\n P3D = p5_instance.WEBGL\n PI = p5_instance.PI\n PIE = p5_instance.PIE\n POINTS = p5_instance.POINTS\n PORTRAIT = p5_instance.PORTRAIT\n POSTERIZE = p5_instance.POSTERIZE\n PROJECT = p5_instance.PROJECT\n QUAD_STRIP = p5_instance.QUAD_STRIP\n QUADRATIC = p5_instance.QUADRATIC\n QUADS = p5_instance.QUADS\n QUARTER_PI = p5_instance.QUARTER_PI\n RAD_TO_DEG = p5_instance.RAD_TO_DEG\n RADIANS = p5_instance.RADIANS\n RADIUS = p5_instance.RADIUS\n REPEAT = p5_instance.REPEAT\n REPLACE = p5_instance.REPLACE\n RETURN = p5_instance.RETURN\n RGB = p5_instance.RGB\n RIGHT = p5_instance.RIGHT\n RIGHT_ARROW = p5_instance.RIGHT_ARROW\n ROUND = p5_instance.ROUND\n SCREEN = p5_instance.SCREEN\n SHIFT = p5_instance.SHIFT\n SOFT_LIGHT = p5_instance.SOFT_LIGHT\n SQUARE = p5_instance.SQUARE\n STROKE = p5_instance.STROKE\n SUBTRACT = p5_instance.SUBTRACT\n TAB = p5_instance.TAB\n TAU = p5_instance.TAU\n TEXT = p5_instance.TEXT\n TEXTURE = p5_instance.TEXTURE\n THRESHOLD = p5_instance.THRESHOLD\n TOP = p5_instance.TOP\n TRIANGLE_FAN = p5_instance.TRIANGLE_FAN\n TRIANGLE_STRIP = p5_instance.TRIANGLE_STRIP\n TRIANGLES = p5_instance.TRIANGLES\n TWO_PI = p5_instance.TWO_PI\n UP_ARROW = p5_instance.UP_ARROW\n VIDEO = p5_instance.VIDEO\n WAIT = p5_instance.WAIT\n WEBGL = p5_instance.WEBGL\n\n frameCount = p5_instance.frameCount\n focused = p5_instance.focused\n displayWidth = p5_instance.displayWidth\n displayHeight = p5_instance.displayHeight\n windowWidth = p5_instance.windowWidth\n windowHeight = p5_instance.windowHeight\n width = p5_instance.width\n height = p5_instance.height\n disableFriendlyErrors = p5_instance.disableFriendlyErrors\n deviceOrientation = p5_instance.deviceOrientation\n accelerationX = p5_instance.accelerationX\n accelerationY = p5_instance.accelerationY\n accelerationZ = p5_instance.accelerationZ\n pAccelerationX = p5_instance.pAccelerationX\n pAccelerationY = p5_instance.pAccelerationY\n pAccelerationZ = p5_instance.pAccelerationZ\n rotationX = p5_instance.rotationX\n rotationY = p5_instance.rotationY\n rotationZ = p5_instance.rotationZ\n pRotationX = p5_instance.pRotationX\n pRotationY = p5_instance.pRotationY\n pRotationZ = p5_instance.pRotationZ\n turnAxis = p5_instance.turnAxis\n keyIsPressed = p5_instance.keyIsPressed\n key = p5_instance.key\n keyCode = p5_instance.keyCode\n mouseX = p5_instance.mouseX\n mouseY = p5_instance.mouseY\n pmouseX = p5_instance.pmouseX\n pmouseY = p5_instance.pmouseY\n winMouseX = 
p5_instance.winMouseX\n winMouseY = p5_instance.winMouseY\n pwinMouseX = p5_instance.pwinMouseX\n pwinMouseY = p5_instance.pwinMouseY\n mouseButton = p5_instance.mouseButton\n mouseIsPressed = p5_instance.mouseIsPressed\n touches = p5_instance.touches\n pixels = p5_instance.pixels\n\n return draw_func()", "def setup():\n size(SPACE['w'], SPACE['h'])\n colorMode(RGB, 1)", "def __init__(\n self,\n win,\n outer_diam_pix,\n inner_diam_pix,\n bg_colour=(-1, -1, -1),\n line_colour=(+1, +1, +1),\n spot_colour=(-1, -1, -1),\n circle_edges=128,\n ):\n\n self._win = win\n self._outer_diam_pix = outer_diam_pix\n self._inner_diam_pix = inner_diam_pix\n self._circle_edges = circle_edges\n self._stim = {}\n\n self._stim[\"aperture\"] = psychopy.visual.Aperture(\n win=win,\n size=self._outer_diam_pix,\n nVert=self._circle_edges,\n shape=\"circle\",\n units=\"pix\",\n autoLog=False,\n )\n\n self._stim[\"aperture\"].enabled = False\n\n self._stim[\"bg\"] = psychopy.visual.Circle(\n win=self._win,\n radius=self._outer_diam_pix / 2.0,\n units=\"pix\",\n lineColor=None,\n fillColor=bg_colour,\n edges=self._circle_edges,\n autoLog=False,\n )\n\n self._stim[\"line\"] = psychopy.visual.Rect(\n win=self._win,\n size=(self._outer_diam_pix * 2, self._inner_diam_pix),\n units=\"pix\",\n lineWidth=0,\n lineColor=None,\n fillColor=line_colour,\n autoLog=False,\n )\n\n self._stim[\"spot\"] = psychopy.visual.Circle(\n win=self._win,\n radius=self._inner_diam_pix / 2.0,\n units=\"pix\",\n fillColor=spot_colour,\n edges=self._circle_edges,\n lineWidth=0,\n autoLog=False,\n )\n\n self.bg_colour = bg_colour\n self.line_colour = line_colour\n self.spot_colour = spot_colour", "def start(self):\n pygame.init()\n self.screen = pygame.display.set_mode((self.width, self.height))\n pygame.display.set_caption(\"PyStroke Demo\")\n self.engines = [DemoGameEngine(self.screen, self.e_e)] # add others here\n self.engine = self.engines[0]\n self.run()", "def __init__(self, renderSurf):\n self.surf = renderSurf", "def setup(self):\n # Initialize the drawing environment (create main windows, etc)\n glutInit(sys.argv)\n glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB | GLUT_DEPTH)\n glutInitWindowSize(WINDOW_WIDTH, WINDOW_HEIGHT)\n glutCreateWindow(name)\n\n glShadeModel(GL_SMOOTH)\n\n glClearDepth(1.0)\n glDepthFunc(GL_LESS) # The Type Of Depth Test To Do\n glEnable(GL_DEPTH_TEST) # Enables Depth Testing\n glShadeModel(GL_SMOOTH) # Enables Smooth Color Shading\n\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity() # Reset The Projection Matrix\n\n # Calculate The Aspect Ratio Of The Window\n gluPerspective(45.0, float(WINDOW_WIDTH)/float(WINDOW_HEIGHT), 0.1, 100.0)\n\n glMatrixMode(GL_MODELVIEW)\n\n # Set up keyboard listeners.\n glutKeyboardFunc(self.on_key)", "def setup_game(self):", "def __init__(self):\n self.function_dict = {\n \"Sphere\": self.draw_sphere,\n \"BSpline\": self.draw_nurbspatch,\n \"Cylinder\": self.draw_cylinder,\n \"Cone\": self.draw_cone,\n \"Torus\": self.draw_torus,\n \"Plane\": self.draw_plane,\n }", "def __init__(self):\n self.figure = plt.figure()\n FigureCanvas.__init__(self, self.figure)\n self.figure.patch.set_facecolor('blue')\n self.figure.patch.set_alpha(0.0)\n self.pv_monitor = controls.PvMonitors.get_instance()", "def init():\n \n # General parameters\n exp_path = '/home/laura/Documents/stacks tif/1705_regMovie.tif' # experimental tif stack (grayscale)\n bin_path = '/home/laura/Documents/stacks tif/1705/1705_binarizedMovie.tif' # binarized tif stack\n vect_path = '/home/laura/Documents/STAGE3/1705_NET/' # gpickle 
directory\n dest_path = '/home/laura/Documents/STAGE3/1705_NET/superposition' # output directory\n verbose = True\n debug = True\n invert = True \n main_params = [exp_path, bin_path, vect_path, dest_path, verbose, debug, invert]\n \n # Output options\n doImg = -1 # image index\n doStack = False \n doVideo = False \n compress = 3 # advice: no more than 5\n output_params = [doImg, doStack, doVideo, compress]\n \n # Drawing options (colors as BGR)\n line = True # edges drawing\n line_color = (0, 255, 0) # green \n line_size = 1 \n apex_color = (0, 0, 255) # red\n apex_size = 5\n node_color = (255, 0, 0) # blue\n node_size = 5\n body_color = (0, 255, 0) # green\n body_size = 3\n drawing_params = [line, line_color, line_size, apex_color, apex_size,\n node_color, node_size, body_color, body_size]\n \n return main_params, output_params, drawing_params", "def main():\n Canvas1Demo().mainloop()", "def setup(client):\n client.add_cog(ProcessDisplay(client))", "def setup_screen():\n screen = Screen()\n screen.setup(width=600, height=600)\n screen.bgcolor(\"black\")\n screen.title(\"My Snake Game\")\n screen.tracer(0)\n return screen", "def setup(config, session, pts_all):\n optic = config['Optic']\n general = config['General']\n\n numFrames_total_rough = session['frames_total']\n numVids = session['num_vids']\n spacing = optic['spacing']\n\n bbox_subframe_displacement = pts_all['bbox_subframe_displacement']\n pts_displacement = pts_all['pts_displacement']\n pts_x_displacement = pts_all['pts_x_displacement']\n pts_y_displacement = pts_all['pts_y_displacement']\n mask_frame_displacement = pts_all['mask_frame_displacement']\n\n ## Make point cloud\n pts_spaced = np.ones((np.int64(bbox_subframe_displacement[3] * bbox_subframe_displacement[2] / spacing),\n 2)) * np.nan ## preallocation\n cc = 0 ## set idx counter\n\n # make spaced out points\n for ii in range(len(pts_x_displacement)):\n if (pts_x_displacement[ii] % spacing == 0) and (pts_y_displacement[ii] % spacing == 0):\n pts_spaced[cc, 0] = pts_x_displacement[ii]\n pts_spaced[cc, 1] = pts_y_displacement[ii]\n cc = cc + 1\n\n pts_spaced = np.expand_dims(pts_spaced, 1).astype('single')\n pts_spaced = np.delete(pts_spaced, np.where(np.isnan(pts_spaced[:, 0, 0])), axis=0)\n print(f'number of points: {pts_spaced.shape[0]}')\n\n ## Define random colors for points in cloud\n color_tuples = list(np.arange(len(pts_x_displacement)))\n for ii in range(len(pts_x_displacement)):\n color_tuples[ii] = (np.random.rand(1)[0] * 255, np.random.rand(1)[0] * 255, np.random.rand(1)[0] * 255)\n\n ## Preallocate output variables\n\n # I add a bunch of NaNs to the end because the openCV estimate is usually less than the actual number of frames\n displacements = np.ones((pts_spaced.shape[0], 2, np.uint64(\n numFrames_total_rough + numFrames_total_rough * 0.1 + (numVids * 1000)))) * np.nan\n positions_recursive = np.ones((pts_spaced.shape[0], 2, np.uint64(\n numFrames_total_rough + numFrames_total_rough * 0.1 + (numVids * 1000)))) * np.nan\n\n ## Preset point tracking variables\n pointInds_toUse = copy.deepcopy(pts_spaced)\n pointInds_tracked = pointInds_toUse ## set the first frame to have point locations be positions in the point cloud\n pointInds_tracked_tuple = list(np.arange(pointInds_toUse.shape[0]))\n\n return pointInds_toUse, pointInds_tracked, pointInds_tracked_tuple, displacements, pts_spaced, color_tuples , positions_recursive", "def main():\r\n plot = Plotter(0.5, 1.2)\r\n plot.plot_func()", "def setup(self):\n\n self.points = [[0.360502, 0.535494],\n [0.476489, 
0.560185],\n [0.503125, 0.601218],\n [0.462382, 0.666667],\n [0.504702, 0.5]]\n self.max_neighbors = 4\n self.beta = 1\n self.graph = 'beta skeleton'\n self.edges = [0, 1, 0, 2, 0, 3, 0, 4,\n 1, 3, 1, 4,\n 2, 3, 2, 4,\n 3, 4]", "def main():\r\n LEDStrip = createNeoPixelObject()\r\n setup(LEDStrip)\r\n clock(LEDStrip)", "def draw(self, screen):", "def setup_scene(self):\n\n # read map\n options, landscapes, statics, dynamics, trees, hero, hare = read_map('test.map')\n self.num_of_blocks_X, self.num_of_blocks_Y = options['size']\n with self.canvas:\n # init landscapes\n block_x = 0\n for i in xrange(self.num_of_blocks_X):\n block_y = 0\n for j in xrange(self.num_of_blocks_Y):\n class_name = landscapes[i][j]\n if class_name is not None:\n clazz = eval(class_name.capitalize())\n else:\n clazz = Grass\n block = clazz(pos=(block_x, block_y),\n size=(self.block_width, self.block_height), border=(0, 0))\n self.blocks[i][j] = block\n block_y += self.block_height \n block_x += self.block_width\n\n # init dynamics\n for x, y, class_name in dynamics:\n if 'dynamics_as_blocks' in options and options['dynamics_as_blocks']:\n x, y = (x + 0.5) * self.block_width, (y + 0.5) * self.block_height\n eval(class_name.capitalize())(x, y)\n \n with self.canvas:\n # draw or hero\n HeroRabbit(BLOCK_SIZE[0]*(hero[0] + 0.5), BLOCK_SIZE[1]*(hero[1] + 0.5))\n Hare(BLOCK_SIZE[0]*(hare[0] + 0.5), BLOCK_SIZE[1]*(hare[1] + 0.5))\n\n # init statics\n def _is_mountain(i, j):\n return int(0 <= i < self.num_of_blocks_X and 0 <= j <= self.num_of_blocks_Y and\n statics[i][j] == 'mountain')\n\n def _get_mountain_type(i, j):\n opensides = (_is_mountain(i - 1, j), _is_mountain(i, j + 1),\n _is_mountain(i + 1, j), _is_mountain(i, j - 1)) # left, top, right, bottom\n opensides_to_type = {\n (1, 1, 1, 1): 'center',\n (1, 0, 1, 0): 'horizontal_center',\n (0, 1, 0, 1): 'vertical_center',\n (1, 0, 0, 0): 'horizontal_right',\n (0, 1, 0, 0): 'vertical_bottom',\n (0, 0, 1, 0): 'horizontal_left',\n (0, 0, 0, 1): 'vertical_top',\n }\n return opensides_to_type.get(opensides, 'horizontal_center')\n \n _mountains = []\n _bushes= []\n \n for i in xrange(self.num_of_blocks_X):\n for j in xrange(self.num_of_blocks_Y):\n class_name = statics[i][j]\n if class_name is not None:\n pos = (i + 0.5) * self.block_width, (j + 0.5) * self.block_height\n if class_name == 'bush':\n #Bush(*pos)\n _bushes.append(pos)\n elif class_name == 'mountain':\n _mountains.append((pos, _get_mountain_type(i, j)))\n #Mountain(*pos, type=_get_mountain_type(i, j))\n \n for tree_pos in trees:\n Tree(BLOCK_SIZE[0]*(tree_pos[0] + 0.5), BLOCK_SIZE[1]*(tree_pos[1] + 0.5))\n \n with self.canvas:\n for pos in _bushes:\n Bush(*pos)\n \n for pos, type in _mountains:\n Mountain(*pos, type=type)\n\n HolyCarrot(13.5*self.block_width, 7.5*self.block_height)\n # This should be called at the end\n self.reindex_graphics()", "def show():\n setup()\n plt.show()", "def setup(self):\n self.poly2 = Polygon([(145, 60), (201, 69), (265, 46), (333, 61), (352, 99), (370, 129), (474, 138), (474, 178), (396, 225), (351, 275), (376, 312), (382, 356), (338, 368), (287, 302), (224, 304), (128, 338), (110, 316), (129, 270), (83, 231), (65, 51), (83, 163), (103, 201), (90, 74), (126, 162)])\n self.poly2.set_direction(\"E\")\n self.poly1 = Polygon([(905, 328),(877, 367),(944, 413),(1004, 384),(1019, 307),(953, 248),(880, 250),(865, 278),(883, 325)])\n self.poly1.set_direction(\"SW\")\n self.poly3 = Polygon([(900, 600), (950,650), (1000, 500)])\n self.poly3.set_direction(\"N\")\n self.p1 = Point(485, 138)\n 
self.p1.set_direction(\"SE\")\n self.p2 = Point(self.width/2, self.height/2)\n self.p2.set_direction(\"NW\")\n self.p3 = Point(86,163)\n self.p3.set_direction(\"SE\")\n #a separate list for each different type of shape for collision purposes.\n self.polys = [self.poly1, self.poly2, self.poly3]\n self.points = [self.p1, self.p2, self.p3]", "def __init__(self, parent):\n super(Demo4, self).__init__(parent)\n self.scenes = []\n self.draw_axes = True\n self.lighting = True\n self.current_scene = 0\n self.objects = []\n self.diffuse_light = [0.8, 0.8, 0.8, 1]", "def draw():", "def setup(self):\n\n # Create the Sprite lists\n self.sprite_list = arcade.SpriteList()\n\n r = 60\n for x in rand_range(0, 100 * math.pi, scale=math.pi / 5):\n star = arcade.Sprite(\"../../resources/arcade/gold_1.png\")\n star.center_x = SCREEN_WIDTH / 2 + r * math.cos(x)\n star.center_y = SCREEN_HEIGHT / 2 + r * math.sin(x)\n star.seed = scale_generator(x=random() * math.pi, offset=.5, step=.01)\n star.scale = next(star.seed)\n self.sprite_list.append(star)\n r += 3", "def __init__(self):\r\n config = ConfigProvider().getProcessingConfig()\r\n self.xGround = config.get(\"xGround\")\r\n self.yGround = config.get(\"yGround\")", "def draw(self):", "def __init__(self, opt):\n self.scat = Scattering(M=opt.N, N=opt.N, J=opt.scat, pre_pad=False).cuda() \n super(ScatModule, self).__init__()", "def initialize_graphics(self):\n self.renderer = vtk.vtkRenderer()\n self.window = vtk.vtkRenderWindow()\n self.window.AddRenderer(self.renderer)\n self.renderer.SetBackground(1.0, 1.0, 1.0)\n self.window.SetSize(1000, 1000)\n\n # Create a trackball interacter to transoform the geometry using the mouse.\n self.interactor = vtk.vtkRenderWindowInteractor()\n self.interactor.SetInteractorStyle(vtk.vtkInteractorStyleTrackballCamera())\n self.interactor.SetRenderWindow(self.window)\n\n style = ClickInteractorStyle(self)\n self.interactor.SetInteractorStyle(style)\n style.SetCurrentRenderer(self.renderer)", "def _init_plot(self) -> None:\n\n # create a grayscale plot\n out = sys.stdout\n sys.stdout = open(\"/dev/null\", \"w\")\n hdu = self.image_generator.image(self.ra, self.dec)\n self.plot = aplpy.FITSFigure(hdu)\n self.plot.show_grayscale()\n self.plot.set_theme(\"publication\")\n sys.stdout = out\n\n # label for the position angle\n pa_string = \"PA = %.1f\" % self.mode_details.position_angle().to_value(u.deg)\n if self.mode_details.automated_position_angle():\n pa_string += \" (auto)\"\n self.draw_label(0.95, -0.05, pa_string, style=\"italic\", weight=\"bold\")\n\n # label for the title\n if self.title:\n self.draw_label(\n 0.5, 1.03, self.title, style=\"italic\", weight=\"bold\", size=\"large\"\n )\n\n # label for the image source\n self.draw_label(\n -0.05,\n -0.05,\n \"%s\" % self.image_generator.source(),\n style=\"italic\",\n weight=\"bold\",\n )\n\n # grid overlay\n self.plot.add_grid()\n self.plot.grid.set_alpha(0.2)\n self.plot.grid.set_color(\"b\")\n\n # indicate the RSS field of view\n self.draw_circle(self.ra, self.dec, 4.0 * u.arcmin, \"g\")\n self.draw_label(\n 0.79,\n 0.79,\n \"RSS\",\n style=\"italic\",\n weight=\"bold\",\n size=\"large\",\n horizontalalignment=\"left\",\n color=(0, 0, 1),\n )\n\n # indicate the Salticam field of view\n self.draw_circle(self.ra, self.dec, 5.0 * u.arcmin, \"g\")\n self.draw_label(\n 0.86,\n 0.86,\n \"SCAM\",\n style=\"italic\",\n weight=\"bold\",\n size=\"large\",\n horizontalalignment=\"left\",\n color=(0, 0, 1),\n )\n\n # labels for north and east direction\n self.draw_label(\n self.ra,\n 
self.dec + 4.8 * u.arcmin,\n \"N\",\n style=\"italic\",\n weight=\"bold\",\n size=\"large\",\n color=(0, 0.5, 1),\n )\n self.draw_label(\n self.ra + 4.8 * u.arcmin / np.abs(np.cos(self.dec)),\n self.dec,\n \"E\",\n style=\"italic\",\n weight=\"bold\",\n size=\"large\",\n horizontalalignment=\"right\",\n color=(0, 0.5, 1),\n )\n\n # add cross hairs\n self.draw_centered_line(\n 0 * u.deg,\n 8 * u.arcmin,\n self.ra,\n self.dec,\n color=\"g\",\n linewidth=0.5,\n alpha=1.0,\n )\n self.draw_centered_line(\n 90 * u.deg,\n 8 * u.arcmin,\n self.ra,\n self.dec,\n color=\"g\",\n linewidth=0.5,\n alpha=1.0,\n )\n\n # label for the magnitude range and bandpass\n if self.magnitude_range:\n self._show_magnitudes()\n\n # add mode specific content\n if not self.basic_annotations:\n self.mode_details.annotate_finder_chart(self)", "def setup(self, flags):\n self.figure = pylab.figure(1)\n self.axes = {}\n self.stream_data = {}\n self.flags = flags", "def __init__(self, skin_directory):\n self.ax = None\n self.generate_axis()\n self.skin_directory = skin_directory\n self.figure = plt.gcf()", "def setup(self):\n\n self.insts = []\n self.testInst = pysat.Instrument('pysat', 'testing2D',\n clean_level='clean')\n self.testInst.bounds = (dt.datetime(2008, 1, 1),\n dt.datetime(2008, 1, 3))\n self.insts.append(self.testInst)\n self.insts.append(self.testInst)\n\n self.dname = 'series_profiles'\n self.test_vals = np.arange(50) * 1.2\n\n self.testC = pysat.Constellation(instruments=self.insts)\n\n return", "def __init__(self):\n self.gameloop()", "def main():\n\n viewer = Viewer(1900, 1200)\n viewer.add((init_BaracuddaFish()))\n viewer.add(init_BlueStarFish())\n viewer.add_movable(init_SeaSnake())\n init_groupeOfFishs(viewer)\n\n under_water = [\n 'res/skybox/underwater/uw_lf.jpg',\n 'res/skybox/underwater/uw_rt.jpg',\n 'res/skybox/underwater/uw_up.jpg',\n 'res/skybox/underwater/uw_dn.jpg',\n 'res/skybox/underwater/uw_ft.jpg',\n 'res/skybox/underwater/uw_bk.jpg']\n viewer.add(Skybox(under_water))\n\n viewer.run()", "def __init__(self, map_state):\n self.map_state = map_state\n self.image = map_prepare.GFX[\"misc\"][\"interface\"]\n self.make_widgets()", "def start(self):\n pygame.init()\n self.screen = pygame.display.set_mode((self.width, self.height))\n pygame.display.set_caption(\"PyStroke\")\n self.engines = [GameEngine(self.screen, self.e_e)] # add others here\n self.engine = self.engines[0]\n self.run()", "def __init__(self):\n # Screen settings\n self.screen_width = 2400\n self.screen_height = 1600\n self.bg_color = (0, 0, 0)\n\n # Raindrop settings\n self.r_y_speed = 10", "def __init__(self):\n self.km, self.kc = \\\n start_new_kernel(extra_arguments=['--matplotlib=inline'],\n stderr=open(os.devnull, 'w'))", "def __init__(self, game, pieceEnv):\n self.game = game\n self.pieceEnv = pieceEnv", "def __init__(self, skin_position: str, /):", "def screen_setup(screen_size):\n window = turtle.Screen()\n window.bgcolor(\"black\")\n window.title(\"Maze Game\")\n window.setup(screen_size, screen_size)", "def __init__(self, program, scr=None, delay=0):\n self.cpu = IntCode(program, silent=True, input_wait=True)\n self.map = Graph()\n self.display = False\n self.delay = delay\n if scr is not None:\n self.screen = Screen(50, 50, self.TILES, scr=scr, colors=self.COLORS)\n self.display = True\n self.position = Point(0, 0)\n self.map.edges[self.position] = []\n self.grid = {0: {self.position}, 1: set(), 2: set()}", "def figure():\n global fig\n return fig", "def main():\n# example_from_m3()\n# draw_you_guess_it()\n# 
draw_pink_square()\n draw_squares_in_squares()", "def __init__(self):\n\n self.width = 10\n self.height = 10\n self.new_game()", "def appInit(self):\n glClearColor(1.0, 1.0, 1.0, 1.0)\n glColor(0.0, 0.0, 0.0)\n glPointSize(4.0)\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n gluOrtho2D(0.0, WINDOW_WIDTH, 0.0, WINDOW_HEIGHT)\n\n self.scene = Scene()\n\n tri = Triangle()\n tri.set_location(10, 50)\n tri.set_color(0, 1, 1)\n self.scene.addShape(tri)\n\n tri = Triangle()\n tri.set_location(70, 50)\n tri.set_color(1, 0, 1)\n tri.set_size(2, 2)\n self.scene.addShape(tri)\n\n tri = Triangle()\n tri.set_location(300, 50)\n self.scene.addShape(tri)", "def main():\n g = DemoGame(800, 600)\n g.start()", "def __init__(self):\n self.t_height = 291\n self.t_left = 65\n self.t_right = 144\n self.points = Pix()", "def setUp(self):\r\n self.spaceship = SpaceShipGame()", "def setUp(self):\r\n self.spaceship = SpaceShipGame()", "def setUp(self):\r\n self.spaceship = SpaceShipGame()", "def setUp(self):\r\n self.spaceship = SpaceShipGame()", "def setUp(self):\r\n self.spaceship = SpaceShipGame()", "def setUp(self):\r\n self.spaceship = SpaceShipGame()\r\n self.spaceship.init()", "def setup(self):\n self.graph = KytosGraph()", "def create_scene(self):\n \n self.scene=soya.World()", "def init():\n global tube, ball, faceTextureName, woodTextureName\n tube = gluNewQuadric()\n gluQuadricDrawStyle(tube, GLU_FILL)\n ball = gluNewQuadric()\n gluQuadricDrawStyle(ball, GLU_FILL)\n\n # Set up lighting and depth-test\n glEnable(GL_LIGHTING)\n glEnable(GL_NORMALIZE) # Inefficient...\n glEnable(GL_DEPTH_TEST) # For z-buffering!\n\n generateCheckerBoardTexture()\n faceTextureName = loadImageTexture(\"brick.jpg\")\n woodTextureName = loadImageTexture(\"wood.jpg\")", "def __init__(self, instrument, scene, psf_grid, wave, background):\n # This will be the image in electrons... convert to DN at the end.\n img = np.zeros(instrument.array_size, dtype=float)\n\n # Add the background and dark current in electrons\n itime_tot = instrument.itime * instrument.coadds\n img += (background + instrument.dark_current) * itime_tot\n\n # Total readnoise in electrons\n readnoise = instrument.readnoise / math.sqrt(instrument.fowler)\n\n # i and j are the coordinates into the PSF array. 
Make it 0 at the center.\n psf_i = np.arange(psf_grid.psf.shape[3]) - (psf_grid.psf.shape[3] / 2)\n psf_j = np.arange(psf_grid.psf.shape[4]) - (psf_grid.psf.shape[4] / 2)\n\n psf_i_scaled = psf_i * (psf_grid.psf_scale[wave] / instrument.scale)\n psf_j_scaled = psf_j * (psf_grid.psf_scale[wave] / instrument.scale)\n\n # Add the point sources\n print 'Observation: Adding stars one by one.'\n for ii in range(len(scene.xpos)):\n # Fetch the appropriate interpolated PSF and scale by flux.\n # This is only good to a single pixel.\n psf = psf_grid.get_local_psf(scene.xpos[ii], scene.ypos[ii], wave)\n psf *= scene.flux[ii]\n\n # Project this PSF onto the detector at this position.\n # This includes sub-pixel shifts and scale changes.\n\n # Coordinates of the PSF's pixels at this star's position\n psf_i_old = psf_i_scaled + scene.xpos[ii]\n psf_j_old = psf_j_scaled + scene.ypos[ii]\n\n # Make the interpolation object.\n # Can't keep this because we have a spatially variable PSF.\n psf_interp = RectBivariateSpline(psf_i_old, psf_j_old, psf, kx=1, ky=1)\n\n # New grid of points to evaluate at for this star.\n xlo = int(psf_i_old[0])\n xhi = int(psf_i_old[-1])\n ylo = int(psf_j_old[0]) + 1\n yhi = int(psf_j_old[-1]) + 1\n\n # Remove sections that will be off the edge of the image\n if xlo < 0:\n xlo = 0\n if xhi > img.shape[0]:\n xhi = img.shape[0]\n if ylo < 0:\n ylo = 0\n if yhi > img.shape[1]:\n yhi = img.shape[1]\n \n # Interpolate the PSF onto the new grid.\n psf_i_new = np.arange(xlo, xhi)\n psf_j_new = np.arange(ylo, yhi)\n psf_star = psf_interp(psf_i_new, psf_j_new, grid=True)\n\n # Add the PSF to the image.\n img[xlo:xhi, ylo:yhi] += psf_star\n \n print 'Observation: Finished adding stars.'\n\n #####\n # ADD NOISE: Up to this point, the image is complete; but noise free.\n #####\n # Add Poisson noise from dark, sky, background, stars.\n img_noise = np.random.poisson(img, img.shape)\n\n # Add readnoise\n img_noise += np.random.normal(loc=0, scale=readnoise, size=img.shape)\n \n \n self.img = img_noise", "def __init__(self, scene): # type: (Scene) -> None\n self.scene = scene", "def __init__(self):\n #screen Settings\n self.screen_width = 1024\n self.screen_height = 768\n self.bg_color = (32, 32, 32)\n\n #rocket settings\n self.rocket_speed = 1\n\n #laser Settings\n self.laser_speed = 1.0\n self.laser_width = 3\n self.laser_height = 15\n self.laser_color = (0, 255, 255)\n self.lasers_allowed = 3", "def __init__(self):\n self.screen = pg.display.get_surface()\n self.screen_rect = self.screen.get_rect()\n self.clock = pg.time.Clock()\n self.fps = 60\n self.keys = pg.key.get_pressed()\n self.done = False\n # ship = random.choice(list(prepare.GFX[\"ships\"].values()))\n ship = list(prepare.GFX[\"ships\"].values())[7] # pick first ship available\n self.player = actors.Player((0, 0), ship)\n self.level = level.Level(self.screen_rect.copy(), self.player)\n\n self.energyloss_counter = 0\n self.energygain_counter = 0", "def __init__(self, supress_msg=False):\r\n # HYSPLITm Startup\r\n if not supress_msg:\r\n print '''\r\n ------------------------------\r\n HYSPLITm - HYSPLIT Manager - v%s\r\n\r\n Check the README for usage details.\r\n This program provided without warranty.\r\n\r\n All users of this program should properly credit NOAA ARL.\r\n For more information, visit\r\n http://www.arl.noaa.gov/HYSPLIT_info.php\r\n ''' % self.__version__\r\n self._dir()\r\n # Create a variable that holds the last run set used\r\n self._runset = None\r\n # Create instances of HyBase and Plot for use in interactive 
mode\r\n # FIXME: HyB and HyS usage is too complicated. I think due to trying to\r\n # both compose and inherit them.\r\n self.HyB = model.HyBase(self)\r\n self.HyS = model.HySetup(self)\r\n self.Plot = hp.Plot(self)\r\n self.Plot.HyS = model.HySetup(self)", "def setup(self):\n\n # Used to keep track of our scrolling\n self.view_bottom = 0\n self.view_left = 0\n\n # Keep track of the score\n self.score = 0\n\n # Create the Sprite lists\n self.player_list = arcade.SpriteList()\n self.wall_list = arcade.SpriteList()\n self.coin_list = arcade.SpriteList()\n\n # Set up the player, specifically placing it at these coordinates.\n # image_source = \":resources:images/animated_characters/female_adventurer/femaleAdventurer_idle.png\"\n self.player_list = arcade.SpriteList()\n self.player_sprite = Player()\n self.player_sprite.center_x = 256\n self.player_sprite.center_y = 256\n self.player_list.append(self.player_sprite)\n\n # --- Load in a map from the tiled editor ---\n\n # Name of map file to load\n map_name = r\"Math_Game\\floor_is_lava.tmx\"\n # Name of the layer in the file that has our platforms/walls\n platforms_layer_name = 'Platforms'\n\n # Read in the tiled map\n my_map = arcade.tilemap.read_tmx(map_name)\n\n # -- Platforms\n self.wall_list = arcade.tilemap.process_layer(map_object=my_map,\n layer_name='Platforms',\n base_directory=r'C:\\Users\\katel\\Desktop\\CSE310\\group_project\\Math_Game\\platformer-art-complete-pack-0\\Base pack\\Tiles',\n scaling=TILE_SCALING,\n use_spatial_hash=True, hit_box_algorithm=\"Simple\", hit_box_detail=4.5)\n\n # --- Other stuff\n # Set the background color\n if my_map.background_color:\n arcade.set_background_color(my_map.background_color)\n\n # Create the 'physics engine'\n self.physics_engine = arcade.PhysicsEnginePlatformer(self.player_sprite,\n self.wall_list,\n GRAVITY)", "def setup(self):\n \n # Define ui file to be used as a graphical interface\n # This file can be edited graphically with Qt Creator\n # sibling_path function allows python to find a file in the same folder\n # as this python module\n self.ui_filename = sibling_path(__file__, \"lick_training_plot.ui\")\n \n #Load ui file and convert it to a live QWidget of the user interface\n self.ui = load_qt_ui_file(self.ui_filename)\n\n # Measurement Specific Settings\n # This setting allows the option to save data to an h5 data file during a run\n # All settings are automatically added to the Microscope user interface\n self.settings.New('save_h5', dtype=bool, initial=False)\n self.settings.New('tdelay', dtype=int, initial=0,ro=True)\n self.settings.New('trial_time',dtype=int,initial=10,ro=False)\n self.settings.New('lick_interval', dtype=int, initial=1,ro=False)\n self.settings.New('water_reward', dtype=bool, initial=False,ro=False)\n self.settings.New('total_drops', dtype=int, initial=0,ro=False)\n self.settings.New('save_movie', dtype=bool, initial=False,ro=False)\n self.settings.New('movie_on', dtype=bool, initial=False,ro=True)\n #self.settings.New('sampling_period', dtype=float, unit='s', initial=0.005)\n \n # Create empty numpy array to serve as a buffer for the acquired data\n #self.buffer = np.zeros(10000, dtype=float)\n \n # Define how often to update display during a run\n self.display_update_period = 0.04 \n \n # Convenient reference to the hardware used in the measurement\n self.daq_ai = self.app.hardware['daq_ai']\n self.arduino_sol = self.app.hardware['arduino_sol']\n self.water=self.app.hardware['arduino_water']\n self.camera=self.app.hardware['camera']", "def __init__(self, 
plotter=None):\n if plotter is None:\n self._get_plotter()\n else:\n self.plotter = plotter\n self.plotter.keyPressFunction = self.keypress", "def __init__(self):\n # start x position\n self.x = random.randrange(size_x)\n # start y position\n self.y = - random.randrange(100)\n # drift x (amount of change each loop along the x axis)\n self.dx = random.randrange(3) - random.randrange(6)\n # drift y (amount of change each loop along the y axis)\n self.dy = random.randrange(1, 20) + random.randrange(4)\n # the size of the circular snowflake\n self.size = random.randrange(1, 4)\n # the colour of the snowflake (from sludgy grey to snowy white)\n c = random.randrange(200, 256)\n self.color = [c, c, c]", "def __init__(self):\r\n ScriptedLoadableModuleLogic.__init__(self)\r\n self.rgbport = 18944\r\n self.depthPort = 18945", "def do_stuff(self):\n self.create_tourism_raster()", "def setupWidget(self):\r\n self.generateCoordinates()\r\n self.modifyCoordinateLists()\r\n self.settings.movementMatrix = self.movementMatrix\r\n self.settings.ghostIntersectionList = self.ghostIntersectionList\r\n self.createBodies()\r\n print(\"GameW set\")", "def setup(self):\n\n self.total_time = 0.0\n\n self.background = arcade.load_texture(\"images\\\\background-1_0 (1).png\")\n\n # Create the Sprite lists\n self.all_sprites_list = arcade.SpriteList()\n self.player_list = arcade.SpriteList()\n self.wall_list = arcade.SpriteList()\n self.coin_list = arcade.SpriteList()\n self.myobject_list = arcade.SpriteList()\n\n # Set up the player\n self.gameover = 0\n self.score = 0\n self.lives = 4\n self.collision_time = 0\n self.numobj = STARTING_OBJECTS_COUNT\n self.ncoins = COIN_COUNT\n self.player_sprite = VehicleSprite(\"images\\\\bugatti.png\",\n CHARACTER_SCALING)\n self.player_sprite.angle = 90\n # self.player_sprite.change_y = 1\n self.all_sprites_list.append(self.player_sprite)\n\n self.create_buddies()\n self.create_treasure()\n\n # Make the mouse disappear when it is over the window.\n # So we just see our object, not the pointer.\n\n # Set the background color\n arcade.set_background_color(arcade.color.ASH_GREY)\n\n # Set up the player, specifically placing it at these coordinates.\n # self.player_sprite = arcade.Sprite(\"images\\\\carcar.png\", CHARACTER_SCALING)\n # self.player_sprite.center_x = 500\n # self.player_sprite.center_y = 110\n # self.player_sprite.angle = 90\n # self.player_sprite.change_y = 1\n # self.player_list.append(self.player_sprite)\n\n # Create the 'physics engine'\n self.physics_engine = arcade.PhysicsEngineSimple(self.player_sprite,\n self.wall_list)\n\n # Set the viewport boundaries\n # These numbers set where we have 'scrolled' to.\n self.view_left = 0\n self.view_bottom = 0\n\n # For draw\n self.line_start = 0", "def __init__(self):\n\n self.parser = self.define_parser()\n self.pen = Pen()", "def __init_accessors (self):\n self.colors = ay.utils.Colors\n self.layout = Layout(self.seed)\n self.shapes = Shapes", "def __init__(self, *args):\n _ShapeUpgrade.ShapeUpgrade_ShellSewing_swiginit(self,_ShapeUpgrade.new_ShapeUpgrade_ShellSewing(*args))", "def setUp(self):\n self.shonan = ShonanAveraging3(\"toyExample.g2o\", DEFAULT_PARAMS)", "def draw_game():\n # Fill window with background color\n RENDER_WINDOW.fill(BACKGROUNDCOLOR)\n\n # Draw Game Title\n draw_title()\n\n # Draw Puzzle\n draw_puzzle()\n \n # Draw buttons to GUI \n draw_buttons()\n\n # Draw Text\n draw_text() \n\n # Draw random toggle\n draw_rand_toggle()", "def __init__(self, model, size):\n self.model = model\n self.screen = 
pygame.display.set_mode(size)", "def initGameState(self):\n print(\"Setting game state: \")\n self.playGUI = GUI()\n self.playGUI.drawBoard(self.player)", "def setup(self, viewer, space, phys_vars):\n self.shapes = []\n self.bodies = []\n self.viewer = weakref.proxy(viewer)\n self.space = weakref.proxy(space)\n self.phys_vars = weakref.proxy(phys_vars)", "def __init__(self, g_model, width, height):\n pygame.init()\n #determine where to draw\n self.screen = pygame.display.set_mode((width, height))\n self.game_model = g_model", "def canvas_api():\n pass", "def __init__(self, *args):\n _hypre.HypreSmoother_swiginit(self, _hypre.new_HypreSmoother(*args))", "def main():\n PanelDemo().mainloop()", "def setup(self):\n\n # Sprite lists\n self.all_sprite_list = arcade.SpriteList()\n self.player_list = arcade.SpriteList()\n self.coin_list = arcade.SpriteList()\n self.deathclaw_list = arcade.SpriteList()\n # Score\n self.score = 0\n\n # Set up the player\n # Character image from https://www.pngkit.com/\n self.player_sprite = arcade.Sprite(\"Vault_Boy.png\", SPRITE_SCALING_PLAYER)\n self.player_sprite.center_x = 50\n self.player_sprite.center_y = 50\n self.player_list.append(self.player_sprite)\n\n # Create the ENEMY\n for i in range(DEATHCLAW_COUNT):\n\n # Create the ENEMY instance\n # ENEMY image from https://fallout.fandom.com/\n deathclaw = Deathclaw(\"Deathclaw.png\", SPRITE_SCALING_DEATHCLAW)\n\n # Position the coin\n deathclaw.center_x = random.randrange(SCREEN_WIDTH)\n deathclaw.center_y = random.randrange(SCREEN_HEIGHT)\n\n # Add the coin to the lists\n self.deathclaw_list.append(deathclaw)\n\n for i in range(COIN_COUNT):\n\n # Create the Nuka Cola instance\n # Nuka Cola image fromh https://www.cleanpng.com/free/nuka-cola.html\n coin = Coin(\"Nuka_Kola.png\", SPRITE_SCALING_COIN)\n\n # Position the coin\n coin.center_x = random.randrange(SCREEN_WIDTH)\n coin.center_y = random.randrange(SCREEN_HEIGHT)\n\n # Add the coin to the lists\n self.coin_list.append(coin)", "def expose_graphics_methods():\n pass", "def setup(self):\n self.total_time = 0.0\n self.timer_text = None\n arcade.set_background_color(arcade.color.WHITE)", "def setup(self, ctxConfig, drvConfig):\n superClass.setup(self, ctxConfig, drvConfig)\n # TODO Your startup stuff here", "def appInit(self):\n glutInitDisplayMode( GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH )\n\n glClearColor(0.4, 0.4, 0.5, 1.0)\n glShadeModel(GL_SMOOTH)\n\n glEnable(GL_DEPTH_TEST)\n\n glEnable(GL_BLEND)\n glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)\n\n self.texture = Texture()\n # self.texture.load_jpeg('Sunrise.jpg')\n self.x2yAspect = self.texture.GetWidth()/self.texture.GetHeight()\n glutReshapeFunc(self.reshape)\n glutDisplayFunc(self.redraw)", "def setup(self, shader_program):\n self.setup_view(shader_program)\n self.setup_projection(shader_program)", "def __init__(self, x1, y1, w, h, dal):\n self._dal = dal\n self._screen_size = Rect(x1, y1, w, h)\n\n self._facade = Facade.facade_layer()\n self._play_button = Rect(426,656,207,58)\n self._quit_button = Rect(686,662,207,58)", "def setup(self):\n # Create your sprites and sprite lists here\n self.game: Game = Game(SCREEN_WIDTH, SCREEN_HEIGHT, TILE_SIZE, 1, grid_layers = 4)\n self.game.game_message = \"Lead the Rabbit home\"\n\n # show the menu so that we see the instructions\n self.game.menu.button_list[0].text = \"Start\"\n self.game.menu.is_visible = True", "def __init__(self, chess_board, pieces, window, update):\n import turtle\n\n self.board = chess_board\n self.pieces = pieces\n self.update = 
update\n self.is_piece_selected = False\n self.selected_row = -1\n self.selected_col = -1\n self.turn_color = \"white\"\n self.window = turtle.Screen()\n window.onclick(self.findHeuristic)", "def __init__(self):\n self.size = 16\n self.color = COLOR\n self.pos = self.spawn()", "def setUp(self):\n self.s = Simulation()\n self.s['Retina']=GeneratorSheet(nominal_density=4.0)\n self.s['V1']= CFSheet(nominal_density=4.0)\n self.s['V2'] = CFSheet(nominal_density=4.0)\n\n self.s.connect('Retina','V1',delay=0.5,connection_type=CFProjection,\n name='RtoV1',learning_fn=CFPLF_Hebbian())\n\n self.s.connect('Retina','V2',delay=0.5,connection_type=CFProjection,\n name='RtoV2',learning_fn=CFPLF_Hebbian())", "def __init__(self):\n pygame.init()\n\n self.settings = Settings()\n\n self.screen = pygame.display.set_mode(\n (self.settings.screen_width, self.settings.screen_height))\n pygame.display.set_caption(\"52 Card Trick\")\n self.CardSet = CardSet(self)", "def __init__(self, player):\n self.wall_list = pygame.sprite.Group()\n self.enemy_list = pygame.sprite.Group()\n self.sludge = pygame.sprite.Group()\n self.consumeable = pygame.sprite.Group()\n self.can_climb = pygame.sprite.Group()\n self.player = player\n self.spore_list = [Decompose_Spore, Ledge_Spore]\n self.active_spore = self.spore_list[0]\n \n # Background image\n self.background = None" ]
[ "0.6492295", "0.6341241", "0.6277463", "0.6121529", "0.5968284", "0.5964794", "0.5882622", "0.5853329", "0.5788281", "0.5773192", "0.5767879", "0.5749697", "0.5735698", "0.5676923", "0.56400704", "0.5595668", "0.55945295", "0.558435", "0.5566732", "0.55556136", "0.55504155", "0.55404216", "0.5505791", "0.54731435", "0.5457119", "0.54535", "0.5426456", "0.53873134", "0.53824294", "0.5375594", "0.53746927", "0.53470606", "0.5333786", "0.53251255", "0.5324375", "0.5319783", "0.5318764", "0.53089505", "0.5308752", "0.5299013", "0.5287858", "0.52829695", "0.527847", "0.52717", "0.5261003", "0.5260663", "0.5248631", "0.5245833", "0.524476", "0.5237264", "0.5235069", "0.52341104", "0.52297926", "0.52285635", "0.52285635", "0.52285635", "0.52285635", "0.52285635", "0.5219446", "0.5212122", "0.5205531", "0.52030826", "0.52023447", "0.51924145", "0.5189261", "0.51881665", "0.51854014", "0.51820624", "0.5178308", "0.5177324", "0.5175122", "0.5174473", "0.5173671", "0.5162074", "0.5161342", "0.51605", "0.51579213", "0.5156583", "0.5154882", "0.51544994", "0.5151671", "0.51496553", "0.51455253", "0.5145192", "0.51441675", "0.5143923", "0.5139989", "0.51357454", "0.5129901", "0.5129531", "0.5127691", "0.51255983", "0.51252335", "0.51200444", "0.5119576", "0.51194304", "0.5119377", "0.5117036", "0.51168704", "0.51162636" ]
0.63667154
1
Calculates the local density of states of a Hamiltonian and writes it to a file
def ldos0d(h,e=0.0,delta=0.01):
  if h.dimensionality==0:  # only for 0d
    iden = np.identity(h.intra.shape[0],dtype=np.complex) # create identity
    g = ( (e+1j*delta)*iden -h.intra ).I # calculate green function
  else: raise # not implemented...
  d = [ -(g[i,i]).imag/np.pi for i in range(len(g))] # get imaginary part
  d = spatial_dos(h,d) # convert to spatial resolved DOS
  g = h.geometry # store geometry
  write_ldos(g.x,g.y,d,z=g.z) # write in file
  return d
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_density(fname, density):\n K, M, N = density.shape\n output = open(fname, \"w\")\n output.write(\"ARMA_CUB_TXT_FN008\\n\")\n output.write(\"%d %d %d\\n\" % (K, M, N))\n for i in range(N):\n for k in range(K):\n for m in range(M):\n output.write(\" %+.6e\" % density[k, m, i])\n output.write(\"\\n\")\n\n output.close()", "def main():\n N = 201 # Amount of gridpoints, odd number to include 0\n L = 10 # Size of the system\n k = 50 # Amount of energies and states calculated\n x = y = np.linspace(-L/2,L/2,N) # Gridpoints\n h = x[1]-x[0] # Spacing of gridpoints\n\n # Solve the system with and without perturbation\n E,psi,E_p,psi_p = fdm_2d(N,L,x,y,h,k)\n\n # Print the first two energies and the absolute error of the energies\n print('Energies of the two lowest states:')\n print('E_00 = %.4f' % E[0])\n print('E_01 = %.4f' % E[1], '\\n')\n print('Absolute error for E_00: %.4e' % np.abs(E[0]-1))\n print('Absolute error for E_01: %.4e' % np.abs(E[1]-2))\n\n print('\\nEnergies of the two lowest states after perturbation:')\n print('E_00 = %.4f' % E_p[0])\n print('E_01 = %.4f' % E_p[1])\n\n # Calculate the normalized densities of the states\n densities_norm = np.zeros((k,N,N))\n densities_norm_p = np.zeros((k,N,N))\n for i in range(k):\n # meshgrid form\n state = np.reshape(psi.T[i],(N,N))\n state_p = np.reshape(psi_p.T[i],(N,N))\n densities_norm[i] = normalized_density(state,x)\n densities_norm_p[i] = normalized_density(state_p,x)\n\n # Analytical solution of the ground state\n X,Y = np.meshgrid(x,y)\n psi00_exact = phi(X,0)*phi(Y,0)\n psi00_exact_density = normalized_density(psi00_exact,x)\n\n print('\\nMaximum absolute error of the normalized ground state densities of the unperturbated system:')\n print('errmax = {:.4e}'.format(np.max(np.abs(densities_norm[0]-psi00_exact_density))))\n\n # Plotting the ground state density of the unperturbated system\n fig1 = plt.figure(figsize=plt.figaspect(0.5))\n ax = fig1.add_subplot(1,2,1,projection='3d')\n surf1 = ax.plot_surface(X, Y, densities_norm[0], cmap=cm.coolwarm,\n linewidth=0, antialiased=False)\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n fig1.suptitle(r'Normalized ground state density $|\\psi|^2$ using FDM')\n ax = fig1.add_subplot(1,2,2)\n ax.imshow(densities_norm[0],extent=[-L/2,L/2,-L/2,L/2])\n plt.savefig(os.path.join(path,'FDM_psi00_unperturbated.png'))\n plt.close()\n\n\n fig2 = plt.figure(figsize=plt.figaspect(0.5))\n ax = fig2.add_subplot(1,2,1,projection='3d')\n surf2 = ax.plot_surface(X, Y, psi00_exact_density, cmap=cm.coolwarm,\n linewidth=0, antialiased=False)\n\n\n # Plotting the ground state density of the perturbated system\n fig1 = plt.figure(figsize=plt.figaspect(0.5))\n ax = fig1.add_subplot(1,2,1,projection='3d')\n surf1 = ax.plot_surface(X, Y, densities_norm_p[0], cmap=cm.coolwarm,\n linewidth=0, antialiased=False)\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n fig1.suptitle(r'Normalized ground state density $|\\psi|^2$ of the perturbated system using FDM')\n ax = fig1.add_subplot(1,2,2)\n ax.imshow(densities_norm_p[0],extent=[-L/2,L/2,-L/2,L/2])\n plt.savefig(os.path.join(path,'FDM_psi00_perturbated.png'))\n plt.close()\n\n fig2 = plt.figure(figsize=plt.figaspect(0.5))\n ax = fig2.add_subplot(1,2,1,projection='3d')\n surf2 = ax.plot_surface(X, Y, psi00_exact_density, cmap=cm.coolwarm,\n linewidth=0, antialiased=False)\n\n # Plotting the analytic ground state density\n fig1 = plt.figure(figsize=plt.figaspect(0.5))\n ax = fig1.add_subplot(1,2,1,projection='3d')\n surf1 = ax.plot_surface(X, Y, psi00_exact_density, 
cmap=cm.coolwarm,\n linewidth=0, antialiased=False)\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n fig1.suptitle(r'Analytical normalized ground state density $|\\psi|^2$')\n ax = fig1.add_subplot(1,2,2)\n ax.imshow(psi00_exact_density,extent=[-L/2,L/2,-L/2,L/2])\n plt.savefig(os.path.join(path,'exact_psi00.png'))\n plt.close()\n\n # Plot some of the other densities and save them as pdf\n for i in range(1,20):\n density = densities_norm[i]\n fig = plt.figure(figsize=plt.figaspect(0.5))\n plt.imshow(density,extent=[-L/2,L/2,-L/2,L/2])\n plt.title('n={}'.format(i))\n plt.savefig(os.path.join(path,'FDM_unperturbated{}.png'.format(i)))\n plt.close()\n\n density_p = densities_norm_p[i]\n fig = plt.figure(figsize=plt.figaspect(0.5))\n plt.imshow(density_p,extent=[-L/2,L/2,-L/2,L/2])\n plt.title('n={}'.format(i))\n plt.savefig(os.path.join(path,'FDM_perturbated{}.png'.format(i)))\n plt.close() \n\n # Plot analytical states until nx,ny = 5\n for nx in range(6):\n for ny in range(6):\n state = phi(X,nx)*phi(Y,ny)\n density = normalized_density(state,x)\n plt.figure()\n plt.imshow(density,extent=[-L/2,L/2,-L/2,L/2])\n plt.title('$n_x={}, n_y={}$'.format(nx,ny))\n plt.savefig(os.path.join(path,'analytical_state_{}_{}.png'.format(nx,ny)))\n plt.close()\n\n # Get analytical energies from nx,ny = 0 to 10\n n = 10\n energies = analytical_energies(n)\n\n # Plot k analytical and the FDM energies\n index = np.arange(k)\n plt.figure()\n plt.plot(index,energies[0:k],label='Analytical energies')\n plt.plot(index,E,label='Unperturbated energies')\n plt.plot(index,E_p,label='Perturbated energies')\n plt.legend()\n plt.xlabel('n')\n plt.ylabel(r'$\\tilde{E} = \\frac{E}{\\hbar\\omega}$')\n plt.title('Energies')\n plt.savefig(os.path.join(path,'energies.png'))\n plt.close()", "def onestatfile():\n with hp.File('StatsFile.h5', 'w') as onefile:\n alldata = np.empty((600, 4, 3, 500), dtype=np.float32)\n for j in range(600):\n for i in range(3):\n msd, vol, rms, asp = getstats(i, j+1)\n alldata[j, 0, i, :] = msd\n alldata[j, 1, i, :] = vol\n alldata[j, 2, i, :] = rms\n alldata[j, 3, i, :] = asp\n onefile.create_dataset('Stats', data=alldata, chunks=(1, 4, 3, 500),\n compression='gzip', compression_opts=9)", "def write_dftb_in(self, filename):\n\n outfile = open(filename, 'w')\n outfile.write('Geometry = GenFormat { \\n')\n #outfile.write(' <<< \"geo_end.gen\" \\n')\n outfile.write(' <<< %s \\n' %self.geo_fname)\n outfile.write('} \\n')\n outfile.write(' \\n')\n\n params = self.parameters.copy()\n\n s = 'Hamiltonian_MaxAngularMomentum_'\n for key in params:\n if key.startswith(s) and len(key) > len(s):\n break\n else:\n # User didn't specify max angular mometa. Get them from\n # the .skf files:\n symbols = set(self.atoms.get_chemical_symbols())\n for symbol in symbols:\n path = os.path.join(self.slako_dir,\n '{0}-{0}.skf'.format(symbol))\n l = read_max_angular_momentum(path)\n params[s + symbol] = '\"{}\"'.format('spdf'[l])\n\n # --------MAIN KEYWORDS-------\n previous_key = 'dummy_'\n myspace = ' '\n for key, value in sorted(params.items()):\n current_depth = key.rstrip('_').count('_')\n previous_depth = previous_key.rstrip('_').count('_')\n for my_backsclash in reversed(\n range(previous_depth - current_depth)):\n outfile.write(3 * (1 + my_backsclash) * myspace + '} \\n')\n outfile.write(3 * current_depth * myspace)\n if key.endswith('_') and len(value) > 0:\n outfile.write(key.rstrip('_').rsplit('_')[-1] +\n ' = ' + str(value) + '{ \\n')\n elif (key.endswith('_') and (len(value) == 0) \n and current_depth == 0): # E.g. 
'Options {'\n outfile.write(key.rstrip('_').rsplit('_')[-1] +\n ' ' + str(value) + '{ \\n')\n elif (key.endswith('_') and (len(value) == 0) \n and current_depth > 0): # E.g. 'Hamiltonian_Max... = {'\n outfile.write(key.rstrip('_').rsplit('_')[-1] +\n ' = ' + str(value) + '{ \\n')\n elif key.count('_empty') == 1:\n outfile.write(str(value) + ' \\n')\n elif ((key == 'Hamiltonian_ReadInitialCharges') and \n (str(value).upper() == 'YES')):\n f1 = os.path.isfile(self.directory + os.sep + 'charges.dat')\n f2 = os.path.isfile(self.directory + os.sep + 'charges.bin')\n if not (f1 or f2):\n print('charges.dat or .bin not found, switching off guess')\n value = 'No'\n outfile.write(key.rsplit('_')[-1] + ' = ' + str(value) + ' \\n')\n else:\n outfile.write(key.rsplit('_')[-1] + ' = ' + str(value) + ' \\n')\n # point\n if self.pcpot is not None and ('DFTB' in str(value)):\n outfile.write(' ElectricField = { \\n')\n outfile.write(' PointCharges = { \\n')\n outfile.write(\n ' CoordsAndCharges [Angstrom] = DirectRead { \\n')\n outfile.write(' Records = ' +\n str(len(self.pcpot.mmcharges)) + ' \\n')\n outfile.write(\n ' File = \"dftb_external_charges.dat\" \\n')\n outfile.write(' } \\n')\n outfile.write(' } \\n')\n outfile.write(' } \\n')\n previous_key = key\n\n current_depth = key.rstrip('_').count('_')\n for my_backsclash in reversed(range(current_depth)):\n outfile.write(3 * my_backsclash * myspace + '} \\n')\n #outfile.write('ParserOptions { \\n')\n #outfile.write(' IgnoreUnprocessedNodes = Yes \\n')\n #outfile.write('} \\n')\n #if self.do_forces:\n # outfile.write('Analysis { \\n')\n # outfile.write(' CalculateForces = Yes \\n')\n # outfile.write('} \\n')\n\n outfile.close()", "def sumofstate_HD(T):\n\n Q = np.float64(0.0)\n\n #--- nuclear spin statistics ------------\n g_even = 1 # hydrogen deuteride\n g_odd = 1\n # ---------------------------------------\n\n data = eJHD\n\n nCols = data.shape[1]\n # nCols is equal to the number of vibrational\n # states included in the summation\n\n # generate Q using each energy from the dataset\n for i in range(0, nCols):\n\n # select row for v=i\n row = data[:,i]\n\n # remove nan values\n x = row[np.logical_not(np.isnan(row))]\n\n # get the dimension (equal to J_max)\n nRows = x.shape[0]\n\n # iterate over the available energies\n for j in range(0, nRows):\n E = x[j]\n energy = (-1*E*H*C)\n\n factor = (2*j+1)*math.exp(energy/(K*T))\n\n if j % 2 == 0:\n factor = factor*g_even\n else:\n factor = factor*g_odd\n Q = Q+factor\n\n\n\n # return the sum of states for HD\n return Q", "def _ion_densities_datafiles(self):\n ne = self.ne_in\n nD = self.ni_in[0,:]\n nC = (ne-nD)/6.\n print(\"nC/nD: \"+str(np.mean(nC/nD)*100.)+\" %\")\n self.ni_in[0,:] = nD\n self.ni_in[1,:] = nC", "def density(self):\n return (1e-3*self.molar_mass) * self.pressure / (gas_constant * self.temperature) # kg/m^3", "def write_sum(self):\n with open('{}_Simulation_Fst.dat'.format(self.simulation_window), 'w+') as output:\n for line in self.fst_data:\n output.write(line)", "def export_1D_edp(self, filename=\"1Dedp.dat\", start=(-10,25), end=(30,-20), \n N=100):\n rho = []\n x0, z0 = start\n x1, z1 = end\n xpoints = np.linspace(x0, x1, N)\n zpoints = np.linspace(z0, z1, N)\n for x, z in zip(xpoints, zpoints):\n tmp = self.phase * self.F * np.cos(self.qx*x+self.qz*z)\n dist = np.sqrt((x-x0)**2 + (z-z0)**2)\n rho.append([x, z, dist, tmp.sum(axis=0)])\n rho = np.array(rho, float)\n X, Z, DIST, EDP = rho[:,0], rho[:,1], rho[:,2], rho[:,3]\n with open(filename, 'w') as f:\n f.write(\"x z dist ED\\n\")\n 
for x, z, dist, edp in zip(X, Z, DIST, EDP):\n f.write(\"{0: 3.1f} {1: 3.1f} {2: 3.1f} {3: }\\n\".format(x, z, dist, edp))", "def __init__(self, path, verbose=False, mindens=1e3, **kwargs):\n self.path = path\n self.aux = os.path.dirname(os.path.realpath(__file__))\n self.aux = self.aux.replace('analysis', 'aux/')\n self.mindens = mindens\n self.verbose = verbose\n if not self.verbose:\n warnings.simplefilter('ignore')\n\n # Read in the .fits data. HDU[1] is the grid and HDU[4] are the level\n # populations. [2] and [3] can be ignored.\n\n self.hdu = fits.open(self.path)\n self.grid = self.hdu[1]\n if self.verbose:\n for c in self.grid.columns:\n print c\n print('\\n')\n self.names = self.grid.columns.names\n\n # Remove all the sink particles and convert units to [au].\n\n self.notsink = ~self.grid.data['IS_SINK']\n self.xvals = self.grid.data['x1'][self.notsink] / sc.au\n self.yvals = self.grid.data['x2'][self.notsink] / sc.au\n self.zvals = self.grid.data['x3'][self.notsink] / sc.au\n\n # Extract the physical properties. Assume that the densities are only\n # ever H2 or [oH2, pH2]. If the latter, allow density to be the sum.\n # Individual values can still be accessed through _density.\n\n self.gtemp = self.grid.data['TEMPKNTC'][self.notsink]\n self.dtemp = self.grid.data['TEMPDUST'][self.notsink]\n self.dtemp = np.where(self.dtemp == -1, self.gtemp, self.dtemp)\n\n self.ndens = len([n for n in self.names if 'DENSITY' in n])\n if self.ndens > 1 and self.verbose:\n print('Assuming DENSITY1 and DENSITY2 are oH2 and pH2.')\n self._dens = {d: self.grid.data['DENSITY%d' % (d+1)][self.notsink]\n for d in range(self.ndens)}\n self.dens = np.sum([self._dens[k] for k in range(self.ndens)], axis=0)\n\n self.nabun = len([n for n in self.names if 'ABUNMOL' in n])\n if self.nabun > 1:\n raise NotImplementedError()\n self.abun = self.grid.data['ABUNMOL1'][self.notsink]\n self.velo = np.array([self.grid.data['VEL%d' % i][self.notsink]\n for i in [1, 2, 3]])\n self.turb = self.grid.data['TURBDPLR'][self.notsink]\n\n # Remove all particles that fall below the minimum density.\n\n self.dmask = self.dens > self.mindens\n self.xvals = self.xvals[self.dmask]\n self.yvals = self.yvals[self.dmask]\n self.zvals = self.zvals[self.dmask]\n self.gtemp = self.gtemp[self.dmask]\n self.dtemp = self.dtemp[self.dmask]\n self.dens = self.dens[self.dmask]\n self.abun = self.abun[self.dmask]\n self.turb = self.turb[self.dmask]\n\n # Remove all the particles that are |x_i| > rmax.\n\n self.rmax = kwargs.get('rmax', 20)\n self.rmask = np.where(abs(self.xvals) > self.rmax, 1, 0)\n self.rmask += np.where(abs(self.yvals) > self.rmax, 1, 0)\n self.rmask += np.where(abs(self.zvals) > self.rmax, 1, 0)\n self.rmask = np.where(self.rmask == 0, True, False)\n self.xvals = self.xvals[self.rmask]\n self.yvals = self.yvals[self.rmask]\n self.zvals = self.zvals[self.rmask]\n self.gtemp = self.gtemp[self.rmask]\n self.dtemp = self.dtemp[self.rmask]\n self.dens = self.dens[self.rmask]\n self.abun = self.abun[self.rmask]\n self.turb = self.turb[self.rmask]\n\n # Excitation properties. Remove all the sink particles.\n\n pops = self.hdu[4].data.T\n idxs = [i for i, b in enumerate(self.notsink) if not b]\n self.levels = np.delete(pops, idxs, axis=1)\n idxs = [i for i, b in enumerate(self.dmask) if not b]\n self.levels = np.delete(self.levels, idxs, axis=1)\n idxs = [i for i, b in enumerate(self.rmask) if not b]\n self.levels = np.delete(self.levels, idxs, axis=1)\n\n # Apply the gridding. 
Note we include a single point radial grid to\n # better interface with the function in analysemodel.\n\n self.xgrid = np.zeros(1)\n self.ygrid = self.estimate_grids(**kwargs)\n self.gridded = {}\n self.gridded['dens'] = self.grid_param(self.dens)\n self.gridded['gtemp'] = self.grid_param(self.gtemp)\n self.gridded['dtemp'] = self.grid_param(self.dtemp)\n self.gridded['abun'] = self.grid_param(self.abun)\n self.gridded['turb'] = self.grid_param(self.turb)\n self.gridded['levels'] = {}\n self.grid_levels(kwargs.get('nlevels', 5))\n\n return", "def create_dat_file(\n src_depth: float,\n epi_in_km: float,\n baz: float,\n focal_mech: [float],\n dt: float,\n save_path: str,\n bm_file_path: str,\n M0: float = None,\n fdom: str = 1.000,\n):\n\n bm_file = bm_file_path\n\n f = np.loadtxt(bm_file, skiprows=5)\n f_ud = np.flipud(f)\n\n radius_mars = 3389.5 * 1e3 # f_ud[0][0] # 3390 (km)\n\n # radius_of_planet = 3390\n # km_per_deg = np.pi * (radius_mars * 1e-3) / 180.0\n # dist_in_km = epi_in_km * np.pi * (radius_mars * 1e-3) / 180.0\n dist = epi_in_km\n\n if baz < 0:\n baz *= -1\n rec_az = baz\n rec_z = 0.0\n\n src_x = 0.0\n src_y = 0.0\n src_z = src_depth\n or_time = 0.0\n s_strength = 1.0\n\n assert (M0 is None and len(focal_mech) == 6) or (M0 is not None and len(focal_mech) == 3), (\n \"focal_mech length is incorrect. \"\n \"If you specify M0, focal_mech is [strike,dip,rake]. \"\n \"Otherwise focal_mech is [m_rr, m_tt, m_pp, m_rt, m_rp, m_tp]\"\n )\n\n for i in range(len(focal_mech)):\n focal_mech[i] += 0\n\n M_tt_ins = focal_mech[1] # / 1e14\n M_pp_ins = focal_mech[2] # / 1e14\n M_rr_ins = focal_mech[0] # / 1e14\n M_rp_ins = focal_mech[4] # / 1e14\n M_rt_ins = focal_mech[3] # / 1e14\n M_tp_ins = focal_mech[5] # / 1e14\n\n moment_tensor = f\"{M_tt_ins:10.4f}{-M_tp_ins+0:10.4f}{M_rt_ins:10.4f}{M_pp_ins:10.4f}{-M_rp_ins+0:10.4f}{M_rr_ins:10.4f}\"\n # moment_tensor = f\"{M_tt_ins:10.4f}{M_tp_ins:10.4f}{M_rt_ins:10.4f}{M_pp_ins:10.4f}{M_rp_ins:10.4f}{M_rr_ins:10.4f}\"\n\n # model = TauPyModel(taup_path)\n # model_layers = model.model.s_mod.v_mod.layers\n\n with open(join(save_path, \"crfl.dat\"), \"w\") as f:\n f.write(\"Test name\\n\")\n f.write(\" 0 0 0 0 0 0 0 1 1 1 2 1 0 0 1 0 1 2 0 1 1\\n\")\n f.write(\" 5 1 0 1 1\\n\")\n\n # Get the indices of the velocity model with blocky description\n indices = np.setdiff1d(\n np.arange(len(f_ud[:, 0])), np.unique(f_ud[:, 0], return_index=True)[1]\n )\n indices1 = indices - 1\n inds = np.sort(np.hstack((0, np.hstack((indices1, indices)))))\n\n for i, layer in enumerate(f_ud):\n if layer[0] == 0.0:\n continue\n depth = (radius_mars - layer[0]) * 1e-3\n dens = layer[1] * 1e-3\n vp = layer[2] * 1e-3\n vs = layer[3] * 1e-3\n qka = layer[4] # qka\n qmu = layer[5] # qmu\n vph = layer[6]\n vsh = layer[7]\n eta = layer[8]\n\n qs = qmu\n L = (4 / 3) * (vs / vp) ** 2\n qp = 1 / (L * (1 / qmu) + (1 - L) * (1 / qka))\n if np.isnan(qp):\n qp = qka\n qs = 10.0\n\n # Check if part of velocity model is part of the gradient:\n if i not in inds and vs != 0.0:\n # prev_depth = (radius_mars - f_ud[i - 1, 0]) * 1e-3\n # layer_thickness = depth - prev_depth\n # factor = 0.07\n # layer_thickness_lim = factor * (\n # vs / fdom\n # ) # layer limit should be less then 1/10 of wavelength\n # vs0 = f_ud[i - 1, 3] * 1e-3\n # if layer_thickness_lim > factor * (vs0 / fdom):\n # layer_thickness_lim = factor * (vs0 / fdom)\n # import math\n\n # n_layers = math.ceil(layer_thickness / layer_thickness_lim)\n n_layers = 1\n else:\n n_layers = 1\n text = 
f\"{depth:10.4f}{vp:10.4f}{qp:10.4f}{vs:10.4f}{qs:10.4f}{dens:10.4f}{n_layers:10d}\\n\"\n f.write(text)\n f.write(\"\\n\")\n f.write(f\"{rec_z:10.4f}\\n\")\n f.write(f\"{src_x:10.4f}{src_y:10.4f}{src_z:10.4f}{or_time:10.4f}{s_strength:10.4f}\\n\")\n f.write(f\"{moment_tensor}\\n\")\n f.write(f\"{dist:10.4f}{dist:10.4f}{0.:10.4f}{rec_az:10.4f}{1:10d}\\n\")\n f.write(f\"{dist:10.4f}\\n\")\n f.write(f\"{rec_az:10.4f}\\n\")\n f.write(f\"{12.:10.4f} {-300.:10.4f}\\n\")\n f.write(\" 3.0000 3.5000 23.5000 25.0000 650\\n\")\n f.write(f\" 0.0100 0.0133{fdom:10.4f} 1.0300 0.0000\\n\")\n # f.write(\" 0.2420 32768 0 2 0.2420 245.7600\\n\")\n npts = 32768\n t_sigma = 0.3 * dt * npts\n f.write(f\"{dt:10.4f}{npts:10d}{0:10d}{2:10d}{dt:10.4f}{t_sigma:10.4f}\\n\")\n\n f.close()", "def frequencyEstimator(ctd, ladcp, bathy, rho_neutral, strain,\\\n wl_min=100, wl_max=500, full_set=False):\n \n U, V, p_ladcp = oc.loadLADCP(ladcp)\n S, T, p_ctd, lat, lon = oc.loadCTD(ctd)\n \n \n Ek, Ep, Etotal, eta_power,\\\n Upow, Vpow, UVkx, eta_kx,\\\n N2mean, wl_min, wl_max,\\\n dist, depths, U, V, p_ladcp,\\\n Uspec, Vspec, etaSpec =\\\n internal_wave_energy(ctd, ladcp,\\\n rho_neutral,\\\n bathy, strain, wl_min=wl_min, wl_max=wl_max)\n \n eta_power_export = np.vstack(eta_power)\n eta_kx_export = np.vstack(eta_kx)\n Up_export = np.vstack(Upow)\n Vp_export = np.vstack(Vpow)\n UVkx_export = np.vstack(UVkx)\n \n\n np.savetxt('eta_power.csv',eta_power_export)\n np.savetxt('eta_kx.csv',eta_kx_export)\n np.savetxt('Upow.csv',Up_export)\n np.savetxt('Vpow.csv',Vp_export)\n np.savetxt('UVkx.csv',UVkx_export)\n\n\n \n \n # look for wavenumber maxes\n \n \n # Use ratios to solve for internal frequncys\n f = np.nanmean(gsw.f(lat))\n \n omega = f*np.sqrt(Etotal/(Ek-Ep))\n\n m = np.mean((wl_min, wl_max))\n m = (2*np.pi)/m\n kh = (m/np.sqrt(np.abs(N2mean)))*(np.sqrt(omega**2 - f**2))\n mask = kh == 0\n kh[mask]= np.nan\n lambdaH = 1e-3*(2*np.pi)/kh\n \n # get mean spectra\\\n \n eta_mean = []\n for station in eta_power:\n eta_mean.append(np.nanmean(station, axis=0))\n \n eta_mean = np.vstack(eta_mean).T\n \n \n aspect = kh/m \n \n file2save = pd.DataFrame(lambdaH)\n file2save.index = np.squeeze(depths)\n file2save.to_excel('lambdaH.xlsx')\n file2save = pd.DataFrame(kh)\n file2save.index = np.squeeze(depths)\n file2save.to_excel('Kh.xlsx')\n file2save = pd.DataFrame(omega)\n file2save.index = np.squeeze(depths)\n file2save.to_excel('omega.xlsx')\n file2save = pd.DataFrame(aspect)\n file2save.index = np.squeeze(depths)\n file2save.to_excel('aspect.xlsx')\n \n np.savetxt('eta_mean.csv', eta_mean)\n \n \n np.savetxt('kh.csv', kh)\n np.savetxt('lamdah.csv', lambdaH)\n np.savetxt('omega.csv', omega)\n \n if full_set:\n return lambdaH, kh, omega, N2mean,\\\n dist, depths, U, V, p_ladcp,\\\n Uspec, Vspec, etaSpec, aspect\n \n else:\n return lambdaH, kh, omega, N2mean", "def save_state(self, file):\n np.savez(file, z_mn=self.z_mn, theta=self.theta, phi=self.phi,\n z_best=self.z_best, ll_best=self.ll_best, log=self.log)", "def resultInHDF5(self, iStep):\n filePath = os.path.expanduser('~/LBMResults')\n resultFile = filePath + '/SimulationResults.h5'\n dataFile = tb.open_file(resultFile, 'a')\n #output the densities of fluids\n for i in sp.arange(self.typesFluids):\n dataFile.create_array('/FluidMacro', 'FluidDensityType%gin%g' % (i, iStep), \\\n self.fluidsDensity[i])\n dataFile.create_array('/FluidVelocity', 'FluidVelocityXAt%g' % iStep, \\\n self.physicalVX)\n dataFile.create_array('/FluidVelocity', 'FluidVelocityYAt%g' % iStep, \\\n 
self.physicalVY)\n dataFile.close()", "def hf_energy(hf_state, hamiltonian_sp):\n qpu = get_default_qpu()\n res = qpu.submit(hf_state.to_job(job_type=\"OBS\", observable=hamiltonian_sp))\n return res.value", "def testDensityCalculation(self):\n known_densities = np.array([1.76776695e-01, 1.76776695e-01, 1.76776695e-01,\n 4.59619433e-01, 4.59619433e-01, 1.76776695e-01, 5.00000000e-01, \n 8.84538011e-02, 3.40206909e-02, 2.26040275e-04])\n densities = nb._get_local_densities() \n np.testing.assert_allclose(densities, known_densities)", "def write_initdata(xy0, v0, NL, BND, h, beta, outdir):\n dio.ensure_dir(outdir)\n M = np.hstack((xy0, v0))\n np.savetxt(outdir + 'NL.txt', NL, fmt='%i', delimiter=',', header='NL (Neighbor List)')\n np.savetxt(outdir + 'BND.txt', BND, fmt='%i', header='BND (Boundary List)')\n np.savetxt(outdir + 'xyv0.txt', M, delimiter=',', header='xy0 (initial positions) v0 (initial velocities)')\n with open(outdir + 'h.txt', \"w\") as hfile:\n hfile.write(\"# h (time step) \\n{0:4f}\".format(h))\n if beta != 'none':\n with open(outdir + 'beta.txt', \"w\") as betafile:\n betafile.write(\"# beta (damping coeff) \\n{0:4f}\".format(beta))", "def getDensityOfStates(self, Elist, linear):\n\n\t\timport states\n\n\t\t# Create energies in cm^-1 at which to evaluate the density of states\n\t\tconv = constants.h * constants.c * 100.0 * constants.Na # [=] J/mol/cm^-1\n\t\tEmin = min(Elist) / conv\n\t\tEmax = max(Elist) / conv\n\t\tdE = (Elist[1] - Elist[0]) / conv\n\t\tElist0 = np.arange(Emin, Emax+dE/2, dE)\n\n\t\t# Prepare inputs for density of states function\n\t\tvib = np.array([mode.frequency for mode in self.modes if isinstance(mode, HarmonicOscillator)])\n\t\trot = np.array([mode.frequencies for mode in self.modes if isinstance(mode, RigidRotor)])\n\t\thind = np.array([[mode.frequency, mode.barrier] for mode in self.modes if isinstance(mode, HinderedRotor)])\n\t\tif len(hind) == 0: hind = np.zeros([0,2],np.float64)\n\t\tlinear = 1 if linear else 0\n\t\tsymm = self.symmetry\n\n\t\t# Calculate the density of states\n\t\tdensStates, msg = states.densityofstates(Elist0, vib, rot, hind, symm, linear)\n\t\tmsg = msg.strip()\n\t\tif msg != '':\n\t\t\traise Exception('Error while calculating the density of states for species %s: %s' % (self, msg))\n\n\t\t# Convert density of states from (cm^-1)^-1 to mol/J\n\t\tdensStates /= conv\n\n\t\t# Return result\n\t\treturn densStates", "def main():\n regexham = r'\\s+\\((\\d+,\\s*\\d+)\\)\\s+([\\-+]?\\d+\\.\\d+[eEdD]?[\\-+]?\\d+)' #to extract the Hamiltonian.\n root = '.'\n #fname = 'output_files/'\n ciffci = CIFlow_Reader('testfci.dat', regexp = regexham , read_ham= True)\n ciffcipar = CIFlow_Reader( 'psi0_output10outputfci.dat', regexp = regexham , read_ham = True)\n #print ciffci.calc_overlap(cifdoci)\n #print e.get_groundstate('00000000000011|00000000000011') \n\n psir = rp.PsiReader('psi0_output10.dat', isbig = False, numorbs = -1 , read_ints = False)\n\n detlist = dw.cimain(psir.values['nalpha'],psir.values['nbeta'], psir.values['norb'], [range(1,psir.values['nalpha']+psir.values['nbeta']), []], [] , fname = 'determinants.dat' ,ref = [lambda x , y , z : psir.get_hf_orbs()] , add_frozen = 0, write = False) #CISDDOCI\n count = 0\n for det in detlist:\n for det2 in detlist:\n #+ because the eigenvectors have already a different phasefactor of 1.\n if abs(ciffci.get_mat_element(det[0]+'|'+det[1], det2[0]+'|'+det2[1]) - ciffcipar.get_mat_element(det[0]+'|'+det[1], det2[0]+'|'+det2[1]) ) > 1e-10 :\n print 'difference in hamiltonian row: ' , 
det[0]+'|'+det[1] , \" col: \" , det2[0]+'|'+det2[1] , 'fci: ', ciffci.get_mat_element(det[0]+'|'+det[1], det2[0]+'|'+det2[1]) , 'fciaddres: ' , ciffcipar.get_mat_element(det[0]+'|'+det[1],det2[0]+'|'+det2[1]) \n count += 1\n print 'There were ' , count , ' different elements'", "def statee(h):\n # Convert height to SI\n hsi = h*0.3048\n\n # Get data\n zsi, tsi, psi, dsi = statsi(hsi)\n\n # Convert back to English\n z = zsi/0.3048\n t = tsi*1.8\n p = psi*0.02088543\n d = dsi*0.001940320\n\n return z, t, p, d", "def write_density_modification_script(self, mtz_in, pdb_in, mtz_out, log_file):\n solc = self.get_solvent_content(pdb_in)\n \n F_column_labels = self.get_F_column_labels(self.mtz_file) #column labels are those inherited from the extrapolated structure factors\n\n script_out = 'launch_dm.sh'\n i = open(script_out, 'w')\n i.write('#!/bin/sh \\n\\\n\\n\\\n#dm:\\n\\\ndm hklin %s hklout %s <<eor > %s \\n\\\nSOLC %.3f\\n\\\nMODE SOLV HIST MULTI SAYR\\n\\\nCOMBINE %s\\n\\\nNCYC %d\\n\\\nLABI FP=%s SIGFP=SIG%s PHIO=PHIC_ALL FOMO=FOM\\n\\\nLABO FDM=FDM PHIDM=PHIDM\\n\\\neor\\n' % (mtz_in, mtz_out, log_file, solc, self.density_modification.combine, self.density_modification.cycles, F_column_labels, F_column_labels))\n\n ccp4_map_name = re.sub(r\".mtz$\", \".ccp4\", mtz_out)\n\n i.write('#generate map in ccp4 format\\n\\\nfft hklin %s mapout %s <<eof > fft.log\\n\\\nLABI F1=FDM PHI=PHIDM\\n\\\neof' % (mtz_out, ccp4_map_name))\n\n i.close()\n os.system(\"chmod +x %s\" % (script_out))\n return script_out", "def density_of_state_plot(N=400,a=1.0,eita=0.01):\n foot_step=2*np.pi/N\n k=np.arange(0.0,2*np.pi/a,foot_step)\n Ek=band_energy(k)\n E=np.arange(-3.0,3.0,0.01)\n Ek.shape=(N,1)\n E.shape=(1,600)\n \"\"\"Reshape E and Ek series with broadcasting method.\"\"\"\n dirac_function=np.imag(np.true_divide(1/np.pi,np.subtract(E-Ek,1j*eita)))\n D=np.sum(np.true_divide(dirac_function,N),axis=0)\n \"\"\"Calculate the density of state with lorentzian broadenning method.\"\"\" \n E.shape=(600)\n plt.plot(D,E)", "def writeFFDFile(fileName, nBlocks, nx, ny, nz, points):\n\n f = open(fileName, \"w\")\n\n f.write(\"%d\\n\" % nBlocks)\n for i in range(nBlocks):\n f.write(\"%d %d %d \" % (nx[i], ny[i], nz[i]))\n # end\n f.write(\"\\n\")\n for block in range(nBlocks):\n for k in range(nz[block]):\n for j in range(ny[block]):\n for i in range(nx[block]):\n f.write(\"%f \" % points[block][i, j, k, 0])\n # end\n # end\n # end\n f.write(\"\\n\")\n\n for k in range(nz[block]):\n for j in range(ny[block]):\n for i in range(nx[block]):\n f.write(\"%f \" % points[block][i, j, k, 1])\n # end\n # end\n # end\n f.write(\"\\n\")\n\n for k in range(nz[block]):\n for j in range(ny[block]):\n for i in range(nx[block]):\n f.write(\"%f \" % points[block][i, j, k, 2])\n # end\n # end\n # end\n # end\n f.close()\n return", "def write_mat_file(self):\n mat_dict = {}\n mat_dict['Lx_p'] = self.Lx_p\n mat_dict['Ly_p'] = self.Ly_p\n mat_dict['Lz_p'] = self.Lz_p\n mat_dict['Lo'] = self.obst.get_Lo()\n mat_dict['Ny_divs'] = self.N_divs\n mat_dict['rho_p'] = self.rho_p\n mat_dict['nu_p'] = self.nu_p\n mat_dict['snl'] = list(np.union1d(self.obst_list[:],self.solid_list[:]))\n mat_dict['inl'] = list(self.inlet_list[:])\n mat_dict['onl'] = list(self.outlet_list[:])\n\n scipy.io.savemat('geometry_description',mat_dict)", "def test_density(self):\n sol = Mader(p_cj=3.0e11, d_cj=8.0e5, gamma=3.0, u_piston=0.0)\n r = np.array([0.7, 0.8])\n t = 6.25e-6\n solrt = sol(r, t)\n np.testing.assert_allclose(solrt.density[0], 2.26666666666663)", "def 
ldos_finite(h,e=0.0,n=10,nwf=4,delta=0.0001):\n if h.dimensionality!=1: raise # if it is not one dimensional\n intra = csc(h.intra) # convert to sparse\n inter = csc(h.inter) # convert to sparse\n interH = inter.H # hermitian\n m = [[None for i in range(n)] for j in range(n)] # full matrix\n for i in range(n): # add intracell\n m[i][i] = intra\n for i in range(n-1): # add intercell\n m[i][i+1] = inter\n m[i+1][i] = interH\n m = bmat(m) # convert to matrix\n (ene,wfs) = slg.eigsh(m,k=nwf,which=\"LM\",sigma=0.0) # diagonalize\n wfs = wfs.transpose() # transpose wavefunctions\n dos = (wfs[0].real)*0.0 # calculate dos\n for (ie,f) in zip(ene,wfs): # loop over waves\n c = 1./(1.+((ie-e)/delta)**2) # calculate coefficient\n dos += np.abs(f)*c # add contribution\n odos = spatial_dos(h,dos) # get the spatial distribution\n go = h.geometry.supercell(n) # get the supercell\n write_ldos(go.x,go.y,odos) # write in a file\n return dos # return the dos", "def plot_density_of_states(xlim=(-10, 5), ylim=(-1.5, 1.5), fmt='pdf'):\n\n efermi = Vasprun('vasprun.xml').efermi\n dos_lines = open ('DOSCAR').readlines()\n\n x, up, down = [], [], []\n nedos = Incar.from_file('INCAR').as_dict()['NEDOS'] - 1\n\n for line in dos_lines[6:6+nedos]:\n split_line = line.split()\n x.append(float(split_line[0]) - efermi)\n up.append(float(split_line[1]))\n down.append(-float(split_line[2]))\n\n x, up, down = np.array(x), np.array(up), np.array(down)\n sum = up + down\n\n ax = plt.figure().gca()\n ax.set_xlim(xlim[0], xlim[1])\n ax.set_ylim(ylim[0], ylim[1])\n\n ax.set_xlabel(r'$\\mathrm{E\\/(eV)}$')\n ax.set_ylabel(r'$\\mathrm{Density\\/of\\/States$')\n ax.set_xticklabels([r'$\\mathrm{%s}$' % t for t in ax.get_xticklabels()])\n ax.set_yticklabels([r'$\\mathrm{%s}$' % t for t in ax.get_yticklabels()])\n\n ax.plot(x, up, color='red' )\n ax.plot(x, down, color='green')\n ax.plot(x, sum, color='black' )\n if fmt is not None:\n plt.savefig('density_of_states.{}'.format(fmt))\n else:\n return ax\n\n plt.close()", "def statsi(h):\n\n # Define constants\n zsa = np.array([0.0, 11000.0, 20000.0, 32000.0, 47000.0, 52000.0, 61000.0, 79000.0, 9.9e20])\n Tsa = np.array([288.15, 216.65, 216.65, 228.65, 270.65, 270.65,252.65, 180.65, 180.65])\n g = 9.80665\n R = 287.0528\n Re = 6346766.0\n Psa = 101325.0\n\n # Calculate geopotential altitude\n z = Re*h/(Re+h)\n\n # Loop through atmosphere layers\n for i in range(8):\n \n # Calculate layer temperature gradient\n Lt = -(Tsa[i+1]-Tsa[i])/(zsa[i+1]-zsa[i])\n\n # If no temperature gradient\n if Lt == 0.0:\n\n # Are we in this layer of the atmosphere?\n if z <= zsa[i+1]:\n t = Tsa[i] # Temp isn't changing\n p = Psa*np.exp(-g*(z-zsa[i])/R/Tsa[i])\n d = p/R/t\n break\n\n # We need to go higher\n else:\n Psa *= np.exp(-g*(zsa[i+1]-zsa[i])/R/Tsa[i])\n\n # Temperature gradient\n else:\n ex = g/R/Lt\n if z <= zsa[i+1]:\n t = Tsa[i]-Lt*(z-zsa[i])\n p = Psa*(t/Tsa[i])**ex\n d = p/R/t\n break\n else:\n Psa *= (Tsa[i+1]/Tsa[i])**ex\n\n # We have left the atmosphere...\n else:\n t = Tsa[-1]\n p = 0.0\n d = 0.0\n\n return z, t, p, d", "def median_path(self,fl):\n nodes = {}\n edges = self.fl2prediction[fl]\n for i in range(len(edges)):\n if edges[i] == 1:\n node_tup = self.edge_index2tuple[i]\n nodes[node_tup[0]] = True\n nodes[node_tup[1]] = True\n fn_prefix = \"psdd/paths/may14/median_%d_%d_%d_%d\" % (self.rows,self.cols,fl[0],fl[1])\n out_fn = \"%s_coords.txt\" % fn_prefix\n with open(out_fn,'w') as outfile:\n for node in nodes.keys():\n outfile.write(\"%s,%s\\n\" % 
('0',str(self.node2median[node])[1:-1]))", "def export_2D_edp(self, filename=\"2Dedp.dat\", xmin=-100, xmax=100, \n zmin=-100, zmax=100, N=201):\n rho_xz = []\n xgrid = np.linspace(xmin, xmax, num=N)\n zgrid = np.linspace(zmin, zmax, num=N)\n for x in xgrid:\n for z in zgrid:\n tmp = self.phase * self.F * np.cos(self.qx*x+self.qz*z)\n rho_xz.append([x, z, tmp.sum(axis=0)])\n rho_xz = np.array(rho_xz, float) \n X, Y, Z= rho_xz[:,0], rho_xz[:,1], rho_xz[:,2]\n with open(filename, 'w') as f:\n f.write(\"x z ED\\n\")\n for x, y, z in zip(X, Y, Z):\n f.write(\"{0: 3.1f} {1: 3.1f} {2: }\\n\".format(x, y, z))", "def export_hamilton(args):\n if args.type == 'filling_out':\n clarity_epp.export.hamilton.samplesheet_filling_out(lims, args.process_id, args.output_file)\n elif args.type == 'purify':\n clarity_epp.export.hamilton.samplesheet_purify(lims, args.process_id, args.output_file)", "def save_electronic_eigenstates(m_max, nu_max, mu_max, R_grid, beta, theta):\n\n #Parallel stuff\n #--------------\n #Get processor 'name'.\n my_id = pypar.rank() \n \n #Get total number of processors.\n nr_procs = pypar.size()\n \n #Get number of tasks.\n nr_tasks = len(R_grid)\n\n #Get a list of the indices of this processors share of R_grid. \n my_tasks = nice_stuff.distribute_work(nr_procs, nr_tasks, my_id)\n\n #The processors will be writing to the same file.\n #In order to avoid problems, the procs will do a relay race of writing to\n #file. This is handeled by blocking send() and receive().\n #Hopefully there will not be to much waiting.\n\n #ID of the processor that will start writing.\n starter = 0\n\n #ID of the processor that will be the last to write.\n ender = (nr_tasks - 1) % nr_procs\n\n #Buffer for the baton, i.e. the permission slip for file writing.\n baton = r_[0]\n\n #The processor one is to receive the baton from.\n receive_from = (my_id - 1) % nr_procs \n\n #The processor one is to send the baton to.\n send_to = (my_id + 1) % nr_procs \n #-------------------------------\n\n \n #Initializing the HDF5 file\n #--------------------------\n if my_id == 0:\n\t#Creates a config instance.\n\tmy_config = config.Config(m = m_max, nu = nu_max, mu = mu_max, \n\t R = R_grid[0], beta = beta, theta = theta)\n\t\n\t#Number of basis functions.\n\tbasis_size = (2 * m_max + 1) * (nu_max + 1) * (mu_max + 1)\n\n\t#Generate a filename.\n\tfilename = name_gen.electronic_eigenstates_R(my_config)\n\n\tf = tables.openFile(filename, 'w')\n\ttry:\n\t f.createArray(\"/\", \"R_grid\", R_grid)\t \n\t \n\t #Looping over the m values.\n\t for m in range(-1 * m_max, m_max + 1):\n\t\t#Creating an m group in the file.\n\t\tm_group = name_gen.m_name(m)\n\t\tf.createGroup(\"/\", m_group)\n\t\t\n\t\t#Looping over th q values.\n\t\tfor q in range(mu_max + 1):\n\t\t #Creating a q group in the m group in the file.\n\t\t q_group = name_gen.q_name(q)\n\t\t f.createGroup(\"/%s/\"%m_group, q_group)\n\n\t\t #Initializing the arrays for the eigenvalues and states.\n\t\t f.createCArray('/%s/%s/'%(m_group, q_group),'E', \n\t\t\ttables.atom.FloatAtom(), \n\t\t\t(basis_size/(mu_max + 1), nr_tasks),\n\t\t\tchunkshape=(basis_size/(mu_max + 1), 1))\n\t\t \n\t\t f.createCArray('/%s/%s/'%(m_group, q_group),'V', \n\t\t\ttables.atom.ComplexAtom(16), \n\t\t\t(basis_size, basis_size/(mu_max + 1), nr_tasks),\n\t\t\tchunkshape=(basis_size, basis_size/(mu_max + 1), 1))\n\t \n\tfinally:\n\t f.close()\n\t\n\t#Save config instance.\n\tmy_config.save_config(filename)\n #----------------------------------\n\n\n #Solving the TISE\n #----------------\n #Looping 
over the tasks of this processor.\n for i in my_tasks:\n\t#Creating TISE instance.\n\ttise = tise_electron.TISE_electron(m = m_max, nu = nu_max, \n\t mu = mu_max, R = R_grid[i], beta = beta, theta = theta)\n\t\n\t#Diagonalizing the hamiltonian.\n\tE,V = tise.solve()\n\t\n\t#First file write. (Send, but not receive baton.)\n\tif starter == my_id:\n\t #Write to file.\n\t tise.save_eigenfunctions_R(E, V, R_grid[i])\n\n\t #Avoiding this statement 2nd time around.\n\t starter = -1\n\n\t #Sending the baton to the next writer.\n\t pypar.send(baton, send_to, use_buffer = True)\n\t\n\t#Last file write. (Receive, but not send baton.)\n\telif i == my_tasks[-1] and ender == my_id :\n\t #Receiving the baton from the previous writer.\n\t pypar.receive(receive_from, buffer = baton)\n\n\t #Write to file.\n\t tise.save_eigenfunctions_R(E, V, R_grid[i])\n\t\n\t#The rest of the file writes.\n\telse:\n\t #Receiving the baton from the previous writer.\n\t pypar.receive(receive_from, buffer = baton)\n\n\t #Write to file.\n\t tise.save_eigenfunctions_R(E, V, R_grid[i])\n\n\t #Sending the baton to the next writer.\n\t pypar.send(baton, send_to, use_buffer = True)\n\t\n\t\n\t#Showing the progress of the work.\n\tif my_id == 0:\n\t nice_stuff.status_bar(\"Electronic BO calculations\", \n\t\ti, len(my_tasks))\n #----------------------------\n \n #Letting everyone catch up. \n pypar.barrier()\n\n #Since the sign of the eigenfunctions are completely arbitrary, one must\n #make sure they do not change sign from one R to another.\n if my_id == 0:\n\ttise.align_all_phases()\n \n #Letting 0 catch up. \n pypar.barrier()", "def save_to_disk(self, filename='ens_state.nc'):\n self.to_netcdf(filename)", "def wrhdf(hdf_filename, x, y, z, f):\n\n # Create an HDF file\n sd_id = SD(hdf_filename, SDC.WRITE | SDC.CREATE | SDC.TRUNC)\n\n if f.dtype == np.float32:\n ftype = SDC.FLOAT32\n elif f.dtype == np.float64:\n ftype = SDC.FLOAT64\n\n # Create the dataset (Data-Set-2 is the name used by the psi data)).\n sds_id = sd_id.create(\"Data-Set-2\", ftype, f.shape)\n\n #Get number of dimensions:\n ndims = np.ndim(f)\n\n #Set the scales:\n for i in range(0,ndims):\n dim = sds_id.dim(i)\n if i == 0:\n if x.dtype == np.float32:\n stype = SDC.FLOAT32\n elif x.dtype == np.float64:\n stype = SDC.FLOAT64\n dim.setscale(stype,x)\n elif i == 1:\n if y.dtype == np.float32:\n stype = SDC.FLOAT32\n elif y.dtype == np.float64:\n stype = SDC.FLOAT64\n dim.setscale(stype,y)\n elif i == 2: \n if z.dtype == np.float32:\n stype = SDC.FLOAT32\n elif z.dtype == np.float64:\n stype = SDC.FLOAT64\n dim.setscale(stype,z)\n\n # Write the data:\n sds_id.set(f)\n\n # Close the dataset:\n sds_id.endaccess()\n\n # Flush and close the HDF file:\n sd_id.end()", "def minfo():\n model = np.loadtxt('cumul_depths.tmp',dtype={'names': ('H'),'formats': \\\n ('f4')}, usecols=[0])\n d = model['H']\n model = np.loadtxt('start_model.dat',dtype={'names': (\"S\"),'formats': \\\n ('f4')}, skiprows=1,usecols=[2])\n vs = model['S']\n\n A = np.repeat(vs,2)\n B = np.repeat(d,2)\n B = np.insert(B,[0],0.0)[:-1] \n out = zip(A, B)\n \n f = open('model.info','w+')\n for line in out:\n print (\" \".join(str(x) for x in line))\n f.write(\" \".join(str(x) for x in line) + \"\\n\") \n f.close()", "def persistent_homology(self):\n\n def low(j, R):\n \"\"\"\n :return: maximum line index of the column j in the matrix R with a 1 in it\n \"\"\"\n if R[j] == []:\n return (-1)\n else:\n return (sorted(R[j])[-1])\n\n # low_j = 0\n # for k in range(j):\n # if R[k, j] == 1:\n # low_j = k\n # return 
(low_j)\n\n N = self.nbr_splxs\n self.homology_matrix = self.neighbours_matrix[:]\n n = self.nbr_0_splxs\n # initilize the low_j matrix\n self.low_j_to_j_list = N * [-1]\n # Apply the persistence algorithm\n j = 0\n while low(j, self.homology_matrix) == -1:\n j += 1\n self.low_j_to_j_list[low(j, self.homology_matrix)] = j\n j += 1\n while j < N:\n low_j = low(j, self.homology_matrix)\n j0 = self.low_j_to_j_list[low_j]\n while j0 != -1:\n self.homology_matrix[j] = self.sum_column(j, j0, self.homology_matrix)\n # self.homology_matrix[:j, j] = (self.homology_matrix[:j, j0] + self.homology_matrix[:j, j]) % 2\n low_j = low(j, self.homology_matrix)\n j0 = self.low_j_to_j_list[low_j]\n if low_j != -1:\n self.low_j_to_j_list[low_j] = j\n j += 1\n if j % 10 == 0:\n print(j / N)\n # for j in range(1, N):\n # test = True\n # while test:\n # test = False\n # for j0 in range(j):\n # if low(j0, self.homology_matrix) == low(j, self.homology_matrix) \\\n # and low(j0, self.homology_matrix) != 0:\n # self.homology_matrix[:j, j] = (self.homology_matrix[:j, j0] + self.homology_matrix[:j, j]) % 2\n # test = True\n # if j % 10 == 0:\n # print(np.log(j + 1) / np.log(N))\n\n for j in range(N):\n low_j = low(j, self.homology_matrix)\n if low_j != -1:\n # print(low_j,j)\n # self.pers_pairs_birth.append(self.dist_appearance[low_j])\n # self.pers_pairs_death.append(self.dist_appearance[j])\n if self.splxs[low_j][0] == 0:\n self.h0_birth.append(self.dist_appearance[low_j])\n self.h0_death.append(self.dist_appearance[j])\n print(low_j)\n else:\n self.h1_birth.append(self.dist_appearance[low_j])\n self.h1_death.append(self.dist_appearance[j])\n print(\"persistant homology achieved\")\n return ()", "def check_dense_gas(dir='./'):\n import glob\n import pandas as pd\n ff = glob.glob('*.gas')\n\n for i in ff:\n f = pd.read_pickle(i)\n print(i)\n print (f['f_H21'] > 0.0).sum()\n\n print(\"Total dense gas mass: \")\n print(f['m'] * f['f_H21']).sum()\n return None", "def writestat(self, outfile=None, hubble=None):\n s = self._base()\n mindarkmass = min(s.dark['mass'])\n\n if hubble is None:\n hubble = s.properties['h']\n\n if outfile is None: outfile = self._base().filename+'.stat'\n print \"write stat file to \", outfile\n fpout = open(outfile, \"w\")\n header = \"#Grp N_tot N_gas N_star N_dark Mvir(M_sol) Rvir(kpc) GasMass(M_sol) StarMass(M_sol) DarkMass(M_sol) V_max R@V_max VelDisp Xc Yc Zc VXc VYc VZc Contam Satellite? False? ID_A\"\n print >> fpout, header\n for ii in np.arange(self._nhalos)+1:\n print '%d '%ii,\n sys.stdout.flush()\n h = self[ii].properties # halo index starts with 1 not 0\n## 'Contaminated'? 
means multiple dark matter particle masses in halo)\"\n icontam = np.where(self[ii].dark['mass'] > mindarkmass)\n if (len(icontam[0]) > 0):\n contam = \"contam\"\n else:\n contam = \"clean\"\n## may want to add implement satellite test and false central breakup test.\n ss = \" \" # can adjust column spacing\n outstring = str(ii)+ss\n outstring += str(len(self[ii]))+ss+str(len(self[ii].g))+ss\n outstring += str(len(self[ii].s)) + ss+str(len(self[ii].dark))+ss\n outstring += str(h['m']/hubble)+ss+str(h['r']/hubble)+ss\n outstring += str(self[ii].g['mass'].in_units('Msol').sum())+ss\n outstring += str(self[ii].s['mass'].in_units('Msol').sum())+ss\n outstring += str(self[ii].d['mass'].in_units('Msol').sum())+ss\n outstring += str(h['vmax'])+ss+str(h['vmax_r']/hubble)+ss\n outstring += str(h['vrms'])+ss\n ## pos: convert kpc/h to mpc (no h).\n outstring += str(h['pos'][0][0]/hubble)+ss\n outstring += str(h['pos'][0][1]/hubble)+ss\n outstring += str(h['pos'][0][2]/hubble)+ss\n outstring += str(h['vel'][0][0])+ss+str(h['vel'][0][1])+ss\n outstring += str(h['vel'][0][2])+ss\n outstring += contam+ss\n outstring += \"unknown\" + \\\n ss # unknown means sat. test not implemented.\n outstring += \"unknown\"+ss # false central breakup.\n print >> fpout, outstring\n fpout.close()", "def new_track_density(track_key,hist_dims,conn):\n\n # extract all of the md needed to look up the data\n\n (fname,iden_key,track_key,dset_key) = conn.execute(\"select fout,iden_key,comp_key,dset_key from tracking where comp_key = ?\",\n track_key).fetchone()\n print fname\n F = h5py.File(fname,'r')\n print F.keys()[:5]\n try:\n start_plane = F[fd('tracking',track_key)]['start_plane'][:]\n start_part = F[fd('tracking',track_key)]['start_particle'][:]\n\n print len(start_plane)\n \n # figure out the right size to make the array\n dims = F.attrs['dims']\n print dims\n # make data collection object\n hist2D_ac = Hist2D_accumlator(dims,hist_dims)\n # loop over the heads of track index and hash result\n cur_plane = None\n cur_x = None\n cur_y = None\n temp = 0\n fr_count = 0\n for plane,part in zip(start_plane,start_part):\n if not plane == cur_plane:\n cur_plane = plane\n cp = F[ff(cur_plane)]\n cur_x = cp[fd('x',iden_key)]\n cur_y = cp[fd('y',iden_key)]\n temp += cp.attrs['temperature']\n fr_count += 1\n\n hist2D_ac.add_point(\n (cur_x[part],\n cur_y[part])\n )\n pass\n except ValueError,er:\n print ff(cur_plane)\n \n \n finally:\n F.close()\n del F\n\n f = plt.figure()\n ax = f.add_axes([.1,.1,.8,.8])\n c = ax.imshow(np.flipud(hist2D_ac.data.T),interpolation='nearest')\n plt.colorbar(c)\n ax.set_title('%.2f C '%(temp/fr_count) + str(dset_key))\n return hist2D_ac.data", "def _kde_local(loc, data, bw, lmbda):\n\n l_s_bw = bw * lmbda\n d = (loc - data).T / l_s_bw\n s = (_norm_pdf(d) / l_s_bw).T\n\n kde = 0.0\n for r in range(s.shape[0]):\n kde += s[r].prod()\n return kde", "def hysteresis(T = 1, dimensions = 2, J = 1, filename = \"hist\", hmax = 2.5):\r\n h = np.linspace(-hmax, hmax, 100)\r\n \r\n #size of lattice\r\n N = 20\r\n \r\n #forward tabulated magnetisations and backward going\r\n Mforward = np.zeros(h.shape)\r\n Mbackward = np.zeros(h.shape)\r\n \r\n #initial lattice\r\n lattice = initialiser(N, dimensions = dimensions)\r\n \r\n #anneal lattice\r\n lattice = anneal(lattice, T, 20)\r\n\r\n #forward scan over different values of strength\r\n for i in range(len(h)):\r\n (m,e,l) = simulation(N, T, 200, lattice, h = h[i], nonabsmag=True,\\\r\n dimensions= dimensions, J = J)\r\n Mforward[i] = np.mean(m)\r\n lattice = l\r\n 
\r\n #backward scan over different values of strength \r\n for i in range(len(h)):\r\n index = len(h) - 1 - i\r\n (m,e,l) = simulation(N, T, 200, lattice, h = h[index], nonabsmag=True,\\\r\n dimensions = dimensions, J = J)\r\n Mbackward[index] = np.mean(m)\r\n lattice = l\r\n \r\n #plot data\r\n f = makeplot(h, [Mforward, Mbackward], [\"Increasing h\", \"Decreasing h\"],\\\r\n \"External field, h $[J]$\", \"Magnetisation\")\r\n f.show()\r\n f.savefig(filename+\".svg\")", "def save_serendipity_dic(y, filename):\n store = pd.io.pytables.HDFStore(y)\n mat = store.matrix\n store.close()\n n = len(mat.columns)\n ser = 1 - mat.sum(axis=1) / n\n\n f = open(filename, \"w\")\n cPickle.dump(ser.to_dict(), f, protocol=2)\n f.close()", "def write_mesh_file(allxyz, beck_bed):\n if SAVEMESH:\n print('+> Saving finite element mesh files...', end='')\n fname = FNAME.rsplit('.', 1)[0]\n ncol = beck_bed[0,:].size\n nrow = beck_bed[:,0].size\n nele = (nrow-1)*(ncol-1)*2\n d = compute_mesh(nrow, ncol, nele)\n h = ':NodeCount ' + str(allxyz[:,0].size) + '\\n:ElementCount ' \\\n + str(nele) + '\\n#\\n:EndHeader\\n'\n with open(fname + '_mesh.t3s', 'w') as f: \n f.write(h)\n with open(fname + '_mesh.t3s', 'a') as f:\n np.savetxt(f, allxyz, fmt='%.6e')\n np.savetxt(f, d, fmt='%d')\n f.write('\\n\\n')\n h = 'TITLE = \\\"' + fname \\\n + '_mesh\\\"\\nVARIABLES = \\\"X\\\", \\\"Y\\\", \\\"' + fname \\\n + '_mesh\\\"\\nZONE NODES=' + str(allxyz[:,0].size) + ', ELEMENTS=' \\\n + str(nele) + ', DATAPACKING=POINT, ZONETYPE=FETRIANGLE\\n'\n with open(fname + '_mesh.dat', 'w') as f: \n f.write(h)\n with open(fname + '_mesh.dat', 'a') as f:\n np.savetxt(f, allxyz, fmt='%.6e')\n np.savetxt(f, d, fmt='%d')\n f.write('\\n\\n')\n inlet = np.zeros((ncol,), dtype=int)\n outlet = np.zeros((ncol,), dtype=int)\n for i in range(ncol):\n inlet[i] = 1 + i*nrow\n outlet[i] = (1 + i)*nrow\n left = np.zeros((nrow-2,), dtype=int)\n right = np.zeros((nrow-2,), dtype=int)\n for i in range(1, nrow-1):\n left[i-1] = (ncol-2)*nrow + i + 1\n right[i-1] = (ncol-1)*nrow + i + 1\n cli = np.zeros((2*(nrow+ncol-2), 13))\n cli[:,:2] = 2\n cli[:,7] = 2\n cli[:,11] = np.concatenate((inlet, outlet, left, right))\n cli[:,12] = np.arange(2*(nrow+ncol-2)) + 1\n cli[:ncol,0] = 4\n cli[:ncol,1] = 5\n cli[:ncol,2] = 5\n cli[:ncol,7] = 4\n cli[ncol:2*ncol,0] = 5\n cli[ncol:2*ncol,1] = 4\n cli[ncol:2*ncol,2] = 4\n cli[ncol:2*ncol,7] = 4\n np.savetxt(fname + '_BC_tmp.cli', cli, fmt='%d')\n with open(fname + '_BC.cli', 'w') as out_f:\n with open(fname + '_BC_tmp.cli', 'r') as in_f:\n for i, line in enumerate(in_f):\n if i < ncol:\n s = ' #Inlet'\n elif i >= ncol and i < 2*ncol:\n s = ' #Outlet'\n else:\n s = ' #'\n out_f.write(line.rstrip('\\n') + s + '\\n')\n out_f.write('\\n')\n os.remove(fname + '_BC_tmp.cli')\n h = ':FileType bc2 ASCII EnSim 1.0' \\\n + '\\n:NodeCount ' + str(allxyz[:,0].size) \\\n + '\\n:ElementCount ' + str(nele) \\\n + '\\n:ElementType T3' \\\n + '\\n:BoundarySegmentCount 2' \\\n + '\\n# id code sectionCount startNode1 endNode1 startNode2 endNode2 tracerCode name' \\\n + '\\n:BoundarySegment 1 455 1 1 ' + str(ncol) + ' 1 1 4 \\\"Inlet\\\"' \\\n + '\\n:BoundarySegment 2 544 1 ' + str(ncol+1) + ' ' + str(2*ncol) + ' 1 1 4 \\\"Outlet\\\"' \\\n + '\\n:ShorelineCount 1' \\\n + '\\n:ShorelineNodeCount ' + str(2*(nrow+ncol-2)) \\\n + '\\n:EndHeader' \\\n + '\\n:BeginNodes ' + str(allxyz[:,0].size) + '\\n'\n with open(fname + '_BC.bc2', 'w') as f: \n f.write(h)\n with open(fname + '_BC.bc2', 'a') as f:\n xyz = np.copy(allxyz)\n xyz[:,2] = 0\n 
np.savetxt(f, xyz, fmt='%.6e')\n f.write(':EndNodes\\n:BeginElements ' + str(nele) + '\\n')\n np.savetxt(f, d, fmt='%d')\n f.write(':EndElements\\n:BeginTable ' + str(2*(nrow+ncol-2)) + ' 15\\n')\n with open(fname + '_BC.cli', 'r') as g:\n lines = g.read()\n f.write(lines[:-1])\n f.write(':EndTable\\n\\n')\n print(' [done]')", "def _write_dx(self, FN, data):\n n_points = data['counts'][0] * data['counts'][1] * data['counts'][2]\n if FN.endswith('.dx'):\n F = open(FN, 'w')\n else:\n import gzip\n F = gzip.open(FN, 'w')\n\n F.write(\"\"\"object 1 class gridpositions counts {0[0]} {0[1]} {0[2]}\norigin {1[0]} {1[1]} {1[2]}\ndelta {2[0]} 0.0 0.0\ndelta 0.0 {2[1]} 0.0\ndelta 0.0 0.0 {2[2]}\nobject 2 class gridconnections counts {0[0]} {0[1]} {0[2]}\nobject 3 class array type double rank 0 items {3} data follows\n\"\"\".format(data['counts'], data['origin'], data['spacing'], n_points))\n\n for start_n in range(0, len(data['vals']), 3):\n F.write(' '.join(['%6e' % c\n for c in data['vals'][start_n:start_n + 3]]) + '\\n')\n\n F.write('object 4 class field\\n')\n F.write('component \"positions\" value 1\\n')\n F.write('component \"connections\" value 2\\n')\n F.write('component \"data\" value 3\\n')\n F.close()", "def plot_initial_state(input_file_name='initial_state.nc',\n output_file_name='initial_state.png'):\n\n # load mesh variables\n chunks = {'nCells': 32768, 'nEdges': 32768}\n ds = xarray.open_dataset(input_file_name, chunks=chunks)\n nCells = ds.sizes['nCells']\n nEdges = ds.sizes['nEdges']\n nVertLevels = ds.sizes['nVertLevels']\n\n fig = plt.figure()\n fig.set_size_inches(16.0, 12.0)\n plt.clf()\n\n print('plotting histograms of the initial condition')\n print('see: init/initial_state/initial_state.png')\n d = datetime.datetime.today()\n txt = \\\n 'MPAS-Ocean initial state\\n' + \\\n 'date: {}\\n'.format(d.strftime('%m/%d/%Y')) + \\\n 'number cells: {}\\n'.format(nCells) + \\\n 'number cells, millions: {:6.3f}\\n'.format(nCells / 1.e6) + \\\n 'number layers: {}\\n\\n'.format(nVertLevels) + \\\n ' min val max val variable name\\n'\n\n plt.subplot(3, 3, 2)\n varName = 'maxLevelCell'\n var = ds[varName]\n maxLevelCell = var.values - 1\n xarray.plot.hist(var, bins=nVertLevels - 4)\n plt.ylabel('frequency')\n plt.xlabel(varName)\n txt = '{}{:9.2e} {:9.2e} {}\\n'.format(txt, var.min().values,\n var.max().values, varName)\n\n plt.subplot(3, 3, 3)\n varName = 'bottomDepth'\n var = ds[varName]\n xarray.plot.hist(var, bins=nVertLevels - 4)\n plt.xlabel(varName)\n txt = '{}{:9.2e} {:9.2e} {}\\n'.format(txt, var.min().values,\n var.max().values, varName)\n\n cellsOnEdge = ds['cellsOnEdge'].values - 1\n cellMask = np.zeros((nCells, nVertLevels), bool)\n edgeMask = np.zeros((nEdges, nVertLevels), bool)\n for k in range(nVertLevels):\n cellMask[:, k] = k <= maxLevelCell\n cell0 = cellsOnEdge[:, 0]\n cell1 = cellsOnEdge[:, 1]\n edgeMask[:, k] = np.logical_and(np.logical_and(cellMask[cell0, k],\n cellMask[cell1, k]),\n np.logical_and(cell0 >= 0,\n cell1 >= 0))\n cellMask = xarray.DataArray(data=cellMask, dims=('nCells', 'nVertLevels'))\n edgeMask = xarray.DataArray(data=edgeMask, dims=('nEdges', 'nVertLevels'))\n\n plt.subplot(3, 3, 4)\n varName = 'temperature'\n var = ds[varName].isel(Time=0).where(cellMask)\n xarray.plot.hist(var, bins=100, log=True)\n plt.ylabel('frequency')\n plt.xlabel(varName)\n txt = '{}{:9.2e} {:9.2e} {}\\n'.format(txt, var.min().values,\n var.max().values, varName)\n\n plt.subplot(3, 3, 5)\n varName = 'salinity'\n var = ds[varName].isel(Time=0).where(cellMask)\n 
xarray.plot.hist(var, bins=100, log=True)\n plt.xlabel(varName)\n txt = '{}{:9.2e} {:9.2e} {}\\n'.format(txt, var.min().values,\n var.max().values, varName)\n\n plt.subplot(3, 3, 6)\n varName = 'layerThickness'\n var = ds[varName].isel(Time=0).where(cellMask)\n xarray.plot.hist(var, bins=100, log=True)\n plt.xlabel(varName)\n txt = '{}{:9.2e} {:9.2e} {}\\n'.format(txt, var.min().values,\n var.max().values, varName)\n\n plt.subplot(3, 3, 7)\n varName = 'rx1Edge'\n var = ds[varName].isel(Time=0).where(edgeMask)\n maxRx1Edge = var.max().values\n xarray.plot.hist(var, bins=100, log=True)\n plt.ylabel('frequency')\n plt.xlabel('Haney Number, max={:4.2f}'.format(maxRx1Edge))\n txt = '{}{:9.2e} {:9.2e} {}\\n'.format(txt, var.min().values,\n var.max().values, varName)\n\n font = FontProperties()\n font.set_family('monospace')\n font.set_size(12)\n print(txt)\n plt.subplot(3, 3, 1)\n plt.text(0, 1, txt, verticalalignment='top', fontproperties=font)\n plt.axis('off')\n\n plt.tight_layout(pad=4.0)\n\n plt.savefig(output_file_name, bbox_inches='tight', pad_inches=0.1)", "def save(self, filename):\n hebbian_weights = open(filename, \"w\")\n for i in xrange(self.hidden):\n hebbian_weights.write(\"\\t\".join(self.vis_layer[i].get_weights()) + '\\n')\n for i in xrange(self.layers):\n for j in xrange(self.hidden):\n hebbian_weights.write(\"\\t\".join(self.hidden_layers[i][j].get_weights()) + '\\n')\n hebbian_weights.write(\"\\t\".join(self.output_neuron.get_weights()) + '\\n')\n hebbian_weights.close()", "def write_mat_file(self, geom_filename):\n mat_dict = {}\n mat_dict['Lx_p'] = self.Lx_p\n mat_dict['Ly_p'] = self.Ly_p\n mat_dict['Lz_p'] = self.Lz_p\n mat_dict['Lo'] = self.obst.get_Lo()\n mat_dict['Ny_divs'] = self.N_divs\n mat_dict['rho_p'] = self.rho_p\n mat_dict['nu_p'] = self.nu_p\n mat_dict['snl'] = list(np.union1d(self.obst_list[:],self.solid_list[:]))\n mat_dict['inl'] = list(self.inlet_list[:])\n mat_dict['onl'] = list(self.outlet_list[:])\n\n scipy.io.savemat(geom_filename,mat_dict)", "def nc_to_hdf5_mudis(dataset, config):\n np.warnings.filterwarnings('ignore')\n\n date = datetime.datetime.strptime(dataset.recorddate,\n '%d.%m.%Y') # read date from dateset\n date_name = datetime.datetime.strftime(date,\n '%Y%m%d') # convert date to YYYYMMDD format\n config['date'] = date_name\n\n # Create the directory to save the results\n path = config['str_dir'] + '/radiance/{}/data/'.format(config['date'])\n os.makedirs(os.path.dirname(path), exist_ok=True)\n\n # Read time of the file (correct time)\n time = datetime.datetime.strptime(dataset.recordtime, '%H:%M:%S.')\n time = datetime.datetime.time(time)\n\n # convert time to datetime format\n datetime_name = datetime.datetime.combine(date, time)\n new_name = datetime.datetime.strftime(datetime_name, '%Y%m%d_%H%M%S')\n\n # radiance = dataset.variables['data'][:].reshape(113, 1281)\n # wavelength_axis = dataset.variables['xAxis'][:]\n\n # Create a file in the disk\n with h5py.File(config['str_dir'] + '/radiance/{}/data/{}.h5'.format(\n config['date'], new_name), 'w') as datos:\n\n if not list(datos.items()):\n # Create two datasets(use only one time)\n datos.create_dataset('/data',\n data=dataset['data'][:].reshape(113, 1281),\n dtype='f4')\n # datos.create_dataset('/skymap', data=skymap, dtype='f4')\n else:\n del datos['data']\n # del datos['skymap']\n print('data deleted and corrected')\n datos.create_dataset('/data', data=data, dtype='f4')\n # datos.create_dataset('/skymap', data=skymap, dtype='f4')\n\n # Add attributes to datasets\n 
datos['data'].attrs['time'] = str(time)\n datos['data'].attrs['Exposure'] = dataset.exposuretime\n datos['data'].attrs['NumAver'] = dataset.AVERAGED\n datos['data'].attrs['CCDTemp'] = dataset.detectortemperature\n datos['data'].attrs['NumSingMes'] = dataset.noofaccumulations\n # datos['data'].attrs['ElectrTemp'] = dataset.\n datos['data'].attrs['Latitude'] = '52.39N'\n datos['data'].attrs['Longitude'] = '9.7E'\n datos['data'].attrs['Altitude'] = '65 AMSL'\n\n chn = np.arange(1, 114)\n datos.create_dataset('/channel', data=chn, dtype=np.float32)\n datos.create_dataset('/wavelength', data=dataset['xAxis'][:])\n\n datos['data'].dims.create_scale(datos['channel'], 'channel')\n datos['data'].dims[0].attach_scale(datos['channel'])\n datos['data'].dims[0].label = 'channel'\n datos['data'].dims[1].label = 'wavelength'\n\n # datos['skymap'].dims[0].label = 'channel'\n # datos['skymap'].dims[1].label = 'Azimuth, Zenith'\n\n datos.close()", "def write_state_file(self, state):\r\n with open(StudentModuleHistoryCleaner.STATE_FILE, \"w\") as state_file:\r\n state_file.write(state)", "def save_progress(filename, derm_counts):\n\n with open(filename, 'w') as f:\n f.write(\"zipcode,derms_within_%d_miles\\n\" % RADIUS)\n prefix = \"\"\n for key, val in derm_counts.iteritems():\n f.write(prefix)\n f.write(key)\n f.write(',')\n f.write(val)\n prefix= \"\\n\"", "def density(temp,pres):\n g_p = liq_g(0,1,temp,pres)\n dliq = g_p**(-1)\n return dliq", "def slabldos(h,energies=np.linspace(-1.0,1.0,40),delta=None,nk=40):\n if h.dimensionality!=2: raise # nope\n ds = ldosmap(h,energies=energies,delta=delta,nk=nk)\n if len(ds[0])!=len(h.geometry.z): \n print(\"Wrong dimensions\",len(ds[0]),len(h.geometry.z))\n raise\n f = open(\"DOSMAP.OUT\",\"w\")\n f.write(\"# energy, index, DOS, zposition\\n\")\n for ie in range(len(energies)):\n for iz in range(len(h.geometry.z)):\n f.write(str(energies[ie])+\" \")\n f.write(str(iz)+\" \")\n f.write(str(ds[ie,iz])+\" \")\n f.write(str(h.geometry.z[iz])+\"\\n\")\n f.close()\n return energies,np.transpose(ds) # retunr LDOS ", "def density(self):\n return self.nnz/self.dim", "def _ion_densities(self):\n nD = self.ne_in*(6-self.zeff_in)/(5.)\n nC = self.ne_in*(self.zeff_in-1)/(30.)\n nC[np.where(nC<0)]=0.\n print(\"nC/nD: \"+str(np.mean(nC/nD)*100.)+\" %\")\n self.ni_in[0,:] = nD\n self.ni_in[1,:] = nC", "def density(self):\n return self.nnz / self.size", "def gstate(N, periodic):\n\n # Create Hamiltonian matrix\n H = kronH(N, periodic)\n \n # Diagonalize\n print('Diagonalizing...', end=' ', flush=True)\n w, v = eigsh(H, k=1, which='SA')\n print('Done')\n\n return w[0]", "def _write_nemo_hr_file(rpn_hr_ds_path, nemo_hr_ds_path):\n with xarray.open_dataset(rpn_hr_ds_path) as rpn_hr:\n logging.debug(\n f\"calculating specific humidity & incoming longwave radiation from {rpn_hr_ds_path}\"\n )\n qair, ilwr, rh = _calc_qair_ilwr(rpn_hr)\n u_out, v_out = _rotate_winds(rpn_hr)\n data_vars = {\n \"nav_lon\": rpn_hr.nav_lon,\n \"nav_lat\": rpn_hr.nav_lat,\n # [:, 0] drops z dimension that NEMO will not tolerate\n \"qair\": qair[:, 0],\n \"RH_2maboveground\": rh[:, 0],\n \"therm_rad\": ilwr[:, 0],\n \"u_wind\": u_out[:, 0],\n \"v_wind\": v_out[:, 0],\n # \"LHTFL_surface\": ** needs to be calculated**,\n }\n nemo_rpn_vars = (\n (\"atmpres\", \"PN\"),\n (\"percentcloud\", \"NT\"),\n (\"PRATE_surface\", \"RT\"),\n (\"precip\", \"PR\"),\n (\"solar\", \"FB\"),\n (\"tair\", \"TT\"),\n )\n missing_vars = \"\"\n for nemo_var, rpn_var in nemo_rpn_vars:\n try:\n # [:, 0] drops z dimension that NEMO will 
not tolerate\n data_vars.update({nemo_var: getattr(rpn_hr, rpn_var)[:, 0]})\n except AttributeError:\n # Variable is missing from RPN dataset, so provide a placeholder DataArray\n # full of NaNs that we will deal with later via interpolation\n data_vars.update(\n {nemo_var: xarray.DataArray(numpy.full_like(qair[:, 0], numpy.nan))}\n )\n missing_vars = (\n \", \".join((missing_vars, nemo_var)) if missing_vars else nemo_var\n )\n logging.warning(f\"missing RPN variable {rpn_var} from {rpn_hr_ds_path}\")\n nemo_hr = xarray.Dataset(\n data_vars=data_vars, coords=rpn_hr.coords, attrs=rpn_hr.attrs\n )\n nemo_hr.attrs[\"history\"] += (\n f\"\\n{arrow.now().format('ddd MMM DD HH:mm:ss YYYY')}: \"\n f\"Add specific and relative humidity and incoming longwave radiation variables from \"\n f\"correlations\"\n )\n if missing_vars:\n nemo_hr.attrs[\"missing_variables\"] = missing_vars\n _add_vars_metadata(nemo_hr)\n _write_netcdf_file(nemo_hr, nemo_hr_ds_path)", "def write_dftb_in(self, outfile):\n\n outfile.write('Geometry = GenFormat { \\n')\n outfile.write(' <<< \"geo_end.gen\" \\n')\n outfile.write('} \\n')\n outfile.write(' \\n')\n\n params = self.parameters.copy()\n\n s = 'Hamiltonian_MaxAngularMomentum_'\n for key in params:\n if key.startswith(s) and len(key) > len(s):\n break\n # --------MAIN KEYWORDS-------\n previous_key = 'dummy_'\n myspace = ' '\n for key, value in sorted(params.items()):\n current_depth = key.rstrip('_').count('_')\n previous_depth = previous_key.rstrip('_').count('_')\n for my_backsclash in reversed(\n range(previous_depth - current_depth)):\n outfile.write(3 * (1 + my_backsclash) * myspace + '} \\n')\n outfile.write(3 * current_depth * myspace)\n if key.endswith('_') and len(value) > 0:\n outfile.write(key.rstrip('_').rsplit('_')[-1] +\n ' = ' + str(value) + '{ \\n')\n elif (key.endswith('_') and (len(value) == 0)\n and current_depth == 0): # E.g. 'Options {'\n outfile.write(key.rstrip('_').rsplit('_')[-1] +\n ' ' + str(value) + '{ \\n')\n elif (key.endswith('_') and (len(value) == 0)\n and current_depth > 0): # E.g. 'Hamiltonian_Max... 
= {'\n outfile.write(key.rstrip('_').rsplit('_')[-1] +\n ' = ' + str(value) + '{ \\n')\n elif key.count('_empty') == 1:\n outfile.write(str(value) + ' \\n')\n else:\n outfile.write(key.rsplit('_')[-1] + ' = ' + str(value) + ' \\n')\n previous_key = key\n current_depth = key.rstrip('_').count('_')\n for my_backsclash in reversed(range(current_depth)):\n outfile.write(3 * my_backsclash * myspace + '} \\n')\n outfile.write('ParserOptions { \\n')\n outfile.write(' IgnoreUnprocessedNodes = Yes \\n')\n outfile.write('} \\n')", "def readOutputfile(filename, verbose=False):\n\n # -----------------------------------------------------------------------------\n # Defining the classes for data structure\n T_Simulation = namedtuple('Simulation', ['step'])\n T_Step = namedtuple('Step', ['element', 'node'])\n\n T_Displacement = namedtuple('Displacement', ['ux', 'uy'])\n\n T_Element = namedtuple('Element', ['gp', 'avstrain', 'avstress', 'eqstrain'])\n T_GP = namedtuple('GP', ['stress', 'strain'])\n T_Stresses = namedtuple('Stresses', ['xx', 'yy', 'zz', 'yz', 'zx', 'xy'])\n T_Strains = namedtuple('Strains', ['xx', 'yy', 'zz', 'yz', 'zx', 'xy'])\n # -----------------------------------------------------------------------------\n\n nSteps = 0 # Simulation step counter\n\n SimData = T_Simulation(list())\n\n with open(filename) as f:\n line = f.readline() # Read in the first line of the input file\n while True: # Loop over all lines of the input file\n # Read the nodes displacements\n #line = f.readline()\n #print(line)\n if line == 'DofManager output:\\n': # String starts a list of nodes displacement information\n nSteps += 1 # The above string starts a new simulation step\n line = f.readline() # Cancel ---------- seperator\n line = f.readline()\n Nodes = list() # Initialize/clear list of nodes\n\n while line != '\\n' and line != 'Element output:\\n': # Strings that finish the list\n #\t\t\t\tnNode = int(line.strip().split()[1]) # Node id\n line = f.readline()\n dim1 = float(line.strip().split()[3]) # Displacement dim1\n line = f.readline()\n dim2 = float(line.strip().split()[3]) # Displacement dim2\n Nodes.append(\n T_Displacement(dim1, dim2)) # Append displacements of the current node to the node list\n line = f.readline()\n\n\n if verbose:\n print('Step {}: Dofs completed.\\n'.format(nSteps))\n print('---------------------------------\\n')\n\n # Read the stresses an strains at Gauss points\n elif line == 'Element output:\\n': # String starts a list elements, GPs, strains and stresses\n line = f.readline() # Cancel ---------- seperator\n line = f.readline()\n Elements = list() # Initialize/clear list of elements\n\n while line != '\\n' and line != '\\tR E A C T I O N S O U T P U T:\\n': # Strings that finish the list\n #\t\t\t\t\tnElement = line.strip().split()[2] # Element id\n line = f.readline()\n GPs = T_Element(list(), 0, 0, 0) # List of Gauss points\n\n while line != '\\n' and line.strip().split()[0] == 'GP': # String that starts a new GP\n #\t\t\t\t\t\tnGP = int(line.strip().split()[1].split('.')[1]) # GP id\n tmp = [float(i) for i in line.strip().split()[4:10]] # Read the strains\n strain = T_Strains(tmp[0], tmp[1], tmp[2], tmp[3], tmp[4], tmp[5])\n line = f.readline()\n tmp = [float(i) for i in line.strip().split()[1:7]] # Read the stresses\n stress = T_Stresses(tmp[0], tmp[1], tmp[2], tmp[3], tmp[4], tmp[5])\n GPs.gp.append(\n T_GP(stress, strain)) # Append stresses and strains of the current GP to the GP list\n line = f.readline()\n\n\n Elements.append(GPs) # Append GP list of the current element 
to the element list\n\n if verbose:\n print('Step {}: GPs completed.\\n'.format(nSteps))\n print('---------------------------------\\n')\n\n SimData.step.append(T_Step(Elements, Nodes)) # Append element and node list of the current step to the step list\n #print('the file input ends')\n #print(nSteps)\n # only needed with a while loop\n # Jump over the lines until we reach the next time step (Caught by if-clause)\n try:\n line = f.readline() # Will generate an error if files end is reached\n if line == \"\":\n raise EOFError\n except:\n if verbose: print(\"End of file reached.\\n\")\n break # Break the 'while True' loop\n\n # -----------------------------------------------------------------------------\n\n\n print('averaging the stress')\n # Averaging of strains and stress of GPs of each element\n for istep in range(len(SimData.step)):\n\n for ielement in range(len(SimData.step[istep].element)):\n print(len)\n # Initialization before each element\n stresses = np.array([0., 0., 0., 0., 0., 0.])\n strains = np.array([0., 0., 0., 0., 0., 0.])\n\n for igp in range(len(SimData.step[istep].element[ielement])):\n print(igp)\n # Add up all data of all GPs\n #stresses[:] += SimData.step[istep].element[ielement].gp[igp].stress[:]\n strains[:] += SimData.step[istep].element[ielement].gp[igp].strain[:]\n\n # Divide GP sum by number of GPs\n stresses /= len(SimData.step[istep].element[ielement])\n strains /= len(SimData.step[istep].element[ielement])\n # Replace the field (initialized with 0) with new information\n SimData.step[istep].element[ielement] = SimData.step[istep].element[ielement]._replace(\n avstress=T_Stresses(stresses[0], stresses[1], stresses[2], stresses[3], stresses[4], stresses[5]))\n SimData.step[istep].element[ielement] = SimData.step[istep].element[ielement]._replace(\n avstrain=T_Strains(strains[0], strains[1], strains[2], strains[3], strains[4], strains[5]))\n print('Analysis finished')\n return SimData", "def compute_stability_fh(H, t0, u_attr, r_air, z_t, d0, cp=1004.16):\n L_ob = H .expression(\n '-(r_air * cp * t0 * (u_attr ** 3.0) / 0.41 / 9.806 / H)',\n {'cp': cp, 'H': H, 'r_air': r_air, 't0': t0, 'u_attr': u_attr})\n L_ob = L_ob.where(L_ob.gte(0), -99)\n mh = H \\\n .expression(\n '((1 - (16.0 * (z_t - d0) / L_ob)) ** 0.25)',\n {'d0': d0, 'L_ob': L_ob, 'z_t': z_t}) \\\n .where(L_ob.eq(-99), 0.0)\n fh = H \\\n .expression('(2.0 * log((1.0 + (mh ** 2.0)) / 2.0))', {'mh': mh}) \\\n .where(L_ob.lte(-100).Or(L_ob.gte(100)), 0)\n\n return fh", "def make_stehle(self):\n\n temp_k = self.temp * e / k # temperature in K\n dens_cm = self.e_dens * 1.e-6 # electronic density in cm-3\n prefix = 'n_' + str(self.n_upper) + '_' + str(self.n_lower) + '_'\n\n # extract raw tabulated tabulated_data\n tab_temp_k = np.array(pystark.nc.variables[prefix + 'tempe'].data) # tabulated electron temperatures (K)\n olam0 = pystark.nc.variables[prefix + 'olam0'].data # line centre wavelength (A)\n num_tab_dens = pystark.nc.variables[prefix + 'id_max'].data\n fainom = pystark.nc.variables[prefix + 'fainom'].data\n tab_dens_cm = np.array(pystark.nc.variables[prefix + 'dense'].data) # tabulated electron densities (cm ** -3)\n f00 = np.array(pystark.nc.variables[prefix + 'f00'].data) # normal Holtsmark field strength (30 kV / m)\n dl12 = np.array(pystark.nc.variables[prefix + 'dl12'].data)\n dl12s = np.array(pystark.nc.variables[prefix + 'dl12s'].data)\n fainu = pystark.nc.variables[\n prefix + 'fainu'].data # Asymptotic value of iStark * (alpha ** 2.5) (\"wings factor in alfa units\")\n pr0 = 
np.array(pystark.nc.variables[\n prefix + 'pr0'].data) # Ratio of the mean interelectronic distance to the electronic Debye length\n jtot = np.array(pystark.nc.variables[prefix + 'jtot'].data,\n dtype=np.int) # \"number of wave lengths for the couple (T,Ne)\"\n dom = np.array(pystark.nc.variables[prefix + 'dom'].data) # frequency detunings in units (rad / (s*ues)\n d1om = np.array(pystark.nc.variables[prefix + 'd1om'].data)\n o1line = np.array(pystark.nc.variables[prefix + 'o1line'].data)\n o1lines = np.array(pystark.nc.variables[prefix + 'o1lines'].data)\n\n # ensure given temperature + density falls within tabulated values\n # change sligtly the value of the input density\n # dens_cm in order to remain , as far as possible, inside the tabulation\n # JSA: this first step seems bogus!\n\n if np.abs(dens_cm - tab_dens_cm[0]) / dens_cm <= 1.0E-3:\n dens_cm = tab_dens_cm[0] * 1.001\n\n for id in np.arange(1, num_tab_dens + 1):\n if np.abs(dens_cm - tab_dens_cm[id]) / dens_cm <= 1.0E-3:\n dens_cm = tab_dens_cm[id] * 0.999\n\n if dens_cm >= 2.0 * tab_dens_cm[num_tab_dens]:\n raise Exception(\n 'Your input density is higher than the largest tabulated value %f' % tab_dens_cm[num_tab_dens])\n\n if dens_cm <= tab_dens_cm[0]:\n raise Exception('Your input density is smaller than the smallest tabulated value %f' % tab_dens_cm[0])\n\n if temp_k >= tab_temp_k[9]:\n raise Exception('Your input temperature is higher than the largest tabulated value %f' % tab_temp_k[9])\n\n if temp_k <= tab_temp_k[0]:\n raise Exception('Your input temperature is lower than the smallest tabulated value %f' % tab_temp_k[0])\n\n normal_holtsmark_field = 1.25e-9 * (dens_cm ** (2. / 3.)) # normal field value in ues\n\n # calculate line centre wavelength and frequency using Rydberg formula\n # JSA: I have made this step clearer and corrected for deuteron mass in the Rydberg constant (though the effect is small)\n # TODO make sure this matches olam0 parameter above -- why were there two variables in the first place?!\n # rydberg_m = Rydberg / (1. + (electron_mass / physical_constants['deuteron mass'][0]))\n # wl_0_angst = 1e10 * (rydberg_m * (1 / n_lower ** 2 - 1 / n_upper ** 2)) ** -1\n\n wl_centre_angst = self.wl_centre * 1e10\n\n c_angst = c * 1e10 # velocity of light in Ansgtroms / s\n angular_freq_0 = 2 * np.pi * c_angst / wl_centre_angst # rad / s\n\n otrans = -2 * np.pi * c_angst / wl_centre_angst ** 2\n\n olines = o1lines / np.abs(otrans)\n oline = o1line / np.abs(otrans)\n\n # Limit analysis_tools to uncorrelated plasmas.\n # check that mean interelectronic distance is smaller than the electronic Debye length (equ. 10)\n PR0_exp = 0.0898 * (dens_cm ** (1. / 6.)) / np.sqrt(temp_k) # = (r0 / debye)\n if PR0_exp > 1.:\n raise Exception('The plasma is too strongly correlated\\ni.e. r0/debye=0.1\\nthe line cannot be computed.')\n\n # fainom_exp=fainom*(F00_exp**1.5)\n # fainum_exp=fainom_exp/( (OPI*2.)**1.5)\n\n # ========================\n # TABULATION Format CDS\n # si on veut ecrire\n # n -np lambda0 kalpha Ne E0 T R0/Debye Dalpha iDoppler iStark\n\n # IN_cds= N+0.01\n # INP_cds = NP+0.01\n\n # ***********************************************************\n # Don't edit the CDS format...\n # ***********************************************************\n\n # Skipped the code in the IF statement starting at line 470, since it\n # isn't used, if (.FALSE.) 
...\n\n # ==============================================\n # define an unique detunings grid - domm - for the tabulated\n # profiles ( various temperatures , densities)\n # calculate all the line shapes for this common grid\n # units used at this points are Domega_new= Delta(omega)/F00\n # in rd/(s-1 ues)\n\n max_num_dens = 30 # Maximum number of densities\n max_num_tab_temp = 10\n max_num_detunings = 60 # Maximum number of detunings\n jtot = jtot.astype(np.int)\n domm = np.zeros(100000)\n dom0 = np.zeros(10000)\n tprof = np.zeros([max_num_dens, max_num_tab_temp, 10000])\n tprofs = np.zeros([max_num_dens, max_num_tab_temp, 10000])\n uprof = np.zeros([max_num_dens, 10000])\n uprofs = np.zeros([max_num_dens, 10000])\n\n inc = 0\n domm[inc] = 0.0\n # ---- Look to replace this loop\n for id in np.arange(num_tab_dens + 1): # loop over tab densities\n for j in np.arange(max_num_tab_temp): # loop over tab temperatures (?)\n for i in np.arange(1, jtot[id, j]):\n inc += 1\n dom0[inc] = dom[id, j, i]\n\n inc = np.count_nonzero(dom)\n npik = inc + 1\n # nut=10000\n\n # Calling numpy sort instead of piksrt\n tmp = np.sort(dom0[0:npik])\n dom0[0:npik] = tmp[0:npik]\n # dom0 seems to agree with the FORTRAN version\n\n inc = 0\n domm[0] = 0.0\n # print 'npik',npik\n # ---- Look to replace this loop\n for i in np.arange(1, npik):\n dif = (dom0[i] - dom0[i - 1])\n if dif <= 1.0E-6:\n continue\n if dif / np.abs(dom0[i]) <= 0.1:\n continue\n inc = inc + 1\n domm[inc] = dom0[i]\n\n jdom = inc + 1 # One line after marker 35\n\n for id in np.arange(num_tab_dens):\n for j in np.arange(10):\n if pr0[id, j] > 1.0:\n continue\n\n tprof[id, j, 0] = oline[id, j, 0]\n tprofs[id, j, 0] = olines[id, j, 0]\n\n if jtot[id, j] == 0:\n continue\n\n for i in np.arange(1, jdom + 1):\n skip1 = False\n skip2 = False\n # print 'i',i\n domeg = domm[i]\n ij_max = jtot[id, j]\n # print 'domeg,ij_max',domeg,ij_max\n for ij in np.arange(1, ij_max - 1):\n # print 'ij',ij\n test = (domeg - dom[id, j, ij]) * (domeg - dom[id, j, ij - 1])\n # print 'test1:',test\n if test <= 0.0:\n # print 'triggered test1'\n x1 = dom[id, j, ij - 1]\n x2 = dom[id, j, ij]\n x3 = dom[id, j, ij + 1]\n y1 = oline[id, j, ij - 1]\n y2 = oline[id, j, ij]\n y3 = oline[id, j, ij + 1]\n # print 'x1,x2,x3',x1,x2,x3\n # print 'y1,y2,y3',y1,y2,y3\n tprof[id, j, i] = pystark.FINTRP(x1, x2, x3, y1, y2, y3, domeg)\n y1 = olines[id, j, ij - 1]\n y2 = olines[id, j, ij]\n y3 = olines[id, j, ij + 1]\n tprofs[id, j, i] = pystark.FINTRP(x1, x2, x3, y1, y2, y3, domeg)\n # print 'tprof[id,j,i]',tprof[id,j,i]\n # print 'tprofs[id,j,i]',tprofs[id,j,i]\n skip1 = True\n skip2 = True\n break\n\n if skip1 is False:\n test = (domeg - dom[id, j, ij_max - 2]) * (domeg - dom[id, j, ij_max - 1])\n # print 'test2:',test\n # print 'domeg',domeg\n # print 'dom[id,j,ij_max-1]',dom[id,j,ij_max-2]\n # print 'dom[id,j,ij_max]',dom[id,j,ij_max-1]\n if test <= 0.0:\n # print 'triggered test2'\n x1 = dom[id, j, ij_max - 3]\n x2 = dom[id, j, ij_max - 2]\n x3 = dom[id, j, ij_max - 1]\n y1 = oline[id, j, ij_max - 3]\n y2 = oline[id, j, ij_max - 2]\n y3 = oline[id, j, ij_max - 1]\n tprof[id, j, i] = pystark.FINTRP(x1, x2, x3, y1, y2, y3, domeg)\n y1 = olines[id, j, ij_max - 3]\n y2 = olines[id, j, ij_max - 2]\n y3 = olines[id, j, ij_max - 1]\n tprofs[id, j, i] = pystark.FINTRP(x1, x2, x3, y1, y2, y3, domeg)\n skip2 = True\n # print 'x1,x2,x3',x1,x2,x3\n # print 'y1,y2,y3',y1,y2,y3\n # print 'tprof[id,j,i]',tprof[id,j,i]\n # print 'tprofs[id,j,i]',tprofs[id,j,i]\n continue\n\n if skip2 is False:\n if 
domeg > dom[id, j, ij_max]:\n # print 'triggered test3'\n tprof[id, j, i] = fainom / (domeg ** 2.5)\n tprofs[id, j, i] = tprof[id, j, i]\n continue\n\n # We can skip writing the intermediate file\n\n\n for id in np.arange(num_tab_dens):\n otest_dens = (dens_cm - tab_dens_cm[id]) * (dens_cm - tab_dens_cm[id + 1])\n if otest_dens <= 0.0:\n dense1 = tab_dens_cm[id]\n dense2 = tab_dens_cm[id + 1]\n id1 = id\n id2 = id + 1\n break\n\n if dens_cm >= tab_dens_cm[num_tab_dens]:\n dense1 = tab_dens_cm[num_tab_dens - 1]\n dense2 = tab_dens_cm[num_tab_dens]\n id1 = num_tab_dens - 1\n id2 = num_tab_dens\n\n for it in np.arange(10):\n otest = (temp_k - tab_temp_k[it]) * (temp_k - tab_temp_k[it + 1])\n if otest <= 0.0:\n it1 = it\n it2 = it + 1\n # pr01 = pr0[id2,it1] # max value of pr0 for T1,T2,dense1,dense2\n tempe1 = tab_temp_k[it]\n tempe2 = tab_temp_k[it + 1]\n break\n\n # interpolation in temperature\n for id in np.arange(id1, id2 + 1):\n for i in np.arange(jdom):\n uprof[id, i] = tprof[id, it1, i] + (temp_k - tempe1) * (tprof[id, it2, i] - tprof[id, it1, i]) / (\n tempe2 - tempe1)\n uprofs[id, i] = tprofs[id, it1, i] + (temp_k - tempe1) * (tprofs[id, it2, i] - tprofs[id, it1, i]) / (\n tempe2 - tempe1)\n\n delta_lambda = np.zeros(jdom)\n delta_nu = np.zeros(jdom)\n wprof_nu = np.zeros(jdom)\n wprofs_nu = np.zeros(jdom)\n\n for i in np.arange(jdom):\n wprof = uprof[id1, i] + (dens_cm - dense1) * (uprof[id2, i] - uprof[id1, i]) / (dense2 - dense1)\n wprofs = uprofs[id1, i] + (dens_cm - dense1) * (uprofs[id2, i] - uprofs[id1, i]) / (dense2 - dense1)\n delta_omega = domm[i] * normal_holtsmark_field\n delta_nu[i] = delta_omega / (2 * np.pi)\n delta_lambda[i] = wl_centre_angst * delta_omega / (angular_freq_0 + delta_omega)\n # print(delta_lambda[i])\n wprof_nu[i] = (wprof / normal_holtsmark_field) * (2. * np.pi)\n wprofs_nu[i] = (wprofs / normal_holtsmark_field) * (2. 
* np.pi)\n # print '%e %e %e %e' %(delta_lambda[i],delta_nu[i],wprof_nu[i],wprofs_nu[i])\n\n delta_lambda2 = np.concatenate((-delta_lambda[::-1], delta_lambda)) + wl_centre_angst # + olam0\n delta_nu2 = np.concatenate((-delta_nu[::-1], delta_nu))\n wprof_nu2 = np.concatenate((wprof_nu[::-1], wprof_nu))\n wprofs_nu2 = np.concatenate((wprofs_nu[::-1], wprofs_nu))\n\n # for some reason, i only get a good agreement with the other models if i take the pure Stark broadened Stehle\n # output and manually convolve it with the Doppler profile -- not sure why...\n ls_sd = wprofs_nu2\n\n # interpolate onto frequency axis\n ls_sd = np.interp(self.freq_axis, delta_nu2 + self.freq_centre, ls_sd)\n\n return ls_sd", "def build_dm_from_data_to_file(data, filename, derivative=False):\n distance_matrix = []\n\n for _ in range(len(data)):\n distance_matrix.append([0] * len(data))\n\n for i in tqdm(range(len(data) - 1)):\n for j in range(i + 1, len(data)):\n distance = dtw(data[i],\n data[j],\n derivative=derivative)\n distance_matrix[i][j] = distance\n distance_matrix[j][i] = distance\n\n file = open(filename, \"w\")\n for row in distance_matrix:\n file.write(str(row[0]))\n for i in range(1, len(row)):\n file.write(\",\" + str(row[i]))\n file.write(\"\\n\")\n file.close()", "def mounting_matrix(self):\n # fmt: off\n count = 0\n for x in range(self.ntheta):\n self.M[count][count] = 1\n self.f[count][0] = self.p_in\n count = count + self.nz - 1\n self.M[count][count] = 1\n self.f[count][0] = self.p_out\n count = count + 1\n count = 0\n for x in range(self.nz - 2):\n self.M[self.ntotal - self.nz + 1 + count][1 + count] = 1\n self.M[self.ntotal - self.nz + 1 + count][self.ntotal - self.nz + 1 + count] = -1\n count = count + 1\n count = 1\n j = 0\n for i in range(1, self.nz - 1):\n a = (1 / self.dtheta ** 2) * (self.c1[i][self.ntheta - 1])\n self.M[count][self.ntotal - 2 * self.nz + count] = a\n b = (1 / self.dz ** 2) * (self.c2[i - 1, j])\n self.M[count][count - 1] = b\n c = -((1 / self.dtheta ** 2) * ((self.c1[i][j]) + self.c1[i][self.ntheta - 1])\n + (1 / self.dz ** 2) * (self.c2[i][j] + self.c2[i - 1][j]))\n self.M[count, count] = c\n d = (1 / self.dz ** 2) * (self.c2[i][j])\n self.M[count][count + 1] = d\n e = (1 / self.dtheta ** 2) * (self.c1[i][j])\n self.M[count][count + self.nz] = e\n count = count + 1\n count = self.nz + 1\n for j in range(1, self.ntheta - 1):\n for i in range(1, self.nz - 1):\n a = (1 / self.dtheta ** 2) * (self.c1[i, j - 1])\n self.M[count][count - self.nz] = a\n b = (1 / self.dz ** 2) * (self.c2[i - 1][j])\n self.M[count][count - 1] = b\n c = -((1 / self.dtheta ** 2) * ((self.c1[i][j]) + self.c1[i][j - 1])\n + (1 / self.dz ** 2) * (self.c2[i][j] + self.c2[i - 1][j]))\n self.M[count, count] = c\n d = (1 / self.dz ** 2) * (self.c2[i][j])\n self.M[count][count + 1] = d\n e = (1 / self.dtheta ** 2) * (self.c1[i][j])\n self.M[count][count + self.nz] = e\n count = count + 1\n count = count + 2\n count = 1\n for j in range(self.ntheta - 1):\n for i in range(1, self.nz - 1):\n if j == 0:\n self.f[count][0] = (self.c0w[i][j] - self.c0w[i][self.ntheta - 1]) / self.dtheta\n else:\n self.f[count][0] = (self.c0w[i, j] - self.c0w[i, j - 1]) / self.dtheta\n count = count + 1\n count = count + 2\n # fmt: on", "def write_calculations(params, hdf5_data):\n\n if params.rho is not None:\n dset = require_dataset(hdf5_data, structure.H5_ENV_VOLUME, (1,), dtype=settings.NEMOH_FLOAT)\n dset[0] = float(params.rho)\n set_hdf5_attributes(dset, structure.H5_ENV_VOLUME_ATTR)\n\n if params.g is not None:\n dset = 
require_dataset(hdf5_data, structure.H5_ENV_GRAVITY, (1,), dtype=settings.NEMOH_FLOAT)\n dset[0] = float(params.g)\n set_hdf5_attributes(dset, structure.H5_ENV_GRAVITY_ATTR)\n\n if params.depth is not None:\n dset = require_dataset(hdf5_data, structure.H5_ENV_DEPTH, (1,), dtype=settings.NEMOH_FLOAT)\n dset[0] = float(params.depth)\n set_hdf5_attributes(dset, structure.H5_ENV_DEPTH_ATTR)\n\n if (params.xeff is not None) and (params.yeff is not None):\n dset = require_dataset(hdf5_data, structure.H5_ENV_WAVE_POINT, (2,), dtype=settings.NEMOH_FLOAT)\n dset[0] = float(params.xeff)\n dset[1] = float(params.yeff)\n set_hdf5_attributes(dset, structure.H5_ENV_WAVE_POINT_ATTR)\n\n if params.floating_bodies is not None:\n num_bodies = len(params.floating_bodies)\n i = 0\n for fb in params.floating_bodies:\n i += 1\n body = structure.H5_BODIES + structure.H5_BODY_BASE + str(i) + '/'\n mesh_x = []\n with open(fb.mesh_file, 'r') as mesh_file:\n for line in mesh_file:\n mesh_x.append(line)\n\n num_points = int(float(fb.points))\n num_panels = int(float(fb.panels))\n dset = require_dataset(hdf5_data, body + structure.H5_BODY_NUM_POINTS, (1, ), dtype=settings.NEMOH_INT)\n dset[0] = num_points\n set_hdf5_attributes(dset, structure.H5_BODY_NUM_POINTS_ATTR)\n\n dset = require_dataset(hdf5_data, body + structure.H5_BODY_NUM_PANELS, (1, ), dtype=settings.NEMOH_INT)\n dset[0] = num_panels\n set_hdf5_attributes(dset, structure.H5_BODY_NUM_PANELS_ATTR)\n\n mesh_idx = 0\n dset = require_dataset(hdf5_data, body + structure.H5_BODY_MESH, (num_points+num_panels+1, 4),\n dtype=settings.NEMOH_FLOAT)\n mesh_x2 = mesh_x[mesh_idx].split()\n set_hdf5_attributes(dset, structure.H5_BODY_MESH_ATTR)\n\n dset[0, 0] = int(float(mesh_x2[0]))\n dset[0, 1] = int(float(mesh_x2[1]))\n\n for j in range(1, num_points+num_panels+1):\n mesh_idx += 1\n mesh_x2 = mesh_x[mesh_idx].split()\n dset[j, :] = [float(x) for x in mesh_x2[:4]]\n\n if j == num_points:\n mesh_idx += 1\n\n num = int(float(fb.degrees_of_freedom))\n dset = require_dataset(hdf5_data, body + structure.H5_FREEDOM_DEGREE, (num, 7), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_FREEDOM_DEGREE_ATTR)\n\n x1 = [fb.surge, fb.sway, fb.heave, fb.roll_about_cdg, fb.pitch_about_cdg, fb.yaw_about_cdg]\n for j in range(len(x1)):\n if x1[j]:\n x2 = x1[j].split()\n dset[j, :] = np.array([float(x) for x in x2[:7]])\n\n num = int(float(fb.resulting_generalised_forces))\n dset = require_dataset(hdf5_data, body + structure.H5_GENERALISED_FORCES, (num, 7),\n dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_GENERALISED_FORCES_ATTR)\n x1 = [fb.force_in_x_direction, fb.force_in_y_direction, fb.force_in_z_direction,\n fb.moment_cdg_force_in_x_direction, fb.moment_cdg_force_in_y_direction,\n fb.moment_cdg_force_in_z_direction]\n for j in range(len(x1)):\n if x1[j]:\n x2 = x1[j].split()\n dset[j, :] = [float(x) for x in x2[:7]]\n\n if params.wave_frequencies is not None:\n dset = require_dataset(hdf5_data, structure.H5_NUM_WAVE_FREQUENCIES, (1,), dtype=settings.NEMOH_INT)\n set_hdf5_attributes(dset, structure.H5_NUM_WAVE_FREQUENCIES_ATTR)\n dset[0] = int(float(params.wave_frequencies))\n\n if params.min_wave_frequencies is not None:\n dset = require_dataset(hdf5_data, structure.H5_MIN_WAVE_FREQUENCIES, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_MIN_WAVE_FREQUENCIES_ATTR)\n dset[0] = float(params.min_wave_frequencies)\n\n if params.max_wave_frequencies is not None:\n dset = require_dataset(hdf5_data, 
structure.H5_MAX_WAVE_FREQUENCIES, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_MAX_WAVE_FREQUENCIES_ATTR)\n dset[0] = float(params.max_wave_frequencies)\n\n if params.wave_directions is not None:\n dset = require_dataset(hdf5_data, structure.H5_NUM_WAVE_DIRECTIONS, (1,), dtype=settings.NEMOH_INT)\n set_hdf5_attributes(dset, structure.H5_NUM_WAVE_DIRECTIONS_ATTR)\n dset[0] = int(params.wave_directions)\n\n if params.min_wave_directions is not None:\n dset = require_dataset(hdf5_data, structure.H5_MIN_WAVE_DIRECTIONS, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_MIN_WAVE_DIRECTIONS_ATTR)\n dset[0] = float(params.min_wave_directions)\n\n if params.max_wave_direction is not None:\n dset = require_dataset(hdf5_data, structure.H5_MAX_WAVE_DIRECTIONS, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_MAX_WAVE_DIRECTIONS_ATTR)\n dset[0] = float(params.max_wave_direction)\n\n x1 = ['1 0.1 10.', '0', '181. 0. 180.', '1 2 1000. 2.']\n idx = 0\n x2 = x1[idx].split()\n\n dset = require_dataset(hdf5_data, structure.H5_COMPUTE_IRF, (1,), dtype=settings.NEMOH_INT)\n set_hdf5_attributes(dset, structure.H5_COMPUTE_IRF_ATTR)\n dset[0] = int(x2[0])\n\n dset = require_dataset(hdf5_data, structure.H5_IRF_TIME_STEP, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_IRF_TIME_STEP_ATTR)\n dset[0] = float(x2[1])\n dset = require_dataset(hdf5_data, structure.H5_IRF_DURATION, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_IRF_DURATION_ATTR)\n dset[0] = float(x2[2])\n\n idx += 1\n x2 = x1[idx].split()\n dset = require_dataset(hdf5_data, structure.H5_SHOW_PRESSURE, (1,), dtype=settings.NEMOH_INT)\n set_hdf5_attributes(dset, structure.H5_SHOW_PRESSURE_ATTR)\n dset[0] = int(x2[0])\n\n idx += 1\n x2 = x1[idx].split()\n dset = require_dataset(hdf5_data, structure.H5_KOCHIN_NUMBER, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_KOCHIN_NUMBER_ATTR)\n dset[0] = float(x2[0])\n dset = require_dataset(hdf5_data, structure.H5_KOCHIN_MIN, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_KOCHIN_MIN_ATTR)\n dset[0] = float(x2[1])\n dset = require_dataset(hdf5_data, structure.H5_KOCHIN_MAX, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_KOCHIN_MAX_ATTR)\n dset[0] = float(x2[2])\n\n idx += 1\n x2 = x1[idx].split()\n dset = require_dataset(hdf5_data, structure.H5_FREE_SURFACE_POINTS_X, (1,), dtype=settings.NEMOH_INT)\n set_hdf5_attributes(dset, structure.H5_FREE_SURFACE_POINTS_X_ATTR)\n dset[0] = int(x2[0])\n dset = require_dataset(hdf5_data, structure.H5_FREE_SURFACE_POINTS_Y, (1,), dtype=settings.NEMOH_INT)\n set_hdf5_attributes(dset, structure.H5_FREE_SURFACE_POINTS_Y_ATTR)\n dset[0] = int(x2[1])\n dset = require_dataset(hdf5_data, structure.H5_FREE_SURFACE_DIMENSION_X, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_FREE_SURFACE_DIMENSION_X_ATTR)\n dset[0] = float(x2[2])\n dset = require_dataset(hdf5_data, structure.H5_FREE_SURFACE_DIMENSION_Y, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_FREE_SURFACE_DIMENSION_Y_ATTR)\n dset[0] = float(x2[3])\n\n if params.indiq_solver is not None:\n dset = require_dataset(hdf5_data, structure.H5_SOLVER_TYPE, (1,), dtype=settings.NEMOH_INT)\n dset[0] = int(float(params.indiq_solver))\n set_hdf5_attributes(dset, structure.H5_SOLVER_TYPE_ATTR)\n\n if params.ires is not None:\n dset = require_dataset(hdf5_data, 
structure.H5_SOLVER_GMRES_RESTART, (1,), dtype=settings.NEMOH_INT)\n dset[0] = int(float(params.ires))\n set_hdf5_attributes(dset, structure.H5_SOLVER_GMRES_RESTART_ATTR)\n\n if params.tol_gmres is not None:\n dset = require_dataset(hdf5_data, structure.H5_SOLVER_GMRES_STOPPING, (1,), dtype=settings.NEMOH_FLOAT)\n dset[0] = float(params.tol_gmres)\n set_hdf5_attributes(dset, structure.H5_SOLVER_GMRES_STOPPING_ATTR)\n\n if params.max_iterations is not None:\n dset = require_dataset(hdf5_data, structure.H5_SOLVER_GMRES_MAX_ITERATIONS, (1,), dtype=settings.NEMOH_INT)\n\n dset[0] = int(float(params.max_iterations))\n set_hdf5_attributes(dset, structure.H5_SOLVER_GMRES_MAX_ITERATIONS_ATTR)", "def save_state(self):\n\t\tf = open('output.csv', 'a')\n\t\tstate = ';'.join([str(datetime.now()), str(self.thin._actuation_value), str(self.thin.temperature), str(self.thin.presence), str(self.outside.temperature)])\n\t\tprint(state)\n\t\tf.write(state + '\\n')\n\t\tf.close()", "def calculate_manhattan_dist(state):", "def stochastic_data():\n\n tagPar = np.array([\n 'k = ',\n 'r = ',\n 'T0 = ',\n 'N = ',\n 'R = ',\n 'T = ',\n 'dt = ',\n 'Dt = ',\n 'L = ',\n 'gamma = ',\n 'eta = ',\n 'lambda =',\n 'delta = ',\n 'beta = ',\n 'a = ',\n 'N0 = ',\n 'u = ',\n 'sigma1 = ',\n 'sigma2 = ',\n 'x01 = ',\n 'x02 = ',\n 'x03 = ',\n ])\n ParValues = np.array([\n self.k,\n self.r,\n self.T0,\n self.N,\n self.R,\n self.T,\n self.dt,\n self.Dt,\n self.L,\n self.gamma,\n self.eta,\n self.Lambda,\n self.delta,\n self.beta,\n self.a,\n self.NN,\n self.u,\n self.sigma1,\n self.sigma2,\n self.Xzero[0, 0],\n self.Xzero[0, 1],\n self.Xzero[0, 2]\n ])\n strPrefix = str(self.Dt)\n name1 = 'StoParameters' + strPrefix + '.txt'\n '''\n name2 = 'StoSolution' + strPrefix + '.txt'\n name3 = 'StoRefSolution' + str(self.dt) + '.txt'\n '''\n PARAMETERS = np.column_stack((tagPar, ParValues))\n np.savetxt(name1, PARAMETERS, delimiter=\" \", fmt=\"%s\")\n '''\n np.save(name2,\n np.transpose(\n (\n t, Uem1, Uem2, Uem3, Ustk1, Ustk2, Ustk3, Utem1,\n Utem2, Utem3\n )\n ))\n np.savetxt(name3,\n np.transpose(\n (\n self.t, Ueem1, Ueem2, Ueem3\n )\n ))\n if self.sigma1 == 0.0:\n if self.sigma2 == 0.0:\n DeterministicData()\n return\n StochasticData()\n '''", "def __init__(self, path, verbose=False, mindens=1e3, **kwargs):\n self.path = path\n self.aux = os.path.dirname(os.path.realpath(__file__))\n self.aux = self.aux.replace('analysis', 'aux/')\n self.mindens = mindens\n self.depletion = kwargs.get('depletion', 1.0)\n self.verbose = verbose\n if not self.verbose:\n warnings.simplefilter('ignore')\n\n # Currently only important are the grid [1] and level populations [4]\n # from the grid. Both [2] and [3] are for the Delanunay triangulation\n # and can thus be ignored.\n\n self.hdu = fits.open(self.path)\n self.grid = self.hdu[1]\n if self.verbose:\n for c in self.grid.columns:\n print c\n print('\\n')\n self.names = self.grid.columns.names\n\n # Coordinates. Remove all the sink particles and convert to au. The\n # native system is cartesian, (x, y, z). Also convert them into\n # spherical polar coordinates, (r, p, t).\n\n self.notsink = ~self.grid.data['IS_SINK']\n self.xvals = self.grid.data['x1'][self.notsink] / sc.au\n self.yvals = self.grid.data['x2'][self.notsink] / sc.au\n self.zvals = self.grid.data['x3'][self.notsink] / sc.au\n self.rvals = np.hypot(self.yvals, self.xvals)\n self.pvals = np.arctan2(self.yvals, self.xvals)\n self.tvals = np.arctan2(self.zvals, self.rvals)\n\n # Physical properties at each cell. 
If dtemp == -1, then use gtemp,\n # this allows us to calculate the dust continuum for the line emission.\n\n self.gtemp = self.grid.data['TEMPKNTC'][self.notsink]\n self.dtemp = self.grid.data['TEMPDUST'][self.notsink]\n self.dtemp = np.where(self.dtemp == -1, self.gtemp, self.dtemp)\n\n # Assume that the densities are only ever H2 or [oH2, pH2]. If the\n # latter, allow density to be the sum. Individual values can still be\n # accessed through _density.\n\n self.ndens = len([n for n in self.names if 'DENSITY' in n])\n if self.ndens > 1 and self.verbose:\n print('Assuming DENSITY1 and DENSITY2 are oH2 and pH2.')\n self._dens = {d: self.grid.data['DENSITY%d' % (d+1)][self.notsink]\n for d in range(self.ndens)}\n self.dens = np.sum([self._dens[k] for k in range(self.ndens)], axis=0)\n\n # Include the other physical properties.\n\n self.nabun = len([n for n in self.names if 'ABUNMOL' in n])\n if self.nabun > 1:\n raise NotImplementedError()\n self.abun = self.grid.data['ABUNMOL1'][self.notsink]\n self.velo = np.array([self.grid.data['VEL%d' % i][self.notsink]\n for i in [1, 2, 3]])\n self.turb = self.grid.data['TURBDPLR'][self.notsink]\n\n # Mask out all points with a total density of <= min_density, with a\n # default of 10^4. Include the depletion of the emitting molecule.\n\n self.dmask = self.dens > kwargs.get('min_density', 1e3)\n self.xvals = self.xvals[self.dmask]\n self.yvals = self.yvals[self.dmask]\n self.zvals = self.zvals[self.dmask]\n self.rvals = self.rvals[self.dmask]\n self.pvals = self.pvals[self.dmask]\n self.tvals = self.tvals[self.dmask]\n self.gtemp = self.gtemp[self.dmask]\n self.dtemp = self.dtemp[self.dmask]\n self.dens = self.dens[self.dmask]\n self.abun = self.abun[self.dmask] * self.depletion\n self.turb = self.turb[self.dmask]\n\n # Excitation properties. Remove all the sink particles.\n\n pops = self.hdu[4].data.T\n idxs = [i for i, b in enumerate(self.notsink) if not b]\n self.levels = np.delete(pops, idxs, axis=1)\n idxs = [i for i, b in enumerate(self.dmask) if not b]\n self.levels = np.delete(self.levels, idxs, axis=1)\n\n # -- Gridding Options --\n #\n # There are three options here. One can provide the axes either\n # as an array, otherwise the grids are generated depending on the\n # points of the model.\n # By default the grid is logarithmic in the vertical direction but\n # linear in the radial direction with 500 points in each, however\n # these are customisable.\n\n grids = kwargs.get('grids', None)\n if grids is None:\n self.xgrid, self.ygrid = self.estimate_grids(**kwargs)\n else:\n try:\n self.xgrid, self.ygrid = grids\n except ValueError:\n self.xgrid = grids\n self.ygrid = grids\n except:\n raise ValueError('grids = [xgrid, ygrid].')\n self.xpnts = self.xgrid.size\n self.ypnts = self.ygrid.size\n\n # With the grids, grid the parameters and store them in a dictionary.\n # Only read in the first (by default) 5 energy levels, but this can be\n # increased later with a call to self.grid_levels(j_max).\n\n self.method = kwargs.get('method', 'linear')\n if self.verbose:\n print('Beginning gridding using %s interpolation.' 
% self.method)\n if self.method == 'nearest':\n print('Warning: neartest may produce unwanted features.')\n\n self.gridded = {}\n self.gridded['dens'] = self.grid_param(self.dens, self.method)\n self.gridded['gtemp'] = self.grid_param(self.gtemp, self.method)\n self.gridded['dtemp'] = self.grid_param(self.dtemp, self.method)\n self.gridded['abun'] = self.grid_param(self.abun, self.method)\n self.gridded['turb'] = self.grid_param(self.turb, self.method)\n self.gridded['levels'] = {}\n self.grid_levels(kwargs.get('nlevels', 5))\n\n return", "def write_mm(g, fn):\n f = open(fn, \"w\")\n f.write(\"%d %d %d\\n\" % (g.vcount(), g.vcount(), g.ecount()))\n\n if g.is_weighted():\n for e in g.es():\n f.write(\"%d %d %.4f\\n\" % (e.source, e.target, e[\"weight\"]))\n else:\n for e in g.es():\n f.write(\"%d %d 1\\n\" % (e.source, e.target))\n\n f.close()", "def main():\n\n \"\"\"\n nodes, hd3 = erdos_rennie_like(100,8333,5)\n export('d3',hd3)\n\n nodes, hd5 = erdos_rennie_like(100,8333,6)\n export('d5',hd5)\n\n nodes, hd6 = erdos_rennie_like(100,8333,7)\n export('d6',hd6)\n \"\"\"\n\n \"\"\"\n nodes, sparse1 = erdos_rennie_like(600, 1200, 3)\n export('sparse_diag1', sparse1)\n\n nodes, sparse2 = erdos_rennie_like(600, 2400, 3)\n export('sparse_diag2',sparse2)\n\n nodes, sparse3 = erdos_rennie_like(600, 5800, 3)\n export('sparse_diag3',sparse3)\n\n nodes, sparse4 = erdos_rennie_like(600,11600, 3)\n export('sparse_diag4',sparse4)\n\n nodes, sparse5 = erdos_rennie_like(600,23200, 3)\n export('sparse_diag5',sparse5)\n \"\"\"\n\n nodes, size1 = erdos_rennie_like(100, 500, 3)\n nodes, size2 = erdos_rennie_like(200,1000,3)\n nodes,size3 = erdos_rennie_like(300,1500,3)\n nodes,size4 = erdos_rennie_like(400,2000,3)\n nodes,size5 = erdos_rennie_like(500,2500,3)\n\n export('size_diag1',size1)\n export('size_diag2',size2)\n export('size_diag3',size3)\n export('size_diag4',size4)\n export('size_diag5',size5)", "def saveState(self, fname):\n data = { 'ksize': self.ksize,\n 'alpha': self.alpha,\n 'id': self.node.id,\n 'neighbors': self.bootstrappableNeighbors() }\n if len(data['neighbors']) == 0:\n self.log.warning(\"No known neighbors, so not writing to cache.\")\n return\n with open(fname, 'wb') as f:\n pickle.dump(data, f)", "def casdetude_dinardo():\n file_path = PROJECT_PATH + \"/geographycal_data/Monterusciello/MontEdo_buildings\"\n router = Router(building_file=file_path)\n\n router.design_aqueduct(0)\n\n router.solve(router.acqueduct)\n minimal = router.design_minimal_aqueduct(router.acqueduct, \"Q*H\")\n kpi_calculator(minimal)\n\n print(\"N H Z P\")\n for i, (node, datadict) in enumerate(router.acqueduct.nodes.items()):\n print(i, round(datadict[\"H\"]), round(datadict[\"ELEVATION\"]), round(datadict[\"H\"] - datadict[\"ELEVATION\"]))\n\n\n router.write2shp(minimal, PROJECT_PATH + \"/Monterusciello_solution/Monterusciello_acqueduct\")\n router.write2epanet(minimal, PROJECT_PATH + \"/Monterusciello_solution/Monterusciello_acqueduct\")", "def analysis_save_dm(analyzed_datas,plotables,analysis_attributes,file_name):\n#TODO implement error handling\n save_dict = {}\n for i,ana_attr in enumerate(analysis_attributes):\n if ana_attr.method == 'FSD':\n save_dict['FSD_SMAP'] = analyzed_datas[i][0]\n np.save(file_name,save_dict)", "def dl(z,h=0.7,omegalambda=0.7,omegam=0.3,omegak=0.0):\n return distcalc(z,h,omegalambda,omegam,omegak)['dl']", "def estimateZ_AIS(self, steps, M):\n\n\t\tscaling = self.data.sum(axis=1).mean().astype(int) # mean document length - so we know how many visible activations to sample\n\t\tlw 
= self.h_units * numpy.log(2) * numpy.ones(M)\n\t\tvisible_f1_pdf = numpy.ones( self.dictsize)/ self.dictsize # uniform initial distribution\n\t\t# sample negative data:\n\t\tvisible_samp = numpy.zeros(( self.dictsize, M ))\n\t\tfor doc in xrange(M):\n\t\t\tvisible_samp[:, doc] = numpy.random.multinomial(scaling, visible_f1_pdf, size=1)\n\n\t\tW_h = numpy.dot( visible_samp.T, self.W )*self.bottomUp + scaling*self.hbias # part of free energy for negative data\n\t\tbias_v = numpy.dot(visible_samp.T, self.vbias) # other part of free energy equation\n\n\t\tfor s in xrange(1,steps):\n\t\t\tb_k = float(s)/steps\n\t\t\texpW_h = numpy.exp( b_k * W_h) # weight adjusted hidden variable distribution (without normalization)\n\t\t\tlw += b_k*bias_v + (numpy.log(1+expW_h)).sum(axis=1) # this is effectively adding p^{*}_k(v_k) (following notation from Salak & Murray 2008)\n\n\t\t\t# apply transition (so sample hidden and visible once):\n\t\t\thidden_f1 = sigmoid( numpy.dot(visible_samp.T, self.W)*b_k*self.bottomUp + b_k*scaling* self.hbias) # fantasy 1, probability\n\t\t\thidden_f1_sample = (hidden_f1 > numpy.random.rand( M, self.h_units) )*1\n\n\t\t\t# sample visible units:\n\t\t\tvisible_f1 = numpy.exp(b_k * (numpy.dot(self.W, hidden_f1_sample.T) + self.vbias.reshape((self.dictsize, 1)) ) )\n\t\t\tnormC = visible_f1.sum(axis=0).reshape(( M, ))\n\t\t\tvisible_f1_pdf = visible_f1/normC\n\t\t\tfor doc in xrange(M):\n\t\t\t\tvisible_samp[:, doc] = numpy.random.multinomial(scaling, visible_f1_pdf[:, doc], size=1)\n\n\t\t\t# update W_h and bias_v\n\t\t\tW_h = numpy.dot( visible_samp.T, self.W ) + scaling * self.hbias # part of free energy for negative data\n\t\t\tbias_v = numpy.dot(visible_samp.T, self.vbias)\n\n\t\t\texpW_h = numpy.exp( b_k *( W_h))\n\t\t\tlw -= (b_k*bias_v + (numpy.log(1+expW_h)).sum(axis=1) ) # this is effectively subtracting p^{*}_k(v_{k+1}) \n\n\t\t# add final term:\n\t\texpW_h = numpy.exp( ( W_h))\n\t\tlw += (bias_v + (numpy.log(1+expW_h)).sum(axis=1))\n\n\t\t# now collect all terms and return estimate of log partition function:\n\t\tself.logZ = logsumexp(lw) - numpy.log( M) # this is the log of the mean estimate\n\t\tself.logZ += self.h_units * numpy.log(2)", "def getTOD(self,i,d):\n output_filename = 'Output_Fits/{}'.format( d.filename.split('/')[-1])\n if os.path.exists(output_filename):\n os.remove(output_filename)\n\n tod_shape = d['level2/averaged_tod'].shape\n dset = d['level2/averaged_tod']\n tod_in = np.zeros((tod_shape[1],tod_shape[2],tod_shape[3]),dtype=dset.dtype)\n az = np.zeros((tod_shape[3]),dtype=dset.dtype)\n el = np.zeros((tod_shape[3]),dtype=dset.dtype)\n\n feeds = d['level1/spectrometer/feeds'][:]\n scan_edges = d['level2/Statistics/scan_edges'][...]\n\n todall = np.zeros((len(self.FeedIndex), self.datasizes[i])) \n weights = np.zeros((len(self.FeedIndex), self.datasizes[i])) \n\n # Read in data from each feed\n for index, ifeed in enumerate(self.FeedIndex[:]):\n\n dset.read_direct(tod_in,np.s_[ifeed:ifeed+1,:,:,:])\n d['level1/spectrometer/pixel_pointing/pixel_az'].read_direct(az,np.s_[ifeed:ifeed+1,:])\n d['level1/spectrometer/pixel_pointing/pixel_el'].read_direct(el,np.s_[ifeed:ifeed+1,:])\n\n # Statistics for this feed\n medfilt_coefficient = d['level2/Statistics/filter_coefficients'][ifeed,...]\n atmos = d['level2/Statistics/atmos'][ifeed,...]\n atmos_coefficient = d['level2/Statistics/atmos_coefficients'][ifeed,...]\n wnoise_auto = d['level2/Statistics/wnoise_auto'][ifeed,...]\n\n # then the data for each scan\n last = 0\n for iscan,(start,end) in 
enumerate(scan_edges):\n median_filter = d['level2/Statistics/FilterTod_Scan{:02d}'.format(iscan)][ifeed,...]\n N = int((end-start)//self.offsetLen * self.offsetLen)\n end = start+N\n tod = tod_in[...,start:end]\n\n # Subtract atmospheric fluctuations per channel\n for iband in range(4):\n for ichannel in range(64):\n if self.channelmask[ifeed,iband,ichannel] == False:\n amdl = Statistics.AtmosGroundModel(atmos[iband,iscan],az[start:end],el[start:end]) *\\\n atmos_coefficient[iband,ichannel,iscan,0]\n tod[iband,ichannel,:] -= median_filter[iband,:N] * medfilt_coefficient[iband,ichannel,iscan,0]\n tod[iband,ichannel,:] -= amdl\n tod[iband,ichannel,:] -= np.nanmedian(tod[iband,ichannel,:])\n tod /= self.calfactors[ifeed,:,:,None] # Calibrate to Jupiter temperature scale\n\n # Then average together the channels\n wnoise = wnoise_auto[:,:,iscan,:]\n channels = (self.channelmask[ifeed].flatten() == False)\n channels = np.where((channels))[0]\n\n tod = np.reshape(tod,(tod.shape[0]*tod.shape[1], tod.shape[2]))\n wnoise = np.reshape(wnoise,(wnoise.shape[0]*wnoise.shape[1], wnoise.shape[2]))\n\n nancheck = np.sum(tod[channels,:],axis=1)\n channels = channels[np.isfinite(nancheck) & (nancheck != 0)]\n nancheck = np.sum(wnoise[channels,:],axis=1)\n channels = channels[np.isfinite(nancheck) & (nancheck != 0)]\n\n\n top = np.sum(tod[channels,:]/wnoise[channels,:]**2,axis=0)\n bot = np.sum(1/wnoise[channels,:]**2)\n\n todall[index,last:last+N] = top/bot\n weights[index,last:last+N] = bot\n last += N\n\n return todall, weights", "def fn_onstatetime_hist(file_name,folder,mean_onstatetime,distribution):\n import numpy as np\n import matplotlib.pyplot as plt\n from scipy.stats import lognorm,norm\n from pylab import text\n \n n_molecules=len(mean_onstatetime)\n \n #Plot photon flux\n figure_name=file_name+'_onstatetime'\n ax = plt.subplot(111)\n num_bins = np.linspace(int(min(mean_onstatetime)), int(np.mean(mean_onstatetime)), int(np.sqrt(len(mean_onstatetime))*8))\n ax.hist(mean_onstatetime, bins=num_bins, density=True, color='forestgreen',edgecolor='black')\n \n #Choose distribution\n if distribution=='lognormal':\n #Fit lognormal curve\n sigma,loc,mean = lognorm.fit(mean_onstatetime, floc=0)\n pdf = lognorm.pdf(num_bins, sigma, loc, mean) #sigma=shape, mu=np.log(scale)\n ax.plot(num_bins, pdf, 'k',linestyle='--')\n \n elif distribution=='normal':\n #Fit normal curve\n mean, std = norm.fit(mean_onstatetime)\n pdf = norm.pdf(num_bins, mean, std) #sigma=shape, mu=np.log(scale)\n ax.plot(num_bins, pdf, 'k',linestyle='--')\n \n \n \n #Edit plot\n plt.xlabel('Mean on-state time (s)', fontname='Arial', fontsize=12)\n plt.ylabel('Probability density', fontname='Arial', fontsize=12)\n plt.xticks(fontname='Arial', fontsize=12)\n plt.yticks(fontname='Arial', fontsize=12)\n plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n plt.ticklabel_format(style='sci', axis='x', scilimits=(0,0))\n text(0.75, 0.95,'μ='+str(round(mean,2))+' s',horizontalalignment='center', verticalalignment='center',transform = ax.transAxes,fontname='Arial', fontsize=12)\n text(0.40, 0.95,'N='+str(n_molecules),horizontalalignment='center', verticalalignment='center',transform = ax.transAxes,fontname='Arial', fontsize=12)\n plt.savefig(folder+'/Figures/PDFs'+ '/' + figure_name + '.pdf', dpi=500)\n plt.savefig(folder+'/Figures/PNGs'+ '/' + figure_name + '.png', dpi=500)\n \n return (plt.show())", "def get_logits(self, hidden_states: torch.FloatTensor,\n temperature: float = 1.0):\n return self.logits(hidden_states) / temperature", "def 
getDensityEstimate(self):\n return self.density", "def writeFundamental(self):\n dfFund = pd.DataFrame(self.fundamental_series)\n dfFund.set_index('FundamentalTime', inplace=True)\n self.writeLog(dfFund, filename='fundamental_{symbol}_freq_{self.log_frequency}_ns'.format(self.symbol))\n\n print(\"Noise-free fundamental archival complete.\")", "def save_nelder_mead_data(name, simplex, fvals, iters, evals):\n N = simplex.shape[0] # Number of points in simplex\n K = simplex.shape[1] # Total number of parameters\n\n with open(name + \".txt\", \"w\") as f:\n my_writer = csv.writer(f, delimiter=\",\")\n my_writer.writerow(simplex.shape)\n my_writer.writerow([iters, evals])\n for n in range(N):\n my_writer.writerow(simplex[n, :])\n my_writer.writerow(fvals)", "def write_distances(filename):\n print \"Computing distances and writing to \" + filename + \"...\"\n distances = {}\n n = 0\n\n for a1 in attrs:\n distances[a1] = {}\n n += 1\n print \"On point \" + str(n) + \" out of \" + str(len(attrs))\n for a2 in attrs:\n distances[a1][a2] = KL_divergence(attr_value_counts[a1],\n attr_value_counts[a2])\n\n with open(filename, 'w') as f:\n f.write(json.dumps(distances))", "def calc_det_dzh(theta):\n return 919.49 - 27.018 * theta + 0.26209 * theta ** 2 - 0.00083803 * theta ** 3", "def sumofstate_D2(T):\n\n Q = np.float64(0.0)\n\n #--- nuclear spin statistics ------------\n g_even = 6 # deuterium\n g_odd = 3\n # ---------------------------------------\n\n data = eJD2\n\n nCols = data.shape[1]\n # nCols is equal to the number of vibrational\n # states included in the summation\n\n # generate Q using each energy from the dataset\n for i in range(0, nCols):\n\n # select row for v=i\n row = data[:,i]\n\n # remove nan values\n x = row[np.logical_not(np.isnan(row))]\n\n # get the dimension (equal to J_max)\n nRows = x.shape[0]\n\n # iterate over the available energies\n for j in range(0, nRows):\n E = x[j]\n energy = (-1*E*H*C)\n\n factor = (2*j+1)*math.exp(energy/(K*T))\n\n if j % 2 == 0:\n factor = factor*g_even\n else:\n factor = factor*g_odd\n Q = Q+factor\n\n\n\n # return the sum of states for H2\n return Q", "def amet_memoryWise(self):\r\n # set up logging files to monitor the calculation\r\n logging.basicConfig(filename = os.path.join(self.path,'history_amet_python.log'),\r\n filemode = 'w+', level = logging.DEBUG,\r\n format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\r\n # initialize the time span\r\n # define sigma level\r\n A, B = self.defineSigmaLevels()\r\n # use example input file to load the basic dimensions information\r\n datapath_var = os.path.join(self.path, 'MERRA2_400.inst3_3d_asm_Nv.20160101.nc4.nc')\r\n var_key = Dataset(datapath_var)\r\n lat = var_key.variables['lat'][:]\r\n lon = var_key.variables['lon'][:]\r\n # calculate the reference levels based on A & B and standard surface pressure\r\n half_level = A + B * 101325\r\n level = (half_level[1:] + half_level[:-1]) / 2\r\n # create space for the output\r\n # AMET in the entire column\r\n E = np.zeros((len(lat),len(lon)), dtype=float)\r\n cpT = np.zeros((len(lat),len(lon)), dtype=float)\r\n Lvq = np.zeros((len(lat),len(lon)), dtype=float)\r\n gz = np.zeros((len(lat),len(lon)), dtype=float)\r\n uv2 = np.zeros((len(lat),len(lon)), dtype=float)\r\n logging.info(\"Start retrieving variables T,q,u,v,sp\")\r\n # The shape of each variable is (8,72,361,576)\r\n T = var_key.variables['T'][:]\r\n q = var_key.variables['QV'][:]\r\n sp = var_key.variables['PS'][:] #(8,361,576)\r\n u = var_key.variables['U'][:]\r\n v = 
var_key.variables['V'][:]\r\n logging.info(\"Extracting variables successfully!\") \r\n # compute gz\r\n z_model = self.calc_gz(var_key)\r\n # get the basic shape\r\n tt, hh, yy, xx = q.shape\r\n AMET = amet.met()\r\n E, cpT, Lvq, gz, uv2 = AMET.calc_met(T, q, sp, u, v, z_model, A, B,\r\n tt, hh, len(lat), len(lon), lat, self.lat_unit)\r\n\r\n return np.mean(E)", "def make_poi_frequency_and_entropy(city):\n frequency_res, entropy_res = [], []\n with open(spider_data_path + os.sep + 'poi' + os.sep + 'poi_{}.csv'.format(city)) as f:\n reader = csv.reader(f)\n for line in reader:\n line = list(map(int, line))\n total = sum(line[1:])\n frequency_tmp = [line[0]]\n frequency_tmp.extend([num / total for num in line[1:]])\n entropy_tmp = [line[0]]\n entropy_tmp.extend([-freq * np.log2(freq) if freq > 0 else 0 for freq in frequency_tmp[1:]])\n frequency_res.append(frequency_tmp)\n entropy_res.append(entropy_tmp)\n\n with open(spider_data_path + os.sep + 'poi' + os.sep + 'poi_frequency_{}.csv'.format(city), \"w\") as f:\n writer = csv.writer(f)\n writer.writerows(frequency_res)\n with open(spider_data_path + os.sep + 'poi' + os.sep + 'poi_entropy_{}.csv'.format(city), \"w\") as f:\n writer = csv.writer(f)\n writer.writerows(entropy_res)\n pass", "def savez(d,file):\n np.savez(file,row=d.row,col=d.col,data=d.data,shape=d.shape)", "def write_structures(self, plot_singular_values=False, directory='./'):\n\t\t\n\t\tweighted_snapshots = np.sqrt(self.weights)*self.snapshots.T\n\t\teigenvectors,eigenvalues,__ = np.linalg.svd(weighted_snapshots.T, full_matrices=False)\n\t\tself.pod_basis = np.transpose(np.power(self.weights,-0.5)*eigenvectors.T)\n\n\t\tif plot_singular_values == True:\n\t\t\tplt.semilogy(np.linspace(0,eigenvalues.shape[0],eigenvalues.shape[0]), eigenvalues/eigenvalues[0])\n\t\t\tplt.show()\n\n\t\t#nBasis = input('Chose the number of basis functions ')\t\n\t\t#u = u[:,:nBasis]\n\t\t\n\t\tn_points = self.mu_values.shape[1]\n\t\tn_basis = self.pod_basis.shape[1]\n\t\tcoefs_tria = np.array([])\n\t\tcoefs = np.zeros([n_basis,n_points])\n\n\t\tfor i in range(0,n_points):\n\t\t\tcoefs[:,i] = np.dot(np.transpose(self.pod_basis), self.snapshots[:,i]*self.weights)\n\n\t\tfor i in range(0,n_basis):\n\t\t\tcoefs_surf = interpolate.LinearNDInterpolator(np.transpose(self.mu_values),coefs[i,:])\n\t\t\tcoefs_tria = np.append(coefs_tria, coefs_surf)\n\n\t\tnp.save(directory + 'coefs_tria_' + self.output_name, coefs_tria)\n\t\tnp.save(directory + 'pod_basis_' + self.output_name, self.pod_basis)", "def info_density(self):\n tot_fields = self.tot_col * self.tot_rows # Total number of fields\n pop_fields = 100 - ((self.empty / tot_fields) * 100)\n\n print('Information density (%): ' + str(pop_fields) + '%')\n results.append('Information density (%): ' + str(pop_fields) + '%')", "def write_hdf5( self, iteration ) :\n # Before opening the file, select the particles that\n # need to be written for each species\n # (This allows to know the number of particles to be written,\n # which is needed when setting up the file)\n select_array_dict = {}\n selected_nlocals_dict = {}\n selected_nglobal_dict = {}\n # Loop over the different species, select the particles and fill\n # select_array_dict, selected_nlocals_dict, selected_nglobal_dict\n for species_name in sorted(self.species_dict.keys()):\n # Select the particles that will be written\n species = self.species_dict[species_name]\n select_array_dict[species_name] = self.apply_selection( species )\n # Get their total number\n n = select_array_dict[species_name].sum()\n 
if self.comm_world is not None :\n # In MPI mode: gather and broadcast an array containing\n # the number of particles on each process\n selected_nlocals_dict[species_name] = mpiallgather( n )\n selected_nglobal_dict[species_name] = \\\n sum(selected_nlocals_dict[species_name])\n else:\n # Single-proc output\n selected_nlocals_dict[species_name] = None\n selected_nglobal_dict[species_name] = n\n\n # Find the file name\n filename = \"data%08d.h5\" %iteration\n fullpath = os.path.join( self.write_dir, \"hdf5\", filename )\n\n # Create the file and setup its attributes\n # (can be done by one proc or in parallel)\n self.create_file_empty_particles( fullpath, self.top.it,\n self.top.time, self.top.dt, selected_nglobal_dict )\n\n # Open the file again (possibly in parallel)\n f = self.open_file( fullpath, parallel_open=self.lparallel_output )\n # (f is None if this processor does not participate in writing data)\n\n # Loop over the different species and write the requested quantities\n for species_name in sorted(self.species_dict.keys()) :\n\n # Get the HDF5 species group\n if f is not None:\n species_path = \"/data/%d/particles/%s\"%(iteration,species_name)\n species_grp = f[species_path]\n else:\n species_grp = None\n\n # Get the relevant species object and selection array\n species = self.species_dict[species_name]\n select_array = select_array_dict[species_name]\n n_rank = selected_nlocals_dict[species_name]\n\n # Write the datasets for each particle datatype\n self.write_particles( species_grp, species, n_rank, select_array )\n\n # Close the file\n if f is not None:\n f.close()", "def test_densities():\n\n actual, r, wt = GridGenerator.make_grid(400)\n grid = 4*pi*r**2*wt\n\n data = AtomData()\n\n print(\"\\nINTEGRATED DENSITY TEST\")\n print(\"=======================\")\n for a in list(data.nuclear_charge.keys()):\n atom = Atom(a)\n Nel = data.electron_count[a]\n d0, d1, g0, g1, t0, t1, l0, l1 = atom.get_densities(r)\n\n # Count electrons per spin channel\n s_occ = AtomData.s_occ.get(a, [0, 0])\n p_occ = AtomData.p_occ.get(a, [0, 0])\n d_occ = AtomData.d_occ.get(a, [0, 0])\n f_occ = AtomData.f_occ.get(a, [0, 0])\n nela = np.sum(s_occ[0])+np.sum(p_occ[0])+np.sum(d_occ[0])+np.sum(f_occ[0])\n nelb = np.sum(s_occ[1])+np.sum(p_occ[1])+np.sum(d_occ[1])+np.sum(f_occ[1])\n assert(nela+nelb == Nel)\n\n id0 = np.dot(d0, grid)\n id1 = np.dot(d1, grid)\n\n diff_0 = id0 - nela\n percent_diff_0 = 100*diff_0/nela\n\n # Check to catch for Hydrogen having no beta electrons\n if nelb > 0.0:\n diff_1 = id1 - nelb\n percent_diff_1 = 100*diff_1/nelb\n else:\n diff_1 = 0.0\n percent_diff_1 = 0.0\n\n print(\"{:>3} - N_0 = ({:4.1f}) {:+2.6e}%, N_1 = ({:4.1f}) {:+2.6e}%, {:}\".format(a, id0, percent_diff_0, id1, percent_diff_1, \"PASSED\" if max(abs(diff_0), abs(diff_1)) < 1e-4 else \"FAILED - \"))\n\n print(\"\\nINTEGRATED KINETIC TEST\")\n print(\"=======================\")\n for a in list(data.ke_test.keys()):\n atom = Atom(a)\n t_bm = data.ke_test[a]\n d0, d1, g0, g1, t0, t1, l0, l1 = atom.get_densities(r)\n\n it0 = np.dot(t0, grid)\n it1 = np.dot(t1, grid)\n itot = it0 + it1\n\n diff = itot - t_bm\n print(\"{:>3} - T = {:+.6e}%, {:}\".format(a, 100*diff/t_bm, \"PASSED\" if abs(100*diff/t_bm) < 1e-2 else \"FAILED - \"))\n\n\n # The integral of the Laplacian over all space should be 0. 
Check that.\n print(\"\\nINTEGRATED LAPLACIAN TEST\")\n print(\"=========================\")\n for a in list(AtomData.ke_test.keys()):\n atom = Atom(a)\n\n d0, d1, g0, g1, t0, t1, l0, l1 = atom.get_densities(r)\n\n il0 = np.dot(grid, l0)\n il1 = np.dot(grid, l1)\n print(\"{:>3} - L_0 = {:+.6e}, L_1 = {:+.6e}, {:}\".format(a, il0, il1, \"PASSED\" if max(abs(il0), abs(il1)) < 1e-6 else \"FAILED - \"))\n\n\n print(\"\\nFINITE DIFFERENCE GRADIENT TEST\")\n print(\"===============================\")\n print(\"Testing gradient evaluation function against finite difference estimate...\")\n ne = Atom(\"Ne\") # Let's use \"the guvnor\"\n # We only need to test a few points around the core\n fdh = 1e-8\n fdr = np.arange(0.9, 0.9+fdh*10, fdh)\n d0, d1, g0, g1, t0, t1, l0, l1 = ne.get_densities(fdr)\n\n # First the first central difference\n fdiff = (d0[2:]-d0[:-2])/(2*fdh) # Construct the central difference\n if np.allclose(fdiff, g0[1:-1], atol=1e-1): # finite difference is not perfect, so lenient tollerance\n print(\"Gradient: PASSED\")\n else:\n print(\"Gradient: FAILED -\")\n\n print(\"\\nELEMENT COLOR FUNCTIONS TEST\")\n print(\"===========================\")\n test_obj = [Atom(\"H\"), Atom(\"C\"), Atom(\"O\")]\n test_str = [\"H\", \"C\", \"O\"]\n ref = np.array([[1., 1., 1.], [0.565, 0.565, 0.565], [1. , 0.051, 0.051]])\n\n if np.allclose( np.array(get_colors_for_elements(test_obj)), ref):\n print(\"\\nColor from objects: PASSED\")\n else:\n print(\"\\nColor from objects: FAILED -\")\n\n if np.allclose( np.array(get_colors_for_elements(test_str)), ref):\n print(\"Color from strings: PASSED\")\n else:\n print(\"Color from strings: FAILED -\")\n\n if HAVE_LIBXC:\n test_functional='GGA_X_PBE'\n print(\"\\nATOMIC EXCHANGE ENERGIES WITH {}\".format(test_functional))\n print(\"============================================\")\n for a in list(data.ke_test.keys()):\n atom = Atom(a)\n nE, vrho, vsigma, vtau, vlapl = atom.libxc_eval(r, functional=test_functional, restricted=False)\n Exc = (np.dot(nE, grid)).item()\n print('{:3s} {:.10f}'.format(a, Exc))\n else:\n print(\"\\nNot doing energy calculations due to lack of libxc.\\n\")", "def dm(z,h=0.7,omegalambda=0.7,omegam=0.3,omegak=0.0):\n return distcalc(z,h,omegalambda,omegam,omegak)['dm']", "def write_file(self,f=None):\n nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper\n # Open file for writing\n if f is None:\n f = open(self.fn_path, 'w')\n # First line: heading\n f.write('{}\\n'.format(self.heading))\n # write dataset 1\n f.write('{} {} {} {} {} {} {}\\n'.format(self.ipakcb, self.iswtoc,\n self.nsystm, self.ithk,\n self.ivoid, self.istpcs,\n self.icrcc))\n # write dataset 2\n t = self.lnwt.array\n for tt in t:\n f.write('{} '.format(tt + 1))\n f.write('\\n')\n\n # write dataset 3\n f.write(\n '{} {} {} {} {} {} {} {} {} {}\\n'.format(self.izcfl, self.izcfm,\n self.iglfl, self.iglfm,\n self.iestfl, self.iestfm,\n self.ipcsfl, self.ipcsfm,\n self.istfl, self.istfm))\n\n # write dataset 4\n f.write(self.gl0.get_file_entry())\n\n # write dataset 5\n f.write(self.sgm.get_file_entry())\n\n # write dataset 6\n f.write(self.sgs.get_file_entry())\n\n # write datasets 7 to 13\n for k in range(self.nsystm):\n f.write(self.thick[k].get_file_entry())\n if self.icrcc != 0:\n f.write(self.sse[k].get_file_entry())\n f.write(self.ssv[k].get_file_entry())\n else:\n f.write(self.cr[k].get_file_entry())\n f.write(self.cc[k].get_file_entry())\n f.write(self.void[k].get_file_entry())\n f.write(self.sub[k].get_file_entry())\n\n # write datasets 14 and 15\n for k in 
range(nlay):\n if self.istpcs != 0:\n f.write(self.pcsoff[k].get_file_entry())\n else:\n f.write(self.pcs[k].get_file_entry())\n\n # write dataset 16 and 17\n if self.iswtoc > 0:\n # dataset 16\n for i in self.ids16:\n f.write('{} '.format(i))\n f.write(' #dataset 16\\n')\n\n # dataset 17\n for k in range(self.iswtoc):\n t = self.ids17[k, :].copy()\n t[0:4] += 1\n for i in t:\n f.write('{} '.format(i))\n f.write(' #dataset 17 iswtoc {}\\n'.format(k + 1))\n\n # close swt file\n f.close()", "def weighted_log_density(self):\n return self.rho*math.log(self.density)", "def write_xdmf(self, filename: str):\n\n mesh = UnstructuredMesh.from_h5(filename)\n mesh.write_h5(filename)", "def save_data(self) -> None:\n # Construct a grid in physical space\n rvals = np.logspace(start=-3,\n stop=2.5,\n num=21,\n endpoint=True)\n # Compute C, D, K1 and F on that grid\n Cvals = np.array([self.compute_C(r, Suppression.RAW) for r in rvals])\n Dvals = np.array([self.compute_D(r, Suppression.RAW) for r in rvals])\n K1vals = np.array([self.compute_K1(r, Suppression.RAW) for r in rvals])\n Fvals = np.array([self.compute_F(r, Suppression.RAW) for r in rvals])\n # Save them to file\n df = pd.DataFrame([rvals, Cvals[:, 0], Dvals[:, 0], K1vals[:, 0], Fvals[:, 0],\n Cvals[:, 1], Dvals[:, 1], K1vals[:, 1], Fvals[:, 1]]).transpose()\n df.columns = ['r', 'C(r)', 'D(r)', 'K1(r)', 'F(r)', 'dC(r)', 'dD(r)', 'dK1(r)', 'dF(r)']\n df.to_csv(self.file_path(self.filename + '.csv'), index=False)", "def get_data(filename):\n\n data = load(filename)\n\n density_factor = float(data.gas.densities.cosmo_factor.a_factor)\n temperature_factor = float(data.gas.temperatures.cosmo_factor.a_factor)\n\n number_density = (data.gas.densities * (density_factor / mh)).to(cm ** -3)\n temperature = (data.gas.temperatures * temperature_factor).to(\"K\")\n metallicity = data.gas.metal_mass_fractions\n metallicity[metallicity < min_metallicity] = min_metallicity\n\n return number_density.value, temperature.value, np.log10(metallicity.value)", "def univariate_dlm_simulation(F,G,W,v,initial_state,n,T):\n \n ZEROS = np.zeros(n)\n \n emissions = np.zeros([T,1])\n state = np.zeros([T,n])\n \n state[0] = initial_state\n emissions[0] = F.dot(initial_state) + np.random.normal(loc = 0.0,scale = v)\n \n for t in range(T):\n state[t] = G.dot(state[t-1]) + np.random.multivariate_normal(ZEROS,W)\n emissions[t] = F.dot(state[t]) + np.random.normal(0.0, v)\n \n return state,emissions", "def WHD(q, p, h, weights):\n \n num = 0.0\n for j in range(q, p):\n num += weights[j]\n den = 0.0\n for j in range(1, h):\n den += weights[j]\n return 0 if num == 0 else num/den", "def create_dnz_file(args):\n\n file = open(args.o, 'w')\n\n file.write(\"% ----DATA VARIABLES----\\n\\n\")\n file.write(\"t=\" + str(args.t) + \";\" + \"%number of attributes\\n\")\n file.write(\"k=\" + str(args.k) + \";\" + \"%max length of the support set\\n\")\n file.write(\"n=\" + str(args.n) + \";\" + \"%number of positive instances\\n\")\n file.write(\"m=\" + str(args.m) + \";\" + \"%number of negative instances\\n\")\n file.write(\"c=\" + str(args.c) + \";\" + \"%number of atMostOne Constraints\\n\\n\")\n\n file.write(\"% ----OMEGAS----\\n\\n\")\n\n omega_p = generate_omega_data(args.t, args.n, args.b)\n file.write(\"omegap= \" + omega_to_mz(omega_p) + \"\\n\\n\")\n\n omega_n = generate_disjoint_omega_data(omega_p, args.m, args.b)\n file.write(\"omegan= \" + omega_to_mz(omega_n) + \"\\n\\n\")\n\n file.write(\"% ----CONSTRAINS----\\n\\n\")\n at_most_one = generate_at_most_one(int(args.t/2), args.c, 
1, args.t)\n file.write(\"atMostOne=\" + at_most_one_to_mz(at_most_one))", "def system(self, t, state, strength, density):\n v, m, theta, z, _, r = state\n A = np.pi*r**2 # radius generally varies with time after break-up\n rhoa = self.rhoa(z)\n\n # u = [dv/dt, dm/dt, dtheta/dt, dz/dt, dx/dt, dr/dt]\n u = np.zeros_like(state)\n u[0] = -self.Cd*rhoa*A*v**2 / (2*m) + self.g*np.sin(theta) # dv/dt\n u[1] = -self.Ch*rhoa*A*v**3/(2*self.Q) # dm/dt\n u[2] = self.g*np.cos(theta)/v - self.Cl*rhoa * A*v / \\\n (2*m) - (v*np.cos(theta) / (self.Rp+z)) # dtheta/dt\n u[3] = -v*np.sin(theta) # dz/dt\n u[4] = v*np.cos(theta)/(1+z/self.Rp) # dx/dt\n if rhoa * v**2 < strength:\n u[5] = 0\n else:\n u[5] = (7/2*self.alpha*rhoa/density)**0.5 * v # dr/dt\n\n return u" ]
[ "0.55582106", "0.54553246", "0.5443375", "0.5435445", "0.53110385", "0.5228803", "0.51917344", "0.5187912", "0.51358545", "0.5119606", "0.5118766", "0.5114473", "0.5104518", "0.5102728", "0.5058224", "0.5031976", "0.50309247", "0.5023144", "0.5012087", "0.50105166", "0.50067145", "0.50007135", "0.49895647", "0.4960418", "0.49542803", "0.49407378", "0.49402097", "0.4925612", "0.4925204", "0.49233353", "0.49233136", "0.4922537", "0.4913044", "0.49081054", "0.4901895", "0.48969555", "0.48892048", "0.48861188", "0.48837656", "0.48821577", "0.48807034", "0.4880465", "0.4879698", "0.48751372", "0.48748472", "0.48703203", "0.48701286", "0.48656657", "0.485564", "0.4830731", "0.48292348", "0.48275703", "0.48237947", "0.4823515", "0.48234236", "0.48155916", "0.48075294", "0.4800522", "0.47994137", "0.47951198", "0.4778101", "0.47725102", "0.47701505", "0.47688714", "0.47603166", "0.47546867", "0.47545484", "0.4752119", "0.4751112", "0.47445944", "0.47373137", "0.4733575", "0.47285542", "0.472678", "0.4724737", "0.4718014", "0.47149912", "0.4714332", "0.47089052", "0.47026172", "0.47024402", "0.4700735", "0.46991494", "0.46974474", "0.4695846", "0.46948987", "0.46923703", "0.46920943", "0.46861738", "0.4683674", "0.46818078", "0.46802565", "0.46766856", "0.46762037", "0.46733996", "0.4669984", "0.46698993", "0.46657276", "0.4664602", "0.466245", "0.46623927" ]
0.0
-1
Calculates the local density of states of a Hamiltonian and writes it to a file, using ARPACK
def ldos0d_wf(h,e=0.0,delta=0.01,num_wf = 10,robust=False,tol=0):
  if h.dimensionality==0:  # only for 0d
    intra = csc_matrix(h.intra) # matrix
  else: raise # not implemented...
  if robust: # go to the imaginary axis for stability
    eig,eigvec = slg.eigs(intra,k=int(num_wf),which="LM",
                          sigma=e+1j*delta,tol=tol)
    eig = eig.real # real part only
  else: # Hermitic Hamiltonian
    eig,eigvec = slg.eigsh(intra,k=int(num_wf),which="LM",sigma=e,tol=tol)
  d = np.array([0.0 for i in range(intra.shape[0])]) # initialize
  for (v,ie) in zip(eigvec.transpose(),eig): # loop over wavefunctions
    v2 = (np.conjugate(v)*v).real # square of wavefunction
    fac = delta/((e-ie)**2 + delta**2) # factor to create a delta
    d += fac*v2 # add contribution
  # d /= num_wf # normalize
  d /= np.pi # normalize
  d = spatial_dos(h,d) # resum if necessary
  g = h.geometry # store geometry
  write_ldos(g.x,g.y,d,z=g.z) # write in file
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_density(fname, density):\n K, M, N = density.shape\n output = open(fname, \"w\")\n output.write(\"ARMA_CUB_TXT_FN008\\n\")\n output.write(\"%d %d %d\\n\" % (K, M, N))\n for i in range(N):\n for k in range(K):\n for m in range(M):\n output.write(\" %+.6e\" % density[k, m, i])\n output.write(\"\\n\")\n\n output.close()", "def onestatfile():\n with hp.File('StatsFile.h5', 'w') as onefile:\n alldata = np.empty((600, 4, 3, 500), dtype=np.float32)\n for j in range(600):\n for i in range(3):\n msd, vol, rms, asp = getstats(i, j+1)\n alldata[j, 0, i, :] = msd\n alldata[j, 1, i, :] = vol\n alldata[j, 2, i, :] = rms\n alldata[j, 3, i, :] = asp\n onefile.create_dataset('Stats', data=alldata, chunks=(1, 4, 3, 500),\n compression='gzip', compression_opts=9)", "def write_dftb_in(self, filename):\n\n outfile = open(filename, 'w')\n outfile.write('Geometry = GenFormat { \\n')\n #outfile.write(' <<< \"geo_end.gen\" \\n')\n outfile.write(' <<< %s \\n' %self.geo_fname)\n outfile.write('} \\n')\n outfile.write(' \\n')\n\n params = self.parameters.copy()\n\n s = 'Hamiltonian_MaxAngularMomentum_'\n for key in params:\n if key.startswith(s) and len(key) > len(s):\n break\n else:\n # User didn't specify max angular mometa. Get them from\n # the .skf files:\n symbols = set(self.atoms.get_chemical_symbols())\n for symbol in symbols:\n path = os.path.join(self.slako_dir,\n '{0}-{0}.skf'.format(symbol))\n l = read_max_angular_momentum(path)\n params[s + symbol] = '\"{}\"'.format('spdf'[l])\n\n # --------MAIN KEYWORDS-------\n previous_key = 'dummy_'\n myspace = ' '\n for key, value in sorted(params.items()):\n current_depth = key.rstrip('_').count('_')\n previous_depth = previous_key.rstrip('_').count('_')\n for my_backsclash in reversed(\n range(previous_depth - current_depth)):\n outfile.write(3 * (1 + my_backsclash) * myspace + '} \\n')\n outfile.write(3 * current_depth * myspace)\n if key.endswith('_') and len(value) > 0:\n outfile.write(key.rstrip('_').rsplit('_')[-1] +\n ' = ' + str(value) + '{ \\n')\n elif (key.endswith('_') and (len(value) == 0) \n and current_depth == 0): # E.g. 'Options {'\n outfile.write(key.rstrip('_').rsplit('_')[-1] +\n ' ' + str(value) + '{ \\n')\n elif (key.endswith('_') and (len(value) == 0) \n and current_depth > 0): # E.g. 'Hamiltonian_Max... 
= {'\n outfile.write(key.rstrip('_').rsplit('_')[-1] +\n ' = ' + str(value) + '{ \\n')\n elif key.count('_empty') == 1:\n outfile.write(str(value) + ' \\n')\n elif ((key == 'Hamiltonian_ReadInitialCharges') and \n (str(value).upper() == 'YES')):\n f1 = os.path.isfile(self.directory + os.sep + 'charges.dat')\n f2 = os.path.isfile(self.directory + os.sep + 'charges.bin')\n if not (f1 or f2):\n print('charges.dat or .bin not found, switching off guess')\n value = 'No'\n outfile.write(key.rsplit('_')[-1] + ' = ' + str(value) + ' \\n')\n else:\n outfile.write(key.rsplit('_')[-1] + ' = ' + str(value) + ' \\n')\n # point\n if self.pcpot is not None and ('DFTB' in str(value)):\n outfile.write(' ElectricField = { \\n')\n outfile.write(' PointCharges = { \\n')\n outfile.write(\n ' CoordsAndCharges [Angstrom] = DirectRead { \\n')\n outfile.write(' Records = ' +\n str(len(self.pcpot.mmcharges)) + ' \\n')\n outfile.write(\n ' File = \"dftb_external_charges.dat\" \\n')\n outfile.write(' } \\n')\n outfile.write(' } \\n')\n outfile.write(' } \\n')\n previous_key = key\n\n current_depth = key.rstrip('_').count('_')\n for my_backsclash in reversed(range(current_depth)):\n outfile.write(3 * my_backsclash * myspace + '} \\n')\n #outfile.write('ParserOptions { \\n')\n #outfile.write(' IgnoreUnprocessedNodes = Yes \\n')\n #outfile.write('} \\n')\n #if self.do_forces:\n # outfile.write('Analysis { \\n')\n # outfile.write(' CalculateForces = Yes \\n')\n # outfile.write('} \\n')\n\n outfile.close()", "def create_dat_file(\n src_depth: float,\n epi_in_km: float,\n baz: float,\n focal_mech: [float],\n dt: float,\n save_path: str,\n bm_file_path: str,\n M0: float = None,\n fdom: str = 1.000,\n):\n\n bm_file = bm_file_path\n\n f = np.loadtxt(bm_file, skiprows=5)\n f_ud = np.flipud(f)\n\n radius_mars = 3389.5 * 1e3 # f_ud[0][0] # 3390 (km)\n\n # radius_of_planet = 3390\n # km_per_deg = np.pi * (radius_mars * 1e-3) / 180.0\n # dist_in_km = epi_in_km * np.pi * (radius_mars * 1e-3) / 180.0\n dist = epi_in_km\n\n if baz < 0:\n baz *= -1\n rec_az = baz\n rec_z = 0.0\n\n src_x = 0.0\n src_y = 0.0\n src_z = src_depth\n or_time = 0.0\n s_strength = 1.0\n\n assert (M0 is None and len(focal_mech) == 6) or (M0 is not None and len(focal_mech) == 3), (\n \"focal_mech length is incorrect. \"\n \"If you specify M0, focal_mech is [strike,dip,rake]. 
\"\n \"Otherwise focal_mech is [m_rr, m_tt, m_pp, m_rt, m_rp, m_tp]\"\n )\n\n for i in range(len(focal_mech)):\n focal_mech[i] += 0\n\n M_tt_ins = focal_mech[1] # / 1e14\n M_pp_ins = focal_mech[2] # / 1e14\n M_rr_ins = focal_mech[0] # / 1e14\n M_rp_ins = focal_mech[4] # / 1e14\n M_rt_ins = focal_mech[3] # / 1e14\n M_tp_ins = focal_mech[5] # / 1e14\n\n moment_tensor = f\"{M_tt_ins:10.4f}{-M_tp_ins+0:10.4f}{M_rt_ins:10.4f}{M_pp_ins:10.4f}{-M_rp_ins+0:10.4f}{M_rr_ins:10.4f}\"\n # moment_tensor = f\"{M_tt_ins:10.4f}{M_tp_ins:10.4f}{M_rt_ins:10.4f}{M_pp_ins:10.4f}{M_rp_ins:10.4f}{M_rr_ins:10.4f}\"\n\n # model = TauPyModel(taup_path)\n # model_layers = model.model.s_mod.v_mod.layers\n\n with open(join(save_path, \"crfl.dat\"), \"w\") as f:\n f.write(\"Test name\\n\")\n f.write(\" 0 0 0 0 0 0 0 1 1 1 2 1 0 0 1 0 1 2 0 1 1\\n\")\n f.write(\" 5 1 0 1 1\\n\")\n\n # Get the indices of the velocity model with blocky description\n indices = np.setdiff1d(\n np.arange(len(f_ud[:, 0])), np.unique(f_ud[:, 0], return_index=True)[1]\n )\n indices1 = indices - 1\n inds = np.sort(np.hstack((0, np.hstack((indices1, indices)))))\n\n for i, layer in enumerate(f_ud):\n if layer[0] == 0.0:\n continue\n depth = (radius_mars - layer[0]) * 1e-3\n dens = layer[1] * 1e-3\n vp = layer[2] * 1e-3\n vs = layer[3] * 1e-3\n qka = layer[4] # qka\n qmu = layer[5] # qmu\n vph = layer[6]\n vsh = layer[7]\n eta = layer[8]\n\n qs = qmu\n L = (4 / 3) * (vs / vp) ** 2\n qp = 1 / (L * (1 / qmu) + (1 - L) * (1 / qka))\n if np.isnan(qp):\n qp = qka\n qs = 10.0\n\n # Check if part of velocity model is part of the gradient:\n if i not in inds and vs != 0.0:\n # prev_depth = (radius_mars - f_ud[i - 1, 0]) * 1e-3\n # layer_thickness = depth - prev_depth\n # factor = 0.07\n # layer_thickness_lim = factor * (\n # vs / fdom\n # ) # layer limit should be less then 1/10 of wavelength\n # vs0 = f_ud[i - 1, 3] * 1e-3\n # if layer_thickness_lim > factor * (vs0 / fdom):\n # layer_thickness_lim = factor * (vs0 / fdom)\n # import math\n\n # n_layers = math.ceil(layer_thickness / layer_thickness_lim)\n n_layers = 1\n else:\n n_layers = 1\n text = f\"{depth:10.4f}{vp:10.4f}{qp:10.4f}{vs:10.4f}{qs:10.4f}{dens:10.4f}{n_layers:10d}\\n\"\n f.write(text)\n f.write(\"\\n\")\n f.write(f\"{rec_z:10.4f}\\n\")\n f.write(f\"{src_x:10.4f}{src_y:10.4f}{src_z:10.4f}{or_time:10.4f}{s_strength:10.4f}\\n\")\n f.write(f\"{moment_tensor}\\n\")\n f.write(f\"{dist:10.4f}{dist:10.4f}{0.:10.4f}{rec_az:10.4f}{1:10d}\\n\")\n f.write(f\"{dist:10.4f}\\n\")\n f.write(f\"{rec_az:10.4f}\\n\")\n f.write(f\"{12.:10.4f} {-300.:10.4f}\\n\")\n f.write(\" 3.0000 3.5000 23.5000 25.0000 650\\n\")\n f.write(f\" 0.0100 0.0133{fdom:10.4f} 1.0300 0.0000\\n\")\n # f.write(\" 0.2420 32768 0 2 0.2420 245.7600\\n\")\n npts = 32768\n t_sigma = 0.3 * dt * npts\n f.write(f\"{dt:10.4f}{npts:10d}{0:10d}{2:10d}{dt:10.4f}{t_sigma:10.4f}\\n\")\n\n f.close()", "def export_hamilton(args):\n if args.type == 'filling_out':\n clarity_epp.export.hamilton.samplesheet_filling_out(lims, args.process_id, args.output_file)\n elif args.type == 'purify':\n clarity_epp.export.hamilton.samplesheet_purify(lims, args.process_id, args.output_file)", "def export_1D_edp(self, filename=\"1Dedp.dat\", start=(-10,25), end=(30,-20), \n N=100):\n rho = []\n x0, z0 = start\n x1, z1 = end\n xpoints = np.linspace(x0, x1, N)\n zpoints = np.linspace(z0, z1, N)\n for x, z in zip(xpoints, zpoints):\n tmp = self.phase * self.F * np.cos(self.qx*x+self.qz*z)\n dist = np.sqrt((x-x0)**2 + (z-z0)**2)\n rho.append([x, z, dist, 
tmp.sum(axis=0)])\n rho = np.array(rho, float)\n X, Z, DIST, EDP = rho[:,0], rho[:,1], rho[:,2], rho[:,3]\n with open(filename, 'w') as f:\n f.write(\"x z dist ED\\n\")\n for x, z, dist, edp in zip(X, Z, DIST, EDP):\n f.write(\"{0: 3.1f} {1: 3.1f} {2: 3.1f} {3: }\\n\".format(x, z, dist, edp))", "def __init__(self, path, verbose=False, mindens=1e3, **kwargs):\n self.path = path\n self.aux = os.path.dirname(os.path.realpath(__file__))\n self.aux = self.aux.replace('analysis', 'aux/')\n self.mindens = mindens\n self.verbose = verbose\n if not self.verbose:\n warnings.simplefilter('ignore')\n\n # Read in the .fits data. HDU[1] is the grid and HDU[4] are the level\n # populations. [2] and [3] can be ignored.\n\n self.hdu = fits.open(self.path)\n self.grid = self.hdu[1]\n if self.verbose:\n for c in self.grid.columns:\n print c\n print('\\n')\n self.names = self.grid.columns.names\n\n # Remove all the sink particles and convert units to [au].\n\n self.notsink = ~self.grid.data['IS_SINK']\n self.xvals = self.grid.data['x1'][self.notsink] / sc.au\n self.yvals = self.grid.data['x2'][self.notsink] / sc.au\n self.zvals = self.grid.data['x3'][self.notsink] / sc.au\n\n # Extract the physical properties. Assume that the densities are only\n # ever H2 or [oH2, pH2]. If the latter, allow density to be the sum.\n # Individual values can still be accessed through _density.\n\n self.gtemp = self.grid.data['TEMPKNTC'][self.notsink]\n self.dtemp = self.grid.data['TEMPDUST'][self.notsink]\n self.dtemp = np.where(self.dtemp == -1, self.gtemp, self.dtemp)\n\n self.ndens = len([n for n in self.names if 'DENSITY' in n])\n if self.ndens > 1 and self.verbose:\n print('Assuming DENSITY1 and DENSITY2 are oH2 and pH2.')\n self._dens = {d: self.grid.data['DENSITY%d' % (d+1)][self.notsink]\n for d in range(self.ndens)}\n self.dens = np.sum([self._dens[k] for k in range(self.ndens)], axis=0)\n\n self.nabun = len([n for n in self.names if 'ABUNMOL' in n])\n if self.nabun > 1:\n raise NotImplementedError()\n self.abun = self.grid.data['ABUNMOL1'][self.notsink]\n self.velo = np.array([self.grid.data['VEL%d' % i][self.notsink]\n for i in [1, 2, 3]])\n self.turb = self.grid.data['TURBDPLR'][self.notsink]\n\n # Remove all particles that fall below the minimum density.\n\n self.dmask = self.dens > self.mindens\n self.xvals = self.xvals[self.dmask]\n self.yvals = self.yvals[self.dmask]\n self.zvals = self.zvals[self.dmask]\n self.gtemp = self.gtemp[self.dmask]\n self.dtemp = self.dtemp[self.dmask]\n self.dens = self.dens[self.dmask]\n self.abun = self.abun[self.dmask]\n self.turb = self.turb[self.dmask]\n\n # Remove all the particles that are |x_i| > rmax.\n\n self.rmax = kwargs.get('rmax', 20)\n self.rmask = np.where(abs(self.xvals) > self.rmax, 1, 0)\n self.rmask += np.where(abs(self.yvals) > self.rmax, 1, 0)\n self.rmask += np.where(abs(self.zvals) > self.rmax, 1, 0)\n self.rmask = np.where(self.rmask == 0, True, False)\n self.xvals = self.xvals[self.rmask]\n self.yvals = self.yvals[self.rmask]\n self.zvals = self.zvals[self.rmask]\n self.gtemp = self.gtemp[self.rmask]\n self.dtemp = self.dtemp[self.rmask]\n self.dens = self.dens[self.rmask]\n self.abun = self.abun[self.rmask]\n self.turb = self.turb[self.rmask]\n\n # Excitation properties. 
Remove all the sink particles.\n\n pops = self.hdu[4].data.T\n idxs = [i for i, b in enumerate(self.notsink) if not b]\n self.levels = np.delete(pops, idxs, axis=1)\n idxs = [i for i, b in enumerate(self.dmask) if not b]\n self.levels = np.delete(self.levels, idxs, axis=1)\n idxs = [i for i, b in enumerate(self.rmask) if not b]\n self.levels = np.delete(self.levels, idxs, axis=1)\n\n # Apply the gridding. Note we include a single point radial grid to\n # better interface with the function in analysemodel.\n\n self.xgrid = np.zeros(1)\n self.ygrid = self.estimate_grids(**kwargs)\n self.gridded = {}\n self.gridded['dens'] = self.grid_param(self.dens)\n self.gridded['gtemp'] = self.grid_param(self.gtemp)\n self.gridded['dtemp'] = self.grid_param(self.dtemp)\n self.gridded['abun'] = self.grid_param(self.abun)\n self.gridded['turb'] = self.grid_param(self.turb)\n self.gridded['levels'] = {}\n self.grid_levels(kwargs.get('nlevels', 5))\n\n return", "def __init__(self, path, verbose=False, mindens=1e3, **kwargs):\n self.path = path\n self.aux = os.path.dirname(os.path.realpath(__file__))\n self.aux = self.aux.replace('analysis', 'aux/')\n self.mindens = mindens\n self.depletion = kwargs.get('depletion', 1.0)\n self.verbose = verbose\n if not self.verbose:\n warnings.simplefilter('ignore')\n\n # Currently only important are the grid [1] and level populations [4]\n # from the grid. Both [2] and [3] are for the Delanunay triangulation\n # and can thus be ignored.\n\n self.hdu = fits.open(self.path)\n self.grid = self.hdu[1]\n if self.verbose:\n for c in self.grid.columns:\n print c\n print('\\n')\n self.names = self.grid.columns.names\n\n # Coordinates. Remove all the sink particles and convert to au. The\n # native system is cartesian, (x, y, z). Also convert them into\n # spherical polar coordinates, (r, p, t).\n\n self.notsink = ~self.grid.data['IS_SINK']\n self.xvals = self.grid.data['x1'][self.notsink] / sc.au\n self.yvals = self.grid.data['x2'][self.notsink] / sc.au\n self.zvals = self.grid.data['x3'][self.notsink] / sc.au\n self.rvals = np.hypot(self.yvals, self.xvals)\n self.pvals = np.arctan2(self.yvals, self.xvals)\n self.tvals = np.arctan2(self.zvals, self.rvals)\n\n # Physical properties at each cell. If dtemp == -1, then use gtemp,\n # this allows us to calculate the dust continuum for the line emission.\n\n self.gtemp = self.grid.data['TEMPKNTC'][self.notsink]\n self.dtemp = self.grid.data['TEMPDUST'][self.notsink]\n self.dtemp = np.where(self.dtemp == -1, self.gtemp, self.dtemp)\n\n # Assume that the densities are only ever H2 or [oH2, pH2]. If the\n # latter, allow density to be the sum. Individual values can still be\n # accessed through _density.\n\n self.ndens = len([n for n in self.names if 'DENSITY' in n])\n if self.ndens > 1 and self.verbose:\n print('Assuming DENSITY1 and DENSITY2 are oH2 and pH2.')\n self._dens = {d: self.grid.data['DENSITY%d' % (d+1)][self.notsink]\n for d in range(self.ndens)}\n self.dens = np.sum([self._dens[k] for k in range(self.ndens)], axis=0)\n\n # Include the other physical properties.\n\n self.nabun = len([n for n in self.names if 'ABUNMOL' in n])\n if self.nabun > 1:\n raise NotImplementedError()\n self.abun = self.grid.data['ABUNMOL1'][self.notsink]\n self.velo = np.array([self.grid.data['VEL%d' % i][self.notsink]\n for i in [1, 2, 3]])\n self.turb = self.grid.data['TURBDPLR'][self.notsink]\n\n # Mask out all points with a total density of <= min_density, with a\n # default of 10^4. 
Include the depletion of the emitting molecule.\n\n self.dmask = self.dens > kwargs.get('min_density', 1e3)\n self.xvals = self.xvals[self.dmask]\n self.yvals = self.yvals[self.dmask]\n self.zvals = self.zvals[self.dmask]\n self.rvals = self.rvals[self.dmask]\n self.pvals = self.pvals[self.dmask]\n self.tvals = self.tvals[self.dmask]\n self.gtemp = self.gtemp[self.dmask]\n self.dtemp = self.dtemp[self.dmask]\n self.dens = self.dens[self.dmask]\n self.abun = self.abun[self.dmask] * self.depletion\n self.turb = self.turb[self.dmask]\n\n # Excitation properties. Remove all the sink particles.\n\n pops = self.hdu[4].data.T\n idxs = [i for i, b in enumerate(self.notsink) if not b]\n self.levels = np.delete(pops, idxs, axis=1)\n idxs = [i for i, b in enumerate(self.dmask) if not b]\n self.levels = np.delete(self.levels, idxs, axis=1)\n\n # -- Gridding Options --\n #\n # There are three options here. One can provide the axes either\n # as an array, otherwise the grids are generated depending on the\n # points of the model.\n # By default the grid is logarithmic in the vertical direction but\n # linear in the radial direction with 500 points in each, however\n # these are customisable.\n\n grids = kwargs.get('grids', None)\n if grids is None:\n self.xgrid, self.ygrid = self.estimate_grids(**kwargs)\n else:\n try:\n self.xgrid, self.ygrid = grids\n except ValueError:\n self.xgrid = grids\n self.ygrid = grids\n except:\n raise ValueError('grids = [xgrid, ygrid].')\n self.xpnts = self.xgrid.size\n self.ypnts = self.ygrid.size\n\n # With the grids, grid the parameters and store them in a dictionary.\n # Only read in the first (by default) 5 energy levels, but this can be\n # increased later with a call to self.grid_levels(j_max).\n\n self.method = kwargs.get('method', 'linear')\n if self.verbose:\n print('Beginning gridding using %s interpolation.' 
% self.method)\n if self.method == 'nearest':\n print('Warning: neartest may produce unwanted features.')\n\n self.gridded = {}\n self.gridded['dens'] = self.grid_param(self.dens, self.method)\n self.gridded['gtemp'] = self.grid_param(self.gtemp, self.method)\n self.gridded['dtemp'] = self.grid_param(self.dtemp, self.method)\n self.gridded['abun'] = self.grid_param(self.abun, self.method)\n self.gridded['turb'] = self.grid_param(self.turb, self.method)\n self.gridded['levels'] = {}\n self.grid_levels(kwargs.get('nlevels', 5))\n\n return", "def export_2D_edp(self, filename=\"2Dedp.dat\", xmin=-100, xmax=100, \n zmin=-100, zmax=100, N=201):\n rho_xz = []\n xgrid = np.linspace(xmin, xmax, num=N)\n zgrid = np.linspace(zmin, zmax, num=N)\n for x in xgrid:\n for z in zgrid:\n tmp = self.phase * self.F * np.cos(self.qx*x+self.qz*z)\n rho_xz.append([x, z, tmp.sum(axis=0)])\n rho_xz = np.array(rho_xz, float) \n X, Y, Z= rho_xz[:,0], rho_xz[:,1], rho_xz[:,2]\n with open(filename, 'w') as f:\n f.write(\"x z ED\\n\")\n for x, y, z in zip(X, Y, Z):\n f.write(\"{0: 3.1f} {1: 3.1f} {2: }\\n\".format(x, y, z))", "def frequencyEstimator(ctd, ladcp, bathy, rho_neutral, strain,\\\n wl_min=100, wl_max=500, full_set=False):\n \n U, V, p_ladcp = oc.loadLADCP(ladcp)\n S, T, p_ctd, lat, lon = oc.loadCTD(ctd)\n \n \n Ek, Ep, Etotal, eta_power,\\\n Upow, Vpow, UVkx, eta_kx,\\\n N2mean, wl_min, wl_max,\\\n dist, depths, U, V, p_ladcp,\\\n Uspec, Vspec, etaSpec =\\\n internal_wave_energy(ctd, ladcp,\\\n rho_neutral,\\\n bathy, strain, wl_min=wl_min, wl_max=wl_max)\n \n eta_power_export = np.vstack(eta_power)\n eta_kx_export = np.vstack(eta_kx)\n Up_export = np.vstack(Upow)\n Vp_export = np.vstack(Vpow)\n UVkx_export = np.vstack(UVkx)\n \n\n np.savetxt('eta_power.csv',eta_power_export)\n np.savetxt('eta_kx.csv',eta_kx_export)\n np.savetxt('Upow.csv',Up_export)\n np.savetxt('Vpow.csv',Vp_export)\n np.savetxt('UVkx.csv',UVkx_export)\n\n\n \n \n # look for wavenumber maxes\n \n \n # Use ratios to solve for internal frequncys\n f = np.nanmean(gsw.f(lat))\n \n omega = f*np.sqrt(Etotal/(Ek-Ep))\n\n m = np.mean((wl_min, wl_max))\n m = (2*np.pi)/m\n kh = (m/np.sqrt(np.abs(N2mean)))*(np.sqrt(omega**2 - f**2))\n mask = kh == 0\n kh[mask]= np.nan\n lambdaH = 1e-3*(2*np.pi)/kh\n \n # get mean spectra\\\n \n eta_mean = []\n for station in eta_power:\n eta_mean.append(np.nanmean(station, axis=0))\n \n eta_mean = np.vstack(eta_mean).T\n \n \n aspect = kh/m \n \n file2save = pd.DataFrame(lambdaH)\n file2save.index = np.squeeze(depths)\n file2save.to_excel('lambdaH.xlsx')\n file2save = pd.DataFrame(kh)\n file2save.index = np.squeeze(depths)\n file2save.to_excel('Kh.xlsx')\n file2save = pd.DataFrame(omega)\n file2save.index = np.squeeze(depths)\n file2save.to_excel('omega.xlsx')\n file2save = pd.DataFrame(aspect)\n file2save.index = np.squeeze(depths)\n file2save.to_excel('aspect.xlsx')\n \n np.savetxt('eta_mean.csv', eta_mean)\n \n \n np.savetxt('kh.csv', kh)\n np.savetxt('lamdah.csv', lambdaH)\n np.savetxt('omega.csv', omega)\n \n if full_set:\n return lambdaH, kh, omega, N2mean,\\\n dist, depths, U, V, p_ladcp,\\\n Uspec, Vspec, etaSpec, aspect\n \n else:\n return lambdaH, kh, omega, N2mean", "def write_mesh_file(allxyz, beck_bed):\n if SAVEMESH:\n print('+> Saving finite element mesh files...', end='')\n fname = FNAME.rsplit('.', 1)[0]\n ncol = beck_bed[0,:].size\n nrow = beck_bed[:,0].size\n nele = (nrow-1)*(ncol-1)*2\n d = compute_mesh(nrow, ncol, nele)\n h = ':NodeCount ' + str(allxyz[:,0].size) + '\\n:ElementCount ' \\\n + 
str(nele) + '\\n#\\n:EndHeader\\n'\n with open(fname + '_mesh.t3s', 'w') as f: \n f.write(h)\n with open(fname + '_mesh.t3s', 'a') as f:\n np.savetxt(f, allxyz, fmt='%.6e')\n np.savetxt(f, d, fmt='%d')\n f.write('\\n\\n')\n h = 'TITLE = \\\"' + fname \\\n + '_mesh\\\"\\nVARIABLES = \\\"X\\\", \\\"Y\\\", \\\"' + fname \\\n + '_mesh\\\"\\nZONE NODES=' + str(allxyz[:,0].size) + ', ELEMENTS=' \\\n + str(nele) + ', DATAPACKING=POINT, ZONETYPE=FETRIANGLE\\n'\n with open(fname + '_mesh.dat', 'w') as f: \n f.write(h)\n with open(fname + '_mesh.dat', 'a') as f:\n np.savetxt(f, allxyz, fmt='%.6e')\n np.savetxt(f, d, fmt='%d')\n f.write('\\n\\n')\n inlet = np.zeros((ncol,), dtype=int)\n outlet = np.zeros((ncol,), dtype=int)\n for i in range(ncol):\n inlet[i] = 1 + i*nrow\n outlet[i] = (1 + i)*nrow\n left = np.zeros((nrow-2,), dtype=int)\n right = np.zeros((nrow-2,), dtype=int)\n for i in range(1, nrow-1):\n left[i-1] = (ncol-2)*nrow + i + 1\n right[i-1] = (ncol-1)*nrow + i + 1\n cli = np.zeros((2*(nrow+ncol-2), 13))\n cli[:,:2] = 2\n cli[:,7] = 2\n cli[:,11] = np.concatenate((inlet, outlet, left, right))\n cli[:,12] = np.arange(2*(nrow+ncol-2)) + 1\n cli[:ncol,0] = 4\n cli[:ncol,1] = 5\n cli[:ncol,2] = 5\n cli[:ncol,7] = 4\n cli[ncol:2*ncol,0] = 5\n cli[ncol:2*ncol,1] = 4\n cli[ncol:2*ncol,2] = 4\n cli[ncol:2*ncol,7] = 4\n np.savetxt(fname + '_BC_tmp.cli', cli, fmt='%d')\n with open(fname + '_BC.cli', 'w') as out_f:\n with open(fname + '_BC_tmp.cli', 'r') as in_f:\n for i, line in enumerate(in_f):\n if i < ncol:\n s = ' #Inlet'\n elif i >= ncol and i < 2*ncol:\n s = ' #Outlet'\n else:\n s = ' #'\n out_f.write(line.rstrip('\\n') + s + '\\n')\n out_f.write('\\n')\n os.remove(fname + '_BC_tmp.cli')\n h = ':FileType bc2 ASCII EnSim 1.0' \\\n + '\\n:NodeCount ' + str(allxyz[:,0].size) \\\n + '\\n:ElementCount ' + str(nele) \\\n + '\\n:ElementType T3' \\\n + '\\n:BoundarySegmentCount 2' \\\n + '\\n# id code sectionCount startNode1 endNode1 startNode2 endNode2 tracerCode name' \\\n + '\\n:BoundarySegment 1 455 1 1 ' + str(ncol) + ' 1 1 4 \\\"Inlet\\\"' \\\n + '\\n:BoundarySegment 2 544 1 ' + str(ncol+1) + ' ' + str(2*ncol) + ' 1 1 4 \\\"Outlet\\\"' \\\n + '\\n:ShorelineCount 1' \\\n + '\\n:ShorelineNodeCount ' + str(2*(nrow+ncol-2)) \\\n + '\\n:EndHeader' \\\n + '\\n:BeginNodes ' + str(allxyz[:,0].size) + '\\n'\n with open(fname + '_BC.bc2', 'w') as f: \n f.write(h)\n with open(fname + '_BC.bc2', 'a') as f:\n xyz = np.copy(allxyz)\n xyz[:,2] = 0\n np.savetxt(f, xyz, fmt='%.6e')\n f.write(':EndNodes\\n:BeginElements ' + str(nele) + '\\n')\n np.savetxt(f, d, fmt='%d')\n f.write(':EndElements\\n:BeginTable ' + str(2*(nrow+ncol-2)) + ' 15\\n')\n with open(fname + '_BC.cli', 'r') as g:\n lines = g.read()\n f.write(lines[:-1])\n f.write(':EndTable\\n\\n')\n print(' [done]')", "def do_H5F_2_PY(AX_dic, tag, d):\n\n # accesing xs file\n ps1 = xs_data(AX_dic['path']['file_path'], AX_dic['A2'][tag]['info']['xs_folder'], AX_dic['A2'][tag]['info']['xs_file'],\n AX_dic['path']['sbr_path'], AX_dic['path']['sbr_file']) # path for xs and sbr is defines\n ps1.get_phase_space(grid_flag='FG')\n # the auxiliary files are generated with sbr. 
if generate_out_flag='yes'\n # the *.out files are generated.\n grid_flag = 'FG'\n ps1.xs_auxiliar_file_generator(AX_dic['A2'][tag]['info']['generate_out_flag'], AX_dic['A2'][tag]['info']['flag_FG2semiFG'], grid_flag,\n AX_dic['path']['out_folder'], AX_dic['A2'][tag]['info']['out_alias']) # grid_flag is required, options; 'SG', 'FG'\n domain_ofinterest = cp.deepcopy(ps1.phase_space)\n xs_ofinterest, domain_ofinterest = domain_reduction(domain_ofinterest, d, AX_dic['A2'][\n tag]['info']['evol_vec'], ps1.order)\n IRG = []\n for key in xs_ofinterest.keys():\n IRG.append('_' + str(len(xs_ofinterest[key])))\n AX_dic['A2'][tag]['info']['IRG'] = ''.join(IRG)\n xs_out, order = ps1.xs_retrival_FG(xs_ofinterest, domain_ofinterest,\n AX_dic['path']['out_folder'], AX_dic['A2'][tag]['info']['out_alias'], AX_dic['A2'][tag]['info']['flag_FG2semiFG'])\n conc_dic, fi_dic, k_dic = ps1.cellwise_retrival(domain_ofinterest, AX_dic['path']['out_folder'],\n AX_dic['A2'][tag]['info']['out_alias'], AX_dic['A2'][tag]['info']['flag_FG2semiFG'], AX_dic['A2'][tag]['info']['evol_vec'])\n\n # The structure of the xs data is here generated\n AX_dic['A2'][tag]['data'] = {}\n AX_dic['A2'][tag]['data']['I'] = xs_out\n AX_dic['A2'][tag]['data']['order_tuple'] = order\n AX_dic['A2'][tag]['data']['PS'] = ps1.domain_ofinterest\n\n for i in AX_dic['A2'][tag]['data']['I'].keys():\n AX_dic['A2'][tag]['data']['I'][i]['conc'] = conc_dic[i]\n AX_dic['A2'][tag]['data']['fi'] = fi_dic\n AX_dic['A2'][tag]['data']['k'] = k_dic", "def _write_nemo_hr_file(rpn_hr_ds_path, nemo_hr_ds_path):\n with xarray.open_dataset(rpn_hr_ds_path) as rpn_hr:\n logging.debug(\n f\"calculating specific humidity & incoming longwave radiation from {rpn_hr_ds_path}\"\n )\n qair, ilwr, rh = _calc_qair_ilwr(rpn_hr)\n u_out, v_out = _rotate_winds(rpn_hr)\n data_vars = {\n \"nav_lon\": rpn_hr.nav_lon,\n \"nav_lat\": rpn_hr.nav_lat,\n # [:, 0] drops z dimension that NEMO will not tolerate\n \"qair\": qair[:, 0],\n \"RH_2maboveground\": rh[:, 0],\n \"therm_rad\": ilwr[:, 0],\n \"u_wind\": u_out[:, 0],\n \"v_wind\": v_out[:, 0],\n # \"LHTFL_surface\": ** needs to be calculated**,\n }\n nemo_rpn_vars = (\n (\"atmpres\", \"PN\"),\n (\"percentcloud\", \"NT\"),\n (\"PRATE_surface\", \"RT\"),\n (\"precip\", \"PR\"),\n (\"solar\", \"FB\"),\n (\"tair\", \"TT\"),\n )\n missing_vars = \"\"\n for nemo_var, rpn_var in nemo_rpn_vars:\n try:\n # [:, 0] drops z dimension that NEMO will not tolerate\n data_vars.update({nemo_var: getattr(rpn_hr, rpn_var)[:, 0]})\n except AttributeError:\n # Variable is missing from RPN dataset, so provide a placeholder DataArray\n # full of NaNs that we will deal with later via interpolation\n data_vars.update(\n {nemo_var: xarray.DataArray(numpy.full_like(qair[:, 0], numpy.nan))}\n )\n missing_vars = (\n \", \".join((missing_vars, nemo_var)) if missing_vars else nemo_var\n )\n logging.warning(f\"missing RPN variable {rpn_var} from {rpn_hr_ds_path}\")\n nemo_hr = xarray.Dataset(\n data_vars=data_vars, coords=rpn_hr.coords, attrs=rpn_hr.attrs\n )\n nemo_hr.attrs[\"history\"] += (\n f\"\\n{arrow.now().format('ddd MMM DD HH:mm:ss YYYY')}: \"\n f\"Add specific and relative humidity and incoming longwave radiation variables from \"\n f\"correlations\"\n )\n if missing_vars:\n nemo_hr.attrs[\"missing_variables\"] = missing_vars\n _add_vars_metadata(nemo_hr)\n _write_netcdf_file(nemo_hr, nemo_hr_ds_path)", "def make_stehle(self):\n\n temp_k = self.temp * e / k # temperature in K\n dens_cm = self.e_dens * 1.e-6 # electronic density in cm-3\n prefix = 'n_' + 
str(self.n_upper) + '_' + str(self.n_lower) + '_'\n\n # extract raw tabulated tabulated_data\n tab_temp_k = np.array(pystark.nc.variables[prefix + 'tempe'].data) # tabulated electron temperatures (K)\n olam0 = pystark.nc.variables[prefix + 'olam0'].data # line centre wavelength (A)\n num_tab_dens = pystark.nc.variables[prefix + 'id_max'].data\n fainom = pystark.nc.variables[prefix + 'fainom'].data\n tab_dens_cm = np.array(pystark.nc.variables[prefix + 'dense'].data) # tabulated electron densities (cm ** -3)\n f00 = np.array(pystark.nc.variables[prefix + 'f00'].data) # normal Holtsmark field strength (30 kV / m)\n dl12 = np.array(pystark.nc.variables[prefix + 'dl12'].data)\n dl12s = np.array(pystark.nc.variables[prefix + 'dl12s'].data)\n fainu = pystark.nc.variables[\n prefix + 'fainu'].data # Asymptotic value of iStark * (alpha ** 2.5) (\"wings factor in alfa units\")\n pr0 = np.array(pystark.nc.variables[\n prefix + 'pr0'].data) # Ratio of the mean interelectronic distance to the electronic Debye length\n jtot = np.array(pystark.nc.variables[prefix + 'jtot'].data,\n dtype=np.int) # \"number of wave lengths for the couple (T,Ne)\"\n dom = np.array(pystark.nc.variables[prefix + 'dom'].data) # frequency detunings in units (rad / (s*ues)\n d1om = np.array(pystark.nc.variables[prefix + 'd1om'].data)\n o1line = np.array(pystark.nc.variables[prefix + 'o1line'].data)\n o1lines = np.array(pystark.nc.variables[prefix + 'o1lines'].data)\n\n # ensure given temperature + density falls within tabulated values\n # change sligtly the value of the input density\n # dens_cm in order to remain , as far as possible, inside the tabulation\n # JSA: this first step seems bogus!\n\n if np.abs(dens_cm - tab_dens_cm[0]) / dens_cm <= 1.0E-3:\n dens_cm = tab_dens_cm[0] * 1.001\n\n for id in np.arange(1, num_tab_dens + 1):\n if np.abs(dens_cm - tab_dens_cm[id]) / dens_cm <= 1.0E-3:\n dens_cm = tab_dens_cm[id] * 0.999\n\n if dens_cm >= 2.0 * tab_dens_cm[num_tab_dens]:\n raise Exception(\n 'Your input density is higher than the largest tabulated value %f' % tab_dens_cm[num_tab_dens])\n\n if dens_cm <= tab_dens_cm[0]:\n raise Exception('Your input density is smaller than the smallest tabulated value %f' % tab_dens_cm[0])\n\n if temp_k >= tab_temp_k[9]:\n raise Exception('Your input temperature is higher than the largest tabulated value %f' % tab_temp_k[9])\n\n if temp_k <= tab_temp_k[0]:\n raise Exception('Your input temperature is lower than the smallest tabulated value %f' % tab_temp_k[0])\n\n normal_holtsmark_field = 1.25e-9 * (dens_cm ** (2. / 3.)) # normal field value in ues\n\n # calculate line centre wavelength and frequency using Rydberg formula\n # JSA: I have made this step clearer and corrected for deuteron mass in the Rydberg constant (though the effect is small)\n # TODO make sure this matches olam0 parameter above -- why were there two variables in the first place?!\n # rydberg_m = Rydberg / (1. + (electron_mass / physical_constants['deuteron mass'][0]))\n # wl_0_angst = 1e10 * (rydberg_m * (1 / n_lower ** 2 - 1 / n_upper ** 2)) ** -1\n\n wl_centre_angst = self.wl_centre * 1e10\n\n c_angst = c * 1e10 # velocity of light in Ansgtroms / s\n angular_freq_0 = 2 * np.pi * c_angst / wl_centre_angst # rad / s\n\n otrans = -2 * np.pi * c_angst / wl_centre_angst ** 2\n\n olines = o1lines / np.abs(otrans)\n oline = o1line / np.abs(otrans)\n\n # Limit analysis_tools to uncorrelated plasmas.\n # check that mean interelectronic distance is smaller than the electronic Debye length (equ. 
10)\n PR0_exp = 0.0898 * (dens_cm ** (1. / 6.)) / np.sqrt(temp_k) # = (r0 / debye)\n if PR0_exp > 1.:\n raise Exception('The plasma is too strongly correlated\\ni.e. r0/debye=0.1\\nthe line cannot be computed.')\n\n # fainom_exp=fainom*(F00_exp**1.5)\n # fainum_exp=fainom_exp/( (OPI*2.)**1.5)\n\n # ========================\n # TABULATION Format CDS\n # si on veut ecrire\n # n -np lambda0 kalpha Ne E0 T R0/Debye Dalpha iDoppler iStark\n\n # IN_cds= N+0.01\n # INP_cds = NP+0.01\n\n # ***********************************************************\n # Don't edit the CDS format...\n # ***********************************************************\n\n # Skipped the code in the IF statement starting at line 470, since it\n # isn't used, if (.FALSE.) ...\n\n # ==============================================\n # define an unique detunings grid - domm - for the tabulated\n # profiles ( various temperatures , densities)\n # calculate all the line shapes for this common grid\n # units used at this points are Domega_new= Delta(omega)/F00\n # in rd/(s-1 ues)\n\n max_num_dens = 30 # Maximum number of densities\n max_num_tab_temp = 10\n max_num_detunings = 60 # Maximum number of detunings\n jtot = jtot.astype(np.int)\n domm = np.zeros(100000)\n dom0 = np.zeros(10000)\n tprof = np.zeros([max_num_dens, max_num_tab_temp, 10000])\n tprofs = np.zeros([max_num_dens, max_num_tab_temp, 10000])\n uprof = np.zeros([max_num_dens, 10000])\n uprofs = np.zeros([max_num_dens, 10000])\n\n inc = 0\n domm[inc] = 0.0\n # ---- Look to replace this loop\n for id in np.arange(num_tab_dens + 1): # loop over tab densities\n for j in np.arange(max_num_tab_temp): # loop over tab temperatures (?)\n for i in np.arange(1, jtot[id, j]):\n inc += 1\n dom0[inc] = dom[id, j, i]\n\n inc = np.count_nonzero(dom)\n npik = inc + 1\n # nut=10000\n\n # Calling numpy sort instead of piksrt\n tmp = np.sort(dom0[0:npik])\n dom0[0:npik] = tmp[0:npik]\n # dom0 seems to agree with the FORTRAN version\n\n inc = 0\n domm[0] = 0.0\n # print 'npik',npik\n # ---- Look to replace this loop\n for i in np.arange(1, npik):\n dif = (dom0[i] - dom0[i - 1])\n if dif <= 1.0E-6:\n continue\n if dif / np.abs(dom0[i]) <= 0.1:\n continue\n inc = inc + 1\n domm[inc] = dom0[i]\n\n jdom = inc + 1 # One line after marker 35\n\n for id in np.arange(num_tab_dens):\n for j in np.arange(10):\n if pr0[id, j] > 1.0:\n continue\n\n tprof[id, j, 0] = oline[id, j, 0]\n tprofs[id, j, 0] = olines[id, j, 0]\n\n if jtot[id, j] == 0:\n continue\n\n for i in np.arange(1, jdom + 1):\n skip1 = False\n skip2 = False\n # print 'i',i\n domeg = domm[i]\n ij_max = jtot[id, j]\n # print 'domeg,ij_max',domeg,ij_max\n for ij in np.arange(1, ij_max - 1):\n # print 'ij',ij\n test = (domeg - dom[id, j, ij]) * (domeg - dom[id, j, ij - 1])\n # print 'test1:',test\n if test <= 0.0:\n # print 'triggered test1'\n x1 = dom[id, j, ij - 1]\n x2 = dom[id, j, ij]\n x3 = dom[id, j, ij + 1]\n y1 = oline[id, j, ij - 1]\n y2 = oline[id, j, ij]\n y3 = oline[id, j, ij + 1]\n # print 'x1,x2,x3',x1,x2,x3\n # print 'y1,y2,y3',y1,y2,y3\n tprof[id, j, i] = pystark.FINTRP(x1, x2, x3, y1, y2, y3, domeg)\n y1 = olines[id, j, ij - 1]\n y2 = olines[id, j, ij]\n y3 = olines[id, j, ij + 1]\n tprofs[id, j, i] = pystark.FINTRP(x1, x2, x3, y1, y2, y3, domeg)\n # print 'tprof[id,j,i]',tprof[id,j,i]\n # print 'tprofs[id,j,i]',tprofs[id,j,i]\n skip1 = True\n skip2 = True\n break\n\n if skip1 is False:\n test = (domeg - dom[id, j, ij_max - 2]) * (domeg - dom[id, j, ij_max - 1])\n # print 'test2:',test\n # print 'domeg',domeg\n # print 
'dom[id,j,ij_max-1]',dom[id,j,ij_max-2]\n # print 'dom[id,j,ij_max]',dom[id,j,ij_max-1]\n if test <= 0.0:\n # print 'triggered test2'\n x1 = dom[id, j, ij_max - 3]\n x2 = dom[id, j, ij_max - 2]\n x3 = dom[id, j, ij_max - 1]\n y1 = oline[id, j, ij_max - 3]\n y2 = oline[id, j, ij_max - 2]\n y3 = oline[id, j, ij_max - 1]\n tprof[id, j, i] = pystark.FINTRP(x1, x2, x3, y1, y2, y3, domeg)\n y1 = olines[id, j, ij_max - 3]\n y2 = olines[id, j, ij_max - 2]\n y3 = olines[id, j, ij_max - 1]\n tprofs[id, j, i] = pystark.FINTRP(x1, x2, x3, y1, y2, y3, domeg)\n skip2 = True\n # print 'x1,x2,x3',x1,x2,x3\n # print 'y1,y2,y3',y1,y2,y3\n # print 'tprof[id,j,i]',tprof[id,j,i]\n # print 'tprofs[id,j,i]',tprofs[id,j,i]\n continue\n\n if skip2 is False:\n if domeg > dom[id, j, ij_max]:\n # print 'triggered test3'\n tprof[id, j, i] = fainom / (domeg ** 2.5)\n tprofs[id, j, i] = tprof[id, j, i]\n continue\n\n # We can skip writing the intermediate file\n\n\n for id in np.arange(num_tab_dens):\n otest_dens = (dens_cm - tab_dens_cm[id]) * (dens_cm - tab_dens_cm[id + 1])\n if otest_dens <= 0.0:\n dense1 = tab_dens_cm[id]\n dense2 = tab_dens_cm[id + 1]\n id1 = id\n id2 = id + 1\n break\n\n if dens_cm >= tab_dens_cm[num_tab_dens]:\n dense1 = tab_dens_cm[num_tab_dens - 1]\n dense2 = tab_dens_cm[num_tab_dens]\n id1 = num_tab_dens - 1\n id2 = num_tab_dens\n\n for it in np.arange(10):\n otest = (temp_k - tab_temp_k[it]) * (temp_k - tab_temp_k[it + 1])\n if otest <= 0.0:\n it1 = it\n it2 = it + 1\n # pr01 = pr0[id2,it1] # max value of pr0 for T1,T2,dense1,dense2\n tempe1 = tab_temp_k[it]\n tempe2 = tab_temp_k[it + 1]\n break\n\n # interpolation in temperature\n for id in np.arange(id1, id2 + 1):\n for i in np.arange(jdom):\n uprof[id, i] = tprof[id, it1, i] + (temp_k - tempe1) * (tprof[id, it2, i] - tprof[id, it1, i]) / (\n tempe2 - tempe1)\n uprofs[id, i] = tprofs[id, it1, i] + (temp_k - tempe1) * (tprofs[id, it2, i] - tprofs[id, it1, i]) / (\n tempe2 - tempe1)\n\n delta_lambda = np.zeros(jdom)\n delta_nu = np.zeros(jdom)\n wprof_nu = np.zeros(jdom)\n wprofs_nu = np.zeros(jdom)\n\n for i in np.arange(jdom):\n wprof = uprof[id1, i] + (dens_cm - dense1) * (uprof[id2, i] - uprof[id1, i]) / (dense2 - dense1)\n wprofs = uprofs[id1, i] + (dens_cm - dense1) * (uprofs[id2, i] - uprofs[id1, i]) / (dense2 - dense1)\n delta_omega = domm[i] * normal_holtsmark_field\n delta_nu[i] = delta_omega / (2 * np.pi)\n delta_lambda[i] = wl_centre_angst * delta_omega / (angular_freq_0 + delta_omega)\n # print(delta_lambda[i])\n wprof_nu[i] = (wprof / normal_holtsmark_field) * (2. * np.pi)\n wprofs_nu[i] = (wprofs / normal_holtsmark_field) * (2. 
* np.pi)\n # print '%e %e %e %e' %(delta_lambda[i],delta_nu[i],wprof_nu[i],wprofs_nu[i])\n\n delta_lambda2 = np.concatenate((-delta_lambda[::-1], delta_lambda)) + wl_centre_angst # + olam0\n delta_nu2 = np.concatenate((-delta_nu[::-1], delta_nu))\n wprof_nu2 = np.concatenate((wprof_nu[::-1], wprof_nu))\n wprofs_nu2 = np.concatenate((wprofs_nu[::-1], wprofs_nu))\n\n # for some reason, i only get a good agreement with the other models if i take the pure Stark broadened Stehle\n # output and manually convolve it with the Doppler profile -- not sure why...\n ls_sd = wprofs_nu2\n\n # interpolate onto frequency axis\n ls_sd = np.interp(self.freq_axis, delta_nu2 + self.freq_centre, ls_sd)\n\n return ls_sd", "def casdetude_dinardo():\n file_path = PROJECT_PATH + \"/geographycal_data/Monterusciello/MontEdo_buildings\"\n router = Router(building_file=file_path)\n\n router.design_aqueduct(0)\n\n router.solve(router.acqueduct)\n minimal = router.design_minimal_aqueduct(router.acqueduct, \"Q*H\")\n kpi_calculator(minimal)\n\n print(\"N H Z P\")\n for i, (node, datadict) in enumerate(router.acqueduct.nodes.items()):\n print(i, round(datadict[\"H\"]), round(datadict[\"ELEVATION\"]), round(datadict[\"H\"] - datadict[\"ELEVATION\"]))\n\n\n router.write2shp(minimal, PROJECT_PATH + \"/Monterusciello_solution/Monterusciello_acqueduct\")\n router.write2epanet(minimal, PROJECT_PATH + \"/Monterusciello_solution/Monterusciello_acqueduct\")", "def magnetization(h):\n if h.has_eh: raise\n if h.has_spin: \n mx = extract.mx(h.intra)\n my = extract.my(h.intra)\n mz = extract.mz(h.intra)\n else: raise\n np.savetxt(\"MAGNETIZATION_X.OUT\",np.matrix([h.geometry.x,h.geometry.y,mx]).T)\n np.savetxt(\"MAGNETIZATION_Y.OUT\",np.matrix([h.geometry.x,h.geometry.y,my]).T)\n np.savetxt(\"MAGNETIZATION_Z.OUT\",np.matrix([h.geometry.x,h.geometry.y,mz]).T)", "def read_input():\n \n argv = sys.argv\n\n # Read file names from sd input\n f_dy = argv[1] # matdyn.modes\n f_pat = argv[2] # path.out (should be in crystal coords)\n f_ph = argv[3] # ph.x output (Gamma point)\n\n # Read input card\n f_inp = open(\"input.dat\",'r')\n l1 = f_inp.readline()\n l2 = f_inp.readline()\n l3 = f_inp.readline().split()\n f_inp.close()\n\n # Open files\n\n f = open(f_dy,'r') # matdyn.modes \n f_dyn = f.readlines()\n f.close()\n\n f = open(f_pat,'r') # path.out\n f_path = f.readlines()\n f.close()\n\n f = open(f_ph,'r') # ph.x output\n f_zs = f.readlines()\n f.close()\n\n # Assign values to a0, nat, M, nqp\n a0, vol = float(l1.split()[0]), float(l1.split()[1])\n nat = int(l2) \n mass = np.zeros(nat)\n for iat in range(nat):\n mass[iat] = float(l3[iat])\n\n # Assign values to G (reciprocal lattice vec)\n ig = 0 ; i = 0\n for line in f_zs:\n if \"reciprocal axes:\" in line:\n ig = i + 1 \n break\n i += 1 \n\n rG = np.zeros((3,3))\n for ic in range(3):\n rGtext = f_zs[ig+ic][23:48].split()\n rG[ic,:] = np.array([float(rGtext[0]), float(rGtext[1]), float(rGtext[2])])\n\n # Read Z* tensor from f_zs\n i = 0\n iz = 0\n zstart = []\n for line in f_zs:\n if \"(d P / du)\" in line:\n iz = i + 3\n if \"Px\" in line:\n zstart.append(i)\n\n i += 1\n\n # Read the dielectric tensor from f_zs\n i = 0\n ie = 0\n for line in f_zs:\n if \"Dielectric constant in cartesian axis\" in line:\n ie = i + 2\n break\n\n i += 1\n\n # Assign Z* values\n zs = np.zeros((nat,3,3)) # initialize Z*\n\n for iat in range(nat):\n for ic in range(3):\n ztext = f_zs[zstart[iat]+ic][19:56].split()\n for jc in range(3):\n zs[iat][ic][jc] = float(ztext[jc])\n\n # Assing the dielectric tensor\n 
eps = np.zeros((3,3))\n\n for ic in range(3):\n epstext = f_zs[ie+ic][16:66].split()\n for jc in range(3):\n eps[ic][jc] = float(epstext[jc])\n\n # Number of modes and q-points\n nmodes = 3 * nat\n nqpt = int(f_path[0].split()[0])\n\n # Read the q-points\n q = np.zeros((nqpt,4)) # 4th dimension is lenght for q-points on a line, weights for q-points on a grid \n for iq in range(1,nqpt+1):\n q[iq-1,] = np.array([float(f_path[iq].split()[0]),float(f_path[iq].split()[1]), \\\n float(f_path[iq].split()[2]),float(f_path[iq].split()[3])])\n\n # Read the eigenvalues(om) and eigenvectors(eig) \n # Initiate first\n om = np.zeros((nmodes,nqpt))\n eig = np.zeros((nmodes,nqpt,nat,3), dtype=complex) \n\n # Get the starting lines for each q-pt\n i = 0\n i_q = []\n for line in f_dyn:\n if \"q =\" in line:\n i_q.append(i+2)\n i += 1\n\n #Assign values to om and eig\n for iq in range(nqpt):\n for imod in range(nmodes):\n omtext = f_dyn[i_q[iq]+imod*(nat+1)][43:55]\n om[imod][iq] = float(omtext)\n for iat in range(nat):\n etext = f_dyn[i_q[iq]+imod*(nat+1)+iat+1][2:72].split()\n for ic in range(3):\n eig.real[imod][iq][iat][ic]=float(etext[2*ic])*np.sqrt(mass[iat])\n eig.imag[imod][iq][iat][ic]=float(etext[2*ic+1])*np.sqrt(mass[iat])\n\n #Normalize the eigenvectors\n t1 = eig[imod,iq,:,:]\n t_nu = np.sum(np.sum(np.conjugate(t1)*t1,axis=0))\n eig[imod,iq,:,:] = eig[imod,iq,:,:]/np.sqrt(np.abs(t_nu))\n\n # Check normalization\n delta = np.zeros((nmodes,nmodes), dtype=complex)\n for iat in range(nat):\n for ic in range(3):\n t2 = eig[:,iq,iat,ic]\n delta += np.outer(np.conjugate(t2),t2)\n\n unit = np.diag(np.diag(np.ones((nmodes,nmodes)))) # Unit vector\n test = np.abs( (delta-unit) )\n if ( np.max(test) > 1e-3):\n print \"Non-orthonormal eigenvector at iq=\", q[iq,:]\n\n return om, eig, q, zs, eps, mass, a0, vol, rG, nmodes, nqpt, nat", "def dag2htk(self, file_path):\n open_fn = gzip.open if file_path.endswith('.gz') else open\n with open_fn(file_path, 'wb') as fh:\n for k, v in self.header.items():\n string = '%s=%s\\n' % (k, v)\n fh.write(string.encode())\n fh.write(('N=%d\\tL=%d\\n' % (\n self.num_nodes(), self.num_arcs())).encode())\n mapping = {}\n for idx, node in enumerate(self.nodes):\n if node.var:\n string = 'I=%d\\tt=%.2f\\tW=%s\\tv=%d\\n' % (\n idx, node.entry/FRATE, node.sym, node.var)\n else:\n string = 'I=%d\\tt=%.2f\\tW=%s\\n' % (\n idx, node.entry/FRATE, node.sym)\n fh.write(string.encode())\n mapping[node] = idx\n for idx, arc in enumerate(self.arcs):\n string = 'J=%d\\tS=%d\\tE=%d\\ta=%.2f\\tl=%.3f' % (\n idx,\n mapping[arc.src],\n mapping[arc.dest],\n arc.ascr,\n arc.lscr,\n )\n if arc.nscr:\n string += '\\tn=' + ','.join(\n ['{:.3f}'.format(n) for n in arc.nscr])\n if arc.iscr:\n string += '\\ti=' + ','.join(\n ['{:.3f}'.format(i) for i in arc.iscr])\n string += '\\n'\n fh.write(string.encode())", "def main():\n\n \"\"\"\n nodes, hd3 = erdos_rennie_like(100,8333,5)\n export('d3',hd3)\n\n nodes, hd5 = erdos_rennie_like(100,8333,6)\n export('d5',hd5)\n\n nodes, hd6 = erdos_rennie_like(100,8333,7)\n export('d6',hd6)\n \"\"\"\n\n \"\"\"\n nodes, sparse1 = erdos_rennie_like(600, 1200, 3)\n export('sparse_diag1', sparse1)\n\n nodes, sparse2 = erdos_rennie_like(600, 2400, 3)\n export('sparse_diag2',sparse2)\n\n nodes, sparse3 = erdos_rennie_like(600, 5800, 3)\n export('sparse_diag3',sparse3)\n\n nodes, sparse4 = erdos_rennie_like(600,11600, 3)\n export('sparse_diag4',sparse4)\n\n nodes, sparse5 = erdos_rennie_like(600,23200, 3)\n export('sparse_diag5',sparse5)\n \"\"\"\n\n nodes, size1 = 
erdos_rennie_like(100, 500, 3)\n nodes, size2 = erdos_rennie_like(200,1000,3)\n nodes,size3 = erdos_rennie_like(300,1500,3)\n nodes,size4 = erdos_rennie_like(400,2000,3)\n nodes,size5 = erdos_rennie_like(500,2500,3)\n\n export('size_diag1',size1)\n export('size_diag2',size2)\n export('size_diag3',size3)\n export('size_diag4',size4)\n export('size_diag5',size5)", "def density_of_state_plot(N=400,a=1.0,eita=0.01):\n foot_step=2*np.pi/N\n k=np.arange(0.0,2*np.pi/a,foot_step)\n Ek=band_energy(k)\n E=np.arange(-3.0,3.0,0.01)\n Ek.shape=(N,1)\n E.shape=(1,600)\n \"\"\"Reshape E and Ek series with broadcasting method.\"\"\"\n dirac_function=np.imag(np.true_divide(1/np.pi,np.subtract(E-Ek,1j*eita)))\n D=np.sum(np.true_divide(dirac_function,N),axis=0)\n \"\"\"Calculate the density of state with lorentzian broadenning method.\"\"\" \n E.shape=(600)\n plt.plot(D,E)", "def new_track_density(track_key,hist_dims,conn):\n\n # extract all of the md needed to look up the data\n\n (fname,iden_key,track_key,dset_key) = conn.execute(\"select fout,iden_key,comp_key,dset_key from tracking where comp_key = ?\",\n track_key).fetchone()\n print fname\n F = h5py.File(fname,'r')\n print F.keys()[:5]\n try:\n start_plane = F[fd('tracking',track_key)]['start_plane'][:]\n start_part = F[fd('tracking',track_key)]['start_particle'][:]\n\n print len(start_plane)\n \n # figure out the right size to make the array\n dims = F.attrs['dims']\n print dims\n # make data collection object\n hist2D_ac = Hist2D_accumlator(dims,hist_dims)\n # loop over the heads of track index and hash result\n cur_plane = None\n cur_x = None\n cur_y = None\n temp = 0\n fr_count = 0\n for plane,part in zip(start_plane,start_part):\n if not plane == cur_plane:\n cur_plane = plane\n cp = F[ff(cur_plane)]\n cur_x = cp[fd('x',iden_key)]\n cur_y = cp[fd('y',iden_key)]\n temp += cp.attrs['temperature']\n fr_count += 1\n\n hist2D_ac.add_point(\n (cur_x[part],\n cur_y[part])\n )\n pass\n except ValueError,er:\n print ff(cur_plane)\n \n \n finally:\n F.close()\n del F\n\n f = plt.figure()\n ax = f.add_axes([.1,.1,.8,.8])\n c = ax.imshow(np.flipud(hist2D_ac.data.T),interpolation='nearest')\n plt.colorbar(c)\n ax.set_title('%.2f C '%(temp/fr_count) + str(dset_key))\n return hist2D_ac.data", "def read_h5(self):\n infile = h5py.File(self.inf_name,'r')\n\n vardict = self.labdict\n #store data with the correct labels\n for k in infile['plasma/1d'].keys():\n try:\n vardict[k] = infile[self.labdict[k]].value\n except:\n vardict[k] = []\n\n vardict['a_ions']=infile['/plasma/anum'].value\n vardict['znum']=infile['/plasma/znum'].value\n \n\n self.rho_in = vardict['rho']\n self._rho_vol = infile['distributions/rhoDist/abscissae/dim1'].value[1:]\n self._volumes = infile['distributions/rhoDist/shellVolume'].value\n self.nrho_in = np.size(self.rho_in)\n\n if vardict['a_ions'][0]!='/':\n self.nspec = len(vardict['a_ions'])\n else:\n self.nspec = vardict['ni'].shape[1]\n print(\"Number of ions: \", self.nspec)\n if len(vardict['a_ions'])!=len(vardict['znum']):\n print(\"ERROR! 
array of A and Z don't have the same length\")\n\n self.A = vardict['a_ions']\n self.Z = vardict['znum']\n self.nion = self.nspec\n \n self.te_in = vardict['te'][:]\n self.ne_in = vardict['ne'][:]\n self.ti_in = vardict['ti'][:]\n ni1_in = vardict['ni'][:,0]\n self.ni_in = np.zeros((self.nion, self.nrho_in),dtype=float)\n self.ni_in[0,:] = ni1_in\n if self.nion==2:\n ni2_in = vardict['ni'][:,1]\n self.ni_in[1,:] = ni2_in\n elif self.nion==3:\n ni2_in = vardict['ni'][:,1]\n ni3_in = vardict['ni'][:,2]\n self.ni_in[1,:] = ni2_in\n self.ni_in[2,:] = ni3_in\n\n try:\n self.vt_in = vardict['vtor']\n except:\n self.vt_in = np.zeros(self.nrho_in,dtype=float)\n\n try:\n self.zeff_in = vardict['zeff'][:]\n except:\n self.zeff_in = np.zeros(self.nrho_in,dtype=float)\n\n self.ni = np.zeros((self.nion, self.nrho),dtype = float)\n self.spline()", "def sumofstate_HD(T):\n\n Q = np.float64(0.0)\n\n #--- nuclear spin statistics ------------\n g_even = 1 # hydrogen deuteride\n g_odd = 1\n # ---------------------------------------\n\n data = eJHD\n\n nCols = data.shape[1]\n # nCols is equal to the number of vibrational\n # states included in the summation\n\n # generate Q using each energy from the dataset\n for i in range(0, nCols):\n\n # select row for v=i\n row = data[:,i]\n\n # remove nan values\n x = row[np.logical_not(np.isnan(row))]\n\n # get the dimension (equal to J_max)\n nRows = x.shape[0]\n\n # iterate over the available energies\n for j in range(0, nRows):\n E = x[j]\n energy = (-1*E*H*C)\n\n factor = (2*j+1)*math.exp(energy/(K*T))\n\n if j % 2 == 0:\n factor = factor*g_even\n else:\n factor = factor*g_odd\n Q = Q+factor\n\n\n\n # return the sum of states for HD\n return Q", "def mounting_matrix(self):\n # fmt: off\n count = 0\n for x in range(self.ntheta):\n self.M[count][count] = 1\n self.f[count][0] = self.p_in\n count = count + self.nz - 1\n self.M[count][count] = 1\n self.f[count][0] = self.p_out\n count = count + 1\n count = 0\n for x in range(self.nz - 2):\n self.M[self.ntotal - self.nz + 1 + count][1 + count] = 1\n self.M[self.ntotal - self.nz + 1 + count][self.ntotal - self.nz + 1 + count] = -1\n count = count + 1\n count = 1\n j = 0\n for i in range(1, self.nz - 1):\n a = (1 / self.dtheta ** 2) * (self.c1[i][self.ntheta - 1])\n self.M[count][self.ntotal - 2 * self.nz + count] = a\n b = (1 / self.dz ** 2) * (self.c2[i - 1, j])\n self.M[count][count - 1] = b\n c = -((1 / self.dtheta ** 2) * ((self.c1[i][j]) + self.c1[i][self.ntheta - 1])\n + (1 / self.dz ** 2) * (self.c2[i][j] + self.c2[i - 1][j]))\n self.M[count, count] = c\n d = (1 / self.dz ** 2) * (self.c2[i][j])\n self.M[count][count + 1] = d\n e = (1 / self.dtheta ** 2) * (self.c1[i][j])\n self.M[count][count + self.nz] = e\n count = count + 1\n count = self.nz + 1\n for j in range(1, self.ntheta - 1):\n for i in range(1, self.nz - 1):\n a = (1 / self.dtheta ** 2) * (self.c1[i, j - 1])\n self.M[count][count - self.nz] = a\n b = (1 / self.dz ** 2) * (self.c2[i - 1][j])\n self.M[count][count - 1] = b\n c = -((1 / self.dtheta ** 2) * ((self.c1[i][j]) + self.c1[i][j - 1])\n + (1 / self.dz ** 2) * (self.c2[i][j] + self.c2[i - 1][j]))\n self.M[count, count] = c\n d = (1 / self.dz ** 2) * (self.c2[i][j])\n self.M[count][count + 1] = d\n e = (1 / self.dtheta ** 2) * (self.c1[i][j])\n self.M[count][count + self.nz] = e\n count = count + 1\n count = count + 2\n count = 1\n for j in range(self.ntheta - 1):\n for i in range(1, self.nz - 1):\n if j == 0:\n self.f[count][0] = (self.c0w[i][j] - self.c0w[i][self.ntheta - 1]) / self.dtheta\n 
else:\n self.f[count][0] = (self.c0w[i, j] - self.c0w[i, j - 1]) / self.dtheta\n count = count + 1\n count = count + 2\n # fmt: on", "def check_dense_gas(dir='./'):\n import glob\n import pandas as pd\n ff = glob.glob('*.gas')\n\n for i in ff:\n f = pd.read_pickle(i)\n print(i)\n print (f['f_H21'] > 0.0).sum()\n\n print(\"Total dense gas mass: \")\n print(f['m'] * f['f_H21']).sum()\n return None", "def gauss_anharm_inp(filename, anlevel):\n full = io.read_file(filename)\n full = full.split('Z-matrix:')\n zmat = full[0].split('***************************')[2].replace('*', '')\n zmat = zmat.split('Will')[0]\n zmat = ' ' + zmat.lstrip()\n zmat += full[0].split('-------------------------------------------')[3].replace(\n '-', '').replace('-', '').replace('-', '').replace('\\n ', '')\n if not anlevel == 'ignore':\n zmat = zmat.split('#')[0] + ' # ' + anlevel + \\\n ' opt = internal ' + zmat.split('#')[2]\n zmat += '# scf=verytight nosym Freq=Anharmonic Freq=Vibrot\\n'\n zmat += '\\nAnharmonic computation\\n'\n zmat += full[1].split(' Variables:')[0]\n zmat += 'Variables:\\n'\n zmat = zmat.replace('Charge = ', '')\n zmat = zmat.replace('Multiplicity =', '')\n varis = full[1].split('Optimized Parameters')[1].split(\n '--------------------------------------')[1]\n varis = varis.split('\\n')\n del varis[0]\n del varis[-1]\n for var in varis:\n var = var.split()\n zmat += ' ' + var[1] + '\\t' + var[2] + '\\n'\n return zmat", "def amet_memoryWise(self):\r\n # set up logging files to monitor the calculation\r\n logging.basicConfig(filename = os.path.join(self.path,'history_amet_python.log'),\r\n filemode = 'w+', level = logging.DEBUG,\r\n format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\r\n # initialize the time span\r\n # define sigma level\r\n A, B = self.defineSigmaLevels()\r\n # use example input file to load the basic dimensions information\r\n datapath_var = os.path.join(self.path, 'MERRA2_400.inst3_3d_asm_Nv.20160101.nc4.nc')\r\n var_key = Dataset(datapath_var)\r\n lat = var_key.variables['lat'][:]\r\n lon = var_key.variables['lon'][:]\r\n # calculate the reference levels based on A & B and standard surface pressure\r\n half_level = A + B * 101325\r\n level = (half_level[1:] + half_level[:-1]) / 2\r\n # create space for the output\r\n # AMET in the entire column\r\n E = np.zeros((len(lat),len(lon)), dtype=float)\r\n cpT = np.zeros((len(lat),len(lon)), dtype=float)\r\n Lvq = np.zeros((len(lat),len(lon)), dtype=float)\r\n gz = np.zeros((len(lat),len(lon)), dtype=float)\r\n uv2 = np.zeros((len(lat),len(lon)), dtype=float)\r\n logging.info(\"Start retrieving variables T,q,u,v,sp\")\r\n # The shape of each variable is (8,72,361,576)\r\n T = var_key.variables['T'][:]\r\n q = var_key.variables['QV'][:]\r\n sp = var_key.variables['PS'][:] #(8,361,576)\r\n u = var_key.variables['U'][:]\r\n v = var_key.variables['V'][:]\r\n logging.info(\"Extracting variables successfully!\") \r\n # compute gz\r\n z_model = self.calc_gz(var_key)\r\n # get the basic shape\r\n tt, hh, yy, xx = q.shape\r\n AMET = amet.met()\r\n E, cpT, Lvq, gz, uv2 = AMET.calc_met(T, q, sp, u, v, z_model, A, B,\r\n tt, hh, len(lat), len(lon), lat, self.lat_unit)\r\n\r\n return np.mean(E)", "def statsi(h):\n\n # Define constants\n zsa = np.array([0.0, 11000.0, 20000.0, 32000.0, 47000.0, 52000.0, 61000.0, 79000.0, 9.9e20])\n Tsa = np.array([288.15, 216.65, 216.65, 228.65, 270.65, 270.65,252.65, 180.65, 180.65])\n g = 9.80665\n R = 287.0528\n Re = 6346766.0\n Psa = 101325.0\n\n # Calculate geopotential altitude\n z = 
Re*h/(Re+h)\n\n # Loop through atmosphere layers\n for i in range(8):\n \n # Calculate layer temperature gradient\n Lt = -(Tsa[i+1]-Tsa[i])/(zsa[i+1]-zsa[i])\n\n # If no temperature gradient\n if Lt == 0.0:\n\n # Are we in this layer of the atmosphere?\n if z <= zsa[i+1]:\n t = Tsa[i] # Temp isn't changing\n p = Psa*np.exp(-g*(z-zsa[i])/R/Tsa[i])\n d = p/R/t\n break\n\n # We need to go higher\n else:\n Psa *= np.exp(-g*(zsa[i+1]-zsa[i])/R/Tsa[i])\n\n # Temperature gradient\n else:\n ex = g/R/Lt\n if z <= zsa[i+1]:\n t = Tsa[i]-Lt*(z-zsa[i])\n p = Psa*(t/Tsa[i])**ex\n d = p/R/t\n break\n else:\n Psa *= (Tsa[i+1]/Tsa[i])**ex\n\n # We have left the atmosphere...\n else:\n t = Tsa[-1]\n p = 0.0\n d = 0.0\n\n return z, t, p, d", "def main():\n # output filename\n param = read_parameters('Kanki01_input.yaml')\n s = seal(param)\n s.solve_zeroth()\n s.plot_res()", "def nc_to_hdf5_mudis(dataset, config):\n np.warnings.filterwarnings('ignore')\n\n date = datetime.datetime.strptime(dataset.recorddate,\n '%d.%m.%Y') # read date from dateset\n date_name = datetime.datetime.strftime(date,\n '%Y%m%d') # convert date to YYYYMMDD format\n config['date'] = date_name\n\n # Create the directory to save the results\n path = config['str_dir'] + '/radiance/{}/data/'.format(config['date'])\n os.makedirs(os.path.dirname(path), exist_ok=True)\n\n # Read time of the file (correct time)\n time = datetime.datetime.strptime(dataset.recordtime, '%H:%M:%S.')\n time = datetime.datetime.time(time)\n\n # convert time to datetime format\n datetime_name = datetime.datetime.combine(date, time)\n new_name = datetime.datetime.strftime(datetime_name, '%Y%m%d_%H%M%S')\n\n # radiance = dataset.variables['data'][:].reshape(113, 1281)\n # wavelength_axis = dataset.variables['xAxis'][:]\n\n # Create a file in the disk\n with h5py.File(config['str_dir'] + '/radiance/{}/data/{}.h5'.format(\n config['date'], new_name), 'w') as datos:\n\n if not list(datos.items()):\n # Create two datasets(use only one time)\n datos.create_dataset('/data',\n data=dataset['data'][:].reshape(113, 1281),\n dtype='f4')\n # datos.create_dataset('/skymap', data=skymap, dtype='f4')\n else:\n del datos['data']\n # del datos['skymap']\n print('data deleted and corrected')\n datos.create_dataset('/data', data=data, dtype='f4')\n # datos.create_dataset('/skymap', data=skymap, dtype='f4')\n\n # Add attributes to datasets\n datos['data'].attrs['time'] = str(time)\n datos['data'].attrs['Exposure'] = dataset.exposuretime\n datos['data'].attrs['NumAver'] = dataset.AVERAGED\n datos['data'].attrs['CCDTemp'] = dataset.detectortemperature\n datos['data'].attrs['NumSingMes'] = dataset.noofaccumulations\n # datos['data'].attrs['ElectrTemp'] = dataset.\n datos['data'].attrs['Latitude'] = '52.39N'\n datos['data'].attrs['Longitude'] = '9.7E'\n datos['data'].attrs['Altitude'] = '65 AMSL'\n\n chn = np.arange(1, 114)\n datos.create_dataset('/channel', data=chn, dtype=np.float32)\n datos.create_dataset('/wavelength', data=dataset['xAxis'][:])\n\n datos['data'].dims.create_scale(datos['channel'], 'channel')\n datos['data'].dims[0].attach_scale(datos['channel'])\n datos['data'].dims[0].label = 'channel'\n datos['data'].dims[1].label = 'wavelength'\n\n # datos['skymap'].dims[0].label = 'channel'\n # datos['skymap'].dims[1].label = 'Azimuth, Zenith'\n\n datos.close()", "def main():\n N = 201 # Amount of gridpoints, odd number to include 0\n L = 10 # Size of the system\n k = 50 # Amount of energies and states calculated\n x = y = np.linspace(-L/2,L/2,N) # Gridpoints\n h = x[1]-x[0] # 
Spacing of gridpoints\n\n # Solve the system with and without perturbation\n E,psi,E_p,psi_p = fdm_2d(N,L,x,y,h,k)\n\n # Print the first two energies and the absolute error of the energies\n print('Energies of the two lowest states:')\n print('E_00 = %.4f' % E[0])\n print('E_01 = %.4f' % E[1], '\\n')\n print('Absolute error for E_00: %.4e' % np.abs(E[0]-1))\n print('Absolute error for E_01: %.4e' % np.abs(E[1]-2))\n\n print('\\nEnergies of the two lowest states after perturbation:')\n print('E_00 = %.4f' % E_p[0])\n print('E_01 = %.4f' % E_p[1])\n\n # Calculate the normalized densities of the states\n densities_norm = np.zeros((k,N,N))\n densities_norm_p = np.zeros((k,N,N))\n for i in range(k):\n # meshgrid form\n state = np.reshape(psi.T[i],(N,N))\n state_p = np.reshape(psi_p.T[i],(N,N))\n densities_norm[i] = normalized_density(state,x)\n densities_norm_p[i] = normalized_density(state_p,x)\n\n # Analytical solution of the ground state\n X,Y = np.meshgrid(x,y)\n psi00_exact = phi(X,0)*phi(Y,0)\n psi00_exact_density = normalized_density(psi00_exact,x)\n\n print('\\nMaximum absolute error of the normalized ground state densities of the unperturbated system:')\n print('errmax = {:.4e}'.format(np.max(np.abs(densities_norm[0]-psi00_exact_density))))\n\n # Plotting the ground state density of the unperturbated system\n fig1 = plt.figure(figsize=plt.figaspect(0.5))\n ax = fig1.add_subplot(1,2,1,projection='3d')\n surf1 = ax.plot_surface(X, Y, densities_norm[0], cmap=cm.coolwarm,\n linewidth=0, antialiased=False)\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n fig1.suptitle(r'Normalized ground state density $|\\psi|^2$ using FDM')\n ax = fig1.add_subplot(1,2,2)\n ax.imshow(densities_norm[0],extent=[-L/2,L/2,-L/2,L/2])\n plt.savefig(os.path.join(path,'FDM_psi00_unperturbated.png'))\n plt.close()\n\n\n fig2 = plt.figure(figsize=plt.figaspect(0.5))\n ax = fig2.add_subplot(1,2,1,projection='3d')\n surf2 = ax.plot_surface(X, Y, psi00_exact_density, cmap=cm.coolwarm,\n linewidth=0, antialiased=False)\n\n\n # Plotting the ground state density of the perturbated system\n fig1 = plt.figure(figsize=plt.figaspect(0.5))\n ax = fig1.add_subplot(1,2,1,projection='3d')\n surf1 = ax.plot_surface(X, Y, densities_norm_p[0], cmap=cm.coolwarm,\n linewidth=0, antialiased=False)\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n fig1.suptitle(r'Normalized ground state density $|\\psi|^2$ of the perturbated system using FDM')\n ax = fig1.add_subplot(1,2,2)\n ax.imshow(densities_norm_p[0],extent=[-L/2,L/2,-L/2,L/2])\n plt.savefig(os.path.join(path,'FDM_psi00_perturbated.png'))\n plt.close()\n\n fig2 = plt.figure(figsize=plt.figaspect(0.5))\n ax = fig2.add_subplot(1,2,1,projection='3d')\n surf2 = ax.plot_surface(X, Y, psi00_exact_density, cmap=cm.coolwarm,\n linewidth=0, antialiased=False)\n\n # Plotting the analytic ground state density\n fig1 = plt.figure(figsize=plt.figaspect(0.5))\n ax = fig1.add_subplot(1,2,1,projection='3d')\n surf1 = ax.plot_surface(X, Y, psi00_exact_density, cmap=cm.coolwarm,\n linewidth=0, antialiased=False)\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n fig1.suptitle(r'Analytical normalized ground state density $|\\psi|^2$')\n ax = fig1.add_subplot(1,2,2)\n ax.imshow(psi00_exact_density,extent=[-L/2,L/2,-L/2,L/2])\n plt.savefig(os.path.join(path,'exact_psi00.png'))\n plt.close()\n\n # Plot some of the other densities and save them as pdf\n for i in range(1,20):\n density = densities_norm[i]\n fig = plt.figure(figsize=plt.figaspect(0.5))\n plt.imshow(density,extent=[-L/2,L/2,-L/2,L/2])\n 
plt.title('n={}'.format(i))\n plt.savefig(os.path.join(path,'FDM_unperturbated{}.png'.format(i)))\n plt.close()\n\n density_p = densities_norm_p[i]\n fig = plt.figure(figsize=plt.figaspect(0.5))\n plt.imshow(density_p,extent=[-L/2,L/2,-L/2,L/2])\n plt.title('n={}'.format(i))\n plt.savefig(os.path.join(path,'FDM_perturbated{}.png'.format(i)))\n plt.close() \n\n # Plot analytical states until nx,ny = 5\n for nx in range(6):\n for ny in range(6):\n state = phi(X,nx)*phi(Y,ny)\n density = normalized_density(state,x)\n plt.figure()\n plt.imshow(density,extent=[-L/2,L/2,-L/2,L/2])\n plt.title('$n_x={}, n_y={}$'.format(nx,ny))\n plt.savefig(os.path.join(path,'analytical_state_{}_{}.png'.format(nx,ny)))\n plt.close()\n\n # Get analytical energies from nx,ny = 0 to 10\n n = 10\n energies = analytical_energies(n)\n\n # Plot k analytical and the FDM energies\n index = np.arange(k)\n plt.figure()\n plt.plot(index,energies[0:k],label='Analytical energies')\n plt.plot(index,E,label='Unperturbated energies')\n plt.plot(index,E_p,label='Perturbated energies')\n plt.legend()\n plt.xlabel('n')\n plt.ylabel(r'$\\tilde{E} = \\frac{E}{\\hbar\\omega}$')\n plt.title('Energies')\n plt.savefig(os.path.join(path,'energies.png'))\n plt.close()", "def _write_dx(self, FN, data):\n n_points = data['counts'][0] * data['counts'][1] * data['counts'][2]\n if FN.endswith('.dx'):\n F = open(FN, 'w')\n else:\n import gzip\n F = gzip.open(FN, 'w')\n\n F.write(\"\"\"object 1 class gridpositions counts {0[0]} {0[1]} {0[2]}\norigin {1[0]} {1[1]} {1[2]}\ndelta {2[0]} 0.0 0.0\ndelta 0.0 {2[1]} 0.0\ndelta 0.0 0.0 {2[2]}\nobject 2 class gridconnections counts {0[0]} {0[1]} {0[2]}\nobject 3 class array type double rank 0 items {3} data follows\n\"\"\".format(data['counts'], data['origin'], data['spacing'], n_points))\n\n for start_n in range(0, len(data['vals']), 3):\n F.write(' '.join(['%6e' % c\n for c in data['vals'][start_n:start_n + 3]]) + '\\n')\n\n F.write('object 4 class field\\n')\n F.write('component \"positions\" value 1\\n')\n F.write('component \"connections\" value 2\\n')\n F.write('component \"data\" value 3\\n')\n F.close()", "def data(dbfilename = os.path.expanduser('~/python/project/znuc2012.S4.star.el.y.stardb.gz')):\n db = stardb.load(dbfilename) # loads database\n nmass = db.nvalues[0] # finds the number of values\n masses = db.values[0][:nmass] #creates a vector of the initial masses\n isodb = stardb.load(os.path.expanduser('~/python/project/znuc2012.S4.star.deciso.y.stardb.gz'))\n \n massnumber = []\n for x in range(len(isodb.ions)):\n mn = isodb.ions[x].A\n massnumber.append(mn)\n massnumber = np.array(massnumber)\n np.save(os.path.expanduser('~/python/project/filestoload/Massnumber'), massnumber) \n####################### \n# write all energy and mixing values\n\n energyvalues = np.unique(db.fielddata['energy'])\n mixingvalues = np.unique(db.fielddata['mixing'])\n masterremnant = [] # result will be a multidimensional array\n elementdata = []\n isodata = []\n r = len(db.ions) # for loop iteration\n w = len(isodb.ions)\n for energy in energyvalues:\n remmixingarray = [] # reinitialise the next dimension\n elmixingarray = []\n isomixingarray = []\n for mixing in mixingvalues:\n \n \n ii = np.logical_and(np.isclose(db.fielddata['energy'], energy), np.isclose(db.fielddata['mixing'], mixing))\n \n mass = db.fielddata[ii]['remnant']\n remmixingarray.append(mass) # this is an array of remnant masses for one energy and every mixing value\n \n elfill = [] # reinitialise the next dimension again\n isofill = []\n \n \n 
for m in range(w):\n \n a = isodb.ions[m] #for obtaining the element string\n kk = np.where(isodb.ions==isotope.ion(a)) # finding the indices in db.ions for a particular element\n jj = np.where(ii)\n isotopes = isodb.data[jj, kk][0] # array of abundances for that particular element\n isofill.append(isotopes) # this is an array of element data for every mass for one energy and one mixing value\n\n\n\n\n isomixingarray.append(isofill) \n \n \n masterremnant.append(remmixingarray) # these master arrays have every bit of data under its own energy. so called like elementdata[energy][mixing][elementnumber] gives the element data for every star for a single element.\n \n isodata.append(isomixingarray)\n \n np.save(os.path.expanduser('~/python/project/filestoload/IsoData'), isodata)\n np.save(os.path.expanduser('~/python/project/filestoload/RemnantMasses'), masterremnant)\n np.save(os.path.expanduser('~/python/project/filestoload/Ioninfo'), isodb.ions)\n time = [] \n \n for mass in masses: # for loop will cycle through the masses and grab the lifetime of each star\n s = str(mass) # converts the mass number to a string for file acquiring\n if s.endswith('.0'): # formatting issue, to match the filenames\n s = s[:-2] \n filename = os.path.expanduser('~/python/project/dumps/z{}#presn').format(s)\n # grabs filename corrosponding to this mass\n d = kepdump.load(filename) # loads the kepdump data for this star\n time.append(d.time) \n yr = 365.2425*86400 \n time = np.array(time)/yr\n dataarray = [masses, time]\n\n\n return dataarray", "def save_electronic_eigenstates(m_max, nu_max, mu_max, R_grid, beta, theta):\n\n #Parallel stuff\n #--------------\n #Get processor 'name'.\n my_id = pypar.rank() \n \n #Get total number of processors.\n nr_procs = pypar.size()\n \n #Get number of tasks.\n nr_tasks = len(R_grid)\n\n #Get a list of the indices of this processors share of R_grid. \n my_tasks = nice_stuff.distribute_work(nr_procs, nr_tasks, my_id)\n\n #The processors will be writing to the same file.\n #In order to avoid problems, the procs will do a relay race of writing to\n #file. This is handeled by blocking send() and receive().\n #Hopefully there will not be to much waiting.\n\n #ID of the processor that will start writing.\n starter = 0\n\n #ID of the processor that will be the last to write.\n ender = (nr_tasks - 1) % nr_procs\n\n #Buffer for the baton, i.e. 
the permission slip for file writing.\n baton = r_[0]\n\n #The processor one is to receive the baton from.\n receive_from = (my_id - 1) % nr_procs \n\n #The processor one is to send the baton to.\n send_to = (my_id + 1) % nr_procs \n #-------------------------------\n\n \n #Initializing the HDF5 file\n #--------------------------\n if my_id == 0:\n\t#Creates a config instance.\n\tmy_config = config.Config(m = m_max, nu = nu_max, mu = mu_max, \n\t R = R_grid[0], beta = beta, theta = theta)\n\t\n\t#Number of basis functions.\n\tbasis_size = (2 * m_max + 1) * (nu_max + 1) * (mu_max + 1)\n\n\t#Generate a filename.\n\tfilename = name_gen.electronic_eigenstates_R(my_config)\n\n\tf = tables.openFile(filename, 'w')\n\ttry:\n\t f.createArray(\"/\", \"R_grid\", R_grid)\t \n\t \n\t #Looping over the m values.\n\t for m in range(-1 * m_max, m_max + 1):\n\t\t#Creating an m group in the file.\n\t\tm_group = name_gen.m_name(m)\n\t\tf.createGroup(\"/\", m_group)\n\t\t\n\t\t#Looping over th q values.\n\t\tfor q in range(mu_max + 1):\n\t\t #Creating a q group in the m group in the file.\n\t\t q_group = name_gen.q_name(q)\n\t\t f.createGroup(\"/%s/\"%m_group, q_group)\n\n\t\t #Initializing the arrays for the eigenvalues and states.\n\t\t f.createCArray('/%s/%s/'%(m_group, q_group),'E', \n\t\t\ttables.atom.FloatAtom(), \n\t\t\t(basis_size/(mu_max + 1), nr_tasks),\n\t\t\tchunkshape=(basis_size/(mu_max + 1), 1))\n\t\t \n\t\t f.createCArray('/%s/%s/'%(m_group, q_group),'V', \n\t\t\ttables.atom.ComplexAtom(16), \n\t\t\t(basis_size, basis_size/(mu_max + 1), nr_tasks),\n\t\t\tchunkshape=(basis_size, basis_size/(mu_max + 1), 1))\n\t \n\tfinally:\n\t f.close()\n\t\n\t#Save config instance.\n\tmy_config.save_config(filename)\n #----------------------------------\n\n\n #Solving the TISE\n #----------------\n #Looping over the tasks of this processor.\n for i in my_tasks:\n\t#Creating TISE instance.\n\ttise = tise_electron.TISE_electron(m = m_max, nu = nu_max, \n\t mu = mu_max, R = R_grid[i], beta = beta, theta = theta)\n\t\n\t#Diagonalizing the hamiltonian.\n\tE,V = tise.solve()\n\t\n\t#First file write. (Send, but not receive baton.)\n\tif starter == my_id:\n\t #Write to file.\n\t tise.save_eigenfunctions_R(E, V, R_grid[i])\n\n\t #Avoiding this statement 2nd time around.\n\t starter = -1\n\n\t #Sending the baton to the next writer.\n\t pypar.send(baton, send_to, use_buffer = True)\n\t\n\t#Last file write. (Receive, but not send baton.)\n\telif i == my_tasks[-1] and ender == my_id :\n\t #Receiving the baton from the previous writer.\n\t pypar.receive(receive_from, buffer = baton)\n\n\t #Write to file.\n\t tise.save_eigenfunctions_R(E, V, R_grid[i])\n\t\n\t#The rest of the file writes.\n\telse:\n\t #Receiving the baton from the previous writer.\n\t pypar.receive(receive_from, buffer = baton)\n\n\t #Write to file.\n\t tise.save_eigenfunctions_R(E, V, R_grid[i])\n\n\t #Sending the baton to the next writer.\n\t pypar.send(baton, send_to, use_buffer = True)\n\t\n\t\n\t#Showing the progress of the work.\n\tif my_id == 0:\n\t nice_stuff.status_bar(\"Electronic BO calculations\", \n\t\ti, len(my_tasks))\n #----------------------------\n \n #Letting everyone catch up. \n pypar.barrier()\n\n #Since the sign of the eigenfunctions are completely arbitrary, one must\n #make sure they do not change sign from one R to another.\n if my_id == 0:\n\ttise.align_all_phases()\n \n #Letting 0 catch up. 
\n pypar.barrier()", "def savez(d,file):\n np.savez(file,row=d.row,col=d.col,data=d.data,shape=d.shape)", "def load_zeta(self, **kwargs):\r\n dampfile = kwargs['damp']\r\n\r\n with open(dampfile) as f:\r\n for line in f:\r\n if line[0] != '$' and line[0] != 'i':\r\n row = line.split()\r\n row = list(map(float, row))\r\n self.zeta[int(row[0] - 1)] = 0.01 * row[1]", "def export_prop(self, d, title, t):\r\n self._check_out(title)\r\n np.savez_compressed(os.path.join(self.out_dir, title, title + '_' + str(t)), d)", "def read_results():\r\n with open(\"packing.nfo\", \"r\") as fin:\r\n fin.readline()\r\n fin.readline()\r\n por_theory = float(fin.readline().split()[2])\r\n por_final = float(fin.readline().split()[2])\r\n print('Theoretical porosity:', por_theory)\r\n print('Final porosity:', por_final)\r\n with open(\"packing.xyzd\", \"rb\") as fin:\r\n btxt = fin.read()\r\n txt = list(struct.unpack(\"<\" + \"d\" * (len(btxt) // 8), btxt))\r\n data = array(zip(*[iter(txt)] * 4))\r\n data[:, 3] = data[:, 3] * \\\r\n ((1 - por_final) / (1 - por_theory))**(1 / 3)\r\n return data", "def writestat(self, outfile=None, hubble=None):\n s = self._base()\n mindarkmass = min(s.dark['mass'])\n\n if hubble is None:\n hubble = s.properties['h']\n\n if outfile is None: outfile = self._base().filename+'.stat'\n print \"write stat file to \", outfile\n fpout = open(outfile, \"w\")\n header = \"#Grp N_tot N_gas N_star N_dark Mvir(M_sol) Rvir(kpc) GasMass(M_sol) StarMass(M_sol) DarkMass(M_sol) V_max R@V_max VelDisp Xc Yc Zc VXc VYc VZc Contam Satellite? False? ID_A\"\n print >> fpout, header\n for ii in np.arange(self._nhalos)+1:\n print '%d '%ii,\n sys.stdout.flush()\n h = self[ii].properties # halo index starts with 1 not 0\n## 'Contaminated'? means multiple dark matter particle masses in halo)\"\n icontam = np.where(self[ii].dark['mass'] > mindarkmass)\n if (len(icontam[0]) > 0):\n contam = \"contam\"\n else:\n contam = \"clean\"\n## may want to add implement satellite test and false central breakup test.\n ss = \" \" # can adjust column spacing\n outstring = str(ii)+ss\n outstring += str(len(self[ii]))+ss+str(len(self[ii].g))+ss\n outstring += str(len(self[ii].s)) + ss+str(len(self[ii].dark))+ss\n outstring += str(h['m']/hubble)+ss+str(h['r']/hubble)+ss\n outstring += str(self[ii].g['mass'].in_units('Msol').sum())+ss\n outstring += str(self[ii].s['mass'].in_units('Msol').sum())+ss\n outstring += str(self[ii].d['mass'].in_units('Msol').sum())+ss\n outstring += str(h['vmax'])+ss+str(h['vmax_r']/hubble)+ss\n outstring += str(h['vrms'])+ss\n ## pos: convert kpc/h to mpc (no h).\n outstring += str(h['pos'][0][0]/hubble)+ss\n outstring += str(h['pos'][0][1]/hubble)+ss\n outstring += str(h['pos'][0][2]/hubble)+ss\n outstring += str(h['vel'][0][0])+ss+str(h['vel'][0][1])+ss\n outstring += str(h['vel'][0][2])+ss\n outstring += contam+ss\n outstring += \"unknown\" + \\\n ss # unknown means sat. 
test not implemented.\n outstring += \"unknown\"+ss # false central breakup.\n print >> fpout, outstring\n fpout.close()", "def zonal_stats(in_path, raster, grid_id_name='GRIDMET_ID'):\n if not os.path.isfile(in_path):\n raise FileNotFoundError('Input summary CSV file given'+\\\n ' was invalid or not found')\n # look for fishnet created in 'in_path/spatial'\n path_root = os.path.split(in_path)[0]\n file_name = os.path.split(in_path)[1]\n # get variable names from input file prefix\n grid_var = file_name.split('_summ')[0]\n var_name = Path(raster).name.split('.')[0]\n # grid is in the \"spatial\" subdir of in_path\n grid_file = OPJ(path_root, 'spatial', 'grid.shp')\n # save zonal stats to summary CSV in same dir as raster as of version 0.3\n raster_root = os.path.split(raster)[0]\n out_file = OPJ(raster_root, 'zonal_stats.csv')\n\n # this error would only occur when using within Python \n if not os.path.isfile(grid_file):\n raise FileNotFoundError(\n os.path.abspath(grid_file),\n '\\ndoes not exist, create it using spatial.make_grid first'\n )\n print(\n 'Calculating', grid_var, 'zonal means for', var_name\n )\n\n # calc zonal stats and open for grid IDs\n with fiona.open(grid_file, 'r') as source:\n zs = zstats(source, raster, all_touched=True)\n grid_ids = [f['properties'].get(grid_id_name) for f in source]\n\n # get just mean values, zonal_stats can do other stats...\n means = [z['mean'] for z in zs]\n out_df = pd.DataFrame(\n data={\n grid_id_name: grid_ids, \n var_name: means\n }\n )\n out_df[grid_id_name] = out_df[grid_id_name].astype(int)\n # drop rows for cells outside of gridMET master grid\n out_df = out_df.drop(out_df[out_df[grid_id_name] == -999].index)\n\n # save or update existing csv file\n if not os.path.isfile(out_file):\n print(\n os.path.abspath(out_file),\n '\\ndoes not exist, creating file'\n )\n out_df.to_csv(out_file, index=False)\n else:\n # overwrite column values if exists, else append\n existing_df = pd.read_csv(out_file)\n existing_df[grid_id_name] = existing_df[grid_id_name].astype(int)\n if var_name in existing_df.columns:\n # may throw error if not same size as original grid\n try:\n existing_df.update(out_df)\n existing_df.to_csv(out_file, index=False) \n except:\n print('Zonal stats for this variable already exist but they',\n 'appear to have been calculated with a different grid',\n 'overwriting existing file at:\\n',\n os.path.abspath(out_file)\n )\n out_df.to_csv(out_file, index=False)\n else:\n existing_df = existing_df.merge(out_df, on=grid_id_name)\n #existing_df = pd.concat([existing_df, out_df], axis=1).drop_duplicates()\n existing_df.to_csv(out_file, index=False)", "def slabldos(h,energies=np.linspace(-1.0,1.0,40),delta=None,nk=40):\n if h.dimensionality!=2: raise # nope\n ds = ldosmap(h,energies=energies,delta=delta,nk=nk)\n if len(ds[0])!=len(h.geometry.z): \n print(\"Wrong dimensions\",len(ds[0]),len(h.geometry.z))\n raise\n f = open(\"DOSMAP.OUT\",\"w\")\n f.write(\"# energy, index, DOS, zposition\\n\")\n for ie in range(len(energies)):\n for iz in range(len(h.geometry.z)):\n f.write(str(energies[ie])+\" \")\n f.write(str(iz)+\" \")\n f.write(str(ds[ie,iz])+\" \")\n f.write(str(h.geometry.z[iz])+\"\\n\")\n f.close()\n return energies,np.transpose(ds) # retunr LDOS ", "def write_to_file_z(path):\n path1 = path + \"/z_Macros\"\n if not os.path.exists(path1):\n os.mkdir(path1)\n for e in range(int(e_steps)+1):\n filename = \"x0y0z%ske%s.mac\" %(dz*z + z_min, e*de + e_min)\n path = path1\n fullpath = os.path.join(path, filename)\n f = open(fullpath, 
\"w\")\n f.write('/rat/physics_list/OmitMuonicProcesses true\\n')\n f.write(\"/rat/physics_list/OmitHadronicProcesses true \\n\")\n f.write(\"\\n\")\n f.write(\"\\n\")\n f.write('/rat/db/set DETECTOR geo_file \"geo/snoplus.geo\"\\n')\n f.write('/rat/db/set GEO[scint] material \"labppo_scintillator\"\\n')\n f.write('/rat/db/set DAQ dqxx_info 0 \\n')\n f.write(\"/run/initialize \\n\")\n f.write(\"\\n\")\n f.write(\"\\n\")\n f.write(\"/rat/proc frontend\\n\")\n f.write(\"/rat/proc trigger\\n\")\n f.write(\"/rat/proc eventbuilder\\n\")\n f.write(\"/rat/proc count\\n\")\n f.write(\"/rat/procset update 100\\n\")\n f.write(\"/rat/proc calibratePMT\\n\")\n f.write(\"/rat/proc scintFitter\\n\")\n f.write(\"/rat/proclast outroot\\n\")\n f.write('/rat/procset file \"x0y0z%ske%s.root\"\\n' %(dz*z + z_min, e*de + e_min))\n f.write(\"\\n\")\n f.write(\"\\n\")\n f.write(\"/generator/add combo gun:point:poisson\\n\")\n f.write(\"# want random, isotropic momentum distribution; energy given in MeV\\n\")\n f.write(\"/generator/vtx/set e- 0 0 0 %s\\n\" %(e*de + e_min))\n f.write(\"# position given in Cartesians, relative to detector center, in mm\\n\")\n f.write(\"/generator/pos/set 0 0 %s\\n\" % (dz*z + z_min))\n f.write(\"/generator/rate/set 1\\n\")\n f.write(\"\\n\")\n f.write(\"\\n\")\n f.write(\"/rat/run/start %s\\n\" %(n))\n f.write(\"exit\")", "def write_anharm_inp(readfile='reac1_l1.log', writefile='anharm.inp', anlevel='ignore'):\n zmat = gauss_anharm_inp(readfile, anlevel)\n io.write_file(zmat, writefile)\n return", "def read_elevation(filepath):\n h = 83 #distance between elevation measures\n N = 1201\n theta = np.pi / 6\n elev_array = np.zeros((N, N))\n grad_array = np.zeros((N, N, 2))\n I_array = np.zeros((N, N))\n # Read the elevation data as described in Question 3, and store in the elvation array\n f = open(filepath, \"rb\")\n for i in range(N):\n for j in range(N):\n buf = f.read(2)\n val = struct.unpack(\">h\", buf)[0]\n elev_array[i][j] = val\n f.close()\n # Populate the gradient array\n for i in range(N):\n for j in range(N):\n #This if statements handle the border cases\n if j == 0:\n grad_array[i][j][0] = (elev_array[i][j+1] - elev_array[i][j]) / h\n elif j == N - 1:\n grad_array[i][j][0] = (elev_array[i][j] - elev_array[i][j-1]) / h\n else:\n grad_array[i][j][0] = (elev_array[i][j+1] - elev_array[i][j-1]) / (2 * h)\n \n if i == 0:\n grad_array[i][j][1] = (elev_array[i][j] - elev_array[i-1][j]) / h\n elif i == N - 1:\n grad_array[i][j][1] = (elev_array[i-1][j] - elev_array[i][j]) / h\n else:\n grad_array[i][j][1] = (elev_array[i-1][j] - elev_array[i+1][j]) / (2 * h)\n \n # Populate intensities\n for i in range(N):\n for j in range(N):\n denom = np.sqrt(grad_array[i][j][0] ** 2 + grad_array[i][j][1] ** 2 + 1)\n numer = np.cos(theta) * grad_array[i][j][0] + np.sin(theta) * grad_array[i][j][1]\n I_array[i][j] = -1 * numer / denom\n \n return elev_array, I_array", "def get_planar_data(th_object, start, end, filename, path):\n res = th_object.get_planarity(start, end, filename, remove_planar_nodes=0)\n with open(path, 'w') as f:\n f.write(\"Time, Planar(1) or not (0),meshedness\\n\")\n for k, v in res.items():\n f.write(str(k))\n for ke, va in v.items():\n f.write(\",\" + str(va))\n f.write(\"\\n\")", "def LoadMAGIKPSD(filename, path=\"\", friendly_name=\"\", collapse_y=True, auto_PolState=False, PolState='', flip=True, transpose=True, **kw):\n lookup = {\"DOWN_DOWN\":\"_down_down\", \"UP_DOWN\":\"_up_down\", \"DOWN_UP\":\"_down_up\", \"UP_UP\":\"_up_up\", \"entry\": \"\"}\n if '.nxz' in 
filename:\n file_obj = hzf.File(filename)\n else:\n # nexus\n file_obj = h5py.File(os.path.join(path, filename))\n\n #if not (len(file_obj.detector.counts.shape) == 2):\n # not a 2D object!\n # return\n for entryname, entry in file_obj.items():\n active_slice = slice(None, DETECTOR_ACTIVE[0], DETECTOR_ACTIVE[1])\n counts_value = entry['DAS_logs']['areaDetector']['counts'][:, 1:DETECTOR_ACTIVE[0]+1, :DETECTOR_ACTIVE[1]]\n dims = counts_value.shape\n print(dims)\n ndims = len(dims)\n if auto_PolState:\n PolState = lookup.get(entryname, \"\")\n # force PolState to a regularized version:\n if not PolState in lookup.values():\n PolState = ''\n #datalen = file_obj.detector.counts.shape[0]\n if ndims == 2:\n if DEBUG: print(\"2d\")\n ypixels = dims[0]\n xpixels = dims[1]\n elif ndims >= 3:\n if DEBUG: print(\"3d\")\n frames = dims[0]\n xpixels = dims[1]\n ypixels = dims[2]\n\n creation_story = \"LoadMAGIKPSD('{fn}', path='{p}')\".format(fn=filename, p=path, aPS=auto_PolState, PS=PolState)\n\n # doesn't really matter; changing so that each keyword (whether it took the default value\n # provided or not) will be defined\n # if not PolState == '':\n # creation_story += \", PolState='{0}'\".format(PolState)\n # creation_story += \")\"\n\n\n if ndims == 2: # one of the dimensions has been collapsed.\n info = []\n info.append({\"name\": \"xpixel\", \"units\": \"pixels\", \"values\": arange(xpixels) }) # reverse order\n info.append({\"name\": \"theta\", \"units\": \"degrees\", \"values\": entry['DAS_logs']['sampleAngle']['softPosition'][()] })\n info.extend([\n {\"name\": \"Measurements\", \"cols\": [\n {\"name\": \"counts\"},\n {\"name\": \"pixels\"},\n {\"name\": \"monitor\"},\n {\"name\": \"count_time\"}]},\n {\"PolState\": PolState, \"filename\": filename, \"start_datetime\": dateutil.parser.parse(file_obj.attrs.get('file_time')), \"friendly_name\": friendly_name,\n \"CreationStory\":creation_story, \"path\":path, \"det_angle\":entry['DAS_logs']['detectorAngle']['softPosition'][()]}]\n )\n data_array = zeros((xpixels, ypixels, 4))\n mon = entry['DAS_logs']['counter']['liveMonitor'][()]\n count_time = entry['DAS_logs']['counter']['liveTime'][()]\n if ndims == 2:\n mon.shape = (1,) + mon.shape # broadcast the monitor over the other dimension\n count_time.shape = (1,) + count_time.shape\n counts = counts_value\n if transpose == True: counts = counts.swapaxes(0,1)\n if flip == True: counts = flipud(counts)\n data_array[..., 0] = counts\n #data_array[..., 0] = file_obj.detector.counts\n data_array[..., 1] = 1\n data_array[..., 2] = mon\n data_array[..., 3] = count_time\n # data_array[:,:,4]... I wish!!! Have to do by hand.\n data = MetaArray(data_array, dtype='float', info=info)\n data.friendly_name = friendly_name # goes away on dumps/loads... just for initial object.\n\n elif ndims == 3: # then it's an unsummed collection of detector shots. 
Should be one sample and detector angle per frame\n if collapse_y == True:\n info = []\n info.append({\"name\": \"xpixel\", \"units\": \"pixels\", \"values\": arange(xpixels) }) # reverse order\n info.append({\"name\": \"theta\", \"units\": \"degrees\", \"values\": entry['DAS_logs']['sampleAngle']['softPosition'][()] })\n info.extend([\n {\"name\": \"Measurements\", \"cols\": [\n {\"name\": \"counts\"},\n {\"name\": \"pixels\"},\n {\"name\": \"monitor\"},\n {\"name\": \"count_time\"}]},\n {\"PolState\": PolState, \"filename\": filename, \"start_datetime\": dateutil.parser.parse(file_obj.attrs.get('file_time')), \"friendly_name\": friendly_name,\n \"CreationStory\":creation_story, \"path\":path, \"det_angle\":entry['DAS_logs']['detectorAngle']['softPosition'][()]}]\n )\n data_array = zeros((xpixels, frames, 4))\n mon = entry['DAS_logs']['counter']['liveMonitor'][()]\n count_time = entry['DAS_logs']['counter']['liveTime'][()]\n if ndims == 3:\n mon.shape = (1,) + mon.shape # broadcast the monitor over the other dimension\n count_time.shape = (1,) + count_time.shape\n counts = numpy.sum(counts_value, axis=2)\n if transpose == True: counts = counts.swapaxes(0,1)\n if flip == True: counts = flipud(counts)\n data_array[..., 0] = counts\n #data_array[..., 0] = file_obj.detector.counts\n data_array[..., 1] = 1\n data_array[..., 2] = mon\n data_array[..., 3] = count_time\n # data_array[:,:,4]... I wish!!! Have to do by hand.\n data = MetaArray(data_array, dtype='float', info=info)\n data.friendly_name = friendly_name # goes away on dumps/loads... just for initial object.\n else: # make separate frames\n infos = []\n data = []\n samp_angle = entry['DAS_logs']['sampleAngle']['softPosition'][()]\n if samp_angle.shape[0] == 1:\n samp_angle = numpy.ones((frames,)) * samp_angle\n det_angle = entry['DAS_logs']['detectorAngle']['softPosition'][()]\n if det_angle.shape[0] == 1:\n det_angle = numpy.ones((frames,)) * det_angle\n for i in range(frames):\n samp_angle = entry['DAS_logs']['sampleAngle']['softPosition'][i]\n det_angle = entry['DAS_logs']['detectorAngle']['softPosition'][i]\n info = []\n info.append({\"name\": \"xpixel\", \"units\": \"pixels\", \"values\": range(xpixels) })\n info.append({\"name\": \"ypixel\", \"units\": \"pixels\", \"values\": range(ypixels) })\n info.extend([\n {\"name\": \"Measurements\", \"cols\": [\n {\"name\": \"counts\"},\n {\"name\": \"pixels\"},\n {\"name\": \"monitor\"},\n {\"name\": \"count_time\"}]},\n {\"PolState\": PolState, \"filename\": filename, \"start_datetime\": entry['start_time'][()], \"friendly_name\": friendly_name,\n \"CreationStory\":creation_story, \"path\":path, \"samp_angle\": samp_angle, \"det_angle\": det_angle}]\n )\n data_array = zeros((xpixels, ypixels, 4))\n mon = entry['DAS_logs']['counter']['liveMonitor'][i]\n count_time = entry['DAS_logs']['counter']['liveTime'][i]\n counts = counts_value[i]\n if flip == True: counts = flipud(counts)\n data_array[..., 0] = counts\n data_array[..., 1] = 1\n data_array[..., 2] = mon\n data_array[..., 3] = count_time\n # data_array[:,:,4]... I wish!!! Have to do by hand.\n subdata = MetaArray(data_array, dtype='float', info=info)\n subdata.friendly_name = friendly_name + (\"_%d\" % i) # goes away on dumps/loads... 
just for initial object.\n data.append(subdata)\n return data", "def make_alf_template():\n import alf.alf\n import fsps\n \n ssp = alf.alf.Alf()\n \n sp = fsps.StellarPopulation(zcontinuous=1)\n sp.params['logzsol'] = 0.2\n\n # Alf\n m = ssp.get_model(in_place=False, logage=0.96, zh=0.2, mgh=0.2)\n \n # FSPS\n w, spec = sp.get_spectrum(tage=10**0.96, peraa=True)\n \n # blue\n blue_norm = spec[w > 3600][0] / m[ssp.wave > 3600][0]\n red_norm = spec[w > 1.7e4][0] / m[ssp.wave > 1.7e4][0]\n \n templx = np.hstack([w[w < 3600], ssp.wave[(ssp.wave > 3600) & (ssp.wave < 1.7e4)], w[w > 1.7e4]])\n temply = np.hstack([spec[w < 3600]/blue_norm, m[(ssp.wave > 3600) & (ssp.wave < 1.7e4)], spec[w > 1.7e4]/red_norm])\n \n np.savetxt('alf_SSP.dat', np.array([templx, temply]).T, fmt='%.5e', header='wave flux\\nlogage = 0.96\\nzh=0.2\\nmgh=0.2\\nfsps: w < 3600, w > 1.7e4')", "def main():\n regexham = r'\\s+\\((\\d+,\\s*\\d+)\\)\\s+([\\-+]?\\d+\\.\\d+[eEdD]?[\\-+]?\\d+)' #to extract the Hamiltonian.\n root = '.'\n #fname = 'output_files/'\n ciffci = CIFlow_Reader('testfci.dat', regexp = regexham , read_ham= True)\n ciffcipar = CIFlow_Reader( 'psi0_output10outputfci.dat', regexp = regexham , read_ham = True)\n #print ciffci.calc_overlap(cifdoci)\n #print e.get_groundstate('00000000000011|00000000000011') \n\n psir = rp.PsiReader('psi0_output10.dat', isbig = False, numorbs = -1 , read_ints = False)\n\n detlist = dw.cimain(psir.values['nalpha'],psir.values['nbeta'], psir.values['norb'], [range(1,psir.values['nalpha']+psir.values['nbeta']), []], [] , fname = 'determinants.dat' ,ref = [lambda x , y , z : psir.get_hf_orbs()] , add_frozen = 0, write = False) #CISDDOCI\n count = 0\n for det in detlist:\n for det2 in detlist:\n #+ because the eigenvectors have already a different phasefactor of 1.\n if abs(ciffci.get_mat_element(det[0]+'|'+det[1], det2[0]+'|'+det2[1]) - ciffcipar.get_mat_element(det[0]+'|'+det[1], det2[0]+'|'+det2[1]) ) > 1e-10 :\n print 'difference in hamiltonian row: ' , det[0]+'|'+det[1] , \" col: \" , det2[0]+'|'+det2[1] , 'fci: ', ciffci.get_mat_element(det[0]+'|'+det[1], det2[0]+'|'+det2[1]) , 'fciaddres: ' , ciffcipar.get_mat_element(det[0]+'|'+det[1],det2[0]+'|'+det2[1]) \n count += 1\n print 'There were ' , count , ' different elements'", "def readOutputfile(filename, verbose=False):\n\n # -----------------------------------------------------------------------------\n # Defining the classes for data structure\n T_Simulation = namedtuple('Simulation', ['step'])\n T_Step = namedtuple('Step', ['element', 'node'])\n\n T_Displacement = namedtuple('Displacement', ['ux', 'uy'])\n\n T_Element = namedtuple('Element', ['gp', 'avstrain', 'avstress', 'eqstrain'])\n T_GP = namedtuple('GP', ['stress', 'strain'])\n T_Stresses = namedtuple('Stresses', ['xx', 'yy', 'zz', 'yz', 'zx', 'xy'])\n T_Strains = namedtuple('Strains', ['xx', 'yy', 'zz', 'yz', 'zx', 'xy'])\n # -----------------------------------------------------------------------------\n\n nSteps = 0 # Simulation step counter\n\n SimData = T_Simulation(list())\n\n with open(filename) as f:\n line = f.readline() # Read in the first line of the input file\n while True: # Loop over all lines of the input file\n # Read the nodes displacements\n #line = f.readline()\n #print(line)\n if line == 'DofManager output:\\n': # String starts a list of nodes displacement information\n nSteps += 1 # The above string starts a new simulation step\n line = f.readline() # Cancel ---------- seperator\n line = f.readline()\n Nodes = list() # Initialize/clear list of 
nodes\n\n while line != '\\n' and line != 'Element output:\\n': # Strings that finish the list\n #\t\t\t\tnNode = int(line.strip().split()[1]) # Node id\n line = f.readline()\n dim1 = float(line.strip().split()[3]) # Displacement dim1\n line = f.readline()\n dim2 = float(line.strip().split()[3]) # Displacement dim2\n Nodes.append(\n T_Displacement(dim1, dim2)) # Append displacements of the current node to the node list\n line = f.readline()\n\n\n if verbose:\n print('Step {}: Dofs completed.\\n'.format(nSteps))\n print('---------------------------------\\n')\n\n # Read the stresses an strains at Gauss points\n elif line == 'Element output:\\n': # String starts a list elements, GPs, strains and stresses\n line = f.readline() # Cancel ---------- seperator\n line = f.readline()\n Elements = list() # Initialize/clear list of elements\n\n while line != '\\n' and line != '\\tR E A C T I O N S O U T P U T:\\n': # Strings that finish the list\n #\t\t\t\t\tnElement = line.strip().split()[2] # Element id\n line = f.readline()\n GPs = T_Element(list(), 0, 0, 0) # List of Gauss points\n\n while line != '\\n' and line.strip().split()[0] == 'GP': # String that starts a new GP\n #\t\t\t\t\t\tnGP = int(line.strip().split()[1].split('.')[1]) # GP id\n tmp = [float(i) for i in line.strip().split()[4:10]] # Read the strains\n strain = T_Strains(tmp[0], tmp[1], tmp[2], tmp[3], tmp[4], tmp[5])\n line = f.readline()\n tmp = [float(i) for i in line.strip().split()[1:7]] # Read the stresses\n stress = T_Stresses(tmp[0], tmp[1], tmp[2], tmp[3], tmp[4], tmp[5])\n GPs.gp.append(\n T_GP(stress, strain)) # Append stresses and strains of the current GP to the GP list\n line = f.readline()\n\n\n Elements.append(GPs) # Append GP list of the current element to the element list\n\n if verbose:\n print('Step {}: GPs completed.\\n'.format(nSteps))\n print('---------------------------------\\n')\n\n SimData.step.append(T_Step(Elements, Nodes)) # Append element and node list of the current step to the step list\n #print('the file input ends')\n #print(nSteps)\n # only needed with a while loop\n # Jump over the lines until we reach the next time step (Caught by if-clause)\n try:\n line = f.readline() # Will generate an error if files end is reached\n if line == \"\":\n raise EOFError\n except:\n if verbose: print(\"End of file reached.\\n\")\n break # Break the 'while True' loop\n\n # -----------------------------------------------------------------------------\n\n\n print('averaging the stress')\n # Averaging of strains and stress of GPs of each element\n for istep in range(len(SimData.step)):\n\n for ielement in range(len(SimData.step[istep].element)):\n print(len)\n # Initialization before each element\n stresses = np.array([0., 0., 0., 0., 0., 0.])\n strains = np.array([0., 0., 0., 0., 0., 0.])\n\n for igp in range(len(SimData.step[istep].element[ielement])):\n print(igp)\n # Add up all data of all GPs\n #stresses[:] += SimData.step[istep].element[ielement].gp[igp].stress[:]\n strains[:] += SimData.step[istep].element[ielement].gp[igp].strain[:]\n\n # Divide GP sum by number of GPs\n stresses /= len(SimData.step[istep].element[ielement])\n strains /= len(SimData.step[istep].element[ielement])\n # Replace the field (initialized with 0) with new information\n SimData.step[istep].element[ielement] = SimData.step[istep].element[ielement]._replace(\n avstress=T_Stresses(stresses[0], stresses[1], stresses[2], stresses[3], stresses[4], stresses[5]))\n SimData.step[istep].element[ielement] = 
SimData.step[istep].element[ielement]._replace(\n avstrain=T_Strains(strains[0], strains[1], strains[2], strains[3], strains[4], strains[5]))\n print('Analysis finished')\n return SimData", "def make_digital_map(self):\n self.uni.home(axis='X')\n time.sleep(10.0)\n azimuths = []\n for x in numpy.arange(self.azimuth.xmin, self.azimuth.xmax + self.azimuth.xinc,\n self.azimuth.xinc):\n if x > self.azimuth.xmax:\n x = self.azimuth.xmax\n azimuths.append(x)\n azimuths = numpy.array(azimuths)\n wait = (abs(azimuths[0]-self.uni.pos_az)/self.azimuth.xslew_vel) + 1.0\n self.uni.set_azimuth(azimuths[0], self.azimuth.xslew_vel)\n logger.info(\"Sleeping for %.2f seconds while stage gets to start of map\" % wait)\n time.sleep(wait)\n\n fp = open(self.filename, 'w')\n header = self.make_digital_header()\n fp.write(header)\n plt.ion()\n plt.plot([self.azimuth.xmin, self.azimuth.xmax], [0, 0], 'r-')\n plt.xlim(self.azimuth.xmin, self.azimuth.xmax)\n plt.ylim(-0.5, 6)\n plt.draw()\n for az in azimuths:\n wait = (abs(az-self.uni.pos_az)/self.azimuth.xmap_vel) + 1.0\n self.uni.set_azimuth(az, self.azimuth.xmap_vel)\n logger.info(\"Sleeping for %.2f seconds while stage gets to %.1f degrees\" % (wait, az))\n time.sleep(wait)\n fp.write(\"%.3f\" % az)\n #data = self.take_readings()\n for i, freq in enumerate(self.freq_list):\n self.syn.set_freq(freq)\n for dig_channel in range(8):\n for dig in range(8):\n if dig != dig_channel:\n self.labjack.digital_output(dig, 1)\n time.sleep(0.050)\n self.labjack.digital_output(dig_channel, 0)\n time.sleep(0.050)\n ratio, phase = self.vv.measure_vector_averaged_transmission(self.average)\n fp.write(\",%.6g,%.6g\" % (ratio, phase))\n logger.info(\"Az: %.2f, Freq: %.3f, Ratio: %g; Phase: %g\" % (az, freq/1e9, ratio, phase))\n plt.plot(az, ratio, self.plot_symbols[i])\n plt.draw()\n fp.write('\\n')\n \n time.sleep(10.0)\n self.uni.home(axis='X')\n logger.info(\"Map Completed, Saving data file %s\" % self.filename)\n fp.close()", "def write_input(self, suffix=''):\n \n out_fname = \"input.plasma_1d\"+suffix\n with open(out_fname, 'w+') as outfile:\n outfile.write('# Input file for ASCOT containing radial 1D information of plasma temperature,density and toroidal rotation \\n')\n outfile.write('# range must cover [0,1] of normalised poloidal rho. It can exceed 1. 
\\n')\n outfile.write('# {:s} (first 3 lines are comment lines) \\n'.format(time.strftime('%d%b%y')))\n outfile.write('{:d}\\t{:1d}\\t# Nrad,Nion \\n'.format(self.nrho,self.nion))\n strcoll = str(1)+' ' # for electrons\n strZ=''\n strA=''\n for i in range(self.nion):\n strZ += str(self.Z[i]) + ' '\n strA += str(self.A[i]) + ' '\n strcoll += str(int(self.coll_mode[i])) + ' '\n strZ +='\\t\\t# ion Znum \\n'\n strA +='\\t\\t# ion Amass \\n'\n strcoll += '# collision mode (0= no colls, 1=Maxw colls, 2=binary colls, 3=both colls) 1st number is for electrons \\n'\n outfile.write(strZ)\t\t\t\t\n outfile.write(strA)\n outfile.write(strcoll)\n \n lab_len=15\n strlabel='RHO (pol)'.ljust(lab_len)+'Te (eV)'.ljust(lab_len)+'Ne (1/m3)'.ljust(lab_len)+'Vtor_I (rad/s)'.ljust(lab_len)+\\\n 'Ti1 (eV)'.ljust(lab_len)\n for i in range(self.nion):\n tmpstr ='Ni{:d} (1/m3)'.format(i+1)\n strlabel+=tmpstr.ljust(lab_len)\n strlabel+='\\n'\n outfile.write(strlabel)\n data=np.array((self.rho, self.te, self.ne, self.vt, self.ti), dtype=float)\n data = np.concatenate([data, [self.ni[i,:] for i in range(self.nion)]])\n\n data=np.transpose(data)\n #print(data)\n #print(\"if i don't print, it won't work\")\n np.savetxt(outfile, data, fmt='%.5e')", "def padova_interpolated_isomake(directories, bands_dict, output_filename,\n bands_ordered=None):\n\n if isinstance(directories, basestring):\n directories = [directories]\n\n if bands_ordered is None:\n bands_ordered = bands_dict.values()\n\n output_obj = open(output_filename, \"w\")\n\n header_string = \"#\\t[M/H]\\tMi\\tlogAge\\tlogTe\\tlogg\\tJacobian\"\n for band in bands_ordered:\n header_string += \"\\t{}\".format(band)\n header_string += \"\\tinner_count\\touter_count\\n\"\n output_obj.write(header_string)\n\n iso_metal_dict = {}\n bands_metal_dicts = {}\n for band in bands_dict.keys():\n bands_metal_dicts[band] = {}\n\n # instead do this on band-by-band basis? 
*******************\n\n for direc in directories:\n iso_files_gz = gb.glob(\"{}/*.dat.gz\".format(direc.rstrip(\"/\")))\n iso_files = gb.glob(\"{}/*.dat\".format(direc.rstrip(\"/\")))\n\n # check for metallicity of each file\n # and check which bands it has\n\n for iso_file1 in iso_files_gz:\n metal = None\n iso_data = gz.open(\"{0}\".format(iso_file1))\n for line in iso_data:\n split_line = line.split()\n if \"[M/H]\" in split_line:\n metal = float(split_line[split_line.index(\"[M/H]\")+2])\n if \"M_ini\" in split_line:\n for band in bands_metal_dicts.keys():\n if band in split_line:\n bands_metal_dicts[band][metal] = iso_file1\n\n for iso_file1 in iso_files:\n metal = None\n iso_data = open(\"{0}\".format(iso_file1), \"r\")\n for line in iso_data:\n split_line = line.split()\n if \"[M/H]\" in split_line:\n metal = float(split_line[split_line.index(\"[M/H]\")+2])\n if \"M_ini\" in split_line:\n for band in bands_metal_dicts.keys():\n if band in split_line:\n bands_metal_dicts[band][metal] = iso_file1\n\n for metal in bands_metal_dicts[bands_metal_dicts.keys()[0]]:\n filenames = []\n for band in bands_metal_dicts:\n if metal in bands_metal_dicts[band]:\n if bands_metal_dicts[band][metal] not in filenames:\n filenames.append(bands_metal_dicts[band][metal])\n else:\n break\n else:\n iso_metal_dict[metal] = filenames\n\n print(iso_metal_dict)\n keys = iso_metal_dict.keys()\n keys.sort()\n\n if len(keys) > 2:\n # iso_metal_weights=dict(zip(keys, np.gradient(np.array(keys)) ) )\n # in numpy 1.9.0 gradient has changed to use second order behaviour\n # at boundaries which gives wrong results in this context\n iso_metal_weights = dict(zip(keys,\n replacement_gradient(np.array(keys))))\n else:\n iso_metal_weights = dict(zip(keys, np.ones(len(keys))))\n print(\"metals and weights: \", iso_metal_weights)\n\n# interp in metallicity order\n\n for key in keys:\n iso_interp(iso_metal_dict[key], key, iso_metal_weights[key],\n output_obj, bands_dict, bands_ordered)\n\n output_obj.close()", "def plot_initial_state(input_file_name='initial_state.nc',\n output_file_name='initial_state.png'):\n\n # load mesh variables\n chunks = {'nCells': 32768, 'nEdges': 32768}\n ds = xarray.open_dataset(input_file_name, chunks=chunks)\n nCells = ds.sizes['nCells']\n nEdges = ds.sizes['nEdges']\n nVertLevels = ds.sizes['nVertLevels']\n\n fig = plt.figure()\n fig.set_size_inches(16.0, 12.0)\n plt.clf()\n\n print('plotting histograms of the initial condition')\n print('see: init/initial_state/initial_state.png')\n d = datetime.datetime.today()\n txt = \\\n 'MPAS-Ocean initial state\\n' + \\\n 'date: {}\\n'.format(d.strftime('%m/%d/%Y')) + \\\n 'number cells: {}\\n'.format(nCells) + \\\n 'number cells, millions: {:6.3f}\\n'.format(nCells / 1.e6) + \\\n 'number layers: {}\\n\\n'.format(nVertLevels) + \\\n ' min val max val variable name\\n'\n\n plt.subplot(3, 3, 2)\n varName = 'maxLevelCell'\n var = ds[varName]\n maxLevelCell = var.values - 1\n xarray.plot.hist(var, bins=nVertLevels - 4)\n plt.ylabel('frequency')\n plt.xlabel(varName)\n txt = '{}{:9.2e} {:9.2e} {}\\n'.format(txt, var.min().values,\n var.max().values, varName)\n\n plt.subplot(3, 3, 3)\n varName = 'bottomDepth'\n var = ds[varName]\n xarray.plot.hist(var, bins=nVertLevels - 4)\n plt.xlabel(varName)\n txt = '{}{:9.2e} {:9.2e} {}\\n'.format(txt, var.min().values,\n var.max().values, varName)\n\n cellsOnEdge = ds['cellsOnEdge'].values - 1\n cellMask = np.zeros((nCells, nVertLevels), bool)\n edgeMask = np.zeros((nEdges, nVertLevels), bool)\n for k in 
range(nVertLevels):\n cellMask[:, k] = k <= maxLevelCell\n cell0 = cellsOnEdge[:, 0]\n cell1 = cellsOnEdge[:, 1]\n edgeMask[:, k] = np.logical_and(np.logical_and(cellMask[cell0, k],\n cellMask[cell1, k]),\n np.logical_and(cell0 >= 0,\n cell1 >= 0))\n cellMask = xarray.DataArray(data=cellMask, dims=('nCells', 'nVertLevels'))\n edgeMask = xarray.DataArray(data=edgeMask, dims=('nEdges', 'nVertLevels'))\n\n plt.subplot(3, 3, 4)\n varName = 'temperature'\n var = ds[varName].isel(Time=0).where(cellMask)\n xarray.plot.hist(var, bins=100, log=True)\n plt.ylabel('frequency')\n plt.xlabel(varName)\n txt = '{}{:9.2e} {:9.2e} {}\\n'.format(txt, var.min().values,\n var.max().values, varName)\n\n plt.subplot(3, 3, 5)\n varName = 'salinity'\n var = ds[varName].isel(Time=0).where(cellMask)\n xarray.plot.hist(var, bins=100, log=True)\n plt.xlabel(varName)\n txt = '{}{:9.2e} {:9.2e} {}\\n'.format(txt, var.min().values,\n var.max().values, varName)\n\n plt.subplot(3, 3, 6)\n varName = 'layerThickness'\n var = ds[varName].isel(Time=0).where(cellMask)\n xarray.plot.hist(var, bins=100, log=True)\n plt.xlabel(varName)\n txt = '{}{:9.2e} {:9.2e} {}\\n'.format(txt, var.min().values,\n var.max().values, varName)\n\n plt.subplot(3, 3, 7)\n varName = 'rx1Edge'\n var = ds[varName].isel(Time=0).where(edgeMask)\n maxRx1Edge = var.max().values\n xarray.plot.hist(var, bins=100, log=True)\n plt.ylabel('frequency')\n plt.xlabel('Haney Number, max={:4.2f}'.format(maxRx1Edge))\n txt = '{}{:9.2e} {:9.2e} {}\\n'.format(txt, var.min().values,\n var.max().values, varName)\n\n font = FontProperties()\n font.set_family('monospace')\n font.set_size(12)\n print(txt)\n plt.subplot(3, 3, 1)\n plt.text(0, 1, txt, verticalalignment='top', fontproperties=font)\n plt.axis('off')\n\n plt.tight_layout(pad=4.0)\n\n plt.savefig(output_file_name, bbox_inches='tight', pad_inches=0.1)", "def write_sum(self):\n with open('{}_Simulation_Fst.dat'.format(self.simulation_window), 'w+') as output:\n for line in self.fst_data:\n output.write(line)", "def txt2hdf5_mudis(config, init_file=0, final_file=100, step=1, expo='100'):\n # --------SKYMAP--------------\n # Create the directory to save the results\n os.makedirs(os.path.dirname(cwd + '/config_files/'), exist_ok=True)\n\n alignment = add_align()\n\n # Extract skymap from alignment file\n skymap = np.zeros((len(alignment), 2))\n\n for i in np.arange(len(skymap)):\n skymap[i] = alignment['Azimuth'][i], alignment['Zenith'][i]\n\n # Save Skymap information\n with h5py.File(cwd + '/config_files/skymap_radiance.h5', 'w') as sky:\n\n if not list(sky.items()):\n sky.create_dataset('/skymap', data=skymap)\n else:\n del sky['skymap']\n\n sky.create_dataset('/skymap', data=skymap, dtype='f4')\n sky['skymap'].attrs['Columns'] = 'Azimuth, Zenith'\n sky['skymap'].dims[0].label = 'channel'\n sky['skymap'].dims[1].label = 'Azimuth, Zenith'\n\n config['skymap'] = skymap\n\n # Save MUDIS file information\n\n # Import the radiance data from sensor\n files = sorted(\n glob.glob(config['raw_dir'] + '/radiance/{}/data/data_*.txt'.format(config['date'])))\n\n print('Total files in the directory: ' + str(len(files)) + ' files')\n\n ans = input('convert all files? 
(y/n): ')\n\n if ans == 'n':\n print('configure initial and final file index in the function options')\n else:\n init_file = 0\n final_file = len(files)\n\n for fil in np.arange(init_file, final_file):\n # Import the data from the file\n file = np.genfromtxt(files[fil], delimiter='', skip_header=11)\n\n # ------------RADIANCE DATA RAW---------------\n # create the radiance matrix\n data = np.zeros([113, 992])\n\n for i in np.arange(113):\n if str(alignment.iloc[i][3]) == 'nan':\n data[i] = np.nan\n else:\n try:\n data[i] = file[:, int(alignment.iloc[i][3] + config['channel_pixel_adj'])] #\n except:\n pass #\n # read the pixels index\n # in the alignment file and copy the\n # data in the radiance matrix']))\n\n # Correct time for the file UTC\n name = os.path.split(files[fil])\n\n # Read name of the file (correct time)\n time = name[1][6:25]\n # convert time to datetime format\n time = datetime.datetime.strptime(time, '%d.%m.%Y_%H_%M_%S')\n # print(time)\n new_name = datetime.datetime.strftime(time, '%Y%m%d_%H%M%S')\n\n with open(files[fil], 'r') as file:\n dat = file.readlines()\n\n # Extract information from .dat file\n exposure = int(dat[4][12:-1])\n NumAve = int(dat[7][17:-1])\n CCDTemp = int(dat[8][15:-1])\n NumSingMes = int(dat[10][27:-1])\n ElectrTemp = int(dat[9][23:-1])\n\n # Create the directory to save the results\n os.makedirs(os.path.dirname(config['str_dir'] + '/radiance/{}/data/').format(config['date']),\n exist_ok=True)\n\n if exposure == expo:\n # Create a file in the disk\n with h5py.File(config['str_dir'] + '/radiance/{}/data/{}.h5'.format(config['date'], new_name),\n 'w') as datos:\n\n if not list(datos.items()):\n # Create two datasets(use only one time)\n datos.create_dataset('/data', data=data, dtype='f4')\n datos.create_dataset('/skymap', data=skymap, dtype='f4')\n else:\n del datos['data']\n del datos['skymap']\n print('data deleted and corrected')\n datos.create_dataset('/data', data=data, dtype='f4')\n datos.create_dataset('/skymap', data=skymap, dtype='f4')\n\n # Add attributes to datasets\n datos['data'].attrs['time'] = str(time)\n datos['data'].attrs['Exposure'] = exposure\n datos['data'].attrs['NumAver'] = NumAve\n datos['data'].attrs['CCDTemp'] = CCDTemp\n datos['data'].attrs['NumSingMes'] = NumSingMes\n datos['data'].attrs['ElectrTemp'] = ElectrTemp\n datos['data'].attrs['Latitude'] = '52.39N'\n datos['data'].attrs['Longitude'] = '9.7E'\n datos['data'].attrs['Altitude'] = '65 AMSL'\n datos['data'].dims[0].label = 'channel'\n datos['data'].dims[1].label = 'wavelength'\n\n datos['skymap'].attrs['Columns'] = 'Azimuth, Zenith'\n\n datos.close()\n\n print('File ' + str(fil + init_file + 1) + ' of ' +\n str((final_file - init_file)) + ' saved')\n else:\n print('Exposure are not same', expo, exposure)\n break\n\n print('Completed')", "def ReadIndmfl(filename, fh_info):\n def divmodulo(x,n):\n \"We want to take modulo and divide in fortran way, so that it is compatible with fortran code\"\n return ( sign(x)* (abs(x)/n) , sign(x)*mod(abs(x),n))\n\n fh = open(filename, 'r')\n lines = [line.split('#')[0].strip() for line in fh.readlines()] # strip comments\n lines = (line for line in lines if line) # strip blank lines & create generator expression\n\n hybr_emin, hybr_emax, Qrenorm, projector = [float(x) for x in lines.next().split()[:4]]\n if projector>=4:\n hybr_emin = int(hybr_emin)\n hybr_emax = int(hybr_emax)\n matsubara, broadc, broadnc, om_npts, om_emin, om_emax = [float(e) for e in lines.next().split()[:6]]\n matsubara = int(matsubara) # recast these to 
integers\n om_npts = int(om_npts) \n\n atoms={}\n cps={}\n natom = int(lines.next())\n for i in range(natom):\n iatom, nL, locrot_shift = [int(x) for x in lines.next().split()]\n (shift,locrot) = divmodulo(locrot_shift,3)\n if locrot<0: locrot=3\n \n Ls, qsplits, icps = array([[int(x) for x in lines.next().split()] for i in range(nL)]).T\n new_zx = [[float(x) for x in lines.next().split()] for loro in range(abs(locrot))]\n vec_shift = [float(x) for x in lines.next().split()] if shift else None\n\n atoms[iatom] = (locrot, new_zx, vec_shift)\n for icp, L, qsplit in zip(icps, Ls, qsplits):\n if cps.has_key(icp):\n cps[icp] += [(iatom, L, qsplit)]\n else:\n cps[icp] = [(iatom, L, qsplit)]\n\n #####################################################\n # read the big block of siginds and cftrans\n ncp, maxdim, maxsize = [int(e) for e in lines.next().split()[:3]]\n legends={}\n siginds={}\n cftrans={}\n for i in range(ncp):\n icp, dim, size = [int(e) for e in lines.next().split()]\n legends[icp] = lines.next().split(\"'\")[1::2]\n siginds[icp] = array([[int(e) for e in lines.next().split()] for row in range(dim)])\n raw_cftrans = array([[float(e) for e in lines.next().split()] for row in range(dim)])\n cftrans[icp] = raw_cftrans[:,0::2] + raw_cftrans[:,1::2]*1j\n\n return (siginds, cftrans, cps)", "def create_dnz_file(args):\n\n file = open(args.o, 'w')\n\n file.write(\"% ----DATA VARIABLES----\\n\\n\")\n file.write(\"t=\" + str(args.t) + \";\" + \"%number of attributes\\n\")\n file.write(\"k=\" + str(args.k) + \";\" + \"%max length of the support set\\n\")\n file.write(\"n=\" + str(args.n) + \";\" + \"%number of positive instances\\n\")\n file.write(\"m=\" + str(args.m) + \";\" + \"%number of negative instances\\n\")\n file.write(\"c=\" + str(args.c) + \";\" + \"%number of atMostOne Constraints\\n\\n\")\n\n file.write(\"% ----OMEGAS----\\n\\n\")\n\n omega_p = generate_omega_data(args.t, args.n, args.b)\n file.write(\"omegap= \" + omega_to_mz(omega_p) + \"\\n\\n\")\n\n omega_n = generate_disjoint_omega_data(omega_p, args.m, args.b)\n file.write(\"omegan= \" + omega_to_mz(omega_n) + \"\\n\\n\")\n\n file.write(\"% ----CONSTRAINS----\\n\\n\")\n at_most_one = generate_at_most_one(int(args.t/2), args.c, 1, args.t)\n file.write(\"atMostOne=\" + at_most_one_to_mz(at_most_one))", "def atm_print():\n\n # Initialize file\n metric_filename = \"stdatmos_si.txt\"\n with open(metric_filename, 'w') as output_handle:\n\n # Create header\n output_handle.write(\"Geometric Geopotential Speed of\\n\")\n output_handle.write(\"Altitude Altitude Temperature Pressure Density Sound \\n\")\n output_handle.write(\" (m) (m) (K) (N/m**2) (kg/m**3) (m/s) \\n\")\n output_handle.write(\"-----------------------------------------------------------------------\\n\")\n\n # Loop through altitudes\n for i in range(51):\n\n # Calculate properties\n h = i*2000.0\n z, t, p, d = statsi(h)\n a = np.sqrt(1.4*287.0528*t)\n\n # Write to file\n write_string = \"{0:<10}{1:<13.5f}{2:<13.5f}{3:<14.5e}{4:<13.5e}{5:<8.4f}\\n\".format(h, z, t, p, d, a)\n output_handle.write(write_string)\n\n # Initialize file\n english_filename = \"stdatmos_ee.txt\"\n with open(english_filename, 'w') as output_handle:\n\n # Create header\n output_handle.write(\"Geometric Geopotential Speed of\\n\")\n output_handle.write(\"Altitude Altitude Temperature Pressure Density Sound \\n\")\n output_handle.write(\" (ft) (ft) (R) (lbf/ft^2) (slugs/ft^3) (ft/s) \\n\")\n output_handle.write(\"------------------------------------------------------------------------\\n\")\n\n # Loop 
through altitudes\n for i in range(51):\n\n # Calculate properties\n h = i*5000.0\n z, t, p, d = statee(h)\n a = np.sqrt(1.4*287.0528*t/1.8)/0.3048\n\n # Write to file\n write_string = \"{0:<10}{1:<13.5f}{2:<13.5f}{3:<14.5e}{4:<13.5e}{5:<8.4f}\\n\".format(h, z, t, p, d, a)\n output_handle.write(write_string)", "def import_data():\n from project_parameters import perm,dataPointsPerAxis,numElectrodes,debug,scale\n from project_parameters import baseDataName,simulationDirectory,savePath,name\n from project_parameters import position,zMin,zMax,zStep,name,charge,mass,r0\n import pickle\n # renaming for convenience\n na, ne = dataPointsPerAxis, numElectrodes \n\n #works with import_data_HOA -smouradi 04/19\n print('Loading trap data from Sandia...')\n fname = (simulationDirectory+baseDataName+'.pkl')\n print fname\n try:\n f = open(fname,'rb')\n except IOError:\n return ('No pickle file foudn.')\n trap = pickle.load(f)\n\n Xi = trap['X'] #sandia defined coordinates\n Yi = trap['Y']\n Zi = trap['Z']\n #get everything into expected coordinates (described in project_paramters)\n coords = [Xi,Yi,Zi]\n X = coords[perm[0]]/scale\n Y = coords[perm[1]]/scale\n Z = coords[perm[2]]/scale\n if debug.import_data:\n print ('size of X,Y, and Z:')\n print X.shape\n print Y.shape\n print Z.shape\n\n sim=TreeDict()\n el = 0\n for key in trap['electrodes']:\n Vs = trap['electrodes'][key]['V']\n Vs = Vs.reshape(na[0],na[1],na[2])\n Vs = np.transpose(Vs,perm)\n electrode = TreeDict()\n electrode.potential = Vs\n electrode.name = trap['electrodes'][key]['name']\n electrode.position = trap['electrodes'][key]['position']\n if electrode.name == 'RF':\n sim['EL_RF'.format(el)] = electrode.copy()\n sim['EL_DC_{}'.format(el)] = electrode.copy()\n el=el+1\n\n del trap\n #4) Build the simulation data structure\n sim.X,sim.Y,sim.Z=X,Y,Z # set grid vectors\n sim.simulationDirectory = simulationDirectory\n sim.baseDataName = baseDataName\n sim.dataPointsPerAxis = na\n sim.numElectrodes = ne\n sim.savePath = savePath\n sim.perm = perm\n\n sim.configuration.position = position\n sim.configuration.charge = charge\n sim.configuration.mass = mass\n sim.configuration.r0 = r0\n\n if debug.import_data: # Plot each electrode\n print(plot_potential(sim.EL_RF.potential,X,Y,Z,'1D plots','RF electrode'))\n for el in range(0,ne): \n electrode = sim['EL_DC_{}'.format(el)] \n print(plot_potential(electrode.potential,X,Y,Z,\\\n '1D plots','Electrode {},{} Position:{}'.format(el,electrode.name,electrode.position)))\n\n #5) save the particular simulation as a pickle data structure\n nameOut=savePath+name+'.pkl'\n print ('Saving '+nameOut+' as a data structure...')\n output = open(nameOut,'wb')\n pickle.dump(sim,output)\n output.close()\n return 'Import complete.'", "def writeFundamental(self):\n dfFund = pd.DataFrame(self.fundamental_series)\n dfFund.set_index('FundamentalTime', inplace=True)\n self.writeLog(dfFund, filename='fundamental_{symbol}_freq_{self.log_frequency}_ns'.format(self.symbol))\n\n print(\"Noise-free fundamental archival complete.\")", "def save_state(self, file):\n np.savez(file, z_mn=self.z_mn, theta=self.theta, phi=self.phi,\n z_best=self.z_best, ll_best=self.ll_best, log=self.log)", "def test_density(self):\n sol = Mader(p_cj=3.0e11, d_cj=8.0e5, gamma=3.0, u_piston=0.0)\n r = np.array([0.7, 0.8])\n t = 6.25e-6\n solrt = sol(r, t)\n np.testing.assert_allclose(solrt.density[0], 2.26666666666663)", "def aluminum_hexathiohypodiphosphate():\n\n positions = [[0.000000, 0.000000, 0.000000],\n [0.500000, 0.000000, 0.500000],\n 
[0.000000, 0.500000, 0.000000],\n [0.000000, 0.000000, 0.500000],\n [0.197847, 0.276435, 0.101916],\n [0.197847, 0.723565, 0.898084],\n [0.802153, 0.276435, 0.898084],\n [0.802153, 0.723565, 0.101916],\n [0.776404, 0.800507, 0.601208],\n [0.776404, 0.199493, 0.398792],\n [0.223596, 0.800507, 0.398792],\n [0.223596, 0.199493, 0.601208]]\n\n species = ['Al','Al','P','P','S','S','S','S','S','S','S','S']\n\n bravais = 'orthorhombic'\n\n space_group = 16\n lattice_parameters = {'a': Set(5.71230345, 'angstrom'),\n 'b': Set(5.71644625, 'angstrom'),\n 'c': Set(11.46678755,'angstrom')}\n data = {'fractional': positions,\n 'species': species,\n 'lattice_parameters': lattice_parameters,\n 'space_group': ('', space_group),\n 'n_atoms': len(species)}\n\n return data", "def write_mat_file(self):\n mat_dict = {}\n mat_dict['Lx_p'] = self.Lx_p\n mat_dict['Ly_p'] = self.Ly_p\n mat_dict['Lz_p'] = self.Lz_p\n mat_dict['Lo'] = self.obst.get_Lo()\n mat_dict['Ny_divs'] = self.N_divs\n mat_dict['rho_p'] = self.rho_p\n mat_dict['nu_p'] = self.nu_p\n mat_dict['snl'] = list(np.union1d(self.obst_list[:],self.solid_list[:]))\n mat_dict['inl'] = list(self.inlet_list[:])\n mat_dict['onl'] = list(self.outlet_list[:])\n\n scipy.io.savemat('geometry_description',mat_dict)", "def export_wells(self, w, title):\r\n self._check_out(title)\r\n np.savez_compressed(os.path.join(self.out_dir, title, title), w)", "def main(argv):\n logging.basicConfig(format=\"%(message)s\", level=logging.INFO, stream=sys.stdout)\n logger = logging.getLogger(\"demo4\")\n\n # Define some parameters we'll use below and make directories if needed.\n cat_file_name = os.path.join('input','galsim_default_input.asc')\n if not os.path.isdir('output'):\n os.mkdir('output')\n multi_file_name = os.path.join('output','multi.fits')\n\n random_seed = galsim.BaseDeviate(8241573).raw()\n sky_level = 1.e6 # ADU / arcsec^2\n pixel_scale = 1.0 # arcsec / pixel (size units in input catalog are pixels)\n gal_flux = 1.e6 # arbitrary choice, makes nice (not too) noisy images\n gal_g1 = -0.009 #\n gal_g2 = 0.011 #\n\n # the fraction of flux in each component\n # 40% is in the bulge, 60% in a disk. 70% of that disk light is placed\n # into point sources distributed as a random walk\n\n bulge_frac = 0.4\n disk_frac = 0.6\n knot_frac = 0.42\n smooth_disk_frac = 0.18\n\n # number of knots of star formation. To simulate a nice irregular (all the\n # flux is in knots) we find ~100 is a minimum number needed, but we will\n # just use 10 here to make the demo run fast.\n\n n_knots = 10\n\n xsize = 64 # pixels\n ysize = 64 # pixels\n\n logger.info('Starting demo script 4 using:')\n logger.info(' - parameters taken from catalog %r',cat_file_name)\n logger.info(' - Moffat PSF (parameters from catalog)')\n logger.info(' - pixel scale = %.2f',pixel_scale)\n logger.info(' - Bulge + Disc galaxies (parameters from catalog)')\n logger.info(' - 100 Point sources, distributed as random walk')\n logger.info(' - Applied gravitational shear = (%.3f,%.3f)',gal_g1,gal_g2)\n logger.info(' - Poisson noise (sky level = %.1e).', sky_level)\n\n # Read in the input catalog\n cat = galsim.Catalog(cat_file_name)\n\n\n # save a list of the galaxy images in the \"images\" list variable:\n images = []\n for k in range(cat.nobjects):\n # Initialize the (pseudo-)random number generator that we will be using below.\n # Use a different random seed for each object to get different noise realizations.\n # Using sequential random seeds here is safer than it sounds. 
We use Mersenne Twister\n # random number generators that are designed to be used with this kind of seeding.\n # However, to be extra safe, we actually initialize one random number generator with this\n # seed, generate and throw away two random values with that, and then use the next value\n # to seed a completely different Mersenne Twister RNG. The result is that successive\n # RNGs created this way produce very independent random number streams.\n rng = galsim.BaseDeviate(random_seed+k+1)\n\n # Take the Moffat beta from the first column (called 0) of the input catalog:\n # Note: cat.get(k,col) returns a string. To get the value as a float, use either\n # cat.getFloat(k,col) or float(cat.get(k,col))\n beta = cat.getFloat(k,0)\n # A Moffat's size may be either scale_radius, fwhm, or half_light_radius.\n # Here we use fwhm, taking from the catalog as well.\n fwhm = cat.getFloat(k,1)\n # A Moffat profile may be truncated if desired\n # The units for this are expected to be arcsec (or specifically -- whatever units\n # you are using for all the size values as defined by the pixel_scale).\n trunc = cat.getFloat(k,4)\n # Note: You may omit the flux, since the default is flux=1.\n psf = galsim.Moffat(beta=beta, fwhm=fwhm, trunc=trunc)\n\n # Take the (e1, e2) shape parameters from the catalog as well.\n psf = psf.shear(e1=cat.getFloat(k,2), e2=cat.getFloat(k,3))\n\n # Galaxy is a bulge + disk(+knots) with parameters taken from the catalog:\n\n # put some fraction of the disk light into knots of star formation\n\n disk_hlr = cat.getFloat(k,5)\n disk_e1 = cat.getFloat(k,6)\n disk_e2 = cat.getFloat(k,7)\n bulge_hlr = cat.getFloat(k,8)\n bulge_e1 = cat.getFloat(k,9)\n bulge_e2 = cat.getFloat(k,10)\n\n smooth_disk = galsim.Exponential(flux=smooth_disk_frac, half_light_radius=disk_hlr)\n\n knots = galsim.RandomKnots(n_knots, half_light_radius=disk_hlr, flux=knot_frac, rng=rng)\n\n disk = galsim.Add([smooth_disk, knots])\n disk = disk.shear(e1=disk_e1, e2=disk_e2)\n\n # the rest of the light goes into the bulge\n bulge = galsim.DeVaucouleurs(flux=bulge_frac, half_light_radius=bulge_hlr)\n bulge = bulge.shear(e1=bulge_e1, e2=bulge_e2)\n\n # The flux of an Add object is the sum of the component fluxes.\n # Note that in demo3.py, a similar addition was performed by the binary operator \"+\".\n gal = galsim.Add([disk, bulge])\n\n # This flux may be overridden by withFlux. The relative fluxes of the components\n # remains the same, but the total flux is set to gal_flux.\n gal = gal.withFlux(gal_flux)\n gal = gal.shear(g1=gal_g1, g2=gal_g2)\n\n # The center of the object is normally placed at the center of the postage stamp image.\n # You can change that with shift:\n gal = gal.shift(dx=cat.getFloat(k,11), dy=cat.getFloat(k,12))\n\n final = galsim.Convolve([psf, gal])\n\n # Draw the profile\n image = galsim.ImageF(xsize, ysize)\n final.drawImage(image, scale=pixel_scale)\n\n # Add Poisson noise to the image:\n image.addNoise(galsim.PoissonNoise(rng, sky_level * pixel_scale**2))\n\n logger.info('Drew image for object at row %d in the input catalog'%k)\n \n # Add the image to our list of images\n images.append(image)\n \n # Now write the images to a multi-extension fits file. 
Each image will be in its own HDU.\n galsim.fits.writeMulti(images, multi_file_name)\n logger.info('Images written to multi-extension fits file %r',multi_file_name)", "def minfo():\n model = np.loadtxt('cumul_depths.tmp',dtype={'names': ('H'),'formats': \\\n ('f4')}, usecols=[0])\n d = model['H']\n model = np.loadtxt('start_model.dat',dtype={'names': (\"S\"),'formats': \\\n ('f4')}, skiprows=1,usecols=[2])\n vs = model['S']\n\n A = np.repeat(vs,2)\n B = np.repeat(d,2)\n B = np.insert(B,[0],0.0)[:-1] \n out = zip(A, B)\n \n f = open('model.info','w+')\n for line in out:\n print (\" \".join(str(x) for x in line))\n f.write(\" \".join(str(x) for x in line) + \"\\n\") \n f.close()", "def plot_density_of_states(xlim=(-10, 5), ylim=(-1.5, 1.5), fmt='pdf'):\n\n efermi = Vasprun('vasprun.xml').efermi\n dos_lines = open ('DOSCAR').readlines()\n\n x, up, down = [], [], []\n nedos = Incar.from_file('INCAR').as_dict()['NEDOS'] - 1\n\n for line in dos_lines[6:6+nedos]:\n split_line = line.split()\n x.append(float(split_line[0]) - efermi)\n up.append(float(split_line[1]))\n down.append(-float(split_line[2]))\n\n x, up, down = np.array(x), np.array(up), np.array(down)\n sum = up + down\n\n ax = plt.figure().gca()\n ax.set_xlim(xlim[0], xlim[1])\n ax.set_ylim(ylim[0], ylim[1])\n\n ax.set_xlabel(r'$\\mathrm{E\\/(eV)}$')\n ax.set_ylabel(r'$\\mathrm{Density\\/of\\/States$')\n ax.set_xticklabels([r'$\\mathrm{%s}$' % t for t in ax.get_xticklabels()])\n ax.set_yticklabels([r'$\\mathrm{%s}$' % t for t in ax.get_yticklabels()])\n\n ax.plot(x, up, color='red' )\n ax.plot(x, down, color='green')\n ax.plot(x, sum, color='black' )\n if fmt is not None:\n plt.savefig('density_of_states.{}'.format(fmt))\n else:\n return ax\n\n plt.close()", "def main():\n snowdensity=0.35 #from May 1 2010 SNOTEL (2011,2013 were similar, 2014 was 0.4), at the saddle in May 1 2010 it was 0.4\n snodasyears=[2010,2004,2005]\n wdata=[wrf.load(\"wrf/SWE_daily.nc\",extractday=212+5+int(np.round(365.25*year))) for year in [3,4]]\n wdata.extend([wrf.load(\"wrf/SWE_daily.nc\",extractday=212+20+int(np.round(365.25*year))) for year in [3,4]])\n print(len(wdata))\n sdata=[snodas.load(\"snodas/SWE_Daily0600UTC_WesternUS_{}.dat\".format(year),extractday=125) for year in snodasyears]\n sdata.extend([snodas.load(\"snodas/SWE_Daily0600UTC_WesternUS_{}.dat\".format(year),extractday=140) for year in snodasyears])\n print(len(sdata))\n # sdata=[snodas.load(\"snodas/SWE_Daily0600UTC_WesternUS_{}.dat\".format(year),extractday=120) for year in range(2004,2013)]\n # sdata.insert(0,sdata.pop(6)) #move year 2010 to the begining of the list\n ldata=lidar.load_fast(loc=\"lidar/\",geofile=\"snow-on-dem.nc\",decimation_factor=10)\n \n print(\"Calculating WRF weights\")\n try:\n wrfweights=mygis.read_nc(\"wrf2lidar_weights.nc\").data\n except:\n wrfweights =gen_weights(ldata.lat,ldata.lon,wdata[0].lat,wdata[0].lon,mask=(ldata.dem>1500))\n mygis.write(\"wrf2lidar_weights.nc\",wrfweights)\n \n # wrfbounds =find_bounds(wrfweights)\n print(\"Calculating SNODAS weights\")\n try:\n snodasweights=mygis.read_nc(\"snodas2lidar_weights.nc\").data\n except:\n snodasweights=gen_weights(ldata.lat,ldata.lon,sdata[0].lat,sdata[0].lon,mask=(ldata.dem>1500))\n mygis.write(\"snodas2lidar_weights.nc\",snodasweights)\n \n # snodasbounds =find_bounds(snodasweights)\n \n wdata[0].lc[wrfweights==0]=0\n sdata[0].lc[snodasweights==0]=0\n\n print(\"Binning by elevations...\")\n #dx=4000) #note use dx=lidar_dx because weights are lidar gridcells...\n 
wrfbyz=[bin_by_elevation(w.data,w.dem,wdata[0].lc,weights=wrfweights,dz=200,dx=10) for w in wdata]\n print(\"Binning by elevations...\")\n snodasbyz=[bin_by_elevation(s.data,sdata[0].dem,sdata[0].lc,weights=snodasweights,dz=150,dx=10) for s in sdata]#dx=926)\n print(\"Binning by elevations...\")\n lidarbyz=bin_by_elevation(ldata.data*snowdensity,ldata.dem,ldata.lc,dz=100,dx=10)\n print(\"Plotting\")\n plot_volumes(wrfbyz,snodasbyz,lidarbyz)\n\n snodasyears=[2010,2004,2005,2010.2,2004.2,2005.2]\n for i in range(len(snodasbyz)):\n plot_elevation_bands(snodasbyz[i],outputfile=\"SNODAS_swe_by_z_{}.png\".format(snodasyears[i]),title=\"SNODAS SWE {}\".format(snodasyears[i]))", "def download():\r\n reader = GSODDataReader()\r\n year_list = range(2001, 2012)\r\n austin = reader.collect_data(year_list, exact_station=True,\r\n station_name='AUSTIN CAMP MABRY', state='TX', country='US')\r\n houston = reader.collect_data(year_list, exact_station=True,\r\n station_name='HOUSTON/D.W. HOOKS', state='TX', country='US')\r\n new_york = reader.collect_data(year_list, exact_station=True,\r\n station_name='NEW YORK/LA GUARDIA', state='NY', country='US')\r\n newark = reader.collect_data(year_list, exact_station=True,\r\n station_name='NEWARK INTL AIRPORT', state='NJ', country='US')\r\n punta_arenas = reader.collect_data(year_list, exact_station=True,\r\n station_name='PUNTA ARENAS', country='CH')\r\n wellington = reader.collect_data(year_list, exact_station=True,\r\n station_name='WELLINGTON AIRPORT', country='NZ')\r\n store = HDFStore('weather.h5')\r\n store['austin'] = austin\r\n store['houston'] = houston\r\n store['nyc'] = new_york\r\n store['newark'] = newark\r\n store['punta_arenas'] = punta_arenas\r\n store['wellington'] = wellington\r\n store.close()", "def resultInHDF5(self, iStep):\n filePath = os.path.expanduser('~/LBMResults')\n resultFile = filePath + '/SimulationResults.h5'\n dataFile = tb.open_file(resultFile, 'a')\n #output the densities of fluids\n for i in sp.arange(self.typesFluids):\n dataFile.create_array('/FluidMacro', 'FluidDensityType%gin%g' % (i, iStep), \\\n self.fluidsDensity[i])\n dataFile.create_array('/FluidVelocity', 'FluidVelocityXAt%g' % iStep, \\\n self.physicalVX)\n dataFile.create_array('/FluidVelocity', 'FluidVelocityYAt%g' % iStep, \\\n self.physicalVY)\n dataFile.close()", "def initialize(self):\n try:\n os.makedirs('./output/' + self.output)\n except:\n pass\n with open(self.trajectory_file, 'w') as f:\n f.write('MODEL 0\\n')\n for atom in self.atoms:\n f.write(atom.to_pdb_line() + '\\n')\n f.write('TER\\n')\n potential_energy = 0\n kinetic_energy = 0\n for atom in self.atoms:\n potential_energy += atom.potential_energy\n kinetic_energy += atom.get_kinetic_energy()\n with open(self.kinetic_energy_file, 'w') as f:\n f.write('0,' + str(kinetic_energy) + '\\n')\n with open(self.potential_energy_file, 'w') as f:\n f.write('0,' + str(potential_energy) + '\\n')", "def harzburgite():\n\n rho = 3200.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 226.5; C[0,1] = 75.34; C[0,2] = 74.73; C[0,3] = -0.27; C[0,4] = -2.00; C[0,5] = 1.85\n C[1,0] = C[0,1]; C[1,1] = 242.8; C[1,2] = 73.68; C[1,3] = -3.6; C[1,4] = -1.91; C[1,5] = 4.14\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 230.; C[2,3] = -4.36; C[2,4] = -4.27; C[2,5] = -0.27\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 80.75; C[3,4] = 1.81; C[3,5] = -2.19\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 76.94; C[4,5] = -1.88\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = 
C[3,5]; C[5,4] = C[4,5]; C[5,5] = 79.15\n\n return C, rho", "def _write(self):\n f = FortranFile(self.filename,mode='w')\n # Default omnivor binary header\n f.writeInts ( self.data['MK'] , 'i' ) \n f.writeInts ( self.data['itime'] , 'i' ) \n f.writeString ( self.data['version'] ) \n f.writeInts ( self.data['file_id'] , 'i' ) \n f.writeString ( self.data['sversion'] ) \n # Velocity field\n f.writeString ( self.data['stype'] ) \n f.writeInts ( self.data['is_grid'] , 'i' ) \n f.writeInts ( self.data['nCPs'] , 'i' ) \n if self.data['MK'] == 8:\n real_char='d'\n else:\n real_char='f'\n if self.data['is_grid']:\n f.writeInts ( self.data['n1'] , 'i' ) \n f.writeInts ( self.data['n2'] , 'i' ) \n f.writeInts ( self.data['n3'] , 'i' ) \n f.writeInts ( self.data['is_straight'] , 'i' ) \n f.writeReals ( self.data['v1'] , real_char ) \n f.writeReals ( self.data['v2'] , real_char ) \n f.writeReals ( self.data['v3'] , real_char ) \n\n CPs = self.data['CPs'].flatten(order = 'F')\n Utot = self.data['Utot'].flatten(order = 'F')\n f.writeReals(CPs,real_char)\n f.writeReals(Utot,real_char)", "def wrhdf(hdf_filename, x, y, z, f):\n\n # Create an HDF file\n sd_id = SD(hdf_filename, SDC.WRITE | SDC.CREATE | SDC.TRUNC)\n\n if f.dtype == np.float32:\n ftype = SDC.FLOAT32\n elif f.dtype == np.float64:\n ftype = SDC.FLOAT64\n\n # Create the dataset (Data-Set-2 is the name used by the psi data)).\n sds_id = sd_id.create(\"Data-Set-2\", ftype, f.shape)\n\n #Get number of dimensions:\n ndims = np.ndim(f)\n\n #Set the scales:\n for i in range(0,ndims):\n dim = sds_id.dim(i)\n if i == 0:\n if x.dtype == np.float32:\n stype = SDC.FLOAT32\n elif x.dtype == np.float64:\n stype = SDC.FLOAT64\n dim.setscale(stype,x)\n elif i == 1:\n if y.dtype == np.float32:\n stype = SDC.FLOAT32\n elif y.dtype == np.float64:\n stype = SDC.FLOAT64\n dim.setscale(stype,y)\n elif i == 2: \n if z.dtype == np.float32:\n stype = SDC.FLOAT32\n elif z.dtype == np.float64:\n stype = SDC.FLOAT64\n dim.setscale(stype,z)\n\n # Write the data:\n sds_id.set(f)\n\n # Close the dataset:\n sds_id.endaccess()\n\n # Flush and close the HDF file:\n sd_id.end()", "def to_dense_grid( fname_in, fname_out = None, dim=2 ):\n import numpy as np\n import insect_tools\n import matplotlib.pyplot as plt\n\n # read data\n time, x0, dx, box, data, treecode = read_wabbit_hdf5( fname_in )\n\n # convert blocks to complete matrix\n field, box = dense_matrix( x0, dx, data, treecode, dim=dim )\n\n # write data to FLUSI-type hdf file\n if fname_out:\n insect_tools.write_flusi_HDF5( fname_out, time, box, field)\n else: \n dx = [b/(np.size(field,k)) for k,b in enumerate(box)]\n X = [np.arange(0,np.size(field,k))*dx[k] for k,b in enumerate(box)]\n return field, box, dx, X", "def write_mat_file(self, geom_filename):\n mat_dict = {}\n mat_dict['Lx_p'] = self.Lx_p\n mat_dict['Ly_p'] = self.Ly_p\n mat_dict['Lz_p'] = self.Lz_p\n mat_dict['Lo'] = self.obst.get_Lo()\n mat_dict['Ny_divs'] = self.N_divs\n mat_dict['rho_p'] = self.rho_p\n mat_dict['nu_p'] = self.nu_p\n mat_dict['snl'] = list(np.union1d(self.obst_list[:],self.solid_list[:]))\n mat_dict['inl'] = list(self.inlet_list[:])\n mat_dict['onl'] = list(self.outlet_list[:])\n\n scipy.io.savemat(geom_filename,mat_dict)", "def Hillshade(InputFilePath,OutputFilePath): # perform hillshade on a DEM and return it\r\n try:\r\n print(\"\"\"\r\nProcessing Hillshade Layer...\r\n \"\"\") \r\n \r\n TheRaster=arcpy.sa.Hillshade(InputFilePath, 315, 45, 1)\r\n TheRaster.save(OutputFilePath)\r\n print(\"Complete\")\r\n \r\n except Exception, err: # an 
error occurred (probably in arcGIS)\r\n raise RuntimeError(\"** Error: Hillshade Failed (\"+str(err)+\")\")", "def write_dftb_in(self, outfile):\n\n outfile.write('Geometry = GenFormat { \\n')\n outfile.write(' <<< \"geo_end.gen\" \\n')\n outfile.write('} \\n')\n outfile.write(' \\n')\n\n params = self.parameters.copy()\n\n s = 'Hamiltonian_MaxAngularMomentum_'\n for key in params:\n if key.startswith(s) and len(key) > len(s):\n break\n # --------MAIN KEYWORDS-------\n previous_key = 'dummy_'\n myspace = ' '\n for key, value in sorted(params.items()):\n current_depth = key.rstrip('_').count('_')\n previous_depth = previous_key.rstrip('_').count('_')\n for my_backsclash in reversed(\n range(previous_depth - current_depth)):\n outfile.write(3 * (1 + my_backsclash) * myspace + '} \\n')\n outfile.write(3 * current_depth * myspace)\n if key.endswith('_') and len(value) > 0:\n outfile.write(key.rstrip('_').rsplit('_')[-1] +\n ' = ' + str(value) + '{ \\n')\n elif (key.endswith('_') and (len(value) == 0)\n and current_depth == 0): # E.g. 'Options {'\n outfile.write(key.rstrip('_').rsplit('_')[-1] +\n ' ' + str(value) + '{ \\n')\n elif (key.endswith('_') and (len(value) == 0)\n and current_depth > 0): # E.g. 'Hamiltonian_Max... = {'\n outfile.write(key.rstrip('_').rsplit('_')[-1] +\n ' = ' + str(value) + '{ \\n')\n elif key.count('_empty') == 1:\n outfile.write(str(value) + ' \\n')\n else:\n outfile.write(key.rsplit('_')[-1] + ' = ' + str(value) + ' \\n')\n previous_key = key\n current_depth = key.rstrip('_').count('_')\n for my_backsclash in reversed(range(current_depth)):\n outfile.write(3 * my_backsclash * myspace + '} \\n')\n outfile.write('ParserOptions { \\n')\n outfile.write(' IgnoreUnprocessedNodes = Yes \\n')\n outfile.write('} \\n')", "def writeHedr(self):\n path = os.path.join(self.dir,self.name)\n out = file(path,'r+b')\n out.seek(16) #--Skip to Hedr record data\n self.tes3.hedr.getSize()\n self.tes3.hedr.dump(out)\n out.close()\n #--Done\n self.getHeader()\n self.setMTime()", "def main(self,fnames):\n\n\n \"\"\"\n Extract the data and calculate the means in a few different ways\n \"\"\"\n\n\n ReturnDict = dict()\n with open('DataAttrition.txt', 'w') as f:\n # f.write(\"nTotal,nNan, nUseableInital, nAttrition, nUsableFinal \\n \")\n for i in range(len(fnames)):\n InternalDict = dict()\n fname=fnames[i]\n print 'fname', fname\n output=self.readafile(os.path.join(fname))\n\n onameIndividualTime = fname.split('/')[-1][:-3]+'_IndividualTime.pdf'\n odir = '/'.join(fname.split('/')[0:-1])\n TimeStr = fname.split('/')[-1].split('_')[0]\n # outFile = os.path.join(odir,oname)\n # outFileContour = os.path.join(odir,onameContour)\n # outFilePolar = os.path.join(odir,onamePolar)\n\n # outfilefull = os.path.join(dpath,oname)\n # outfilefull2 = os.path.join(dpath, 'Median','%04d.png'%count)\n\n # print i\n # if i==0:\n Altitude=output['/Winds']['Altitude'];\n if Altitude.ndim == 2:\n InternalDict['MeanAltitude']=numpy.mean(Altitude,axis=1)\n elif Altitude.ndim == 1:\n InternalDict['MeanAltitude'] = Altitude\n\n\n\n InternalDict['ZonalWind'] = output['/Winds']['WindGeo'][:,:,0]\n InternalDict['MeridWind'] = output['/Winds']['WindGeo'][:,:,1]\n InternalDict['errZonalWind'] =output['/Winds']['errWindGeo'][:,:,0]\n InternalDict['errMeridWind'] =output['/Winds']['errWindGeo'][:,:,1]\n InternalDict['ZonalFlow'] = output['/VectorVels']['VestGmag'][:,:,0]\n InternalDict['MeridFlow'] = output['/VectorVels']['VestGmag'][:,:,1]\n InternalDict['errZonalFlow'] = output['/VectorVels']['errVestGmag'][:,:,0]\n 
InternalDict['errMeridFlow'] = output['/VectorVels']['errVestGmag'][:,:,1]\n InternalDict['time'] = output['/Time']['UnixTime'];\n InternalDict['mlt'] = output['/Time']['MLTDecHrs']\n InternalDict['slt'] = output['/Time']['LocalDecHrs']\n InternalDict['ZonalE'] = output['/ElectricFields']['Efield'][:,0]#.read()']\n InternalDict['MeridE'] = output['/ElectricFields']['Efield'][:,1]\n InternalDict['NeMean'] = output['/Ne']['MeanNeFitted']\n\n InternalDict['AE'] = output['/GeophysicalParameters']['AE']\n InternalDict['KP'] = output['/GeophysicalParameters']['KP']\n InternalDict['AP'] = output['/GeophysicalParameters']['AP']\n InternalDict['F107'] = output['/GeophysicalParameters']['F107']\n\n # InternalDict['ZonalFlowFregion'] = output['/Fregion']['VestGmag_300km'][:,0]\n # InternalDict['MeridFlowFregion'] = output['/Fregion']['VestGmag_300km'][:,1]\n\n # else:\n # InternalDict['ZonalWind'] = numpy.concatenate((InternalDict['ZonalWind'],output['/Winds']['WindGeo'][:,:,0]),axis=0)\n # InternalDict['MeridWind'] = numpy.concatenate((InternalDict['MeridWind'],output['/Winds']['WindGeo'][:,:,1]),axis=0)\n # InternalDict['ZonalFlow'] = numpy.concatenate((InternalDict['ZonalFlow'],output['/VectorVels']['VestGmag'][:,:,0]),axis=0)\n # InternalDict['MeridFlow'] = numpy.concatenate((InternalDict['MeridFlow'],output['/VectorVels']['VestGmag'][:,:,1]),axis=0)\n # InternalDict['errZonalWind'] = numpy.concatenate((InternalDict['errZonalWind'],output['/Winds']['errWindGeo'][:,:,0]),axis=0)\n # InternalDict['errMeridWind'] = numpy.concatenate((InternalDict['errMeridWind'],output['/Winds']['errWindGeo'][:,:,1]),axis=0)\n # InternalDict['errZonalFlow'] = numpy.concatenate((InternalDict['errZonalFlow'], output['/VectorVels']['errVestGmag'][:,:,0]), axis=0)\n # InternalDict['errMeridFlow'] = numpy.concatenate((InternalDict['errMeridFlow'], output['/VectorVels']['errVestGmag'][:,:,1]), axis=0)\n # InternalDict['time'] = numpy.concatenate((InternalDict['time'],output['/Time']['UnixTime']),axis=0)\n # InternalDict['mlt'] = numpy.concatenate((InternalDict['mlt'], output['/Time']['MLTDecHrs']),axis=0)\n # InternalDict['slt'] = numpy.concatenate((InternalDict['slt'], output['/Time']['LocalDecHrs']),axis=0)\n # InternalDict['ZonalE'] = numpy.concatenate((InternalDict['ZonalE'],output['/ElectricFields']['Efield'][:,0]), axis=0)#.read()']\n # InternalDict['MeridE'] = numpy.concatenate((InternalDict['MeridE'],output['/ElectricFields']['Efield'][:,1]), axis=0)\n # InternalDict['NeMean'] = numpy.concatenate((InternalDict['NeMean'],output['/Ne']['MeanNeFitted']), axis=0)\n # InternalDict['AE'] = numpy.concatenate((output['/GeophysicalParameters']['AE'],output['/GeophysicalParameters']['AE']),axis=0)\n # InternalDict['KP'] = numpy.concatenate((output['/GeophysicalParameters']['KP'], output['/GeophysicalParameters']['KP']), axis=0)\n # InternalDict['AP'] = numpy.concatenate((output['/GeophysicalParameters']['AP'], output['/GeophysicalParameters']['AE']), axis=0)\n # InternalDict['F107'] = numpy.concatenate((output['/GeophysicalParameters']['F107'], output['/GeophysicalParameters']['F107']), axis=0)\n\n # filter by velocity\n # Ibad=numpy.where( (InternalDict['ZonalWind'] > 500.0) | (InternalDict['ZonalWind'] < -500. ) | \\\n # (InternalDict['MeridWind'] > 500.0) | (InternalDict['MeridWind'] < -500.) | \\\n # (InternalDict['errZonalWind'] > 500.) | (InternalDict['errMeridWind'] > 500.) 
| \\\n # (InternalDict['NeMean'] < 1.e11) \\\n # )\n nTot = float(numpy.ravel(InternalDict['ZonalWind']).shape[0])\n print numpy.where(numpy.isfinite(numpy.ravel(InternalDict['ZonalWind'])) == False)[0]\n print 'nnan raw', numpy.where(numpy.isnan(numpy.ravel(InternalDict['ZonalWind']))==True)\n\n # note if you look at the raw output where outputs a tuple of two arrays\n # those two arrays combined will double count.\n\n nNan = float(numpy.where(numpy.isnan(numpy.ravel(InternalDict['ZonalWind'])) == True)[0].shape[0])\n qnan = numpy.where(numpy.isnan(numpy.ravel(InternalDict['ZonalWind'])) == True)\n print numpy.ravel(InternalDict['ZonalWind'])[qnan]\n\n qUse = numpy.where(numpy.isfinite(numpy.ravel(InternalDict['ZonalWind']))==True)\n print numpy.ravel(InternalDict['ZonalWind'])[qUse]\n\n nUsableInital = float(numpy.where(numpy.isfinite(numpy.ravel(InternalDict['ZonalWind']))==True)[0].shape[0])\n print 'Total starting Data,', numpy.ravel(InternalDict['ZonalWind']).shape[0], numpy.ravel(InternalDict['NeMean']).shape[0]\n # print 'Data initially Nans', numpy.ravel(numpy.where(numpy.isfinite(InternalDict['ZonalWind']) == False)).shape[0], numpy.ravel(numpy.where(numpy.isnan(InternalDict['ZonalWind']) == True)).shape[0]/nTot\n # print 'Usable Data', numpy.ravel(InternalDict['ZonalWind']).shape[0]-numpy.ravel(numpy.where(numpy.isnan(InternalDict['ZonalWind']) == True)).shape[0]\n print 'nUsableInital, nNan', nUsableInital, nNan\n nTotalCheck = 0.\n nUsableCheck = 0.\n f.write('%s \\n'%TimeStr)\n f.write('Initial Usable Data \\n')\n for i in range(InternalDict['ZonalWind'].shape[1]):\n nUsableInitalAlt = float(numpy.where(numpy.isfinite(numpy.ravel(InternalDict['ZonalWind'][:,i]))==True)[0].shape[0])\n print 'alt, initial usable data', InternalDict['MeanAltitude'][i],nUsableInitalAlt, InternalDict['ZonalWind'][:,i].shape\n f.write('alt, %0.1f, %0.1f \\n'%(InternalDict['MeanAltitude'][i],nUsableInitalAlt))\n nTotalCheck = InternalDict['ZonalWind'][:,i].shape[0]+nTotalCheck\n nUsableCheck = nUsableInitalAlt + nUsableCheck\n print 'initial nTotal check nUsable+nNan, nTot', nUsableInital+nNan, nTot\n print 'CHECK nUsableInital, nTotal', nUsableInital, nUsableCheck, nTot, nTotalCheck\n Ibad=numpy.where( (InternalDict['ZonalWind'] > self.config['DATADISCARD']['ZonalWindMax']) | \\\n (InternalDict['ZonalWind'] < self.config['DATADISCARD']['ZonalWindMin'] ) | \\\n (InternalDict['MeridWind'] > self.config['DATADISCARD']['ZonalWindMax']) | \\\n (InternalDict['MeridWind'] < self.config['DATADISCARD']['ZonalWindMin']) | \\\n (InternalDict['errZonalWind'] > self.config['DATADISCARD']['ErrorZonalWind']) | \\\n (InternalDict['errMeridWind'] > self.config['DATADISCARD']['ErrorMeridWind']) | \\\n (InternalDict['NeMean'] < self.config['DATADISCARD']['NeMean']) \\\n )\n\n\n\n # f.write(\"nTotal,nNan, nUseableInital, nAttrition, nUsableFinal \\n \")\n\n InternalDict['ZonalWind'][Ibad]=numpy.nan\n InternalDict['MeridWind'][Ibad]=numpy.nan\n InternalDict['errZonalWind'][Ibad]=numpy.nan\n InternalDict['errMeridWind'][Ibad]=numpy.nan\n InternalDict['ZonalFlow'][Ibad] = numpy.nan\n InternalDict['MeridFlow'][Ibad] = numpy.nan\n InternalDict['errZonalFlow'][Ibad] = numpy.nan\n InternalDict['errMeridFlow'][Ibad] = numpy.nan\n InternalDict['NeMean'][Ibad] = numpy.nan\n\n # print 'Data After Filtering Nans', numpy.ravel(numpy.where(numpy.isnan(InternalDict['ZonalWind']) == True)).shape[0]\n # print 'Number of finite data points', numpy.ravel(numpy.where(numpy.isfinite(InternalDict['ZonalWind']) == True)).shape[0]\n # 
nUsableFinal = float(numpy.ravel(numpy.where(numpy.isfinite(InternalDict['ZonalWind']) == True)).shape[0])\n nUsableFinal = float(numpy.where(numpy.isfinite(numpy.ravel(InternalDict['ZonalWind']))==True)[0].shape[0])\n nNanFinal = float(numpy.where(numpy.isnan(numpy.ravel(InternalDict['ZonalWind'])) == True)[0].shape[0])\n print 'Ibad size', numpy.ravel(Ibad[0]).shape[0]\n print 'nUsableFinal, nNanFinal,nTot check', nUsableFinal, nNanFinal, nUsableFinal+nNanFinal, numpy.ravel(InternalDict['ZonalWind']).shape[0]\n\n print 'total ending data number', numpy.ravel(InternalDict['ZonalWind']).shape[0]\n nTotalCheck = 0.\n nUsableCheck = 0.\n f.write('\\n Final Usable data \\n')\n for i in range(InternalDict['ZonalWind'].shape[1]):\n nUsableInitalAlt = float(numpy.where(numpy.isfinite(numpy.ravel(InternalDict['ZonalWind'][:,i]))==True)[0].shape[0])\n print 'alt, initial usable data', InternalDict['MeanAltitude'][i],nUsableInitalAlt, InternalDict['ZonalWind'][:,i].shape\n TotalCheck = InternalDict['ZonalWind'][:,i].shape[0]+nTotalCheck\n nUsableCheck = nUsableInitalAlt + nUsableCheck\n f.write('alt, %0.1f, %0.1f \\n'%(InternalDict['MeanAltitude'][i],nUsableInitalAlt))\n print 'initial nTotal check nUsable+nNan, nTot', nUsableInital+nNan, nTot\n print 'CHECK nUsableInital, nUsableCheck, nTot, nTotalCheck ', nUsableInital, nUsableCheck, nTot, nTotalCheck\n print '\\n \\n'\n\n # do not need to do Ibad for AE,KP,F107, etc.\n f.write( '-------------Hours-------------------- \\n')\n f.write('%s \\n'%TimeStr)\n # try to determine how much data in terms of time\n for i in range(InternalDict['ZonalWind'].shape[1]):\n qGoodData = numpy.where(numpy.isfinite(numpy.ravel(InternalDict['ZonalWind'][:,i]))==True)[0]\n # print 'test WindData', numpy.ravel(InternalDict['ZonalWind'][:,i])[qGoodData]\n print 'qgoodDataShape', qGoodData.shape\n tmpHours = numpy.sum(InternalDict['time'][qGoodData,1]-InternalDict['time'][qGoodData,0])/3600.\n totalTime = numpy.nansum(InternalDict['time'][:,1]-InternalDict['time'][:,0])/3600.\n # print 'alt, initial usable data', InternalDict['MeanAltitude'][i],nUsableInitalAlt, InternalDict['ZonalWind'][:,i].shape\n f.write('alt, %0.1f, %0.1f, %0.1f \\n'%(InternalDict['MeanAltitude'][i], tmpHours,totalTime))\n f.write('\\n ########################################## \\n ')\n # in case I need to check that the filtering is working\n # for i in range(ZonalWind.shape[0]):\n # print i, numpy.nanmax(ZonalWind[i,:]), numpy.nanmin(ZonalWind[i,:])\n\n\n MeanTime=numpy.nanmean(InternalDict['time'],axis=1)\n # filter out all times with nans\n qnan = numpy.where(numpy.isnan(MeanTime) == False)[0]\n\n # print 'MeanTime.shape', MeanTime.shape, mlt.shape, slt.shape\n\n if (MeanTime.shape[0] == InternalDict['mlt'].shape[0]) & (MeanTime.shape[0] == InternalDict['slt'].shape[0]):\n MeanTime = MeanTime[qnan]\n InternalDict['mlt'] = InternalDict['mlt'][qnan]\n InternalDict['slt'] = InternalDict['slt'][qnan]\n else:\n raise ValueError (\"Wrong dimensions on time arrays\")\n\n\n InternalDict['ZonalWind'] = InternalDict['ZonalWind'][qnan,:]\n InternalDict['MeridWind'] = InternalDict['MeridWind'][qnan,:]\n InternalDict['errZonalWind'] = InternalDict['errZonalWind'][qnan,:]\n InternalDict['errMeridWind'] = InternalDict['errMeridWind'][qnan,:]\n InternalDict['ZonalE'] = InternalDict['ZonalE'][qnan]\n InternalDict['MeridE'] = InternalDict['MeridE'][qnan]\n InternalDict['ZonalFlow'] = InternalDict['ZonalFlow'][qnan,:]\n InternalDict['MeridFlow'] = InternalDict['MeridFlow'][qnan,:]\n InternalDict['errZonalFlow'] = 
InternalDict['errZonalFlow'][qnan,:]\n InternalDict['errMeridFlow'] = InternalDict['errMeridFlow'][qnan,:]\n InternalDict['NeMean'] = InternalDict['NeMean'][qnan,:]\n\n # # added on 10/30/2018\n # InternalDict['ZonalFlowFregion'] = InternalDict['ZonalFlowFregion'][qnan]\n # InternalDict['MeridFlowFregion'] = InternalDict['MeridFlowFregion'][qnan]\n\n print 'AE', len(qnan), InternalDict['AE'].shape\n InternalDict['AE'] = InternalDict['AE'][qnan]\n InternalDict['KP'] = InternalDict['KP'][qnan]\n InternalDict['AP'] = InternalDict['AP'][qnan]\n InternalDict['F107'] = InternalDict['F107'][qnan]\n\n # have some sort of filtering\n # need to fiilter out wind estimates > 500 or 100 m/s\n\n # new function which will basically calculate the mean and then plot the data\n\n \"\"\"\n Setting up the time grid\n \"\"\"\n # dminute = self.config['TIME']['dMinutes']\n dhours = self.config['TIME']['TimeIntervalMinutes']/60.\n dt = self.config['TIME']['TimeIntervalLengthMinutes']/60.\n DecimalHoursTimeGrid =numpy.arange(0,24,dhours)\n DecimalTime = numpy.array([datetime.datetime.utcfromtimestamp(t) for t in MeanTime])\n DecimalHours = numpy.array([t.hour+t.minute/60.+t.second/3600. for t in DecimalTime])\n InternalDict['ut'] = DecimalHours\n\n\n\n\n # utTimeDict = CalculateMeanWindDict(DecimalHoursTimeGrid,dt,DecimalHours,MeanTime,\\\n # ZonalWind,MeridWind,errZonalWind, \\\n # errMeridWind,ZonalE, MeridE,MeanAltitude, \\\n # ZonalFlow,MeridFlow,errZonalFlow,errMeridFlow,NeMean)\n\n outDict = dict()\n outDict['ut'] = self.CalculateMeanWindDict(DecimalHoursTimeGrid,dt,InternalDict['ut'],InternalDict)\n outDict['slt'] = self.CalculateMeanWindDict(DecimalHoursTimeGrid,dt,InternalDict['slt'],InternalDict)\n outDict['mlt'] = self.CalculateMeanWindDict(DecimalHoursTimeGrid,dt,InternalDict['mlt'],InternalDict)\n\n ReturnDict[TimeStr] = outDict\n f.close()\n # mltTimeDict = self.CalculateMeanWindDict(DecimalHoursTimeGrid,dt,mlt,MeanTime,\\\n # ZonalWind,MeridWind,errZonalWind, \\\n # errMeridWind,ZonalE, MeridE,MeanAltitude,\\\n # ZonalFlow,MeridFlow,errZonalFlow,errMeridFlow,\\\n # NeMean, AE, AP,KP, F107)\n return ReturnDict", "def main():\n \n data_base = '/local/duman/SIMULATIONS/many_polymers_5/'\n save_base = '/usr/users/iff_th2/duman/RolfData/many_polymers_5'\n \n \n ## Index the data\n# density = [0.08, 0.2, 0.4]\n# xi_L = [0.05, 0.1, 0.2, 1.0, 2.5, 5.0, 8.0, 16.0]\n# Pe = [3.0, 7.0, 15.0, 50.0, 150.0, 500.0, 750.0, 1500.0, 4375.0, 8000.0, 10000.0]\n# kappa = [1.25, 2.5, 5.0, 25.0, 62.5, 125.0, 200.0, 400.0]\n# fp = [0.0, 0.0048, 0.0112, 0.024, 0.08, 0.24, 0.8, 1.2, 2.4, 7.0]\n \n density = [0.2]\n kappa = [1.25, 2.5, 5.0, 25.0, 62.5, 125.0, 200.0, 400.0]\n xi_L = [0.05, 0.1, 0.2, 1.0, 2.5, 5.0, 8.0, 16.0]\n fp = [0.0048, 0.0112, 0.024, 0.08, 0.24, 0.8, 1.2, 2.4, 7.0]\n Pe = [3.0, 7.0, 15.0, 50.0, 150.0, 500.0, 750.0, 1500.0, 4375.0]\n \n ## Create points\n points = []\n for i, x in enumerate(xi_L):\n for j, p in enumerate(Pe):\n points.append( Phase(x, p, kappa[i], fp[j], 'short') ) \n \n for point in points:\n point.analyse_type()\n point.set_plot_props()\n \n long_xil = [0.05, 0.2, 1.0, 2.5, 16.0]\n long_pe = [3.0, 150.0, 750.0, 8000.0, 10000.0]\n long_kappa = [5.0, 20.0, 100.0, 250.0, 1600.0]\n long_fp = [0.0003, 0.015, 0.075, 0.0, 0.0]\n long_points = []\n for i, x in enumerate(long_xil):\n for j, p in enumerate(long_pe):\n long_points.append( Phase(x, p, long_kappa[i], long_fp[j], 'long') ) \n \n for point in long_points:\n point.determine_type()\n point.set_plot_props()\n \n plot_data(points, 
long_points, save_base, xi_L, Pe)", "def save_to_arc(self, filename, header = True, comment = None):\n if header:\n F = open( filename, 'w' )\n F.write( \"!BIOSYM archive 2\\n\" )\n if comment is not None:\n F.write( '!%s\\n'%comment )\n F.write( \"PBC=ON\\n\" )\n else:\n F = open( filename, 'a' )\n \n #FIXME: If you think this is the ugliest python code you've ever seen,\n # you are quite right! It is literal translation of some old AWK script.\n # But it works for now, so... \n\n unit_cell = self.unit_cell\n a=sqrt(unit_cell[0,0]*unit_cell[0,0]+\n unit_cell[0,1]*unit_cell[0,1]+\n unit_cell[0,2]*unit_cell[0,2])\n b=sqrt(unit_cell[1,0]*unit_cell[1,0]+\n unit_cell[1,1]*unit_cell[1,1]+\n unit_cell[1,2]*unit_cell[1,2])\n c=sqrt(unit_cell[2,0]*unit_cell[2,0]+\n unit_cell[2,1]*unit_cell[2,1]+\n unit_cell[2,2]*unit_cell[2,2])\n alpha=(unit_cell[1,0]*unit_cell[2,0]+\n unit_cell[1,1]*unit_cell[2,1]+\n unit_cell[1,2]*unit_cell[2,2])/(b*c)\n beta =(unit_cell[0,0]*unit_cell[2,0]+\n unit_cell[0,1]*unit_cell[2,1]+\n unit_cell[0,2]*unit_cell[2,2])/(a*c)\n gamma=(unit_cell[0,0]*unit_cell[1,0]+\n unit_cell[0,1]*unit_cell[1,1]+\n unit_cell[0,2]*unit_cell[1,2])/(a*b)\n alpha=math.atan2(sqrt(1-alpha*alpha),alpha)\n beta =math.atan2(sqrt(1-beta *beta ),beta )\n gamma=math.atan2(sqrt(1-gamma*gamma),gamma)\n\n transf=zeros((3,3))\n transf[0,0]=a\n transf[1,0]=0.0\n transf[2,0]=0.0\n transf[0,1]=b*cos(gamma)\n transf[1,1]=b*sin(gamma)\n transf[2,1]=0.0\n transf[0,2]=c*cos(beta)\n transf[1,2]=c*(cos(alpha)-(cos(gamma)*cos(beta)))/sin(gamma)\n transf[2,2]=sqrt(c*c-transf[0,2]*transf[0,2]-transf[1,2]*transf[1,2])\n\n alpha=180*alpha/pi\n beta =180* beta/pi\n gamma=180*gamma/pi\n\n recip_cell = self.recip_cell.T\n frac_pos = zeros(self.atoms.shape)\n positions= zeros(self.atoms.shape)\n for i in range(self.num_atoms):\n for j in range(3):\n frac_pos[i,j]=0.\n for k in range(3):\n frac_pos[i,j]+=self.atoms[i,k]*recip_cell[j,k]\n for j in range(3):\n positions[i,j] = 0.\n for k in range(3):\n positions[i,j]+=frac_pos[i,k]*transf[j,k]\n\n try:\n F.write( '%80.6f\\n'%self.energy )\n except:\n F.write( '\\n' )\n F.write( '!DATE\\n' )\n F.write( 'PBC %8.4f %8.4f %8.4f %8.4f %8.4f %8.4f\\n'%( a, b, c, alpha, beta, gamma ) )\n \n for i in range(self.num_atoms):\n F.write( '%2s %-13.9f %-13.9f %-13.9f CORE %4d %2s %2s %6.4f %4d\\n'%(\n self.species[i], positions[i,0], positions[i,1], positions[i,2],i,\n self.species[i], self.species[i], 0, i ) )\n F.write( 'end\\n' )\n F.write( 'end\\n' )", "def main(\n f0=0.996,\n psi0=0,\n th=0,\n material=None,\n logFileName=None):\n # np.seterr(all='raise')\n np.seterr(all='ignore')\n import os\n from mk.library.mk_lib import findStressOnYS\n from mk.library.lib import gen_tempfile, calcAlphaRho\n from mk_paths import constructBC,findCorrectPath\n import mk.materials.constitutive as constitutive\n import dill\n snapshot = constitutive.Snapshot()\n # from yf2 import wrapHill48\n\n print 'material:',material, type(material).__name__\n\n if type(material).__name__=='NoneType':\n print 'given material', material\n from materials import IsoMat\n matA = IsoMat()\n matB = IsoMat()\n elif type(material).__name__=='str':\n with open(material,'rb') as fo:\n matA = dill.load(fo)\n with open(material,'rb') as fo:\n matB = dill.load(fo)\n matA.set_hrd()\n matA.set_yld()\n matB.set_hrd()\n matB.set_yld()\n else:\n raise IOError, 'Unexpected case'\n # ## Should work on here to allow\n # ## both A and B materials are described using the\n # ## same constitutive model\n # matA = material\n # matB = 
material\n\n rad2deg = 180./np.pi\n deg2rad = 1./rad2deg\n\n stressA_off, dum1, dum2 = constructBC(\n epsAng = th,\n f_yld = matA.f_yld,\n verbose = False)\n\n ## put the stress on the locus\n matA.update_yld(stressA_off)\n np.set_printoptions(precision=3)\n print('stressA:'+('%7.3f'*6)%(\n matA.stress[0],matA.stress[1],matA.stress[2],\n matA.stress[3],matA.stress[4],matA.stress[5]))\n print('strainA:'+('%7.3f'*6)%(\n matA.dphi[0],matA.dphi[1],matA.dphi[2],\n matA.dphi[3],matA.dphi[4],matA.dphi[5]))\n alpha,rho = calcAlphaRho(matA.stress,matA.dphi)\n print('alpha: %7.4f'%alpha)\n print('rho : %7.4f'%rho)\n\n if type(logFileName).__name__=='NoneType':\n logFileName = gen_tempfile(\n prefix='mk-f0%3.3i-th%4.4i-psi%2.2i'%(\n int(f0*1e3),int(th),int(psi0)),\n affix='log')\n logFile = open(logFileName,'w')\n\n ## integrate for each path.\n absciss = 1e3\n absciss0 = 1e3\n nind = max([len(matA.logfn),len(matB.logfn)])+3\n print('Iteration over the given psi angle')\n head = (\n '%8s'*9+ ## variables\n ('%'+'%is'%nind)*2+ ## aLogFN and bLogFN\n '%'+'%is'%(len(snapshot.logfn)+3))%(\n 'epsRD','epsTD','psi0','psif','sigRD',\n 'sigTD','sigA','T','cmpt','aLogFN','bLogFN','ssFN')\n head = '%s\\n'%head\n logFile.write(head)\n t0 = time.time()\n\n ynew, absciss, xbb= onepath(\n matA=matA,matB=matB,\n psi0=psi0*deg2rad,f0=f0,\n T=absciss,snapshot=snapshot)\n\n matA.recordCurrentStat()\n matB.recordCurrentStat()\n\n dTime = time.time() - t0\n psif1 = xbb[0]\n\n cnt = (\n '%8.3f'*8+\n '%8i'+\n ('%'+'%is'%nind)*2+\n '%'+'%is'%(len(snapshot.logfn)+3))%(\n ynew[1],ynew[2],psi0,\n psif1*rad2deg,\n matA.stress[0],matA.stress[1],\n matA.sig, ## hardening (effective stress)\n absciss,dTime,matA.logfn,matB.logfn,snapshot.logfn)\n print(cnt)\n logFile.write(cnt+'\\n')\n uet(dTime,'total time spent');print('')\n logFile.close()\n print('%s has been saved'%logFileName)\n return logFileName,dTime, matA, matB", "def simulate_spectrum(file_name = \"bands_full.dat\", k0=0,kf= 0.576,Nk=200,E_max=0.5,E_min=-0.5,Ne=200,gamma_k=0.002,gamma=0.004,lambda_0=20,orbital=\"pz\",suffix=\"\"):\n\n #define energy and momentum domains\n dk = (kf-k0)/Nk\n momenta = np.linspace(k0, kf+dk, Nk)\n energies = np.linspace(E_min, E_max, Ne)\n\n #initialize spectral function A_final to zero\n A_final = []\n for i_k in range(len(momenta)):\n I_e = []\n for j_e in range(len(energies)):\n I_e.append(0)\n A_final.append(I_e)\n\n #compute all lorenztian functions\n all_lor = compute_functions(file_name,E_max,E_min,gamma,gamma_k)\n\n #evaluate all functions\n print \"Evaluating functions\"\n for func in all_lor:\n s = np.vectorize(func)\n A = s(momenta[:,None],energies[None,:])\n A_final += A\n\n #print output\n file_output = \"\"\"A_gammak_%(gamma_k)s_gammae_%(gamma)s_Nk_%(Nk)s_Ne_%(Ne)s_lambda_%(lambda_0)s_%(orbital)s%(suffix)s\"\"\"%locals()\n file = open(file_output,'w')\n for i in range(len(momenta)):\n for j in range(len(energies)):\n print >> file,momenta[i],energies[j],A_final[i][j]\n print >> file,\"\"\n file.close()\n\n return file_output", "def write_initdata(xy0, v0, NL, BND, h, beta, outdir):\n dio.ensure_dir(outdir)\n M = np.hstack((xy0, v0))\n np.savetxt(outdir + 'NL.txt', NL, fmt='%i', delimiter=',', header='NL (Neighbor List)')\n np.savetxt(outdir + 'BND.txt', BND, fmt='%i', header='BND (Boundary List)')\n np.savetxt(outdir + 'xyv0.txt', M, delimiter=',', header='xy0 (initial positions) v0 (initial velocities)')\n with open(outdir + 'h.txt', \"w\") as hfile:\n hfile.write(\"# h (time step) \\n{0:4f}\".format(h))\n if beta != 
'none':\n with open(outdir + 'beta.txt', \"w\") as betafile:\n betafile.write(\"# beta (damping coeff) \\n{0:4f}\".format(beta))", "def load_oned(self):\n self.od=self.data1(\"xgc.oneddiag.bp\") #actual reading routine\n self.od.psi=self.od.psi[0,:]\n self.od.psi00=self.od.psi00[0,:]\n try:\n self.od.psi00n=self.od.psi00/self.psix #Normalize 0 - 1(Separatrix)\n except:\n print(\"psix is not defined - call load_unitsm() to get psix to get psi00n\")\n # Temperatures\n try: \n Teperp=self.od.e_perp_temperature_df_1d\n except:\n print('No electron')\n self.electron_on=False\n else:\n self.electron_on=True\n Tepara=self.od.e_parallel_mean_en_df_1d #parallel flow ignored, correct it later\n self.od.Te=(Teperp+Tepara)/3*2\n Tiperp=self.od.i_perp_temperature_df_1d\n Tipara=self.od.i_parallel_mean_en_df_1d #parallel flow ignored, correct it later\n self.od.Ti=(Tiperp+Tipara)/3*2\n\n #ExB shear calculation\n if(self.electron_on):\n shear=self.od.d_dpsi(self.od.e_poloidal_ExB_flow_1d,self.od.psi_mks)\n self.od.grad_psi_sqr = self.od.e_grad_psi_sqr_1d\n else:\n shear=self.od.d_dpsi(self.od.i_poloidal_ExB_flow_1d,self.od.psi_mks)\n self.od.grad_psi_sqr = self.od.i_grad_psi_sqr_1d\n self.od.shear_r=shear * np.sqrt(self.od.grad_psi_sqr) # assuming electron full-f is almost homogeneouse\n\n if(self.electron_on):\n self.od.density = self.od.e_gc_density_df_1d\n else:\n self.od.density = self.od.i_gc_density_df_1d\n\n #gradient scale\n self.od.Ln = self.od.density / self.od.d_dpsi(self.od.density, self.od.psi_mks) / np.sqrt(self.od.grad_psi_sqr)\n self.od.Lti =self.od.Ti / self.od.d_dpsi(self.od.Ti , self.od.psi_mks) / np.sqrt(self.od.grad_psi_sqr)\n if(self.electron_on):\n self.od.Lte =self.od.Te / self.od.d_dpsi(self.od.Te , self.od.psi_mks) / np.sqrt(self.od.grad_psi_sqr)\n \n\n #find tmask\n d=self.od.step[1]-self.od.step[0]\n st=self.od.step[0]/d\n ed=self.od.step[-1]/d\n st=st.astype(int)\n ed=ed.astype(int)\n idx=np.arange(st,ed, dtype=int)\n\n self.od.tmask=idx #mem allocation\n for i in idx:\n tmp=np.argwhere(self.od.step==i*d)\n #self.od.tmask[i-st/d]=tmp[-1,-1] #LFS zero based, RHS last element\n self.od.tmask[i-st]=tmp[-1,-1] #LFS zero based, RHS last element", "def save(self, filename: str) -> None:\n assert self._species is not None, \"Must have a species to save\"\n\n if len(self._history) == 0:\n logger.warning(\"Optimiser did no steps. Not saving a trajectory\")\n return None\n\n atomic_symbols = self._species.atomic_symbols\n title_str = (\n f\" etol = {self.etol.to('Ha')} Ha\"\n f\" gtol = {self.gtol.to('Ha Å^-1')} Ha Å^-1\"\n f\" maxiter = {self._maxiter}\"\n )\n\n if os.path.exists(filename):\n logger.warning(f\"FIle {filename} existed. 
Overwriting\")\n open(filename, \"w\").close()\n\n for i, coordinates in enumerate(self._history):\n\n energy = coordinates.e\n cart_coords = coordinates.to(\"cartesian\").reshape((-1, 3))\n gradient = cart_coords.g.reshape((-1, 3))\n\n n_atoms = len(atomic_symbols)\n assert n_atoms == len(cart_coords) == len(gradient)\n\n with open(filename, \"a\") as file:\n print(\n n_atoms,\n f\"E = {energy} Ha\" + (title_str if i == 0 else \"\"),\n sep=\"\\n\",\n file=file,\n )\n\n for j, symbol in enumerate(atomic_symbols):\n x, y, z = cart_coords[j]\n dedx, dedy, dedz = gradient[j]\n\n print(\n f\"{symbol:<3}{x:10.5f}{y:10.5f}{z:10.5f}\"\n f\"{dedx:15.5f}{dedy:10.5f}{dedz:10.5f}\",\n file=file,\n )\n return None", "def statee(h):\n # Convert height to SI\n hsi = h*0.3048\n\n # Get data\n zsi, tsi, psi, dsi = statsi(hsi)\n\n # Convert back to English\n z = zsi/0.3048\n t = tsi*1.8\n p = psi*0.02088543\n d = dsi*0.001940320\n\n return z, t, p, d", "def hf_energy(hf_state, hamiltonian_sp):\n qpu = get_default_qpu()\n res = qpu.submit(hf_state.to_job(job_type=\"OBS\", observable=hamiltonian_sp))\n return res.value", "def create_e3d_file(self,path='./'):\n dt=0.606*self.model_parameters['dh']/np.max(self.velocity_model['vp']) # dt needs to satify the courant condition\n t=int(self.model_parameters['duration']/dt)\n \n # Check path exists, if not create one\n if not os.path.exists(path):\n os.makedirs(path)\n \n # Create e3d parameter file\n f=open('%s%s_e3dmodel.txt'%(path,self.model_name),'w')\n f.write(\"grid x=%s z=%s dh=%s b=2 q=1\\ntime dt=%0.5f t=%s\\n\"%(self.model_parameters['xmax'],self.model_parameters['zmax'],self.model_parameters['dh'],dt,t))\n f.write(\"block p=%s s=%s r=%s Q=20 Qf=50\\n\"%(self.velocity_model['vp'][0],self.velocity_model['vs'][0],self.velocity_model['rho'][0]))\n \n for i in range(1,len(self.velocity_model['vp'])-1):\n f.write(\"block p=%s s=%s r=%s z1=%s z2=%s Q=20 Qf=50\\n\"%(self.velocity_model['vp'][i],self.velocity_model['vs'][i],self.velocity_model['rho'][i],\n self.velocity_model['depth'][i],self.velocity_model['depth'][i+1]))\n \n f.write(\"block p=%s s=%s r=%s z1=%s z2=%s Q=20 Qf=50\\n\\n\"%(self.velocity_model['vp'][i+1],self.velocity_model['vs'][i+1],self.velocity_model['rho'][i+1],\n self.velocity_model['depth'][i+1],self.model_parameters['zmax'])) # extend to the based of the model \n \n f.write(\"visual movie=5\\n\\n\")\n\n if self.source['src_type']!=4:\n f.write(\"source type=%s x=%s z=%s freq=%s amp=%s\\n\\n\"%(self.source['src_type'],self.source['srcx'],self.source['srcz'],self.source['freq'],self.source['amp'])) \n else:\n f.write(\"source type=%s x=%s z=%s freq=%s amp=%s Mxx=%s Myy=%s Mzz=%s Mxy=%s Mxz=%s Myz=%s\\n\\n\"%(self.source['src_type'],self.source['srcx'],self.source['srcz'],self.source['freq'],self.source['amp'],self.source['mt'][0],self.source['mt'][1],self.source['mt'][2],self.source['mt'][3],self.source['mt'][4],self.source['mt'][5])) \n\n for r in range(len(self.receivers['recxs'])):\n f.write('sac x=%0.3f z=%0.3f file=%s\\n'%(self.receivers['recxs'][r],self.receivers['reczs'][r],self.model_name))\n\n f.write(\"visual sample=0.1 movie=1 scale=10000000000/n\")\n f.close()\n \n print('File created: %s%s_e3dmodel.txt'%(path,self.model_name))", "def haperfluxMany(inputlist, maplist, radius, rinner, router, galactic=True, decimal=True, noise_model=0):\n\n ## Names and frequencies of the sample maps included in this repo.\n\n freqlist = 
['30','44','70','100','143','217','353','545','857','1249','1874','2141','2998','3331','4612','4997','11992','16655','24983','24983','24983','33310']\n freqval = [28.405889, 44.072241,70.421396,100.,143.,217.,353.,545.,857.,1249.,1874.,2141.,2141.,2998.,2998.,3331.,4612.,4997.,11992.,16655.,24983.,24983.,24983.,33310.]\n band_names = [\"akari9\", \"dirbe12\",\"iras12\",\"wise12\",\"akari18\",\"iras25\",\"iras60\",\"akari65\",\"akari90\",\"dirbe100\",\"iras100\",\"akari140\",\"dirbe140\",\"akari160\",\"dirbe240\",\"planck857\", \"planck545\"]\n\n k0 = 1.0\n k1 = rinner/radius\n k2 = router/radius\n apcor = ((1 - (0.5)**(4*k0**2))-((0.5)**(4*k1**2) - (0.5)**(4*k2**2)))**(-1)\n\n # 'galactic' overrules 'decimal'\n if (galactic==True):\n dt=[('sname',np.dtype('S13')),('glon',np.float32),('glat',np.float32)]\n targets = np.genfromtxt(inputlist, delimiter=\",\",dtype=dt)\n\n ns = len(targets['glat'])\n\n fd3 = -1\n fd_err3 = -1\n\n fn = np.genfromtxt(maplist, delimiter=\" \", dtype='str')\n nmaps = len(fn)\n ## Initialize the arrays which will hold the results\n fd_all = np.zeros((ns,nmaps))\n fd_err_all = np.zeros((ns,nmaps))\n fd_bg_all = np.zeros((ns,nmaps))\n\n # Start the actual processing: Read-in the maps.\n for ct2 in range(0,nmaps):\n xtmp_data, xtmp_head = hp.read_map(fn[ct2], h=True, verbose=False, nest=False)\n freq = dict(xtmp_head)['FREQ']\n units = dict(xtmp_head)['TUNIT1']\n freq_str = str(freq)\n idx = freqlist.index(str(freq))\n currfreq = int(freq)\n\n if (radius == None):\n radval = fwhmlist[idx]\n else:\n radval = radius\n\n\n for ct in range(0,ns):\n\n glon = targets['glon'][ct]\n glat = targets['glat'][ct]\n\n fd_all[ct,ct2], fd_err_all[ct,ct2], fd_bg_all[ct,ct2] = \\\n haperflux(inmap= xtmp_data, freq= currfreq, lon=glon, lat=glat, aper_inner_radius=radius, aper_outer_radius1=rinner, \\\n aper_outer_radius2=router,units=units, noise_model=noise_model)\n\n if (np.isfinite(fd_err_all[ct,ct2]) == False):\n fd_all[ct,ct2] = -1\n fd_err_all[ct,ct2] = -1\n else:\n if radius==None:\n fd_all[ct,ct2] = fd_all[ct,ct2]*apcor\n fd_err_all[ct,ct2] = fd_err_all[ct,ct2]*apcor\n\n return fd_all, fd_err_all, fd_bg_all", "def hysteresis(T = 1, dimensions = 2, J = 1, filename = \"hist\", hmax = 2.5):\r\n h = np.linspace(-hmax, hmax, 100)\r\n \r\n #size of lattice\r\n N = 20\r\n \r\n #forward tabulated magnetisations and backward going\r\n Mforward = np.zeros(h.shape)\r\n Mbackward = np.zeros(h.shape)\r\n \r\n #initial lattice\r\n lattice = initialiser(N, dimensions = dimensions)\r\n \r\n #anneal lattice\r\n lattice = anneal(lattice, T, 20)\r\n\r\n #forward scan over different values of strength\r\n for i in range(len(h)):\r\n (m,e,l) = simulation(N, T, 200, lattice, h = h[i], nonabsmag=True,\\\r\n dimensions= dimensions, J = J)\r\n Mforward[i] = np.mean(m)\r\n lattice = l\r\n \r\n #backward scan over different values of strength \r\n for i in range(len(h)):\r\n index = len(h) - 1 - i\r\n (m,e,l) = simulation(N, T, 200, lattice, h = h[index], nonabsmag=True,\\\r\n dimensions = dimensions, J = J)\r\n Mbackward[index] = np.mean(m)\r\n lattice = l\r\n \r\n #plot data\r\n f = makeplot(h, [Mforward, Mbackward], [\"Increasing h\", \"Decreasing h\"],\\\r\n \"External field, h $[J]$\", \"Magnetisation\")\r\n f.show()\r\n f.savefig(filename+\".svg\")", "def get_data(filename):\n\n data = load(filename)\n\n density_factor = float(data.gas.densities.cosmo_factor.a_factor)\n temperature_factor = float(data.gas.temperatures.cosmo_factor.a_factor)\n\n number_density = (data.gas.densities * (density_factor 
/ mh)).to(cm ** -3)\n temperature = (data.gas.temperatures * temperature_factor).to(\"K\")\n metallicity = data.gas.metal_mass_fractions\n metallicity[metallicity < min_metallicity] = min_metallicity\n\n return number_density.value, temperature.value, np.log10(metallicity.value)", "def save_serendipity_dic(y, filename):\n store = pd.io.pytables.HDFStore(y)\n mat = store.matrix\n store.close()\n n = len(mat.columns)\n ser = 1 - mat.sum(axis=1) / n\n\n f = open(filename, \"w\")\n cPickle.dump(ser.to_dict(), f, protocol=2)\n f.close()", "def writeFiles(self, directory = \"./\"):\n self.mass = []\n self.zero = 0\n self.natoms = self.numMonomer\n self.nangles = 0\n self.ndihedrals = 0\n\n self.ntypes = 4\n\n # set masses of all beads to be 1\n # in principle, the mass of counterions and salt ions should be smaller\n # expect this difference will no matter in terms of complexation of polyelectrolytes\n for i in range(self.ntypes):\n self.mass.append(1)\n\n\n\n self.bdtypes = 1\n self.angtypes = 0\n self.dihtypes = 0\n self.improtypes = 0\n\n iFileLammpsName = directory + \"data.pe.la{0}.na{1}.lc{2}.nc{3}.rho{4}.r{5}.lammps\".\\\n format(self.lenPa, self.numPa, self.lenPc, self.numPc, self.volRatio, self.chargeRepeat)\n iFileLammps = open(iFileLammpsName, 'w')\n\n iFileXYZName = directory + \"data.pe.la{0}.na{1}.lc{2}.nc{3}.rho{4}.r{5}.xyz\".\\\n format(self.lenPa, self.numPa, self.lenPc, self.numPc, self.volRatio, self.chargeRepeat)\n iFileXYZ = open(iFileXYZName, 'w' )\n\n iFileXYZ.write(\"{0}\\n\".format(self.natoms))\n iFileXYZ.write(\"data.polyelectrolyte.xyz\\n\")\n\n iFileLammpsHeader = \"data file for mixtures of charged polymer chains\\n\" + \\\n \"\\n\" + \\\n \"{0:10d} atoms\\n\".format(self.natoms) + \\\n \"{0:10d} bonds\\n\".format(self.numBonds) + \\\n \"{0:10d} angles\\n\".format(self.nangles) + \\\n \"{0:10d} dihedrals\\n\".format(self.ndihedrals) + \\\n \"{0:10d} impropers\\n\".format(self.zero) + \\\n \"\\n\" +\\\n \"{0:10d} atom types\\n\".format(self.ntypes) + \\\n \"{0:10d} bond types\\n\".format(self.bdtypes) + \\\n \"{0:10d} angle types\\n\".format(self.angtypes) + \\\n \"{0:10d} dihedral types\\n\".format(self.dihtypes) + \\\n \"{0:10d} improper types\\n\".format(self.improtypes) + \\\n \"\\n\" + \\\n \" {0:16.8f} {1:16.8f} xlo xhi\\n\".format(self.lx, self.hx) + \\\n \" {0:16.8f} {1:16.8f} ylo yhi\\n\".format(self.ly, self.hy) + \\\n \" {0:16.8f} {1:16.8f} zlo zhi\\n\".format(self.lz, self.hz) + \\\n \"\\n\" + \\\n \"Masses\\n\" + \\\n \"\\n\"\n\n iFileLammps.write(iFileLammpsHeader)\n for i in range(self.ntypes):\n iFileLammps.write( \"{0} {1:8.3f}\\n\".format(i+1, self.mass[i]))\n\n iFileLammps.write(\"\\nAtoms\\n\\n\")\n \n \n\n for i in range(self.natoms):\n if self.atomsType[i] == 1 or self.atomsType[i] == 3:\n iFileXYZ.write(\"S {0} {1} {2}\\n\".format(self.atomsCoords[i][0], \\\n self.atomsCoords[i][1], \\\n self.atomsCoords[i][2]))\n elif self.atomsType[i] == 2:\n iFileXYZ.write(\"P {0} {1} {2}\\n\".format(self.atomsCoords[i][0], \\\n self.atomsCoords[i][1], \\\n self.atomsCoords[i][2]))\n elif self.atomsType[i] == 4:\n iFileXYZ.write(\"N {0} {1} {2}\\n\".format(self.atomsCoords[i][0], \\\n self.atomsCoords[i][1], \\\n self.atomsCoords[i][2]))\n elif self.atomsType[i] == 5:\n iFileXYZ.write(\"A {0} {1} {2}\\n\".format(self.atomsCoords[i][0], \\\n self.atomsCoords[i][1], \\\n self.atomsCoords[i][2]))\n elif self.atomsType[i] == 6:\n iFileXYZ.write(\"C {0} {1} {2}\\n\".format(self.atomsCoords[i][0], \\\n self.atomsCoords[i][1], \\\n self.atomsCoords[i][2]))\n elif 
self.atomsType[i] == 7:\n iFileXYZ.write(\"I {0} {1} {2}\\n\".format(self.atomsCoords[i][0], \\\n self.atomsCoords[i][1], \\\n self.atomsCoords[i][2]))\n elif self.atomsType[i] == 8:\n iFileXYZ.write(\"K {0} {1} {2}\\n\".format(self.atomsCoords[i][0], \\\n self.atomsCoords[i][1], \\\n self.atomsCoords[i][2]))\n\n iFileLammps.write(\"{0} {1} {2} {3} {4} {5} {6}\\n\".format(i+1, \\\n self.molId[i], \\\n self.atomsType[i], \\\n self.atomsCharge[i], \\\n self.atomsCoords[i][0], \\\n self.atomsCoords[i][1], \\\n self.atomsCoords[i][2]))\n\n iFileLammps.write(\"\\nBonds\\n\\n\")\n for i in range(self.numBonds):\n iFileLammps.write(\"{0} 1 {1} {2}\\n\".format(i+1, self.bondList[i][0], self.bondList[i][1]))\n\n iFileXYZ.close()\n iFileLammps.close()", "def edf_gaze_data_to_hdf(self, \n\t\t\talias = None, \n\t\t\twhich_eye = 0, \n\t\t\tpupil_hp = 0.01, \n\t\t\tpupil_lp = 6,\n\t\t\tsample_rate = 1000.,\n\t\t\tminimal_frequency_filterbank = 0.0025, \n\t\t\tmaximal_frequency_filterbank = 0.1, \n\t\t\tnr_freq_bins_filterbank = 9, \n\t\t\tn_cycles_filterbank = 1, \n\t\t\tcycle_buffer_filterbank = 3,\n\t\t\ttf_decomposition_filterbank ='lp_butterworth' \n\t\t\t):\n\t\t\n\t\t# shell()\n\t\t\n\t\tif not hasattr(self, 'edf_operator'):\n\t\t\tself.add_edf_file(edf_file_name = alias)\n\t\t\n\t\tif alias == None:\n\t\t\talias = os.path.split(self.edf_operator.inputFileName)[-1]\n\t\tself.logger.info('Adding gaze data from %s to group %s to %s' % (os.path.split(self.edf_operator.inputFileName)[-1], alias, self.input_object))\n\t\t\n\t\t#\n\t\t#\tgaze data in blocks\n\t\t#\n\t\twith pd.get_store(self.input_object) as h5_file:\n\t\t\t# shell()\n\t\t\t# recreate the non-gaze data for the block, that is, its sampling rate, eye of origin etc.\n\t\t\tblocks_data_frame = pd.DataFrame([dict([[i,self.edf_operator.blocks[j][i]] for i in self.edf_operator.blocks[0].keys() if i not in ('block_data', 'data_columns')]) for j in range(len(self.edf_operator.blocks))])\n\t\t\th5_file.put(\"/%s/blocks\"%alias, blocks_data_frame)\n\t\t\t\n\t\t\t# gaze data per block\n\t\t\tif not 'block_data' in self.edf_operator.blocks[0].keys():\n\t\t\t\tself.edf_operator.take_gaze_data_for_blocks()\n\t\t\tfor i, block in enumerate(self.edf_operator.blocks):\n\t\t\t\tbdf = pd.DataFrame(block['block_data'], columns = block['data_columns'])\n\t\t\t\n\t\t\t\t#\n\t\t\t\t# preprocess pupil:\n\t\t\t\t#\n\t\t\t\tfor eye in blocks_data_frame.eye_recorded[i]: # this is a string with one or two letters, 'L', 'R' or 'LR'\n\t\t\t\t# create dictionary of data per block:\n\t\t\t\t\tgazeX = bdf[eye+'_gaze_x']\n\t\t\t\t\tgazeY = bdf[eye+'_gaze_y']\n\t\t\t\t\tpupil = bdf[eye+'_pupil']\n\t\t\t\t\teye_dict = {'timepoints':bdf.time, 'gaze_X':gazeX, 'gaze_Y':gazeY, 'pupil':pupil,}\n\t\t\t\t\t\n\t\t\t\t\t# create instance of class EyeSignalOperator, and include the blink data as detected by the Eyelink 1000:\n\t\t\t\t\tif hasattr(self.edf_operator, 'blinks_from_message_file'):\n\t\t\t\t\t\tblink_dict = self.read_session_data(alias, 'blinks_from_message_file')\n\t\t\t\t\t\tblink_dict[blink_dict['eye'] == eye]\n\t\t\t\t\t\tsac_dict = self.read_session_data(alias, 'saccades_from_message_file')\n\t\t\t\t\t\tsac_dict[sac_dict['eye'] == eye]\n\t\t\t\t\t\teso = EyeSignalOperator(input_object=eye_dict, eyelink_blink_data=blink_dict,sample_rate=sample_rate, eyelink_sac_data = sac_dict)\n\t\t\t\t\telse:\n\t\t\t\t\t\teso = EyeSignalOperator(input_object=eye_dict,sample_rate=sample_rate)\n\t\n\t\t\t\t\t# interpolate 
blinks:\n\t\t\t\t\teso.interpolate_blinks(method='linear')\n\t\t\t\t\teso.interpolate_blinks2()\n\n\t\t\t\t\t# low-pass and band-pass pupil data:\n\t\t\t\t\teso.filter_pupil(hp=pupil_hp, lp=pupil_lp)\n\n\t\t\t\t\t# regress blink and saccade responses\n\t\t\t\t\teso.regress_blinks()\n\n\t\t\t\t\tfor dt in ['lp_filt_pupil','lp_filt_pupil_clean','bp_filt_pupil','bp_filt_pupil_clean']:\n\t\t\t\t\t\t# percent signal change filtered pupil data:\n\t\t\t\t\t\teso.percent_signal_change_pupil(dtype=dt)\n\t\t\t\t\t\teso.zscore_pupil(dtype=dt)\n\t\t\t\t\t\teso.dt_pupil(dtype=dt)\n\t\t\t\t\t\n\t\t\t\t\t# add to existing dataframe:\n\t\t\t\t\tbdf[eye+'_pupil_int'] = eso.interpolated_pupil\n\t\t\t\t\tbdf[eye+'_pupil_hp'] = eso.hp_filt_pupil\n\t\t\t\t\tbdf[eye+'_pupil_lp'] = eso.lp_filt_pupil\n\n\t\t\t\t\tbdf[eye+'_pupil_lp_psc'] = eso.lp_filt_pupil_psc\n\t\t\t\t\tbdf[eye+'_pupil_lp_diff'] = np.concatenate((np.array([0]),np.diff(eso.lp_filt_pupil)))\n\t\t\t\t\tbdf[eye+'_pupil_bp'] = eso.bp_filt_pupil\n\t\t\t\t\tbdf[eye+'_pupil_bp_dt'] = eso.bp_filt_pupil_dt\n\t\t\t\t\tbdf[eye+'_pupil_bp_zscore'] = eso.bp_filt_pupil_zscore\n\t\t\t\t\tbdf[eye+'_pupil_bp_psc'] = eso.bp_filt_pupil_psc\n\t\t\t\t\tbdf[eye+'_pupil_baseline'] = eso.baseline_filt_pupil\n\n\t\t\t\t\tbdf[eye+'_gaze_x_int'] = eso.interpolated_x\n\t\t\t\t\tbdf[eye+'_gaze_y_int'] = eso.interpolated_y\n\n\t\t\t\t\t# blink/saccade regressed versions\n\t\t\t\t\tbdf[eye+'_pupil_lp_clean'] = eso.lp_filt_pupil_clean\n\t\t\t\t\tbdf[eye+'_pupil_lp_clean_psc'] = eso.lp_filt_pupil_clean_psc\n\t\t\t\t\tbdf[eye+'_pupil_lp_clean_zscore'] = eso.lp_filt_pupil_clean_zscore\n\t\t\t\t\tbdf[eye+'_pupil_bp_clean'] = eso.bp_filt_pupil_clean\n\t\t\t\t\tbdf[eye+'_pupil_bp_clean_psc'] = eso.bp_filt_pupil_clean_psc\n\t\t\t\t\tbdf[eye+'_pupil_bp_clean_zscore'] = eso.bp_filt_pupil_clean_zscore\n\t\t\t\t\n\t\t\t\t\t# plot interpolated pupil time series:\n\t\t\t\t\tfig = pl.figure(figsize = (16, 2.5))\n\t\t\t\t\tx = np.linspace(0,eso.raw_pupil.shape[0]/sample_rate, eso.raw_pupil.shape[0])\n\t\t\t\t\tpl.plot(x, eso.raw_pupil, 'b', rasterized=True)\n\t\t\t\t\tpl.plot(x, eso.interpolated_pupil, 'g', rasterized=True)\n\t\t\t\t\tpl.ylabel('pupil size (raw)')\n\t\t\t\t\tpl.xlabel('time (s)')\n\t\t\t\t\tpl.legend(['raw', 'int + filt'])\n\t\t\t\t\tfig.savefig(os.path.join(os.path.split(self.input_object)[0], 'blink_interpolation_1_{}_{}_{}.pdf'.format(alias, i, eye)))\n\t\t\t\t\t\n\t\t\t\t\t# plot results blink detection next to hdf5:\n\t\t\t\t\tfig = pl.figure(figsize = (16, 2.5))\n\t\t\t\t\tpl.plot(eso.pupil_diff, rasterized=True)\n\t\t\t\t\tpl.plot(eso.peaks, eso.pupil_diff[eso.peaks], '+', mec='r', mew=2, ms=8, rasterized=True)\n\t\t\t\t\tpl.ylim(ymin=-200, ymax=200)\n\t\t\t\t\tpl.ylabel('diff pupil size (raw)')\n\t\t\t\t\tpl.xlabel('samples')\n\t\t\t\t\tfig.savefig(os.path.join(os.path.split(self.input_object)[0], 'blink_interpolation_2_{}_{}_{}.pdf'.format(alias, i, eye)))\n\n\t\t\t\t\t# try time-frequency decomposition of the baseline signal\n\t\t\t\t\ttry:\n\t\t\t\t\t\teso.time_frequency_decomposition_pupil(\n\t\t\t\t\t\t\t\tminimal_frequency = minimal_frequency_filterbank, \n\t\t\t\t\t\t\t\tmaximal_frequency = maximal_frequency_filterbank, \n\t\t\t\t\t\t\t\tnr_freq_bins = nr_freq_bins_filterbank, \n\t\t\t\t\t\t\t\tn_cycles = n_cycles_filterbank, \n\t\t\t\t\t\t\t\tcycle_buffer = cycle_buffer_filterbank,\n\t\t\t\t\t\t\t\ttf_decomposition=tf_decomposition_filterbank,\n\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\tself.logger.info('Performed T-F analysis of type 
%s'%tf_decomposition_filterbank)\n\t\t\t\t\t\tfor freq in eso.band_pass_filter_bank_pupil.keys():\n\t\t\t\t\t\t\tbdf[eye+'_pupil_filterbank_bp_%2.5f'%freq] = eso.band_pass_filter_bank_pupil[freq]\n\t\t\t\t\t\t\tself.logger.info('Saved T-F analysis %2.5f'%freq)\n\t\t\t\t\texcept:\n\t\t\t\t\t\tself.logger.error('Something went wrong with T-F analysis of type %s'%tf_decomposition_filterbank)\n\t\t\t\t\t\tpass\n\t\t\t\t\t\n\t\t\t\t# put in HDF5:\n\t\t\t\th5_file.put(\"/%s/block_%i\"%(alias, i), bdf)", "def test_densities():\n\n actual, r, wt = GridGenerator.make_grid(400)\n grid = 4*pi*r**2*wt\n\n data = AtomData()\n\n print(\"\\nINTEGRATED DENSITY TEST\")\n print(\"=======================\")\n for a in list(data.nuclear_charge.keys()):\n atom = Atom(a)\n Nel = data.electron_count[a]\n d0, d1, g0, g1, t0, t1, l0, l1 = atom.get_densities(r)\n\n # Count electrons per spin channel\n s_occ = AtomData.s_occ.get(a, [0, 0])\n p_occ = AtomData.p_occ.get(a, [0, 0])\n d_occ = AtomData.d_occ.get(a, [0, 0])\n f_occ = AtomData.f_occ.get(a, [0, 0])\n nela = np.sum(s_occ[0])+np.sum(p_occ[0])+np.sum(d_occ[0])+np.sum(f_occ[0])\n nelb = np.sum(s_occ[1])+np.sum(p_occ[1])+np.sum(d_occ[1])+np.sum(f_occ[1])\n assert(nela+nelb == Nel)\n\n id0 = np.dot(d0, grid)\n id1 = np.dot(d1, grid)\n\n diff_0 = id0 - nela\n percent_diff_0 = 100*diff_0/nela\n\n # Check to catch for Hydrogen having no beta electrons\n if nelb > 0.0:\n diff_1 = id1 - nelb\n percent_diff_1 = 100*diff_1/nelb\n else:\n diff_1 = 0.0\n percent_diff_1 = 0.0\n\n print(\"{:>3} - N_0 = ({:4.1f}) {:+2.6e}%, N_1 = ({:4.1f}) {:+2.6e}%, {:}\".format(a, id0, percent_diff_0, id1, percent_diff_1, \"PASSED\" if max(abs(diff_0), abs(diff_1)) < 1e-4 else \"FAILED - \"))\n\n print(\"\\nINTEGRATED KINETIC TEST\")\n print(\"=======================\")\n for a in list(data.ke_test.keys()):\n atom = Atom(a)\n t_bm = data.ke_test[a]\n d0, d1, g0, g1, t0, t1, l0, l1 = atom.get_densities(r)\n\n it0 = np.dot(t0, grid)\n it1 = np.dot(t1, grid)\n itot = it0 + it1\n\n diff = itot - t_bm\n print(\"{:>3} - T = {:+.6e}%, {:}\".format(a, 100*diff/t_bm, \"PASSED\" if abs(100*diff/t_bm) < 1e-2 else \"FAILED - \"))\n\n\n # The integral of the Laplacian over all space should be 0. Check that.\n print(\"\\nINTEGRATED LAPLACIAN TEST\")\n print(\"=========================\")\n for a in list(AtomData.ke_test.keys()):\n atom = Atom(a)\n\n d0, d1, g0, g1, t0, t1, l0, l1 = atom.get_densities(r)\n\n il0 = np.dot(grid, l0)\n il1 = np.dot(grid, l1)\n print(\"{:>3} - L_0 = {:+.6e}, L_1 = {:+.6e}, {:}\".format(a, il0, il1, \"PASSED\" if max(abs(il0), abs(il1)) < 1e-6 else \"FAILED - \"))\n\n\n print(\"\\nFINITE DIFFERENCE GRADIENT TEST\")\n print(\"===============================\")\n print(\"Testing gradient evaluation function against finite difference estimate...\")\n ne = Atom(\"Ne\") # Let's use \"the guvnor\"\n # We only need to test a few points around the core\n fdh = 1e-8\n fdr = np.arange(0.9, 0.9+fdh*10, fdh)\n d0, d1, g0, g1, t0, t1, l0, l1 = ne.get_densities(fdr)\n\n # First the first central difference\n fdiff = (d0[2:]-d0[:-2])/(2*fdh) # Construct the central difference\n if np.allclose(fdiff, g0[1:-1], atol=1e-1): # finite difference is not perfect, so lenient tollerance\n print(\"Gradient: PASSED\")\n else:\n print(\"Gradient: FAILED -\")\n\n print(\"\\nELEMENT COLOR FUNCTIONS TEST\")\n print(\"===========================\")\n test_obj = [Atom(\"H\"), Atom(\"C\"), Atom(\"O\")]\n test_str = [\"H\", \"C\", \"O\"]\n ref = np.array([[1., 1., 1.], [0.565, 0.565, 0.565], [1. 
, 0.051, 0.051]])\n\n if np.allclose( np.array(get_colors_for_elements(test_obj)), ref):\n print(\"\\nColor from objects: PASSED\")\n else:\n print(\"\\nColor from objects: FAILED -\")\n\n if np.allclose( np.array(get_colors_for_elements(test_str)), ref):\n print(\"Color from strings: PASSED\")\n else:\n print(\"Color from strings: FAILED -\")\n\n if HAVE_LIBXC:\n test_functional='GGA_X_PBE'\n print(\"\\nATOMIC EXCHANGE ENERGIES WITH {}\".format(test_functional))\n print(\"============================================\")\n for a in list(data.ke_test.keys()):\n atom = Atom(a)\n nE, vrho, vsigma, vtau, vlapl = atom.libxc_eval(r, functional=test_functional, restricted=False)\n Exc = (np.dot(nE, grid)).item()\n print('{:3s} {:.10f}'.format(a, Exc))\n else:\n print(\"\\nNot doing energy calculations due to lack of libxc.\\n\")", "def density(self):\n return (1e-3*self.molar_mass) * self.pressure / (gas_constant * self.temperature) # kg/m^3", "def getTOD(self,i,d):\n output_filename = 'Output_Fits/{}'.format( d.filename.split('/')[-1])\n if os.path.exists(output_filename):\n os.remove(output_filename)\n\n tod_shape = d['level2/averaged_tod'].shape\n dset = d['level2/averaged_tod']\n tod_in = np.zeros((tod_shape[1],tod_shape[2],tod_shape[3]),dtype=dset.dtype)\n az = np.zeros((tod_shape[3]),dtype=dset.dtype)\n el = np.zeros((tod_shape[3]),dtype=dset.dtype)\n\n feeds = d['level1/spectrometer/feeds'][:]\n scan_edges = d['level2/Statistics/scan_edges'][...]\n\n todall = np.zeros((len(self.FeedIndex), self.datasizes[i])) \n weights = np.zeros((len(self.FeedIndex), self.datasizes[i])) \n\n # Read in data from each feed\n for index, ifeed in enumerate(self.FeedIndex[:]):\n\n dset.read_direct(tod_in,np.s_[ifeed:ifeed+1,:,:,:])\n d['level1/spectrometer/pixel_pointing/pixel_az'].read_direct(az,np.s_[ifeed:ifeed+1,:])\n d['level1/spectrometer/pixel_pointing/pixel_el'].read_direct(el,np.s_[ifeed:ifeed+1,:])\n\n # Statistics for this feed\n medfilt_coefficient = d['level2/Statistics/filter_coefficients'][ifeed,...]\n atmos = d['level2/Statistics/atmos'][ifeed,...]\n atmos_coefficient = d['level2/Statistics/atmos_coefficients'][ifeed,...]\n wnoise_auto = d['level2/Statistics/wnoise_auto'][ifeed,...]\n\n # then the data for each scan\n last = 0\n for iscan,(start,end) in enumerate(scan_edges):\n median_filter = d['level2/Statistics/FilterTod_Scan{:02d}'.format(iscan)][ifeed,...]\n N = int((end-start)//self.offsetLen * self.offsetLen)\n end = start+N\n tod = tod_in[...,start:end]\n\n # Subtract atmospheric fluctuations per channel\n for iband in range(4):\n for ichannel in range(64):\n if self.channelmask[ifeed,iband,ichannel] == False:\n amdl = Statistics.AtmosGroundModel(atmos[iband,iscan],az[start:end],el[start:end]) *\\\n atmos_coefficient[iband,ichannel,iscan,0]\n tod[iband,ichannel,:] -= median_filter[iband,:N] * medfilt_coefficient[iband,ichannel,iscan,0]\n tod[iband,ichannel,:] -= amdl\n tod[iband,ichannel,:] -= np.nanmedian(tod[iband,ichannel,:])\n tod /= self.calfactors[ifeed,:,:,None] # Calibrate to Jupiter temperature scale\n\n # Then average together the channels\n wnoise = wnoise_auto[:,:,iscan,:]\n channels = (self.channelmask[ifeed].flatten() == False)\n channels = np.where((channels))[0]\n\n tod = np.reshape(tod,(tod.shape[0]*tod.shape[1], tod.shape[2]))\n wnoise = np.reshape(wnoise,(wnoise.shape[0]*wnoise.shape[1], wnoise.shape[2]))\n\n nancheck = np.sum(tod[channels,:],axis=1)\n channels = channels[np.isfinite(nancheck) & (nancheck != 0)]\n nancheck = np.sum(wnoise[channels,:],axis=1)\n 
channels = channels[np.isfinite(nancheck) & (nancheck != 0)]\n\n\n top = np.sum(tod[channels,:]/wnoise[channels,:]**2,axis=0)\n bot = np.sum(1/wnoise[channels,:]**2)\n\n todall[index,last:last+N] = top/bot\n weights[index,last:last+N] = bot\n last += N\n\n return todall, weights", "def adiabatic(dict_energy,filename):\n #here we create the adiabatic map with phi and psi and values of energy extracted\n #from dict_energy\n #Here I show an adiabatic plot with interpolation. The energy are rescaled so the min energy = 0\n phi = []\n psi = []\n z = []\n #Collect the value of phi and psi\n for key in dict_energy:\n val_phi = (key[0])\n val_psi = (key[1])\n phi.append(val_phi)\n psi.append(val_psi)\n z.append((dict_energy[key])) #kcal/mol\n\n #let's keep the relative energies with min = 0.0\n energies = []\n min_en = min(z)\n for elem in z:\n scaled_en = elem - min_en\n energies.append(scaled_en)\n #let's create all the arrays\n phi = asarray(phi)\n psi = asarray(psi)\n energies = asarray(energies)\n #now create the meshgrid\n #here we span from phi min (-180 ) to max ( 180 ) with 100 points, this mean\n #we are binning 3.6 degrees, is it right?\n xi,yi = linspace(phi.min(),phi.max(),100), linspace(psi.min(),psi.max(),100)\n xi,yi = meshgrid(xi,yi)\n\n # Interpolate; there's also method='cubic' or Rbf for 2-D data such as here\n #rbf = scipy.interpolate.Rbf(x, y, z, function='linear')\n #zi = rbf(xi, yi)\n zi = interpolate.griddata((phi, psi), energies, (xi, yi), method='linear')\n\n fig, ax = plt.subplots(figsize=(15,10))\n cmap=\"plasma\"\n cax = ax.imshow(zi, vmin=energies.min(), vmax=energies.max(), origin='lower',\n extent=[phi.min(), phi.max(), psi.min(), psi.max()],aspect=\"auto\",\\\n cmap=cmap )\n #17 is a good cutoff value\n ax.set_xlabel(\"$\\Phi$\",fontsize=20)\n ax.set_ylabel(\"$\\Psi$\",fontsize=20)\n ax.tick_params(axis='x', labelsize=20)\n ax.tick_params(axis='y', labelsize=20)\n cbar = fig.colorbar(cax)\n cbar.ax.set_ylabel(\"Energy / kcal $\\cdot$ mol$^{-1}$\",fontsize=20)\n cbar.ax.tick_params(labelsize=20)\n\n plt.savefig(filename,dpi=600,transparent=True)" ]
[ "0.56314754", "0.55677277", "0.55518115", "0.53848726", "0.5382426", "0.53711474", "0.5332129", "0.53105676", "0.52964735", "0.52770823", "0.5270333", "0.52593464", "0.524486", "0.52432036", "0.5242412", "0.5241068", "0.5220861", "0.52199006", "0.5214131", "0.52032506", "0.51795834", "0.51341903", "0.51300573", "0.5125639", "0.5102619", "0.5098203", "0.509076", "0.5080162", "0.50714326", "0.5068771", "0.5067285", "0.50587153", "0.50506335", "0.50506085", "0.50482506", "0.50456876", "0.50406426", "0.50375134", "0.50342417", "0.5025773", "0.5021481", "0.50177145", "0.5014926", "0.4991094", "0.49887067", "0.49886355", "0.49862146", "0.4984268", "0.4975869", "0.4966099", "0.49657673", "0.4960136", "0.4957115", "0.49380332", "0.4929429", "0.49228677", "0.4920521", "0.49146685", "0.491087", "0.48972645", "0.4897121", "0.48913503", "0.48909047", "0.48901165", "0.4884414", "0.48815942", "0.4872692", "0.48711175", "0.48668697", "0.4859181", "0.48586684", "0.4856794", "0.4855817", "0.48526675", "0.48504105", "0.48476112", "0.48473644", "0.48430052", "0.48416796", "0.48338637", "0.48301387", "0.48259032", "0.4825048", "0.48205617", "0.48181182", "0.4803036", "0.4801756", "0.48006564", "0.4798054", "0.47931415", "0.47931013", "0.47898045", "0.47865424", "0.47832388", "0.47801885", "0.47801474", "0.47773784", "0.4773522", "0.47713816", "0.47695315", "0.4769171" ]
0.0
-1
Use arpack to calculate the local density of states at a certain energy
def ldos_arpack(intra,num_wf=10,robust=False,tol=0,e=0.0,delta=0.01):
    if robust: # go to the imaginary axis for stability
        eig,eigvec = slg.eigs(intra,k=int(num_wf),which="LM",
                              sigma=e+1j*delta,tol=tol)
        eig = eig.real # real part only
    else: # Hermitic Hamiltonian
        eig,eigvec = slg.eigsh(intra,k=int(num_wf),which="LM",sigma=e,tol=tol)
    d = np.array([0.0 for i in range(intra.shape[0])]) # initialize
    for (v,ie) in zip(eigvec.transpose(),eig): # loop over wavefunctions
        v2 = (np.conjugate(v)*v).real # square of wavefunction
        fac = delta/((e-ie)**2 + delta**2) # factor to create a delta
        d += fac*v2 # add contribution
#    d /= num_wf # normalize
    d /= np.pi # normalize
    return d
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_density(\n self,\n states: FlowFieldMap,\n additional_states: FlowFieldMap,\n ) -> FlowFieldVal:\n zz = additional_states.get('zz', [tf.constant(0, dtype=TF_DTYPE)] *\n self._params.nz)\n\n if 'T' in states:\n t = states['T']\n elif 'theta' in states:\n t = self._potential_temperature_to_temperature(states['theta'], zz)\n else:\n raise ValueError(\n 'Either temperature or potential temperature is required for the '\n 'ideal gas law.'\n )\n\n scalars = {\n sc_name: thermodynamics_utils.regularize_scalar_bound(states[sc_name])\n for sc_name in self._molecular_weights.keys()\n if sc_name != INERT_SPECIES\n }\n\n if scalars:\n scalars.update({\n INERT_SPECIES:\n thermodynamics_utils.compute_ambient_air_fraction(scalars)\n })\n sc_reg = thermodynamics_utils.regularize_scalar_sum(scalars)\n else:\n sc_reg = {\n INERT_SPECIES: [\n tf.ones_like(sc_i, dtype=TF_DTYPE)\n for sc_i in list(states.values())[0]\n ]\n }\n\n mixture_molecular_weight = (\n thermodynamics_utils.compute_mixture_molecular_weight(\n self._molecular_weights, sc_reg))\n\n return [\n self.density_by_ideal_gas_law(p_i, R_U / w_mix_i, t_i)\n for p_i, w_mix_i, t_i in zip(\n self.p_ref(zz, additional_states), mixture_molecular_weight, t)\n ]", "def density_of_state_plot(N=400,a=1.0,eita=0.01):\n foot_step=2*np.pi/N\n k=np.arange(0.0,2*np.pi/a,foot_step)\n Ek=band_energy(k)\n E=np.arange(-3.0,3.0,0.01)\n Ek.shape=(N,1)\n E.shape=(1,600)\n \"\"\"Reshape E and Ek series with broadcasting method.\"\"\"\n dirac_function=np.imag(np.true_divide(1/np.pi,np.subtract(E-Ek,1j*eita)))\n D=np.sum(np.true_divide(dirac_function,N),axis=0)\n \"\"\"Calculate the density of state with lorentzian broadenning method.\"\"\" \n E.shape=(600)\n plt.plot(D,E)", "def getDensityOfStates(self, Elist):\n\t\tpass", "def main():\n N = 201 # Amount of gridpoints, odd number to include 0\n L = 10 # Size of the system\n k = 50 # Amount of energies and states calculated\n x = y = np.linspace(-L/2,L/2,N) # Gridpoints\n h = x[1]-x[0] # Spacing of gridpoints\n\n # Solve the system with and without perturbation\n E,psi,E_p,psi_p = fdm_2d(N,L,x,y,h,k)\n\n # Print the first two energies and the absolute error of the energies\n print('Energies of the two lowest states:')\n print('E_00 = %.4f' % E[0])\n print('E_01 = %.4f' % E[1], '\\n')\n print('Absolute error for E_00: %.4e' % np.abs(E[0]-1))\n print('Absolute error for E_01: %.4e' % np.abs(E[1]-2))\n\n print('\\nEnergies of the two lowest states after perturbation:')\n print('E_00 = %.4f' % E_p[0])\n print('E_01 = %.4f' % E_p[1])\n\n # Calculate the normalized densities of the states\n densities_norm = np.zeros((k,N,N))\n densities_norm_p = np.zeros((k,N,N))\n for i in range(k):\n # meshgrid form\n state = np.reshape(psi.T[i],(N,N))\n state_p = np.reshape(psi_p.T[i],(N,N))\n densities_norm[i] = normalized_density(state,x)\n densities_norm_p[i] = normalized_density(state_p,x)\n\n # Analytical solution of the ground state\n X,Y = np.meshgrid(x,y)\n psi00_exact = phi(X,0)*phi(Y,0)\n psi00_exact_density = normalized_density(psi00_exact,x)\n\n print('\\nMaximum absolute error of the normalized ground state densities of the unperturbated system:')\n print('errmax = {:.4e}'.format(np.max(np.abs(densities_norm[0]-psi00_exact_density))))\n\n # Plotting the ground state density of the unperturbated system\n fig1 = plt.figure(figsize=plt.figaspect(0.5))\n ax = fig1.add_subplot(1,2,1,projection='3d')\n surf1 = ax.plot_surface(X, Y, densities_norm[0], cmap=cm.coolwarm,\n linewidth=0, antialiased=False)\n 
ax.set_xlabel('X')\n ax.set_ylabel('Y')\n fig1.suptitle(r'Normalized ground state density $|\\psi|^2$ using FDM')\n ax = fig1.add_subplot(1,2,2)\n ax.imshow(densities_norm[0],extent=[-L/2,L/2,-L/2,L/2])\n plt.savefig(os.path.join(path,'FDM_psi00_unperturbated.png'))\n plt.close()\n\n\n fig2 = plt.figure(figsize=plt.figaspect(0.5))\n ax = fig2.add_subplot(1,2,1,projection='3d')\n surf2 = ax.plot_surface(X, Y, psi00_exact_density, cmap=cm.coolwarm,\n linewidth=0, antialiased=False)\n\n\n # Plotting the ground state density of the perturbated system\n fig1 = plt.figure(figsize=plt.figaspect(0.5))\n ax = fig1.add_subplot(1,2,1,projection='3d')\n surf1 = ax.plot_surface(X, Y, densities_norm_p[0], cmap=cm.coolwarm,\n linewidth=0, antialiased=False)\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n fig1.suptitle(r'Normalized ground state density $|\\psi|^2$ of the perturbated system using FDM')\n ax = fig1.add_subplot(1,2,2)\n ax.imshow(densities_norm_p[0],extent=[-L/2,L/2,-L/2,L/2])\n plt.savefig(os.path.join(path,'FDM_psi00_perturbated.png'))\n plt.close()\n\n fig2 = plt.figure(figsize=plt.figaspect(0.5))\n ax = fig2.add_subplot(1,2,1,projection='3d')\n surf2 = ax.plot_surface(X, Y, psi00_exact_density, cmap=cm.coolwarm,\n linewidth=0, antialiased=False)\n\n # Plotting the analytic ground state density\n fig1 = plt.figure(figsize=plt.figaspect(0.5))\n ax = fig1.add_subplot(1,2,1,projection='3d')\n surf1 = ax.plot_surface(X, Y, psi00_exact_density, cmap=cm.coolwarm,\n linewidth=0, antialiased=False)\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n fig1.suptitle(r'Analytical normalized ground state density $|\\psi|^2$')\n ax = fig1.add_subplot(1,2,2)\n ax.imshow(psi00_exact_density,extent=[-L/2,L/2,-L/2,L/2])\n plt.savefig(os.path.join(path,'exact_psi00.png'))\n plt.close()\n\n # Plot some of the other densities and save them as pdf\n for i in range(1,20):\n density = densities_norm[i]\n fig = plt.figure(figsize=plt.figaspect(0.5))\n plt.imshow(density,extent=[-L/2,L/2,-L/2,L/2])\n plt.title('n={}'.format(i))\n plt.savefig(os.path.join(path,'FDM_unperturbated{}.png'.format(i)))\n plt.close()\n\n density_p = densities_norm_p[i]\n fig = plt.figure(figsize=plt.figaspect(0.5))\n plt.imshow(density_p,extent=[-L/2,L/2,-L/2,L/2])\n plt.title('n={}'.format(i))\n plt.savefig(os.path.join(path,'FDM_perturbated{}.png'.format(i)))\n plt.close() \n\n # Plot analytical states until nx,ny = 5\n for nx in range(6):\n for ny in range(6):\n state = phi(X,nx)*phi(Y,ny)\n density = normalized_density(state,x)\n plt.figure()\n plt.imshow(density,extent=[-L/2,L/2,-L/2,L/2])\n plt.title('$n_x={}, n_y={}$'.format(nx,ny))\n plt.savefig(os.path.join(path,'analytical_state_{}_{}.png'.format(nx,ny)))\n plt.close()\n\n # Get analytical energies from nx,ny = 0 to 10\n n = 10\n energies = analytical_energies(n)\n\n # Plot k analytical and the FDM energies\n index = np.arange(k)\n plt.figure()\n plt.plot(index,energies[0:k],label='Analytical energies')\n plt.plot(index,E,label='Unperturbated energies')\n plt.plot(index,E_p,label='Perturbated energies')\n plt.legend()\n plt.xlabel('n')\n plt.ylabel(r'$\\tilde{E} = \\frac{E}{\\hbar\\omega}$')\n plt.title('Energies')\n plt.savefig(os.path.join(path,'energies.png'))\n plt.close()", "def energy(self, state):\n energy = 0.0\n if isinstance(state, dict):\n # convert to array\n state = [state[elem] for elem in self.indices]\n\n state = np.array(state)\n for coeff in self.interactions[1:]:\n for _inds, value in coeff.items():\n energy += value * np.prod(state[list(_inds)])\n for i, hi in 
self.interactions[0].items():\n energy += hi * state[i]\n\n return energy", "def test_density(self):\n sol = Mader(p_cj=3.0e11, d_cj=8.0e5, gamma=3.0, u_piston=0.0)\n r = np.array([0.7, 0.8])\n t = 6.25e-6\n solrt = sol(r, t)\n np.testing.assert_allclose(solrt.density[0], 2.26666666666663)", "def getDensityOfStates(self, Elist, linear):\n\n\t\timport states\n\n\t\t# Create energies in cm^-1 at which to evaluate the density of states\n\t\tconv = constants.h * constants.c * 100.0 * constants.Na # [=] J/mol/cm^-1\n\t\tEmin = min(Elist) / conv\n\t\tEmax = max(Elist) / conv\n\t\tdE = (Elist[1] - Elist[0]) / conv\n\t\tElist0 = np.arange(Emin, Emax+dE/2, dE)\n\n\t\t# Prepare inputs for density of states function\n\t\tvib = np.array([mode.frequency for mode in self.modes if isinstance(mode, HarmonicOscillator)])\n\t\trot = np.array([mode.frequencies for mode in self.modes if isinstance(mode, RigidRotor)])\n\t\thind = np.array([[mode.frequency, mode.barrier] for mode in self.modes if isinstance(mode, HinderedRotor)])\n\t\tif len(hind) == 0: hind = np.zeros([0,2],np.float64)\n\t\tlinear = 1 if linear else 0\n\t\tsymm = self.symmetry\n\n\t\t# Calculate the density of states\n\t\tdensStates, msg = states.densityofstates(Elist0, vib, rot, hind, symm, linear)\n\t\tmsg = msg.strip()\n\t\tif msg != '':\n\t\t\traise Exception('Error while calculating the density of states for species %s: %s' % (self, msg))\n\n\t\t# Convert density of states from (cm^-1)^-1 to mol/J\n\t\tdensStates /= conv\n\n\t\t# Return result\n\t\treturn densStates", "def energy(n):\n return (n * pi * hbar / (2 * a)) ** 2 / (2 * m)", "def getDensityOfStates(self, Elist):\n\t\trho = np.zeros((len(Elist)), np.float64)\n\t\trho0 = _modes.hinderedrotor_densityofstates(Elist, self.frequency, self.barrier)\n\t\tfor i in range(self.degeneracy):\n\t\t\trho = _modes.convolve(rho, rho0, Elist)\n\t\treturn rho", "def total_energy(state, k=1, m=1):\n return 0.5*k*state[..., 0]*state[..., 0]+0.5*m*state[..., 1]*state[..., 1]", "def test_densities():\n\n actual, r, wt = GridGenerator.make_grid(400)\n grid = 4*pi*r**2*wt\n\n data = AtomData()\n\n print(\"\\nINTEGRATED DENSITY TEST\")\n print(\"=======================\")\n for a in list(data.nuclear_charge.keys()):\n atom = Atom(a)\n Nel = data.electron_count[a]\n d0, d1, g0, g1, t0, t1, l0, l1 = atom.get_densities(r)\n\n # Count electrons per spin channel\n s_occ = AtomData.s_occ.get(a, [0, 0])\n p_occ = AtomData.p_occ.get(a, [0, 0])\n d_occ = AtomData.d_occ.get(a, [0, 0])\n f_occ = AtomData.f_occ.get(a, [0, 0])\n nela = np.sum(s_occ[0])+np.sum(p_occ[0])+np.sum(d_occ[0])+np.sum(f_occ[0])\n nelb = np.sum(s_occ[1])+np.sum(p_occ[1])+np.sum(d_occ[1])+np.sum(f_occ[1])\n assert(nela+nelb == Nel)\n\n id0 = np.dot(d0, grid)\n id1 = np.dot(d1, grid)\n\n diff_0 = id0 - nela\n percent_diff_0 = 100*diff_0/nela\n\n # Check to catch for Hydrogen having no beta electrons\n if nelb > 0.0:\n diff_1 = id1 - nelb\n percent_diff_1 = 100*diff_1/nelb\n else:\n diff_1 = 0.0\n percent_diff_1 = 0.0\n\n print(\"{:>3} - N_0 = ({:4.1f}) {:+2.6e}%, N_1 = ({:4.1f}) {:+2.6e}%, {:}\".format(a, id0, percent_diff_0, id1, percent_diff_1, \"PASSED\" if max(abs(diff_0), abs(diff_1)) < 1e-4 else \"FAILED - \"))\n\n print(\"\\nINTEGRATED KINETIC TEST\")\n print(\"=======================\")\n for a in list(data.ke_test.keys()):\n atom = Atom(a)\n t_bm = data.ke_test[a]\n d0, d1, g0, g1, t0, t1, l0, l1 = atom.get_densities(r)\n\n it0 = np.dot(t0, grid)\n it1 = np.dot(t1, grid)\n itot = it0 + it1\n\n diff = itot - t_bm\n print(\"{:>3} - T = 
{:+.6e}%, {:}\".format(a, 100*diff/t_bm, \"PASSED\" if abs(100*diff/t_bm) < 1e-2 else \"FAILED - \"))\n\n\n # The integral of the Laplacian over all space should be 0. Check that.\n print(\"\\nINTEGRATED LAPLACIAN TEST\")\n print(\"=========================\")\n for a in list(AtomData.ke_test.keys()):\n atom = Atom(a)\n\n d0, d1, g0, g1, t0, t1, l0, l1 = atom.get_densities(r)\n\n il0 = np.dot(grid, l0)\n il1 = np.dot(grid, l1)\n print(\"{:>3} - L_0 = {:+.6e}, L_1 = {:+.6e}, {:}\".format(a, il0, il1, \"PASSED\" if max(abs(il0), abs(il1)) < 1e-6 else \"FAILED - \"))\n\n\n print(\"\\nFINITE DIFFERENCE GRADIENT TEST\")\n print(\"===============================\")\n print(\"Testing gradient evaluation function against finite difference estimate...\")\n ne = Atom(\"Ne\") # Let's use \"the guvnor\"\n # We only need to test a few points around the core\n fdh = 1e-8\n fdr = np.arange(0.9, 0.9+fdh*10, fdh)\n d0, d1, g0, g1, t0, t1, l0, l1 = ne.get_densities(fdr)\n\n # First the first central difference\n fdiff = (d0[2:]-d0[:-2])/(2*fdh) # Construct the central difference\n if np.allclose(fdiff, g0[1:-1], atol=1e-1): # finite difference is not perfect, so lenient tollerance\n print(\"Gradient: PASSED\")\n else:\n print(\"Gradient: FAILED -\")\n\n print(\"\\nELEMENT COLOR FUNCTIONS TEST\")\n print(\"===========================\")\n test_obj = [Atom(\"H\"), Atom(\"C\"), Atom(\"O\")]\n test_str = [\"H\", \"C\", \"O\"]\n ref = np.array([[1., 1., 1.], [0.565, 0.565, 0.565], [1. , 0.051, 0.051]])\n\n if np.allclose( np.array(get_colors_for_elements(test_obj)), ref):\n print(\"\\nColor from objects: PASSED\")\n else:\n print(\"\\nColor from objects: FAILED -\")\n\n if np.allclose( np.array(get_colors_for_elements(test_str)), ref):\n print(\"Color from strings: PASSED\")\n else:\n print(\"Color from strings: FAILED -\")\n\n if HAVE_LIBXC:\n test_functional='GGA_X_PBE'\n print(\"\\nATOMIC EXCHANGE ENERGIES WITH {}\".format(test_functional))\n print(\"============================================\")\n for a in list(data.ke_test.keys()):\n atom = Atom(a)\n nE, vrho, vsigma, vtau, vlapl = atom.libxc_eval(r, functional=test_functional, restricted=False)\n Exc = (np.dot(nE, grid)).item()\n print('{:3s} {:.10f}'.format(a, Exc))\n else:\n print(\"\\nNot doing energy calculations due to lack of libxc.\\n\")", "def testDensityCalculation(self):\n known_densities = np.array([1.76776695e-01, 1.76776695e-01, 1.76776695e-01,\n 4.59619433e-01, 4.59619433e-01, 1.76776695e-01, 5.00000000e-01, \n 8.84538011e-02, 3.40206909e-02, 2.26040275e-04])\n densities = nb._get_local_densities() \n np.testing.assert_allclose(densities, known_densities)", "def air_density(altitude):\n p = pressure(altitude) # psf\n t = temperature(altitude) # R\n rho = p/(gas_constant*t) # lb/ft3\n return rho", "def getDensityOfStates(self, Elist):\n\t\treturn _modes.freerotor_densityofstates(Elist, self.frequencies, 1 if self.linear else 0)", "def energy(nx,ny):\n return 1+nx+ny", "def altitude(p):\r\n \r\n R = 290 #specific gas constant \r\n T = 93.65 #surface temperature K from A.Coustenis book\r\n g = 1.354 #surface gravity from A.Coustenis book\r\n p0 = 1467 #surface pressure in hPa 6.1 for mars\r\n \r\n z = np.empty_like(p)\r\n \r\n for i in range(p.shape[0]):\r\n z[i] = (-1)*(R*T/g)*np.log((p[i])/p0)/(10**3)\r\n \r\n # Make into an xarray DataArray\r\n z_xr = xr.DataArray(z, coords=[z], dims=['pfull'])\r\n z_xr.attrs['units'] = 'km'\r\n \r\n #below is the inverse of the calculation\r\n #p[i] = p0*np.exp((-1)*z[i]*(10**3)/((R*T/g)))\r\n \r\n return 
z_xr", "def density(self, alt):\n (Z, T, CN2, CO2, CO, CAr, CHe, CH, CM, WM) = self.altitude_profile(alt)\n\n # using eqn(42) of COESA for multiple gases\n M_i = [wmN2, wmO2, wmO, wmAr, wmHe, wmH] << (u.g / u.mol)\n n_i = [\n CN2.to_value(u.m**-3),\n CO2.to_value(u.m**-3),\n CO.to_value(u.m**-3),\n CAr.to_value(u.m**-3),\n CHe.to_value(u.m**-3),\n CH.to_value(u.m**-3),\n ] << (1 / u.m**3)\n rho = (n_i @ M_i) / Na\n return rho.to(u.kg / u.m**3)", "def getDensityOfStates(self, Elist, V=1.0):\n\t\treturn _modes.translation_densityofstates(Elist, self.mass, self.dimension, V)", "def density(temp,pres):\n g_p = liq_g(0,1,temp,pres)\n dliq = g_p**(-1)\n return dliq", "def E(self, state):\n \n if state==0: # Invalid state has no energy\n return 0\n return sum([self.calcDistance(state[i+1], state[i]) for i in range(len(state)-1)])", "def test_density(self):\n earth = CoreMantleCrustModel()\n assert earth.density(0) == 14\n assert earth.density(1e6) == 14\n assert earth.density(3.464e6) == 14\n assert earth.density(3.5e6) == 3.4\n assert earth.density(5e6) == 3.4\n assert earth.density(6.338e6) == 3.4\n assert earth.density(6.378e6) == 2.9", "def air_density(self):\n return self.flow_field.air_density", "def internalenergy(temp,pres):\n g = liq_g(0,0,temp,pres)\n g_t = liq_g(1,0,temp,pres)\n g_p = liq_g(0,1,temp,pres)\n u = g - temp*g_t - pres*g_p\n return u", "def energy_map(img):\n img_new = img.astype(float) #converting image to float\n total_energy = 0.0 # To store the sum of energy for all channels\n r,c,d = img.shape \n for i in range(d):\n dy = np.zeros([r, c], dtype=float) \n dx = np.zeros([r, c], dtype=float)\n if r > 1:\n dy = np.gradient(img_new[:,:,i], axis=0) #gradient along rows\n if c > 1:\n dx = np.gradient(img_new[:,:,i], axis=1) #gradient along columns\n total_energy += np.absolute(dy) + np.absolute(dx) \n return total_energy #Total energy map for entire image", "def energy(self, state):\n return _modeller.mod_state_optimizer_energy(self._modpt,\n self.__edat.modpt,\n state, self.__libs.modpt)", "def density(wair,pres,entr=None,temp=None,airf=None,dhum=None,\n chkvals=False,chktol=_CHKTOL,airf0=None,temp0=None,dhum0=None,\n chkbnd=False,mathargs=None):\n airf, temp, dhum = eq_wpte(wair,pres,entr=entr,temp=temp,airf=airf,\n dhum=dhum,chkvals=chkvals,chktol=chktol,airf0=airf0,temp0=temp0,\n dhum0=dhum0,chkbnd=chkbnd,mathargs=mathargs)\n h_p = iceair_h(0,0,1,wair,pres,temp=temp,airf=airf,dhum=dhum)\n dens = h_p**(-1)\n return dens", "def delta_energy(atom,layer1,layer2):\n global r,c,h\n return float('%.2E' % Decimal(str(r*((atom**2/layer1**2)-(atom**2/layer2**2)))))", "def getDensityEstimate(self):\n return self.density", "def density_from_pressure(temperature, pressure, RH):\n # R = specific gas constant , J/(kg*degK) = 287.05 for dry air\n Rd = 287.05\n # http://www.baranidesign.com/air-density/air-density.htm\n # http://wahiduddin.net/calc/density_altitude.htm\n # Evaporation into the Atmosphere, Wilfried Brutsaert, p37\n # saturation vapor pressure is a polynomial developed by Herman Wobus\n e_so = 6.1078\n c0 = 0.99999683\n c1 = -0.90826951e-2\n c2 = 0.78736169e-4\n c3 = -0.61117958e-6\n c4 = 0.43884187e-8\n c5 = -0.29883885e-10\n c6 = 0.21874425e-12\n c7 = -0.17892321e-14\n c8 = 0.11112018e-16\n c9 = -0.30994571e-19\n \n p = (c0 + temperature*(\n c1 + temperature*(\n c2 + temperature*(\n c3 + temperature*(\n c4 + temperature*(\n c5 + temperature*(\n c6 + temperature*(\n c7 + temperature*(\n c8 + temperature*(\n c9)))))))))) \n \n sat_vp = e_so / p**8\n Pv = sat_vp * RH\n 
density = (pressure / (Rd * temperature)) * (1 - (0.378 * Pv / pressure))\n return density", "def system(self, t, state, strength, density):\n v, m, theta, z, _, r = state\n A = np.pi*r**2 # radius generally varies with time after break-up\n rhoa = self.rhoa(z)\n\n # u = [dv/dt, dm/dt, dtheta/dt, dz/dt, dx/dt, dr/dt]\n u = np.zeros_like(state)\n u[0] = -self.Cd*rhoa*A*v**2 / (2*m) + self.g*np.sin(theta) # dv/dt\n u[1] = -self.Ch*rhoa*A*v**3/(2*self.Q) # dm/dt\n u[2] = self.g*np.cos(theta)/v - self.Cl*rhoa * A*v / \\\n (2*m) - (v*np.cos(theta) / (self.Rp+z)) # dtheta/dt\n u[3] = -v*np.sin(theta) # dz/dt\n u[4] = v*np.cos(theta)/(1+z/self.Rp) # dx/dt\n if rhoa * v**2 < strength:\n u[5] = 0\n else:\n u[5] = (7/2*self.alpha*rhoa/density)**0.5 * v # dr/dt\n\n return u", "def density(self):\n return (1e-3*self.molar_mass) * self.pressure / (gas_constant * self.temperature) # kg/m^3", "async def air_density(self, temperature, station_pressure):\n if temperature is not None and station_pressure is not None:\n kelvin = temperature + 273.15\n pressure = station_pressure\n r_specific = 287.058\n decimals = 2\n\n air_dens = (pressure * 100) / (r_specific * kelvin)\n\n if self._unit_system == UNITS_IMPERIAL:\n air_dens = air_dens * 0.06243\n decimals = 4\n\n return round(air_dens, decimals)\n\n _LOGGER.error(\"FUNC: air_density ERROR: Temperature or Pressure value was reported as NoneType. Check the sensor\")", "def find_local_energy(self):\n state = self.current_state\n (mat_elements, spin_flip_sites) = self.hamiltonian.find_nonzero_elements(state)\n\n flipped_states = [np.copy(state) for _ in spin_flip_sites]\n for i, site in enumerate(spin_flip_sites):\n flipped_states[i][0][site] *= -1\n\n energies = [self.amplitude_ratio(state, flipped_states[i])* element for (i, element) in enumerate(mat_elements)]\n return sum(energies)", "def buildE(self, debug = False):\n E = np.zeros([len(self.sta), len(self.dist)])\n i = 0\n for ikey, state in self.sta.items():\n j = 0\n c = state.c\n for jkey, disturbance in self.dist.items():\n if debug:\n print(i, ' ', j)\n if isinstance(disturbance, InputT) and disturbance in \\\n self.rc.adj[state]:\n # input is temperature and connected to this state\n\n E[i, j] = self.rc.adj[state][disturbance]['H'] / c\n elif isinstance(disturbance, InputQ) and disturbance in \\\n self.rc.adj[\n state]: # input is heat flow and connected to state\n E[i, j] = self.rc.adj[state][disturbance]['gain'] / c\n j += 1\n i += 1\n\n return E, list(self.sta.keys()), list(self.dist.keys())", "def specificHeatCapacity(d, d_iso, density, cp):\n d_t = min(0.5 * np.sum(d), d_iso , 0.1)\n sum_d_i = d[0]\n i = 0 \n kappa = 0 \n while sum_d_i <= d_t:\n kappa += d[i] * density[i] * cp[i]\n i += 1\n sum_d_i += d[i]\n else:\n sum_d_i -= d[i]\n d_part = d_t - sum_d_i \n kappa += d_part * density[i] * cp[i]\n\n return kappa", "def density(ensembles):\n if len(ensembles.shape) < 2:\n return ketbra(ensembles)\n else:\n den_mat = ketbra(ensembles[0])\n for i in range(1, len(ensembles)):\n den_mat += ketbra(ensembles[i])\n den_mat /= len(ensembles)\n return den_mat", "def get_density(element):\n return pt.elements.isotope(element).density", "def sumofstate_HD(T):\n\n Q = np.float64(0.0)\n\n #--- nuclear spin statistics ------------\n g_even = 1 # hydrogen deuteride\n g_odd = 1\n # ---------------------------------------\n\n data = eJHD\n\n nCols = data.shape[1]\n # nCols is equal to the number of vibrational\n # states included in the summation\n\n # generate Q using each energy from the dataset\n for i in 
range(0, nCols):\n\n # select row for v=i\n row = data[:,i]\n\n # remove nan values\n x = row[np.logical_not(np.isnan(row))]\n\n # get the dimension (equal to J_max)\n nRows = x.shape[0]\n\n # iterate over the available energies\n for j in range(0, nRows):\n E = x[j]\n energy = (-1*E*H*C)\n\n factor = (2*j+1)*math.exp(energy/(K*T))\n\n if j % 2 == 0:\n factor = factor*g_even\n else:\n factor = factor*g_odd\n Q = Q+factor\n\n\n\n # return the sum of states for HD\n return Q", "def plot_density_of_states(xlim=(-10, 5), ylim=(-1.5, 1.5), fmt='pdf'):\n\n efermi = Vasprun('vasprun.xml').efermi\n dos_lines = open ('DOSCAR').readlines()\n\n x, up, down = [], [], []\n nedos = Incar.from_file('INCAR').as_dict()['NEDOS'] - 1\n\n for line in dos_lines[6:6+nedos]:\n split_line = line.split()\n x.append(float(split_line[0]) - efermi)\n up.append(float(split_line[1]))\n down.append(-float(split_line[2]))\n\n x, up, down = np.array(x), np.array(up), np.array(down)\n sum = up + down\n\n ax = plt.figure().gca()\n ax.set_xlim(xlim[0], xlim[1])\n ax.set_ylim(ylim[0], ylim[1])\n\n ax.set_xlabel(r'$\\mathrm{E\\/(eV)}$')\n ax.set_ylabel(r'$\\mathrm{Density\\/of\\/States$')\n ax.set_xticklabels([r'$\\mathrm{%s}$' % t for t in ax.get_xticklabels()])\n ax.set_yticklabels([r'$\\mathrm{%s}$' % t for t in ax.get_yticklabels()])\n\n ax.plot(x, up, color='red' )\n ax.plot(x, down, color='green')\n ax.plot(x, sum, color='black' )\n if fmt is not None:\n plt.savefig('density_of_states.{}'.format(fmt))\n else:\n return ax\n\n plt.close()", "def density_from_fluorescence_for_el(p, q, maia_d, el):\n # override absorption settings temporarily to get the response with abs off\n conf_o = config.no_out_absorption\n conf_i = config.no_in_absorption\n config.no_out_absorption = True\n config.no_in_absorption = True\n sinogram = projection.project_sinogram(event_type='fluoro', p=p, q=q,\n maia_d=maia_d, anglelist=[0], el=el)\n # restore overridden absorption settings\n config.no_out_absorption = conf_o\n config.no_in_absorption = conf_i\n\n # Rescale sinogram pixel quantities based on pixel side length.\n # Note: I don't need to do this, assuming the length scale is not changing\n # sinogram *= (UM_PER_CM / p.um_per_px) ** 2\n\n # Now just integrate the density [g/cm3] in the elemental map.\n mass = p.el_maps[el]\n\n return mass.sum() / sinogram.sum()", "def energy_atom(atom,layer):\n global r,c,h\n backval= r*((atom**2/layer**2))\n return float('%.2E' % Decimal(str(backval)))", "def flowStress(f_hard,eps,d,q,a):\n\n pass", "def gas_zfactor(T_pr, P_pr):\n # T_pr : calculated pseudoreduced temperature\n # P_pr : calculated pseudoreduced pressure \n from scipy.optimize import fsolve # non-linear solver\n import numpy as np\n\n a1 = 0.3265; a2 = -1.0700; a3 = -0.5339; a4 = 0.01569; a5 = -0.05165; a6 = 0.5475\n a7 = -0.7361; a8 = 0.1844; a9 = 0.1056; a10 = 0.6134; a11 = 0.7210\n\n def f(y):\n rho_pr, z = y\n c1 = a1 + (a2/T_pr) + (a3/(T_pr**3))+ (a4/(T_pr**4))+ (a5/(T_pr**5))\n c2 = a6 + (a7/T_pr) + (a8/(T_pr**2))\n c3 = a9*((a7/T_pr) + (a8/(T_pr**2)))\n c4 = (a10)*(1+(a11*(rho_pr**2)))*((rho_pr**2)/(T_pr**3))*(np.exp(-a11*(rho_pr**2)))\n\n f1 = z + (c3*(rho_pr**5)) - (c2*(rho_pr**2)) - (c1*(rho_pr**1)) - c4 - 1\n f2 = rho_pr - ((0.27 * P_pr) / (z * T_pr))\n return[f1, f2]\n\n solve = fsolve(f, [1, 1]) # initial guess\n return(solve[0], solve[1]) # result is density, z-factor", "def _calc_energy( self, V_a, eos_d ):\n pass", "def energy_function(self, x):\n \n return -T.dot(T.transpose(x), T.dot(self.W, x)) -\\\n 
T.dot(T.transpose(self.b), x)", "def densitychange(self,dt=0.1):\n #Using conservation of mass and diffusion\n dp_dt = -div(self.u*self.d)\n dp_dt += ndimage.laplace(self.d)\n #This term seems to make the density clump together, producing \n #waves which can make the simulation blow up.\n #dp_dt -= np.add.reduce(self.u*np.array(np.gradient(self.d)))\n #Edge density shouldn't change.\n dp_dt[[0,-1]] = dp_dt[:,[0,-1]] = 0\n self.d += dp_dt*dt\n #Change pressure accordingly to ideal gas law\n #AAAAAAAAAAAAAAAA this fixed most of the poblems from before!!!\n self.P = self.d*8.214*273\n #Conserve mass by spreading out fluctuations \n self.d[1:-1,1:-1] += (self.mass-np.sum(self.d))/self.vol", "def local_energy(self):\n state = self.current_state\n (matrix_elements, transitions) = \\\n self.hamiltonian.find_matrix_elements(state)\n energy_list = [self.nqs.amplitude_ratio(state, transitions[i]) * mel\n for (i, mel) in enumerate(matrix_elements)]\n return sum(energy_list)", "def energy(density, coeff=1.0):\n # implementation goes here\n energy = 0\n for n_i in density:\n \tif type(n_i) != int:\n \t\traise TypeError('Wrong type!')\n \tenergy += n_i * (n_i - 1)\n\n return energy", "def flux_dEdRdP(energy, distance, power):\n\n return ReactorTools.dRdEnu_U235(energy) * ReactorTools.nuFlux(power, distance*100.)", "def compute_energy_density(kT):\n h=u.planck\n c=u.speed_of_light\n pi=np.pi\n return (8*pi/(h*c)**3)*((pi*kT)**4/15)", "def getDensity(h, R_w, R_sun): # k is a fitting constant\n\n R = np.sqrt(R_w**2+h**2)\n r = R/R_sun # units need to be in solar radii \n a = 77.1\n b = 31.4\n c = 0.954\n d = 8.30\n e = 0.550\n f = 4.63\n\n return (a*r**(-b) + c*r**(-d) + e*r**(-f))*10**8 #[cm-3]", "def linkDensity(self, time=None):\r\n listofDensities = list()\r\n for cell in self.cells:\r\n listofDensities.append(cell.cellDensity())\r\n return listofDensities", "def _density(self):\n fraction = np.array([0.]+[m.value for m in self.fraction])\n # TODO: handle invalid fractions using penalty functions\n # S = sum(fraction)\n # scale = S/100 if S > 100 else 1\n # fraction[0] = 100 - S/scale\n # penalty = scale - 1\n fraction[0] = 100 - sum(fraction)\n if (fraction < 0).any():\n return NaN\n volume = self._volume(fraction)\n density = np.array([m.density() for m in [self.base]+self.material])\n return np.sum(volume*density)", "def energy_function(self):\n E = 0\n for i in range(len(self.config)):\n for j in range(len(self.config)):\n s = self.config[i,j]\n #Calculate the impact of neighboring particle pairs\n neighbors = (self.config[(i+1)%L, j] +\n self.config[i, (j+1)%L] + \n self.config[(i-1)%L, j] + \n self.config[i, (j-1)%L])\n E += -J*s*neighbors\n #fix for extra neighbors\n return E/4", "def solid_surface_density_CL2013(M, a):\n return solid_surface_density(M, a, a)", "def energy(energy_name: str) -> float:\n pass", "def energy_surf(dbf, comps, phases, mode=None, **kwargs):\n # Here we check for any keyword arguments that are special, i.e.,\n # there may be keyword arguments that aren't state variables\n pdens_dict = unpack_kwarg(kwargs.pop('pdens', 2000), default_arg=2000)\n model_dict = unpack_kwarg(kwargs.pop('model', Model), default_arg=Model)\n\n # Convert keyword strings to proper state variable objects\n # If we don't do this, sympy will get confused during substitution\n statevar_dict = \\\n dict((v.StateVariable(key), value) \\\n for (key, value) in kwargs.items())\n\n # Generate all combinations of state variables for 'map' calculation\n # Wrap single values of state variables in lists\n # 
Use 'kwargs' because we want state variable names to be stringified\n statevar_values = [_listify(val) for val in kwargs.values()]\n statevars_to_map = [dict(zip(kwargs.keys(), prod)) \\\n for prod in itertools.product(*statevar_values)]\n\n # Consider only the active phases\n active_phases = dict((name.upper(), dbf.phases[name.upper()]) \\\n for name in phases)\n comp_sets = {}\n # Construct a list to hold all the data\n all_phase_data = []\n for phase_name, phase_obj in sorted(active_phases.items()):\n # Build the symbolic representation of the energy\n mod = model_dict[phase_name]\n # if this is an object type, we need to construct it\n if isinstance(mod, type):\n try:\n mod = mod(dbf, comps, phase_name)\n except DofError:\n # we can't build the specified phase because the\n # specified components aren't found in every sublattice\n # we'll just skip it\n logger.warning(\"\"\"Suspending specified phase %s due to\n some sublattices containing only unspecified components\"\"\",\n phase_name)\n continue\n # As a last resort, treat undefined symbols as zero\n # But warn the user when we do this\n # This is consistent with TC's behavior\n undefs = list(mod.ast.atoms(Symbol) - mod.ast.atoms(v.StateVariable))\n for undef in undefs:\n mod.ast = mod.ast.xreplace({undef: float(0)})\n logger.warning('Setting undefined symbol %s for phase %s to zero',\n undef, phase_name)\n # Construct an ordered list of the variables\n variables, sublattice_dof = generate_dof(phase_obj, mod.components)\n\n # Build the \"fast\" representation of that model\n comp_sets[phase_name] = make_callable(mod.ast, \\\n list(statevar_dict.keys()) + variables, mode=mode)\n\n # Get the site ratios in each sublattice\n site_ratios = list(phase_obj.sublattices)\n\n # Eliminate pure vacancy endmembers from the calculation\n vacancy_indices = list()\n for idx, sublattice in enumerate(phase_obj.constituents):\n if 'VA' in sorted(sublattice) and 'VA' in sorted(comps):\n vacancy_indices.append(sorted(sublattice).index('VA'))\n if len(vacancy_indices) != len(phase_obj.constituents):\n vacancy_indices = None\n logger.debug('vacancy_indices: %s', vacancy_indices)\n # Add all endmembers to guarantee their presence\n points = endmember_matrix(sublattice_dof,\n vacancy_indices=vacancy_indices)\n\n # Sample composition space for more points\n if sum(sublattice_dof) > len(sublattice_dof):\n points = np.concatenate((points,\n point_sample(sublattice_dof,\n pdof=pdens_dict[phase_name])\n ))\n\n\n\n # If there are nontrivial sublattices with vacancies in them,\n # generate a set of points where their fraction is zero and renormalize\n for idx, sublattice in enumerate(phase_obj.constituents):\n if 'VA' in set(sublattice) and len(sublattice) > 1:\n var_idx = variables.index(v.SiteFraction(phase_name, idx, 'VA'))\n addtl_pts = np.copy(points)\n # set vacancy fraction to log-spaced between 1e-10 and 1e-6\n addtl_pts[:, var_idx] = np.power(10.0, -10.0*(1.0 - addtl_pts[:, var_idx]))\n # renormalize site fractions\n cur_idx = 0\n for ctx in sublattice_dof:\n end_idx = cur_idx + ctx\n addtl_pts[:, cur_idx:end_idx] /= \\\n addtl_pts[:, cur_idx:end_idx].sum(axis=1)[:, None]\n cur_idx = end_idx\n # add to points matrix\n points = np.concatenate((points, addtl_pts), axis=0)\n\n data_dict = {'Phase': phase_name}\n # Generate input d.o.f matrix for all state variable combinations\n for statevars in statevars_to_map:\n # Prefill the state variable arguments to the energy function\n energy_func = \\\n lambda *args: comp_sets[phase_name](\n 
*itertools.chain(list(statevars.values()),\n args))\n # Get the stable points and energies for this configuration\n # Set max refinements equal to the number of independent dof\n mxr = sum(phase_obj.sublattices) - len(phase_obj.sublattices)\n refined_points, energies = \\\n refine_energy_surf(points, None, phase_obj, comps,\n variables, energy_func, max_iterations=-1)\n try:\n data_dict['GM'].extend(energies)\n for statevar in kwargs.keys():\n data_dict[statevar].extend(\n list(np.repeat(list(statevars.values()),\n len(refined_points))))\n except KeyError:\n data_dict['GM'] = list(energies)\n for statevar in kwargs.keys():\n data_dict[statevar] = \\\n list(np.repeat(list(statevars.values()),\n len(refined_points)))\n\n # Map the internal degrees of freedom to global coordinates\n\n # Normalize site ratios\n # Normalize by the sum of site ratios times a factor\n # related to the site fraction of vacancies\n site_ratio_normalization = np.zeros(len(refined_points))\n for idx, sublattice in enumerate(phase_obj.constituents):\n vacancy_column = np.ones(len(refined_points))\n if 'VA' in set(sublattice):\n var_idx = variables.index(v.SiteFraction(phase_name, idx, 'VA'))\n vacancy_column -= refined_points[:, var_idx]\n site_ratio_normalization += site_ratios[idx] * vacancy_column\n\n for comp in sorted(comps):\n if comp == 'VA':\n continue\n avector = [float(cur_var.species == comp) * \\\n site_ratios[cur_var.sublattice_index] for cur_var in variables]\n try:\n data_dict['X('+comp+')'].extend(list(np.divide(np.dot(\n refined_points[:, :], avector), site_ratio_normalization)))\n except KeyError:\n data_dict['X('+comp+')'] = list(np.divide(np.dot(\n refined_points[:, :], avector), site_ratio_normalization))\n\n # Copy coordinate information into data_dict\n # TODO: Is there a more memory-efficient way to deal with this?\n # Perhaps with hierarchical indexing...\n try:\n for column_idx, data in enumerate(refined_points.T):\n data_dict[str(variables[column_idx])].extend(list(data))\n except KeyError:\n for column_idx, data in enumerate(refined_points.T):\n data_dict[str(variables[column_idx])] = list(data)\n\n all_phase_data.append(pd.DataFrame(data_dict))\n\n # all_phases_data now contains energy surface information for the system\n return pd.concat(all_phase_data, axis=0, join='outer', \\\n ignore_index=True, verify_integrity=False)", "def count_energy(self,state):\n\t\tassert len(state) == self._size\n\n\t\ttmp = 0\n\t\tfor i in range(self._size):\n\t\t\tfor j in range(self._size):\n\t\t\t\ttmp += self.myWeights[i][j]* state [i] * state [j]\n\t\treturn tmp - self.myB * sum(state)", "def _redfield_old(nstates, rho0, c_ops, h0, Nt, dt,e_ops, env):\n t = 0.0\n\n print('Total number of states in the system = {}'.format(nstates))\n\n # initialize the density matrix\n rho = rho0\n\n # properties of the environment\n T = env.T\n cutfreq = env.cutfreq\n reorg = env.reorg\n\n #f = open(fname,'w')\n fmt = '{} '* (len(e_ops) + 1) + '\\n'\n\n # construct system-bath operators in H_SB\n\n # short time approximation\n # Lambda = 0.5 * reorg * T * ((hop - Delta)/cutfreq**2 * sigmay + 1./cutfreq * sigmaz)\n\n # constuct the Lambda operators needed in Redfield equation\n l_ops = []\n for c_op in c_ops:\n l_ops.append(getLambda(nstates, h0, c_op, T, cutfreq, reorg))\n\n f_dm = open('den_mat.dat', 'w')\n f_obs = open('obs.dat', 'w')\n\n t = 0.0\n dt2 = dt/2.0\n\n # first-step\n rho_half = rho0 + func(rho0, h0, c_ops, l_ops) * dt2\n rho1 = rho0 + func(rho_half, h0, c_ops, l_ops) * dt\n\n rho_old = rho0\n rho = 
rho1\n\n for k in range(Nt):\n\n t += dt\n\n rho_new = rho_old + func(rho, h0, c_ops, l_ops) * 2. * dt\n\n # update rho_old\n rho_old = rho\n rho = rho_new\n\n # dipole-dipole auto-corrlation function\n #cor = np.trace(np.matmul(d, rho))\n\n # store the reduced density matrix\n f_dm.write('{} '* (nstates**2 + 1) + '\\n'.format(t, *rho))\n\n # take a partial trace to obtain the rho_el\n obs = np.zeros(len(e_ops))\n for i, obs_op in enumerate(e_ops):\n obs[i] = observe(obs_op, rho)\n\n f_obs.write(fmt.format(t * au2fs, *obs))\n\n\n f_obs.close()\n f_dm.close()\n\n return rho", "def Render(shelf, **options):\n #low, high = options.pop('low', None), options.pop('high', None)\n steps = options.pop('steps')\n low = steps.min()\n high = steps.max()\n n = len(steps)\n\n print(n)\n\n xs = numpy.linspace(low, high, 1001)\n \n ds = shelf.Density(xs)\n return xs, ds", "def fGasDensity(GasGravity, Temperature, Pressure):\n\tGasConstant = 8.314\n\tPress = Pressure / 145.038 # MPa\n\tTemp = Temperature + 273.16 # Deg K\n\tPr = Press / (4.892 - (0.4048 * GasGravity))\n\tTr = Temp / (94.72 + (170.75 * GasGravity))\n\tA = 0.03 + 0.00527 * ((3.5 - Tr)**3)\n\tB = (0.642 * Tr) - (0.007 * (Tr**4)) - 0.52\n\tC = 0.109 * ((3.85 - Tr)**2)\n\tD = exp(-((0.45 + (8 * ((0.56 - (1 / Tr))**2))) * ((Pr**1.2) / Tr)))\n\tZ = (A * Pr) + B + (C * D)\n\treturn (28.8 * GasGravity * Press) / (Z * GasConstant * Temp)", "def boltz_dist(states_energy,temperature,verbose = False):\n\n if temperature > 0.001:\n boltz = np.exp(-(np.array(states_energy) - states_energy[0])/8.61733E-5/temperature)\n boltz /= np.sum(np.exp(-(np.array(states_energy) - states_energy[0])/8.61733E-5/temperature))\n else:\n boltz = [1.0]\n for i in range(len(states_energy)-1):\n boltz.append(0.0)\n\n if verbose == True:\n print('State energy (eV)\\tWeight')\n for i in range(len(states_energy)):\n print('{:0.3f}\\t{:0.3f}'.format(states_energy[i], boltz[i]))\n\n return boltz", "def density_by_ideal_gas_law(\n p: tf.Tensor,\n r: tf.Tensor,\n t: tf.Tensor,\n ) -> tf.Tensor:\n return p / r / t", "def local_density_mean(self):\n\n # the simulation units are msun / kpc ^3\n local = np.mean(self.dens)\n\n return local", "def calc_D(state):\n\t\tif t < thresh:\n\t\t\tstate.D_g[t] = 0.5\n\t\t\tstate.D_n[t] = 0.5\n\t\telse:\n\t\t\tif mod == \"constant\":\n\t\t\t\tstate.D_g[t] = D\n\t\t\t\tstate.D_n[t] = 1-D\n\t\t\tif mod == \"value\":\n\t\t\t\t# NOTE: if rmag and lmag is 1/0, can just use V\n\t\t\t\t# average of two actions\n\t\t\t\tV = np.mean(1/2*(state.QG[t,:] - state.QN[t,:])) # state average(?) 
\n\t\t\t\tV = 1/(1 + np.exp(-V*k)) # translate between 0 and 1\n\t\t\t\tstate.D_g[t] = V \n\t\t\t\tstate.D_n[t] = 1 - V\n\t\treturn state", "def wood_drum_env(N, sr):\n ## TODO: Fill this in\n return np.zeros(N)", "def density(x, kind=\"geopotential\"):\n\n rho = table(x, kind)[3]\n return rho", "def get_demand_per_dc():\n network = Network(pickle=True)\n for i in network.warehouses:\n k = i.inventory\n lat = i.lat\n longi = i.long\n index = network.get_nearest(lat, longi)\n for j in k.keys():\n network.dcs[index].add_inventory(j, i.inventory[j])\n network.pickle()", "def compute_density_geo(cur, N):\n ## geometric average cardinality\n avg_card = 1\n\n for n in range(N):\n card = get_parameter(cur, par=(\"card_B%d\" % n))\n avg_card *= card\n avg_card = math.pow(avg_card, 1.0/N)\n\n ## average mass\n if avg_card == 0:\n return (-1, 0)\n else:\n return (get_parameter(cur, par=\"B_mass\") / avg_card, avg_card)", "def drfl_dsurface(self, x_surface, geom):\n\n return np.zeros((self.n_wl, 1))", "def make_energy(self):\n def energy_func(m):\n heff = self.field(m)\n return -energy.zeeman(m, self.Ms, heff) \\\n + energy.shape_anisotropy(m, self.Ms,\n self.Nd[0], self.Nd[1], self.Nd[2])\n self.energy = energy_func", "def density_of_air(self) -> float:\n\n return self.pressure / (SPECIFIC_GAS_CONSTANT_OF_AIR * self.ambient_temperature)", "def energy(self, visible):\n bias_term = tf.matmul(visible, self._bias_visible)\n linear_transform = tf.matmul(visible, self._weights) + tf.squeeze(self._bias_hidden)\n hidden_term = tf.reduce_sum(tf.math.log(1 + tf.exp(linear_transform)), axis=1)\n return tf.reduce_mean(-hidden_term - bias_term)", "def energy(self, state, action, next_state, no_trans=False):\n\n norm = 0.5 / (self.sigma**2)\n\n if no_trans:\n diff = state - next_state\n else:\n pred_trans = self.transition_model(state, action)\n diff = state + pred_trans - next_state\n\n return norm * diff.pow(2).sum(2).mean(1)", "def get_qm_state_energy(geo):\n\n states_energy = {'state' : [], 'state_energy' : []}\n qm_lns = open(f'{os.getcwd()}/{geo}/fullqm_{geo}.log', 'r').readlines() \n for line in qm_lns:\n if 'EXCITED STATE' in line and 'ENERGY=' in line:\n state = float(line.split()[2])\n energy = float(line.split()[4])\n states_energy['state'].append(state)\n states_energy['state_energy'].append(round(energy, 5))\n \n return(pd.DataFrame(data=states_energy))", "def pureAl_solliq():\n # Given temperature.\n T = 800\n # Render thermodynamic database.\n db = Database(\"AlNiAnsara1997.TDB\")\n # Define the element.\n comp = \"AL\"\n # Two phases separated by the interface.\n phasenames = [\"FCC_A1\", \"LIQUID\"]\n\n # Molar volumes for elements.\n # Molar volume of Al.\n val = 10.269 * 10.0 ** (-6.0) + (3.860 * 10.0 ** (-5) * 10.0 ** (-6.0)) * (\n T ** 1.491\n )\n\n # Call the module for calculating solid/liquid interfacial energies in pure metals.\n sigma = SigmaPure(T, val, db, comp, phasenames)\n\n # Print the calculated interfacial energy with xarray.Dataset type.\n print(sigma, \"\\n\")\n # Print the calculated interfacial energy with xarray.DataArray type.\n print(sigma.Interfacial_Energy, \"\\n\")\n # Print the calculated interfacial energy value.\n print(sigma.Interfacial_Energy.values)\n\n # Output\n \"\"\"\n <xarray.Dataset>\n Dimensions: ()\n Data variables:\n Component <U2 'AL'\n Temperature int64 800\n Melting_Enthalpy float64 1.071e+04\n Interfacial_Energy float64 0.155 \n\n <xarray.DataArray 'Interfacial_Energy' ()>\n array(0.15497715523946845) \n\n 0.15497715523946845\n \"\"\"", "def 
energy(self, state, action, next_state, no_trans=False):\n\n norm = 0.5 / (self.sigma ** 2)\n\n if no_trans:\n diff = state - next_state\n else:\n pred_trans = self.transition_model(state, action)\n diff = state + pred_trans - next_state\n\n return norm * diff.pow(2).sum(1)", "def calc_air_density(temperature, pressure, elevation_ref=None, elevation_site=None, lapse_rate=-0.113,\n specific_gas_constant=286.9):\n\n temp = temperature\n temp_kelvin = temp + 273.15 # to convert deg C to Kelvin.\n pressure = pressure * 100 # to convert hPa to Pa\n ref_air_density = pressure / (specific_gas_constant * temp_kelvin)\n\n if elevation_ref is not None and elevation_site is not None:\n site_air_density = round(ref_air_density + (((elevation_site - elevation_ref) / 1000) * lapse_rate), 3)\n return site_air_density\n elif elevation_site is None and elevation_ref is not None:\n raise TypeError('elevation_site should be a number')\n elif elevation_site is not None and elevation_ref is None:\n raise TypeError('elevation_ref should be a number')\n else:\n return ref_air_density", "def computeChargeDensity(self):\n \n self.rho = np.zeros((self.ni, self.nj, self.nk))\n \n for species in self.speciesList:\n if species.charge!=0:\n self.rho += species.charge*species.den", "def model_onelayer_pert(r):\n\n\t#- march through the various depth levels -----------------------------------------------------\n\n\tif (r > 6361000.0):\n\t\trho = 2.7\n\t\tvpv = 5.8\n\t\tvph = vpv\n\t\tvsv = 2.0 + 0.02\n\t\tvsh = vsv \n\t\teta = 1.0\n\n\telse:\n\t\trho = 3.1\n\t\tvpv = 7.8\n\t\tvph = vpv\n\t\tvsv = 3.0\n\t\tvsh = vsv\n\t\teta = 1.0\n\n\t#- convert to elastic parameters --------------------------------------------------------------\n\n\trho = 1000.0 * rho\n\tvpv = 1000.0 * vpv\n\tvph = 1000.0 * vph\n\tvsv = 1000.0 * vsv\n\tvsh = 1000.0 * vsh\n\n\tA = rho * vph**2\n\tC = rho * vpv**2\n\tN = rho * vsh**2\n\tL = rho * vsv**2\n\tF = eta * (A - 2 * L)\n\n\treturn rho, A, C, F, L, N", "def sat_vap_dens(nz, T, SWVD, plot=False):\r\n rho_v = np.zeros(nz)\r\n rho_v_dT = np.zeros(nz)\r\n if SWVD == \"Libbrecht\":\r\n rho_v = (\r\n np.exp(-T_ref_L / T) / (f * T) * (a0 + a1 * (T - 273) + a2 * (T - 273) ** 2)\r\n ) # [kg/m^3] Water vapor density\r\n rho_v_dT = (\r\n np.exp(-T_ref_L / T)\r\n / (f * T ** 2)\r\n * (\r\n (a0 - a1 * 273 + a2 * 273 ** 2) * (T_ref_L / T - 1)\r\n + (a1 - a2 * 2 * 273) * T_ref_L\r\n + a2 * T ** 2 * (T_ref_L / T + 1)\r\n )\r\n ) # [kg/m^3/K]\r\n elif SWVD == \"Calonne\":\r\n x = (L_Cal * mH2O) / (rho_i * kB)\r\n rho_v = rho_ref * np.exp(x * ((1 / T_ref_C) - (1 / T)))\r\n\r\n rho_v_dT = x / T ** 2 * rho_ref * np.exp(x * ((1 / T_ref_C) - (1 / T)))\r\n\r\n elif SWVD == \"Hansen\":\r\n\r\n rho_v = (\r\n (10.0 ** (c1 / T + c2 * np.log(T) / np.log(10) + c3 * T + c4 * T ** 2 + c5))\r\n * c6\r\n / R_v\r\n / T\r\n )\r\n rho_v_dT = (\r\n rho_v\r\n * np.log(10)\r\n * (-c1 / T ** 2 + c2 / (T * np.log(10)) + c3 + 2 * c4 * T)\r\n - rho_v / T\r\n )\r\n else:\r\n raise ValueError(\"Saturation water vapor density not available\")\r\n if plot:\r\n fig1 = plt.plot(T, rho_v)\r\n plt.title(\"Water vapor density with respect to temperature\")\r\n plt.show(fig1)\r\n fig2 = plt.plot(T, rho_v_dT)\r\n plt.title(\"Derivative of water vapor density with respect to temperature\")\r\n plt.show(fig2)\r\n return rho_v, rho_v_dT", "def helmholtzenergy(temp,pres):\n g = liq_g(0,0,temp,pres)\n g_p = liq_g(0,1,temp,pres)\n f = g - pres*g_p\n return f", "def __init__(self, plasma_parent):\n super(LevelNumberDensity, self).__init__(plasma_parent)\n 
self.calculate = self._calculate_dilute_lte\n self._update_inputs()\n self.initialize_indices = True", "def calc_ked_WFI(self):\n\n #Initialize kinetic energy density\n self.ked_WFI = np.zeros( (self.grid.Nelem, 1))\n\n #Figure out the number of occupied orbitals\n if self.m == 0:\n if self.pol == 1:\n Nocc = np.floor(self.N/2)\n nu = self.N / 2 - Nocc\n else:\n Nocc = np.floor(self.N)\n nu = self.N - Nocc\n\n else:\n #m>0 orbitals hold twice as many electrons due to +-m symmetry\n if self.pol == 1:\n Nocc = np.floor(self.N / 4)\n nu = self.N / 4 - Nocc\n else:\n Nocc = np.floor(self.N/2)\n nu = self.N / 2 - Nocc\n\n #Construct density\n for i in range(int(Nocc)):\n # print(\"phi from pssolver\", self.phi)\n # print(\"phi subset\", self.phi[:,i])\n # print(\"integrate returns\", self.grid.integrate( self.phi[:,i]**2 )**0.5)\n\n #Normalized orbital\n phi_norm = self.phi[:,i] / self.grid.integrate( self.phi[:,i]**2 )**0.5\n phi_norm = phi_norm[:, None]\n self.ked_WFI += (phi_norm * (self.H0 @ phi_norm)) / self.grid.w[:, None]\n\n #If we are doing fractional robitals and are non-integer\n if self.FRACTIONAL is True and nu != 0:\n #Normalized orbital\n phi_norm = self.phi[:,i] / self.grid.integrate( self.phi[:, Nocc+1]**2)**0.5\n phi_norm = phi_norm[:, None]\n self.ked_WFI += nu * ( phi_norm * (self.H0 @ phi_norm) ) / self.grid.w[:, None]\n\n #Scale densities appropriately\n if self.m == 0:\n if self.pol == 1: #Unpolarized electrons\n self.ked_WFI = 2 * self.ked_WFI\n\n else: # m>0 orbitals hold twice as many electrons due to +-m symmetry\n if self.pol == 1:\n self.ked_WFI = 4 * self.ked_WFI\n else:\n self.ked_WFI = 2 * self.ked_WFI", "def return_energy_distribution(graphs_list, all_states, observable_func=None, return_energies=False, verbose=0):\n\n all_e_distrib = []\n all_e_values_unique = []\n\n for i, G in enumerate(tqdm(graphs_list, disable=verbose==0)):\n if observable_func == None: \n observable = generate_Ham_from_graph(\n G, type_h='ising', type_ising='z'\n )\n else:\n observable = observable_func(G)\n e_values = observable.data.diagonal().real\n e_values_unique = np.unique(e_values)\n state = all_states[i]\n\n e_distrib = np.zeros(len(e_values_unique))\n\n for j, v in enumerate(e_values_unique):\n e_distrib[j] = np.sum(\n (np.abs(state.data.toarray()) ** 2)[e_values == v]\n )\n\n all_e_distrib.append(e_distrib)\n all_e_values_unique.append(e_values_unique)\n\n e_values_unique = np.unique(np.concatenate(all_e_values_unique, axis=0))\n\n all_e_masses = []\n for e_distrib, e_values in zip(all_e_distrib, all_e_values_unique):\n masses = np.zeros_like(e_values_unique)\n for d, e in zip(e_distrib, e_values):\n masses[e_values_unique == e] = d\n all_e_masses.append(masses)\n\n all_e_masses = np.array(all_e_masses)\n\n if return_energies:\n return all_e_masses, e_values_unique\n return all_e_masses", "def getDustDensity(grid=None, ppar=None):\n mesh = np.meshgrid(grid.x, grid.y, grid.z, indexing='ij')\n if ppar['crd_sys'] == 'sph':\n rr = mesh[0]\n tt = mesh[1]\n pp = mesh[2]\n xx = rr * np.sin(tt) * np.sin(pp)\n yy = rr * np.sin(tt) * np.cos(pp)\n zz = rr * np.cos(tt)\n cyrr = np.sqrt(xx**2. + yy**2)\n elif ppar['crd_sys'] == 'car':\n xx = mesh[0]\n yy = mesh[1]\n zz = mesh[2]\n rr = np.sqrt(xx**2 + yy**2 + zz**2)\n cyrr = np.sqrt(xx**2. 
+ yy**2.)\n else:\n raise ValueError('crd_sys not specified in ppar')\n\n # calculate surface density\n nflat = len(ppar['dRin'])\n flat = cyrr * 0.\n for ii in range(nflat):\n flatii = fn_getflat(cyrr, ppar['dRin_w'][ii], ppar['dRin'][ii], \n ppar['dRout'][ii], ppar['dRout_w'][ii], \n ppar['dsigp'][ii], ppar['dsig0'][ii])\n flat = flat + flatii\n\n nring = len(ppar['dring_r'])\n ring = cyrr * 0\n for ii in range(nring):\n ringii = fn_getring(cyrr, ppar['dring_r'][ii], \n ppar['dring_win'][ii], ppar['dring_wout'][ii], \n ppar['dring_a'][ii])\n ring = ring + ringii\n\n nlynbell = len(ppar['dLB_Rin'])\n lynbell = cyrr * 0\n for ii in range(nlynbell):\n lynbellii = fn_getLyndenBell(cyrr, ppar['dLB_Rin'][ii], \n ppar['dLB_Rsig'][ii], ppar['dLB_sigp'][ii], \n ppar['dLB_sig0'][ii])\n lynbell = lynbell + lynbellii\n\n sig = flat + ring + lynbell\n\n # calculate the dust density\n op = dustopac.radmc3dDustOpac()\n dinfo = op.readDustInfo()\n ngs = len(dinfo['gsize'])\n dweights = dinfo['dweights']\n\n rhodust = np.zeros([grid.nx, grid.ny, grid.nz, ngs], dtype=np.float64) \n for ig in range(ngs):\n hhii = ppar['dHt'][ig] * (cyrr / ppar['dRt'][ig])**ppar['dqheight'][ig]\n rho_ig = sig / np.sqrt(2.*np.pi) / hhii * np.exp(-0.5*(zz/hhii)**2)\n rhodust[:,:,:,ig] = rho_ig * dweights\n\n reg = rhodust < ppar['cutddens']\n rhodust[reg]= ppar['cutddens']\n\n return rhodust", "def csr1d_steady_state_kick_calc(z, weights, nz=100, rho=1, species=\"electron\", normalized_units=False):\n\n assert species == \"electron\", f\"TODO: support species {species}\"\n\n # Density\n H, edges = np.histogram(z, weights=weights, bins=nz)\n zmin, zmax = edges[0], edges[-1]\n dz = (zmax - zmin) / (nz - 1)\n\n zvec = np.linspace(zmin, zmax, nz) # Sloppy with bin centers\n\n Qtot = np.sum(weights)\n density = H / dz / Qtot\n\n # Density derivative\n densityp = np.gradient(density) / dz\n densityp_filtered = savgol_filter(densityp, 13, 2)\n\n # Green function\n zi = np.arange(0, zmax - zmin, dz)\n #factor =\n # factor = -3**(2/3) * Qtot/e_charge * r_e * rho**(-2/3) / gamma # factor for ddelta/ds [1/m]\n if normalized_units:\n factor = -3**(2/3) * rho**(-2/3) # factor for normalized uinits [1/m^2]\n else:\n factor = ( -3**(2/3) * Qtot / e_charge * r_e * mec2 * rho**(-2/3) ) # factor for denergy/dz [eV/m]\n green = factor * np.diff(zi ** (2 / 3))\n\n # Convolve to get wake\n wake = np.convolve(densityp_filtered, green, mode=\"full\")[0 : len(zvec)]\n\n # Interpolate to get the kicks\n delta_kick = np.interp(z, zvec, wake)\n\n return {\"denergy_ds\": delta_kick, \"zvec\": zvec, \"wake\": wake}", "def _UpdateEnergy(self):\n self.mol.GetEnergy('nokinetic')", "def Density(material):\n if material == \"mild\":\n return 7850.0\n else:\n if material == \"al\":\n return 2700.0\n else:\n raise ValueError(\"Invalid material `\"+material+\"'\")", "def getDensity(self, lat, lon, alt, time):\n raise NotImplementedError(\n \"getDensity method must be implemented by class {}\".format(\n type(self).__name__))", "def energy(p,m):\n return math.sqrt(p*p + m*m)", "def dLs_dsurface(self, x_surface, geom):\n\n return np.zeros((self.n_wl, 1))", "def drfl_dsurfaceb(self, x_surface, geom):\n\n return np.zeros((self.n_wl, 1))", "def get_energy(edr, annealing_times, energy_type = 'Potential', out_fig = 'energy_distribution.svg'): # Could be Total-Energy\n fig, ax = plt.subplots(figsize = (16,9))\n data = pd.DataFrame()\n xvg_tmp_file = tempfile.NamedTemporaryFile(suffix='.xvg')\n energy = []\n iterator = range(0, len(annealing_times)-1, 2)\n\n for state, 
index in tqdm.tqdm(enumerate(iterator), total=len(iterator)):#enumerate(iterator):# # the calculation is per pair of times, beetween the first to time the temperature was keep constant, then the system was heated and repeated again.\n run = tools.run(f\"export GMX_MAXBACKUP=-1; echo {energy_type} | gmx energy -f {edr} -b {annealing_times[index]} -e {annealing_times[index + 1]} -o {xvg_tmp_file.name} | grep \\'{energy_type.replace('-',' ')}\\'\")\n energy.append(float(run.stdout.split()[-5]))\n \"\"\"\n Energy Average Err.Est. RMSD Tot-Drift\n -------------------------------------------------------------------------------\n Potential -1.30028e+06 -- 1682.1 -2422.24 (kJ/mol)\n Total Energy -952595 -- 2606.81 -3688.3 (kJ/mol)\n \"\"\"\n # Getting the histograms and checking for the same len in all intervals\n if state == 0:\n data[state] = xvg.XVG(xvg_tmp_file.name).data[:,1]\n else:\n xvg_data = xvg.XVG(xvg_tmp_file.name).data[:,1]\n if xvg_data.shape[0] > data.shape[0]:\n data[state] = xvg_data[:data.shape[0]]\n else:\n data = data.iloc[:xvg_data.shape[0]]\n data[state] = xvg_data\n\n\n print(data)\n sns.histplot(data = data, element='poly', stat = 'probability', axes = ax)\n ax.set(\n xlabel = f'{energy_type} [kJ/mol]',\n ylabel = 'Probability',\n title = f'Distribution of {energy_type}')\n # plt.show()\n fig.savefig(out_fig)\n return energy", "def main():\n snowdensity=0.35 #from May 1 2010 SNOTEL (2011,2013 were similar, 2014 was 0.4), at the saddle in May 1 2010 it was 0.4\n snodasyears=[2010,2004,2005]\n wdata=[wrf.load(\"wrf/SWE_daily.nc\",extractday=212+5+int(np.round(365.25*year))) for year in [3,4]]\n wdata.extend([wrf.load(\"wrf/SWE_daily.nc\",extractday=212+20+int(np.round(365.25*year))) for year in [3,4]])\n print(len(wdata))\n sdata=[snodas.load(\"snodas/SWE_Daily0600UTC_WesternUS_{}.dat\".format(year),extractday=125) for year in snodasyears]\n sdata.extend([snodas.load(\"snodas/SWE_Daily0600UTC_WesternUS_{}.dat\".format(year),extractday=140) for year in snodasyears])\n print(len(sdata))\n # sdata=[snodas.load(\"snodas/SWE_Daily0600UTC_WesternUS_{}.dat\".format(year),extractday=120) for year in range(2004,2013)]\n # sdata.insert(0,sdata.pop(6)) #move year 2010 to the begining of the list\n ldata=lidar.load_fast(loc=\"lidar/\",geofile=\"snow-on-dem.nc\",decimation_factor=10)\n \n print(\"Calculating WRF weights\")\n try:\n wrfweights=mygis.read_nc(\"wrf2lidar_weights.nc\").data\n except:\n wrfweights =gen_weights(ldata.lat,ldata.lon,wdata[0].lat,wdata[0].lon,mask=(ldata.dem>1500))\n mygis.write(\"wrf2lidar_weights.nc\",wrfweights)\n \n # wrfbounds =find_bounds(wrfweights)\n print(\"Calculating SNODAS weights\")\n try:\n snodasweights=mygis.read_nc(\"snodas2lidar_weights.nc\").data\n except:\n snodasweights=gen_weights(ldata.lat,ldata.lon,sdata[0].lat,sdata[0].lon,mask=(ldata.dem>1500))\n mygis.write(\"snodas2lidar_weights.nc\",snodasweights)\n \n # snodasbounds =find_bounds(snodasweights)\n \n wdata[0].lc[wrfweights==0]=0\n sdata[0].lc[snodasweights==0]=0\n\n print(\"Binning by elevations...\")\n #dx=4000) #note use dx=lidar_dx because weights are lidar gridcells...\n wrfbyz=[bin_by_elevation(w.data,w.dem,wdata[0].lc,weights=wrfweights,dz=200,dx=10) for w in wdata]\n print(\"Binning by elevations...\")\n snodasbyz=[bin_by_elevation(s.data,sdata[0].dem,sdata[0].lc,weights=snodasweights,dz=150,dx=10) for s in sdata]#dx=926)\n print(\"Binning by elevations...\")\n lidarbyz=bin_by_elevation(ldata.data*snowdensity,ldata.dem,ldata.lc,dz=100,dx=10)\n print(\"Plotting\")\n 
plot_volumes(wrfbyz,snodasbyz,lidarbyz)\n\n snodasyears=[2010,2004,2005,2010.2,2004.2,2005.2]\n for i in range(len(snodasbyz)):\n plot_elevation_bands(snodasbyz[i],outputfile=\"SNODAS_swe_by_z_{}.png\".format(snodasyears[i]),title=\"SNODAS SWE {}\".format(snodasyears[i]))", "def test_el_small_surface_instability():\n levels = np.array([959., 931.3, 925., 899.3, 892., 867.9, 850., 814.,\n 807.9, 790., 779.2, 751.3, 724.3, 700., 655., 647.5,\n 599.4, 554.7, 550., 500.]) * units.mbar\n temperatures = np.array([22.2, 20.2, 19.8, 18.4, 18., 17.4, 17., 15.4, 15.4,\n 15.6, 14.6, 12., 9.4, 7., 2.2, 1.4, -4.2, -9.7,\n -10.3, -14.9]) * units.degC\n dewpoints = np.array([20., 18.5, 18.1, 17.9, 17.8, 15.3, 13.5, 6.4, 2.2,\n -10.4, -10.2, -9.8, -9.4, -9., -15.8, -15.7, -14.8, -14.,\n -13.9, -17.9]) * units.degC\n el_pressure, el_temperature = el(levels, temperatures, dewpoints)\n assert_nan(el_pressure, levels.units)\n assert_nan(el_temperature, temperatures.units)", "def _DeRedden(lam,flux,ra,dec,dustmap_path='/Users/vzm83/Softwares/sfddata-master'): \n m = sfdmap.SFDMap(dustmap_path) \n flux_unred = pyasl.unred(lam,flux,m.ebv(ra,dec))\n return flux_unred", "def densidad(qe):\r\n global x,rhoe,rhoi,dx,nparticulas,npuntos_malla,pared_izquierda,pared_derecha\r\n \r\n j1 = sp.dtype(sp.int32) # Asegura que la variable permanezca entera\r\n j2 = sp.dtype(sp.int32) \r\n \r\n # Factor de ponderacion de carga \r\n re = qe/dx \r\n # Densidad electronica \r\n rhoe = sp.zeros(npuntos_malla+1) \r\n # Mapa de cargas sobre la malla\r\n for i in range(nparticulas):\r\n xa = x[i]/dx # xparticula/dx\r\n j1 = int(xa) # indices de la malla fija xmalla/dx\r\n j2 = j1 + 1 # Siguiente punto en la malla\r\n f2 = xa - j1 # |xmalla - xparticula|/dx\r\n f1 = 1.0 - f2\r\n rhoe[j1] = rhoe[j1] + re*f1\r\n rhoe[j2] = rhoe[j2] + re*f2\r\n\r\n # Condiciones de frontera periodica\r\n rhoe[0] += rhoe[npuntos_malla]\r\n rhoe[npuntos_malla] = rhoe[0]\r\n \r\n # Se agrega una densidad de iones neutral\r\n rhoi = rho0\r\n\r\n return True", "def get_fiber_density():\n return Global_Module.global_fiber_density", "def calc_enginprops(self):\n # Let's assemble the ABD matrix even if it is not required\n ABD = np.bmat([[self.A, self.B], [self.B, self.D]])\n ABD_inv = np.linalg.inv(ABD)\n # We would use the whole matrix. 
This gives results similar to elamX and considers poisson effects\n A_inv = ABD_inv[0:3, 0:3]\n self.Ex = 1 / (self.total_t * A_inv[0, 0]) # It is 2 * t because we need total thickness\n self.Ey = 1 / (self.total_t * A_inv[1, 1])\n self.Gxy = 1 / (self.total_t * A_inv[2, 2])\n self.poissonxy = - A_inv[0,1] / A_inv[0, 0]\n # Flexural stiffness properties\n self.zx = 0.0\n self.zy = 0.0\n zx_dem = 0.0\n zy_dem = 0.0\n self.EIx = 0.0\n self.EIy = 0.0\n z = 0.0\n # Calculate neutral axis in direction x and y\n for S_bar, t in zip(self.S_bars, self.ts):\n Ex = 1 / S_bar[0, 0]\n Ey = 1 / S_bar[1, 1]\n z += t / 2.0\n self.zx += Ex * t * z\n zx_dem += Ex * t\n self.zy += Ey * t * z\n zy_dem += Ey * t\n z += t / 2.0\n self.zx = self.zx / zx_dem\n self.zy = self.zy / zy_dem\n # Calculate EI in direction x and y\n z = 0.0\n for S_bar, t in zip(self.S_bars, self.ts):\n Ex = 1 / S_bar[0, 0]\n Ey = 1 / S_bar[1, 1]\n Gxy = 1 / S_bar[2, 2]\n z += t / 2.0\n self.EIx += Ex * (t**3 / 12 + t * (z - self.zx)**2)\n self.EIy += Ey * (t**3 / 12 + t * (z - self.zy)**2)\n self.GA += Gxy * t\n z += t / 2.0\n return self.Ex, self.Ey, self.Gxy, self.poissonxy", "def thermal_state(self, beta: float = .1) -> numpy.ndarray:\n rho = numpy.exp(-beta * self.cost)\n return rho / numpy.sum(rho)" ]
[ "0.639904", "0.6349566", "0.6122259", "0.587715", "0.5850544", "0.5828956", "0.5805845", "0.57823133", "0.57640857", "0.5762716", "0.575468", "0.5743525", "0.57068014", "0.568855", "0.56624216", "0.56600386", "0.5653631", "0.56313336", "0.5620443", "0.56074494", "0.56047225", "0.5594828", "0.5593493", "0.55907947", "0.55837363", "0.557672", "0.555689", "0.5547914", "0.5522239", "0.55123913", "0.55069715", "0.55026793", "0.5499668", "0.54879", "0.5452904", "0.5431458", "0.5430099", "0.5424282", "0.5423658", "0.5416485", "0.5397237", "0.5386879", "0.53783995", "0.536167", "0.5348905", "0.5343112", "0.53377736", "0.53314805", "0.5322066", "0.5317233", "0.53150284", "0.5297203", "0.5293419", "0.528893", "0.5288422", "0.5287639", "0.52872145", "0.52864015", "0.5265614", "0.5264982", "0.5260545", "0.5252614", "0.524909", "0.5246379", "0.52242917", "0.5209703", "0.5198888", "0.51977116", "0.5185206", "0.5181791", "0.51602125", "0.5151481", "0.5148882", "0.51443636", "0.5140504", "0.51396143", "0.51360065", "0.51351553", "0.51348084", "0.51346534", "0.51337683", "0.512721", "0.512626", "0.51187533", "0.5111443", "0.51095384", "0.5108528", "0.5102026", "0.5100383", "0.50949544", "0.5094171", "0.5084597", "0.508134", "0.50801253", "0.5077946", "0.5074638", "0.5070769", "0.5069467", "0.50661176", "0.5064331", "0.50564086" ]
0.0
-1
Calculate the DOS in a set of energies by full diagonalization
def ldos_waves(intra,es = [0.0],delta=0.01):
  es = np.array(es) # array with energies
  eig,eigvec = lg.eigh(intra)
  ds = [] # empty list
  for energy in es: # loop over energies
    d = np.array([0.0 for i in range(intra.shape[0])]) # initialize
    for (v,ie) in zip(eigvec.transpose(),eig): # loop over wavefunctions
      v2 = (np.conjugate(v)*v).real # square of wavefunction
      fac = delta/((energy-ie)**2 + delta**2) # factor to create a delta
      d += fac*v2 # add contribution
    d /= np.pi # normalize
    ds.append(d) # store
  ds = np.array(ds) # convert to array
  return ds
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dos_integral(E,dos,m=0):\n somma = 0.0\n h = 0.5*(E[2]-E[0])\n for j in range(0,len(dos)-3,3):\n somma += 3.0*pow(E[j],m)*dos[j]+3.0*pow(E[j+1],m)*dos[j+1]+2.0*pow(E[j+2],m)*dos[j+2]\n \n return h*somma*3.0/8.0;", "def multi_ldos(h,es=[0.0],delta=0.001,nrep=3,nk=2,numw=3,random=False):\n print(\"Calculating eigenvectors in LDOS\")\n if h.is_sparse: # sparse Hamiltonian\n from bandstructure import smalleig\n print(\"SPARSE Matrix\")\n evals,ws = [],[] # empty list\n ks = klist.kmesh(h.dimensionality,nk=nk) # get grid\n hk = h.get_hk_gen() # get generator\n for k in ks: # loop\n print(\"Diagonalizing in LDOS, SPARSE mode\")\n if random:\n k = np.random.random(3) # random vector\n print(\"RANDOM vector in LDOS\")\n e,w = smalleig(hk(k),numw=numw,evecs=True)\n evals += [ie for ie in e]\n ws += [iw for iw in w]\n# evals = np.concatenate([evals,e]) # store\n# ws = np.concatenate([ws,w]) # store\n# raise\n# (evals,ws) = h.eigenvectors(nk) # get the different eigenvectors\n else:\n print(\"DENSE Matrix\")\n (evals,ws) = h.eigenvectors(nk) # get the different eigenvectors\n ds = [(np.conjugate(v)*v).real for v in ws] # calculate densities\n del ws # remove the wavefunctions\n os.system(\"rm -rf MULTILDOS\") # remove folder\n os.system(\"mkdir MULTILDOS\") # create folder\n go = h.geometry.copy() # copy geometry\n go = go.supercell(nrep) # create supercell\n fo = open(\"MULTILDOS/MULTILDOS.TXT\",\"w\") # files with the names\n for e in es: # loop over energies\n print(\"MULTILDOS for energy\",e)\n out = np.array([0.0 for i in range(h.intra.shape[0])]) # initialize\n for (d,ie) in zip(ds,evals): # loop over wavefunctions\n fac = delta/((e-ie)**2 + delta**2) # factor to create a delta\n out += fac*d # add contribution\n out /= np.pi # normalize\n out = spatial_dos(h,out) # resum if necessary\n name0 = \"LDOS_\"+str(e)+\"_.OUT\" # name of the output\n name = \"MULTILDOS/\" + name0\n write_ldos(go.x,go.y,out.tolist()*(nrep**h.dimensionality),\n output_file=name) # write in file\n fo.write(name0+\"\\n\") # name of the file\n fo.flush() # flush\n fo.close() # close file\n # Now calculate the DOS\n from dos import calculate_dos\n es2 = np.linspace(min(es),max(es),len(es)*10)\n ys = calculate_dos(evals,es2,delta) # use the Fortran routine\n from dos import write_dos\n write_dos(es2,ys,output_file=\"MULTILDOS/DOS.OUT\")", "def main():\n diagonals_in_hd()", "def _compute_diag_H(self, t, index, update_derivatives=False, stationary=False):\r\n\r\n \"\"\"if delta_i~=delta_j:\r\n [h, dh_dD_i, dh_dD_j, dh_dsigma] = np.diag(simComputeH(t, index, t, index, update_derivatives=True, stationary=self.is_stationary))\r\n else:\r\n Decay = self.decay[index]\r\n if self.delay is not None:\r\n t = t - self.delay[index]\r\n \r\n t_squared = t*t\r\n half_sigma_decay = 0.5*self.sigma*Decay\r\n [ln_part_1, sign1] = ln_diff_erfs(half_sigma_decay + t/self.sigma,\r\n half_sigma_decay)\r\n \r\n [ln_part_2, sign2] = ln_diff_erfs(half_sigma_decay,\r\n half_sigma_decay - t/self.sigma)\r\n \r\n h = (sign1*np.exp(half_sigma_decay*half_sigma_decay\r\n + ln_part_1\r\n - log(Decay + D_j)) \r\n - sign2*np.exp(half_sigma_decay*half_sigma_decay\r\n - (Decay + D_j)*t\r\n + ln_part_2 \r\n - log(Decay + D_j)))\r\n \r\n sigma2 = self.sigma*self.sigma\r\n\r\n if update_derivatives:\r\n \r\n dh_dD_i = ((0.5*Decay*sigma2*(Decay + D_j)-1)*h \r\n + t*sign2*np.exp(\r\n half_sigma_decay*half_sigma_decay-(Decay+D_j)*t + ln_part_2\r\n )\r\n + self.sigma/np.sqrt(np.pi)*\r\n (-1 + np.exp(-t_squared/sigma2-Decay*t)\r\n + 
np.exp(-t_squared/sigma2-D_j*t)\r\n - np.exp(-(Decay + D_j)*t)))\r\n \r\n dh_dD_i = (dh_dD_i/(Decay+D_j)).real\r\n \r\n \r\n \r\n dh_dD_j = (t*sign2*np.exp(\r\n half_sigma_decay*half_sigma_decay-(Decay + D_j)*t+ln_part_2\r\n )\r\n -h)\r\n dh_dD_j = (dh_dD_j/(Decay + D_j)).real\r\n\r\n dh_dsigma = 0.5*Decay*Decay*self.sigma*h \\\r\n + 2/(np.sqrt(np.pi)*(Decay+D_j))\\\r\n *((-Decay/2) \\\r\n + (-t/sigma2+Decay/2)*np.exp(-t_squared/sigma2 - Decay*t) \\\r\n - (-t/sigma2-Decay/2)*np.exp(-t_squared/sigma2 - D_j*t) \\\r\n - Decay/2*np.exp(-(Decay+D_j)*t))\"\"\"\r\n pass", "def densidad(qe):\r\n global x,rhoe,rhoi,dx,nparticulas,npuntos_malla,pared_izquierda,pared_derecha\r\n \r\n j1 = sp.dtype(sp.int32) # Asegura que la variable permanezca entera\r\n j2 = sp.dtype(sp.int32) \r\n \r\n # Factor de ponderacion de carga \r\n re = qe/dx \r\n # Densidad electronica \r\n rhoe = sp.zeros(npuntos_malla+1) \r\n # Mapa de cargas sobre la malla\r\n for i in range(nparticulas):\r\n xa = x[i]/dx # xparticula/dx\r\n j1 = int(xa) # indices de la malla fija xmalla/dx\r\n j2 = j1 + 1 # Siguiente punto en la malla\r\n f2 = xa - j1 # |xmalla - xparticula|/dx\r\n f1 = 1.0 - f2\r\n rhoe[j1] = rhoe[j1] + re*f1\r\n rhoe[j2] = rhoe[j2] + re*f2\r\n\r\n # Condiciones de frontera periodica\r\n rhoe[0] += rhoe[npuntos_malla]\r\n rhoe[npuntos_malla] = rhoe[0]\r\n \r\n # Se agrega una densidad de iones neutral\r\n rhoi = rho0\r\n\r\n return True", "def gather_derivatives(self):\n self.xdot[0,0:self.n] = self.mdot[0:self.n] \n self.xdot[1,0:self.n] = self.rdot[0:self.n,0]\n self.xdot[2,0:self.n] = self.rdot[0:self.n,1]\n self.xdot[3,0:self.n] = self.rdot[0:self.n,2]\n self.xdot[4,0:self.n] = self.vdot[0:self.n,0]\n self.xdot[5,0:self.n] = self.vdot[0:self.n,1]\n self.xdot[6,0:self.n] = self.vdot[0:self.n,2]\n self.xdot[7,0:self.n] = self.rhodot[0:self.n] \n self.xdot[8,0:self.n] = 0\n self.xdot[9,0:self.n] = 0\n self.xdot[10,0:self.n] = self.udot[0:self.n]\n return self.xdot", "def derivatives(self):\n self.rdot = self.v\n self.vdot[:,:] = 0.0\n self.udot[:] = 0.0\n\n t = time()\n for nl in self.nlists: \n nl.separations()\n #nl.apply_minimum_image()\n self.timing['pairsep time'] = (time() - t)\n\n t = time()\n if SPROPS:\n properties.spam_properties(self,self.nl_default \\\n ,self.h[0:self.n],self.hlr[0:self.n])\n self.timing['SPAM time'] = time() - t\n \n t = time()\n for force in self.forces:\n force.apply()\n self.timing['force time'] = time() - t\n \n if ADVECTIVE:\n self.rdot[:,:] = 0.0", "def deeming(times,signal, f0=None, fn=None, df=None, norm='amplitude'): \n #-- initialize variables for use in Fortran routine\n nf=int((fn-f0)/df+0.001)+1\n n = len(times)\n T = times.ptp()\n f1,s1 = fdeeming.deeming1(times,signal,f0,df,nf)\n s1 /= n\n fact = np.sqrt(4./n)\n fact = np.sqrt(4./n)\n if norm =='distribution': # statistical distribution\n s1 /= np.var(signal)\n elif norm == \"amplitude\": # amplitude spectrum\n s1 = fact * np.sqrt(s1)\n elif norm == \"density\": # power density\n s1 = fact**2 * s1 * T\n \n return f1,s1", "def compute_thermo(E,dos,TT):\n if (len(dos)<3):\n print (\"Not enough points in the phonon DOS!\")\n return None\n \n ZPE = 0.5*dos_integral(E,dos,1)\n modes = dos_integral(E,dos)\n \n EvibT = np.zeros(len(TT))\n SvibT = np.zeros(len(TT))\n CvibT = np.zeros(len(TT))\n FvibT = np.zeros(len(TT))\n for i in range(0,len(TT)):\n h = 0.5*(E[2]-E[0])\n arg = K_BOLTZMANN_RY*TT[i]\n arg2 = 2.0 * arg\n Evib = 0.0\n Svib = 0.0\n Cvib = 0.0\n for j in range(0,len(dos)-3,3):\n\n Evib += 
3.0*E[j]/tanh(E[j]/(arg2))*dos[j]+\\\n 3.0*E[j+1]/tanh(E[j+1]/(arg2))*dos[j+1]+\\\n 2.0*E[j+2]/tanh(E[j+2]/(arg2))*dos[j+2]\n \n Svib += 3.0*(E[j]/arg2/tanh(E[j]/arg2)-log(2.0*sinh(E[j]/arg2)))*dos[j]+\\\n 3.0*(E[j+1]/arg2/tanh(E[j+1]/arg2)-log(2.0*sinh(E[j+1]/arg2)))*dos[j+1]+\\\n 2.0*(E[j+2]/arg2/tanh(E[j+2]/arg2)-log(2.0*sinh(E[j+2]/arg2)))*dos[j+2]\n\n try: # avoid overflow error for arg very small\n Cvib += 3.0*pow(E[j]/arg,2)/( 4.0*pow(sinh(E[j]/(arg2)),2) )*dos[j]+\\\n 3.0*pow(E[j+1]/arg,2)/( 4.0*pow(sinh(E[j+1]/(arg2)),2) )*dos[j+1]+\\\n 2.0*pow(E[j+2]/arg,2)/( 4.0*pow(sinh(E[j+2]/(arg2)),2) )*dos[j+2]\n except:\n Cvib += 0.0\n\n EvibT[i] = h*0.5*Evib*3.0/8.0 # h is the integration step, 0.5 comes from the equation for E,\n # the factor 3.0/8.0 comes from the Simpson 3/8 rule\n SvibT[i] = h*K_BOLTZMANN_RY*Svib*3.0/8.0\n CvibT[i] = h*K_BOLTZMANN_RY*Cvib*3.0/8.0\n FvibT = EvibT - SvibT * TT\n\n print ()\n return TT, EvibT, SvibT, CvibT, FvibT, ZPE, modes", "def calcula_desvios_padrao(self):\n self.dp_x1 = np.sqrt(self.var_x1)\n self.dp_w1 = np.sqrt(self.var_w1)\n self.dp_nq1 = np.sqrt(self.var_nq1)\n self.dp_ns1 = np.sqrt(self.var_ns1)\n self.dp_n1 = np.sqrt(self.var_n1)\n self.dp_t1 = np.sqrt(self.var_t1)\n self.dp_w1_med = np.sqrt(self.var_w1_med)\n\n self.dp_x2 = np.sqrt(self.var_x2)\n self.dp_w2 = np.sqrt(self.var_w2)\n self.dp_nq2 = np.sqrt(self.var_nq2)\n self.dp_ns2 = np.sqrt(self.var_ns2)\n self.dp_n2 = np.sqrt(self.var_n2)\n self.dp_t2 = np.sqrt(self.var_t2)\n self.dp_w2_med = np.sqrt(self.var_w2_med)", "def total_electronic_hamiltonian(self):\n return block_diag(*[self.electronic_manifold(n) for n in range(3)])", "def post_heatdiag(self,ds):\n #\n self.drmid=self.rmid*0 # mem allocation\n self.drmid[1:-1]=(self.rmid[2:]-self.rmid[0:-2])*0.5\n self.drmid[0]=self.drmid[1]\n self.drmid[-1]=self.drmid[-2]\n\n dt = np.zeros_like(self.time)\n dt[1:] = self.time[1:] - self.time[0:-1]\n dt[0] = dt[1]\n rst=np.nonzero(dt<0) #index when restat happen\n dt[rst]=dt[rst[0]+1]\n self.dt = dt\n\n #get separatrix r\n self.rs=np.interp([1],self.psin,self.rmid)\n \n self.rmidsepmm=(self.rmid-self.rs)*1E3 # dist from sep in mm\n\n #get heat\n self.qe=np.transpose(self.e_perp_energy_psi + self.e_para_energy_psi)/dt/ds\n self.qi=np.transpose(self.i_perp_energy_psi + self.i_para_energy_psi)/dt/ds\n self.ge=np.transpose(self.e_number_psi)/dt/ds\n self.gi=np.transpose(self.i_number_psi)/dt/ds\n\n self.qe = np.transpose(self.qe)\n self.qi = np.transpose(self.qi)\n self.ge = np.transpose(self.ge)\n self.gi = np.transpose(self.gi)\n\n self.qt=self.qe+self.qi\n #imx=self.qt.argmax(axis=1)\n mx=np.amax(self.qt,axis=1)\n self.lq_int=mx*0 #mem allocation\n\n for i in range(mx.shape[0]):\n self.lq_int[i]=np.sum(self.qt[i,:]*self.drmid)/mx[i]", "def _make_edisp_hdu(cls, table_energy, table_migra, table_theta, matrix):\n\n table = Table(\n {\n \"ENERG_LO\": table_energy[\"ETRUE_LO\"][np.newaxis, :].data\n * table_energy[\"ETRUE_LO\"].unit,\n \"ENERG_HI\": table_energy[\"ETRUE_HI\"][np.newaxis, :].data\n * table_energy[\"ETRUE_HI\"].unit,\n \"MIGRA_LO\": table_migra[\"MIGRA_LO\"][np.newaxis, :].data\n * table_migra[\"MIGRA_LO\"].unit,\n \"MIGRA_HI\": table_migra[\"MIGRA_HI\"][np.newaxis, :].data\n * table_migra[\"MIGRA_HI\"].unit,\n \"THETA_LO\": table_theta[\"THETA_LO\"][np.newaxis, :].data\n * table_theta[\"THETA_LO\"].unit,\n \"THETA_HI\": table_theta[\"THETA_HI\"][np.newaxis, :].data\n * table_theta[\"THETA_HI\"].unit,\n \"MATRIX\": matrix[\"MATRIX\"][np.newaxis, :, :] * matrix[\"MATRIX\"].unit,\n }\n 
)\n\n header = fits.Header()\n header[\"HDUDOC\"] = (\n \"https://gamma-astro-data-formats.readthedocs.io/en/latest/irfs/index.html\",\n \"\",\n )\n header[\"HDUCLASS\"] = \"GADF\", \"\"\n header[\"HDUCLAS1\"] = \"RESPONSE\", \"\"\n header[\"HDUCLAS2\"] = \"EDISP\", \"\"\n header[\"HDUCLAS3\"] = \"POINT-LIKE\", \"\"\n header[\"HDUCLAS4\"] = \"EDISP_2D\", \"\"\n\n edisp_hdu = fits.BinTableHDU(table, header, name=\"ENERGY DISPERSION\")\n\n # primary_hdu = fits.PrimaryHDU()\n # hdulist = fits.HDUList([primary_hdu, edisp_hdu])\n #\n # return hdulist\n return edisp_hdu", "def DM(self, masses=None):\n N = len(self.diameters)\n rs = self.rs\n d = self.ndim\n M = np.zeros((d * N, d * N))\n\n for i in range(N):\n sigi = self.diameters[i]\n for j in range(i):\n rijvec = rs[i, :] - rs[j, :]\n rijvec = rijvec - np.around(rijvec)\n rijsq = np.sum(rijvec**2)\n dij = (sigi + self.diameters[j]) / 2\n dijsq = dij**2\n if rijsq < dijsq:\n rij = np.sqrt(rijsq)\n rijouter = np.outer(rijvec, rijvec)\n # U(r) = ½(1 - r/d)²\n # d²U/dxdy = (dr/dx)(dr/dy)/d² - (1 - r/d)(d²r/dxdy)/d\n # dr/dx = x/r\n # d²r/dxdy = -(x y) / r³\n # d²U/dxdy = -(x y)/(r² d²) + (1 - r/d)((x y)/r²)/(d r)\n # d²U/dx² = (dr/dx)²/d² - (1 - r/d)(d²r/dx²)/d\n # d²r/dx² = -x² / r³ + 1/r\n # d²U/dxᵢdxⱼ = -(xᵢ xⱼ)/(r² d²) + (1 - r/d)((xᵢ xⱼ)/r² -\n # δᵢⱼ)/(d r)\n\n Mij1 = -rijouter / rijsq / dijsq\n Mij2 = (1 - rij / dij) * \\\n (rijouter / rijsq - np.eye(d)) / rij / dij\n Mij = Mij1 + Mij2\n\n M[d * i:d * i + d, d * j:d * j + d] = Mij\n M[d * j:d * j + d, d * i:d * i + d] = Mij\n M[d * i:d * i + d, d * i:d * i + d] -= Mij\n M[d * j:d * j + d, d * j:d * j + d] -= Mij\n\n np.divide(M, self.L**2, out=M)\n if masses is None:\n return M\n\n # TODO: is the mass part of this really part of this?\n marr = np.array(masses)\n assert np.shape(masses) == np.shape(self.diameters)\n marr = np.array([masses] * d)\n marr = marr.T.flatten()\n # marr is now [m1,m1,m2,m2,...] 
(in 2D)\n mm = np.eye(d * N)\n np.multiply(mm, marr**-.5, out=mm)\n # mm is now M^-½, where M is the mass matrix\n\n mm.dot(M, out=M)\n M.dot(mm, out=M)\n return M", "def diamond(T, Vg_start, Vg_end, Ng, Vd_start, Vd_end, Nd, Cs, Cd, Cg, Gs, Gd, num_e, mode='difcon', dVg=False, filename='simData.dat'):\n Vg = scipy.linspace(Vg_start, Vg_end, Ng)\n Vd = scipy.linspace(Vd_start, Vd_end, Nd)\n data_matrix = []\n for (i_vg, vg) in enumerate(Vg):\n myset=system(vg, Cs, Cd, Cg, Gs, Gd, num_e)\n myset.set_temperature(T)\n myset.pre_processing()\n I = []\n P = []\n V_dot = []\n print \"Vg = \", vg\n for vd in Vd:\n myset.tunnel_rate([0, vd, vg]) \n myset.solver() \n I.append(myset.current('drain','dot'))\n P.append(myset.proba('dot'))\n V_dot.append(myset.voltage('dot'))\n # convert lists to scipy arrays\n I = scipy.array(I)\n P = scipy.array(P)\n V_dot = scipy.array(V_dot)\n # compute the diffential conductance\n if mode == 'current':\n Y = Vd\n F = I\n elif mode == 'difcon':\n F, Y = derive(I, Vd)\n F *= 1e3\n elif mode == 'voltage':\n Y = Vd\n F = V_dot\n elif mode == 'francis':\n F_1, Y = derive(I, Vd)\n F_2, Y = derive(Vd-V_dot, Vd)\n F = F_1/F_2\n F *= 1e3\n elif mode == 'sourcis':\n F_1, Y = derive(I, Vd)\n F_2, Y = derive(V_dot, Vd)\n F = F_1/F_2\n F *= 1e3\n data_matrix.append(F)\n data_matrix = array(data_matrix)\n data_matrix = transpose(data_matrix)\n X = Vg\n \n # Derivate with Vg\n if dVg:\n data_dVg = []\n for vd_i in arange(len(Y)):\n F_dVg, X_dVg = derive(data_matrix[vd_i,:], X)\n F_dVg *= 1e3\n data_dVg.append(F_dVg)\n data_matrix = array(data_dVg)\n X = X_dVg\n \n if filename != 0: \n write_file(data_matrix, filename)\n return data_matrix, X, Y", "def _calc_energy( self, V_a, eos_d ):\n pass", "def diagonalizing_gates(self):\n raise NotImplementedError", "def freqdomain(self):\n \n\n #self.df = self.f[1] - self.f[0]\n #frequency vector\n #fv = fftshift(fftfreq(len(eta),1./fs))\n #fv = fv[len(fv)/2:]\n \n #spectral analysis\n self.sn1 = self.espec1(self.n1)\n self.sn2 = self.espec1(self.n2)\n self.sn3 = self.espec1(self.n3)\n self.sn12 = self.espec2(self.n1,self.n2)\n self.sn13 = self.espec2(self.n1,self.n3)\n self.sn23 = self.espec2(self.n2,self.n3)\n \n #delta freq\n self.df = self.f[3] - self.f[2]\n\n #calculo do numero de onda\n #self.wavenumber()\n #k = numeronda(h,f,len(f))\n #k = np.array(k)\n\n #calculo dos coeficientes de fourier - NDBC 96_01 e Steele (1992)\n c = self.sn2[:,1] + self.sn3[:,1]\n cc = np.sqrt(self.sn1[:,1] * (c))\n \n self.a1 = self.sn12[:,3] / cc\n self.b1 = self.sn13[:,3] / cc\n \n self.a2 = (self.sn2[:,1] - self.sn3[:,1]) / c\n self.b2 = 2 * self.sn12[:,2] / c\n \n #calcula direcao de onda\n #mean direction\n self.dire1 = np.array([np.angle(np.complex(self.b1[i],self.a1[i]),deg=True) for i in range(len(self.a1))])\n \n #principal direction\n self.dire2 = 0.5 * np.array([np.angle(np.complex(self.b2[i],self.a2[i]),deg=True) for i in range(len(self.a2))])\n \n #condicao para valores maiores que 360 e menores que 0\n self.dire1[np.where(self.dire1 < 0)] = self.dire1[np.where(self.dire1 < 0)] + 360\n self.dire1[np.where(self.dire1 > 360)] = self.dire1[np.where(self.dire1 > 360)] - 360\n self.dire2[np.where(self.dire2 < 0)] = self.dire2[np.where(self.dire2 < 0)] + 360\n self.dire2[np.where(self.dire2 > 360)] = self.dire2[np.where(self.dire2 > 360)] - 360\n \n #acha o indice da frequencia de pico\n ind = np.where(self.sn1[:,1] == np.max(self.sn1[:,1]))[0]\n \n #periodo de pico\n self.tp = (1. 
/ self.f[ind])[0]\n \n #momento espectral de ordem zero total - m0\n self.m0 = np.sum(self.sn1[:,1]) * self.df\n \n #calculo da altura significativa\n self.hm0 = 4.01 * np.sqrt(self.m0)\n \n #direcao do periodo de pico\n self.dp = self.dire1[ind][0]\n \n #Espalhamento direcional\n #Formula do sigma1 do livro Tucker&Pitt(2001) \"Waves in Ocean Engineering\" pags 196-198\n c1 = np.sqrt(self.a1 ** 2 + self.b1 ** 2)\n c2 = np.sqrt(self.a2 ** 2 + self.b2 ** 2)\n \n s1 = c1 / (1-c1)\n s2 = (1 + 3 * c2 + np.sqrt(1 + 14 * c2 + c2 ** 2)) / (2 * (1 - c2))\n \n self.sigma1 = np.sqrt(2 - 2 * c1) * 180 / np.pi\n self.sigma2 = np.sqrt((1 - c2) / 2) * 180 / np.pi\n \n self.sigma1p = np.real(self.sigma1[ind])[0]\n self.sigma2p = np.real(self.sigma2[ind])[0]\n \n # pondaf = np.array([hm0, tp, dp, sigma1p, sigma2p])\n \n #hm0, tp, dp, sigma1, sigma2, sigma1p, sigma2p, f, df, k, sn, snx, sny, snn, snnx, snny, snxny, snxnx, snyny, a1, b1, a2, b2, dire1, dire2\n #return hm0, tp, dp, sigma1, sigma2, sigma1p, sigma2p, f, df, k, sn, snx, sny, snn, snnx, snny, snxny, snxnx, snyny, a1, b1, a2, b2, dire1, dire2", "def gather_derivatives(self):\n self.xdot[0,0:self.n] = self.mdot[0:self.n] \n self.xdot[1,0:self.n] = self.rdot[0:self.n,0]\n self.xdot[2,0:self.n] = self.rdot[0:self.n,1]\n self.xdot[3,0:self.n] = self.rdot[0:self.n,2]\n self.xdot[4,0:self.n] = self.vdot[0:self.n,0]\n self.xdot[5,0:self.n] = self.vdot[0:self.n,1]\n self.xdot[6,0:self.n] = self.vdot[0:self.n,2]\n return self.xdot", "def determinant(self):\r\n m11,m12,m13,m14,m21,m22,m23,m24,m31,m32,m33,m34,m41,m42,m43,m44 = self.mlist\r\n\r\n return m11*m22*m33*m44 \\\r\n -m11*m22*m34*m43 \\\r\n +m11*m23*m34*m42 \\\r\n -m11*m23*m32*m44 \\\r\n +m11*m24*m32*m43 \\\r\n -m11*m24*m33*m42 \\\r\n -m12*m23*m34*m41 \\\r\n +m12*m23*m31*m44 \\\r\n -m12*m24*m31*m43 \\\r\n +m12*m24*m33*m41 \\\r\n -m12*m21*m33*m44 \\\r\n +m12*m21*m34*m43 \\\r\n +m13*m24*m31*m42 \\\r\n -m13*m24*m32*m41 \\\r\n +m13*m21*m32*m44 \\\r\n -m13*m21*m34*m42 \\\r\n +m13*m22*m34*m41 \\\r\n -m13*m22*m31*m44 \\\r\n -m14*m21*m32*m43 \\\r\n +m14*m21*m33*m42 \\\r\n -m14*m22*m33*m41 \\\r\n +m14*m22*m31*m43 \\\r\n -m14*m23*m31*m42 \\\r\n +m14*m23*m32*m41", "def diagnosticos(): \r\n global rhoe,Ex,npuntos_malla,itiempo,longitud_malla,rho0,aP,v1,v2,F\r\n global EnergiaK, EnergiaP, EnergiaT, emax\r\n global iout,igrafica,ifase,ivdist, distribucion\r\n global Archivos_Densidades, Archivos_Campo, Archivos_Efase, Archivos_Fdistribucion\r\n \r\n # Se crea el eje para graficar las cantidades fisicas involucradas:\r\n xgrafica = dx * sp.arange(npuntos_malla+1)\r\n \r\n if (itiempo == 0): \r\n plt.figure('Cantidades')\r\n plt.clf()\r\n \r\n if (igrafica > 0):\r\n # Se grafica cada paso dado por el contador igrafica:\r\n if (sp.fmod(itiempo,igrafica) == 0): \r\n # Densidad total\r\n plt.figure(1)\r\n if (itiempo >0 ): plt.cla()\r\n plt.plot(xgrafica, -(rhoe+rho0), 'r', label='Densidad')\r\n plt.xlabel('x')\r\n plt.xlim(0,longitud_malla)\r\n plt.ylim(-1.5,1.5)\r\n plt.legend(loc=1)\r\n # Se imprimen y se guardan las imagenes de acuerdo a iout:\r\n plt.pause(0.0001)\r\n plt.draw()\r\n filename = '%0*d_densidad'%(5, itiempo)\r\n Archivos_Densidades[itiempo] = filename\r\n if (iout > 0):\r\n if (sp.fmod(itiempo,iout) == 0): \r\n plt.savefig(filename+'.png',dpi=720)\r\n \r\n # Campo electrico\r\n plt.figure(2)\r\n if (itiempo >0 ): plt.cla()\r\n plt.plot(xgrafica, Ex, 'b' , label = 'Ex')\r\n plt.xlabel('x', fontsize = 18)\r\n plt.ylabel('Ex', fontsize = 18)\r\n plt.xticks(np.linspace(0,16,4), fontsize = 18)\r\n 
plt.yticks(np.linspace(-0.0010,0.0010,5), fontsize = 18)\r\n plt.xlim(0,longitud_malla)\r\n plt.ylim(-0.0015,0.0015)\r\n plt.legend(loc = 1)\r\n # Se imprimen y se guardan las imagenes de acuerdo a iout:\r\n plt.pause(0.0001)\r\n plt.draw()\r\n filename = '%0*d_campoelectrico'%(5, itiempo)\r\n Archivos_Campo[itiempo] = filename\r\n if (iout > 0):\r\n if (sp.fmod(itiempo,iout) == 0): \r\n plt.savefig(filename+'.png',dpi=720)\r\n \r\n if (ifase > 0):\r\n if (sp.fmod(itiempo,ifase) == 0): \r\n # Se grafica el espacio de fase en el paso dado por el contador ifase:\r\n plt.figure(3)\r\n if (itiempo >0 ): plt.cla()\r\n v1 = sp.zeros(nparticulas)\r\n v2 = sp.zeros(nparticulas)\r\n x1 = sp.zeros(nparticulas)\r\n x2 = sp.zeros(nparticulas)\r\n for i in range(nparticulas):\r\n if (v[i-1]>v[i]):\r\n v1[i]=v[i]\r\n x1[i]=x[i]\r\n elif(v[i-1]<v[i]):\r\n v2[i]=v[i]\r\n x2[i]=x[i] \r\n if(distribucion == 0):\r\n plt.scatter(x,v,marker='.',s=0.1,color='black') \r\n elif(distribucion == 1 or distribucion == 2):\r\n plt.scatter(x1,v1,marker='.',s=0.1,color='red') \r\n plt.scatter(x2,v2,marker='.',s=0.1,color='blue')\r\n plt.xticks(np.linspace(0,100,6), fontsize = 18)\r\n plt.yticks(np.linspace(-8,8,5), fontsize = 18)\r\n plt.xlabel('x', fontsize = 18)\r\n plt.ylabel('v', fontsize = 18)\r\n plt.xlim(0,longitud_malla)\r\n plt.ylim(-4,8)\r\n\r\n # Se imprimen y se guardan las imagenes de acuerdo a iout:\r\n plt.pause(0.0001)\r\n plt.draw()\r\n filename = '%0*d_espaciofase'%(5, itiempo)\r\n Archivos_Efase[itiempo] = filename\r\n if (iout > 0):\r\n if (sp.fmod(itiempo,iout) == 0): \r\n plt.savefig(filename+'.png',dpi=240)\r\n \r\n if (ivdist > 0):\r\n if (sp.fmod(itiempo,ivdist)==0):\r\n plt.figure(4)\r\n if (itiempo >0 ): plt.cla() \r\n plt.scatter(v,F,marker = '.' , s=0.1, color ='green')\r\n plt.xlim(-5*vh,5*vh)\r\n plt.ylim(0,1.0)\r\n plt.xlabel('v')\r\n plt.ylabel('f(v)')\r\n #fn_vdist = 'vdist_%0*d'%(5, itiempo)\r\n # Se imprimen y se guardan las imagenes de acuerdo a iout:\r\n plt.pause(0.0001)\r\n plt.draw()\r\n filename = '%0*d_fdistribucion'%(5, itiempo)\r\n Archivos_Fdistribucion[itiempo] = filename\r\n if (iout > 0):\r\n if (sp.fmod(itiempo,iout) == 0): \r\n plt.savefig(filename+'.png',dpi=720)\r\n #Se escriben los datos de la distribucion en un archivo:\r\n# sp.savetxt(fn_vdist, sp.column_stack((v,F)),fmt=('%1.4e','%1.4e')) \r\n \r\n # Energia cinetica:\r\n v2 = v**2\r\n EnergiaK[itiempo] = 0.5*masa*sum(v2)\r\n \r\n # Energia potencial:\r\n e2 = Ex**2\r\n EnergiaP[itiempo] = 0.5*dx*sum(e2)\r\n emax = max(Ex) # Campo maximo para analisis de inestabilidad\r\n \r\n # Energia total: \r\n EnergiaT[itiempo] = EnergiaP[itiempo] + EnergiaK[itiempo]\r\n \r\n return True", "def get_DOS(self,\n dE: float = 0.01,\n emin: float = None,\n emax: float = None,\n zero_at_fermi: bool = False,\n smearing: str = 'gaussian',\n sigma: float = 0.02) -> tuple[NDArray[Shape['Ngrid'], Number], NDArray[Shape['Nspin, Ngrid'], Number]]:\n if zero_at_fermi is True and self.efermi is None:\n raise ValueError('You can not set zero_at_fermi=True if you did not specify efermi value')\n\n if emin is None:\n E_min = np.min(self.eigenvalues) - 1\n if emax is None:\n E_max = np.max(self.eigenvalues) + 1\n\n E_arr = np.arange(E_min, E_max, dE)\n\n if smearing.lower() == 'gaussian':\n DOS_arr = np.sum(self.weights[None, :, None, None] *\n self.gaussian_smearing(E_arr, self.eigenvalues, sigma), axis=(1, 2))\n else:\n raise NotImplemented(f'Smearing {smearing} is not implemented. 
Please use \\'gaussian\\' instead.')\n\n # 2 means occupancy for non-spinpolarized calculation\n if self.nspin == 1:\n DOS_arr *= 2\n\n if zero_at_fermi:\n return E_arr - self.efermi, DOS_arr\n else:\n return E_arr, DOS_arr", "def _dsurface_domega(self):\n\n dsdo = 0.\n\n return dsdo", "def term_1(\n omega1, # vorticity-1\n omega2, # vorticity-2\n omega3, # vorticity-3\n enst, # enstrophy\n nu_sgs, # turbulent viscosity\n h = True): # spatial step size\n #---------------------------------------------------------------------#\n # Setting default values #\n #---------------------------------------------------------------------#\n if h is True:\n h = 2.0*np.pi/64.0\n #---------------------------------------------------------------------#\n # Preallocating space #\n #---------------------------------------------------------------------#\n term = np.zeros((64,64,64))\n #---------------------------------------------------------------------#\n # Enstrophy term #\n #---------------------------------------------------------------------#\n term += np.gradient(\\\n np.gradient(enst, h, edge_order=2)[2], h, edge_order=2)[2]\n term += np.gradient(\\\n np.gradient(enst, h, edge_order=2)[1], h, edge_order=2)[1]\n term += np.gradient(\\\n np.gradient(enst, h, edge_order=2)[0], h, edge_order=2)[0]\n #---------------------------------------------------------------------#\n # Dissipation #\n #---------------------------------------------------------------------#\n omega1_grad = np.gradient(omega1, h, edge_order=2)\n omega2_grad = np.gradient(omega2, h, edge_order=2)\n omega3_grad = np.gradient(omega3, h, edge_order=2)\n term -= np.square(omega1_grad[2])\n term -= np.square(omega1_grad[1])\n term -= np.square(omega1_grad[0])\n term -= np.square(omega2_grad[2])\n term -= np.square(omega2_grad[1])\n term -= np.square(omega2_grad[0])\n term -= np.square(omega3_grad[2])\n term -= np.square(omega3_grad[1])\n term -= np.square(omega3_grad[0])\n #---------------------------------------------------------------------#\n # Applying the subgrid stress #\n #---------------------------------------------------------------------#\n term *= nu_sgs\n\n return term", "def dset(self):\n\n a = 0.0\n b = 0.0\n sums = np.sum(self.descriptors, axis=0)\n for sum in sums:\n if sum > 0:\n if sum == self.d_length:\n b += 1.\n else:\n a += 1.\n return a / (a+b)", "def diagonals_in_hd():\n number_of_pairs = 100000\n angles_for_d = {}\n for d in (10, 100, 1000):\n number_of_corners = 2 ** d - 1\n first_corner = [random.randint(0, number_of_corners) for _ in range(0, number_of_pairs)]\n second_corner = [random.randint(0, number_of_corners) for _ in range(0, number_of_pairs)]\n\n dummy_d = [d for _ in range(0, number_of_pairs)]\n angles = []\n with cf.ProcessPoolExecutor() as executor:\n results = executor.map(find_angle, first_corner, second_corner, dummy_d)\n for result in results:\n angles.append(result)\n ser = pd.Series(angles)\n print(f\"Angles between diagonals for {d} dimensions\")\n print(ser.describe())\n angles_for_d[d] = ser\n\n plot_pmfs_for_ds(angles_for_d)", "def build_dispersion_diags(self):\n N = self.N\n j = self._j # Index of the mid-point\n diags = np.zeros((2*self._j+1, self.N))\n\n dx3 = np.power(self.dx, 3.)\n\n ## This tells us how the diagonal matrix construction works\n #diags[j-2,:-4] = np.arange(1,N+1)[4:]\n #diags[j-1,:-2] = np.arange(1,N+1)[2:]\n #diags[j+1,:] = np.arange(1,N+1)\n #diags[j+2,:] = np.arange(1,N+1)\n\n #diags[j,0:2] = 11\n #diags[j+1,1:3] = 12\n #diags[j+2,2:4] = 13\n #diags[j+3,3:5]= 14\n\n\n # 
Original method had assymmetric diagonals\n #cff = -self.beta/(2*dx3)\n #diags[j-2,:] += -1*cff\n #diags[j-1,:] += 2*cff\n #diags[j+1,:] += -2*cff\n #diags[j+2,:] += 1*cff\n\n # Need to stagger these diagonals so lower and upper bands are symmetric\n cff = -1/(2*dx3)\n beta = self.beta\n diags[j-2,:-4] += -1*cff*beta[4:]\n diags[j-1,:-2] += 2*cff*beta[2:]\n diags[j+1,:] += -2*cff*beta\n diags[j+2,:] += 1*cff*beta\n\n ## Left boundary - use forward differencing\n diags[j-1,0] = 0\n diags[j,0:2] = -2*cff*beta[0]\n diags[j+1,1:3] = 6*cff*beta[0]\n diags[j+2,2:4] = -6*cff*beta[0]\n diags[j+3,3:5] = 2*cff*beta[0]\n\n # Zero first two points\n #diags[j-1,0] = 0\n #diags[j,0:2] = 0 \n #diags[j+1,1:3] = 0 \n #diags[j+2,2:4] = 0 \n #if self._j>2:\n # diags[j+3,3:5] = 0 \n\n return diags", "def main():\n N = 201 # Amount of gridpoints, odd number to include 0\n L = 10 # Size of the system\n k = 50 # Amount of energies and states calculated\n x = y = np.linspace(-L/2,L/2,N) # Gridpoints\n h = x[1]-x[0] # Spacing of gridpoints\n\n # Solve the system with and without perturbation\n E,psi,E_p,psi_p = fdm_2d(N,L,x,y,h,k)\n\n # Print the first two energies and the absolute error of the energies\n print('Energies of the two lowest states:')\n print('E_00 = %.4f' % E[0])\n print('E_01 = %.4f' % E[1], '\\n')\n print('Absolute error for E_00: %.4e' % np.abs(E[0]-1))\n print('Absolute error for E_01: %.4e' % np.abs(E[1]-2))\n\n print('\\nEnergies of the two lowest states after perturbation:')\n print('E_00 = %.4f' % E_p[0])\n print('E_01 = %.4f' % E_p[1])\n\n # Calculate the normalized densities of the states\n densities_norm = np.zeros((k,N,N))\n densities_norm_p = np.zeros((k,N,N))\n for i in range(k):\n # meshgrid form\n state = np.reshape(psi.T[i],(N,N))\n state_p = np.reshape(psi_p.T[i],(N,N))\n densities_norm[i] = normalized_density(state,x)\n densities_norm_p[i] = normalized_density(state_p,x)\n\n # Analytical solution of the ground state\n X,Y = np.meshgrid(x,y)\n psi00_exact = phi(X,0)*phi(Y,0)\n psi00_exact_density = normalized_density(psi00_exact,x)\n\n print('\\nMaximum absolute error of the normalized ground state densities of the unperturbated system:')\n print('errmax = {:.4e}'.format(np.max(np.abs(densities_norm[0]-psi00_exact_density))))\n\n # Plotting the ground state density of the unperturbated system\n fig1 = plt.figure(figsize=plt.figaspect(0.5))\n ax = fig1.add_subplot(1,2,1,projection='3d')\n surf1 = ax.plot_surface(X, Y, densities_norm[0], cmap=cm.coolwarm,\n linewidth=0, antialiased=False)\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n fig1.suptitle(r'Normalized ground state density $|\\psi|^2$ using FDM')\n ax = fig1.add_subplot(1,2,2)\n ax.imshow(densities_norm[0],extent=[-L/2,L/2,-L/2,L/2])\n plt.savefig(os.path.join(path,'FDM_psi00_unperturbated.png'))\n plt.close()\n\n\n fig2 = plt.figure(figsize=plt.figaspect(0.5))\n ax = fig2.add_subplot(1,2,1,projection='3d')\n surf2 = ax.plot_surface(X, Y, psi00_exact_density, cmap=cm.coolwarm,\n linewidth=0, antialiased=False)\n\n\n # Plotting the ground state density of the perturbated system\n fig1 = plt.figure(figsize=plt.figaspect(0.5))\n ax = fig1.add_subplot(1,2,1,projection='3d')\n surf1 = ax.plot_surface(X, Y, densities_norm_p[0], cmap=cm.coolwarm,\n linewidth=0, antialiased=False)\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n fig1.suptitle(r'Normalized ground state density $|\\psi|^2$ of the perturbated system using FDM')\n ax = fig1.add_subplot(1,2,2)\n ax.imshow(densities_norm_p[0],extent=[-L/2,L/2,-L/2,L/2])\n 
plt.savefig(os.path.join(path,'FDM_psi00_perturbated.png'))\n plt.close()\n\n fig2 = plt.figure(figsize=plt.figaspect(0.5))\n ax = fig2.add_subplot(1,2,1,projection='3d')\n surf2 = ax.plot_surface(X, Y, psi00_exact_density, cmap=cm.coolwarm,\n linewidth=0, antialiased=False)\n\n # Plotting the analytic ground state density\n fig1 = plt.figure(figsize=plt.figaspect(0.5))\n ax = fig1.add_subplot(1,2,1,projection='3d')\n surf1 = ax.plot_surface(X, Y, psi00_exact_density, cmap=cm.coolwarm,\n linewidth=0, antialiased=False)\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n fig1.suptitle(r'Analytical normalized ground state density $|\\psi|^2$')\n ax = fig1.add_subplot(1,2,2)\n ax.imshow(psi00_exact_density,extent=[-L/2,L/2,-L/2,L/2])\n plt.savefig(os.path.join(path,'exact_psi00.png'))\n plt.close()\n\n # Plot some of the other densities and save them as pdf\n for i in range(1,20):\n density = densities_norm[i]\n fig = plt.figure(figsize=plt.figaspect(0.5))\n plt.imshow(density,extent=[-L/2,L/2,-L/2,L/2])\n plt.title('n={}'.format(i))\n plt.savefig(os.path.join(path,'FDM_unperturbated{}.png'.format(i)))\n plt.close()\n\n density_p = densities_norm_p[i]\n fig = plt.figure(figsize=plt.figaspect(0.5))\n plt.imshow(density_p,extent=[-L/2,L/2,-L/2,L/2])\n plt.title('n={}'.format(i))\n plt.savefig(os.path.join(path,'FDM_perturbated{}.png'.format(i)))\n plt.close() \n\n # Plot analytical states until nx,ny = 5\n for nx in range(6):\n for ny in range(6):\n state = phi(X,nx)*phi(Y,ny)\n density = normalized_density(state,x)\n plt.figure()\n plt.imshow(density,extent=[-L/2,L/2,-L/2,L/2])\n plt.title('$n_x={}, n_y={}$'.format(nx,ny))\n plt.savefig(os.path.join(path,'analytical_state_{}_{}.png'.format(nx,ny)))\n plt.close()\n\n # Get analytical energies from nx,ny = 0 to 10\n n = 10\n energies = analytical_energies(n)\n\n # Plot k analytical and the FDM energies\n index = np.arange(k)\n plt.figure()\n plt.plot(index,energies[0:k],label='Analytical energies')\n plt.plot(index,E,label='Unperturbated energies')\n plt.plot(index,E_p,label='Perturbated energies')\n plt.legend()\n plt.xlabel('n')\n plt.ylabel(r'$\\tilde{E} = \\frac{E}{\\hbar\\omega}$')\n plt.title('Energies')\n plt.savefig(os.path.join(path,'energies.png'))\n plt.close()", "def find_large_separation(self):\n\n x = self.modes['n'] # radial order\n y = self.modes['freq'] # frequency\n wid = (0.66*self.numax**0.88)/2/np.sqrt(2*np.log(2.0))\n w = (np.exp((-(y-self.numax)**2)/(2*wid**2))) # weight\n\n mN = np.sum(w)*np.sum(w*x*y) - np.sum(w*x)*np.sum(w*y)\n D = np.sum(w)*np.sum(w*x**2) - np.sum(w*x)**2\n Dn = mN/D\n #print Dn\n\n return Dn", "def calc_ked_WFI(self):\n\n #Initialize kinetic energy density\n self.ked_WFI = np.zeros( (self.grid.Nelem, 1))\n\n #Figure out the number of occupied orbitals\n if self.m == 0:\n if self.pol == 1:\n Nocc = np.floor(self.N/2)\n nu = self.N / 2 - Nocc\n else:\n Nocc = np.floor(self.N)\n nu = self.N - Nocc\n\n else:\n #m>0 orbitals hold twice as many electrons due to +-m symmetry\n if self.pol == 1:\n Nocc = np.floor(self.N / 4)\n nu = self.N / 4 - Nocc\n else:\n Nocc = np.floor(self.N/2)\n nu = self.N / 2 - Nocc\n\n #Construct density\n for i in range(int(Nocc)):\n # print(\"phi from pssolver\", self.phi)\n # print(\"phi subset\", self.phi[:,i])\n # print(\"integrate returns\", self.grid.integrate( self.phi[:,i]**2 )**0.5)\n\n #Normalized orbital\n phi_norm = self.phi[:,i] / self.grid.integrate( self.phi[:,i]**2 )**0.5\n phi_norm = phi_norm[:, None]\n self.ked_WFI += (phi_norm * (self.H0 @ phi_norm)) / self.grid.w[:, 
None]\n\n #If we are doing fractional robitals and are non-integer\n if self.FRACTIONAL is True and nu != 0:\n #Normalized orbital\n phi_norm = self.phi[:,i] / self.grid.integrate( self.phi[:, Nocc+1]**2)**0.5\n phi_norm = phi_norm[:, None]\n self.ked_WFI += nu * ( phi_norm * (self.H0 @ phi_norm) ) / self.grid.w[:, None]\n\n #Scale densities appropriately\n if self.m == 0:\n if self.pol == 1: #Unpolarized electrons\n self.ked_WFI = 2 * self.ked_WFI\n\n else: # m>0 orbitals hold twice as many electrons due to +-m symmetry\n if self.pol == 1:\n self.ked_WFI = 4 * self.ked_WFI\n else:\n self.ked_WFI = 2 * self.ked_WFI", "def EGWD_fg(f):\n A = 4.2e-47\n res = np.zeros((len(f)))\n for i,freq in enumerate(f): \n if freq >=3e-3:\n # strain \n res[i] = A * freq**(-7/3) * np.exp(-2*(freq/5e-2)**2) \n else:\n res[i] = np.NaN\n return np.array(res)", "def darcy_func(self):\n i, o = self.inl[0].to_flow(), self.outl[0].to_flow()\n\n if abs(i[0]) < 1e-4:\n return i[1] - o[1]\n\n visc_i = visc_mix_ph(i, T0=self.inl[0].T.val_SI)\n visc_o = visc_mix_ph(o, T0=self.outl[0].T.val_SI)\n v_i = v_mix_ph(i, T0=self.inl[0].T.val_SI)\n v_o = v_mix_ph(o, T0=self.outl[0].T.val_SI)\n\n re = 4 * abs(i[0]) / (np.pi * self.D.val * (visc_i + visc_o) / 2)\n\n return ((i[1] - o[1]) - 8 * abs(i[0]) * i[0] * (v_i + v_o) / 2 *\n self.L.val * lamb(re, self.ks.val, self.D.val) /\n (np.pi ** 2 * self.D.val ** 5))", "def DM_freqs(self, masses=None):\n ew, ev = np.linalg.eig(self.DM(masses=masses))\n # this used to be over 2pi; I don't know where the 2 went, but it seems\n # to be gone now...\n return np.sqrt(np.abs(ew)) / (np.pi)", "def get_dos(\n w=[],\n grid=[2, 1, 1],\n proj=None,\n efermi=0.0,\n xrange=None,\n nenergy=100,\n sig=0.02,\n use_dask=True,\n filename=\"dos.png\",\n savefig=True,\n):\n nwan = int(np.ceil(np.log2(w.nwan))) ** 2\n kpoints = generate_kgrid(grid=grid)\n nk = len(kpoints)\n q_vals = np.zeros((nk, nwan), dtype=float)\n np_vals = np.zeros((nk, nwan), dtype=float)\n pvals = np.zeros((nk, nwan), dtype=float)\n # if use_dask:\n # def get_vqd_vals(k):\n # hk = get_hk_tb(w=w, k=k)\n # HS = HermitianSolver(hk)\n # vqe_vals, _ = HS.run_vqd()\n # return vqe_vals\n\n # values=[delayed(get_vqd_vals)(k) for k in kpoints]\n # resultsDask = compute(*values, scheduler='processes')\n for i, k in enumerate(kpoints):\n hk = get_hk_tb(w=w, k=k)\n HS = HermitianSolver(hk)\n vqe_vals, _ = HS.run_vqd()\n n_vals, _ = HS.run_numpy()\n q_vals[i, :] = vqe_vals\n np_vals[i, :] = n_vals\n print(\"np_vals\", n_vals)\n print(\"vqe_vals\", vqe_vals)\n\n if xrange is None:\n vmin = np.min(q_vals[:])\n vmax = np.max(q_vals[:])\n vmin2 = vmin - (vmax - vmin) * 0.05\n vmax2 = vmax + (vmax - vmin) * 0.05\n xrange = [vmin2, vmax2]\n # plt.xlim(xrange)\n\n energies = np.arange(\n xrange[0],\n xrange[1] + 1e-5,\n (xrange[1] - xrange[0]) / float(nenergy),\n )\n dos = np.zeros(np.size(energies))\n pdos = np.zeros(np.size(energies))\n\n v = q_vals\n\n # condmin = np.min(v[v > 0.0])\n # valmax = np.max(v[v < 0.0])\n # print(\"DOS BAND GAP \", condmin - valmax, \" \", valmax, \" \", condmin)\n\n c = -0.5 / sig ** 2\n for i in range(np.size(energies)):\n arg = c * (v - energies[i]) ** 2\n dos[i] = np.sum(np.exp(arg))\n if proj is not None:\n pdos[i] = np.sum(np.exp(arg) * pvals)\n\n de = energies[1] - energies[0]\n dos = dos / sig / (2.0 * np.pi) ** 0.5 / float(nk)\n if proj is not None:\n pdos = pdos / sig / (2.0 * np.pi) ** 0.5 / float(nk)\n print(\"np.sum(dos) \", np.sum(dos * de))\n if proj is not None:\n print(\"np.sum(pdos) \", np.sum(pdos * 
de))\n plt.plot(energies, dos)\n plt.savefig(filename)\n plt.close()\n return energies, dos, pdos", "def _analytical_encircled_energy(fno, wavelength, points):\n p = points * np.pi / fno / wavelength\n return 1 - special.j0(p)**2 - special.j1(p)**2", "def dKdiag_dtheta(self,dL_dKdiag,X,target):\r\n FX = self._cos(self.basis_alpha[None,:],self.basis_omega[None,:],self.basis_phi[None,:])(X)\r\n\r\n La = np.column_stack((self.a[0]*np.ones((self.n_basis,1)), self.a[1]*self.basis_omega, self.a[2]*self.basis_omega**2, self.a[3]*self.basis_omega**3))\r\n Lo = np.column_stack((self.basis_omega, self.basis_omega, self.basis_omega, self.basis_omega))\r\n Lp = np.column_stack((self.basis_phi, self.basis_phi+np.pi/2, self.basis_phi+np.pi, self.basis_phi+np.pi*3/2))\r\n r,omega,phi = self._cos_factorization(La,Lo,Lp)\r\n Gint = self._int_computation( r,omega,phi, r,omega,phi)\r\n\r\n Flower = np.array(self._cos(self.basis_alpha,self.basis_omega,self.basis_phi)(self.lower))[:,None]\r\n F1lower = np.array(self._cos(self.basis_alpha*self.basis_omega,self.basis_omega,self.basis_phi+np.pi/2)(self.lower))[:,None]\r\n F2lower = np.array(self._cos(self.basis_alpha*self.basis_omega**2,self.basis_omega,self.basis_phi+np.pi)(self.lower))[:,None]\r\n\r\n #dK_dvar\r\n dK_dvar = 1. / self.variance * mdot(FX, self.Gi, FX.T)\r\n\r\n #dK_dlen\r\n da_dlen = [-3*self.a[0]/self.lengthscale, -2*self.a[1]/self.lengthscale, -self.a[2]/self.lengthscale, 0.]\r\n db_dlen = [0., 4*self.b[1]/self.lengthscale, 2*self.b[2]/self.lengthscale, 2*self.b[3]/self.lengthscale, 2*self.b[4]/self.lengthscale]\r\n dLa_dlen = np.column_stack((da_dlen[0]*np.ones((self.n_basis,1)), da_dlen[1]*self.basis_omega, da_dlen[2]*self.basis_omega**2, da_dlen[3]*self.basis_omega**3))\r\n r1,omega1,phi1 = self._cos_factorization(dLa_dlen,Lo,Lp)\r\n dGint_dlen = self._int_computation(r1,omega1,phi1, r,omega,phi)\r\n dGint_dlen = dGint_dlen + dGint_dlen.T\r\n dlower_terms_dlen = db_dlen[0]*np.dot(Flower,Flower.T) + db_dlen[1]*np.dot(F2lower,F2lower.T) + db_dlen[2]*np.dot(F1lower,F1lower.T) + db_dlen[3]*np.dot(F2lower,Flower.T) + db_dlen[4]*np.dot(Flower,F2lower.T)\r\n dG_dlen = 15*self.lengthscale**4/(400*np.sqrt(5))*Gint + 3*self.lengthscale**5/(400*np.sqrt(5))*dGint_dlen + dlower_terms_dlen\r\n dK_dlen = -mdot(FX,self.Gi,dG_dlen/self.variance,self.Gi,FX.T)\r\n\r\n #dK_dper\r\n dFX_dper = self._cos(-self.basis_alpha[None,:]*self.basis_omega[None,:]/self.period*X ,self.basis_omega[None,:],self.basis_phi[None,:]+np.pi/2)(X)\r\n\r\n dLa_dper = np.column_stack((-self.a[0]*self.basis_omega/self.period, -self.a[1]*self.basis_omega**2/self.period, -self.a[2]*self.basis_omega**3/self.period, -self.a[3]*self.basis_omega**4/self.period))\r\n dLp_dper = np.column_stack((self.basis_phi+np.pi/2,self.basis_phi+np.pi,self.basis_phi+np.pi*3/2,self.basis_phi))\r\n r1,omega1,phi1 = self._cos_factorization(dLa_dper,Lo,dLp_dper)\r\n\r\n IPPprim1 = self.upper*(1./(omega+omega1.T)*np.cos((omega+omega1.T)*self.upper+phi+phi1.T-np.pi/2) + 1./(omega-omega1.T)*np.cos((omega-omega1.T)*self.upper+phi-phi1.T-np.pi/2))\r\n IPPprim1 -= self.lower*(1./(omega+omega1.T)*np.cos((omega+omega1.T)*self.lower+phi+phi1.T-np.pi/2) + 1./(omega-omega1.T)*np.cos((omega-omega1.T)*self.lower+phi-phi1.T-np.pi/2))\r\n IPPprim2 = self.upper*(1./(omega+omega1.T)*np.cos((omega+omega1.T)*self.upper+phi+phi1.T-np.pi/2) + self.upper*np.cos(phi-phi1.T))\r\n IPPprim2 -= self.lower*(1./(omega+omega1.T)*np.cos((omega+omega1.T)*self.lower+phi+phi1.T-np.pi/2) + self.lower*np.cos(phi-phi1.T))\r\n IPPprim = 
np.where(np.isnan(IPPprim1),IPPprim2,IPPprim1)\r\n\r\n IPPint1 = 1./(omega+omega1.T)**2*np.cos((omega+omega1.T)*self.upper+phi+phi1.T-np.pi) + 1./(omega-omega1.T)**2*np.cos((omega-omega1.T)*self.upper+phi-phi1.T-np.pi)\r\n IPPint1 -= 1./(omega+omega1.T)**2*np.cos((omega+omega1.T)*self.lower+phi+phi1.T-np.pi) + 1./(omega-omega1.T)**2*np.cos((omega-omega1.T)*self.lower+phi-phi1.T-np.pi)\r\n IPPint2 = 1./(omega+omega1.T)**2*np.cos((omega+omega1.T)*self.upper+phi+phi1.T-np.pi) + .5*self.upper**2*np.cos(phi-phi1.T)\r\n IPPint2 -= 1./(omega+omega1.T)**2*np.cos((omega+omega1.T)*self.lower+phi+phi1.T-np.pi) + .5*self.lower**2*np.cos(phi-phi1.T)\r\n IPPint = np.where(np.isnan(IPPint1),IPPint2,IPPint1)\r\n\r\n dLa_dper2 = np.column_stack((-self.a[1]*self.basis_omega/self.period, -2*self.a[2]*self.basis_omega**2/self.period, -3*self.a[3]*self.basis_omega**3/self.period))\r\n dLp_dper2 = np.column_stack((self.basis_phi+np.pi/2, self.basis_phi+np.pi, self.basis_phi+np.pi*3/2))\r\n r2,omega2,phi2 = self._cos_factorization(dLa_dper2,Lo[:,0:2],dLp_dper2)\r\n\r\n dGint_dper = np.dot(r,r1.T)/2 * (IPPprim - IPPint) + self._int_computation(r2,omega2,phi2, r,omega,phi)\r\n dGint_dper = dGint_dper + dGint_dper.T\r\n\r\n dFlower_dper = np.array(self._cos(-self.lower*self.basis_alpha*self.basis_omega/self.period,self.basis_omega,self.basis_phi+np.pi/2)(self.lower))[:,None]\r\n dF1lower_dper = np.array(self._cos(-self.lower*self.basis_alpha*self.basis_omega**2/self.period,self.basis_omega,self.basis_phi+np.pi)(self.lower)+self._cos(-self.basis_alpha*self.basis_omega/self.period,self.basis_omega,self.basis_phi+np.pi/2)(self.lower))[:,None]\r\n dF2lower_dper = np.array(self._cos(-self.lower*self.basis_alpha*self.basis_omega**3/self.period,self.basis_omega,self.basis_phi+np.pi*3/2)(self.lower) + self._cos(-2*self.basis_alpha*self.basis_omega**2/self.period,self.basis_omega,self.basis_phi+np.pi)(self.lower))[:,None]\r\n\r\n dlower_terms_dper = self.b[0] * (np.dot(dFlower_dper,Flower.T) + np.dot(Flower.T,dFlower_dper))\r\n dlower_terms_dper += self.b[1] * (np.dot(dF2lower_dper,F2lower.T) + np.dot(F2lower,dF2lower_dper.T)) - 4*self.b[1]/self.period*np.dot(F2lower,F2lower.T)\r\n dlower_terms_dper += self.b[2] * (np.dot(dF1lower_dper,F1lower.T) + np.dot(F1lower,dF1lower_dper.T)) - 2*self.b[2]/self.period*np.dot(F1lower,F1lower.T)\r\n dlower_terms_dper += self.b[3] * (np.dot(dF2lower_dper,Flower.T) + np.dot(F2lower,dFlower_dper.T)) - 2*self.b[3]/self.period*np.dot(F2lower,Flower.T)\r\n dlower_terms_dper += self.b[4] * (np.dot(dFlower_dper,F2lower.T) + np.dot(Flower,dF2lower_dper.T)) - 2*self.b[4]/self.period*np.dot(Flower,F2lower.T)\r\n\r\n dG_dper = 1./self.variance*(3*self.lengthscale**5/(400*np.sqrt(5))*dGint_dper + 0.5*dlower_terms_dper)\r\n dK_dper = 2*mdot(dFX_dper,self.Gi,FX.T) - mdot(FX,self.Gi,dG_dper,self.Gi,FX.T)\r\n\r\n target[0] += np.sum(np.diag(dK_dvar)*dL_dKdiag)\r\n target[1] += np.sum(np.diag(dK_dlen)*dL_dKdiag)\r\n target[2] += np.sum(np.diag(dK_dper)*dL_dKdiag)", "def syed_dilation(data, vessel):", "def dKdiag_dtheta(self,dL_dKdiag,X,target):\r\n FX = self._cos(self.basis_alpha[None,:],self.basis_omega[None,:],self.basis_phi[None,:])(X)\r\n\r\n La = np.column_stack((self.a[0]*np.ones((self.n_basis,1)),self.a[1]*self.basis_omega))\r\n Lo = np.column_stack((self.basis_omega,self.basis_omega))\r\n Lp = np.column_stack((self.basis_phi,self.basis_phi+np.pi/2))\r\n r,omega,phi = self._cos_factorization(La,Lo,Lp)\r\n Gint = self._int_computation( r,omega,phi, r,omega,phi)\r\n\r\n Flower = 
np.array(self._cos(self.basis_alpha,self.basis_omega,self.basis_phi)(self.lower))[:,None]\r\n\r\n #dK_dvar\r\n dK_dvar = 1./self.variance*mdot(FX,self.Gi,FX.T)\r\n\r\n #dK_dlen\r\n da_dlen = [-1./self.lengthscale**2,0.]\r\n dLa_dlen = np.column_stack((da_dlen[0]*np.ones((self.n_basis,1)),da_dlen[1]*self.basis_omega))\r\n r1,omega1,phi1 = self._cos_factorization(dLa_dlen,Lo,Lp)\r\n dGint_dlen = self._int_computation(r1,omega1,phi1, r,omega,phi)\r\n dGint_dlen = dGint_dlen + dGint_dlen.T\r\n dG_dlen = 1./2*Gint + self.lengthscale/2*dGint_dlen\r\n dK_dlen = -mdot(FX,self.Gi,dG_dlen/self.variance,self.Gi,FX.T)\r\n\r\n #dK_dper\r\n dFX_dper = self._cos(-self.basis_alpha[None,:]*self.basis_omega[None,:]/self.period*X ,self.basis_omega[None,:],self.basis_phi[None,:]+np.pi/2)(X)\r\n\r\n dLa_dper = np.column_stack((-self.a[0]*self.basis_omega/self.period, -self.a[1]*self.basis_omega**2/self.period))\r\n dLp_dper = np.column_stack((self.basis_phi+np.pi/2,self.basis_phi+np.pi))\r\n r1,omega1,phi1 = self._cos_factorization(dLa_dper,Lo,dLp_dper)\r\n\r\n IPPprim1 = self.upper*(1./(omega+omega1.T)*np.cos((omega+omega1.T)*self.upper+phi+phi1.T-np.pi/2) + 1./(omega-omega1.T)*np.cos((omega-omega1.T)*self.upper+phi-phi1.T-np.pi/2))\r\n IPPprim1 -= self.lower*(1./(omega+omega1.T)*np.cos((omega+omega1.T)*self.lower+phi+phi1.T-np.pi/2) + 1./(omega-omega1.T)*np.cos((omega-omega1.T)*self.lower+phi-phi1.T-np.pi/2))\r\n IPPprim2 = self.upper*(1./(omega+omega1.T)*np.cos((omega+omega1.T)*self.upper+phi+phi1.T-np.pi/2) + self.upper*np.cos(phi-phi1.T))\r\n IPPprim2 -= self.lower*(1./(omega+omega1.T)*np.cos((omega+omega1.T)*self.lower+phi+phi1.T-np.pi/2) + self.lower*np.cos(phi-phi1.T))\r\n IPPprim = np.where(np.isnan(IPPprim1),IPPprim2,IPPprim1)\r\n\r\n IPPint1 = 1./(omega+omega1.T)**2*np.cos((omega+omega1.T)*self.upper+phi+phi1.T-np.pi) + 1./(omega-omega1.T)**2*np.cos((omega-omega1.T)*self.upper+phi-phi1.T-np.pi)\r\n IPPint1 -= 1./(omega+omega1.T)**2*np.cos((omega+omega1.T)*self.lower+phi+phi1.T-np.pi) + 1./(omega-omega1.T)**2*np.cos((omega-omega1.T)*self.lower+phi-phi1.T-np.pi)\r\n IPPint2 = 1./(omega+omega1.T)**2*np.cos((omega+omega1.T)*self.upper+phi+phi1.T-np.pi) + 1./2*self.upper**2*np.cos(phi-phi1.T)\r\n IPPint2 -= 1./(omega+omega1.T)**2*np.cos((omega+omega1.T)*self.lower+phi+phi1.T-np.pi) + 1./2*self.lower**2*np.cos(phi-phi1.T)\r\n IPPint = np.where(np.isnan(IPPint1),IPPint2,IPPint1)\r\n\r\n dLa_dper2 = np.column_stack((-self.a[1]*self.basis_omega/self.period))\r\n dLp_dper2 = np.column_stack((self.basis_phi+np.pi/2))\r\n r2,omega2,phi2 = dLa_dper2.T,Lo[:,0:1],dLp_dper2.T\r\n\r\n dGint_dper = np.dot(r,r1.T)/2 * (IPPprim - IPPint) + self._int_computation(r2,omega2,phi2, r,omega,phi)\r\n dGint_dper = dGint_dper + dGint_dper.T\r\n\r\n dFlower_dper = np.array(self._cos(-self.lower*self.basis_alpha*self.basis_omega/self.period,self.basis_omega,self.basis_phi+np.pi/2)(self.lower))[:,None]\r\n\r\n dG_dper = 1./self.variance*(self.lengthscale/2*dGint_dper + self.b[0]*(np.dot(dFlower_dper,Flower.T)+np.dot(Flower,dFlower_dper.T)))\r\n\r\n dK_dper = 2*mdot(dFX_dper,self.Gi,FX.T) - mdot(FX,self.Gi,dG_dper,self.Gi,FX.T)\r\n\r\n target[0] += np.sum(np.diag(dK_dvar)*dL_dKdiag)\r\n target[1] += np.sum(np.diag(dK_dlen)*dL_dKdiag)\r\n target[2] += np.sum(np.diag(dK_dper)*dL_dKdiag)", "def build_linear_diags(self):\n N = self.N\n dx = self.dx\n j = self._j # Index of the mid-point\n\n diags = np.zeros((2*self._j+1, self.N))\n\n # Advection term\n cff1 = -1/(2*dx)\n\n # Need to stagger these diagonals so lower and upper bands are 
symmetric\n diags[j-1, :-2] += -1*cff1*self.c[2:]\n diags[j+1, :] += 1*cff1*self.c[:]\n\n # Sponge term\n x = np.arange(0,N*dx,dx)\n rdist = x[-1] - x # Distance from right boundary\n spongefac = -np.exp(-6*rdist/self.spongedist)/self.spongetime\n diags[j,:] += spongefac \n\n return diags", "def dKdiag_dtheta(self,dL_dKdiag,X,target):\r\n FX = self._cos(self.basis_alpha[None,:],self.basis_omega[None,:],self.basis_phi[None,:])(X)\r\n\r\n La = np.column_stack((self.a[0]*np.ones((self.n_basis,1)),self.a[1]*self.basis_omega, self.a[2]*self.basis_omega**2))\r\n Lo = np.column_stack((self.basis_omega,self.basis_omega,self.basis_omega))\r\n Lp = np.column_stack((self.basis_phi,self.basis_phi+np.pi/2,self.basis_phi+np.pi))\r\n r,omega,phi = self._cos_factorization(La,Lo,Lp)\r\n Gint = self._int_computation( r,omega,phi, r,omega,phi)\r\n\r\n Flower = np.array(self._cos(self.basis_alpha,self.basis_omega,self.basis_phi)(self.lower))[:,None]\r\n F1lower = np.array(self._cos(self.basis_alpha*self.basis_omega,self.basis_omega,self.basis_phi+np.pi/2)(self.lower))[:,None]\r\n\r\n #dK_dvar\r\n dK_dvar = 1./self.variance*mdot(FX,self.Gi,FX.T)\r\n\r\n #dK_dlen\r\n da_dlen = [-6/self.lengthscale**3,-2*np.sqrt(3)/self.lengthscale**2,0.]\r\n db_dlen = [0.,2*self.lengthscale/3.]\r\n dLa_dlen = np.column_stack((da_dlen[0]*np.ones((self.n_basis,1)),da_dlen[1]*self.basis_omega,da_dlen[2]*self.basis_omega**2))\r\n r1,omega1,phi1 = self._cos_factorization(dLa_dlen,Lo,Lp)\r\n dGint_dlen = self._int_computation(r1,omega1,phi1, r,omega,phi)\r\n dGint_dlen = dGint_dlen + dGint_dlen.T\r\n dG_dlen = self.lengthscale**2/(4*np.sqrt(3))*Gint + self.lengthscale**3/(12*np.sqrt(3))*dGint_dlen + db_dlen[0]*np.dot(Flower,Flower.T) + db_dlen[1]*np.dot(F1lower,F1lower.T)\r\n dK_dlen = -mdot(FX,self.Gi,dG_dlen/self.variance,self.Gi,FX.T)\r\n\r\n #dK_dper\r\n dFX_dper = self._cos(-self.basis_alpha[None,:]*self.basis_omega[None,:]/self.period*X ,self.basis_omega[None,:],self.basis_phi[None,:]+np.pi/2)(X)\r\n\r\n dLa_dper = np.column_stack((-self.a[0]*self.basis_omega/self.period, -self.a[1]*self.basis_omega**2/self.period, -self.a[2]*self.basis_omega**3/self.period))\r\n dLp_dper = np.column_stack((self.basis_phi+np.pi/2,self.basis_phi+np.pi,self.basis_phi+np.pi*3/2))\r\n r1,omega1,phi1 = self._cos_factorization(dLa_dper,Lo,dLp_dper)\r\n\r\n IPPprim1 = self.upper*(1./(omega+omega1.T)*np.cos((omega+omega1.T)*self.upper+phi+phi1.T-np.pi/2) + 1./(omega-omega1.T)*np.cos((omega-omega1.T)*self.upper+phi-phi1.T-np.pi/2))\r\n IPPprim1 -= self.lower*(1./(omega+omega1.T)*np.cos((omega+omega1.T)*self.lower+phi+phi1.T-np.pi/2) + 1./(omega-omega1.T)*np.cos((omega-omega1.T)*self.lower+phi-phi1.T-np.pi/2))\r\n IPPprim2 = self.upper*(1./(omega+omega1.T)*np.cos((omega+omega1.T)*self.upper+phi+phi1.T-np.pi/2) + self.upper*np.cos(phi-phi1.T))\r\n IPPprim2 -= self.lower*(1./(omega+omega1.T)*np.cos((omega+omega1.T)*self.lower+phi+phi1.T-np.pi/2) + self.lower*np.cos(phi-phi1.T))\r\n IPPprim = np.where(np.isnan(IPPprim1),IPPprim2,IPPprim1)\r\n\r\n IPPint1 = 1./(omega+omega1.T)**2*np.cos((omega+omega1.T)*self.upper+phi+phi1.T-np.pi) + 1./(omega-omega1.T)**2*np.cos((omega-omega1.T)*self.upper+phi-phi1.T-np.pi)\r\n IPPint1 -= 1./(omega+omega1.T)**2*np.cos((omega+omega1.T)*self.lower+phi+phi1.T-np.pi) + 1./(omega-omega1.T)**2*np.cos((omega-omega1.T)*self.lower+phi-phi1.T-np.pi)\r\n IPPint2 = 1./(omega+omega1.T)**2*np.cos((omega+omega1.T)*self.upper+phi+phi1.T-np.pi) + 1./2*self.upper**2*np.cos(phi-phi1.T)\r\n IPPint2 -= 
1./(omega+omega1.T)**2*np.cos((omega+omega1.T)*self.lower+phi+phi1.T-np.pi) + 1./2*self.lower**2*np.cos(phi-phi1.T)\r\n IPPint = np.where(np.isnan(IPPint1),IPPint2,IPPint1)\r\n\r\n dLa_dper2 = np.column_stack((-self.a[1]*self.basis_omega/self.period, -2*self.a[2]*self.basis_omega**2/self.period))\r\n dLp_dper2 = np.column_stack((self.basis_phi+np.pi/2,self.basis_phi+np.pi))\r\n r2,omega2,phi2 = self._cos_factorization(dLa_dper2,Lo[:,0:2],dLp_dper2)\r\n\r\n dGint_dper = np.dot(r,r1.T)/2 * (IPPprim - IPPint) + self._int_computation(r2,omega2,phi2, r,omega,phi)\r\n dGint_dper = dGint_dper + dGint_dper.T\r\n\r\n dFlower_dper = np.array(self._cos(-self.lower*self.basis_alpha*self.basis_omega/self.period,self.basis_omega,self.basis_phi+np.pi/2)(self.lower))[:,None]\r\n dF1lower_dper = np.array(self._cos(-self.lower*self.basis_alpha*self.basis_omega**2/self.period,self.basis_omega,self.basis_phi+np.pi)(self.lower)+self._cos(-self.basis_alpha*self.basis_omega/self.period,self.basis_omega,self.basis_phi+np.pi/2)(self.lower))[:,None]\r\n\r\n dG_dper = 1./self.variance*(self.lengthscale**3/(12*np.sqrt(3))*dGint_dper + self.b[0]*(np.dot(dFlower_dper,Flower.T)+np.dot(Flower,dFlower_dper.T)) + self.b[1]*(np.dot(dF1lower_dper,F1lower.T)+np.dot(F1lower,dF1lower_dper.T)))\r\n\r\n dK_dper = 2* mdot(dFX_dper,self.Gi,FX.T) - mdot(FX,self.Gi,dG_dper,self.Gi,FX.T)\r\n\r\n target[0] += np.sum(np.diag(dK_dvar)*dL_dKdiag)\r\n target[1] += np.sum(np.diag(dK_dlen)*dL_dKdiag)\r\n target[2] += np.sum(np.diag(dK_dper)*dL_dKdiag)", "def work_dos():\n #potential = 2x**2+x**2y+y**2\n x1,y1 = (2, -3)\n x2,y2 = (-1, 2)\n p1 = (2*(x1**2)) + ((x1**2)*y1) + (y1**2)\n p2 = (2*(x2**2)) + ((x2**2)*y2) + (y2**2)\n sol = p1 - p2\n sol = abs(sol)\n print(f'The vector field F=(4x+2xy,x2+2y) \\n'\n 'along the curve C parametrized by r(t)=(3t−1,−5t+2) \\n '\n f'for 0 ≤ t ≤ 1 is: {sol}')", "def determinant(self):\n if self.cols != self.rows:\n raise Exception ('Matrix is not square!')\n for i in range(self.rows):\n if self.values[i][i] == 0:\n raise Exception ('There is zero on the main diagonal')\n #TODO: Rearrange the lines, that the main diagonal don't have a zero values \n\n arr = self.values[:]\n for i in range(self.rows):\n for j in range(self.cols):\n diag = [arr[l][p] for p in range(self.cols) for l in range(self.rows) if l == p ]\n if i > j :\n arr2 = arr[i][j]/diag[j]\n arr1 = [round(x * arr2, 4) for x in arr[i-i+j]]\n arr[i] = map(lambda x,y: round(x - y, 4) , arr[i], arr1 )\n\n diag = [arr[l][p] for p in range(self.cols) for l in range(self.rows) if l == p ]\n det = 1\n for i in range(len(diag)):\n det *= diag[i]\n if det != 0 :\n return True\n else:\n return False", "def deredden_cepheids(df_variables):\n extinction_coefficients = {'2365-2764-1': np.array([0.2622, 0.844]), '4109-638-1': np.array([0.0524, 0.1576]),\n '2058-56-1': np.array([0.0751, 0.248]), '3642-2459-1': np.array([0.1907, 0.608]),\n '3999-1391-1': np.array([0.3911, 1.2480]), '2607-1448-1': np.array([0.0430, 0.1310])}\n print \"Dereddening Cepheids:\"\n for tyc in extinction_coefficients.keys():\n print \"%s..\" % tyc\n b_minus_v = df_variables[df_variables.tycho2_id == tyc].B_V\n m_v = df_variables[df_variables.tycho2_id == tyc].M_V\n extinc = extinction_coefficients[tyc]\n df_variables.set_value(df_variables.tycho2_id == tyc, 'B_V', b_minus_v - extinc[0])\n df_variables.set_value(df_variables.tycho2_id == tyc, 'M_V', m_v - extinc[1])\n print \"..Done\\n----------\"\n\n return df_variables", "def glueEmH( Ja, Jf, truncNum = scipy.inf ):\n w, v = truncBasisH( 
Ja, truncNum )\n sPlus, sMinus, sZ = sPlusAndMinusAndZ( v )\n \n H1 = scipy.zeros( ( len(w)**4, len(w)**4 ) )\n \n for n in range( len(w)**4 ):\n # Diagonal previous generation contributions\n o = oct(n)[-4:].zfill(4)\n o = [int(char) for char in o]\n o_A, o_B, o_C, o_D = o\n \n H1[n, n] += scipy.sum( [ w[ i ] for i in o ] )\n \n # Edge terms\n for np in range( n, len(w)**4 ):\n op = oct(np)[-4:].zfill(4)\n op = [int(char) for char in op]\n op_A, op_B, op_C, op_D = op\n \n x = 0.\n if ( (o_B == op_B) and (o_C == op_C) ):\n x += -Jf * ( .5 * ( sPlus[0][o_A, op_A] * sMinus[0][o_D, op_D] + sMinus[0][o_A, op_A] * sPlus[0][o_D,op_D] ) + sZ[0][o_A, op_A] * sZ[0][o_D, op_D] )\n if ( (o_C == op_C) and (o_A == op_A) ):\n x += -Jf * ( .5 * ( sPlus[1][o_B, op_B] * sMinus[1][o_D, op_D] + sMinus[1][o_B, op_B] * sPlus[1][o_D,op_D] ) + sZ[1][o_B, op_B] * sZ[1][o_D, op_D] )\n if ( (o_A == op_A) and (o_B == op_B) ):\n x += -Jf * ( .5 * ( sPlus[2][o_C, op_C] * sMinus[2][o_D, op_D] + sMinus[2][o_C, op_C] * sPlus[1][o_D,op_D] ) + sZ[1][o_C, op_C] * sZ[2][o_D, op_D] )\n \n H1[n, np] = x\n H1[np, n] = x\n \n return H1", "def _h(W):\r\n # E = slin.expm(W * W)\r\n # h = np.trace(E) - d\r\n M = np.eye(d) + W * W / d\r\n E = np.linalg.matrix_power(M, d - 1)\r\n h = (E.T * M).sum() - d\r\n G_h = E.T * W * 2\r\n return h, G_h", "def asaxspureresonant(I1,I2,I3,DI1,DI2,DI3,f11,f12,f13,f21,f22,f23,scalesep=True):\n factor=f11-f13+(f22**2-f21**2)/(f12-f11)-(f22**2-f23**2)/(f12-f13)\n DR=np.sqrt((DI1*DI1)/(f12-f11)**2+\n (DI2*DI2)*(1/(f12-f11)**2+1/(f12-f13)**2)+\n (DI3*DI3)/(f12-f13)**2)/np.absolute(factor);\n sep12=(I1-I2)/(f11-f12)\n sep23=(I2-I3)/(f12-f13)\n R=(sep12 -sep23)/factor;\n dsep12=np.absolute(np.sqrt((DI1*DI1)+(DI2*DI2))/(f11-f12))\n dsep23=np.absolute(np.sqrt((DI2*DI2)+(DI3*DI3))/(f12-f13))\n if scalesep:\n return sep12,dsep12,sep23,dsep23,R,DR\n else:\n return (I1-I2),np.sqrt(DI1**2+DI2**2),(I2-I3),np.sqrt(DI2**2+DI3**2),R,DR", "def __calculateDDIstart(self, partedscans, partedspws):\n \n # Example of partedspws:\n # create 2 subMss with spw=0,1,2 and spw=3\n # partedSPWs = {0:['0','1','2'],1:['3']}\n #\n # create 3 subMSs with spw=0,1,2 spw=3 and spw=4,5\n # partedSPWs = {0:['0','1','2'],1:['3'],2:['4','5']}\n \n hasscans = True\n if len(partedscans) == 0:\n scans = ''\n hasscans = False\n\n # It needs to take the correlation selection into account\n corr_sel = self._arg['correlation']\n ddistartList = []\n \n # scan+spw separation axis \n if hasscans:\n count = 0\n for k,spws in partedspws.iteritems():\n for ks,scans in partedscans.iteritems():\n if self._msTool is None:\n self._msTool = mstool()\n self._msTool.open(self._arg['vis'],nomodify=False)\n else:\n self._msTool.reset()\n \n try:\n # The dictionary with selected indices\n seldict = self._msTool.msseltoindex(vis=self._arg['vis'],scan=scans,spw=spws,polarization=corr_sel)\n except:\n self._msTool.close()\n continue\n \n # Get the selected DD IDs\n ddis = seldict['dd'].tolist()\n ddsize = ddis.__len__()\n if count == 0:\n ddistart = 0\n \n # Create a ddistart list\n ddistartList.append(ddistart)\n ddistart = ddistart + ddsize\n count = count + 1\n \n # spw separation axis \n else:\n count = 0\n for k,spws in partedspws.iteritems():\n if self._msTool is None:\n self._msTool = mstool()\n self._msTool.open(self._arg['vis'],nomodify=False)\n else:\n self._msTool.reset()\n \n try:\n # The dictionary with selected indices\n seldict = self._msTool.msseltoindex(vis=self._arg['vis'],scan=scans,spw=spws, polarization=corr_sel)\n except:\n 
self._msTool.reset()\n continue\n \n # Get the selected DD IDs\n ddis = seldict['dd'].tolist()\n ddsize = ddis.__len__()\n if count == 0:\n ddistart = 0\n \n # Create a ddistart list\n ddistartList.append(ddistart)\n ddistart = ddistart + ddsize\n count = count + 1\n \n return ddistartList", "def all_dhkl(self, crystal):\n #d_min = self.wavelength/self.max2theta*pi/2\n d_min = self.wavelength/sin(self.max2theta/2)/2\n \n # This block is to find the shortest d_hkl, \n # for all basic directions (1,0,0), (0,1,0), (1,1,0), (1,-1,0) and so on, 26 in total \n hkl_max = np.array([1,1,1])\n for h1 in [-1, 0, 1]:\n for k1 in [-1, 0, 1]:\n for l1 in [-1, 0, 1]:\n hkl_index = np.array([[h1,k1,l1]])\n d = float(np.linalg.norm( np.dot(hkl_index, crystal.rec_matrix), axis=1))\n if d>0:\n multiple = 1/d/d_min\n hkl_index *= round(multiple)\n for i in range(len(hkl_max)):\n if hkl_max[i] < hkl_index[0,i]:\n hkl_max[i] = hkl_index[0,i]\n #h1 = 2*ceil(np.linalg.norm(crystal.cell_para[0])/d_min)\n #k1 = 2*ceil(np.linalg.norm(crystal.cell_para[1])/d_min)\n #l1 = 2*ceil(np.linalg.norm(crystal.cell_para[2])/d_min)\n h1, k1, l1 = hkl_max\n h = np.arange(-h1,h1)\n k = np.arange(-k1,k1)\n l = np.arange(-l1,l1)\n \n hkl = np.array((np.meshgrid(h,k,l))).transpose()\n hkl_list = np.reshape(hkl, [len(h)*len(k)*len(l),3])\n hkl_list = hkl_list[np.where(hkl_list.any(axis=1))[0]]\n d_hkl = 1/np.linalg.norm( np.dot(hkl_list, crystal.rec_matrix), axis=1)\n #for ix, a in enumerate(hkl_list):\n # if np.array_equal(a, np.array([1,-1,3])) is True:\n # print(a)\n # break\n #\n #print(ix, hkl_list[ix], d_hkl[ix], d_min)\n\n shortlist = d_hkl > (d_min)\n d_hkl = d_hkl[shortlist]\n hkl_list = hkl_list[shortlist]\n sintheta = self.wavelength/2/d_hkl\n\n self.theta = np.arcsin(sintheta)\n self.hkl_list = hkl_list\n self.d_hkl = d_hkl\n \n #return hkl_list, d_hkl, sintheta", "def _core_calc_degrad(self,bd,Ld) :\n\t\tdegrad = np.dot(Ld,bd) # Do matrix multiplication \n\t\tdegrad = np.exp(degrad) # Exponentiate to convert log to real\n\t\treturn degrad", "def calcula(self, is_deterministico):\n # criando header da tabela\n tabela = PrettyTable([\"Rodadas\",\n \"E[T1]\",\n \"E[W1]\",\n \"E[X1]\",\n \"E[N1]\",\n \"E[Nq1]\",\n \"E[Ns1]\",\n \"E[T2]\",\n \"E[W2]\",\n \"E[X2]\",\n \"E[N2]\",\n \"E[Nq2]\",\n \"E[Ns2]\",\n \"Var[W1]\",\n \"Var[W2]\"])\n \n\n for index in range(1, self.n_rodadas+1):\n # calculando a esperanca das metricas da fila 1\n # print(\"n fregueses por rodada: \", self.fregueses_por_rodada, \". E len w1: \", len(self.w1[index]))\n if len(self.w1[index]) > 0:\n self.x1_med_rodada[index] = sum(self.x1[index])/len(self.w1[index])\n self.w1_med_rodada[index] = sum(self.w1[index])/len(self.w1[index])\n self.nq1_med_rodada[index] = sum(self.nq1[index])/len(self.w1[index])\n self.ns1_med_rodada[index] = sum(self.ns1[index])/len(self.w1[index])\n self.n1_med_rodada[index] = sum(self.n1[index])/len(self.w1[index])\n self.t1_med_rodada[index] = sum(self.t1[index])/len(self.w1[index])\n\n # calculando a esperanca das metricas da fila 2\n # print(\"n fregueses por rodada: \", self.fregueses_por_rodada, \". 
E len w2: \", len(self.w2[index]))\n if len(self.w2[index]) > 0:\n self.x2_med_rodada[index] = sum(self.x2[index])/len(self.w2[index])\n self.w2_med_rodada[index] = sum(self.w2[index])/len(self.w2[index])\n self.nq2_med_rodada[index] = sum(self.nq2[index])/len(self.w2[index])\n self.ns2_med_rodada[index] = sum(self.ns2[index])/len(self.w2[index])\n self.n2_med_rodada[index] = sum(self.n2[index])/len(self.w2[index])\n self.t2_med_rodada[index] = sum(self.t2[index])/len(self.w2[index])\n\n # calculo de Var[W1] e Var[W2] para exibir na tabela\n if len(self.w1[index]) == 1:\n self.var_w1_med_rodada[index] = 0\n else:\n for amostra in range(len(self.w1[index])):\n self.var_w1_med_rodada[index] += (self.w1[index][amostra] - self.w1_med_rodada[index]) ** 2\n self.var_w1_med_rodada[index] /= (len(self.w1[index]) - 1)\n\n if len(self.w2[index]) == 1:\n self.var_w2_med_rodada[index] = 0\n else:\n for amostra2 in range(len(self.w2[index])):\n self.var_w2_med_rodada[index] += (self.w2[index][amostra2] - self.w2_med_rodada[index]) ** 2\n self.var_w2_med_rodada[index] /= (len(self.w2[index]) - 1)\n\n tabela.add_row([\"rodada_\" + str(index),\n round(self.t1_med_rodada[index], 6),\n round(self.w1_med_rodada[index], 6),\n round(self.x1_med_rodada[index], 6),\n round(self.n1_med_rodada[index], 6),\n round(self.nq1_med_rodada[index], 6),\n round(self.ns1_med_rodada[index], 6),\n round(self.t2_med_rodada[index], 6),\n round(self.w2_med_rodada[index], 6),\n round(self.x2_med_rodada[index], 6),\n round(self.n2_med_rodada[index], 6),\n round(self.nq2_med_rodada[index], 6),\n round(self.ns2_med_rodada[index], 6),\n round(self.var_w1_med_rodada[index], 6),\n round(self.var_w2_med_rodada[index], 6)])\n\n # acumulando medias totais\n self.x1_med_total += self.x1_med_rodada[index]\n self.w1_med_total += self.w1_med_rodada[index]\n self.nq1_med_total += self.nq1_med_rodada[index]\n self.ns1_med_total += self.ns1_med_rodada[index]\n self.n1_med_total += self.n1_med_rodada[index]\n self.t1_med_total += self.t1_med_rodada[index]\n self.x2_med_total += self.x2_med_rodada[index]\n self.w2_med_total += self.w2_med_rodada[index]\n self.nq2_med_total += self.nq2_med_rodada[index]\n self.ns2_med_total += self.ns2_med_rodada[index]\n self.n2_med_total += self.n2_med_rodada[index]\n self.t2_med_total += self.t2_med_rodada[index]\n self.var_w1_med_total += self.var_w1_med_rodada[index]\n self.var_w2_med_total += self.var_w2_med_rodada[index]\n\n # dividindo medias acumuladas pelo total de rodadas e enfim, calculando a media total de cada metrica\n self.x1_med_total /= self.n_rodadas\n self.w1_med_total /= self.n_rodadas\n self.nq1_med_total /= self.n_rodadas\n self.ns1_med_total /= self.n_rodadas\n self.n1_med_total /= self.n_rodadas\n self.t1_med_total /= self.n_rodadas\n self.x2_med_total /= self.n_rodadas\n self.w2_med_total /= self.n_rodadas\n self.nq2_med_total /= self.n_rodadas\n self.ns2_med_total /= self.n_rodadas\n self.n2_med_total /= self.n_rodadas\n self.t2_med_total /= self.n_rodadas\n self.var_w1_med_total /= self.n_rodadas\n self.var_w2_med_total /= self.n_rodadas\n\n tabela.add_row([\"Media\",\n round(self.t1_med_total, 6),\n round(self.w1_med_total, 6),\n round(self.x1_med_total, 6),\n round(self.n1_med_total, 6),\n round(self.nq1_med_total, 6),\n round(self.ns1_med_total, 6),\n round(self.t2_med_total, 6),\n round(self.w2_med_total, 6),\n round(self.x2_med_total, 6),\n round(self.n2_med_total, 6),\n round(self.nq2_med_total, 6),\n round(self.ns2_med_total, 6),\n round(self.var_w1_med_total, 6),\n 
round(self.var_w2_med_total, 6)\n ])\n\n print(tabela, \"\\n\")\n\n if not is_deterministico:\n self.calcula_ic()", "def CalcForce_aeroframe_DEP(V, CoefMatrix, x, rho, g):\r\n\r\n #Compute aero forces\r\n # here x must be of the form (alpha, beta, p, q, r, da, dr, de) (last one punctualy used)\r\n # set non dim for p,q,r\r\n nonDim=np.ones(7)\r\n nonDim[2]=g.b/(2*V)\r\n nonDim[3]=g.c/(2*V)\r\n nonDim[4]=g.b/(2*V)\r\n # F=np.dot(CoefMatrix,x[0:7]) # commented form, modification to account for symmetric drag increase of side slip\r\n F=np.zeros((3))\r\n M=np.zeros((3))\r\n xsym=np.copy(x[0:-1])\r\n xsym[1]=abs(xsym[1]) # make beta always positive since derivatives have already correct sign for drag and lift only\r\n xsym[-3]=abs(xsym[-3]) # make ailerons deflection always positive for drag increase and lift decrease\r\n xsym[-1]=abs(xsym[-1]) # make rudder deflection always positive for drag increase and lift decrease\r\n F[0]=np.dot(CoefMatrix[0],xsym)\r\n F[1]=np.dot(CoefMatrix[1],x[0:-1]) #side force\r\n F[2]=np.dot(CoefMatrix[2],xsym)\r\n M=np.dot(CoefMatrix[3:6,:],x[0:-1])\r\n# print(\"Printing moment coeff\")\r\n# print(M)\r\n\r\n \r\n #No need to project\r\n# alpha=x[0]\r\n# beta=x[1]\r\n# H=np.array([[math.cos(alpha)*math.sin(beta), -math.cos(alpha)*math.sin(beta), -math.sin(alpha)],[math.sin(beta), math.cos(beta), 0],[math.sin(alpha)*math.cos(beta), -math.sin(alpha)*math.sin(beta), math.cos(alpha)]])\r\n if V<=71 :\r\n Fbody=np.array([-F[0]-g.Cd0_fl,F[1],-F[2]-g.CL0_fl]) # add alpha=0 coefficients\r\n Moment=M+np.array([0,x[-1]*g.Cm_de+g.Cm0_fl,0])\r\n else:\r\n Fbody=np.array([-F[0]-g.Cd0,F[1],-F[2]-g.CL0]) # add alpha=0 coefficients\r\n Moment=M+np.array([0,x[-1]*g.Cm_de+g.Cm0,0])\r\n \r\n\r\n Fbody=0.5*V**2.0*rho*g.S*Fbody\r\n Moment=0.5*V**2.0*rho*g.S*g.b*Moment\r\n \r\n return np.append(Fbody, Moment)", "def _calc_del(dis):\n ndist = len(dis)\n deldis = np.zeros((ndist))\n deldis[0] = (dis[1] - dis[0]) * 0.5\n deldis[1:-1] = (dis[2:] - dis[0:-2]) * 0.5\n deldis[-1] = (dis[-1] - dis[-2]) * 0.5\n return deldis", "def _computeDerivative(self,angles, distances):\n slope=[]\n slope.append(0)\n for i in xrange(1,len(angles)):\n der = (distances[i]-distances[i-1])/(angles[i]-angles[i-1])\n slope.append(der)\n #slope.append(0)\n return slope", "def derivatives(self):\n self.rdot = self.v\n self.vdot[:,:] = 0.0\n \n for nl in self.nlists: \n nl.separations()\n \n for force in self.forces:\n force.apply()\n\n # Controllers is the new implementation of forces\n for controller in self.controllers:\n controller.apply()", "def energy_function(self, x):\n \n return -T.dot(T.transpose(x), T.dot(self.W, x)) -\\\n T.dot(T.transpose(self.b), x)", "def IsingEnergy(seqs,h,J,N,L):\n\tfield_energy = -np.tensordot(h,seqs,axes=((0,1),(1,2)))\n\tcoupling_energy = np.diag(-np.tensordot(seqs,np.tensordot(seqs,J,axes=((1,2),(2,3))),axes=((1,2),(1,2))))\n\treturn field_energy + coupling_energy", "def test_dmi_uses_unit_length_2dmesh():\n A = 8.78e-12 # J/m\n D = 1.58e-3 # J/m^2\n Ms = 3.84e5 # A/m\n\n energies = []\n\n # unit_lengths 1e-9 and 1 are common, let's throw in an intermediate length\n # just to challenge the system a little:\n for unit_length in (1, 1e-4, 1e-9):\n radius = 200e-9 / unit_length\n maxh = 5e-9 / unit_length\n helical_period = (4 * pi * A / D) / unit_length\n k = 2 * pi / helical_period\n # HF 27 April 2014: The next command fails in dolfin 1.3\n # mesh = df.CircleMesh(df.Point(0, 0), radius, maxh)\n # The actual shape of the domain shouldn't matter for the test,\n # so let's use a 
Rectangular mesh which should work the same:\n\n nx = ny = int(round(radius / maxh))\n mesh = df.RectangleMesh(df.Point(0, 0), df.Point(radius, radius), nx, ny)\n\n S3 = df.VectorFunctionSpace(mesh, \"CG\", 1, dim=3)\n m_expr = df.Expression((\"0\", \"cos(k * x[0])\", \"sin(k * x[0])\"), k=k, degree=1)\n m = Field(S3, m_expr, name='m')\n dmi = DMI(D)\n Ms_dg = Field(df.FunctionSpace(mesh, 'DG', 0), Ms)\n dmi.setup(m, Ms_dg, unit_length=unit_length)\n energies.append(dmi.compute_energy())\n\n H = df.Function(S3)\n H.vector()[:] = dmi.compute_field()\n print H(0.0, 0.0)\n\n print \"Using unit_length = {}.\".format(unit_length)\n print \"Helical period {}.\".format(helical_period)\n print \"Energy {}.\".format(dmi.compute_energy())\n\n rel_diff_energies = abs(energies[0] - energies[1]) / abs(energies[1])\n print \"Relative difference of energy {}.\".format(rel_diff_energies)\n assert rel_diff_energies < 1e-13\n\n rel_diff_energies2 = abs(energies[0] - energies[2]) / abs(energies[2])\n print \"Relative difference2 of energy {}.\".format(rel_diff_energies2)\n assert rel_diff_energies2 < 1e-13", "def DW_cal(data, data_sm):\n n = len(data)\n numerator = 0\n denominator = 0\n for i in range(n):\n if i == 0:\n numerator = numerator + 0\n else:\n numerator = numerator + ((data[i] - data_sm[i]) - (data[i-1] - data_sm[i-1]))**2\n denominator = denominator + (data[i] - data_sm[i])**2\n return numerator/denominator*n/(n - 1)", "def spatial_dos(h,dos):\n if h.has_spin == False and h.has_eh==False: return np.array(dos)\n elif h.has_spin == True and h.has_eh==False: \n return np.array([dos[2*i]+dos[2*i+1] for i in range(len(dos)//2)])\n elif h.has_spin == False and h.has_eh==True: \n return np.array([dos[2*i]+dos[2*i+1] for i in range(len(dos)//2)])\n elif h.has_spin == True and h.has_eh==True: \n return np.array([dos[4*i]+dos[4*i+1]+dos[4*i+2]+dos[4*i+3] for i in range(len(dos)//4)])\n else: raise", "def detect_repetition(sdm, diagonal_num = 30, thres_rate = 0.2, min_sdm_window_size = 48, is_local = True, is_plot = False):\n\n length = len(sdm)\n dig_mean = calculate_sdm_min_diagonal(sdm, window_size = min_sdm_window_size, is_partial = is_local)\n\n # using a FIR filter to smooth mean of diagonals\n B = np.ones(50) / 50\n dig_lp = scipy.signal.lfilter(B, 1, dig_mean)\n dig = dig_mean - dig_lp\n\n # calculate the smoothed differential of diagonals\n B = np.array([1, 0, -1])\n dig_smooth_diiiferentia = scipy.signal.lfilter(B, 1 ,dig)\n\n if is_plot:\n plt.plot(dig_mean, label = 'mean of diagonals')\n plt.plot(dig, label = 'mean of diagonals without linear offset')\n plt.plot(dig_lp, label = 'smoothed mean of diagonals')\n plt.plot(dig_smooth_diiiferentia, label = 'derivative of mean of diagonals')\n plt.title('mean of diagonals')\n plt.legend()\n\n\n # index where the smoothed differential of diagonals from negative to positive\n # the minima value is the minimum value of diagonals\n minima = np.array([])\n minima_indeces = np.array([], dtype = int)\n for i in range(len(dig_smooth_diiiferentia) - 1):\n if dig_smooth_diiiferentia[i] < 0 and dig_smooth_diiiferentia[i + 1] > 0:\n minima_indeces = np.append(minima_indeces, i)\n minima = np.append(minima, dig[i])\n\n # delete by otsu algorithm\n threshold_otsu = get_otsu_threshold(np.matrix(minima))\n del_indeces = np.array([])\n # for i in range(len(minima)):\n # if minima[i] > threshold_otsu:\n # del_indeces = np.append(del_indeces, i)\n\n while True:\n threshold_otsu += 1\n del_indeces = np.array([])\n for i in range(len(minima)):\n if minima[i] > 
threshold_otsu:\n del_indeces = np.append(del_indeces, i)\n\n if len(minima_indeces) - len(del_indeces) > 50 or len(del_indeces) == 0:\n break\n\n\n\n\n minima = np.delete(minima, del_indeces)\n minima_indeces = np.delete(minima_indeces, del_indeces)\n\n # calculate a threshold\n long_vector = np.array([])\n for index in minima_indeces:\n long_vector = np.append(long_vector, np.diag(sdm, -index))\n\n all_len = len(long_vector)\n long_vector = np.sort(long_vector)\n\n while(True):\n\n threshold = long_vector[int(round(thres_rate * all_len))]\n minima_count = 0\n\n # calculate a binary matrix\n binary_matrix = np.zeros([length, length], dtype = int)\n\n\n for index in minima_indeces:\n temp = np.diag(sdm, -index)\n for j in range(len(temp)):\n if temp[j] > threshold:\n binary_matrix[index + j, j] = 1\n minima_count += 1\n\n # if the number of segments is smaller than 10\n if minima_count < 20 and thres_rate < 1:\n thres_rate += 0.05\n else:\n break\n\n\n # enhance the binary matrix\n enhanced_binary_matrix = binary_matrix.copy()\n for index in minima_indeces:\n temp = np.diag(sdm, -index)\n j = 0\n while len(temp) >= 25 or j <= len(temp):\n if temp[j] == 0:\n j += 1\n if j + 25 - 1 > len(temp):\n break\n\n continue\n\n if j + 25 - 1 > len(temp):\n break\n\n kernel = temp[j : j + 25 - 1]\n if isenhance(kernel):\n for k in range(25):\n enhanced_binary_matrix[index + j + k] = 1\n\n j = j + 25 - 1\n\n j += 1\n if j + 25 - 1 > len(temp):\n break\n\n return enhanced_binary_matrix, minima_indeces", "def decoherence(self,system):\n for i in range(self.n):\n for j in range(i,self.n):\n for item in self.decoherence_matrix[i][j]:\n tmp=Expolist([Expo(item[2],0)])\n t = int(self.index(item[0],item[1]))\n system[int(self.index(i,j))][t]+=tmp\n return system", "def bus_deriv(self, bus):\n return np.zeros((1, self.num_i + self.num_o, self.num_nw_vars))", "def eeg_diss(tim,array1,array2,t_index='all'):\t\n\t# first, create scaled array (i.e. 
for each time-point, divide the value by its instantaneous rms value to get unitary strength)\n\tv1 = array1/eeg_rms(array1)\n\tv2 = array2/eeg_rms(array2)\n\t# second, calculate the square root of the mean of the squared differences between the potentials measured at each electrode (see p.255)\n\tif t_index == 'all':\t\t\n\t\tdiss = np.sqrt(np.mean((v1-v2)**2,axis=0))\n\telse:\n\t\tidx = np.logical_and(tim>=t_index[0],tim<=t_index[1])\n\t\tt1 = np.mean(v1[:,idx],axis=1)\n\t\tt2 = np.mean(v2[:,idx],axis=1)\t\t\n\t\tdiss = np.sqrt(np.mean((t1-t2)**2,axis=0))\n\t\t\n\treturn diss", "def ddx(n, dx, f):\n fx = np.zeros(n)\n for j in range(n):\n fx[j] = (f[get_index(j+1, n)]-f[get_index(j-1, n)])/(2*dx)\n return fx", "def diagonalizing_gates(self):\n return [\n PauliZ(wires=self.wires),\n S(wires=self.wires),\n Hadamard(wires=self.wires),\n ]", "def Char_Gate(NV,res ,B_field=400):\n\n\n #data = np.loadtxt(\"NV_Sim_8.dat\") #Placeholder data to test the script\n #NV = np.vstack((data[:,3],data[:,4]))\n #physical constants\n gamma_c = 1.071e3 #g-factor for C13 in Hz/G\n #Model parameters\n omega_larmor = 2*np.pi*gamma_c*B_field\n tau_larmor = 2*np.pi/omega_larmor\n tau = res[0]\n n_pulses = int(res[1]*2) #So that we do a pi -pulse\n\n Ix = 0.5 * np.array([[0,1],[1,0]])\n Iz = 0.5* np.array([[1,0],[0,-1]])\n H0 = (omega_larmor)*Iz\n exH0 =linalg.expm(-1j*H0*tau)\n\n\n M = np.zeros(np.shape(NV)[0])\n for idC in range(np.shape(NV)[0]):\n A= 2*np.pi*NV[idC,0]\n B= 2*np.pi*NV[idC,1] #Converts to radial frequency in Hz/G\n H1 = (A+omega_larmor) *Iz +B*Ix\n exH1 = linalg.expm(-1j*H1*tau)\n V0 = exH0.dot(exH1.dot(exH1.dot(exH0)))\n V1 = exH1.dot(exH0.dot(exH0.dot(exH1)))\n n0 = Calc_axis(V0)\n n1 =Calc_axis(V1)\n phi = np.real(2*np.arccos(np.trace(V0)/2))\n M[idC] = 1 - (1-np.dot(n0,n1))*np.sin(n_pulses * phi /2 )**2\n\n Signal = -M.prod()\n F = (1-(Signal+1)/2)\n return F", "def ldos0d(h,e=0.0,delta=0.01):\n if h.dimensionality==0: # only for 0d\n iden = np.identity(h.intra.shape[0],dtype=np.complex) # create identity\n g = ( (e+1j*delta)*iden -h.intra ).I # calculate green function\n else: raise # not implemented...\n d = [ -(g[i,i]).imag/np.pi for i in range(len(g))] # get imaginary part\n d = spatial_dos(h,d) # convert to spatial resolved DOS\n g = h.geometry # store geometry\n write_ldos(g.x,g.y,d,z=g.z) # write in file\n return d", "def diag(cls, elements, domain):\n return DDM.diag(elements, domain).to_dfm()", "def num_solution(self):\r\n\r\n m = float(self.mass) / self.nu_m\r\n c = self.light_vel\r\n p1 = 0.0\r\n x1 = 0.0\r\n self.xn_track.append(x1)\r\n self.vn.append(0.0)\r\n e = m * c * c\r\n self.en.append(e)\r\n for i in range(1, len(self.obs.obt_g)):\r\n dt = self.t[i] - self.t[i - 1]\r\n ddt = float(self.obs.obt[i] - self.obs.obt[i - 1])\r\n qe = 1 / self.nu_m * (self.nu_t * self.nu_t / self.nu_x) \\\r\n * 1.0 / float(self.size_tick * self.size_tick)\r\n\r\n # print \"qE=\", qe\r\n\r\n p2 = p1 + qe * dt\r\n self.vn.append(p2 / math.sqrt(m ** 2 + (p2 / c) ** 2))\r\n e = e + qe * (self.x[i] - self.x[i - 1])\r\n self.en.append(e)\r\n v = p2 / math.sqrt(m ** 2 + (p2 / c) ** 2)\r\n x2 = x1 + v * dt\r\n self.xn_track.append(x2)\r\n p1 = p2\r\n x1 = x2\r\n print 'Numerical solution of the differential equation of motion'", "def StdDed(DSI, _earned, STD, e04470, e00100, e60000,\n MARS, MIDR, e15360, AGEP, AGES, PBI, SBI, _exact, e04200, e02400,\n STD_Aged, c04470, c00100, c21060, c21040, e37717, c04600, e04805,\n t04470, f6251, _feided, c02700, FDED, II_rt1, II_rt2, II_rt3,\n II_rt4, II_rt5, 
II_rt6, II_rt7, II_brk1, II_brk2, II_brk3, II_brk4,\n II_brk5, II_brk6, _compitem, _txpyers, _numextra, puf):\n\n # Calculate deduction for dependents\n if DSI == 1:\n c15100 = max(350 + _earned, STD[6])\n else:\n c15100 = 0.\n\n # Apply regular standard deduction if not dependent or compulsory itemizer\n if (DSI == 1):\n c04100 = min(STD[MARS - 1], c15100)\n elif _compitem == 1 or (3 <= MARS and MARS <= 6 and MIDR == 1):\n c04100 = 0.\n else:\n c04100 = STD[MARS - 1]\n\n # Add motor vehicle tax to standard deduction\n c04100 = c04100 + e15360\n\n # ??\n if f6251 == 0 and e04470 == 0:\n x04500 = e00100 - e60000\n c04500 = c00100 - x04500\n else:\n x04500 = 0.\n\n # Calculate the extra deduction for aged and blind\n if puf:\n _numextra = _numextra\n else:\n _numextra = float(AGEP + AGES + PBI + SBI)\n\n if _exact == 1 and MARS == 3 or MARS == 5:\n c04200 = e04200\n else:\n c04200 = _numextra * STD_Aged[MARS - 1]\n\n c15200 = c04200\n\n # Compute the total standard deduction\n if (MARS == 3 or MARS == 6) and (MIDR == 1):\n _standard = 0.\n else:\n _standard = c04100 + c04200\n\n # ???\n if FDED == 1:\n _othded = e04470 - c04470\n else:\n _othded = 0.\n\n c04500 = c00100 - max(c04470, max(c04100, _standard + e37717))\n c04800 = max(0., c04500 - c04600 - e04805)\n\n # Check with Dan whether this is right!\n if c04470 > _standard:\n _standard = 0\n\n if c04470 <= _standard:\n c04470 = 0\n\n # why is this here, c60000 is reset many times?\n if _standard > 0:\n c60000 = c00100 - x04500\n else:\n c60000 = c04500\n\n c60000 = c60000 - e04805\n\n # Some taxpayers iteimize only for AMT, not regular tax\n _amtstd = 0.\n\n if (e04470 == 0 and (t04470 > _amtstd) and f6251 == 1 and _exact == 1):\n c60000 = c00100 - t04470\n\n if (c04800 > 0 and _feided > 0):\n _taxinc = c04800 + c02700\n else:\n _taxinc = c04800\n\n if (c04800 > 0 and _feided > 0):\n _feitax = Taxer_i(_feided, MARS, II_rt1, II_rt2, II_rt3, II_rt4,\n II_rt5, II_rt6, II_rt7, II_brk1, II_brk2, II_brk3,\n II_brk4, II_brk5, II_brk6)\n\n _oldfei = Taxer_i(c04800, MARS, II_rt1, II_rt2, II_rt3, II_rt4, II_rt5,\n II_rt6, II_rt7, II_brk1, II_brk2, II_brk3, II_brk4,\n II_brk5, II_brk6)\n else:\n _feitax, _oldfei = 0., 0.\n\n return (c15100, _numextra, _txpyers, c15200, c04470, _othded, c04100,\n c04200, _standard, c04500, c04800, c60000, _amtstd, _taxinc,\n _feitax, _oldfei, _compitem)", "def energy_calc(signal: np.array, segment_length: int) -> np.array:\n energy = []\n for i in range(int(len(signal)/segment_length)):\n segment = signal[i*segment_length:(i+1)*segment_length]# try except error ...\n energy.append(np.sum(np.square(segment)) / segment_length)\n if energy[-1] < 0:\n print(i)\n return energy", "def _dy(self, T):\n return self._h(np.diff(T)) * self._a / self._m / self._c * np.diff(T) * np.array([1, -1])", "def _concentration(num, step, hl_a, hl_e, doses, return_diff=False):\n k_a = np.log(2) / hl_a\n k_e = np.log(2) / hl_e\n mat = np.float64([[-k_a, k_a, 0], [0, -k_e, k_e], [0, 0, 0]])\n mat_step = expm(mat * step)\n solution = np.zeros((num, 3))\n if return_diff:\n mat_tangent = np.copy(mat)\n diff = np.zeros(num)\n try:\n indexed_doses = {int(round(offset / step)): dose for offset, dose in doses.items()}\n except ZeroDivisionError:\n indexed_doses = {0: sum(doses.values())}\n for i in range(num):\n if i:\n solution[i] = mat_step.T @ solution[i-1]\n if i in indexed_doses:\n solution[i, 0] += indexed_doses[i]\n if return_diff:\n diff[i] = mat_tangent[0, 1] * solution[0, 0]\n mat_tangent[...] 
= mat_tangent @ mat_step\n if return_diff:\n return solution[:, 1], diff\n return solution[:, 1]", "def cmd_dgaus():\n cmds = []\n cmds.append(\"r_m[0.0,-1,1]\")\n cmds.append(\"r_s[1.5,0,10]\")\n cmds.append('Gaussian::gaus_c(x,r_m,r_s)')\n cmds.append(\"rt_m[0.0,-1,1]\")\n cmds.append(\"rt_s[3,0,10]\")\n cmds.append('Gaussian::gaus_t(x,rt_m,rt_s)')\n cmds.append(\"f[0.85]\") # fraction of core\n cmds.append(\"SUM::res(f*gaus_c,gaus_t)\")\n return cmds", "def dfda(x: np.array) -> np.array:\n return x**2", "def det(self):\n if self.x == 0 or self.y == 0:\n return None\n elif self.x == 1 or self.y == 1:\n return self.retrieve(0,0)\n else:\n out = 0.0\n for x in xrange(0, self.x):\n out += self.retrieve(0,x)*self.C(0,x)\n return out", "def Diag(Fprime, Cprime, E):\n #\n import math\n # Angle for heteronuclear diatonic\n Theta = 0.5 * math.atan(2.0 * Fprime[0, 1] / (Fprime[0, 0] - Fprime[1, 1]))\n # print('Theta', Theta)\n\n Cprime[0, 0] = np.cos(Theta)\n Cprime[1, 0] = np.sin(Theta)\n Cprime[0, 1] = np.sin(Theta)\n Cprime[1, 1] = -np.cos(Theta)\n\n E[0, 0] = Fprime[0, 0] * np.cos(Theta) ** 2 + Fprime[1, 1] * np.sin(Theta) ** 2 + Fprime[0, 1] * np.sin(2.0 * Theta)\n E[1, 1] = Fprime[1, 1] * np.cos(Theta) ** 2 + Fprime[0, 0] * np.sin(Theta) ** 2 - Fprime[0, 1] * np.sin(2.0 * Theta)\n\n if (E[1, 1] <= E[0, 0]):\n Temp = E[1, 1]\n E[1, 1] = E[0, 0]\n E[0, 0] = Temp\n Temp = Cprime[0, 1]\n Cprime[0, 1] = Cprime[0, 0]\n Cprime[0, 0] = Temp\n Temp = Cprime[1, 1]\n Cprime[1, 1] = Cprime[1, 0]\n Cprime[1, 0] = Temp\n return", "def bus_deriv(self, bus):\n deriv = np.zeros((1, 4, self.num_nw_vars))\n f = self.calc_bus_value\n deriv[0, 0, 0] = self.numeric_deriv(f, 'm', 0, bus=bus)\n deriv[0, 0, 2] = self.numeric_deriv(f, 'h', 0, bus=bus)\n deriv[0, 2, 2] = self.numeric_deriv(f, 'h', 2, bus=bus)\n return deriv", "def mounting_matrix(self):\n # fmt: off\n count = 0\n for x in range(self.ntheta):\n self.M[count][count] = 1\n self.f[count][0] = self.p_in\n count = count + self.nz - 1\n self.M[count][count] = 1\n self.f[count][0] = self.p_out\n count = count + 1\n count = 0\n for x in range(self.nz - 2):\n self.M[self.ntotal - self.nz + 1 + count][1 + count] = 1\n self.M[self.ntotal - self.nz + 1 + count][self.ntotal - self.nz + 1 + count] = -1\n count = count + 1\n count = 1\n j = 0\n for i in range(1, self.nz - 1):\n a = (1 / self.dtheta ** 2) * (self.c1[i][self.ntheta - 1])\n self.M[count][self.ntotal - 2 * self.nz + count] = a\n b = (1 / self.dz ** 2) * (self.c2[i - 1, j])\n self.M[count][count - 1] = b\n c = -((1 / self.dtheta ** 2) * ((self.c1[i][j]) + self.c1[i][self.ntheta - 1])\n + (1 / self.dz ** 2) * (self.c2[i][j] + self.c2[i - 1][j]))\n self.M[count, count] = c\n d = (1 / self.dz ** 2) * (self.c2[i][j])\n self.M[count][count + 1] = d\n e = (1 / self.dtheta ** 2) * (self.c1[i][j])\n self.M[count][count + self.nz] = e\n count = count + 1\n count = self.nz + 1\n for j in range(1, self.ntheta - 1):\n for i in range(1, self.nz - 1):\n a = (1 / self.dtheta ** 2) * (self.c1[i, j - 1])\n self.M[count][count - self.nz] = a\n b = (1 / self.dz ** 2) * (self.c2[i - 1][j])\n self.M[count][count - 1] = b\n c = -((1 / self.dtheta ** 2) * ((self.c1[i][j]) + self.c1[i][j - 1])\n + (1 / self.dz ** 2) * (self.c2[i][j] + self.c2[i - 1][j]))\n self.M[count, count] = c\n d = (1 / self.dz ** 2) * (self.c2[i][j])\n self.M[count][count + 1] = d\n e = (1 / self.dtheta ** 2) * (self.c1[i][j])\n self.M[count][count + self.nz] = e\n count = count + 1\n count = count + 2\n count = 1\n for j in range(self.ntheta - 1):\n for 
i in range(1, self.nz - 1):\n if j == 0:\n self.f[count][0] = (self.c0w[i][j] - self.c0w[i][self.ntheta - 1]) / self.dtheta\n else:\n self.f[count][0] = (self.c0w[i, j] - self.c0w[i, j - 1]) / self.dtheta\n count = count + 1\n count = count + 2\n # fmt: on", "def energy(data):\n\n return np.real(np.mean(np.abs(data)**2, axis=1))", "def calcECDF(self,arr):\n res=np.zeros_like(arr)\n for index, value in np.ndenumerate(arr):\n res[index]=self.calc(value)\n return res", "def calcECDF(self,arr):\n res=np.zeros_like(arr)\n for index, value in np.ndenumerate(arr):\n res[index]=self.calc(value)\n return res", "def write_exciton_dos(frequencies, density_of_states, write_obj):\n write_obj.write('Excitonic Density of States')\n write_obj.write('Omega (s^-1)\\t Rho (1/eV)\\n')\n for omega, rho in zip(frequencies, density_of_states):\n write_obj.write('{}\\t{}\\n'.format(omega, rho))", "def det(self):\n\t\t\n\t\trows = self._rows\n\t\tsign = +1\n\t\tsumm = 0\n\n\t\tfor perm in permutations(range(rows), rows):\n\t\t\tmul = 1\n\t\t\tsign = SquareMatrix.__parity_of_permutation(perm)\n\n\t\t\tfor i in range(rows):\n\t\t\t\tmul *= self[i][perm[i]]\n\n\t\t\tsumm += sign * mul\n\t\treturn summ", "def diag(diagnoal):\n raise NotImplementedError", "def _list_coefficients_by_discriminant(self, fd=True, pos=True, neg=True, printimag=False, norm_neg=True, dmin=0, dmax=0, latex=False, nd=0, Lvals=False, prime=False):\n sig = 1\n S = \"$\"\n if(self._space.WR.is_dual()):\n sig = -1\n maxn = max(self._coeffs[list(self._coeffs.keys())[0]].keys())\n maxD = self._space.WR.level() * (maxn + 1)\n N = self._space.WR.N\n if(dmax > 0):\n w1 = len(str(dmax)) + 1\n else:\n w1 = len(str(maxD)) + 1\n w2 = max(list(map(len, str(self._space.WR.D()).split())))\n w3 = len(str(maxn)) + 1\n mp0 = mpmath.mpf(0)\n mpold = mpmath.mp.dps\n if(mpmath.mp.dps < self.maxdigs):\n mpmath.mp.dps = self.maxdigs\n if(norm_neg and neg):\n cnorm = 0\n tnorm = (0, 0)\n for j in range(1, 100):\n t = rn_from_D(self.space.WR, -j * sig)\n if(t is None):\n continue\n if(t[1] + self._space.WR.Qv[t[0]] >= 0):\n continue\n c1 = self.get_coefficient(t[0], t[1])\n if(c1 is None):\n continue\n # print \"c1 =\",c1\n # If the first coefficient is zero to the precision we assume we shouldn't divide by it\n if(abs(c1) > mpmath.power(10, -self.prec)):\n cnorm = c1 * mpmath.sqrt(j)\n tnorm = t\n print(\"c1=c({0})=c({1})={2}\".format(tnorm, -j * sig, cnorm))\n break\n\n for sn in [1, -1]:\n for D in range(1, maxD):\n # print \"D=\",D\n if(dmin > 0 and abs(D) < dmin):\n continue\n if dmax > 0 and abs(D) > dmax:\n continue\n DD = sig * D * sn\n # print \"D=\",D,is_fundamental_discriminant(D)\n if fd and not is_fundamental_discriminant(DD) and DD != 1:\n # print \"D=\",D,is_fundamental_discriminant(D)\n continue\n if prime and gcd(D, N) > 1:\n continue\n t = rn_from_D(self._space.WR, DD)\n if t is None:\n continue\n else:\n (r, n) = t\n # print \" DD=\",DD,t\n nn = n + self._space.WR.Qv[r]\n if(not pos and nn >= 0):\n continue\n if(not neg and nn < 0):\n continue\n\n c = self.get_coefficient(r, n)\n cs = \"\"\n erms = \"\"\n erm = 10\n if c != 0 and c is not None:\n if nn >= 0:\n ss = \"+\"\n if nn < 0:\n ss = \"-\"\n if(norm_neg):\n if ((r, n) != tnorm) and cnorm != 0:\n c = c / cnorm * mpmath.sqrt(mpmath.mpf(abs(D)))\n x = c.real()\n x1 = floor(x)\n x2 = ceil(x)\n er1 = abs(x1 - x)\n er2 = abs(x2 - x)\n erm = min(er1, er2)\n erms = sci_pretty_print(erm, 2, latex_pow=latex)\n if(erm < 0.001):\n if(er1 < er2):\n cs = str(x1)\n else:\n cs = str(x2)\n elif not printimag:\n 
if(nd > 0):\n cs = str(c.real()).strip()\n cs = sci_pretty_print(cs, nd, latex_pow=latex)\n else:\n cs = str(c.real())\n else:\n if(nd > 0):\n cs = str(c).strip()\n cs = sci_pretty_print(cs, nd, latex_pow=latex)\n else:\n cs = str(c)\n if(c.real() >= 0 and latex):\n cs = r\"\\hphantom{-}\" + cs\n elif(c.real() >= 0):\n cs = \" \" + cs\n if(latex):\n O = \" & \"\n if(Lvals and list(self._Lv.keys()).count(DD) == 1):\n ls = \"&\" + S + sci_pretty_print(self._Lv[DD], nd, latex_pow=latex) + S\n else:\n ls = \"\"\n if(len(erms) == 0):\n s = S + str(DD).center(w1) + S + \"&\" + S + cs + S + ls + \"\\\\\\\\\"\n else:\n s = S + str(DD).center(w1) + S + \"&\" + S + cs + S + ls + O + S + erms + S + \"\\\\\\\\\"\n else:\n if(Lvals and list(self._Lv.keys()).count(DD) == 1):\n ls = \"\\t\" + sci_pretty_print(self._Lv[DD], nd)\n else:\n ls = \"\"\n if(len(erms) == 0):\n s = \"C^\" + ss + \"[\" + str(DD).center(w1) + \"] = \" + cs + ls\n else:\n s = \"C^\" + ss + \"[\" + str(DD).center(w1) + \"] = \" + cs + ls + \" \" + erms + \"\\n\"\n # s=s+str(self._space.WR.D[r]).ljust(w2)+\",\"+str(n).ljust(w3)+\"] = \"+cs\n print(s)\n mpmath.mp.dps = mpold", "def ldos_finite(h,e=0.0,n=10,nwf=4,delta=0.0001):\n if h.dimensionality!=1: raise # if it is not one dimensional\n intra = csc(h.intra) # convert to sparse\n inter = csc(h.inter) # convert to sparse\n interH = inter.H # hermitian\n m = [[None for i in range(n)] for j in range(n)] # full matrix\n for i in range(n): # add intracell\n m[i][i] = intra\n for i in range(n-1): # add intercell\n m[i][i+1] = inter\n m[i+1][i] = interH\n m = bmat(m) # convert to matrix\n (ene,wfs) = slg.eigsh(m,k=nwf,which=\"LM\",sigma=0.0) # diagonalize\n wfs = wfs.transpose() # transpose wavefunctions\n dos = (wfs[0].real)*0.0 # calculate dos\n for (ie,f) in zip(ene,wfs): # loop over waves\n c = 1./(1.+((ie-e)/delta)**2) # calculate coefficient\n dos += np.abs(f)*c # add contribution\n odos = spatial_dos(h,dos) # get the spatial distribution\n go = h.geometry.supercell(n) # get the supercell\n write_ldos(go.x,go.y,odos) # write in a file\n return dos # return the dos", "def calcula_variancias(self):\n for index in range(1, self.n_rodadas+1):\n self.var_x1 += (self.x1_med_rodada[index] - self.x1_med_total) ** 2\n self.var_w1 += (self.w1_med_rodada[index] - self.w1_med_total) ** 2\n self.var_nq1 += (self.nq1_med_rodada[index] - self.nq1_med_total) ** 2\n self.var_ns1 += (self.ns1_med_rodada[index] - self.ns1_med_total) ** 2\n self.var_n1 += (self.n1_med_rodada[index] - self.n1_med_total) ** 2\n self.var_t1 += (self.t1_med_rodada[index] - self.t1_med_total) ** 2\n self.var_w1_med += (self.var_w1_med_rodada[index] - self.var_w1_med_total) ** 2\n\n self.var_x2 += (self.x2_med_rodada[index] - self.x2_med_total) ** 2\n self.var_w2 += (self.w2_med_rodada[index] - self.w2_med_total) ** 2\n self.var_nq2 += (self.nq2_med_rodada[index] - self.nq2_med_total) ** 2\n self.var_ns2 += (self.ns2_med_rodada[index] - self.ns2_med_total) ** 2\n self.var_n2 += (self.n2_med_rodada[index] - self.n2_med_total) ** 2\n self.var_t2 += (self.t2_med_rodada[index] - self.t2_med_total) ** 2\n self.var_w2_med += (self.var_w2_med_rodada[index] - self.var_w2_med_total) ** 2\n\n self.var_x1 /= (self.n_rodadas - 1)\n self.var_w1 /= (self.n_rodadas - 1)\n self.var_nq1 /= (self.n_rodadas - 1)\n self.var_ns1 /= (self.n_rodadas - 1)\n self.var_n1 /= (self.n_rodadas - 1)\n self.var_t1 /= (self.n_rodadas - 1)\n self.var_w1_med /= (self.n_rodadas - 1)\n\n self.var_x2 /= (self.n_rodadas - 1)\n self.var_w2 /= (self.n_rodadas - 
1)\n self.var_nq2 /= (self.n_rodadas - 1)\n self.var_ns2 /= (self.n_rodadas - 1)\n self.var_n2 /= (self.n_rodadas - 1)\n self.var_t2 /= (self.n_rodadas - 1)\n self.var_w2_med /= (self.n_rodadas - 1)", "def density(ensembles):\n if len(ensembles.shape) < 2:\n return ketbra(ensembles)\n else:\n den_mat = ketbra(ensembles[0])\n for i in range(1, len(ensembles)):\n den_mat += ketbra(ensembles[i])\n den_mat /= len(ensembles)\n return den_mat", "def dolomite():\n\n rho = 2840.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 205.; C[0,1] = 71.; C[0,2] = 57.4; C[0,3] = -19.5; C[0,4] = 13.7; C[0,5] = 0.\n C[1,0] = C[0,1]; C[1,1] = 205.; C[1,2] = 57.4; C[1,3] = 19.5; C[1,4] = -13.7; C[1,5] = 0.\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 113.; C[2,3] = 0.; C[2,4] = 0.; C[2,5] = 0.\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 39.8; C[3,4] = 0.; C[3,5] = -13.7\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 39.8; C[4,5] = -19.5\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 67.\n\n return C, rho", "def det(A):\n # Section 1: Establish n parameter and copy A\n n = len(A)\n AM = A[:]\n\n # Section 2: Row manipulate A into an upper triangle matrix\n for fd in range(n): # fd stands for focus diagonal\n if AM[fd][fd] == 0:\n AM[fd][fd] = 1.0e-18 # Cheating by adding zero + ~zero\n for i in range(fd+1, n): # skip row with fd in it.\n crScaler = AM[i][fd] / AM[fd][fd] # cr stands for \"current row\".\n for j in range(n): # cr - crScaler * fdRow, one element at a time.\n AM[i][j] = AM[i][j] - crScaler * AM[fd][j]\n\n # Section 3: Once AM is in upper triangle form ...\n product = 1.0\n for i in range(n):\n product *= AM[i][i] # ... product of diagonals is determinant\n\n return product", "def simulated_dph(grbdir,typ,t_src,alpha,beta,E0,A):\n\tfilenames = glob.glob(grbdir + \"/MM_out/*\")\n\tbadpixfile = glob.glob(grbdir + \"/*badpix.fits\")[0]\n\tfilenames.sort()\n\tpix_cnts = np.zeros((16384,len(filenames)))\n\terr_pix_cnts = np.zeros((16384,len(filenames)))\n\ten = np.arange(5, 261., .5)\n\tsel = (en>=100) & (en <= 150)\n\ten_range = np.zeros(len(filenames))\n\tfor f in range(len(filenames)):\n\t\ten_range[f] = filenames[f][20:26]\n\terr_100_500 = (100.0 <= en_range.astype(np.float)) & (en_range.astype(np.float) <= 500.0)\n\terr_500_1000 = (500.0 < en_range.astype(np.float)) & (en_range.astype(np.float) <= 1000.0)\n\terr_1000_2000 = (1000.0 < en_range.astype(np.float)) & (en_range.astype(np.float) <= 2000.0)\n\texist_1000_2000 = np.where(err_1000_2000 == True)\n\tE = np.array([])\n\t\n\tprint \"Indices where energy is in between 1000 and 2000 :\",exist_1000_2000[0]\n\t\n\tfor i,f in enumerate(filenames):\n\t\t\tdata = fits.getdata(f + \"/SingleEventFile.fits\")\n\t\t\tE = np.append(E, float(f[20:26]))\n\t\t\terror = np.sqrt(data) \n\t\t\tdata[:,~sel] = 0.\n\t\t\terror[:,~sel] = 0.\n\t\t\tpix_cnts[:,i] = data.sum(1)*model(E[i], alpha, beta, E0, A,typ)/55.5\n\t\t\terr_pix_cnts[:,i] = np.sqrt(((error*model(E[i], alpha, beta, E0, A,typ)/55.5)**2).sum(1))\t\t\n\t\t\t\n\tpix_cnts_total = np.zeros((16384,))\n\terr_100_500_total = np.sqrt((err_pix_cnts[:,err_100_500]**2).sum(1))*(E[err_100_500][1]-E[err_100_500][0])\n\terr_500_1000_total = np.sqrt((err_pix_cnts[:,err_500_1000]**2).sum(1))*(E[err_500_1000][1]-E[err_500_1000][0])\n\n\tif (len(exist_1000_2000[0]) != 0):\n\t\terr_1000_2000_total = np.sqrt((err_pix_cnts[:,err_1000_2000]**2).sum(1))*(E[err_1000_2000][1]-E[err_1000_2000][0])\n\telse :\n\t\terr_1000_2000_total 
= 0\n\t\n\terr_pix_cnts_total = np.sqrt(err_100_500_total**2 + err_500_1000_total**2 + err_1000_2000_total**2) # dE is 5 from 100-500, 10 from 500-1000, 20 from 1000-2000\n\n\tfor i in range(16384):\n\t\t\tpix_cnts_total[i] = simps(pix_cnts[i,:], E)\t\t\t\n\n\tquad0pix = pix_cnts_total[:4096]\n\tquad1pix = pix_cnts_total[4096:2*4096]\n\tquad2pix = pix_cnts_total[2*4096:3*4096]\n\tquad3pix = pix_cnts_total[3*4096:]\n\t\t\n\terr_quad0pix = err_pix_cnts_total[:4096]\n\terr_quad1pix = err_pix_cnts_total[4096:2*4096]\n\terr_quad2pix = err_pix_cnts_total[2*4096:3*4096]\n\terr_quad3pix = err_pix_cnts_total[3*4096:]\n\t\n\tquad0 = np.reshape(quad0pix, (64,64), 'F')\n\tquad1 = np.reshape(quad1pix, (64,64), 'F')\n\tquad2 = np.reshape(quad2pix, (64,64), 'F')\n\tquad3 = np.reshape(quad3pix, (64,64), 'F')\n\t\t\n\terr_quad0 = np.reshape(err_quad0pix, (64,64), 'F')\n\terr_quad1 = np.reshape(err_quad1pix, (64,64), 'F')\n\terr_quad2 = np.reshape(err_quad2pix, (64,64), 'F')\n\terr_quad3 = np.reshape(err_quad3pix, (64,64), 'F')\n\t\n\tsim_DPH = np.zeros((128,128), float)\n\tsim_err_DPH = np.zeros((128,128), float)\n\t\n\tsim_DPH[:64,:64] = np.flip(quad0, 0)\n\tsim_DPH[:64,64:] = np.flip(quad1, 0)\n\tsim_DPH[64:,64:] = np.flip(quad2, 0)\n\tsim_DPH[64:,:64] = np.flip(quad3, 0)\n\t\n\tsim_err_DPH[:64,:64] = np.flip(err_quad0, 0)\n\tsim_err_DPH[:64,64:] = np.flip(err_quad1, 0)\n\tsim_err_DPH[64:,64:] = np.flip(err_quad2, 0)\n\tsim_err_DPH[64:,:64] = np.flip(err_quad3, 0)\n\n\tbadpix = fits.open(badpixfile)\n\tdphmask = np.ones((128,128))\n\t\n\tbadq0 = badpix[1].data # Quadrant 0\n\tbadpixmask = (badq0['PIX_FLAG']!=0)\n\tdphmask[(63 - badq0['PixY'][badpixmask]) ,badq0['PixX'][badpixmask]] = 0\n\n\tbadq1 = badpix[2].data # Quadrant 1\n\tbadpixmask = (badq1['PIX_FLAG']!=0)\n\tdphmask[(63 - badq1['PixY'][badpixmask]), (badq1['PixX'][badpixmask]+64)] = 0\n\n\tbadq2 = badpix[3].data # Quadrant 2\n\tbadpixmask = (badq2['PIX_FLAG']!=0)\n\tdphmask[(127 - badq2['PixY'][badpixmask]), (badq2['PixX'][badpixmask]+64)] = 0\n\n\tbadq3 = badpix[4].data # Quadrant 3\n\tbadpixmask = (badq3['PIX_FLAG']!=0)\n\tdphmask[(127 - badq3['PixY'][badpixmask]), badq3['PixX'][badpixmask]] = 0\n\t\t\t\n\toneD_sim = (sim_DPH*dphmask).flatten()\n\n\treturn oneD_sim*t_src,sim_DPH*t_src,dphmask,sim_err_DPH*t_src", "def main(idrun):\n int_type = numpy.int32\n double_type = numpy.float64\n float_type = numpy.float32\n complex_type = numpy.complex64\n\n ns = 7\n iudm = 19; iuv = 12\n dname = numpy.array([\"LONGITUDINAL EFIELD \",\"ELEC CURRENT DENSITY\",\n \"VECTOR POTENTIAL \",\"TRANSVERSE EFIELD \",\n \"MAGNETIC FIELD \",\"RADIATIVE VPOTENTIAL\",\n \"ION CURRENT DENSITY \"],dtype=str)\n\n# create string from idrun\n if (idrun < 0):\n cdrun = \"Unknown\"\n while (cdrun.isdigit() == False):\n cdrun = input(\"enter integer idrun: \")\n idrun = int(cdrun)\n cdrun = str(idrun)\n fname = \"diag3.\" + cdrun\n cmfield3.ffopen3(iudm,fname)\n\n# nscalars = table of available diagnostics\n nscalars = numpy.zeros((ns),int_type,'F')\n\n# determine which vector diagnostics are available\n cmfield3.readvdiags3(iudm,nscalars)\n\n nts = numpy.zeros((1),int_type,'F')\n modesx = numpy.zeros((1),int_type,'F')\n modesy = numpy.zeros((1),int_type,'F')\n modesz = numpy.zeros((1),int_type,'F')\n mrec = numpy.zeros((1),int_type,'F')\n fname = numpy.array([\"\"],'S32')\n\n# select diagnostic\n m = numpy.sum(nscalars)\n while True:\n if (m > 0):\n n = -1\n while True:\n if (n < 0):\n for i in range(0,ns):\n if (nscalars[i]==1):\n print (\"enter \", i+1,\" for\", \n 
numpy.str.rstrip(dname[i]))\n print (\"enter \", 0,\" for EXIT\")\n c = input(\"\")\n if (c.isdigit()):\n n = int(c)\n if (n==0):\n break\n if ((n >= 1) and (n <= ns)):\n if (nscalars[n-1]==0):\n n = -1\n else:\n n = -1\n if (n > 0):\n break\n print (\"invalid entry, try again or enter 0 to quit\")\n else:\n print (\"no vector diagnostic files found\")\n n = 0\n# exit procedure\n if (n==0):\n if (\"vfield\" in globals()):\n vfield = None\n cmfield3.closeff3(iudm)\n return\n\n print (numpy.str.rstrip(dname[n-1]), \" diagnostic selected\")\n\n# return parameters for selected vector diagnostic:\n# nts, modesx, modesy, modesz, nrec, fname\n cmfield3.vdiagparams3(iudm,n,nts,modesx,modesy,modesz,mrec,fname)\n nrec = mrec[0]\n\n# nx/ny/nz = number of global grid points in x/y/z direction\n nx = int(math.pow(2,in3.indx)); ny = int(math.pow(2,in3.indy))\n nz = int(math.pow(2,in3.indz))\n# kyp/kzp = number of real grids in each field partition in y/z\n kyp = int((ny - 1)/in3.nvpy) + 1; kzp = int((nz - 1)/in3.nvpz) + 1\n# kyb/kzb = minimum number of processors in distributed array in y/z\n kyb = int((ny - 1)/kyp) + 1; kzb = int((nz - 1)/kzp) + 1\n# nyv = second dimension of scalar field array, >= ny\n# nzv = third dimension of scalar field array, >= nz\n nyv = kyp*kyb; nzv = kzp*kzb\n\n# allocate vector array\n if (\"vfield\" not in globals()):\n vfield = numpy.empty((in3.ndim,nx,nyv,nzv),float_type,'F')\n dt = in3.dt*float(nts[0])\n\n# open stream file for vector field\n cmfield3.fsopen3(iuv,fname)\n\n# nrec = number of complete records\n nrec = int(nrec/(kyb*kzb))\n print (\"records found: nrec = \", nrec)\n\n# read and transpose vector data\n for ii in range(0,nrec):\n# read real vector field\n cmfield3.freadv3(iuv,vfield,in3.ndim,nx,kyp,kyb,kzp,kzb)\n it = nts[0]*ii\n time = dt*float(ii)\n# show time\n print (\"it,time=\",it,time)\n cmfield3.closeff3(iuv)\n print()", "def _derivatives(self, state, forces_moments):\n # extract the states\n pn = state[0]\n pe = state[1]\n pd = state[2]\n e0 = state[3]\n e1 = state[4]\n e2 = state[5]\n e3 = state[6]\n u = state[7]\n v = state[8]\n w = state[9]\n # state[6:10] = normalize(state[6:10])\n p = state[10]\n q = state[11]\n r = state[12]\n # extract forces/moments\n fx = forces_moments[0]\n fy = forces_moments[1]\n fz = forces_moments[2]\n l = forces_moments[3]\n m = forces_moments[4]\n n = forces_moments[5]\n\n\n # with warnings.catch_warnings():\n # warnings.filterwarnings('error')\n # try:\n # # position kinematics\n # except Warning as e:\n # pdb.set_trace()\n # print(e)\n\n pn_dot = (e1**2+e0**2-e2**2-e3**2)*u + 2*(e1*e2-e3*e0)*v + 2*(e1*e3+e2*e0)*w\n pe_dot = 2*(e1*e2+e3*e0)*u + (e2**2+e0**2-e1**2-e3**2)*v + 2*(e2*e3-e1*e0)*w\n pd_dot = 2*(e1*e3-e2*e0)*u + 2*(e2*e3+e1*e0)*v + (e3**2+e0**2-e1**2-e2**2)*w\n\n # pn_dot = (e0**2+e1**2-e2**2-e3**2)*u + 2*(e1*e2+e3*e0)*v + 2*(e1*e3-e2*e0)*w\n # pe_dot = 2*(e1*e2-e3*e0)*u + (e0**2-e1**2+e2**2-e3**2)*v + 2*(e2*e3+e1*e0)*w\n # pd_dot = 2*(e1*e3+e2*e0)*u + 2*(e2*e3-e1*e0)*v + (e0**2-e1**2-e2**2+e3**2)*w\n\n # pdb.set_trace()\n\n # position dynamics\n mass = self.mass\n u_dot = (r*v-q*w)+fx/mass\n v_dot = (p*w-r*u)+fy/mass\n w_dot = (q*u-p*v)+fz/mass\n\n # rotational kinematics\n e0_dot = 0.5*(-p*e1-q*e2-r*e3)\n e1_dot = 0.5*(p*e0+r*e2-q*e3)\n e2_dot = 0.5*(q*e0-r*e1+p*e3)\n e3_dot = 0.5*(r*e0+q*e1-p*e2)\n\n # rotatonal dynamics\n p_dot = self.gamma1*p*q - self.gamma2*q*r + self.gamma3*l + self.gamma4*n\n q_dot = self.gamma5*p*r - self.gamma6*(p**2-r**2) + m/self.Jy\n r_dot = self.gamma7*p*q - self.gamma1*q*r + 
self.gamma4*l + self.gamma8*n\n\n # collect the derivative of the states\n x_dot = np.array([pn_dot, pe_dot, pd_dot, e0_dot, e1_dot, e2_dot, e3_dot,\n u_dot, v_dot, w_dot, p_dot, q_dot, r_dot])\n # pdb.set_trace()\n\n\n # print(x_dot)\n return x_dot", "def bus_deriv(self, bus):\n deriv = np.zeros((1, 2, self.num_nw_vars))\n f = self.calc_bus_value\n deriv[0, 0, 0] = self.numeric_deriv(f, 'm', 0, bus=bus)\n deriv[0, 0, 2] = self.numeric_deriv(f, 'h', 0, bus=bus)\n deriv[0, 1, 2] = self.numeric_deriv(f, 'h', 1, bus=bus)\n return deriv", "def get_dnde(spectrum,energies):\n energies=units.tonumpy(energies,units.MeV)\n dnde=SpectrumPlotter.get_dnde_mev(spectrum,energies)\n return units.tosympy(dnde,units.ph/units.cm**2/units.s/units.MeV)", "def Seljak04_Cosmo(self,dc,nu):\n mass_non_linear = (np.argmin((self.sigmaM-dc)**2.).to(self.Msunh)).value\n Mh = (self.M.to(self.Msunh)).value\n x = Mh/self.mass_non_linear\n if len(self.bias_par.keys()) == 0:\n a = 0.53\n b = 0.39\n c = 0.45\n d = 0.13\n e = 40.\n f = 5e-4\n g = 1.5\n a1 = 0.4\n a2 = 0.3\n a3 = 0.8\n else:\n a = self.bias_par['a']\n b = self.bias_par['b']\n c = self.bias_par['c']\n d = self.bias_par['d']\n e = self.bias_par['e']\n f = self.bias_par['f']\n g = self.bias_par['g']\n a1 = self.bias_par['a1']\n a2 = self.bias_par['a2']\n a3 = self.bias_par['a3']\n if self.cosmo_code == 'camb':\n Om0m = self.camb_pars.omegam\n ns = self.cosmo_input_camb['ns']\n s8 = self.cosmo.get_sigma8_0()\n nrun = self.cosmo_input_camb['nrun']\n else:\n Om0m = self.cosmo.Omega0_m()\n ns = self.cosmo.n_s()\n s8 = self.cosmo.sigma8()\n try:\n nrun = self.cosmo_input_class['alpha_s']\n except:\n nrun = 0.\n return a + b*x**c + d/(e*x+1.) + f*x**g + np.log10(x)* \\\n (a1*(Om0m - 0.3 + ns - 1.) + \\\n a2*(self.s8-0.9 + self.hubble - 0.7) + a4*nrun)", "def slabldos(h,energies=np.linspace(-1.0,1.0,40),delta=None,nk=40):\n if h.dimensionality!=2: raise # nope\n ds = ldosmap(h,energies=energies,delta=delta,nk=nk)\n if len(ds[0])!=len(h.geometry.z): \n print(\"Wrong dimensions\",len(ds[0]),len(h.geometry.z))\n raise\n f = open(\"DOSMAP.OUT\",\"w\")\n f.write(\"# energy, index, DOS, zposition\\n\")\n for ie in range(len(energies)):\n for iz in range(len(h.geometry.z)):\n f.write(str(energies[ie])+\" \")\n f.write(str(iz)+\" \")\n f.write(str(ds[ie,iz])+\" \")\n f.write(str(h.geometry.z[iz])+\"\\n\")\n f.close()\n return energies,np.transpose(ds) # retunr LDOS ", "def derivcd4(vals, dx):\n deriv = []\n for i in range(2):\n deriv.append((-3*vals[i] + 4*vals[i+1] - vals[i+2]) / (2*dx))\n for i in range(2, len(vals) - 2):\n deriv.append((-1*vals[i-2] + 8*vals[i-1] + 8*vals[i+1] -\\\n vals[i+2]) / (12*dx))\n # Note that due to the fact that this function has been set up this\n # way, this will not output a value at 5000000\n if i % 500000 == 0:\n print('Derivative list: {}'.format(i))\n for i in range((len(vals) - 2), len(vals)):\n deriv.append((vals[i] - vals[i-1]) / dx)\n return deriv", "def directtion(segment,neighbours,segmentsMeta):\n headTail= list(map(lambda x : 'Head' if x in segmentsMeta.at[segment,'outs'] else 'Tail',neighbours ))\n cosSinDiff = np.fromiter(map(lambda x,y :np.exp(abs(segmentsMeta['cos'+y][x]-segmentsMeta['cos'+y][segment])+abs(segmentsMeta['sin'+y][x]-segmentsMeta['sin'+y][segment]))-1 ,neighbours,headTail),np.float)\n #return np.fromiter(map(lambda x : 1/(1+np.exp(6*x)) ,cosSinDiff),np.float)\n oneCos = cosSinDiff[0] if cosSinDiff[0] <= np.exp(1) else np.inf\n return [oneCos]" ]
[ "0.61792886", "0.59270054", "0.586323", "0.57872564", "0.57571954", "0.57146597", "0.5687018", "0.5637377", "0.56015885", "0.55990046", "0.5590492", "0.5574188", "0.55615056", "0.55407554", "0.5533155", "0.5531143", "0.5525705", "0.5469803", "0.545624", "0.5441904", "0.54367423", "0.5432317", "0.5418872", "0.54002535", "0.53762186", "0.5374301", "0.5366581", "0.53364414", "0.53299993", "0.5326945", "0.53156126", "0.53142434", "0.5311904", "0.5305919", "0.53009444", "0.5298538", "0.5287117", "0.5282922", "0.52753603", "0.5271639", "0.52482516", "0.52479035", "0.52414143", "0.5240469", "0.5238326", "0.52293277", "0.52264583", "0.52208424", "0.52196854", "0.52189577", "0.521665", "0.52133507", "0.5205239", "0.52016395", "0.519985", "0.5196139", "0.5195981", "0.5195402", "0.51934874", "0.51915824", "0.51822233", "0.51793766", "0.51756656", "0.517102", "0.5164499", "0.51619804", "0.5158591", "0.51532966", "0.5150465", "0.5147596", "0.5146952", "0.5144645", "0.5140459", "0.5135115", "0.5129658", "0.51283467", "0.5122969", "0.5114054", "0.51017", "0.509972", "0.5098926", "0.5098926", "0.5090983", "0.50900567", "0.50870776", "0.5085999", "0.50773656", "0.50765604", "0.507332", "0.50709116", "0.50664043", "0.5066313", "0.50655067", "0.50647944", "0.5063916", "0.5052502", "0.50502694", "0.5050086", "0.5047389", "0.5044491" ]
0.51186496
77
Write a map of the ldos using full diagonalization
def ldosmap(h,energies=np.linspace(-1.0,1.0,40),delta=None,nk=40):
  if delta is None: delta = (np.max(energies)-np.min(energies))/len(energies) # delta
  hkgen = h.get_hk_gen() # get generator
  dstot = np.zeros((len(energies),h.intra.shape[0])) # initialize
  for ik in range(nk):
    print("Random k-point",ik,nk,end="\r")
    k = np.random.random(3) # random k-point
    hk = hkgen(k) # get Hamiltonian
    ds = ldos_waves(hk,es=energies,delta=delta) # LDOS for this kpoint
    dstot += ds # add
  print("LDOS finished")
  dstot /= nk # normalize
  dstot = [spatial_dos(h,d) for d in dstot] # convert to spatial resolved DOS
  return np.array(dstot)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def slabldos(h,energies=np.linspace(-1.0,1.0,40),delta=None,nk=40):\n if h.dimensionality!=2: raise # nope\n ds = ldosmap(h,energies=energies,delta=delta,nk=nk)\n if len(ds[0])!=len(h.geometry.z): \n print(\"Wrong dimensions\",len(ds[0]),len(h.geometry.z))\n raise\n f = open(\"DOSMAP.OUT\",\"w\")\n f.write(\"# energy, index, DOS, zposition\\n\")\n for ie in range(len(energies)):\n for iz in range(len(h.geometry.z)):\n f.write(str(energies[ie])+\" \")\n f.write(str(iz)+\" \")\n f.write(str(ds[ie,iz])+\" \")\n f.write(str(h.geometry.z[iz])+\"\\n\")\n f.close()\n return energies,np.transpose(ds) # retunr LDOS ", "def write_ldos(x,y,dos,output_file=\"LDOS.OUT\",z=None):\n fd = open(output_file,\"w\") # open file\n fd.write(\"# x, y, local density of states\\n\")\n ii = 0\n for (ix,iy,idos) in zip(x,y,dos): # write everything\n fd.write(str(ix) +\" \"+ str(iy) + \" \"+ str(idos))\n if z is not None: fd.write(\" \"+str(z[ii]))\n fd.write(\"\\n\")\n ii += 1\n fd.close() # close file", "def generate_FreeSurferLUT(labels,data,mapname,filename):", "def dumpData(self,out):\n #--Header\n out.packSub('MAPH','ii',512,9)\n #--Data\n out.pack('4si','MAPD',512*512*3)\n out.write(''.join(self.mapd))", "def dump_w2vdictionary(outfilename,wordlist,matrix):\n ostream = open(outfilename,'w')\n print('%d %d'%(len(wordlist),matrix.shape[1]),file=ostream)\n for word,vec in zip(wordlist,matrix):\n print(' '.join([word]+ [ str(elt) for elt in vec]),file=ostream)\n ostream.close()", "def print_local_map(self):\n size = 15\n size_half = int(size/2)\n temp_map = []\n for i in range(size):\n map_row = []\n for j in range(size):\n coords = (self.rob_pos[0] + i-size_half,\n self.rob_pos[1] + j-size_half) \n\n if(self.check_limits(coords)):\n if self.rob_pos[0]==coords[0] and self.rob_pos[1]==coords[1]:\n map_row.append(\"R\")\n else:\n map_row.append(self.map[coords[0]][coords[1]])\n temp_map.append(map_row)\n \n #print map upside down cause thats how its saved....\n for i in range(14,-1,-1):\n rospy.logdebug(temp_map[i])", "def write_mat_file(self):\n mat_dict = {}\n mat_dict['Lx_p'] = self.Lx_p\n mat_dict['Ly_p'] = self.Ly_p\n mat_dict['Lz_p'] = self.Lz_p\n mat_dict['Lo'] = self.obst.get_Lo()\n mat_dict['Ny_divs'] = self.N_divs\n mat_dict['rho_p'] = self.rho_p\n mat_dict['nu_p'] = self.nu_p\n mat_dict['snl'] = list(np.union1d(self.obst_list[:],self.solid_list[:]))\n mat_dict['inl'] = list(self.inlet_list[:])\n mat_dict['onl'] = list(self.outlet_list[:])\n\n scipy.io.savemat('geometry_description',mat_dict)", "def show_map(map_):\n for r in map_.matrix:\n print(''.join(r))\n print()", "def cmap_idl4():\n r=\"0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 7 15 22 30 37 45 52 60 67 75 82 90 97 105 112 120 125 130 135 140 145 150 155 160 165 170 175 180 185 190 195 200 200 201 201 202 202 203 203 204 204 205 205 206 206 207 207 208 208 209 209 210 210 211 211 212 212 213 213 214 214 215 215 216 216 217 217 218 218 219 219 220 220 221 221 222 222 223 223 224 224 225 225 226 226 227 227 228 228 229 229 230 230 231 231 232 232 233 233 234 234 235 235 236 236 237 237 238 238 239 239 240 240 241 241 242 242 243 243 244 244 245 245 246 246 247 247 248 248 249 249 250 250 251 251 252 252 253 253 254 254 255 255\"\n g=\"0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 3 6 9 12 15 18 21 25 28 31 34 37 40 43 46 50 53 56 59 62 65 68 71 75 
78 81 84 87 90 93 96 100 103 106 109 112 115 118 121 125 128 131 134 137 140 143 146 150 150 150 150 150 150 150 150 150 150 150 150 150 150 150 150 150 149 148 148 147 146 146 145 145 144 143 143 142 141 141 140 140 137 135 132 130 127 125 122 120 117 115 112 110 107 105 102 100 93 87 81 75 68 62 56 50 43 37 31 25 18 12 6 0 2 4 6 9 11 13 16 18 20 23 25 27 29 32 34 36 39 41 43 46 48 50 53 55 57 59 62 64 66 69 71 73 76 78 80 83 85 87 89 92 94 96 99 101 103 106 108 110 113 115 117 119 122 124 126 129 131 133 136 138 140 142 145 147 149 152 154 156 159 161 163 166 168 170 172 175 177 179 182 184 186 189 191 193 196 198 200 202 205 207 209 212 214 216 219 221 223 226 228 230 232 235 237 239 242 244 246 249 251 253 255\"\n b=\"0 2 4 6 8 10 12 14 16 18 20 22 25 27 29 31 33 35 37 39 41 43 45 47 50 52 54 56 58 60 62 64 66 68 70 72 75 77 79 81 83 85 87 89 91 93 95 97 100 100 100 100 100 100 100 100 100 100 100 100 100 100 100 100 100 100 100 100 100 100 100 100 100 100 100 100 100 100 100 100 100 96 93 90 87 84 81 78 75 71 68 65 62 59 56 53 50 46 43 40 37 34 31 28 25 21 18 15 12 9 6 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\"\n rm = [tuple([i/255,]+[int(x)/255,]*2) for i,x in enumerate(r.split())]\n gm = [tuple([i/255,]+[int(x)/255,]*2) for i,x in enumerate(g.split())]\n bm = [tuple([i/255,]+[int(x)/255,]*2) for i,x in enumerate(b.split())]\n cdict = {'red':rm, 'green':gm, 'blue':bm}\n cmap = plt.matplotlib.colors.LinearSegmentedColormap('idl4',cdict,256)\n return cmap", "def write_maps(self):\n logging.getLogger(\"PyRoute.HexMap\").info(\"writing {:d} sector maps...\".format(len(self.galaxy.sectors)))\n for sector in self.galaxy.sectors.values():\n pdf = self.document(sector)\n self.write_base_map(pdf, sector)\n\n self.draw_borders(pdf, sector)\n\n comm_routes = [star for star in self.galaxy.stars.edges(sector.worlds, True) \\\n if star[2].get('xboat', False) or star[2].get('comm', False)]\n for (star, neighbor, data) in comm_routes:\n self.comm_line(pdf, [star, neighbor])\n\n sector_trade = [star for star in self.galaxy.stars.edges(sector.worlds, True) \\\n if star[2]['trade'] > 0 and StatCalculation.trade_to_btn(star[2]['trade']) >= self.min_btn]\n\n logging.getLogger('PyRoute.HexMap').debug(\"Worlds with trade: {}\".format(len(sector_trade)))\n\n sector_trade.sort(key=lambda line: line[2]['trade'])\n\n for (star, neighbor, data) in sector_trade:\n self.galaxy.stars[star][neighbor]['trade btn'] = StatCalculation.trade_to_btn(data['trade'])\n self.trade_line(pdf, [star, neighbor], data)\n\n # Get all the worlds in this sector\n # for (star, neighbor, data) in self.galaxy.stars.edges(sector.worlds, True):\n # if star.sector != sector:\n # continue#\n # if data['trade'] > 0 and self.trade_to_btn(data['trade']) >= self.min_btn:\n # self.galaxy.stars[star][neighbor]['trade btn'] = self.trade_to_btn(data['trade'])\n # self.trade_line(pdf, [star, neighbor], data)\n # elif star.sector != neighbor.sector:\n # data = self.galaxy.stars.get_edge_data(neighbor, star)\n # if data is not None and \\\n # data['trade'] > 0 and \\\n # self.trade_to_btn(data['trade']) >= self.min_btn:\n # self.trade_line(pdf, [star, neighbor], data)\n\n for star in sector.worlds:\n self.system(pdf, star)\n if sector.coreward:\n self.coreward_sector(pdf, sector.coreward.name)\n if 
sector.rimward:\n self.rimward_sector(pdf, sector.rimward.name)\n if sector.spinward:\n self.spinward_sector(pdf, sector.spinward.name)\n if sector.trailing:\n self.trailing_sector(pdf, sector.trailing.name)\n\n self.writer.close()", "def _add_dimensions_to_file(locus_f):\n ld_lines = []\n i = 0\n with open(locus_f) as ld_file:\n for i, line in enumerate(ld_file):\n ld_lines.append(line)\n no_lines = i + 1\n file_out = locus_f.split('.matrix')[0] + '.LD'\n with open(file_out, 'w' ) as paintor_ld:\n paintor_ld.write(str(no_lines) + ' ' + str(no_lines) + '\\n')\n for line in ld_lines:\n paintor_ld.write(line)", "def create_map():\n pass\n # for line in range(0, shared.lines):\n # map_data[line][0] = (1, -1)\n # map_data[line][shared.columns - 1] = (1, -1)\n #\n # for column in range(0, shared.columns):\n # map_data[0, column] = (-1, 1)\n # # if column <= shared.left_space or column > shared.columns - shared.left_space:\n # map_data[shared.lines - 1, column] = (-1, 1)", "def write_maps(self):\n if np.allclose(self.xmap.origin, 0):\n ext = \"ccp4\"\n else:\n ext = \"mrc\"\n\n for q, coor, b in zip(self._occupancies, self._coor_set, self._bs):\n self.conformer.q = q\n self.conformer.coor = coor\n self.conformer.b = b\n self._transformer.density()\n fname = os.path.join(self.directory_name, f\"model.{ext}\")\n self._transformer.xmap.tofile(fname)\n self._transformer.xmap.array -= self.xmap.array\n fname = os.path.join(self.directory_name, f\"diff.{ext}\")\n self._transformer.xmap.tofile(fname)\n self._transformer.reset(full=True)", "def writeMatrix(self):\n\t\tpass", "def show_map(self):\n for line in self.map:\n print(\"\".join(line))", "def outputholepos(dictlist,origindict): #dictlist-一行中所有图层的字典的列表,其中字典为(key:图层名,value:一个outline中的该图层多段线列表) origindict-原始的未进行操作的字典\r\n \r\n layernamelist=list(origindict.keys())\r\n \r\n hole_list=[]\r\n holepolylinedict={}\r\n \r\n for layername in layernamelist: #得到通孔层的名称列表\r\n if layername[0]=='V' or layername[0]=='v':\r\n hole_list.append(layername)\r\n \r\n for holelayer in hole_list: #已经阵列好的第一行中每一层通孔多段线存入新的“通孔名称”-“一行中所有通孔多段线”的字典\r\n holepolylinelist=[] \r\n for d in dictlist: \r\n holepolylinelist.extend(d[holelayer])\r\n holepolylinedict[holelayer]=holepolylinelist\r\n \r\n holepolylinearraydict=holepolylinedictarraycopy(holepolylinedict) #对以上生成的字典进行操作,生成新的字典。字典中对应的值“一行中所有通孔多段线”向上阵列布满整个菲林区域\r\n \r\n \r\n holenotefile=open('通孔模式说明'+'.txt','w') #输出通孔模式说明\r\n holenotefile.write(\"各通孔文件通孔数一览表(不包括5H):\\n\")\r\n for e in holepolylinearraydict:\r\n holeposfile=open(e+'.txt','w')\r\n centerposlist=calculatecenterpos(holepolylinearraydict[e])\r\n centerposlist.sort()\r\n holenotefile.write(\"通孔层 \"+e+\" 一共有通孔 \"+'{:d}'.format(len(centerposlist))+\" 个\\n\") #输出每一通孔层的中心点数。即对应通孔数量\r\n for pos in centerposlist:\r\n holeposfile.write('X{:.0f}Y{:.0f}\\n'.format(pos[0]*1000,pos[1]*1000)) #要格式化输出,所以先要乘以1000,然后输出小数点前的部分 \r\n holeposfile.close()", "def compute_kappa_map(lens_vec, size, size_map):\n\n par_file_name = \"kappa_map.par\"\n fit_file_name = \"kappa_map.fits\"\n z_source = 2.0\n size_map = size_map * 1.05\n\n file_map = open(par_file_name, 'w')\n\n conv_lens_vec(lens_vec)\n\n file_map.write(\"runmode\\n\" )\n file_map.write(\" reference 3 0 0\\n\")\n file_map.write(\" verbose 0\\n\" )\n file_map.write(\" mass 3 \" + str(size) + \" \" + \\\n str(lens_vec[0][\"z_lens\"]) + \" \" + fit_file_name + \"\\n\")\n file_map.write(\" end\\n\")\n file_map.write(\"source\\n\")\n file_map.write(\" z_source \" + str(z_source) + \"\\n\")\n file_map.write(\" end\\n\")\n 
file_map.write(\"grille\\n\")\n file_map.write(\" nombre 128\\n\")\n file_map.write(\" nlens 4\\n\")\n file_map.write(\" nlens_crit 1\\n\")\n file_map.write(\" nlens_opt 0\\n\")\n file_map.write(\" polaire 1\\n\")\n file_map.write(\" end\\n\")\n\n\n for i in range(len(lens_vec)):\n string_out = 'potential ' + str(i) + '\\n'\n file_map.write(string_out)\n #print string_out,\n for keys in lens_vec[i].keys():\n string_out = ' ' + keys + ' ' + str(lens_vec[i][keys]) + \\\n '\\n'\n #print string_out,\n file_map.write(string_out)\n file_map.write(' end\\n')\n\n file_map.write(\"cosmology\\n\")\n file_map.write(\" H0 70.0\\n\")\n file_map.write(\" omega 0.3\\n\")\n file_map.write(\" lambda 0.7\\n\")\n file_map.write(\" end\\n\")\n file_map.write(\"champ\\n\")\n file_map.write(\" xmin -101\\n\")\n file_map.write(\" xmax 100\\n\")\n file_map.write(\" ymin -101\\n\")\n file_map.write(\" ymax 100\\n\")\n file_map.write(\" dmax \" + str(size_map) + \"\\n\")\n file_map.write(\" end\\n\")\n file_map.write(\"fini\\n\")\n\n file_map.close()", "def crear_mapa (self, ancho = 40 , largo = 40):\n\n for i in range (largo):\n\n a = \" \"\n b = []\n for z in range (ancho):\n b.append(a)\n kasino.mapa.append(b)\n\n for i in range (1,ancho -1):\n kasino.mapa[0][i]=\"═══\"\n kasino.mapa[largo-1][i]=\"═══\"\n\n for i in range (1,largo -1):\n kasino.mapa[i][0]= \" ║\"\n kasino.mapa[i][ancho-1]= \"║\"\n \n kasino.mapa [0][0]=\" ╔\"\n kasino.mapa [0][ancho-1]=\"╗\"\n kasino.mapa [largo-1][0]=\" ╚\"\n kasino.mapa [largo-1][ancho-1]=\"╝\"", "def print_map(self):\n for row in self.world_map:\n for cell in row:\n print(cell, end=\"\")\n print()", "def makemap(d,x,y,ra0=0,dec0=0, cd=1./60., nxpix=600, nypix=600):\n\n xy = np.zeros((x.size,2))\n xy[:,0] = x.flatten()\n xy[:,1] = y.flatten()\n\n from astropy import wcs\n\n w = wcs.WCS(naxis=2)\n w.wcs.crval = [ra0, dec0]\n w.wcs.cdelt = [cd,cd]\n w.wcs.crpix = [nxpix/2., nypix/2.]\n w.wcs.ctype = ['RA---TAN', 'DEC--TAN']\n\n pixels = w.wcs_world2pix(xy,0)\n ygrid, xgrid = np.meshgrid(np.arange(nypix),np.arange(nxpix))\n\n pixCens = w.wcs_pix2world(np.array([xgrid.flatten(), ygrid.flatten()]).T,0)\n pixCens[:,0] += 1./2.*cd\n pixCens[:,1] += 1./2.*cd\n pflat = (pixels[:,1].astype(int) + (nypix)*pixels[:,0].astype(int)).astype(int)\n\n\n pEdges = np.arange(nxpix*nypix+1)\n m = np.histogram(pflat,pEdges, weights=d)[0]\n h = np.histogram(pflat,pEdges)[0]\n m = m/h\n return m,pixCens,w", "def generateNeighborMap(self):\n A=[]\n for key,value in self._ts_dict.iteritems():\n A.append(np.array([i.replace(\"#\",\" \")\n .split()[0:4] for i in value.index])\n .astype(float))\n\n B=np.array(A[0]).reshape(len(A[0]),4)\n print (B[:,0]+B[:,1])/2\n A=[]\n for key,value in self._ts_dict.iteritems():\n A.append(value.sum(axis=1).values)\n print A", "def print_map(self):\n y_max,x_max = map(max, zip(*self.mp.keys()))\n for row in range(0,y_max+1):\n msg = []\n for k in range(0,x_max+1):\n msg.append(chr(self.mp[row,k]))\n print(\"\".join(msg))", "def write_array(uri: str):\n a1_data = np.reshape(np.arange(1, 26), (5, 5))\n l1_data = np.arange(5, 0, -1)\n l2_data = np.arange(-2, 3)\n l3_data = np.linspace(-1.0, 1.0, 5)\n with tiledb.open(uri, \"w\") as array:\n array[:] = {\"a1\": a1_data, \"l1\": l1_data, \"l2\": l2_data, \"l3\": l3_data}", "def display_map():\r\n for row in range(0, len(map1)): #for all rows\r\n for column in range(0, len(map1[0])): #for all columns\r\n print(map1[row][column], end=' ')\r\n print()", "def save_world(world, filename):\n with open(filename, \"w\") as f:\n for 
i, row in enumerate(world):\n if i % 2 != 0 and i not in [0, 1]:\n f.write(\" \")\n\n for column in row:\n f.write(str(column))\n f.write(\" \")\n\n f.write(\"\\n\")", "def diagonals_in_hd():\n number_of_pairs = 100000\n angles_for_d = {}\n for d in (10, 100, 1000):\n number_of_corners = 2 ** d - 1\n first_corner = [random.randint(0, number_of_corners) for _ in range(0, number_of_pairs)]\n second_corner = [random.randint(0, number_of_corners) for _ in range(0, number_of_pairs)]\n\n dummy_d = [d for _ in range(0, number_of_pairs)]\n angles = []\n with cf.ProcessPoolExecutor() as executor:\n results = executor.map(find_angle, first_corner, second_corner, dummy_d)\n for result in results:\n angles.append(result)\n ser = pd.Series(angles)\n print(f\"Angles between diagonals for {d} dimensions\")\n print(ser.describe())\n angles_for_d[d] = ser\n\n plot_pmfs_for_ds(angles_for_d)", "def _compute_plain_from_map(self):\n # get PDBID\n pdbid = get_id(self.pdb_path)\n\n # write hydrogen bonds to the plain file\n with open(conf.temp_dir + os.path.sep + pdbid + '.hb', 'w') as f:\n\n for pos in self.potential_map:\n a = str(pos[0])\n b = str(pos[1])\n c = str(self.potential_map[pos][0])\n d = str(self.potential_map[pos][1])\n e = [a, b, c, d]\n\n f.write(' '.join(e) + os.linesep)", "def set_diagonal(self, value = 0):\n for d in range(self.size):\n self.write(d, d, value)", "def print_map():\n printmap = []\n for y in range(max_height):\n line = list(tracks[:, y])\n for cart in sorted(carts, key=lambda c: c[0]):\n if cart[0][1] == y:\n line[cart[0][0]] = cart_icons[cart[1] - 1]\n printmap.append(''.join(line))\n for line in printmap:\n print(line)", "def write_map( file_lists, target_dir, output_dir ):\n tld_to_volumes = {}\n for i, group in enumerate( file_lists ):\n for node in group:\n tld = toplevel_subdir( node, target_dir )\n tld_to_volumes.setdefault( tld, set() ).add( i )\n with open( os.path.join( output_dir, \"map.txt\" ), \"w\" ) as fout:\n for tld, volumes in tld_to_volumes.items():\n fout.write( \"{:24s}: {}\\n\".format( tld, \" \".join( [ str( x ) for x in volumes ] ) ) )", "def __build_map(self):\n columns = []\n\n for i in range(self.__dimensions):\n columns.append([])\n\n for i in range(self.__dimensions):\n self.map.append(columns)", "def writeColourMap (self):\r\n\r\n num = self.bih_vals [bih_ColorsUsed]\r\n\r\n for i in range (0, num):\r\n self.the_file.write (self.colourmap [i])", "def CreateAndSave_l_matrices(self, lmin, filename, theta, phi):\n\t\tif lmin == 0:\n\t\t\tmode = 'w'\n\t\telse:\n\t\t\tmode = 'r+'\n\n\t\tf = tables.openFile(filename, mode)\n\t\troot = f.root\n\t\tindex_iterator = self.Config.AngularRepresentation.index_iterator\n\n\t\tprint \"Legendre ...\"\n\t\tprevl = -1;\n\t\tfor i, lm in enumerate(index_iterator.__iter__()):\n\t\t\tprint i\n\t\t\tif lm.l >= lmin:\n\t\t\t\tif lm.l != prevl:\n\t\t\t\t\tmidx = 0\n\t\t\t\t\tleg = zeros([(2 * lm.l + 1), len(theta), len(phi)], dtype=complex)\n\n\t\t\t\tfor j, my_theta in enumerate(theta):\n\t\t\t\t\tleg[midx,j,:] = sph_harm(lm.m, lm.l, phi, my_theta)\n\t\t\t\t\n\t\t\t\tmidx += 1\n\n\t\t\t\tif midx == 2 * lm.l + 1:\n\t\t\t\t\tf.createArray('/','l_' + str(lm.l),leg)\n\n\t\t\t\tprevl = lm.l\n\t\tf.setNodeAttr(\"/\",\"lmax\",index_iterator.lmax)\n\t\tf.close()", "def _write_dict_to_mdin(self, f, dictionary):\n\n for key, val in dictionary.items():\n if val is not None:\n f.write(\" {:15s} {:s},\\n\".format(key+\" =\", str(val)))\n f.write(\" /\\n\")", "def main():\n diagonals_in_hd()", "def write_longdir_table(self):\n\n 
start_of_longdirs = self.db_file.tell()\n self.longdirs = {}\n for miEntry in self.mainIndex:\n if miEntry.longdir not in self.longdirs:\n self.longdirs[miEntry.longdir] = \\\n self.db_file.tell() - start_of_longdirs\n self.db_file.write(miEntry.encodedLongdir)\n miEntry.set_longdir_offset(self.longdirs[miEntry.longdir])", "def write_database(self,lrcs):\n\n # allocate list\n lns = []\n\n for i in range(len(lrcs)):\n # album level\n for j in range(len(lrcs[i])):\n # song level\n for k in range(1,len(lrcs[i][j])):\n lns.append(lrcs[i][j][k])\n\n # create database\n f = open('KISS_LINES','w')\n\n for iln in lns:\n f.write(iln)\n\n f.close()\n\n return 1", "def write_ldpc_params(parity_check_matrix, file_path):\n with open(file_path, 'x') as file:\n file.write('{} {}\\n'.format(parity_check_matrix.shape[1], parity_check_matrix.shape[0]))\n file.write('{} {}\\n'.format(parity_check_matrix.sum(0).max(), parity_check_matrix.sum(1).max()))\n\n for deg in parity_check_matrix.sum(0):\n file.write('{} '.format(deg))\n file.write('\\n')\n for deg in parity_check_matrix.sum(1):\n file.write('{} '.format(deg))\n file.write('\\n')\n\n for line in parity_check_matrix.T:\n nodes = line.nonzero()[0]\n for node in nodes[:-1]:\n file.write('{}\\t'.format(node + 1))\n file.write('{}\\n'.format(nodes[-1] + 1))\n\n for col in parity_check_matrix:\n nodes = col.nonzero()[0]\n for node in nodes[:-1]:\n file.write('{}\\t'.format(node + 1))\n file.write('{}\\n'.format(nodes[-1] + 1))\n file.write('\\n')", "def write_drugs_name(self):\r\n for elem in range(len(self.output_zakladki)):\r\n n, first_row = 0, 3\r\n\r\n sheet = self.output_zakladki[elem]\r\n sh = self.output_file.get_sheet_by_name(sheet)\r\n no_of_row = self.liczba_pelnych_linii\r\n\r\n if self.niepelna_liniia:\r\n no_of_row += 1\r\n\r\n while no_of_row != 0:\r\n for lek in self.output_leki[elem]:\r\n sh['A' + str(first_row + n)] = lek\r\n n += 1\r\n first_row += 2\r\n no_of_row -= 1", "def _update_database_map(self, path):\n if path:\n filename = path + '/APD_MAP.txt'\n else:\n filename = 'APD_MAP.txt'\n filepointer = open(filename, 'w')\n for invariom, molecule in self.map.items():\n filepointer.write(invariom + ':' + molecule + '\\n')\n filepointer.close()", "def save_file(map_, args): \n if args.segments:\n p = os.path.join(args.res_dir, 'compression_'+args.db+\"_seg\")\n else:\n p = os.path.join(args.res_dir, 'compression_'+args.db)\n with open(p, 'w') as f:\n for file in map_:\n f.write(\"{} {}\\n\".format(file, map_[file]))", "def printMap(values, klab, vlab, precision, offset=16):\n\tprint(klab.ljust(offset, \" \") + vlab)\n\tfor k in values.keys():\n\t\tv = values[k]\n\t\tks = toStr(k, precision).ljust(offset, \" \")\n\t\tvs = toStr(v, precision)\n\t\tprint(ks + vs)", "def write_mat_file(self, geom_filename):\n mat_dict = {}\n mat_dict['Lx_p'] = self.Lx_p\n mat_dict['Ly_p'] = self.Ly_p\n mat_dict['Lz_p'] = self.Lz_p\n mat_dict['Lo'] = self.obst.get_Lo()\n mat_dict['Ny_divs'] = self.N_divs\n mat_dict['rho_p'] = self.rho_p\n mat_dict['nu_p'] = self.nu_p\n mat_dict['snl'] = list(np.union1d(self.obst_list[:],self.solid_list[:]))\n mat_dict['inl'] = list(self.inlet_list[:])\n mat_dict['onl'] = list(self.outlet_list[:])\n\n scipy.io.savemat(geom_filename,mat_dict)", "def rellenarMatrix(self):\n for i in range(0, 26):\n self.matrixMAPA.append([])\n for j in range(0, 26):\n self.matrixMAPA[i].append((0, str(i)+\"-\"+str(j)))", "def load_data_map(self):\n with open(\"map/maps.txt\") as maps:\n for x_axis, line in enumerate(maps):\n self.x_axis = x_axis\n 
self.full_map.insert(x_axis, [])\n for y_axis, case in enumerate(line.strip()):\n self.y_axis = y_axis\n if case == \"D\":\n self.full_map[x_axis].insert(y_axis, \"M\")\n self.user.position = (x_axis, y_axis)\n elif case == \"A\":\n self.full_map[x_axis].insert(y_axis, \"A\")\n elif case == \"_\":\n self.full_map[x_axis].insert(y_axis, \"_\")\n elif case == \"#\":\n self.full_map[x_axis].insert(y_axis, \"#\")", "def export_maps(maps, mask, affine, output_file, zscore=False):\n\tnifti_image = make_niimage_4d(maps, mask, affine, zscore=zscore)\n\tnib.save(nifti_image, output_file)", "def main():\n\n \"\"\"\n nodes, hd3 = erdos_rennie_like(100,8333,5)\n export('d3',hd3)\n\n nodes, hd5 = erdos_rennie_like(100,8333,6)\n export('d5',hd5)\n\n nodes, hd6 = erdos_rennie_like(100,8333,7)\n export('d6',hd6)\n \"\"\"\n\n \"\"\"\n nodes, sparse1 = erdos_rennie_like(600, 1200, 3)\n export('sparse_diag1', sparse1)\n\n nodes, sparse2 = erdos_rennie_like(600, 2400, 3)\n export('sparse_diag2',sparse2)\n\n nodes, sparse3 = erdos_rennie_like(600, 5800, 3)\n export('sparse_diag3',sparse3)\n\n nodes, sparse4 = erdos_rennie_like(600,11600, 3)\n export('sparse_diag4',sparse4)\n\n nodes, sparse5 = erdos_rennie_like(600,23200, 3)\n export('sparse_diag5',sparse5)\n \"\"\"\n\n nodes, size1 = erdos_rennie_like(100, 500, 3)\n nodes, size2 = erdos_rennie_like(200,1000,3)\n nodes,size3 = erdos_rennie_like(300,1500,3)\n nodes,size4 = erdos_rennie_like(400,2000,3)\n nodes,size5 = erdos_rennie_like(500,2500,3)\n\n export('size_diag1',size1)\n export('size_diag2',size2)\n export('size_diag3',size3)\n export('size_diag4',size4)\n export('size_diag5',size5)", "def save_grd(filename, meta, map):\n if os.path.exists(filename):\n raise ValueError(\"File already exists: {}\".format(filename))\n if map.shape != (meta['NX'], meta['NY'], meta['NCOMP']):\n raise ValueError(\"The map shape does not match the metadata dictionary.\")\n points = meta['NX'] * meta['NY']\n components = meta['NCOMP']\n data = np.empty((points, 2 * components))\n for component in range(components):\n data[:, 2 * component] = map[:, :, component].reshape(points, order='F').real\n data[:, 2 * component + 1] = map[:, :, component].reshape(points, order='F').imag\n with open(filename, 'w') as f:\n for line in meta['header']:\n f.write('{}\\n'.format(line))\n f.write('{:2d}\\n'.format(meta['KTYPE']))\n f.write('{:12d}{:12d}{:12d}{:12d}\\n'.format(meta['NSET'], meta['ICOMP'], meta['NCOMP'], meta['IGRID']))\n f.write('{:12d}{:12d}\\n'.format(meta['IX'], meta['IY']))\n f.write(' {: 0.10E} {: 0.10E} {: 0.10E} {: 0.10E}\\n'.format(meta['XS'], meta['YS'], meta['XE'], meta['YE']))\n f.write('{:12d}{:12d}{:12d}\\n'.format(meta['NX'], meta['NY'], meta['KLIMIT']))\n for p in range(points):\n f.write(''.join([float_to_string(number) for number in data[p, :]]) + '\\n')", "def write_map(self, file_name):\n\n if self.pixel == \"HEALPIX\":\n hp.fitsfunc.write_map(file_name, self.data, overwrite=True)\n if self.pixel == \"CAR\":\n enmap.write_map(file_name, self.data)", "def maps(offices, fixed):\n with Image(filename=BAT_B) as page, Drawing() as draw:\n for office, x, y in MAP_POSITIONS:\n label = door_label(offices[office], logo=False)\n if label:\n draw.composite(\"over\", x, y, label.width / 3, label.height / 3, label)\n draw(page)\n page.save(filename=\"generated_map%s.png\" % (\"_fixed\" if fixed else \"\"))", "def dict2sparseMatrix(wDict,std=0,diag=0):\n data = lil_matrix((len(list(wDict.keys())),len(list(wDict.keys()))))\n nAreas = len(list(wDict.keys()))\n for i in 
wDict:\n data[i,i] = diag\n ne = len(wDict[i])+ diag\n for j in wDict[i]:\n if std:\n data[i,j] = 1 / float(ne)\n else:\n data[i,j] = 1\n return data", "def writeDarknet(b, of):\n with open(of, \"w\") as fh:\n for r in b:\n cx = (r[1]+r[3])*0.5*iw\n cy = (r[2]+r[4])*0.5*ih\n w = (r[3] - r[1])*iw\n h = (r[4] - r[2])*ih\n fh.write(\"%d %f %f %f %f\\n\" % ( labels[r[0]], cx*dw, cy*dh, w*dw, h*dh) )", "def polylinedictarraycopy(d):#d——原始图层多段线字典 \r\n dictlist=[]\r\n ratiolist=[] #放缩率列表\r\n rationumaccumulationlist=[] #放缩率数量累加列表\r\n \r\n eachrationum=globalconfig.X_ARRAY_NUM//globalconfig.RATIO_NUM\r\n leftrationum=globalconfig.X_ARRAY_NUM%globalconfig.RATIO_NUM\r\n \r\n eachrationumlist=[eachrationum]*globalconfig.RATIO_NUM #各个放缩率对应数量的列表\r\n \r\n for i in range((globalconfig.RATIO_NUM-1)//2-(leftrationum-1)//2,(globalconfig.RATIO_NUM-1)//2-(leftrationum-1)//2+leftrationum):\r\n eachrationumlist[i]=eachrationumlist[i]+1 #将整除后的余值加入到靠中间放缩率的方案中。\r\n \r\n rationumaccumulationlist.append(0) \r\n \r\n for i in range(1,globalconfig.RATIO_NUM): #计算放缩率数量累加列表\r\n rationumaccumulationlist.append(rationumaccumulationlist[i-1]+eachrationumlist[i-1])\r\n \r\n for i in range(0,globalconfig.RATIO_NUM): #计算放缩率列表\r\n ratiolist.append((globalconfig.CENTER_RATIO-((globalconfig.RATIO_NUM+1)//2-1)*globalconfig.RATIO_DIFF)+i*globalconfig.RATIO_DIFF) \r\n \r\n for i in range(0,globalconfig.RATIO_NUM): #每种放缩率\r\n for j in range(0,eachrationumlist[i]): #每种放缩率对应数量\r\n newdict={}\r\n for e in d: #将字典中值即每一图层对应的多段线列表进行复制并移动到指定位置\r\n newdict[e]=polylinedatasetarraycopy(d[e],ratiolist[i],globalconfig.CUTLINE_X_OFFSET+globalconfig.X_BLANK+(rationumaccumulationlist[i]+j+0.5)*globalconfig.X_LENGTH/globalconfig.CENTER_RATIO,globalconfig.CUTLINE_Y_OFFSET+globalconfig.Y_BLANK+0.5*globalconfig.Y_LENGTH/globalconfig.CENTER_RATIO,e,len(dictlist)) \r\n #newdict.append([e,polylinedatasetarraycopy(d[e],ratiolist[i],globalconfig.CUTLINE_X_OFFSET+globalconfig.X_BLANK+(rationumaccumulationlist[i]+j+0.5)*globalconfig.X_LENGTH/globalconfig.CENTER_RATIO,globalconfig.CUTLINE_Y_OFFSET+globalconfig.Y_BLANK+0.5*globalconfig.Y_LENGTH/globalconfig.CENTER_RATIO,e,len(dictlist))])\r\n dictlist.append(newdict) \r\n return (dictlist,ratiolist,eachrationumlist)", "def test_degrade_map_recarray(self):\n random.seed(seed=12345)\n\n nside_coverage = 32\n nside_map = 1024\n nside_new = 256\n\n dtype = [('col1', 'f8'), ('col2', 'f8'), ('col3', 'i4')]\n sparse_map = healsparse.HealSparseMap.make_empty(nside_coverage, nside_map, dtype, primary='col1')\n pixel = np.arange(20000)\n values = np.zeros_like(pixel, dtype=dtype)\n values['col1'] = random.random(size=pixel.size)\n values['col2'] = random.random(size=pixel.size)\n values['col3'] = random.poisson(size=pixel.size, lam=2)\n sparse_map.update_values_pix(pixel, values)\n\n ra, dec = hpg.pixel_to_angle(nside_map, pixel)\n\n # Make the test values\n hpmap_col1 = np.zeros(hpg.nside_to_npixel(nside_map)) + hpg.UNSEEN\n hpmap_col2 = np.zeros(hpg.nside_to_npixel(nside_map)) + hpg.UNSEEN\n hpmap_col3 = np.zeros(hpg.nside_to_npixel(nside_map)) + hpg.UNSEEN\n hpmap_col1[pixel] = values['col1']\n hpmap_col2[pixel] = values['col2']\n hpmap_col3[pixel] = values['col3']\n\n # Degrade healpix maps\n hpmap_col1 = hp.ud_grade(hpmap_col1, nside_out=nside_new, order_in='NESTED', order_out='NESTED')\n hpmap_col2 = hp.ud_grade(hpmap_col2, nside_out=nside_new, order_in='NESTED', order_out='NESTED')\n hpmap_col3 = hp.ud_grade(hpmap_col3, nside_out=nside_new, order_in='NESTED', order_out='NESTED')\n ipnest_test = 
hpg.angle_to_pixel(nside_new, ra, dec)\n\n # Degrade the old map\n new_map = sparse_map.degrade(nside_out=nside_new)\n testing.assert_almost_equal(new_map.get_values_pos(ra, dec, lonlat=True)['col1'],\n hpmap_col1[ipnest_test])\n testing.assert_almost_equal(new_map.get_values_pos(ra, dec, lonlat=True)['col2'],\n hpmap_col2[ipnest_test])\n testing.assert_almost_equal(new_map.get_values_pos(ra, dec, lonlat=True)['col3'],\n hpmap_col3[ipnest_test])\n\n # Test degrade-on-read\n self.test_dir = tempfile.mkdtemp(dir='./', prefix='TestHealSparse-')\n\n fname = os.path.join(self.test_dir, 'test_recarray_degrade.hs')\n sparse_map.write(fname)\n\n new_map2 = healsparse.HealSparseMap.read(fname, degrade_nside=nside_new)\n\n testing.assert_almost_equal(new_map2.get_values_pos(ra, dec, lonlat=True)['col1'],\n hpmap_col1[ipnest_test])\n testing.assert_almost_equal(new_map2.get_values_pos(ra, dec, lonlat=True)['col2'],\n hpmap_col2[ipnest_test])\n testing.assert_almost_equal(new_map2.get_values_pos(ra, dec, lonlat=True)['col3'],\n hpmap_col3[ipnest_test])", "def _calculate_leading_dim_map():\n small_matrixes = [(value, value+64) for value in range(256, 40192+512, 512)]\n large_matrixes = [(value, value+1088) for value in range(1024, 39936+1024, 1024)]\n return dict(small_matrixes + large_matrixes)", "def create_diagonal_mask(low_to_high_map, target_value=1):\n low_to_high_map.drop_duplicates()\n grouped = low_to_high_map.groupby(low_to_high_map.columns[1])\n ordered_low_level_names = list()\n group_matrices = []\n for name, group in grouped:\n group_size = group.shape[0]\n # build up row/col names, order doesn't matter within a group = they are all equal\n ordered_low_level_names = ordered_low_level_names + group.iloc[:, 0].tolist()\n # set the diagonal matrix to be the target value\n single_group_matrix = np.full(shape=(group_size, group_size), fill_value=target_value)\n group_matrices.append(single_group_matrix)\n # add the individual matrices along the diagonal\n relationship_matrix = scipy.linalg.block_diag(*group_matrices)\n # convert to pandas dataframe and set names\n relationship_df = pd.DataFrame(relationship_matrix, columns=ordered_low_level_names, index=ordered_low_level_names)\n\n return relationship_df", "def __prepare_off_diagonals_contents(off_diagonals: Optional[dict],\n elements: list) -> list:\n\n off_diagonal_contents = []\n\n number_of_off_diagonal = \\\n len(off_diagonals) if off_diagonals is not None else 0\n\n off_diagonal_contents.append(\n ' {:^2}'.format(number_of_off_diagonal) +\n ' ! 
Nr of off-diagonal terms; ' +\n 'Ediss;Ro;gamma;rsigma;rpi;rpi2\\n')\n\n if number_of_off_diagonal:\n\n for key, values in off_diagonals.items():\n\n num = \\\n ReactiveForceFieldWriter.__get_num_from_str(elements, key)\n\n off_diagonal_contents.append(\n ' ' * 2 + num + ' ' * 2 +\n str(values['value']).lstrip('[').rstrip(']') +\n '\\n')\n\n return off_diagonal_contents", "def make_digital_map(self):\n self.uni.home(axis='X')\n time.sleep(10.0)\n azimuths = []\n for x in numpy.arange(self.azimuth.xmin, self.azimuth.xmax + self.azimuth.xinc,\n self.azimuth.xinc):\n if x > self.azimuth.xmax:\n x = self.azimuth.xmax\n azimuths.append(x)\n azimuths = numpy.array(azimuths)\n wait = (abs(azimuths[0]-self.uni.pos_az)/self.azimuth.xslew_vel) + 1.0\n self.uni.set_azimuth(azimuths[0], self.azimuth.xslew_vel)\n logger.info(\"Sleeping for %.2f seconds while stage gets to start of map\" % wait)\n time.sleep(wait)\n\n fp = open(self.filename, 'w')\n header = self.make_digital_header()\n fp.write(header)\n plt.ion()\n plt.plot([self.azimuth.xmin, self.azimuth.xmax], [0, 0], 'r-')\n plt.xlim(self.azimuth.xmin, self.azimuth.xmax)\n plt.ylim(-0.5, 6)\n plt.draw()\n for az in azimuths:\n wait = (abs(az-self.uni.pos_az)/self.azimuth.xmap_vel) + 1.0\n self.uni.set_azimuth(az, self.azimuth.xmap_vel)\n logger.info(\"Sleeping for %.2f seconds while stage gets to %.1f degrees\" % (wait, az))\n time.sleep(wait)\n fp.write(\"%.3f\" % az)\n #data = self.take_readings()\n for i, freq in enumerate(self.freq_list):\n self.syn.set_freq(freq)\n for dig_channel in range(8):\n for dig in range(8):\n if dig != dig_channel:\n self.labjack.digital_output(dig, 1)\n time.sleep(0.050)\n self.labjack.digital_output(dig_channel, 0)\n time.sleep(0.050)\n ratio, phase = self.vv.measure_vector_averaged_transmission(self.average)\n fp.write(\",%.6g,%.6g\" % (ratio, phase))\n logger.info(\"Az: %.2f, Freq: %.3f, Ratio: %g; Phase: %g\" % (az, freq/1e9, ratio, phase))\n plt.plot(az, ratio, self.plot_symbols[i])\n plt.draw()\n fp.write('\\n')\n \n time.sleep(10.0)\n self.uni.home(axis='X')\n logger.info(\"Map Completed, Saving data file %s\" % self.filename)\n fp.close()", "def make_input_map(self) :\n\n self.input_map = \"\"\n stencil = self.core.stencil\n pattern = self.core.pattern\n reflect = len(pattern)+1 # reflector id, last material\n N = self.dimension\n coremap = np.zeros((N+2,N+2), dtype='i')\n \n # reflections and vacuum\n coremap[0, 1:N+1] = -1 \n coremap[1:N+1, 0] = -1\n coremap[N+1, 1:N+1] = -2\n coremap[1:N+1, N+1] = -2\n \n fuelindex = 0\n \n for i in range(1, N+1) :\n for j in range(1, N+1) :\n if j == 1 and i > 1 :\n pass\n else :\n if stencil[i-1, j-1] > 0 : # a fuel\n coremap[i, j] = pattern[fuelindex]+1\n fuelindex += 1\n elif stencil[i-1, j-1] == 0 : # a reflector\n coremap[i, j] = reflect\n else : # a void\n pass \n # Copy elements such that rotational symmetry is enforced. 
\n for j in range(2, N+1) :\n coremap[j, 1] = coremap[1, j]\n for i in range(0, N+2) :\n for j in range(0, N+2) :\n self.input_map +='%4i' % (coremap[i, j])\n self.input_map += '\\n'", "def _update_farness_map(self,ind):", "def ldos2d(h,e=0.0,delta=0.001,nrep=3,nk=None,mode=\"green\",\n random=True,num_wf=20):\n if mode==\"green\":\n import green\n if h.dimensionality!=2: raise # only for 1d\n if nk is not None:\n print(\"LDOS using normal integration with nkpoints\",nk)\n gb,gs = green.bloch_selfenergy(h,energy=e,delta=delta,mode=\"full\",nk=nk)\n d = [ -(gb[i,i]).imag for i in range(len(gb))] # get imaginary part\n else:\n print(\"LDOS using renormalization adaptative Green function\")\n gb,gs = green.bloch_selfenergy(h,energy=e,delta=delta,mode=\"adaptive\")\n d = [ -(gb[i,i]).imag for i in range(len(gb))] # get imaginary part\n elif mode==\"arpack\": # arpack diagonalization\n import klist\n if nk is None: nk = 10\n hkgen = h.get_hk_gen() # get generator\n ds = [] # empty list\n for k in klist.kmesh(h.dimensionality,nk=nk): # loop over kpoints\n print(\"Doing\",k)\n if random:\n print(\"Random k-point\")\n k = np.random.random(3) # random k-point\n hk = csc_matrix(hkgen(k)) # get Hamiltonian\n ds += [ldos_arpack(hk,num_wf=num_wf,robust=False,\n tol=0,e=e,delta=delta)]\n d = ds[0]*0.0 # inititlize\n for di in ds: d += di # add\n d /=len(ds) # normalize\n d = spatial_dos(h,d) # convert to spatial resolved DOS\n g = h.geometry # store geometry\n x,y = g.x,g.y # get the coordinates\n go = h.geometry.copy() # copy geometry\n go = go.supercell(nrep) # create supercell\n write_ldos(go.x,go.y,d.tolist()*(nrep**2),z=go.z) # write in file", "def write_LG_normal(self, start=0, end=801):\n\n # get all the CoM info from postgres\n MW_data = self.read_com_db('MW')\n M31_data = self.read_com_db('M31')\n M33_data = self.read_com_db('M33')\n \n # pull out just the 3 columns giving position\n MW_coms = np.array([MW_data[xi] for xi in ('x','y','z')])\n M31_coms = np.array([M31_data[xi] for xi in ('x','y','z')])\n M33_coms = np.array([M33_data[xi] for xi in ('x','y','z')])\n\n # define 2 vectors that lie in the plane\n M31_MW = MW_coms - M31_coms\n M31_M33 = M33_coms - M31_coms\n\n # the normal we want comes from the vector cross product\n normals = np.cross(M31_MW, M31_M33, axis=0)\n normals /= norm(normals, axis=0)\n\n output = np.concatenate((MW_data['t'][:,np.newaxis], normals.T), axis=1)\n print(output.shape)\n \n # compose the filename for output\n fileout = './normals.txt'\n\n # write the data to file\n # we do this because we don't want to have to repeat this process \n # this code should only have to be called once\n np.savetxt(fileout, output, fmt = \"%11.3f\"*4, comments='#',\n header=\"{:>10s}{:>11s}{:>11s}{:>11s}\"\\\n .format('t', 'x_hat', 'y_hat', 'z_hat'))", "def generate_map(nrows, ncols, nrooms, max_col_size, max_row_size):\n arr = np.zeros((nrows, ncols), dtype=np.int8)\n\n for i in range(nrooms):\n rand_row_start = np.random.randint(nrows)\n rand_col_start = np.random.randint(ncols)\n\n rand_row_size = np.random.randint(max_row_size / 2, max_row_size)\n rand_col_size = np.random.randint(max_col_size / 2, max_col_size)\n\n arr[rand_row_start:rand_row_start + rand_row_size, rand_col_start:rand_col_start + rand_col_size] = 1\n\n labels = measure.label(arr)\n regions = measure.regionprops(labels)\n\n centroids = list()\n for region in regions:\n centroids.append(region.centroid)\n\n num_centroids = len(centroids)\n\n # get distances between every pair of centroids\n dists = 
scipy.spatial.distance.cdist(centroids, centroids)\n\n # get a distance that is greater than all current distances\n max_dist = np.max(dists) + 1\n\n # make sure upper triangle is at least max_dist so that when picking closest\n # pairs, we won't choose a diagonal element or a duplicate connection\n dists = dists + np.triu(np.ones((num_centroids, num_centroids))) * max_dist\n\n for i in range(num_centroids - 1):\n min_dist_idx = np.argmin(dists)\n min_dist_idx = np.unravel_index(min_dist_idx, dists.shape)\n\n # create a hallway between regionprops\n centroid1 = np.array(centroids[min_dist_idx[0]], dtype=np.int)\n centroid2 = np.array(centroids[min_dist_idx[1]], dtype=np.int)\n\n [row_centroid_1, row_centroid_2] = sorted([centroid1, centroid2], key=lambda x: x[0])\n [col_centroid_1, col_centroid_2] = sorted([centroid1, centroid2], key=lambda x: x[1])\n\n arr[row_centroid_1[0]:row_centroid_2[0] + 1, row_centroid_1[1]] = 1\n arr[row_centroid_2[0], col_centroid_1[1]:col_centroid_2[1] + 1] = 1\n\n dists[:, min_dist_idx[1]] += max_dist\n\n return arr", "def view_map():\n print(\"\"\"\n ____________________________________Client Rooms______________________\n| |1 Locker Rooms 2| 1 | 2 | |\n| |_________ ________| | | Dance |\n| | | |__| |__| Floor |\n| | | Hall |\n| Garage | Front | _______|_______ |\n| | Lobby | | |_ ____________|\n| | | | Storage |\n| | | Lounge |______________|\n| ______________ Car\n|___________________Front Entrance______________________| Allyway\n\"\"\")", "def getLocalMap(dist_compl):\n sdc=dist_compl*RES\n #clms are real ;)\n #rws are imaginary :D #rows\n map_padd = 1*RES #add a meter\n rws_ofs = abs(sdc.imag.min())+map_padd #offsetX\n rws = abs(sdc.imag.max())+(rws_ofs)\n clms_ofs = abs(sdc.real.min())+map_padd\n clms = abs(sdc.real.max())+(clms_ofs)\n M = np.zeros((np.round(rws+map_padd).astype(int),np.round(clms+map_padd).astype(int))).astype(dtype=MAP_D_TYPE)#empty local map\n Mg = M.copy()\n points = sdc + np.array([clms_ofs+1j*rws_ofs]) #scale\n #M[points.imag.astype(int),points.real.astype(int)]=10 \n for p in points:\n r=np.round(p.imag).astype(int)\n c=np.round(p.real).astype(int)\n try:\n #draw line in matrix\n lc = [np.round(rws_ofs).astype(int),np.round(clms_ofs).astype(int),r,c]\n rr, cc, val = line_aa(*lc) #not really demaning --> 1%\n M[rr, cc] = np.logical_or(M[rr,cc]>0, val>0) \n #add gaussian\n Mg[r-GPoints//2:r+GPoints//2,c-GPoints//2:c+GPoints//2]+=Gau\n except:\n print('Error: out of array when calculating the local map',r,c)\n Mg[Mg>100]=100 #cap the gaussian matrix\n car_pos_in_loc_mat = np.array([np.round(clms_ofs).astype(int), np.round(rws_ofs).astype(int)])\n #Mg[car_pos_in_loc_mat[1],car_pos_in_loc_mat[0]]=300 #add car pos\n return M*(-100)+Mg, car_pos_in_loc_mat", "def diagonalizing_gates(self):\n raise NotImplementedError", "def read_flat_map(filename,i_map=0) :\n hdul=fits.open(filename)\n w=WCS(hdul[0].header)\n\n maps=hdul[i_map].data\n ny,nx=maps.shape\n\n return w,maps", "def write_grid2d(grid_file, grid2d):\n with grid_file.open('w') as f:\n for row in grid2d['label']:\n f.write('\\t'.join(row) + '\\n')", "def print_map(road_map):\n # For each city index in the road map\n for i in range(len(road_map)):\n # Identify the index of the current city and next city\n a = i\n b = (i+1) % len(road_map)\n # Calculate distance between the current city and next city\n distance = euler_dist(road_map[a][2],road_map[b][2],road_map[a][3],road_map[b][3])\n print('{0:15} {1:3} {2:15} {3:15}'.format(road_map[a][1],'->',road_map[b][1],round(distance,2)))", 
"def multi_ldos(h,es=[0.0],delta=0.001,nrep=3,nk=2,numw=3,random=False):\n print(\"Calculating eigenvectors in LDOS\")\n if h.is_sparse: # sparse Hamiltonian\n from bandstructure import smalleig\n print(\"SPARSE Matrix\")\n evals,ws = [],[] # empty list\n ks = klist.kmesh(h.dimensionality,nk=nk) # get grid\n hk = h.get_hk_gen() # get generator\n for k in ks: # loop\n print(\"Diagonalizing in LDOS, SPARSE mode\")\n if random:\n k = np.random.random(3) # random vector\n print(\"RANDOM vector in LDOS\")\n e,w = smalleig(hk(k),numw=numw,evecs=True)\n evals += [ie for ie in e]\n ws += [iw for iw in w]\n# evals = np.concatenate([evals,e]) # store\n# ws = np.concatenate([ws,w]) # store\n# raise\n# (evals,ws) = h.eigenvectors(nk) # get the different eigenvectors\n else:\n print(\"DENSE Matrix\")\n (evals,ws) = h.eigenvectors(nk) # get the different eigenvectors\n ds = [(np.conjugate(v)*v).real for v in ws] # calculate densities\n del ws # remove the wavefunctions\n os.system(\"rm -rf MULTILDOS\") # remove folder\n os.system(\"mkdir MULTILDOS\") # create folder\n go = h.geometry.copy() # copy geometry\n go = go.supercell(nrep) # create supercell\n fo = open(\"MULTILDOS/MULTILDOS.TXT\",\"w\") # files with the names\n for e in es: # loop over energies\n print(\"MULTILDOS for energy\",e)\n out = np.array([0.0 for i in range(h.intra.shape[0])]) # initialize\n for (d,ie) in zip(ds,evals): # loop over wavefunctions\n fac = delta/((e-ie)**2 + delta**2) # factor to create a delta\n out += fac*d # add contribution\n out /= np.pi # normalize\n out = spatial_dos(h,out) # resum if necessary\n name0 = \"LDOS_\"+str(e)+\"_.OUT\" # name of the output\n name = \"MULTILDOS/\" + name0\n write_ldos(go.x,go.y,out.tolist()*(nrep**h.dimensionality),\n output_file=name) # write in file\n fo.write(name0+\"\\n\") # name of the file\n fo.flush() # flush\n fo.close() # close file\n # Now calculate the DOS\n from dos import calculate_dos\n es2 = np.linspace(min(es),max(es),len(es)*10)\n ys = calculate_dos(evals,es2,delta) # use the Fortran routine\n from dos import write_dos\n write_dos(es2,ys,output_file=\"MULTILDOS/DOS.OUT\")", "def convert_table_to_ldac(tbl):\n from astropy.io import fits\n import tempfile\n f = tempfile.NamedTemporaryFile(suffix='.fits', mode='rb+')\n tbl.write(f, format='fits')\n f.seek(0)\n hdulist = fits.open(f, mode='update')\n tbl1, tbl2 = convert_hdu_to_ldac(hdulist[1])\n new_hdulist = [hdulist[0], tbl1, tbl2]\n new_hdulist = fits.HDUList(new_hdulist)\n return new_hdulist", "def write_shortdir_table(self):\n\n start_of_shortdirs = self.db_file.tell()\n self.shortdirs = {}\n for miEntry in self.mainIndex:\n if miEntry.shortdir not in self.shortdirs:\n self.shortdirs[miEntry.shortdir] = \\\n self.db_file.tell() - start_of_shortdirs\n self.db_file.write(miEntry.encodedShortdir)\n miEntry.set_shortdir_offset(self.shortdirs[miEntry.shortdir])", "def plink_to_ld_matrix(locus ,output_directory, population, remove_plink_files=False):\n output_file = locus+ '.' + population + '.LD'\n command = __PLINK_TO_LD_MATRIX__.format(locus + '.' + population,locus + '.' + population, locus + '.' + population + '.vcf')\n # TODO: Fix this workaround, which has to change directory because of a limitation in plink\n try:\n os.chdir(output_directory)\n except OSError:\n logging.error(\"Could not change directory\")\n sys.exit(OS_ERROR)\n run_command(command)\n os.rename(locus +'.' + population +'.ld', locus +'.LD' + '.' + population)\n # Remove functionality as new PAINTOR does not require it. 
do not need to specify the number of lines.\n #_add_dimensions_to_file(locus + '.matrix')\n if remove_plink_files:\n _remove_plink_files(output_directory, locus, population)\n try:\n os.chdir('../')\n except OSError:\n logging.error(\"Could not change directory\")\n sys.exit(OS_ERROR)\n try:\n os.remove(os.path.join(output_directory,locus + '.' + population+'.log'))\n os.remove(os.path.join(output_directory,locus + '.' + population + '.nosex'))\n except OSError:\n logging.warning('Could not remove Plink INPUT files, have they already been removed')\n pass", "def write_mrc_matrix(self):\n\n matrix = self.matrix\n \n matrix = self.permute_matrix_to_map_axis_order(matrix)\n a = Numeric.ravel(matrix)\n \n data = a.tostring()\n\n file_write = open(self.path,'ab')\n file_write.write(data)\n file_write.close()", "def printMapPattern(self, binSize):\n ticks = self.displayBins * 4\n tickSize = self.maxValue / ticks\n val = tickSize\n mapStr = \"\"\n while val < self.maxValue:\n mapVal = self.getMapValue(val)\n if mapVal == \"wall\":\n mapStr += \"=\"\n else:\n mapStr += \" \"\n val += tickSize\n print(mapStr)", "def nominal_map(options):\n pass", "def k_map(self):\n\t\tt1 = time.time()\n\t\tmapping_matrix = [] \n\t\tfor index in self.mapping:\n\t\t\tvector = np.zeros(len(self.unique_char),dtype=float)\n\t\t\tvector[index] = 1.0\n\t\t\tmapping_matrix.append(vector)\n\t\tprint(\"Time creating k map {:.3f} sec\".format(time.time()-t1))\n\t\tself.mapping_matrix = mapping_matrix\n\t\treturn mapping_matrix", "def writeWad(path, lumps):\n\n fp = open(path, \"wb\")\n\n # dummy header, will get overwritten later\n fp.write(\"\\x00\" * 12)\n\n # lump data\n offs = []\n for lumpname, lumpdata in lumps:\n offs.append(fp.tell())\n fp.write(lumpdata)\n\n # entry table\n infotableofs = fp.tell()\n for offset, (lumpname, lumpdata) in zip(offs, lumps):\n fp.write(struct.pack(\"<i\", offset))\n fp.write(struct.pack(\"<i\", len(lumpdata)))\n fp.write(_wadifyString(lumpname))\n\n # header\n fp.seek(0)\n fp.write(\"PWAD\")\n fp.write(struct.pack(\"<i\", len(lumps)))\n fp.write(struct.pack(\"<i\", infotableofs))\n\n fp.close()", "def write_file(self,f=None):\n nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper\n # Open file for writing\n if f is None:\n f = open(self.fn_path, 'w')\n # First line: heading\n f.write('{}\\n'.format(self.heading))\n # write dataset 1\n f.write('{} {} {} {} {} {} {}\\n'.format(self.ipakcb, self.iswtoc,\n self.nsystm, self.ithk,\n self.ivoid, self.istpcs,\n self.icrcc))\n # write dataset 2\n t = self.lnwt.array\n for tt in t:\n f.write('{} '.format(tt + 1))\n f.write('\\n')\n\n # write dataset 3\n f.write(\n '{} {} {} {} {} {} {} {} {} {}\\n'.format(self.izcfl, self.izcfm,\n self.iglfl, self.iglfm,\n self.iestfl, self.iestfm,\n self.ipcsfl, self.ipcsfm,\n self.istfl, self.istfm))\n\n # write dataset 4\n f.write(self.gl0.get_file_entry())\n\n # write dataset 5\n f.write(self.sgm.get_file_entry())\n\n # write dataset 6\n f.write(self.sgs.get_file_entry())\n\n # write datasets 7 to 13\n for k in range(self.nsystm):\n f.write(self.thick[k].get_file_entry())\n if self.icrcc != 0:\n f.write(self.sse[k].get_file_entry())\n f.write(self.ssv[k].get_file_entry())\n else:\n f.write(self.cr[k].get_file_entry())\n f.write(self.cc[k].get_file_entry())\n f.write(self.void[k].get_file_entry())\n f.write(self.sub[k].get_file_entry())\n\n # write datasets 14 and 15\n for k in range(nlay):\n if self.istpcs != 0:\n f.write(self.pcsoff[k].get_file_entry())\n else:\n f.write(self.pcs[k].get_file_entry())\n\n 
# write dataset 16 and 17\n if self.iswtoc > 0:\n # dataset 16\n for i in self.ids16:\n f.write('{} '.format(i))\n f.write(' #dataset 16\\n')\n\n # dataset 17\n for k in range(self.iswtoc):\n t = self.ids17[k, :].copy()\n t[0:4] += 1\n for i in t:\n f.write('{} '.format(i))\n f.write(' #dataset 17 iswtoc {}\\n'.format(k + 1))\n\n # close swt file\n f.close()", "def writeInput_for_LAMMPS(rd, listAtoms, filename):\n #f=open(\"geo.kirigami_d0.0_\"+str(rd),\"w+\")\n f=open(filename+str(rd),\"w+\")\n f.write(\"\\n\")\n f.write(\"%d atoms\\n\" %len(listAtoms))\n f.write(\"1 atom types\\n\")\n f.write(\"\\n\")\n f.write(\"%f\\t%f xlo xhi\\n\" %(xlo-1, xhi+1))\n f.write(\"%f\\t%f ylo yhi\\n\" %(ylo-1, yhi+1))\n f.write(\"%f\\t%f zlo zhi\\n\" %(zlo-1, zhi+1))\n f.write(\"\\n\")\n f.write(\"Atoms\\n\")\n f.write(\"\\n\")\n for i in range (len(listAtoms)):\n f.write(\"%d\\t1\\t%f\\t%f\\t%f\\n\" %(i+1, listAtoms[i][0], listAtoms[i][1], listAtoms[i][2]))\n f.close()", "def Gen_Annihilation_Map(angularSize, size, profile,fileOut):\r\n print 'Generating Annihilation Rate Map'\r\n \r\n # Behavioral Parameters \r\n solarDist = 8.3 # Solar Distance in kpc\r\n stopRadius = 60.0 # Radius from sun in kpc to stop LOS integration\r\n zSteps = 100 # Steps for LOS integration\r\n kpc2cm = 3.08568025e21\r\n \r\n # Constants\r\n deg2rad = math.pi/180.0 \r\n map = np.zeros((size, size)) # Initialize map\r\n \r\n # Code\r\n zStepSize = stopRadius/float(zSteps) # Size of one step in kpc\r\n aPP = float(angularSize)/float(size) # Angle per pixel\r\n solidAngle = (aPP*deg2rad)**2.0 # Multiply this by radial portion to get wedge volume.\r\n # Based on the input DM_model, we integrate rho**2 along the LOS\r\n max = 0\r\n for x in range(0,size): \r\n for y in range(0,size):\r\n rate = 0.0\r\n gamma = math.sqrt((float(x)-size/2.0)**2 + (float(y)-size/2.0)**2)*aPP # Inclusive angle for law of cosines\r\n if profile[0] != 'PULSAR': \r\n for z in range(0,zSteps):\r\n # Compute wedge volume. Currently this assumes a relatively small angular region around galactic center.\r\n volume = ((z+1)**3.0-z**3.0)*solidAngle/3.#\r\n # Compute radius from galactic center using law of cosines\r\n a = (z*zStepSize)\r\n #r = math.sqrt(a**2 + b**2 - 2*a*b*math.cos(gamma)) \r\n l = (float(x)-size/2.0)*aPP # longitude\r\n b = (float(y)-size/2.0)*aPP # latitude \r\n r = math.sqrt(a**2 + solarDist**2 - 2*a*solarDist*math.cos(l*deg2rad)*math.cos(b*deg2rad))\r\n #if gamma>max:\r\n # max = gamma\r\n # Get square DM density to obtain rate\r\n if (profile[0] == 'NFW'):\r\n rate += (volume*rho_DM_NFW(r,profile[1],profile[2]))**2\r\n elif (profile[0] == 'EIN'):\r\n rate += (0.0780763*rho_DM_EIN(r,profile[1],profile[2]))**2*zStepSize*kpc2cm*solidAngle\r\n \r\n elif (profile[0] == 'FLAT'):\r\n rate = 1 # just keep everything flat. 
+= will give integration limit dependent results\r\n elif (profile[0] == 'NFWDECAY'): # NFW not squared.\r\n rate += volume*rho_DM_NFW(r,profile[1],profile[2])\r\n map[x,y] = rate\r\n else:\r\n l = (float(x)-size/2.0)*aPP # longitude\r\n b = (float(y)-size/2.0)*aPP # latitude\r\n r = math.sqrt(l**2.0+b**2.0)\r\n if (r<=0.05):\r\n map[x,y] = 0.05**-1.6\r\n else:\r\n map[x,y] = r**-1.2 \r\n # Write to file \r\n outFile = open(fileOut, \"wb\" )\r\n pickle.dump(map/np.max(map), outFile)\r\n print 'Rate Map saved to ', fileOut\r\n #print np.max(map)\r\n print 'J-Factor (GeV^2/cm^5): ' , np.sum(map)\r\n return map/np.max(map)", "def add_building_output_locations(self,dictionary, start,end,step): \n \"\"\"\n Given a dictionary of building footprints and associated nodes,element and sides, add the values \n to the netcdf grid file.\n \n The nodes, elements and sides associated with each footprint correspond to the there index in the RiCOM grid file\n \n Dictionary format:\n {id1: {'nodes': [n1, n2,...nn] }, {'elements': [e1,e2,...,en] },{'sides': [s1,s2,...,sn]}, id2: {}, id3 {}, ...., idn {} } \n \n idn = the id of the building footprint that the node, elements and sides belong to\n \n \"\"\"\n \n if (dictionary != {}):\n maxNodes = 0\n maxElements = 0\n maxSides = 0\n nodesAll = []\n elementsAll = []\n sidesAll = []\n id = []\n perimeter = []\n type = []\n for row in dictionary.iteritems(): \n id.append(row[0]) \n n = row[1]['nodes'] \n e = row[1]['elements']\n s = row[1]['sides']\n perimeter.append(row[1]['perimeter'])\n \n if row[1]['type'] == \"BUILDINGS_AS_HOLES\":\n typeNUM = 1\n elif row[1]['type'] == \"BUILDINGS_GRIDDED\":\n typeNUM = 2\n\n elif row[1]['type'] == \"BUILDINGS_AS_POINTS\":\n typeNUM = 3\n else:\n typeNUM = 0\n type.append(typeNUM)\n \n nodesAll.extend(n)\n elementsAll.extend(e)\n sidesAll.extend(s)\n if maxNodes < len(n): maxNodes = len(n)\n if maxElements < len(e): maxElements = len(e)\n if maxSides < len(s): maxSides = len(s)\n \n \n #remove repeated elements, sides and nodes\n nodesAll = list(set(nodesAll))\n elementsAll = list(set(elementsAll))\n sidesAll = list(set(sidesAll))\n \n print \"# elements = %s\" % len(elementsAll)\n print \"# sides = %s\" % len(sidesAll)\n print \"# nodes = %s\" % len(nodesAll)\n\n \n #initialise arrays for entry into netcdf file\n nodes = zeros((len(dictionary),maxNodes))\n elements = zeros((len(dictionary),maxElements))\n sides = zeros((len(dictionary),maxSides)) \n \n i = 0\n for row in dictionary.iteritems(): \n nodes[i,0:(len(row[1]['nodes']))] = row[1]['nodes']\n elements[i,0:(len(row[1]['elements']))] = row[1]['elements']\n sides[i,0:(len(row[1]['sides']))] = row[1]['sides']\n i+=1 \n \n #create dimensions\n try: self.buildings.createDimension('max_number_nodes',maxNodes)\n except Exception, e: print \"WARNING: %s\" % e\n try: self.buildings.createDimension('max_number_elements',maxElements)\n except Exception, e: print \"WARNING: %s\" % e\n try: self.buildings.createDimension('max_number_sides',maxSides)\n except Exception, e: print \"WARNING: %s\" % e\n try: self.buildings.createDimension('number_of_buildings',len(dictionary))\n except Exception, e: print \"WARNING: %s\" % e \n try: self.building_nodes.createDimension('number_of_nodes',len(nodesAll))\n except Exception, e: print \"WARNING: %s\" % e\n try: self.building_elements.createDimension('number_of_elements',len(elementsAll))\n except Exception, e: print \"WARNING: %s\" % e\n try: self.building_sides.createDimension('number_of_sides',len(sidesAll))\n except Exception, e: print 
\"WARNING: %s\" % e\n \n \n #create variables\n try: building_id = self.buildings.createVariable(varname = 'building_id',datatype = 'i', dimensions=('number_of_buildings',)) \n except Exception, e:\n building_id = self.buildings.variables['building_id']\n print \"WARNING: %s\" % e\n \n try: building_wkt = self.buildings.createVariable(varname = 'building_wkt',datatype = str, dimensions=('number_of_buildings',)) \n except Exception, e:\n building_wkt = self.buildings.variables['building_wkt'] \n print \"WARNING: %s\" % e\n\n try: building_perimeter = self.buildings.createVariable(varname = 'building_perimeter',datatype = 'd', dimensions=('number_of_buildings',)) \n except Exception, e:\n building_perimeter = self.buildings.variables['building_perimeter'] \n print \"WARNING: %s\" % e\n\n\n try: building_type = self.buildings.createVariable(varname = 'building_type',datatype = 'i', dimensions=('number_of_buildings',)) \n except Exception, e:\n building_type = self.buildings.variables['building_type'] \n print \"WARNING: %s\" % e\n\n try: building_nodes = self.buildings.createVariable(varname = 'building_nodes',datatype = 'i', dimensions=('number_of_buildings','max_number_nodes',)) \n except Exception, e:\n building_nodes = self.buildings.variables['building_nodes'] \n print \"WARNING: %s\" % e\n \n try: building_elements = self.buildings.createVariable(varname = 'building_elements',datatype = 'i', dimensions=('number_of_buildings','max_number_elements',)) \n except Exception, e:\n building_elements = self.buildings.variables['building_elements']\n print \"WARNING: %s\" % e\n \n try: building_sides = self.buildings.createVariable(varname = 'building_sides',datatype = 'i', dimensions=('number_of_buildings','max_number_sides',)) \n except Exception, e:\n building_sides = self.buildings.variables['building_sides']\n print \"WARNING: %s\" % e\n \n building_nodes[:] = nodes\n building_elements[:] = elements\n building_sides[:] = sides\n building_id[:] = array(id) \n building_perimeter[:] = array(perimeter)\n building_type[:] = array(type)\n #Set the attributes\n self.building_nodes.start = start\n self.building_nodes.finish = end\n self.building_nodes.step = step\n self.building_elements.start = start\n self.building_elements.finish = end\n self.building_elements.step = step\n self.building_sides.start = start\n self.building_sides.finish = end\n self.building_sides.step = step\n \n #assign the data\n output_ids = {'nodes': [], 'elements': [], 'sides': []}\n try: output_ids['nodes'] = self.building_nodes.createVariable(varname = 'id',datatype = 'i', dimensions=('number_of_nodes',))\n except Exception, e:\n output_ids['nodes'] = self.building_nodes.variables['id']\n print \"WARNING: %s\" % e\n try: output_ids['elements'] = self.building_elements.createVariable(varname = 'id',datatype = 'i', dimensions=('number_of_elements',))\n except Exception, e:\n output_ids['elements'] = self.building_elements.variables['id']\n print \"WARNING: %s\" % e\n try: output_ids['sides'] = self.building_sides.createVariable(varname = 'id',datatype = 'i', dimensions=('number_of_sides',))\n except Exception, e:\n output_ids['sides'] = self.building_sides.variables['id']\n print \"WARNING: %s\" % e\n \n \n output_ids['nodes'][:] = array(nodesAll)\n output_ids['elements'][:] = array(elementsAll)\n output_ids['sides'][:] = array(sidesAll)\n \n \n self.buildingsAdded = True\n else:\n #create dimensions\n try: self.buildings.createDimension('number_of_buildings',0)\n except Exception, e: print \"WARNING: %s\" % e \n try: 
self.building_nodes.createDimension('number_of_nodes',0)\n except Exception, e: print \"WARNING: %s\" % e\n try: self.building_elements.createDimension('number_of_elements',0)\n except Exception, e: print \"WARNING: %s\" % e\n try: self.building_sides.createDimension('number_of_sides',0)\n except Exception, e: print \"WARNING: %s\" % e \n self.buildingsAdded = True", "def _standard_mapping(self):\n mapping_raw = scipy.io.loadmat(join(self.dataset_dir, 'scripts/mapping.mat'))\n self.camvidMap = mapping_raw['camvidMap'] * 255\n self.cityscapesMap = mapping_raw['cityscapesMap'] * 255", "def write_map_file(new_path, flat_seq, chr_id, cname, map, chr_no):\n\n f_flat = open('%s/genome/Contig%d.flat' % (new_path, chr_id[0]+1), 'w') ## create new contig file in .flat format in target directory\n f_flat.write(flat_seq)\n f_flat.close()\n\n i = 0\n tc = len(cname)\n for ele in cname: ## writing a mapping file old contig to new contig information.\n if i == 0:\n start = 1\n stop = map[i]\n else:\n start = p_stop\n stop = start + map[i] - 1\n print 'Contig%d\\t%s\\t%d\\t%d\\t%d' %(chr_id[0]+1, ele, start, stop, map[i])\n if i==(tc-1): \n break\n print 'Contig%d\\tNSPACER\\t%d\\t%d\\t%d' % (chr_id[0]+1, stop+1, stop+25000, 25000) \n p_stop = stop + 25001 # default spacer nts \n i += 1 \n \n (flat_seq, cname, map) = ('', [], [])\n chr_no.append(chr_id[0]+1)\n chr_id = chr_id[1:]\n\n return flat_seq, cname, map, chr_no, chr_id", "def dmap_info(self, out=None):\n binfile = self.config.dataset.binfile\n dmap = BinnedPhotonData(binfile)\n print ('File: %s ' %binfile, file=out)\n print ('\\n index emin emax type nside photons', file=out)\n total = 0\n def bignum(n):\n t = '%9d' % n\n return ' '+' '.join([t[0:3],t[3:6],t[6:]])\n for i,band in enumerate(dmap):\n fmt = '%5d'+2*'%10d'+2*'%6d'+'%12s'\n print (fmt % (i, round(band.emin()), round(band.emax()), \n band.event_class()&15, band.nside(), bignum(band.photons())))\n total += band.photons()\n print ('total%45s'% bignum(total), file=out)\n return dmap", "def dumpData(self,out):\n #--Get sizes and dump into dataIO\n self.hedr.getSize()\n self.hedr.dump(out)\n for (name,size) in self.masters:\n out.packSub0('MAST',name)\n out.packSub('DATA','Q',size)\n if self.gmdt: \n self.gmdt.getSize()\n self.gmdt.dump(out)\n for other in self.others:\n other.getSize()\n other.dump(out)", "def export_ascii(filename, data, lats, lons):\n ascw = open(filename+\".asc\", \"w\")\n ascw.write(\"\"\"ncols %d\nnrows %d\nxllcenter %.2f\nyllcenter %.2f\ncellsize %.2f\nNODATA_value -9999\"\"\" % (\n len(lons), len(lats),\n lons[0], lats[0],\n lons[1] - lons[0]))\n for i in reversed(range(0, data.shape[0])):\n ascw.write(\"\\n\")\n for j in range(0, data.shape[1]):\n x, y = \"%.2f\" % lons[j], \"%.2f\" % lats[i]\n if j > 0:\n ascw.write(\" \")\n ascw.write(\"%.6f\" % data[i, j])\n ascw.close()", "def draw_map():\n \n m1 = Chem.MolFromSmiles('c1ccccc1O')\n m2 = Chem.MolFromSmiles('c1ccccc1N')\n \n # Morgan Fingerprint (with normalization)\n # Can also be used with APFingerprint or TTFingerprint\n fig1, maxweight = SimilarityMaps.GetSimilarityMapForFingerprint(m1, m2, SimilarityMaps.GetMorganFingerprint)\n fig1.savefig('/path/to/similaritymap.png',bbox_inches='tight')\n \n # TT Fingerprint (with normalization)\n fig2, maxweight = SimilarityMaps.GetSimilarityMapForFingerprint(m1, m2, SimilarityMaps.GetTTFingerprint)\n fig2.savefig('/path/to/similaritymap.png',bbox_inches='tight')\n\n # Morgan Fingerprint (without normalization)\n weights = 
SimilarityMaps.GetAtomicWeightsForFingerprint(m1, m2, SimilarityMaps.GetMorganFingerprint)\n fig3 = SimilarityMaps.GetSimilarityMapFromWeights(m2, weights, size=(150, 150))\n fig3.savefig('/path/to/similaritymap.png',bbox_inches='tight') \n \n # the degree of partial charge by using atomic charge\n AllChem.ComputeGasteigerCharges(m1)\n charges = [float(atom.GetProp('_GasteigerCharge')) for atom in m1.GetAtoms()]\n fig4 = SimilarityMaps.GetSimilarityMapFromWeights(m2,charges, size=(150, 150),scale=10)\n fig4.savefig('/path/to/molcharge_similaritymap.png',bbox_inches='tight')", "def create_hypothetical_river_paths_map(riv_dirs,lsmask=None,use_f2py_func=True,\n use_f2py_sparse_iterator=False,nlat=360,nlong=720,\n sparse_fraction=0.5,use_new_method=False):\n\n riv_dirs = np.insert(riv_dirs,obj=0,values=np.zeros(nlong), axis=0)\n #nlat+1 because the array is now already nlat+1 elements wide so you want to place\n #the new row after the last row\n riv_dirs = np.insert(riv_dirs,obj=nlat+1,values=np.zeros(nlong), axis=0)\n if lsmask is not None:\n lsmask = np.insert(lsmask,obj=0,values=np.ones(nlong,dtype=bool), axis=0)\n #nlat+1 because the array is now already nlat+1 elements wide so you want to place\n #the new row after the last row\n lsmask = np.insert(lsmask,obj=nlat+1,values=np.ones(nlong,dtype=bool), axis=0)\n riv_dirs = np.ma.array(riv_dirs,mask=lsmask,copy=True,dtype=int).filled(0)\n else:\n riv_dirs = np.array(riv_dirs,copy=True,dtype=int)\n paths_map = np.zeros((nlat+2,nlong),dtype=np.int32,order='F')\n if use_f2py_func and use_new_method:\n additional_fortran_filenames = [\"algorithms/accumulate_flow_mod.o\",\n \"base/coords_mod.o\",\n \"algorithms/flow_accumulation_algorithm_mod.o\",\n \"base/convert_rdirs_to_indices.o\",\n \"base/doubly_linked_list_mod.o\",\n \"base/doubly_linked_list_link_mod.o\",\n \"base/subfield_mod.o\",\n \"base/unstructured_grid_mod.o\",\n \"base/precision_mod.o\"]\n additional_fortran_filepaths = [path.join(fortran_project_object_path,filename) for filename in\\\n additional_fortran_filenames]\n f2py_mngr = f2py_mg.f2py_manager(path.join(fortran_project_source_path,\n \"drivers\",\n \"accumulate_flow_driver_mod.f90\"),\n func_name=\"accumulate_flow_latlon_f2py_wrapper\",\n additional_fortran_files=additional_fortran_filepaths,\n include_path=fortran_project_include_path)\n paths_map = f2py_mngr.\\\n run_current_function_or_subroutine(np.asfortranarray(riv_dirs),\n *riv_dirs.shape)\n #Make a minor postprocessing correction\n paths_map[np.logical_and(np.logical_or(riv_dirs == 5,\n riv_dirs == 0),\n paths_map == 0)] = 1\n else:\n if use_f2py_func:\n f2py_kernel = f2py_mg.f2py_manager(path.join(fortran_source_path,\n 'mod_iterate_paths_map.f90'),\n func_name='iterate_paths_map')\n iterate_paths_map_function = f2py_kernel.run_current_function_or_subroutine\n else:\n iterate_paths_map_function = iterate_paths_map\n while iterate_paths_map_function(riv_dirs,paths_map,nlat,nlong):\n remaining_points = paths_map.size - np.count_nonzero(paths_map)\n if use_f2py_sparse_iterator and remaining_points/float(paths_map.size) < sparse_fraction:\n f2py_sparse_iterator = f2py_mg.f2py_manager(path.join(fortran_source_path,\n 'mod_iterate_paths_map.f90'),\n func_name='sparse_iterator')\n f2py_sparse_iterator.run_current_function_or_subroutine(riv_dirs,paths_map,nlat,nlong)\n break\n return paths_map[1:-1,:]", "def save_table_as_ldac(tbl, filename, **kwargs):\n hdulist = convert_table_to_ldac(tbl)\n hdulist.writeto(filename, **kwargs)", "def ana_merge_senzory_map(datas):\n#TODO: 
improve senzory map merging\n return iter(datas.viewvalues()).next()['senzory_map']", "def display_map(map):\n for row in map:\n line = \"\"\n for point in row:\n line += point.display_point()\n print(line)", "def get_map_symmetry(self):\n size = (len(self.map), len(self.map[0]))\n # build list of all hills\n player_hills = defaultdict(list) # list of hills for each player\n for row, squares in enumerate(self.map):\n for col, square in enumerate(squares):\n if 0 <= square < 10:\n player_hills[square].append((row, col))\n if len(player_hills) > 0:\n # list of\n # list of tuples containing\n # location, aim, and enemy map dict\n orientations = [[(player_hills[0][0], 0,\n dict([(i, i,) for i in range(self.players)]))]]\n for player in range(1, self.players):\n if len(player_hills[player]) != len(player_hills[0]):\n raise Exception(\"Invalid map\",\n \"This map is not symmetric. Player 0 has {0} hills while player {1} has {2} hills.\"\n .format(len(player_hills[0]), player, len(player_hills[player])))\n new_orientations = []\n for player_hill in player_hills[player]:\n for aim in range(8):\n # check if map looks similar given the orientation\n enemy_map = self.map_similar(player_hills[0][0], player_hill, aim, player)\n if enemy_map != None:\n # produce combinations of orientation sets\n for hill_aims in orientations:\n new_hill_aims = deepcopy(hill_aims)\n new_hill_aims.append((player_hill, aim, enemy_map))\n new_orientations.append(new_hill_aims)\n orientations = new_orientations\n if len(orientations) == 0:\n raise Exception(\"Invalid map\",\n \"This map is not symmetric. Player {0} does not have an orientation that matches player 0\"\n .format(player))\n # ensure types of hill aims in orientations are symmetric\n # place food set and double check symmetry\n valid_orientations = []\n for hill_aims in orientations:\n fix = []\n for loc, aim, enemy_map in hill_aims:\n row, col = self.dest_offset(loc, self.offset_aim((1, 2), aim), size)\n fix.append(((row, col), self.map[row][col]))\n self.map[row][col] = FOOD\n for loc, aim, enemy_map in hill_aims:\n if self.map_similar(hill_aims[0][0], loc, aim, enemy_map[0]) is None:\n break\n else:\n valid_orientations.append(hill_aims)\n for (row, col), ilk in reversed(fix):\n self.map[row][col] = ilk\n if len(valid_orientations) == 0:\n raise Exception(\"Invalid map\",\n \"There are no valid orientation sets\")\n return valid_orientations\n else:\n raise Exception(\"Invalid map\",\n \"There are no player hills\")", "def generate(self, lg, l):\n s=''\n for d in l:\n if type(d)==int:\n s+=chr(d)\n elif type(d)==str:\n if len(d)!=0:\n if len(d)==1:\n s+=d\n elif d[0]==\"#\":\n s+=chr(int(d[1:]))\n else:\n print \"Error in descriptor: \", d\n return\n self._Matr__c_elem().genere(lg,s)\n self._Matr__maj()", "def store_mapping(mapping, outdir, prefix):\r\n fh = open(outdir + \"/\" + prefix + \"_mapping.txt\", \"w\")\r\n for (key, valuelist) in mapping.iteritems():\r\n fh.write(\"%s:\" % key)\r\n for v in valuelist:\r\n fh.write(\"\\t%s\" % v)\r\n fh.write(\"\\n\")\r\n fh.close()", "def generate_map(\n self, console: Console, size: Size, viewport: Region, scroll: Offset\n ) -> LayoutMap:", "def invert_L2_wdls():\n print()", "def makeindmapKDE(self,indmap,s, background):\n import ipyml\n from ipyml.probability import pfunc\n sp = background.shape\n res = np.zeros((sp[0], sp[1]),dtype=np.float32)\n wr,wc = indmap.shape[0], indmap.shape[1]\n filter_size = 30\n stride = 12\n cov = np.asarray([[(2.0/filter_size)**2,0],[0,(2.0/filter_size)**2]])\n if 'g' in 
self.temp_data:\n g = self.temp_data['g']\n else:\n g = pfunc.Gaussian2D((sp[0],sp[1]),cov=cov,invcov=False)\n self.temp_data['g'] = g\n center_r = sp[0]\n center_c = sp[1]\n g = g/g.max()\n for r in range(wr):\n for c in range(wc):\n # calcuate the center of detection window\n rr = (r * stride + r * stride + filter_size-1)/2\n cc = (c * stride + c * stride + filter_size-1)/2\n offset_r = center_r - rr\n offset_c = center_c - cc\n res = res + g[offset_r:offset_r+sp[0],offset_c:offset_c+sp[1]] * indmap[r,c]\n idx = np.argmax(res)\n res = np.tile(res.reshape((res.shape[0],res.shape[1],1)),[1,1,3])\n mr = idx / sp[1]\n mc = idx - mr * sp[1]\n hf = filter_size/2\n box = np.asarray([mc -hf,mr -hf,mc + hf, mr + hf])\n return res/3, box", "def write_density(fname, density):\n K, M, N = density.shape\n output = open(fname, \"w\")\n output.write(\"ARMA_CUB_TXT_FN008\\n\")\n output.write(\"%d %d %d\\n\" % (K, M, N))\n for i in range(N):\n for k in range(K):\n for m in range(M):\n output.write(\" %+.6e\" % density[k, m, i])\n output.write(\"\\n\")\n\n output.close()", "def terrain_cmap_50():\n C = np.array(\n [\n [2, 97, 0],\n [6, 98, 0],\n [11, 98, 0],\n [16, 99, 0],\n [20, 100, 0],\n [25, 101, 0],\n [30, 102, 0],\n [34, 103, 0],\n [39, 104, 0],\n [44, 105, 0],\n [48, 106, 0],\n [53, 107, 0],\n [58, 108, 0],\n [63, 109, 2],\n [68, 110, 4],\n [73, 111, 7],\n [78, 112, 9],\n [83, 113, 12],\n [88, 114, 15],\n [93, 115, 17],\n [98, 116, 20],\n [103, 116, 22],\n [109, 117, 25],\n [114, 118, 27],\n [119, 119, 30],\n [124, 121, 32],\n [129, 122, 34],\n [134, 124, 37],\n [139, 126, 39],\n [144, 127, 41],\n [149, 129, 44],\n [155, 131, 46],\n [160, 133, 48],\n [165, 134, 51],\n [170, 136, 53],\n [175, 138, 55],\n [180, 139, 58],\n [185, 143, 64],\n [191, 152, 80],\n [197, 162, 96],\n [203, 171, 112],\n [209, 181, 128],\n [215, 190, 144],\n [221, 199, 160],\n [226, 209, 176],\n [232, 218, 192],\n [238, 228, 208],\n [244, 237, 224],\n [250, 246, 240],\n [255, 255, 255],\n ]\n )\n\n cm = ListedColormap(C / 255.0)\n return cm" ]
[ "0.655839", "0.6347875", "0.59854907", "0.5929425", "0.58443075", "0.5822576", "0.5790556", "0.577992", "0.57649004", "0.5739247", "0.5735068", "0.5722867", "0.5628976", "0.5559443", "0.5539078", "0.55376244", "0.55363584", "0.55267847", "0.5493117", "0.5472624", "0.5431082", "0.5405331", "0.5386706", "0.5369132", "0.53641933", "0.5360706", "0.53392726", "0.5326296", "0.52832514", "0.5283039", "0.5256548", "0.5249299", "0.52457887", "0.5238202", "0.5232347", "0.5222818", "0.52154577", "0.52012813", "0.5201167", "0.51999164", "0.5172275", "0.5164219", "0.51582515", "0.51532364", "0.51447904", "0.5125399", "0.51183045", "0.5089665", "0.5084854", "0.507993", "0.50690216", "0.50629467", "0.506203", "0.5055473", "0.50476414", "0.5047312", "0.504316", "0.5032323", "0.5026551", "0.5015326", "0.501479", "0.49977913", "0.49973604", "0.49913996", "0.49859676", "0.49857727", "0.49827847", "0.49806333", "0.4980512", "0.49708182", "0.49671045", "0.4966144", "0.49615142", "0.49588126", "0.49508336", "0.49471563", "0.49451977", "0.49442622", "0.49382332", "0.49363935", "0.49262145", "0.49247175", "0.49238276", "0.49235594", "0.49192333", "0.49178758", "0.49103287", "0.49075356", "0.4906687", "0.49064472", "0.49034432", "0.49005365", "0.48867387", "0.4856294", "0.48561934", "0.48537308", "0.48503777", "0.4850155", "0.48460874", "0.4843101" ]
0.60297304
2
Computes the DOS for each site of a slab, only for 2d
def slabldos(h,energies=np.linspace(-1.0,1.0,40),delta=None,nk=40):
  if h.dimensionality!=2: raise # nope
  ds = ldosmap(h,energies=energies,delta=delta,nk=nk)
  if len(ds[0])!=len(h.geometry.z):
    print("Wrong dimensions",len(ds[0]),len(h.geometry.z))
    raise
  f = open("DOSMAP.OUT","w")
  f.write("# energy, index, DOS, zposition\n")
  for ie in range(len(energies)):
    for iz in range(len(h.geometry.z)):
      f.write(str(energies[ie])+" ")
      f.write(str(iz)+" ")
      f.write(str(ds[ie,iz])+" ")
      f.write(str(h.geometry.z[iz])+"\n")
  f.close()
  return energies,np.transpose(ds) # return LDOS
{
  "objective": {
    "self": [],
    "paired": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
}
[ "def calc_msd(pos_x, pos_y, pos_z):\n particles = pos_x.shape[0]\n N = pos_x.shape[1] \n tamsd = np.zeros(shape = (particles, N - 1)) \n\n for p in np.arange(start = 0, stop = particles, step = 1): \n for n in np.arange(start = 1, stop = N, step = 1): \n sumdis = np.array([((pos_x[p, i + n] - pos_x[p, i]) ** 2 + (pos_y[p, i + n] - pos_y[p, i]) ** 2 + (pos_z[p, i + n] - pos_z[p, i]) ** 2) for i in np.arange(start = 1, stop = N - n, step = 1)]).sum()\n tamsd[p, n] = sumdis / (N - n) \n return tamsd", "def disc_2d(self):\n for i in range(0, self.nt):\n pd = self.p.copy()\n\n self.p[1: -1, 1: -1] = (((pd[1: -1, 2:] + pd[1: -1, :-2]) * self.dy**2 +\n (pd[2:, 1: -1] + pd[:-2, 1: -1]) * self.dx**2 -\n self.b[1: -1, 1: -1] * self.dx**2 * self.dy**2) /\n (2 * (self.dx**2 + self.dy**2)))\n\n self.p[0, :] = 0\n self.p[self.grid_points_y-1, :] = 0\n self.p[:, 0] = 0\n self.p[:, self.grid_points_x-1] = 0", "def ssd(counts, centers):\n\tn = np.sum(counts)\n\tmu = np.sum(centers * counts) / n\n\treturn np.sum(counts * ((centers - mu) ** 2))", "def kd_domain_split(counts_all, ndomains, log=null_log):\n\n split_fac = 1.35 * (float(ndomains)/np.cumprod(counts_all.shape)[-1])**(1.0/3.0)\n print('split factor', split_fac, file=log)\n # First translate the box so 0,0,0 in best posn to minimise communication\n total_shifts = []\n for axis in range(3):\n # Sum over other axes\n sum_axes = list(np.arange(len(counts_all.shape)))\n sum_axes.pop(axis)\n sum_axes = tuple(sum_axes)\n\n count_ax = counts_all.sum(axis=sum_axes, dtype=np.int64)\n # amount communicated per plane\n comm = count_ax + np.roll(count_ax, 1)\n\n total_shifts.append(np.argmin(comm))\n\n\n for axis, r in enumerate(total_shifts):\n counts_all = np.roll(counts_all, shift=-r, axis=axis)\n\n print('Best shifts', total_shifts, file=log)\n\n\n # pad\n counts_pad = np.empty(tuple(v+2 for v in counts_all.shape), dtype=counts_all.dtype)\n counts_pad[1:-1,1:-1,1:-1] = counts_all\n counts_pad[1:-1,1:-1,0] = counts_pad[1:-1,1:-1, -2]\n counts_pad[1:-1,1:-1,-1] = counts_pad[1:-1,1:-1,1]\n counts_pad[1:-1,0] = counts_pad[1:-1, -2]\n counts_pad[1:-1,-1] = counts_pad[1:-1, 1]\n counts_pad[0] = counts_pad[-2]\n counts_pad[-1] = counts_pad[1]\n\n\n domain_segments = []\n\n doms_tosplit = [((0,0,0), counts_pad, ndomains)]\n\n while len(doms_tosplit):\n dom_topleft, counts, ndom = doms_tosplit.pop(0)\n\n if ndom==1:\n # done\n dom_shape = tuple(v-2 for v in counts.shape)\n domain_segments.append((dom_topleft, dom_shape, counts.sum(dtype=np.uint64)))\n continue\n\n # Bisect this domain \n axis, split_idx, n_L = bisect_anyaxis(counts, ndom, split_fac)\n\n n_R = ndom-n_L\n\n if axis==0:\n counts_L, counts_R = counts[:split_idx+2], counts[split_idx:]\n elif axis==1:\n counts_L, counts_R = counts[:,:split_idx+2], counts[:,split_idx:] \n elif axis==2:\n counts_L, counts_R = counts[:,:,:split_idx+2], counts[:,:,split_idx:]\n else:\n raise Exception('3d only, aaargh.')\n\n # add left and right domains\n doms_tosplit.append((dom_topleft, counts_L, n_L))\n\n # top left of right domain\n dom_R_topleft = list(dom_topleft)\n dom_R_topleft[axis] += split_idx\n dom_R_topleft = tuple(dom_R_topleft)\n\n doms_tosplit.append((dom_R_topleft, counts_R, n_R))\n\n\n # sort domains biggest->smallest\n domain_segments = sorted(domain_segments, key=lambda ijk_shape_pts:-ijk_shape_pts[2])\n\n doms = np.empty(counts_all.shape, dtype=np.int16)\n\n for d,(ijk, shape, tot_pts) in enumerate(domain_segments):\n segment = tuple(slice(i,i+size) for i,size in zip(ijk, shape))\n doms[segment] = d+1\n 
real_pts = counts_all[segment].sum(dtype=np.int64)\n# print('domain', d, 'shape', shape, '{:,} pts, {:,} total'.format(real_pts, tot_pts), file=log)\n\n # Undo the total shifts\n for axis, r in enumerate(total_shifts):\n doms = np.roll(doms, shift=r, axis=axis)\n \n return doms", "def getDustDensity(grid=None, ppar=None):\n mesh = np.meshgrid(grid.x, grid.y, grid.z, indexing='ij')\n if ppar['crd_sys'] == 'sph':\n rr = mesh[0]\n tt = mesh[1]\n pp = mesh[2]\n xx = rr * np.sin(tt) * np.sin(pp)\n yy = rr * np.sin(tt) * np.cos(pp)\n zz = rr * np.cos(tt)\n cyrr = np.sqrt(xx**2. + yy**2)\n elif ppar['crd_sys'] == 'car':\n xx = mesh[0]\n yy = mesh[1]\n zz = mesh[2]\n rr = np.sqrt(xx**2 + yy**2 + zz**2)\n cyrr = np.sqrt(xx**2. + yy**2.)\n else:\n raise ValueError('crd_sys not specified in ppar')\n\n # calculate surface density\n nflat = len(ppar['dRin'])\n flat = cyrr * 0.\n for ii in range(nflat):\n flatii = fn_getflat(cyrr, ppar['dRin_w'][ii], ppar['dRin'][ii], \n ppar['dRout'][ii], ppar['dRout_w'][ii], \n ppar['dsigp'][ii], ppar['dsig0'][ii])\n flat = flat + flatii\n\n nring = len(ppar['dring_r'])\n ring = cyrr * 0\n for ii in range(nring):\n ringii = fn_getring(cyrr, ppar['dring_r'][ii], \n ppar['dring_win'][ii], ppar['dring_wout'][ii], \n ppar['dring_a'][ii])\n ring = ring + ringii\n\n nlynbell = len(ppar['dLB_Rin'])\n lynbell = cyrr * 0\n for ii in range(nlynbell):\n lynbellii = fn_getLyndenBell(cyrr, ppar['dLB_Rin'][ii], \n ppar['dLB_Rsig'][ii], ppar['dLB_sigp'][ii], \n ppar['dLB_sig0'][ii])\n lynbell = lynbell + lynbellii\n\n sig = flat + ring + lynbell\n\n # calculate the dust density\n op = dustopac.radmc3dDustOpac()\n dinfo = op.readDustInfo()\n ngs = len(dinfo['gsize'])\n dweights = dinfo['dweights']\n\n rhodust = np.zeros([grid.nx, grid.ny, grid.nz, ngs], dtype=np.float64) \n for ig in range(ngs):\n hhii = ppar['dHt'][ig] * (cyrr / ppar['dRt'][ig])**ppar['dqheight'][ig]\n rho_ig = sig / np.sqrt(2.*np.pi) / hhii * np.exp(-0.5*(zz/hhii)**2)\n rhodust[:,:,:,ig] = rho_ig * dweights\n\n reg = rhodust < ppar['cutddens']\n rhodust[reg]= ppar['cutddens']\n\n return rhodust", "def dolp(s,axis=0):\n s0 = np.take(s,0,axis)\n s12 = np.take(s,(1,2),axis)\n return np.linalg.norm(s12,axis=axis)/s0", "def density(self):\r\n return self.count_ones() / float(self.xspan * self.yspan)", "def analyse_doms(doms, counts, log):\n dom_masks = 1<<(doms.astype(np.uint64))\n\n # initialise space for masks\n ngb_masks = np.zeros_like(dom_masks)\n\n # Fill in the masks of all the neighbouring cells\n inc = [1,0,-1] # roll increments for left, middle, right\n for i in inc:\n ri = np.roll(dom_masks, i, axis=0)\n for j in inc:\n rj = np.roll(ri, j, axis=1)\n for k in inc:\n ngb_masks |= np.roll(rj, k, axis=2)\n\n\n\n count_ds, count_alls, pcts = [], [], []\n \n for d in range(doms.max()+1):\n idx = np.flatnonzero(doms==d)\n idx_all = np.flatnonzero(ngb_masks&(1<<d))\n \n count_d = counts.ravel()[idx].sum()\n count_all = counts.ravel()[idx_all].sum()\n \n pct_ghosts = ((count_all - count_d)*100)//count_all\n pcts.append(pct_ghosts)\n print('Domain %2d'%d, 'has {:,} real points, {:,} total of which'.format(count_d, count_all), \n '%d%% are ghosts'%pct_ghosts, file=log)\n\n count_ds.append(count_d)\n count_alls.append(count_all)\n\n\n\n print('Total particles {:,}, total evaluated {:,} (average ghosts {:,}%)'.format(sum(count_ds), sum(count_alls), ((sum(count_alls)-sum(count_ds))*100)//sum(count_alls)), file=log)\n print('maximum {:,} on a single proc, worst ghost percentage {:,}%'.format(max(count_alls), max(pcts)), 
file=log)", "def calc_ds_2d(slip, dx, mu, poisson = 0., expand = 0):\n\n newlen = len(slip) + 2*expand\n newslip = np.zeros(newlen)\n\n newslip[expand:expand+len(slip)] = np.copy(slip)\n \n k = np.fft.fftfreq(newlen, dx)\n\n f = np.fft.fft(newslip)\n\n f *= -mu/(1.-poisson)*np.abs(k)\n\n return np.real(np.fft.ifft(f))[expand:expand+len(slip)]", "def fs2ps2D(px, s):\n\t\tsfun = psarclength(px)\t\n\t\treturn sfun-s", "def h(pos,obj):\n return D(pos)*(distancia_nodos(pos,obj))", "def syed_dilation(data, vessel):", "def getDivisors(n):", "def sedisk(r=3, DIM=\"2D\", METRIC=\"EUCLIDEAN\", FLAT=\"FLAT\", h=0):\n from string import upper\n from numpy import resize, transpose, arange\n from numpy import sqrt, arange, transpose, maximum\n\n METRIC = upper(METRIC)\n FLAT = upper(FLAT) \n assert DIM=='2D','Supports only 2D structuring elements'\n if FLAT=='FLAT': y = binary([1])\n else: y = to_int32([h])\n if r==0: return y\n if METRIC == 'CITY-BLOCK':\n if FLAT == 'FLAT':\n b = secross(1)\n else:\n b = to_int32([[-2147483647, 0,-2147483647],\n [ 0, 1, 0],\n [-2147483647, 0,-2147483647]])\n return sedilate(y,sesum(b,r))\n elif METRIC == 'CHESSBOARD':\n if FLAT == 'FLAT':\n b = sebox(1)\n else:\n b = to_int32([[1,1,1],\n [1,1,1],\n [1,1,1]])\n return sedilate(y,sesum(b,r))\n elif METRIC == 'OCTAGON':\n if FLAT == 'FLAT':\n b1,b2 = sebox(1),secross(1)\n else:\n b1 = to_int32([[1,1,1],[1,1,1],[1,1,1]])\n b2 = to_int32([[-2147483647, 0,-2147483647],\n [ 0, 1, 0],\n [-2147483647, 0,-2147483647]])\n if r==1: return b1\n else: return sedilate( sedilate(y,sesum(b1,r//2)) ,sesum(b2,(r+1)//2))\n elif METRIC == 'EUCLIDEAN':\n v = arange(-r,r+1)\n x = resize(v, (len(v), len(v)))\n y = transpose(x)\n Be = binary(sqrt(x*x + y*y) <= (r+0.5))\n if FLAT=='FLAT':\n return Be\n be = h + to_int32( sqrt( maximum((r+0.5)*(r+0.5) - (x*x) - (y*y),0)))\n be = intersec(gray(Be,'int32'),be)\n return be\n else:\n assert 0,'Non valid metric'\n return B", "def cellsize_2d(self):\t\r\n return self.dx * self.dy", "def brute_force_msd(pos):\n pos = np.asarray(pos)\n if pos.ndim==1:\n pos = pos.reshape((-1,1))\n trajectory_length = len(pos)\n msd = np.zeros(trajectory_length)\n msd_count = np.zeros(trajectory_length)\n for i in range(trajectory_length):\n for j in range(i, trajectory_length):\n msd[j-i] += np.sum((pos[i]-pos[j])**2)\n msd_count[j-i] += 1\n msd = msd/msd_count\n return msd", "def mcintosh_d(counts):\n u = sqrt((counts*counts).sum())\n n = counts.sum()\n return (n-u)/(n-sqrt(n))", "def multi_ldos(h,es=[0.0],delta=0.001,nrep=3,nk=2,numw=3,random=False):\n print(\"Calculating eigenvectors in LDOS\")\n if h.is_sparse: # sparse Hamiltonian\n from bandstructure import smalleig\n print(\"SPARSE Matrix\")\n evals,ws = [],[] # empty list\n ks = klist.kmesh(h.dimensionality,nk=nk) # get grid\n hk = h.get_hk_gen() # get generator\n for k in ks: # loop\n print(\"Diagonalizing in LDOS, SPARSE mode\")\n if random:\n k = np.random.random(3) # random vector\n print(\"RANDOM vector in LDOS\")\n e,w = smalleig(hk(k),numw=numw,evecs=True)\n evals += [ie for ie in e]\n ws += [iw for iw in w]\n# evals = np.concatenate([evals,e]) # store\n# ws = np.concatenate([ws,w]) # store\n# raise\n# (evals,ws) = h.eigenvectors(nk) # get the different eigenvectors\n else:\n print(\"DENSE Matrix\")\n (evals,ws) = h.eigenvectors(nk) # get the different eigenvectors\n ds = [(np.conjugate(v)*v).real for v in ws] # calculate densities\n del ws # remove the wavefunctions\n os.system(\"rm -rf MULTILDOS\") # remove folder\n os.system(\"mkdir MULTILDOS\") # 
create folder\n go = h.geometry.copy() # copy geometry\n go = go.supercell(nrep) # create supercell\n fo = open(\"MULTILDOS/MULTILDOS.TXT\",\"w\") # files with the names\n for e in es: # loop over energies\n print(\"MULTILDOS for energy\",e)\n out = np.array([0.0 for i in range(h.intra.shape[0])]) # initialize\n for (d,ie) in zip(ds,evals): # loop over wavefunctions\n fac = delta/((e-ie)**2 + delta**2) # factor to create a delta\n out += fac*d # add contribution\n out /= np.pi # normalize\n out = spatial_dos(h,out) # resum if necessary\n name0 = \"LDOS_\"+str(e)+\"_.OUT\" # name of the output\n name = \"MULTILDOS/\" + name0\n write_ldos(go.x,go.y,out.tolist()*(nrep**h.dimensionality),\n output_file=name) # write in file\n fo.write(name0+\"\\n\") # name of the file\n fo.flush() # flush\n fo.close() # close file\n # Now calculate the DOS\n from dos import calculate_dos\n es2 = np.linspace(min(es),max(es),len(es)*10)\n ys = calculate_dos(evals,es2,delta) # use the Fortran routine\n from dos import write_dos\n write_dos(es2,ys,output_file=\"MULTILDOS/DOS.OUT\")", "def TSC_density(pos, gridsize, boxsize, mass, periodic=True):\n rho = np.zeros((gridsize,gridsize,gridsize), dtype='float64')\n\n Npart = len(pos)\n pos = np.array(pos, dtype='float64', order='C')\n\n crunch.TSCDensity(pos, rho, Npart, boxsize, gridsize, mass)\n\n return rho", "def calc_ds_3d(slip_x, slip_y, dx, dy, mu, poisson, expand = 0):\n\n newlenx = len(slip_x)+2*expand\n newleny = len(slip_x[0])+2*expand\n\n newslipx = np.zeros((newlenx, newleny))\n newslipy = np.zeros((newlenx, newleny))\n newslipx[expand:expand+len(slip_x),expand:expand+len(slip_x[0])] = np.copy(slip_x)\n newslipy[expand:expand+len(slip_x),expand:expand+len(slip_x[0])] = np.copy(slip_y)\n \n k = np.fft.fftfreq(newlenx, dx)\n m = np.fft.fftfreq(newleny, dy)\n\n kxy, mxy = np.meshgrid(k, m, indexing='ij')\n\n kmag = np.sqrt(kxy**2+mxy**2)\n kmag[0,0] = 1.\n\n fx = np.fft.fft2(newslipx)\n fy = np.fft.fft2(newslipy)\n\n sx = -mu/2./kmag*(1./(1.-poisson)*(kxy**2*fx+mxy*kxy*fy)+(mxy**2*fx-mxy*kxy*fy))\n sy = -mu/2./kmag*(1./(1.-poisson)*(mxy**2*fy+mxy*kxy*fx)+(kxy**2*fy-mxy*kxy*fx))\n\n return (np.real(np.fft.ifft2(sx))[expand:expand+len(slip_x),expand:expand+len(slip_x[0])],\n np.real(np.fft.ifft2(sy))[expand:expand+len(slip_x),expand:expand+len(slip_x[0])])", "def _kde_local(loc, data, bw, lmbda):\n\n l_s_bw = bw * lmbda\n d = (loc - data).T / l_s_bw\n s = (_norm_pdf(d) / l_s_bw).T\n\n kde = 0.0\n for r in range(s.shape[0]):\n kde += s[r].prod()\n return kde", "def dVdx(self, sys):\n dx = sys.positions - self.x0\n for i in range(len(dx)):\n self._local_dVdx[i] = 6.0*self.sigma[i]*dx[i]**5\n return self._local_dVdx", "def term_1(\n omega1, # vorticity-1\n omega2, # vorticity-2\n omega3, # vorticity-3\n enst, # enstrophy\n nu_sgs, # turbulent viscosity\n h = True): # spatial step size\n #---------------------------------------------------------------------#\n # Setting default values #\n #---------------------------------------------------------------------#\n if h is True:\n h = 2.0*np.pi/64.0\n #---------------------------------------------------------------------#\n # Preallocating space #\n #---------------------------------------------------------------------#\n term = np.zeros((64,64,64))\n #---------------------------------------------------------------------#\n # Enstrophy term #\n #---------------------------------------------------------------------#\n term += np.gradient(\\\n np.gradient(enst, h, edge_order=2)[2], h, edge_order=2)[2]\n term += 
np.gradient(\\\n np.gradient(enst, h, edge_order=2)[1], h, edge_order=2)[1]\n term += np.gradient(\\\n np.gradient(enst, h, edge_order=2)[0], h, edge_order=2)[0]\n #---------------------------------------------------------------------#\n # Dissipation #\n #---------------------------------------------------------------------#\n omega1_grad = np.gradient(omega1, h, edge_order=2)\n omega2_grad = np.gradient(omega2, h, edge_order=2)\n omega3_grad = np.gradient(omega3, h, edge_order=2)\n term -= np.square(omega1_grad[2])\n term -= np.square(omega1_grad[1])\n term -= np.square(omega1_grad[0])\n term -= np.square(omega2_grad[2])\n term -= np.square(omega2_grad[1])\n term -= np.square(omega2_grad[0])\n term -= np.square(omega3_grad[2])\n term -= np.square(omega3_grad[1])\n term -= np.square(omega3_grad[0])\n #---------------------------------------------------------------------#\n # Applying the subgrid stress #\n #---------------------------------------------------------------------#\n term *= nu_sgs\n\n return term", "def scattering_efficiency(self):\r\n n = np.arange(1, self.n + 1)\r\n return 2*(np.linalg.norm(np.sqrt(2*n+1)*self.a)\r\n + np.linalg.norm(self.b))/self.x**2", "def _densityctr(self, rangex, rangey, dim = misc.DEF_VIS_DIM):\n gr = N.meshgrid(rangex, rangey)\n x = gr[0].flatten()\n y = gr[1].flatten()\n xdata = N.concatenate((x[:, N.newaxis], y[:, N.newaxis]), axis = 1)\n dmu = self.mu[:, dim]\n dva = self._get_va(dim)\n den = GM.fromvalues(self.w, dmu, dva).pdf(xdata, log = True)\n den = den.reshape(len(rangey), len(rangex))\n\n return gr[0], gr[1], den", "def decoherence(self,system):\n for i in range(self.n):\n for j in range(i,self.n):\n for item in self.decoherence_matrix[i][j]:\n tmp=Expolist([Expo(item[2],0)])\n t = int(self.index(item[0],item[1]))\n system[int(self.index(i,j))][t]+=tmp\n return system", "def sigmai_dep(ptem, psal, pref):\n zr4 = 4.8313e-4\n zd =-2.042967e-2\n zrau0 = 1000.e0\n \n sigmai_dep_out = zeros(psal.shape)\n \n # ?? for whatever reason sqrt(abs(psal)) seems to kick up a fuss when arrays\n # exceed a certain size...??? 
otherwise this could be vectorised\n # TODO: if pref is a number, broadcast it into a 2d field\n \n for jj in range(psal.shape[0]): # python indexing\n for ji in range(psal.shape[1]):\n \n ztem = ptem[jj, ji]\n zsal = psal[jj, ji]\n zws = sqrt( abs(psal[jj, ji]) )\n \n # Compute the volumic mass of pure water at atmospheric pressure.\n zr1 = ( ( ( ( (6.536332e-9 * ztem - 1.120083e-6) * ztem + 1.001685e-4 )\n * ztem - 9.095290e-3 ) * ztem + 6.793952e-2 ) * ztem + 999.842594e0\n )\n\n # Compute the seawater volumic mass at atmospheric pressure.\n zr2 = ( ( ( ( 5.3875e-9 * ztem - 8.2467e-7) * ztem + 7.6438e-5)\n * ztem - 4.0899e-3) * ztem + 0.824493e0\n )\n\n zr3 = (-1.6546e-6 * ztem + 1.0227e-4) * ztem - 5.72466e-3\n\n # Compute the potential volumic mass (referenced to the surface).\n zrhop = (zr4 * zsal + zr3 * zws + zr2) * zsal + zr1\n\n # Compute the compression terms.\n ze = (-3.508914e-8 * ztem - 1.248266e-8) * ztem - 2.595994e-6\n\n zbw = (1.296821e-6 * ztem - 5.782165e-9) * ztem + 1.045941e-4\n\n zb = zbw + ze * zsal\n\n zc = (-7.267926e-5 * ztem + 2.598241e-3) * ztem + 0.1571896e0\n\n zaw = ( ( (5.939910e-6 * ztem + 2.512549e-3) * ztem - 0.1028859e0 ) \n * ztem - 4.721788e0\n )\n\n za = (zd * zws + zc) * zsal + zaw\n\n zb1 = (-0.1909078e0 * ztem + 7.390729e0) * ztem - 55.87545e0\n\n za1 = ( ( (2.326469e-3 * ztem + 1.553190e0) * ztem - 65.00517e0)\n * ztem + 1044.077e0\n )\n\n zkw = ( ( ( (-1.361629e-4 * ztem - 1.852732e-2) * ztem - 30.41638e0)\n * ztem + 2098.925e0) * ztem + 190925.60\n )\n\n zk0 = (zb1 * zws + za1) * zsal + zkw\n\n # Compute the potential density anomaly.\n sigmai_dep_out[jj, ji] = ( zrhop / (1.0e0 - pref / \n ( zk0 - pref * (za - pref * zb) ) )\n - zrau0\n )\n \n return sigmai_dep_out", "def _computeStikeDip(self):\n seg = self._group_index\n groups = np.unique(seg)\n ng = len(groups)\n norm_vec = Vector(0, 0, 0)\n north_vec = Vector(0, 0, 0)\n up_vec = Vector(0, 0, 0)\n for i in range(ng):\n group_segments = np.where(groups[i] == seg)[0]\n nseg = len(group_segments) - 1\n for j in range(nseg):\n ind = group_segments[j]\n P0 = Point(self._toplons[ind],\n self._toplats[ind],\n self._topdeps[ind])\n P1 = Point(self._toplons[ind + 1],\n self._toplats[ind + 1],\n self._topdeps[ind + 1])\n P2 = Point(self._botlons[ind + 1],\n self._botlats[ind + 1],\n self._botdeps[ind + 1])\n P3 = Point(self._botlons[ind],\n self._botlats[ind],\n self._botdeps[ind])\n P1up = Point(self._toplons[ind + 1],\n self._toplats[ind + 1],\n self._topdeps[ind + 1] - 1.0)\n P1N = Point(self._toplons[ind + 1],\n self._toplats[ind + 1] + 0.001,\n self._topdeps[ind + 1])\n P3up = Point(self._botlons[ind],\n self._botlats[ind],\n self._botdeps[ind] - 1.0)\n P3N = Point(self._botlons[ind],\n self._botlats[ind] + 0.001,\n self._botdeps[ind])\n p0 = Vector.fromPoint(P0)\n p1 = Vector.fromPoint(P1)\n p2 = Vector.fromPoint(P2)\n p3 = Vector.fromPoint(P3)\n p1up = Vector.fromPoint(P1up)\n p1N = Vector.fromPoint(P1N)\n p3up = Vector.fromPoint(P3up)\n p3N = Vector.fromPoint(P3N)\n\n # Sides\n s01 = p1 - p0\n s02 = p2 - p0\n s03 = p3 - p0\n s21 = p1 - p2\n s23 = p3 - p2\n\n # First triangle\n t1norm = (s02.cross(s01)).norm()\n a = s01.mag()\n b = s02.mag()\n c = s21.mag()\n s = (a + b + c) / 2\n A1 = np.sqrt(s * (s - a) * (s - b) * (s - c)) / 1000\n\n # Second triangle\n t2norm = (s03.cross(s02)).norm()\n a = s03.mag()\n b = s23.mag()\n c = s02.mag()\n s = (a + b + c) / 2\n A2 = np.sqrt(s * (s - a) * (s - b) * (s - c)) / 1000\n\n # Up and North\n p1up = (p1up - p1).norm()\n p3up = (p3up - p3).norm()\n p1N = 
(p1N - p1).norm()\n p3N = (p3N - p3).norm()\n\n # Combine\n norm_vec = norm_vec + A1 * t1norm + A2 * t2norm\n north_vec = north_vec + A1 * p1N + A2 * p3N\n up_vec = up_vec + A1 * p1up + A2 * p3up\n\n norm_vec = norm_vec.norm()\n north_vec = north_vec.norm()\n up_vec = up_vec.norm()\n\n # Do I need to flip the vector because it is pointing down (i.e.,\n # right-hand rule is violated)?\n flip = np.sign(up_vec.dot(norm_vec))\n norm_vec = flip * norm_vec\n\n # Angle between up_vec and norm_vec is dip\n self._dip = np.arcsin(up_vec.cross(norm_vec).mag()) * 180 / np.pi\n\n # Normal vector projected to horizontal plane\n nvph = (norm_vec - up_vec.dot(norm_vec) * up_vec).norm()\n\n # Dip direction is angle between nvph and north; strike is orthogonal.\n cp = nvph.cross(north_vec)\n sign = np.sign(cp.dot(up_vec))\n dp = nvph.dot(north_vec)\n strike = np.arctan2(sign * cp.mag(), dp) * 180 / np.pi - 90\n if strike < -180:\n strike = strike + 360\n self._strike = strike", "def ISE_loop(mu, s, DIMENSION=2):\n total = 0\n for i in range(len(mu)):\n for j in range(len(mu)):\n dist_sq = np.sum((mu[i]-mu[j])**2)\n total += (i != j)*(1/(s*s*2*np.pi))**(0.5*DIMENSION)*np.exp(-dist_sq/(2*s*s))\n return (2*total/len(mu)/(len(mu)-1))", "def spd(contingency_table: np.ndarray) -> List[float]:\n numerators = contingency_table[:, 1]\n denominators = contingency_table.sum(1)\n numerators[denominators == 0] = 0 # Handle division by zero:\n denominators[denominators == 0] = 1 # 0/0 => 0/1.\n fractions = numerators / denominators\n spd = fractions[0] - fractions[1]\n return [spd]", "def find_large_separation(self):\n\n x = self.modes['n'] # radial order\n y = self.modes['freq'] # frequency\n wid = (0.66*self.numax**0.88)/2/np.sqrt(2*np.log(2.0))\n w = (np.exp((-(y-self.numax)**2)/(2*wid**2))) # weight\n\n mN = np.sum(w)*np.sum(w*x*y) - np.sum(w*x)*np.sum(w*y)\n D = np.sum(w)*np.sum(w*x**2) - np.sum(w*x)**2\n Dn = mN/D\n #print Dn\n\n return Dn", "def CRS(site):\n return np.dot(CR(np.pi/2**(site)),SWAP)", "def dVdx(self, sys):\n dx2 = sys.positions * sys.positions - self.x0 * self.x0\n return 4 * self.A * sys.positions * dx2", "def visc(s, t, p):\n s, t, p = map(np.asanyarray, (s, t, p))\n return (1e-4 * (17.91 - 0.5381 * t + 0.00694 * t ** 2 + 0.02305 * s) /\n sw.dens(s, t, p))", "def dm_sigma_shape(sps):\n env = get_envelope(sps)\n i_xs, i_ys = interp_envelope(env)\n G_fit = fit_gauss(i_xs, i_ys)\n L_fit = fit_lorentz(i_xs, i_ys)\n e_xs, e_ys = env.dm, env.sigma\n g_ys, l_ys = G_fit(e_xs), L_fit(e_xs)\n return (chisquare(e_ys, g_ys, 2)[0], chisquare(e_ys, l_ys, 2)[0])", "def directtion(segment,neighbours,segmentsMeta):\n headTail= list(map(lambda x : 'Head' if x in segmentsMeta.at[segment,'outs'] else 'Tail',neighbours ))\n cosSinDiff = np.fromiter(map(lambda x,y :np.exp(abs(segmentsMeta['cos'+y][x]-segmentsMeta['cos'+y][segment])+abs(segmentsMeta['sin'+y][x]-segmentsMeta['sin'+y][segment]))-1 ,neighbours,headTail),np.float)\n #return np.fromiter(map(lambda x : 1/(1+np.exp(6*x)) ,cosSinDiff),np.float)\n oneCos = cosSinDiff[0] if cosSinDiff[0] <= np.exp(1) else np.inf\n return [oneCos]", "def _ss(data):\n c = sum(data)/len(data)\n ss = sum((x-c)**2 for x in data)\n return ss", "def density_2D(tensor):\n return 1 - sparsity_2D(tensor)", "def make_phasedbeam(self):\n\n self.phasedbeam = n.zeros((len(self.dmarr),len(self.reltime)), dtype='float64')\n\n for i in xrange(len(self.dmarr)):\n self.phasedbeam[i] = self.dedisperse(dmbin=i).mean(axis=3).mean(axis=2).mean(axis=1).real # dedisperse and mean\n print 'dedispersed for 
', self.dmarr[i]", "def diagonals_in_hd():\n number_of_pairs = 100000\n angles_for_d = {}\n for d in (10, 100, 1000):\n number_of_corners = 2 ** d - 1\n first_corner = [random.randint(0, number_of_corners) for _ in range(0, number_of_pairs)]\n second_corner = [random.randint(0, number_of_corners) for _ in range(0, number_of_pairs)]\n\n dummy_d = [d for _ in range(0, number_of_pairs)]\n angles = []\n with cf.ProcessPoolExecutor() as executor:\n results = executor.map(find_angle, first_corner, second_corner, dummy_d)\n for result in results:\n angles.append(result)\n ser = pd.Series(angles)\n print(f\"Angles between diagonals for {d} dimensions\")\n print(ser.describe())\n angles_for_d[d] = ser\n\n plot_pmfs_for_ds(angles_for_d)", "def trans_eandg(self):\n n_site = self.status.give(keyword=\"n_site\")\n sites = self.status.give(keyword=\"sites\")\n # print dist\n for i in xrange(n_site):\n # force act on site i. f_r dr_i\n for j in xrange(i+1, n_site):\n ibody = sites[i]; jbody = sites[j]\n rij = jbody.pos -ibody.pos\n r = np.linalg.norm(rij)\n drdi = - rij / r\n drdj = - drdi\n self.status.potential_energy += self.energy(r) \n ibody.force += - self.gradient(r) * drdi\n jbody.force += - self.gradient(r) * drdj\n # for ibody in sites:\n # print \"na force\", ibody.force\n return", "def get_SSD():\n dist = 0\n # traversal of pixels in potential Bi+1 block\n # compare corresponding pixel positions with source block in f1 and neighbour block in f2\n y1 = center_y1 - block_rad # start pos.\n for y2 in range(center_y2 - block_rad, (center_y2 - block_rad + block_size)):\n x1 = center_x1 - block_rad # start pos\n for x2 in range(center_x2 - block_rad, (center_x2 - block_rad + block_size)):\n try:\n # displacement formula for RGB channels of each pixel in block\n dist = dist + (frame1[y1][x1][0] - frame2[y2][x2][0])**2 + (frame1[y1][x1][1] - frame2[y2][x2][1])**2 + (frame1[y1][x1][2] - frame2[y2][x2][2])**2\n except RuntimeWarning:\n pass\n x1 += 1\n y1 += 1\n return math.sqrt(dist)", "def find_halos(pos, ngrid, log, level=3000):\n print('Binning particles', file=log)\n cells = get_cells(pos, ngrid, log)\n count = bincount(cells, minlength=ngrid**3)\n count.shape = (ngrid,ngrid,ngrid)\n print('Count in', count.min(), count.max(), file=log)\n idx = flatnonzero(count>level)\n print('Number of cells above', level, 'is', len(idx), file=log)\n \n \n labels, num_features = ndimage.label(count>level)\n print('Number fo features', num_features, file=log)\n print('Labels in', labels.min(), labels.max(), file=log)\n locations = ndimage.find_objects(labels)\n\n dense_regions = []\n\n for i in range(num_features):\n loc = locations[i]\n hw = max(l.stop - l.start for l in loc) * 0.5 /ngrid\n hw_padded = hw + 0.0/ngrid\n\n ctr =[(0.5/ngrid)*(l.stop + l.start) for l in loc]\n count_i = count[loc][labels[loc]==(i+1)].sum()\n print('Count', count_i, file=log)\n dense_regions.append((count_i, ctr, hw_padded))\n\n # sort by number of particles in the region\n dense_regions = sorted(dense_regions, key = lambda num_ctr_hw :num_ctr_hw[0], reverse=True)\n\n return dense_regions", "def ddspmt(t):\n return (spmt(t) - _spm_dd_func(t)) / 0.01", "def div(f,dx,dy,dz,x=[],y=[],z=[],param=[],dim=[]):\n if (f.ndim != 4):\n print(\"div: must have vector 4-D array f[mvar,mz,my,mx] for divergence\")\n raise ValueError\n if not param:\n param = read_param(quiet=True)\n if not dim:\n dim = read_dim()\n gd = read_grid(quiet=True, param=param)\n if len(x) < 1:\n x = gd.x\n y = gd.y\n z = gd.z\n\n div = 
xder(f[0,...],dx,x=x,y=y,z=z,param=param,dim=dim) +\\\n yder(f[1,...],dy,x=x,y=y,z=z,param=param,dim=dim) +\\\n zder(f[2,...],dz,x=x,y=y,z=z,param=param,dim=dim)\n\n if param.coord_system == 'cylindric':\n div += f[0,...]/x\n if param.coord_system == 'spherical':\n sin_y = N.sin(y)\n cos_y = N.cos(y)\n i_sin = N.where(N.abs(sin_y) < 1e-5)[0]\n if i_sin.size > 0:\n cos_y[i_sin] = 0.; sin_y[i_sin] = 1\n x_1, cotth = N.meshgrid(1./gd.x, cos_y/sin_y)\n div += 2*f[0,...]*x_1 + f[1,...]*x_1*cotth\n \n return div", "def dof_satt(self):\n d1 = self.d1\n d2 = self.d2\n # this follows blindly the SPSS manual\n # except I use ``_var`` which has ddof=0\n sem1 = d1._var / (d1.nobs - 1)\n sem2 = d2._var / (d2.nobs - 1)\n semsum = sem1 + sem2\n z1 = (sem1 / semsum) ** 2 / (d1.nobs - 1)\n z2 = (sem2 / semsum) ** 2 / (d2.nobs - 1)\n dof = 1.0 / (z1 + z2)\n return dof", "def direct(sun_pos, grid):\n\n # for each pixel at top of grid pass sun rays in\n for i in xrange(grid.gr.shape[0]):\n \"\"\"\n Make an array starting at loc\n \"\"\"\n xpos = i * grid.xres\n ypos = grid.zres * grid.zsize\n pos = np.array(xpos, ypos)\n direction = pos - sun_pos / np.norm(pos - sun_pos) # this location minus \n r = ray(pos, direction)\n \"\"\"\n The ray now travels down through the canopy being\n altered by transmission and reflectance\n\n amount of scattering vs absorption is determined by leaf area density\n\n \"\"\"", "def stempot(self,xmax,ymax,nx,ny,atms,pixelshift,scalefactor):\n #zed=2 for rutherford scattering of the nucleus, less for screening\n zed = 1.7\n\n ix = numpy.arange(1.0,nx)\n iy = numpy.arange(1.0,ny)\n dx = xmax/nx\n dy = ymax/ny\n rx = numpy.arange(0,xmax-dx,dx)\n ry = numpy.arange(0,ymax-dy,dy)\n\n Zatom = atms.get_atomic_numbers()\n #translate atoms such that the center of mass is in the center of the computational cell\n com = atms.get_center_of_mass()\n #com = [ 44.40963074 , 44.65497562 , 44.90406073] #for AuNP\n #com = numpy.array(com)\n #print 'com',com -0.149836425, 0.29967285, 0\n #com += [0.41205016875, 0.6742639125, 0] #for rotated line profile \n #com += [-0.149836425, 0.29967285, 0] #for AuNP\n #com += pixelshift\n #print 'com+pixelshift',com\n cop = xmax/2.0\n trans = [cop-i for i in com]\n atms.translate(trans)\n positions=atms.get_positions()\n ax=[]\n ay=[]\n az=[]\n for o,t,h in positions:\n ax.append(o)\n ay.append(t)\n az.append(h)\n ax = numpy.array(ax)\n ay = numpy.array(ay)\n az = numpy.array(az)\n amax = len(Zatom)\n\n #find boundaries of slice\n axmin = min(ax)\n axmax = max(ax)\n aymin = min(ay)\n aymax = max(ay)\n\n V= numpy.zeros((nx,ny))\n\n #map x and y coords of the atoms to the nearest grid points\n #A fraction of the atom must be assigned to the closest gridpoints\n #to avoid sum and difference frequencies appearing in the image\n #grid point to the left of the atom\n ix = numpy.array([math.floor(axi/dx) for axi in ax])\n #apply periodic boundary conditions\n iax = numpy.array([math.fmod(iaxi,nx) for iaxi in ix])\n ibx = numpy.array([math.fmod(iaxi+1,nx) for iaxi in ix])\n #fraction of atom at iax\n fax = numpy.array([1-math.fmod((axi/dx),1 ) for axi in ax])\n #grid point above the atom\n iy = numpy.array([math.floor(ayi/dy) for ayi in ay])\n #apply periodic boundary conditions\n iay = numpy.array([math.fmod(iayi,ny) for iayi in iy])\n iby = numpy.array([math.fmod(iayi+1,ny) for iayi in iy])\n #fraction of atom at iay \n fay = numpy.array([1-math.fmod((ayi/dy),1 ) for ayi in ay])\n #Add each atom to the potential grid\n V1 = numpy.array([fax[i] * fay[i] * (Zatom[i]**zed) 
for i in range(len(fax))])\n V2 = numpy.array([(1-fax[i]) * fay[i] * (Zatom[i]**zed) for i in range(len(fax))])\n V3 = numpy.array([fax[i] * (1-fay[i]) * (Zatom[i]**zed) for i in range(len(fax))])\n V4 = numpy.array([(1-fax[i]) * (1-fay[i]) * (Zatom[i]**zed) for i in range(len(fax))])\n #V1 = numpy.array([fax[i] * fay[i] * scalefactor for i in range(len(fax))])\n #V2 = numpy.array([(1-fax[i]) * fay[i] * scalefactor for i in range(len(fax))])\n #V3 = numpy.array([fax[i] * (1-fay[i]) * scalefactor for i in range(len(fax))])\n #V4 = numpy.array([(1-fax[i]) * (1-fay[i]) * scalefactor for i in range(len(fax))])\n\n for j in range(amax):\n V[iax[j],iay[j]] += V1[j]\n V[ibx[j],iay[j]] += V2[j]\n V[iax[j],iby[j]] += V3[j]\n V[ibx[j],iby[j]] += V4[j]\n rev_trans = [-1.0*i for i in trans]\n atms.translate(rev_trans)\n return V", "def get_MSD(block1, block2):\n #print(block1.shape)\n #print(block2.shape)\n return sum(sum(abs(block1 - block2) ** 2))", "def mesh_uniform(N_e, d, Omega):", "def density(ensembles):\n if len(ensembles.shape) < 2:\n return ketbra(ensembles)\n else:\n den_mat = ketbra(ensembles[0])\n for i in range(1, len(ensembles)):\n den_mat += ketbra(ensembles[i])\n den_mat /= len(ensembles)\n return den_mat", "def calcula_desvios_padrao(self):\n self.dp_x1 = np.sqrt(self.var_x1)\n self.dp_w1 = np.sqrt(self.var_w1)\n self.dp_nq1 = np.sqrt(self.var_nq1)\n self.dp_ns1 = np.sqrt(self.var_ns1)\n self.dp_n1 = np.sqrt(self.var_n1)\n self.dp_t1 = np.sqrt(self.var_t1)\n self.dp_w1_med = np.sqrt(self.var_w1_med)\n\n self.dp_x2 = np.sqrt(self.var_x2)\n self.dp_w2 = np.sqrt(self.var_w2)\n self.dp_nq2 = np.sqrt(self.var_nq2)\n self.dp_ns2 = np.sqrt(self.var_ns2)\n self.dp_n2 = np.sqrt(self.var_n2)\n self.dp_t2 = np.sqrt(self.var_t2)\n self.dp_w2_med = np.sqrt(self.var_w2_med)", "def MSD_Sham(dir, side = \"dx\", delimiter = \"\\t\"):\n\n x = pd.DataFrame()\n y = pd.DataFrame()\n for fname in os.listdir(dir):\n if side in fname:\n k = fname.split('_')[-2]\n k = int(k)\n #df = grid(dir + fname + str(i+1) + side + \".txt\", delimiter = delimiter)\n dfx, dfy = necklace_points(dir + fname,N=80, sep = delimiter )\n x[k] = dfx\n y[k] = dfy\n col = np.arange(1,len(x.T))\n x = x[col]\n count = 0\n mean = np.zeros(len(x.T))\n for i in range(len(mean)):\n mean = mean + tidynamics.msd(x.T[i])\n count+=1\n\n mean/=count\n return mean, x , y", "def get_slab_trans_params(slab):\n slab = slab.copy()\n if slab.site_properties.get(\"surface_properties\"):\n adsorbate_indices = [\n slab.index(s)\n for s in slab\n if s.properties[\"surface_properties\"] == \"adsorbate\"\n ]\n slab.remove_sites(adsorbate_indices)\n\n # Note: this could fail if the slab is non-contiguous in the c direction,\n # i. e. 
if sites are translated through the pbcs\n heights = [np.dot(s.coords, slab.normal) for s in slab]\n\n # Pad the slab thickness a bit\n slab_thickness = np.abs(max(heights) - min(heights)) + 0.001\n bulk_a, bulk_b, bulk_c = slab.oriented_unit_cell.lattice.matrix\n bulk_normal = np.cross(bulk_a, bulk_b)\n bulk_normal /= np.linalg.norm(bulk_normal)\n bulk_height = np.abs(np.dot(bulk_normal, bulk_c))\n slab_cell_height = np.abs(np.dot(slab.lattice.matrix[2], slab.normal))\n\n total_layers = slab_cell_height / bulk_height\n slab_layers = np.ceil(slab_thickness / slab_cell_height * total_layers)\n vac_layers = total_layers - slab_layers\n\n min_slab_size = slab_cell_height * slab_layers / total_layers - 0.001\n min_vac_size = slab_cell_height * vac_layers / total_layers - 0.001\n\n return {\n \"miller_index\": [0, 0, 1],\n \"shift\": slab.shift,\n \"min_slab_size\": min_slab_size,\n \"min_vacuum_size\": min_vac_size,\n }", "def densidad(qe):\r\n global x,rhoe,rhoi,dx,nparticulas,npuntos_malla,pared_izquierda,pared_derecha\r\n \r\n j1 = sp.dtype(sp.int32) # Asegura que la variable permanezca entera\r\n j2 = sp.dtype(sp.int32) \r\n \r\n # Factor de ponderacion de carga \r\n re = qe/dx \r\n # Densidad electronica \r\n rhoe = sp.zeros(npuntos_malla+1) \r\n # Mapa de cargas sobre la malla\r\n for i in range(nparticulas):\r\n xa = x[i]/dx # xparticula/dx\r\n j1 = int(xa) # indices de la malla fija xmalla/dx\r\n j2 = j1 + 1 # Siguiente punto en la malla\r\n f2 = xa - j1 # |xmalla - xparticula|/dx\r\n f1 = 1.0 - f2\r\n rhoe[j1] = rhoe[j1] + re*f1\r\n rhoe[j2] = rhoe[j2] + re*f2\r\n\r\n # Condiciones de frontera periodica\r\n rhoe[0] += rhoe[npuntos_malla]\r\n rhoe[npuntos_malla] = rhoe[0]\r\n \r\n # Se agrega una densidad de iones neutral\r\n rhoi = rho0\r\n\r\n return True", "def sq_wtd_voxels(ndomains, pos, rcrit, ngrid=None, log=null_log):\n\n if ngrid is None:\n # To make each cell non-ghost dominated would really like\n # 0.1/rcrit, but I rely a bit on adjacent cells being in \n # the same domain.\n ngrid = int(0.15/rcrit)\n\n if ndomains>64:\n raise Exception('{:,}>64, maximum domains for single master'.format(ndomains))\n\n tot_cells = ngrid**3\n cells = get_cells(pos, ngrid, log)\n assert(cells.min()>=0)\n assert(cells.max()<tot_cells)\n counts = np.bincount(cells, minlength=tot_cells)\n cell_ids = np.arange(tot_cells)\n\n doms = domain_region_grow(np.reshape(counts, (ngrid,ngrid,ngrid)), ndomains)-1\n assert((doms>=0).all()) # check we covered all the domains\n cells_per_dmn = np.bincount(doms.ravel())\n tot_per_dmn = np.bincount(doms.ravel()[cells])\n print(\"Domain totals:\", ', '.join('%d'%npts for npts in tot_per_dmn), file=log)\n\n cell_masks = 1<<(doms.astype(np.uint64))\n pos_masks = cell_masks.ravel()[cells].copy()\n\n prettyprint_cellmasks(cell_masks, log)\n # 'Bleed' the cell masks to find ghosts\n ngb_masks = adjacent_cell_masks(pos, cell_masks, rcrit, log)\n\n for dom, mask in enumerate(np.unique(pos_masks)):\n # find all the positions needed in this domain\n idx_domain = np.flatnonzero(ngb_masks & mask)\n # which of these are non-ghosts\n idx_non_ghosts = np.flatnonzero(np.equal(pos_masks[idx_domain], mask))\n\n pct_ghosts = (100*(len(idx_domain)-len(idx_non_ghosts)))//len(idx_domain)\n\n txt = 'Domain %d: '%dom + \\\n '{:,} particles of which {:,}% are ghosts ({:,} domain cells)'.format(len(idx_domain),\n pct_ghosts, cells_per_dmn[dom])\n # count parts+ghosts\n if pct_ghosts<50:\n print(txt, file=log)\n else:\n print(MarkUp.WARNING+txt+MarkUp.ENDC, file=log) \n yield idx_domain, 
idx_non_ghosts", "def ddds(cloud, sample_size, presample=None, processes=10):\n\n #--------------------------------------------------------------------------\n\n ### Prep\n\n # Handle small point clouds\n if cloud.shape[0] <= sample_size:\n warn(\"(code 1) Point cloud is already <= desired sample size. \" +\n \"No subsampling is performed.\")\n return cloud\n\n\n #--------------------------------------------------------------------------\n\n ### Compute per-landmark local densities\n\n # Subsample randomly (for speed/memory efficiency)\n if presample is not None:\n cloud_presubs = random_subsample(cloud, presample)\n else:\n cloud_presubs = np.copy(cloud)\n\n # Compute distance of each subsampled point to the closest other point\n # Note: `k=2` is necessary since `k=1` is the point itself.\n tree = cKDTree(cloud)\n NN_dists = tree.query(cloud_presubs, k=2, n_jobs=processes)[0][:,1]\n\n # Get the size of the local neighborhood\n # which is `alpha * median(smallest_distances)`,\n # where a good value for alpha is 5 according to SPADE\n alpha = 5\n NN_size = alpha * np.median(NN_dists)\n\n # Get the local density (LD) of each landmark\n # ...which is the number of other landmarks in its local neighborhood\n LDs = tree.query_ball_point(cloud, NN_size, n_jobs=processes) # Get indices\n LDs = np.vectorize(len)(LDs) # Count\n\n # Define the target density (TD)\n # Note: Good values according to SPADE: the 3rd or 5th percentile of LDs\n # Note: This effectively defines how strongly the data will be subsampled\n TD_percentile = 3\n TD = np.percentile(LDs, TD_percentile)\n\n\n #--------------------------------------------------------------------------\n\n ### Perform density-dependent subsampling\n\n # Create p(keep_lm) probability vector\n # Note: For each point i, the probability of keeping it is\n # { 1 if LD_i < TD\n # { TD / LD_i otherwise\n p_keep = TD / LDs\n p_keep[LDs<TD] = 1\n\n # Randomly decide if a landmark should be kept according to p(keep_lm)\n rand = np.random.uniform(size=cloud.shape[0])\n keep = p_keep >= rand\n\n # Index the lms to be kept\n cloud_ddds = cloud[keep,:]\n\n\n #--------------------------------------------------------------------------\n\n ### Further random downsampling\n\n # Note: This ensures that the downsampled cloud does not grow with the\n # input data and instead is of the specified sample_size or smaller.\n\n if cloud_ddds.shape[0] > sample_size:\n cloud_ddds = random_subsample(cloud_ddds, sample_size)\n\n #--------------------------------------------------------------------------\n\n ### Return result\n return cloud_ddds", "def sdep(y, yHat):\n n = y.shape[0]\n\n numer = ((y - yHat) ** 2).sum()\n\n sdep = (numer / n) ** 0.5\n\n return sdep", "def __calculateDDIstart(self, partedscans, partedspws):\n \n # Example of partedspws:\n # create 2 subMss with spw=0,1,2 and spw=3\n # partedSPWs = {0:['0','1','2'],1:['3']}\n #\n # create 3 subMSs with spw=0,1,2 spw=3 and spw=4,5\n # partedSPWs = {0:['0','1','2'],1:['3'],2:['4','5']}\n \n hasscans = True\n if len(partedscans) == 0:\n scans = ''\n hasscans = False\n\n # It needs to take the correlation selection into account\n corr_sel = self._arg['correlation']\n ddistartList = []\n \n # scan+spw separation axis \n if hasscans:\n count = 0\n for k,spws in partedspws.iteritems():\n for ks,scans in partedscans.iteritems():\n if self._msTool is None:\n self._msTool = mstool()\n self._msTool.open(self._arg['vis'],nomodify=False)\n else:\n self._msTool.reset()\n \n try:\n # The dictionary with selected indices\n seldict = 
self._msTool.msseltoindex(vis=self._arg['vis'],scan=scans,spw=spws,polarization=corr_sel)\n except:\n self._msTool.close()\n continue\n \n # Get the selected DD IDs\n ddis = seldict['dd'].tolist()\n ddsize = ddis.__len__()\n if count == 0:\n ddistart = 0\n \n # Create a ddistart list\n ddistartList.append(ddistart)\n ddistart = ddistart + ddsize\n count = count + 1\n \n # spw separation axis \n else:\n count = 0\n for k,spws in partedspws.iteritems():\n if self._msTool is None:\n self._msTool = mstool()\n self._msTool.open(self._arg['vis'],nomodify=False)\n else:\n self._msTool.reset()\n \n try:\n # The dictionary with selected indices\n seldict = self._msTool.msseltoindex(vis=self._arg['vis'],scan=scans,spw=spws, polarization=corr_sel)\n except:\n self._msTool.reset()\n continue\n \n # Get the selected DD IDs\n ddis = seldict['dd'].tolist()\n ddsize = ddis.__len__()\n if count == 0:\n ddistart = 0\n \n # Create a ddistart list\n ddistartList.append(ddistart)\n ddistart = ddistart + ddsize\n count = count + 1\n \n return ddistartList", "def hss(self):\n return 2 * (self.table[0, 0] * self.table[1, 1] - self.table[0, 1] * self.table[1, 0]) / (\n (self.table[0, 0] + self.table[0, 1]) * (self.table[0, 1] + self.table[1, 1]) +\n (self.table[0, 0] + self.table[1, 0]) * (self.table[1, 0] + self.table[1, 1]))", "def dVdx(self, sys):\n dx = sys.positions - self.x0\n k = self.omega*self.omega*sys.mass\n return self.A*k*dx", "def kde(sregion, tregion, tdist, mapping, index_map, sigma=1.5):\n\n tinds = index_map[tregion]\n \n # mapping of target indices to 0 : # targets\n t2i = dict(zip(tinds, np.arange(len(tinds))))\n \n # determine number of source vertices mapping to each target\n counts = np.zeros((len(tinds),))\n for i in mapping.index:\n mu = mapping.loc[i, 'mu']\n counts[t2i[mu]] += 1\n\n # iterate over target vertices, and convolve count map \n # with isotropic Gaussian kernel\n density = np.zeros((counts.shape[0],))\n for i in np.arange(len(tinds)):\n \n pdf = models.geodesic(tdist[i, :], [sigma])\n \n d = (pdf*counts).sum()\n density[i] = d\n \n return density", "def get_dist_mat(self):\n n_site = self.status.give(keyword=\"n_site\")\n sites = self.status.give(keyword=\"sites\")\n dist_mat = [[0.0 for j in xrange(n_site)] for i in xrange(n_site)]\n for i in xrange(n_site):\n for j in xrange(n_site):\n ri = sites[i].pos\n rj = sites[j].pos\n dist_mat[i][j] = np.linalg.norm(ri-rj)\n # print ri, rj\n return dist_mat", "def pss(self):\n return (self.table[0, 0] * self.table[1, 1] - self.table[0, 1] * self.table[1, 0]) / \\\n ((self.table[0, 0] + self.table[1, 0]) * (self.table[0, 1] + self.table[1, 1]))", "def distance_modulus(self):\n return 5*np.log10(self.parallax.to(u.pc, u.parallax())/10*u.pc)", "def calculate_diff_stress(self, x, side=1, p=2):\n nel = self.problem.nelx * self.problem.nely\n ndof = 2 * (self.problem.nelx + 1) * (self.problem.nely + 1)\n nloads = self.problem.f.shape[1]\n u = self.problem.compute_displacements(x)\n rho = self.penalized_densities(x)\n EB = self.E(self.problem.nu).dot(self.B(side))\n EBu = sum([EB.dot(u[:, i][self.edofMat]) for i in range(nloads)])\n s11, s22, s12 = numpy.hsplit((EBu * rho / float(nloads)).T, 3)\n\n def sigma_pow(s11, s22, s12):\n return numpy.sqrt(s11**2 - s11 * s22 + s22**2 + 3 * s12**2)**p\n\n def dsigma_pow(ds11, ds22, ds12):\n sigma = numpy.sqrt(s11**2 - s11 * s22 + s22**2 + 3 * s12**2)\n dinside = (2 * s11 * ds11 - s11 * ds22 - ds11 * s22 + 2 * s22 *\n ds22 + 6 * s12 * ds12)\n return p * (sigma)**(p - 1) / (2.0 * sigma) * dinside\n\n K 
= self.problem.build_K(x)\n K = cvxopt.spmatrix(\n K.data, K.row.astype(numpy.int), K.col.astype(numpy.int))\n\n dK = self.problem.build_dK(x).tocsc()\n U = numpy.tile(u[self.problem.free, :], (nel, 1))\n U = cvxopt.matrix(dK.dot(U).reshape(-1, nel * nloads, order=\"F\"))\n cvxopt.cholmod.linsolve(K, U) # U stores solution after solve\n du = numpy.zeros((ndof, nel * nloads))\n du[self.problem.free, :] = -numpy.array(U)\n du = du.reshape((ndof * nel, nloads), order=\"F\")\n\n rep_edofMat = (numpy.tile(self.edofMat, nel) + numpy.tile(\n numpy.repeat(numpy.arange(nel) * ndof, nel), (8, 1)))\n dEBu = sum([EB.dot(du[:, j][rep_edofMat]) for j in range(nloads)])\n rhodEBu = (numpy.tile(rho, nel) * dEBu)\n drho = self.diff_penalized_densities(x)\n drhoEBu = (numpy.diag(drho).flatten() * numpy.tile(EBu, nel))\n dstress = ((drhoEBu + rhodEBu) / float(nloads)).T\n ds11, ds22, ds12 = map(\n lambda x: x.reshape(nel, nel).T, numpy.hsplit(dstress, 3))\n ds = dsigma_pow(ds11, ds22, ds12).sum(0)\n return ds", "def mask_sparsity(mask: Mask):\n return 1 - mask_density(mask)", "def calc_psd2d(self):\n print(\"Calculating 2D power spectral density ... \", end=\"\", flush=True)\n rows, cols = self.shape\n imgf = np.fft.fftshift(np.fft.fft2(self.image))\n # NOTE: normalize w.r.t. image size\n norm = rows * cols * self.pixel[0]**2\n self.psd2d = (np.abs(imgf) ** 2) / norm\n print(\"DONE\", flush=True)\n return self.psd2d", "def TSC_density_old(pos, gridsize, boxsize, mass, periodic=True):\n rho = np.zeros((gridsize,gridsize,gridsize), dtype='float64')\n\n Npart = len(pos)\n\n crunch.TSCDensity(pos.astype('float64'), rho, Npart, boxsize, gridsize, mass)\n\n return rho", "def get_center_of_mass_enemies(self,obs):", "def SSD(x,y):\n return np.sum((x-y)**2)", "def segPDist(seg1, seg2, speeds):\n \n seg1Valid=speeds.loc[seg1].dropna()\n seg2Valid=speeds[seg1Valid.index].loc[seg2].dropna()/sum(speeds[seg1Valid.index].loc[seg2].dropna(),1)\n seg1Valid=seg1Valid[seg2Valid.index]/sum(seg1Valid[seg2Valid.index],1)\n if len(seg1Valid.values) == 0 or len(seg2Valid.values) == 0 : return 1 + (1-len(seg2Valid.index)/ speeds.columns.size)\n return np.mean((seg1Valid.values - seg2Valid.values)**2) + (1-len(seg2Valid.index)/ speeds.columns.size)", "def component_pdfs(x, mus, sigmas):\n n_components = mus.shape[0]\n return np.array([gaussian_pdf(x, mus[k,:], sigmas[k, :, :]) for k in range(n_components)])", "def partsphere(self, x):\r\n self.counter += 1\r\n # return np.random.rand(1)[0]**0 * sum(x**2) + 1 * np.random.rand(1)[0]\r\n dim = len(x)\r\n x = array([x[i % dim] for i in range(2*dim)])\r\n N = 8\r\n i = self.counter % dim\r\n #f = sum(x[i:i + N]**2)\r\n f = sum(x[np.random.randint(dim, size=N)]**2)\r\n return f", "def discretizespace(self):\n \n # Grid\n self.xgriddim = ( self.x0_n , self.x1_n , self.x2_n )\n \n self.xd = [ None , None , None ]\n self.xd[0] = np.linspace( self.DS.x_lb[0] , self.DS.x_ub[0] , self.x0_n )\n self.xd[1] = np.linspace( self.DS.x_lb[1] , self.DS.x_ub[1] , self.x1_n )\n self.xd[2] = np.linspace( self.DS.x_lb[2] , self.DS.x_ub[2] , self.x2_n )\n \n self.x_grid2node = np.zeros( ( self.x0_n , self.x1_n , self.x2_n ) , dtype = int ) # grid of corresponding index\n \n # 1-D List of nodes\n self.nodes_n = self.x0_n * self.x1_n * self.x2_n\n self.nodes_state = np.zeros(( self.nodes_n , self.DS.n ), dtype = float ) # Number of nodes x state dimensions\n self.nodes_index = np.zeros(( self.nodes_n , self.DS.n ), dtype = int ) # Number of nodes x state dimensions", "def green_meshes_case_D(nz, nx, dz, dx, 
rho=None, beta=None, lamb=None):\n rho_sign = 1 if rho>=0 else -1\n \n # Change to internal coordinates\n dx = dx/rho\n dz = dz/(2*abs(rho))\n \n # Double-sized array for convolution with the density\n zvec2 = np.arange(-nz+1,nz+1,1)*dz # center = 0 is at [nz-1]\n xvec2 = np.arange(-nx+1,nx+1,1)*dx # center = 0 is at [nx-1]\n \n \n zm2, xm2 = np.meshgrid(zvec2, xvec2, indexing=\"ij\")\n \n Es_case_D_grid = Es_case_D(zm2, xm2, beta, lamb)\n \n return Es_case_D_grid, zvec2*2*rho, xvec2*rho", "def _dsurface_domega(self):\n\n dsdo = 0.\n\n return dsdo", "def compute_distortion(cluster_list, data_table):\r\n distortion = 0\r\n \r\n for cluster in cluster_list:\r\n distortion += cluster.cluster_error(data_table)\r\n\r\n return distortion", "def scattering_factors(self, material, density):\n raise NotImplementedError(\n \"need radiation type in <%s> to compute sld for %s\"\n % (self.filename, material))", "def GetDivisions(self):\n ...", "def d_mse(x, y):\n\n return 2 * (x - y) / x.size(0) / x.size(1)", "def t_norm(self, heater): \n\t\treturn (self.n_collisions/self.length)/(heater.n_collisions/heater.length)", "def carga_datos_slab2( directorio, latini, latfin, cambiarnan = True ):\n\n # se chequea si el input directorio es o no string\n if not isinstance( directorio, basestring ) :\n directorio = str(directorio)\n else:\n directorio = directorio\n\n # se chequea el formateo del string (se desea que no termine con /)\n if not directorio.endswith(\"/\"):\n directorio = directorio + \"/\"\n\n\n # archivo de profundidad\n proffile = directorio + \"sam_slab2_dep_02.23.18.xyz\" # nombre del archivo de prof\n slabprof = np.genfromtxt(proffile, delimiter = \",\") # se lee el archivo a un array\n # archivo de dip\n dipfile = directorio + \"sam_slab2_dip_02.23.18.xyz\" # nombre del archivo de dip\n slabdip = np.genfromtxt(dipfile, delimiter = \",\") # se lee el archivo a un array\n # archivo de strike\n strfile = directorio + \"sam_slab2_str_02.23.18.xyz\"\n slabstrike = np.genfromtxt(strfile, delimiter = \",\") # se lee el archivo a un array\n\n # las longitudes estan en formato 0 - 360, se cambian a -180 - 180\n slabprof[:,0] = slabprof[:,0] - 360\n slabdip[:,0] = slabdip[:,0] - 360\n slabstrike[:,0] = slabstrike[:,0] - 360\n\n # se cambia dimensiones de los array para graficar\n repslat = col.Counter( slabprof[:,1] ).values( )[0] # (n cols) formato xyz repite valores de latitud para cada longitud, se obtiene cuantas veces se repite este valor para reshape\n repslon = len( slabprof )/repslat # (n filas)\n\n lon = np.reshape( slabprof[:,0], ( repslon, repslat ) )\n lat = np.reshape( slabprof[:,1], ( repslon, repslat ) )\n prof = np.reshape( slabprof[:,2], ( repslon, repslat ) ) * -1\n dip = np.reshape( slabdip[:,2], ( repslon, repslat ) )\n strike = np.reshape( slabstrike[:,2], ( repslon, repslat ) )\n\n idx = ( lat <= latini ) & ( lat >= latfin ) # indices de las latitudes dentro del area de interes\n # numero de columnas se mantiene (repslat), disminuye solo numero de filas (repslon)\n lon_adi = lon[idx] # adi: area de interes\n lat_adi = lat[idx]\n prof_adi = prof[idx]\n dip_adi = dip[idx]\n strike_adi = strike[idx]\n\n #idx_lonini = np.where(lon[0,] == lonini)[0][0]\n #idx_lonfin = np.where(lon[0,] == lonfin)[0][0]\n\n # redimensionar arrays\n filas = len(lat_adi)/repslat # cantidad de filas en array cortado nuevo\n lon = np.reshape( lon_adi, ( filas, repslat ) )\n lat = np.reshape( lat_adi, ( filas, repslat ) )\n prof = np.reshape( prof_adi, ( filas, repslat ) ) * 1000\n dip = np.reshape( dip_adi, 
( filas, repslat ) )\n strike = np.reshape( strike_adi, ( filas, repslat ) )\n\n # si se desea se puede cambiar los valores nan por 0\n if cambiarnan:\n prof[ np.isnan( prof ) ] = 0\n dip[ np.isnan( dip ) ] = 0\n strike[ np.isnan( strike ) ] = 0\n\n # se debe revisar que la profundidad este en metros\n if prof.max( ) < 1000 :\n prof *= 1000\n\n return lon, lat, prof, dip, strike, repslat, repslon", "def density_ch(tensor):\n return 1 - sparsity_ch(tensor)", "def sectorsphere(self, x):\r\n return sum(x**2) + (1e6-1) * sum(x[x<0]**2)", "def sRGBToSPD(rgb):\n global _LSSDATA\n rdata=_LSSDATA\n if rdata == None:\n rdata=_generateLSSData()\n _LSSDATA=rdata\n b11=rdata[0]\n b12=rdata[1]\n lin=linearFromsRGB3(rgb)\n rm=1e-5\n if lin[0]<=rm and lin[1]<=rm and lin[2]<=rm:\n return [rm for i in range(matShape(b12)[0])]\n # Implements Iterative Least Slope Squared algorithm\n linmat=matFromVec(lin)\n ret=matMul(b12,matT(linmat))\n shapelen=matShape(ret)[0]\n iters=0\n while True:\n iters+=1\n k1=[]\n k0=[]\n for r in range(shapelen):\n refl=matGet(ret,r,0)\n if refl>1:\n k1+=[[(1 if r==i else 0) for i in range(shapelen)]]\n if refl<=0:\n k0+=[[(1 if r==i else 0) for i in range(shapelen)]]\n k1len=len(k1)\n k0len=len(k0)\n if k1len+k0len==0:\n spdarray=[matGet(ret,i,0) for i in range(matShape(ret)[0])]\n break\n k1+=k0\n k=matNew(k1)\n cmat=[[1 if i<k1len else rm] for i in range(k0len+k1len)]\n cmat=matNew(cmat)\n tk=matT(k)\n ri=matI(matMul(matMul(k,b11),tk))\n rj=matSub(matMul(k,ret),cmat)\n rk=matMul(matMul(matMul(b11,tk),ri),rj)\n ret=matSub(ret,rk)\n for i in range(matShape(ret)[0]):\n s=matGet(ret,i,0)\n if s>1.0 and iters>20: matSet(ret,i,0,1.0) # Drastic measure to avoid overiteration\n if s<rm and iters>20: matSet(ret,i,0,rm)\n return SPD(spdarray,10,380,730)", "def two_body_old(sys, psi):\n # psi = np.reshape(psi,\n # (fci.cistring.num_strings(sys.nsites, sys.nup), fci.cistring.num_strings(sys.nsites, sys.ndown)))\n D = 0.\n for i in range(sys.nsites):\n w = (i + 1) % sys.nsites\n v = (i - 1) % sys.nsites\n\n D += harmonic.compute_inner_product(psi, sys.nsites, (sys.nup, sys.ndown), [i, w, i, i], [1, 0, 1, 0], [1, 1, 0, 0])\n\n D += harmonic.compute_inner_product(psi, sys.nsites, (sys.nup, sys.ndown), [i, i, i, w], [1, 0, 1, 0], [1, 1, 0, 0])\n\n D -= harmonic.compute_inner_product(psi, sys.nsites, (sys.nup, sys.ndown), [v, i, i, i], [1, 0, 1, 0], [1, 1, 0, 0])\n\n D -= harmonic.compute_inner_product(psi, sys.nsites, (sys.nup, sys.ndown), [i, i, v, i], [1, 0, 1, 0], [1, 1, 0, 0])\n\n return D.conj()", "def compute_distortion(cluster_list, data_table):\n\tdistortion = 0\n\tfor cluster in cluster_list:\n\t\tdistortion += cluster.cluster_error(data_table)\n\treturn distortion", "def differential_probability(sbox):\n\n size = len(sbox)\n\n # Create an empty matrix\n ddt = []\n for i in range(size):\n ddt.append([0] * size)\n\n for x in range(size):\n for y in range(size):\n ddt[x ^ y][sbox[x] ^ sbox[y]] += 1\n\n # The top-left value is always 16\n # and is ignored during the analysis.\n ddt[0][0] = 0\n\n # Find maximum of the matrix\n return max([max(row) for row in ddt])", "def dipole_esp(dist_vector, dipole_moment, dist):\n return np.sum((dipole_moment * ELECTRON_CHARGE * ELECTRON_CHARGE).dot(dist_vector)) / (\n 4 * PI * VACUUM_PERMITTIVITY * dist ** 3)", "def mpd(distmat):\r\n return distmat.sum() / (distmat.size - distmat.shape[0])", "def cal_pe_ds(comp, PGA, compdict, fragdict, sc):\n ct = compdict['component_type'][comp]\n ds_list = sorted(fragdict['damage_median'][ct].keys())\n 
ds_list.remove('DS0 None')\n pe_ds = np.zeros(len(ds_list))\n\n for i, ds in enumerate(ds_list):\n m = fragdict['damage_median'][ct][ds]\n b = fragdict['damage_logstd'][ct][ds]\n algo = fragdict['damage_function'][ct][ds].lower()\n mode = int(fragdict['mode'][ct][ds])\n # pe_ds[i] = stats.lognorm.cdf(PGA,b,scale=m)\n\n if algo == 'lognormal' and mode == 1:\n pe_ds[i] = stats.lognorm.cdf(PGA, b, scale=m)\n elif algo == 'lognormal' and mode == 2:\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n lower_lim = fragdict['minimum'][ct][ds]\n minpos = min(\n range(len(sc.hazard_intensity_vals)),\n key=lambda i: abs(sc.hazard_intensity_vals[i] - lower_lim)\n )\n zl = [0.0] * (minpos + 1)\n ol = [1] * (len(sc.hazard_intensity_vals) - (minpos + 1))\n stepfn = zl + ol\n stepv = stepfn[minpos]\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n m = 0.25\n s1 = np.exp(fragdict['sigma_1'][ct][ds])\n s2 = np.exp(fragdict['sigma_2'][ct][ds])\n w1 = 0.5\n w2 = 0.5\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n pe_ds[i] = (\n w1 * stats.lognorm.cdf(PGA, s1, loc=0.0, scale=m) +\n w2 * stats.lognorm.cdf(PGA, s2, loc=0.0, scale=m)\n ) * stepv\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n return np.sort(pe_ds)", "def indices_and_currents_TSC_2D( charge_electron, positions_x, positions_y, velocity_x, velocity_y,\\\n x_grid, y_grid, ghost_cells, length_domain_x, length_domain_y, dt ):\n \n \n positions_x_new = positions_x + velocity_x * dt\n positions_y_new = positions_y + velocity_y * dt\n\n base_indices_x = af.data.constant(0, positions_x.elements(), dtype=af.Dtype.u32)\n base_indices_y = af.data.constant(0, positions_x.elements(), dtype=af.Dtype.u32)\n\n dx = af.sum(x_grid[1] - x_grid[0])\n dy = af.sum(y_grid[1] - y_grid[0])\n\n\n # Computing S0_x and S0_y\n ###########################################################################################\n \n # Determining the grid cells containing the respective particles\n \n x_zone = (((af.abs(positions_x - af.sum(x_grid[0])))/dx).as_type(af.Dtype.u32))\n y_zone = (((af.abs(positions_y - af.sum(y_grid[0])))/dy).as_type(af.Dtype.u32))\n\n \n # Determing the indices of the closest grid node in x direction\n\n temp = af.where(af.abs(positions_x-x_grid[x_zone]) < \\\n af.abs(positions_x-x_grid[x_zone + 1])\\\n )\n\n if(temp.elements()>0):\n base_indices_x[temp] = x_zone[temp]\n\n temp = af.where(af.abs(positions_x - x_grid[x_zone]) >= \\\n af.abs(positions_x-x_grid[x_zone + 1])\\\n )\n\n if(temp.elements()>0):\n base_indices_x[temp] = (x_zone[temp] + 1).as_type(af.Dtype.u32) \n\n\n # Determing the indices of the closest grid node in y direction\n\n temp = af.where(af.abs(positions_y-y_grid[y_zone]) < \\\n af.abs(positions_y-y_grid[y_zone + 1])\\\n )\n\n if(temp.elements()>0):\n base_indices_y[temp] = y_zone[temp]\n\n temp = af.where(af.abs(positions_y - y_grid[y_zone])>=af.abs(positions_y-x_grid[y_zone + 1]))\n\n if(temp.elements()>0):\n base_indices_y[temp] = (y_zone[temp] + 1).as_type(af.Dtype.u32) \n\n # Concatenating the index list for near by grid nodes in x direction\n # TSC affect 5 nearest grid nodes around in 1 Dimensions\n\n base_indices_minus_two = (base_indices_x - 2).as_type(af.Dtype.u32) \n base_indices_minus = (base_indices_x - 1).as_type(af.Dtype.u32) \n base_indices_plus = (base_indices_x + 1).as_type(af.Dtype.u32) \n base_indices_plus_two = (base_indices_x + 2).as_type(af.Dtype.u32) \n\n\n\n index_list_x = af.join( 1,\\\n af.join(1, base_indices_minus_two, 
base_indices_minus, base_indices_x),\\\n af.join(1, base_indices_plus, base_indices_plus_two),\\\n )\n\n\n\n # Concatenating the index list for near by grid nodes in y direction\n # TSC affect 5 nearest grid nodes around in 1 Dimensions\n \n base_indices_minus_two = (base_indices_y - 2).as_type(af.Dtype.u32) \n base_indices_minus = (base_indices_y - 1).as_type(af.Dtype.u32) \n base_indices_plus = (base_indices_y + 1).as_type(af.Dtype.u32) \n base_indices_plus_two = (base_indices_y + 2).as_type(af.Dtype.u32) \n\n\n index_list_y = af.join( 1,\\\n af.join(1, base_indices_minus_two, base_indices_minus, base_indices_y),\\\n af.join(1, base_indices_plus, base_indices_plus_two),\\\n )\n\n # Concatenating the positions_x for determining weights for near by grid nodes in y direction\n # TSC affect 5 nearest grid nodes around in 1 Dimensions\n\n positions_x_5x = af.join( 0,\\\n af.join(0, positions_x, positions_x, positions_x),\\\n af.join(0, positions_x, positions_x),\\\n )\n\n positions_y_5x = af.join( 0,\\\n af.join(0, positions_y, positions_y, positions_y),\\\n af.join(0, positions_y, positions_y),\\\n )\n\n\n\n\n # Determining S0 for positions at t = n * dt\n\n\n distance_nodes_x = x_grid[af.flat(index_list_x)]\n\n distance_nodes_y = y_grid[af.flat(index_list_y)]\n\n\n W_x = 0 * distance_nodes_x.copy()\n W_y = 0 * distance_nodes_y.copy()\n\n\n # Determining weights in x direction\n\n temp = af.where(af.abs(distance_nodes_x - positions_x_5x) < (0.5*dx) )\n\n if(temp.elements()>0):\n W_x[temp] = 0.75 - (af.abs(distance_nodes_x[temp] - positions_x_5x[temp])/dx)**2\n\n temp = af.where((af.abs(distance_nodes_x - positions_x_5x) >= (0.5*dx) )\\\n * (af.abs(distance_nodes_x - positions_x_5x) < (1.5 * dx) )\\\n )\n\n if(temp.elements()>0):\n W_x[temp] = 0.5 * (1.5 - (af.abs(distance_nodes_x[temp] - positions_x_5x[temp])/dx))**2\n\n\n\n # Determining weights in y direction\n\n temp = af.where(af.abs(distance_nodes_y - positions_y_5x) < (0.5*dy) )\n\n if(temp.elements()>0):\n W_y[temp] = 0.75 - (af.abs(distance_nodes_y[temp] - positions_y_5x[temp])/dy)**2\n\n temp = af.where((af.abs(distance_nodes_y - positions_y_5x) >= (0.5*dy) )\\\n * (af.abs(distance_nodes_y - positions_y_5x) < (1.5 * dy) )\\\n )\n\n if(temp.elements()>0):\n W_y[temp] = 0.5 * (1.5 - (af.abs(distance_nodes_y[temp] - positions_y_5x[temp])/dy))**2\n\n # Restructering W_x and W_y for visualization and ease of understanding\n\n W_x = af.data.moddims(W_x, positions_x.elements(), 5)\n W_y = af.data.moddims(W_y, positions_y.elements(), 5)\n\n # Tiling the S0_x and S0_y for the 25 indices around the particle\n \n S0_x = af.tile(W_x, 1, 1, 5)\n S0_y = af.tile(W_y, 1, 1, 5)\n\n\n S0_y = af.reorder(S0_y, 0, 2, 1)\n\n\n\n #Computing S1_x and S1_y\n ###########################################################################################\n\n positions_x_5x_new = af.join( 0,\\\n af.join(0, positions_x_new, positions_x_new, positions_x_new),\\\n af.join(0, positions_x_new, positions_x_new),\\\n )\n\n positions_y_5x_new = af.join( 0,\\\n af.join(0, positions_y_new, positions_y_new, positions_y_new),\\\n af.join(0, positions_y_new, positions_y_new),\\\n )\n\n\n\n\n # Determining S0 for positions at t = n * dt\n\n W_x = 0 * distance_nodes_x.copy()\n W_y = 0 * distance_nodes_y.copy()\n\n\n # Determining weights in x direction\n\n temp = af.where(af.abs(distance_nodes_x - positions_x_5x_new) < (0.5*dx) )\n\n if(temp.elements()>0):\n W_x[temp] = 0.75 - (af.abs(distance_nodes_x[temp] - positions_x_5x_new[temp])/dx)**2\n\n temp = 
af.where((af.abs(distance_nodes_x - positions_x_5x_new) >= (0.5*dx) )\\\n * (af.abs(distance_nodes_x - positions_x_5x_new) < (1.5 * dx) )\\\n )\n\n if(temp.elements()>0):\n W_x[temp] = 0.5 * (1.5 - (af.abs(distance_nodes_x[temp] \\\n - positions_x_5x_new[temp])/dx\\\n )\\\n )**2\n\n\n\n # Determining weights in y direction\n\n temp = af.where(af.abs(distance_nodes_y - positions_y_5x_new) < (0.5*dy) )\n\n if(temp.elements()>0):\n W_y[temp] = 0.75 - (af.abs(distance_nodes_y[temp] \\\n - positions_y_5x_new[temp]\\\n )/dy\\\n )**2\n\n temp = af.where((af.abs(distance_nodes_y - positions_y_5x_new) >= (0.5*dy) )\\\n * (af.abs(distance_nodes_y - positions_y_5x_new) < (1.5 * dy) )\\\n )\n\n if(temp.elements()>0):\n W_y[temp] = 0.5 * (1.5 - (af.abs(distance_nodes_y[temp] \\\n - positions_y_5x_new[temp])/dy\\\n )\\\n )**2\n\n # Restructering W_x and W_y for visualization and ease of understanding\n\n W_x = af.data.moddims(W_x, positions_x.elements(), 5)\n W_y = af.data.moddims(W_y, positions_x.elements(), 5)\n\n # Tiling the S0_x and S0_y for the 25 indices around the particle \n \n S1_x = af.tile(W_x, 1, 1, 5)\n S1_y = af.tile(W_y, 1, 1, 5)\n\n S1_y = af.reorder(S1_y, 0, 2, 1)\n\n\n ###########################################################################################\n\n # Determining the final weight matrix for currents in 3D matrix form factor\n\n\n W_x = (S1_x - S0_x) * (S0_y + (0.5 *(S1_y - S0_y)) )\n\n\n W_y = (S1_y - S0_y) * (S0_x + (0.5 *(S1_x - S0_x)) )\n\n\n ###########################################################################################\n\n\n # Assigning Jx and Jy according to Esirkepov's scheme\n\n Jx = af.data.constant(0, positions_x.elements(), 5, 5, dtype = af.Dtype.f64)\n Jy = af.data.constant(0, positions_x.elements(), 5, 5, dtype = af.Dtype.f64)\n\n\n Jx[:, 0, :] = -1 * charge_electron * (dx/dt) * W_x[:, 0, :].copy()\n Jx[:, 1, :] = Jx[:, 0, :] + -1 * charge_electron * (dx/dt) * W_x[:, 1, :].copy()\n Jx[:, 2, :] = Jx[:, 1, :] + -1 * charge_electron * (dx/dt) * W_x[:, 2, :].copy()\n Jx[:, 3, :] = Jx[:, 2, :] + -1 * charge_electron * (dx/dt) * W_x[:, 3, :].copy()\n Jx[:, 4, :] = Jx[:, 3, :] + -1 * charge_electron * (dx/dt) * W_x[:, 4, :].copy()\n \n # Computing current density using currents\n \n Jx = (1/(dx * dy)) * Jx\n\n\n Jy[:, :, 0] = -1 * charge_electron * (dy/dt) * W_y[:, :, 0].copy()\n Jy[:, :, 1] = Jy[:, :, 0] + -1 * charge_electron * (dy/dt) * W_y[:, :, 1].copy()\n Jy[:, :, 2] = Jy[:, :, 1] + -1 * charge_electron * (dy/dt) * W_y[:, :, 2].copy()\n Jy[:, :, 3] = Jy[:, :, 2] + -1 * charge_electron * (dy/dt) * W_y[:, :, 3].copy()\n Jy[:, :, 4] = Jy[:, :, 3] + -1 * charge_electron * (dy/dt) * W_y[:, :, 4].copy()\n \n # Computing current density using currents\n\n Jy = (1/(dx * dy)) * Jy\n\n # Preparing the final index and current vectors\n ###########################################################################################\n \n \n # Determining the x indices for charge deposition\n index_list_x_Jx = af.flat(af.tile(index_list_x, 1, 1, 5))\n\n # Determining the y indices for charge deposition\n y_current_zone = af.tile(index_list_y, 1, 1, 5)\n index_list_y_Jx = af.flat(af.reorder(y_current_zone, 0, 2, 1))\n\n\n currents_Jx = af.flat(Jx)\n\n # Determining the x indices for charge deposition\n index_list_x_Jy = af.flat(af.tile(index_list_x, 1, 1, 5))\n\n # Determining the y indices for charge deposition\n y_current_zone = af.tile(index_list_y, 1, 1, 5)\n index_list_y_Jy = af.flat(af.reorder(y_current_zone, 0, 2, 1))\n \n # Flattenning the Currents array\n 
currents_Jy = af.flat(Jy)\n\n af.eval(index_list_x_Jx, index_list_y_Jx)\n af.eval(index_list_x_Jy, index_list_y_Jy)\n af.eval(currents_Jx, currents_Jy)\n\n\n return index_list_x_Jx, index_list_y_Jx, currents_Jx,\\\n index_list_x_Jy, index_list_y_Jy, currents_Jy", "def _densgrid(center, vdw_radius, grid, npts):\n\n x0, y0, z0 = center\n dd = np.sqrt((grid[0] - x0)**2 +\n (grid[1] - y0)**2 + (grid[2] - z0)**2)\n dgrid = np.zeros(npts)\n\n dgrid[dd < vdw_radius] = np.exp(\n -2 * dd[dd < vdw_radius]**2 / vdw_radius**2)\n\n dd_tmp = dd[(dd >= vdw_radius) & (dd < 1.5 * vdw_radius)]\n dgrid[(dd >= vdw_radius) & (dd < 1.5 * vdw_radius)] = (\n 4. / np.e**2 / vdw_radius**2 * dd_tmp**2) - (\n 12. / np.e**2 / vdw_radius * dd_tmp) + 9. / np.e**2\n\n return dgrid", "def DM(self, masses=None):\n N = len(self.diameters)\n rs = self.rs\n d = self.ndim\n M = np.zeros((d * N, d * N))\n\n for i in range(N):\n sigi = self.diameters[i]\n for j in range(i):\n rijvec = rs[i, :] - rs[j, :]\n rijvec = rijvec - np.around(rijvec)\n rijsq = np.sum(rijvec**2)\n dij = (sigi + self.diameters[j]) / 2\n dijsq = dij**2\n if rijsq < dijsq:\n rij = np.sqrt(rijsq)\n rijouter = np.outer(rijvec, rijvec)\n # U(r) = ½(1 - r/d)²\n # d²U/dxdy = (dr/dx)(dr/dy)/d² - (1 - r/d)(d²r/dxdy)/d\n # dr/dx = x/r\n # d²r/dxdy = -(x y) / r³\n # d²U/dxdy = -(x y)/(r² d²) + (1 - r/d)((x y)/r²)/(d r)\n # d²U/dx² = (dr/dx)²/d² - (1 - r/d)(d²r/dx²)/d\n # d²r/dx² = -x² / r³ + 1/r\n # d²U/dxᵢdxⱼ = -(xᵢ xⱼ)/(r² d²) + (1 - r/d)((xᵢ xⱼ)/r² -\n # δᵢⱼ)/(d r)\n\n Mij1 = -rijouter / rijsq / dijsq\n Mij2 = (1 - rij / dij) * \\\n (rijouter / rijsq - np.eye(d)) / rij / dij\n Mij = Mij1 + Mij2\n\n M[d * i:d * i + d, d * j:d * j + d] = Mij\n M[d * j:d * j + d, d * i:d * i + d] = Mij\n M[d * i:d * i + d, d * i:d * i + d] -= Mij\n M[d * j:d * j + d, d * j:d * j + d] -= Mij\n\n np.divide(M, self.L**2, out=M)\n if masses is None:\n return M\n\n # TODO: is the mass part of this really part of this?\n marr = np.array(masses)\n assert np.shape(masses) == np.shape(self.diameters)\n marr = np.array([masses] * d)\n marr = marr.T.flatten()\n # marr is now [m1,m1,m2,m2,...] 
(in 2D)\n mm = np.eye(d * N)\n np.multiply(mm, marr**-.5, out=mm)\n # mm is now M^-½, where M is the mass matrix\n\n mm.dot(M, out=M)\n M.dot(mm, out=M)\n return M", "def simpson(counts):\n return 1 - dominance(counts)", "def pairwiseDiversity(fullAlign,region=None,subst=None,bySite=True):\n\n fullAlign = padAlignment(fullAlign)\n align = sliceAlign(fullAlign, region)\n L = len(align[align.index[0]])\n\n if subst is None:\n _PD_hamming(align, None, subst, bySite, True)\n \n return _PD(align, None, subst, bySite, True)", "def densitychange(self,dt=0.1):\n #Using conservation of mass and diffusion\n dp_dt = -div(self.u*self.d)\n dp_dt += ndimage.laplace(self.d)\n #This term seems to make the density clump together, producing \n #waves which can make the simulation blow up.\n #dp_dt -= np.add.reduce(self.u*np.array(np.gradient(self.d)))\n #Edge density shouldn't change.\n dp_dt[[0,-1]] = dp_dt[:,[0,-1]] = 0\n self.d += dp_dt*dt\n #Change pressure accordingly to ideal gas law\n #AAAAAAAAAAAAAAAA this fixed most of the poblems from before!!!\n self.P = self.d*8.214*273\n #Conserve mass by spreading out fluctuations \n self.d[1:-1,1:-1] += (self.mass-np.sum(self.d))/self.vol", "def g_square_dis(dm, x, y, s, alpha, levels):\n\n def _calculate_tlog(x, y, s, dof, levels, dm):\n prod_levels = np.prod(list(map(lambda x: levels[x], s)))\n nijk = np.zeros((levels[x], levels[y], prod_levels))\n s_size = len(s)\n z = []\n for z_index in range(s_size):\n z.append(s.pop())\n pass\n for row_index in range(dm.shape[0]):\n i = dm[row_index, x]\n j = dm[row_index, y]\n k = []\n k_index = 0\n for s_index in range(s_size):\n if s_index == 0:\n k_index += dm[row_index, z[s_index]]\n else:\n lprod = np.prod(list(map(lambda x: levels[x], z[:s_index])))\n k_index += (dm[row_index, z[s_index]] * lprod)\n pass\n pass\n nijk[i, j, k_index] += 1\n pass\n nik = np.ndarray((levels[x], prod_levels))\n njk = np.ndarray((levels[y], prod_levels))\n for k_index in range(prod_levels):\n nik[:, k_index] = nijk[:, :, k_index].sum(axis = 1)\n njk[:, k_index] = nijk[:, :, k_index].sum(axis = 0)\n pass\n nk = njk.sum(axis = 0)\n tlog = np.zeros((levels[x], levels[y], prod_levels))\n tlog.fill(np.nan)\n for k in range(prod_levels):\n tx = np.array([nik[:, k]]).T\n ty = np.array([njk[:, k]])\n tdijk = tx.dot(ty)\n tlog[:, :, k] = nijk[:, :, k] * nk[k] / tdijk\n pass\n return (nijk, tlog)\n\n _logger.debug('Edge %d -- %d with subset: %s' % (x, y, s))\n row_size = dm.shape[0]\n s_size = len(s)\n dof = ((levels[x] - 1) * (levels[y] - 1)\n * np.prod(list(map(lambda x: levels[x], s))))\n\n # row_size_required = 5 * dof\n # if row_size < row_size_required:\n # _logger.warning('Not enough samples. %s is too small. 
Need %s.'\n # % (str(row_size), str(row_size_required)))\n # p_val = 1\n # dep = 0\n # return p_val, dep\n\n nijk = None\n if s_size < 5:\n if s_size == 0:\n nijk = np.zeros((levels[x], levels[y]))\n for row_index in range(row_size):\n i = dm[row_index, x]\n j = dm[row_index, y]\n nijk[i, j] += 1\n pass\n tx = np.array([nijk.sum(axis = 1)]).T\n ty = np.array([nijk.sum(axis = 0)])\n tdij = tx.dot(ty)\n tlog = nijk * row_size / tdij\n pass\n if s_size > 0:\n nijk, tlog = _calculate_tlog(x, y, s, dof, levels, dm)\n pass\n pass\n else:\n # s_size >= 5\n nijk = np.zeros((levels[x], levels[y], 1))\n i = dm[0, x]\n j = dm[0, y]\n k = []\n for z in s:\n k.append(dm[:, z])\n pass\n k = np.array(k).T\n parents_count = 1\n parents_val = np.array([k[0, :]])\n nijk[i, j, parents_count - 1] = 1\n for it_sample in range(1, row_size):\n is_new = True\n i = dm[it_sample, x]\n j = dm[it_sample, y]\n tcomp = parents_val[:parents_count, :] == k[it_sample, :]\n for it_parents in range(parents_count):\n if np.all(tcomp[it_parents, :]):\n nijk[i, j, it_parents] += 1\n is_new = False\n break\n pass\n if is_new is True:\n parents_count += 1\n parents_val = np.r_[parents_val, [k[it_sample, :]]]\n nnijk = np.zeros((levels[x], levels[y], parents_count))\n for p in range(parents_count - 1):\n nnijk[:, :, p] = nijk[:, :, p]\n pass\n nnijk[i, j, parents_count - 1] = 1\n nijk = nnijk\n pass\n pass\n nik = np.ndarray((levels[x], parents_count))\n njk = np.ndarray((levels[y], parents_count))\n for k_index in range(parents_count):\n nik[:, k_index] = nijk[:, :, k_index].sum(axis = 1)\n njk[:, k_index] = nijk[:, :, k_index].sum(axis = 0)\n pass\n nk = njk.sum(axis = 0)\n tlog = np.zeros((levels[x], levels[y], parents_count))\n tlog.fill(np.nan)\n for k in range(parents_count):\n tx = np.array([nik[:, k]]).T\n ty = np.array([njk[:, k]])\n tdijk = tx.dot(ty)\n tlog[:, :, k] = nijk[:, :, k] * nk[k] / tdijk\n pass\n pass\n log_tlog = np.log(tlog)\n G2 = np.nansum(2 * nijk * log_tlog)\n # _logger.debug('dof = %d' % dof)\n # _logger.debug('nijk = %s' % nijk)\n # _logger.debug('tlog = %s' % tlog)\n # _logger.debug('log(tlog) = %s' % log_tlog)\n _logger.debug('G2 = %f' % G2)\n if dof == 0:\n # dof can be 0 when levels[x] or levels[y] is 1, which is\n # the case that the values of columns x or y are all 0.\n p_val = 1\n G2 = 0\n else:\n p_val = chi2.sf(G2, dof)\n # print(\"p-value:\", p_val)\n _logger.info('p_val = %s' % str(p_val))\n\n if p_val > alpha:\n dep = 0\n else:\n dep = abs(G2)\n return p_val, dep", "def calc_dists(sposcar):\n ntot=sposcar[\"positions\"].shape[1]\n posi=np.dot(sposcar[\"lattvec\"],sposcar[\"positions\"])\n d2s=np.empty((27,ntot,ntot))\n for j,(ja,jb,jc) in enumerate(itertools.product(xrange(-1,2),\n xrange(-1,2),\n xrange(-1,2))):\n posj=np.dot(sposcar[\"lattvec\"],(sposcar[\"positions\"].T+[ja,jb,jc]).T)\n d2s[j,:,:]=scipy.spatial.distance.cdist(posi.T,posj.T,\"sqeuclidean\")\n d2min=d2s.min(axis=0)\n dmin=np.sqrt(d2min)\n degenerate=(np.abs(d2s-d2min)<1e-4)\n nequi=degenerate.sum(axis=0,dtype=np.intc)\n maxequi=nequi.max()\n shifts=np.empty((ntot,ntot,maxequi))\n sorting=np.argsort(np.logical_not(degenerate),axis=0)\n shifts=np.transpose(sorting[:maxequi,:,:],(1,2,0)).astype(np.intc)\n return (dmin,nequi,shifts)" ]
[ "0.5823131", "0.56665945", "0.5654268", "0.56067395", "0.5549059", "0.5487201", "0.5425105", "0.53680533", "0.5335588", "0.5313948", "0.53035074", "0.52599955", "0.52019894", "0.51405936", "0.5140454", "0.51258785", "0.5124321", "0.5116561", "0.5109169", "0.5090838", "0.50795203", "0.5063553", "0.50615114", "0.5056973", "0.5049992", "0.5048", "0.50420016", "0.503976", "0.5035696", "0.5033342", "0.5026322", "0.5013918", "0.5011153", "0.5003022", "0.49964067", "0.49743485", "0.4970221", "0.49689284", "0.4953794", "0.4951098", "0.49487865", "0.4944084", "0.49373916", "0.49305716", "0.4926438", "0.49172053", "0.4910228", "0.49079823", "0.4907425", "0.4906876", "0.49056464", "0.48932728", "0.48920137", "0.48880032", "0.48822868", "0.48804992", "0.48694226", "0.48676342", "0.48550153", "0.48491034", "0.4845913", "0.48429447", "0.4841743", "0.48399714", "0.48373562", "0.48359433", "0.48313448", "0.48188776", "0.48102802", "0.48041087", "0.47891673", "0.47854882", "0.4775007", "0.47702286", "0.4768614", "0.4766128", "0.47618848", "0.4760638", "0.4756227", "0.4755504", "0.47507143", "0.4748862", "0.4747832", "0.47411922", "0.47379932", "0.47285372", "0.47277746", "0.4724777", "0.47230276", "0.47192875", "0.47176936", "0.47174972", "0.47126547", "0.47109812", "0.47101566", "0.47050425", "0.47049755", "0.4704738", "0.47045273", "0.46977004" ]
0.63072735
0
Calculate DOS for a 1d system
def ldos1d(h,e=0.0,delta=0.001,nrep=3): import green if h.dimensionality!=1: raise # only for 1d gb,gs = green.green_renormalization(h.intra,h.inter,energy=e,delta=delta) d = [ -(gb[i,i]).imag for i in range(len(gb))] # get imaginary part d = spatial_dos(h,d) # convert to spatial resolved DOS g = h.geometry # store geometry x,y = g.x,g.y # get the coordinates go = h.geometry.copy() # copy geometry go = go.supercell(nrep) # create supercell write_ldos(go.x,go.y,d.tolist()*nrep) # write in file return d
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _dsurface_domega(self):\n\n dsdo = 0.\n\n return dsdo", "def get_dos(self):\n\n return self.get_array('dos')", "def dVdx(self, sys):\n dx2 = sys.positions * sys.positions - self.x0 * self.x0\n return 4 * self.A * sys.positions * dx2", "def dVdx(self, sys):\n dx = sys.positions - self.x0\n k = self.omega*self.omega*sys.mass\n return self.A*k*dx", "def _dvolume_domega(self):\n\n dvdo = ((self.I0 * self._mu_0 / (self._mu_0 + self._mu_ex)) *\n (\n 1. - np.exp(-(self.V.tau / self._mu_0) -\n (self.V.tau / self._mu_ex))\n ) * self.V.p(self.t_0, self.t_ex, self.p_0, self.p_ex,\n self.param_dict))\n\n return (1. - self.bsf) * dvdo", "def dos_integral(E,dos,m=0):\n somma = 0.0\n h = 0.5*(E[2]-E[0])\n for j in range(0,len(dos)-3,3):\n somma += 3.0*pow(E[j],m)*dos[j]+3.0*pow(E[j+1],m)*dos[j+1]+2.0*pow(E[j+2],m)*dos[j+2]\n \n return h*somma*3.0/8.0;", "def normdos(line, E_fermi):\n\tls = line.split()\n\tif len(ls) == 3:\n\t\tls[0] = float(ls[0])-E_fermi\n\t\tline = \" {: 7.3f} {} {}\\n\".format(ls[0], ls[1], ls[2])\n\treturn line", "def prog(log=False):\n s = os.statvfs('//')\n sectorSize=s[0]\n sectorTotal=s[2]\n sectorFree=s[3]\n percentage = '{0:.2f} %'.format(sectorFree/sectorTotal*100)\n if (log):\n print('■ Micropython FLASH')\n print(' Sector : {0} Bytes'.format(s[0]))\n print(' Total : {0} Sectors, {1:.4f} MB'.format(s[2],sectorSize*sectorTotal/1048576))\n print(' Free : {0} Sectors, {1:.4f} MB'.format(s[3],sectorSize*sectorFree/1048576))\n print(' Free % : {0}'.format(percentage))\n print()\n return sectorSize*sectorFree", "def calc_T_sys(nu_obs):\n return 100 * u.K + 120 * (nu_obs / (150 * u.MHz))**(-2.55) * u.K", "def cmd_calculation():", "def ddm(self):\n return hp2ddm(self.hp_angle)", "def deeming(times,signal, f0=None, fn=None, df=None, norm='amplitude'): \n #-- initialize variables for use in Fortran routine\n nf=int((fn-f0)/df+0.001)+1\n n = len(times)\n T = times.ptp()\n f1,s1 = fdeeming.deeming1(times,signal,f0,df,nf)\n s1 /= n\n fact = np.sqrt(4./n)\n fact = np.sqrt(4./n)\n if norm =='distribution': # statistical distribution\n s1 /= np.var(signal)\n elif norm == \"amplitude\": # amplitude spectrum\n s1 = fact * np.sqrt(s1)\n elif norm == \"density\": # power density\n s1 = fact**2 * s1 * T\n \n return f1,s1", "def system(p):\r\n\r\n C1, C2, C3, C4, C5, C6, C7, C8, \\\r\n C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19,C20,C21,C22, \\\r\n C23, C24, C25, C26, C27, C28, C29, C30, C31, C32, C33, C34,\\\r\n C35, C36, C37, C38, C39, C40, C41, C42, C43, \\\r\n C44, C45, C46, C47, C48, C49, C50, C51, C52, C53, C54, C55, C56 = p\r\n\r\n C = [C1, C2, C3, C4, C5, C6, C7, C8,\r\n C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19,C20,C21,C22,\r\n C23, C24, C25, C26, C27, C28, C29, C30, C31, C32, C33, C34, C35, C36, C37,\r\n C38, C39, C40, C41, C42, C43,\r\n C44, C45, C46, C47, C48, C49, C50, C51, C52, C53, C54, C55, C56]\r\n\r\n eqs = [C[i] * (Kd[i] + Rtot - sum(C)) + Etot[i] * (sum(C) - Rtot) for i in range(n)]\r\n\r\n return eqs", "def dVdx(self, sys):\n dx = sys.positions - self.x0\n for i in range(len(dx)):\n self._local_dVdx[i] = 6.0*self.sigma[i]*dx[i]**5\n return self._local_dVdx", "def dVdx(self, sys):\n # this is independent of the position\n return self._local_dVdx", "def from_msdos(self):\n reason = \"[!] 
MS-DOS 32-bit timestamps are 8 hex characters (4 bytes)\"\n ts_type = self.ts_types['msdos']\n try:\n if not len(self.msdos) == 8 or not all(char in hexdigits for char in self.msdos):\n self.in_msdos = indiv_output = combined_output = False\n pass\n else:\n swap = ''.join([self.msdos[i:i+2] for i in range(0, len(self.msdos), 2)][::-1])\n binary = '{0:032b}'.format(int(swap, 16))\n stamp = [binary[:7], binary[7:11], binary[11:16], binary[16:21], binary[21:27], binary[27:32]]\n for val in stamp[:]:\n dec = int(val, 2)\n stamp.remove(val)\n stamp.append(dec)\n dos_year = stamp[0] + 1980\n dos_month = stamp[1]\n dos_day = stamp[2]\n dos_hour = stamp[3]\n dos_min = stamp[4]\n dos_sec = stamp[5] * 2\n if dos_year not in range(1970, 2100) \\\n or dos_month not in range(1, 13) \\\n or dos_day not in range(1, 32) \\\n or dos_hour not in range(0, 24) \\\n or dos_min not in range(0, 60) \\\n or dos_sec not in range(0, 60)\\\n or dos_day not in range(1, monthrange(dos_year, dos_month)[1]):\n self.in_msdos = indiv_output = combined_output = False\n else:\n dt_obj = dt(dos_year, dos_month, dos_day, dos_hour, dos_min, dos_sec)\n self.in_msdos = dt_obj.strftime('%Y-%m-%d %H:%M:%S.%f')\n indiv_output = str(\"{} {} Local\".format(ts_type, self.in_msdos))\n combined_output = str(\"{}{}\\t{} Local{}\".format(self.left_color, ts_type, self.in_msdos, self.right_color))\n except Exception:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.in_msdos = indiv_output = combined_output = False\n return self.in_msdos, indiv_output, combined_output, reason", "def dmaceps():\n mach_eps = 2**-(53 - 1)/2\n return mach_eps", "def Mo96(self,dc,nu):\n return 1. + (nu**2.-1.)/dc", "def calcula_desvios_padrao(self):\n self.dp_x1 = np.sqrt(self.var_x1)\n self.dp_w1 = np.sqrt(self.var_w1)\n self.dp_nq1 = np.sqrt(self.var_nq1)\n self.dp_ns1 = np.sqrt(self.var_ns1)\n self.dp_n1 = np.sqrt(self.var_n1)\n self.dp_t1 = np.sqrt(self.var_t1)\n self.dp_w1_med = np.sqrt(self.var_w1_med)\n\n self.dp_x2 = np.sqrt(self.var_x2)\n self.dp_w2 = np.sqrt(self.var_w2)\n self.dp_nq2 = np.sqrt(self.var_nq2)\n self.dp_ns2 = np.sqrt(self.var_ns2)\n self.dp_n2 = np.sqrt(self.var_n2)\n self.dp_t2 = np.sqrt(self.var_t2)\n self.dp_w2_med = np.sqrt(self.var_w2_med)", "def chao1_var_no_doubletons(singles, chao1):\n s = float(singles)\n return s*(s-1)/2 + s*(2*s-1)**2/4 - s**4/(4*chao1)", "def pdm(times, signal,f0=None,fn=None,df=None,Nbin=5,Ncover=2,\n D=0,forbit=None,asini=None,e=None,omega=None,nmax=10):\n T = times.ptp()\n n = len(times)\n \n #-- initialize variables\n xvar = signal.std()**2.\n xx = (n-1) * xvar\n nf = int((fn-f0) / df + 0.001) + 1\n f1 = np.zeros(nf,'d')\n s1 = np.zeros(nf,'d')\n \n #-- use Fortran subroutine\n #-- Normal PDM\n if D is None and asini is None:\n f1, s1 = pyscargle.justel(signal,times,f0,df,Nbin,Ncover,xvar,xx,f1,s1,n,nf)\n #-- PDM with linear frequency shift\n elif asini is None:\n f1, s1 = pyscargle.justel2(signal,times,f0,df,Nbin,Ncover,xvar,xx,D,f1,s1,n,nf)\n #-- PDM with circular binary orbit\n elif asini is not None and (e is None or e==0):\n f1, s1 = pyscargle.justel3(signal,times,f0,df,Nbin,Ncover,xvar,xx,asini,\n forbit,f1,s1,n,nf)\n #-- PDM with eccentric binary orbit\n elif e>0:\n forbit = 2*pi*forbit\n ans,bns = np.array([[__ane__(n,e),__bne__(n,e)] for n in range(1,nmax+1)]).T\n ksins = np.sqrt(ans**2*np.cos(omega)**2+bns**2*np.sin(omega)**2)\n thns = np.arctan(bns/ans*np.tan(omega))\n tau = -np.sum(bns*np.sin(omega))\n 
f1, s1 = pyscargle.justel4(signal,times,f0,df,Nbin,Ncover,xvar,xx,asini,\n forbit,e,omega,ksins,thns,tau,f1,s1,n,nf,nmax)\n \n \n #-- it is possible that the first computed value is a none-variable\n if not s1[0]: s1[0] = 1. \n \n return f1, s1", "def get_wd_phys(sed_name):\n new_name = sed_name.replace('.','_').split('_')\n teff = float(new_name[-2])\n if new_name[1]!='He':\n logg = 0.1*float(new_name[2])\n else:\n logg = 0.1*float(new_name[3])\n\n return teff, -999.0, logg", "def get_dev_count_for_disk_bus(disk_bus):\n\n if disk_bus == \"ide\":\n return 4\n else:\n return 26", "def get_do(self) -> float:\n try:\n datalist = self.get_data()\n data = datalist[0]\n if data.endswith('\\x00'):\n data = data.rstrip('\\x00')\n return float(data)\n else:\n return float(data)\n except Exception as err:\n print(f'get_do error: {err}')\n return -1", "def d1_spline(self, x):\n return (-x + x ** 3 - (5 * x ** 4) / 12) * (self.unit_step(x) - self.unit_step(x - 1)) + \\\n (5 / 8 - (7 * x) / 2 + (15 * x ** 2) / 4 - (3 * x ** 3) / 2 + (5 * x ** 4) / 24) * (\n self.unit_step(x - 1) - self.unit_step(x - 2)) + \\\n (-27 / 8 + (9 * x) / 2 - (9 * x ** 2) / 4 + x ** 3 / 2 - x ** 4 / 24) * (\n self.unit_step(x - 2) - self.unit_step(x - 3))", "def dVdx(self, sys):\n return self._dfdx_fcn(self.pes1.dVdx(sys), self.pes2.dVdx(sys))", "def dof_satt(self):\n d1 = self.d1\n d2 = self.d2\n # this follows blindly the SPSS manual\n # except I use ``_var`` which has ddof=0\n sem1 = d1._var / (d1.nobs - 1)\n sem2 = d2._var / (d2.nobs - 1)\n semsum = sem1 + sem2\n z1 = (sem1 / semsum) ** 2 / (d1.nobs - 1)\n z2 = (sem2 / semsum) ** 2 / (d2.nobs - 1)\n dof = 1.0 / (z1 + z2)\n return dof", "def dms(self):\n return hp2dms(self.hp_angle)", "def dcgain(sys):\n return sys.dcgain()", "def pdf(self, grid, dataSegment):\n return (grid[0] ** dataSegment[0]) * (np.exp(-grid[0])) / (np.math.factorial(dataSegment[0]))", "def dVdx(self, sys):\n dx = sys.positions - self.x0\n exp_part = self.A*np.exp(-np.dot(self.alpha, np.multiply(dx, dx)))\n for i in range(len(dx)):\n self._local_dVdx[i] = -2*self.alpha[i]*dx[i]*exp_part\n return self._local_dVdx", "def work_dos():\n #potential = 2x**2+x**2y+y**2\n x1,y1 = (2, -3)\n x2,y2 = (-1, 2)\n p1 = (2*(x1**2)) + ((x1**2)*y1) + (y1**2)\n p2 = (2*(x2**2)) + ((x2**2)*y2) + (y2**2)\n sol = p1 - p2\n sol = abs(sol)\n print(f'The vector field F=(4x+2xy,x2+2y) \\n'\n 'along the curve C parametrized by r(t)=(3t−1,−5t+2) \\n '\n f'for 0 ≤ t ≤ 1 is: {sol}')", "def decoherence(self,system):\n for i in range(self.n):\n for j in range(i,self.n):\n for item in self.decoherence_matrix[i][j]:\n tmp=Expolist([Expo(item[2],0)])\n t = int(self.index(item[0],item[1]))\n system[int(self.index(i,j))][t]+=tmp\n return system", "def pdf(self, grid, dataSegment):\n return np.exp(-(dataSegment[0] ** 2.) / (2. * grid[0] ** 2.) - .5 * np.log(2. 
* np.pi * grid[0] ** 2.))", "def densidad(qe):\r\n global x,rhoe,rhoi,dx,nparticulas,npuntos_malla,pared_izquierda,pared_derecha\r\n \r\n j1 = sp.dtype(sp.int32) # Asegura que la variable permanezca entera\r\n j2 = sp.dtype(sp.int32) \r\n \r\n # Factor de ponderacion de carga \r\n re = qe/dx \r\n # Densidad electronica \r\n rhoe = sp.zeros(npuntos_malla+1) \r\n # Mapa de cargas sobre la malla\r\n for i in range(nparticulas):\r\n xa = x[i]/dx # xparticula/dx\r\n j1 = int(xa) # indices de la malla fija xmalla/dx\r\n j2 = j1 + 1 # Siguiente punto en la malla\r\n f2 = xa - j1 # |xmalla - xparticula|/dx\r\n f1 = 1.0 - f2\r\n rhoe[j1] = rhoe[j1] + re*f1\r\n rhoe[j2] = rhoe[j2] + re*f2\r\n\r\n # Condiciones de frontera periodica\r\n rhoe[0] += rhoe[npuntos_malla]\r\n rhoe[npuntos_malla] = rhoe[0]\r\n \r\n # Se agrega una densidad de iones neutral\r\n rhoi = rho0\r\n\r\n return True", "def pdf(self, grid, dataSegment):\n return np.exp(\n -((dataSegment[0] - grid[0]) ** 2.) / (2. * grid[1] ** 2.) - .5 * np.log(2. * np.pi * grid[1] ** 2.))", "def generate_dos_str(material_id):\n out=mpr.get_dos_by_material_id(material_id)\n str_output=str(out)\n split_dos=str_output.split('\\n')\n #extract the needed content\n for line in split_dos:\n if line[0]=='-':\n splitting_index=split_dos.index(line)\n break\n needed_dos=split_dos[splitting_index+1:]\n return needed_dos", "def Delta(z):\n return (18*np.pi**2 - 82*cosmology.Ode(z) - 39*cosmology.Ode(z)**2) / cosmology.Om(z)", "def measure(self):\n if(self.c1):\n self.c1.destroy()\n self.c1=None\n if(self.checkbcclock(0)):\n output=self.vb.io.execute(self.cmd,log=\"out\",applout=\"<>\")\n print 'output=',output\n if output[len(output)-1] != '0':\n self.vb.io.write('Error in measurephase.c')\n xy=self.xy(output)\n max=self.finddelay(xy)\n self.c1=Graph(self.f1,x0=0.,y0=0.,xgraph=32.,nxtick=8,\n ygraph=260.,nytick=13)\n self.c1.plot(xy,'red')\n self.c1.xlabel(text='Delay [ns]')\n self.c1.ylabel(text=self.ytitle)\n self.c1.pack()\n self.en.setEntry(str(max))\n self.c1.update_idletasks()\n self.saveauto()", "def part1(input):\n ps = PlanetSystem(input)\n for i in range(3):\n ps.simulate_dimension(i, 1000)\n return ps.total_energy", "def mcintosh_d(counts):\n u = sqrt((counts*counts).sum())\n n = counts.sum()\n return (n-u)/(n-sqrt(n))", "def test_psystem4():\n\n psys = PSystem(\"F{M{FMM}}\", [Membrane(\"n[[F]]+Fn\", [(\"-\", \"+m\"),(\"+\", \"+\")], [Membrane(\"FCnF\", [(\"M\", \"nF\"),(\"F\", \"+N\")], []),Membrane(\"C\", [(\"-\", \"{-+}\"),(\"-\", \"nF{F}\")], [])]),Membrane(\"{[{[m]}]}\", [(\"N\", \"MF\")], [])], 1)\n\n return psys", "def ddm(self):\n return dec2ddm(self.dec_angle)", "def pdf(self, grid, dataSegment):\n return np.exp(-((dataSegment[1] - grid[0] * dataSegment[0]) ** 2.) / (2. * grid[1] ** 2.) - .5 * np.log(\n 2. 
* np.pi * grid[1] ** 2.))", "def sdi_4_20():\n # 201 River ght to analog output (0-16 feet)\n f = measure(1).value / 16 * 16 + 4 # yes f/16*16 = f, but consistency\n\n # 202 AVM canal flow to analog output (0-30 cfs)\n g = measure(2).value / 30 * 16 + 4\n\n # 201a AVM fish release flow to analog output (0-20 cfs)\n h = measure(3).value / 20 * 16 + 4\n\n # Air Temp to analog output (0-130 deg F)\n i = measure(4).value / 130 * 16 + 4\n\n # Stilling Well Water Temp to analog output (0-100 deg F)\n j = measure(5).value / 100 * 16 + 4\n\n \"\"\"\n use extended command to set analog output values \n The sdi12 address of the river (201) H4161 is set to \"2\"\n The sdi12 address of the Canal AVM (202) H4161 is set to \"3\"\n The sdi12 address of the Fish Release AVM (201a) H4161 is set to \"4\"\n The sdi12 address of the Air temp H4161 is set to \"5\"\n The sdi12 address of the Water Tmep H4161 is set to \"6\"\n \n delays of 100 milliseconds are used to allow adequate time for writing to the SDI12 bus\n \"\"\"\n right_digits = 1 # as per H4161 manual examples\n\n cmd = '2XSM{0:.{1}f}!'.format(f, right_digits)\n sdi_send_command_get_reply(cmd)\n utime.sleep(0.1)\n\n cmd = '3XSM{0:.{1}f}!'.format(g, right_digits)\n sdi_send_command_get_reply(cmd)\n utime.sleep(0.1)\n\n cmd = '4XSM{0:.{1}f}!'.format(h, right_digits)\n sdi_send_command_get_reply(cmd)\n utime.sleep(0.1)\n\n cmd = '5XSM{0:.{1}f}!'.format(i, right_digits)\n sdi_send_command_get_reply(cmd)\n utime.sleep(0.1)\n\n cmd = '6XSM{0:.{1}f}!'.format(j, right_digits)\n sdi_send_command_get_reply(cmd)\n utime.sleep(0.1)", "def DM_freqs(self, masses=None):\n ew, ev = np.linalg.eig(self.DM(masses=masses))\n # this used to be over 2pi; I don't know where the 2 went, but it seems\n # to be gone now...\n return np.sqrt(np.abs(ew)) / (np.pi)", "def harmonic(dist,spring,d=1):\n if dist <= d:\n return 0\n \n return 0.5*spring*(dist-1)", "def part2(input):\n ps = PlanetSystem(input)\n c = ps.total_cycle_time()\n return c", "def find_large_separation(self):\n\n x = self.modes['n'] # radial order\n y = self.modes['freq'] # frequency\n wid = (0.66*self.numax**0.88)/2/np.sqrt(2*np.log(2.0))\n w = (np.exp((-(y-self.numax)**2)/(2*wid**2))) # weight\n\n mN = np.sum(w)*np.sum(w*x*y) - np.sum(w*x)*np.sum(w*y)\n D = np.sum(w)*np.sum(w*x**2) - np.sum(w*x)**2\n Dn = mN/D\n #print Dn\n\n return Dn", "def d1(self):\r\n numerator = math.log(self.s/self.x) + (self.rf-self.div+self.sigma**2*0.5)*self.t # Numerator of d1\r\n denominator = self.sigma * self.t**0.5 # Denominator of d1\r\n \r\n return numerator/denominator", "def to_msdos(self):\n ts_type = self.ts_types['msdos']\n try:\n dt_obj = duparser.parse(self.timestamp)\n year = '{0:07b}'.format(dt_obj.year - 1980)\n month = '{0:04b}'.format(dt_obj.month)\n day = '{0:05b}'.format(dt_obj.day)\n hour = '{0:05b}'.format(dt_obj.hour)\n minute = '{0:06b}'.format(dt_obj.minute)\n seconds = '{0:05b}'.format(int(dt_obj.second / 2))\n hexval = str(struct.pack('>I', int(year + month + day + hour + minute + seconds, 2)).hex())\n self.out_msdos = ''.join([hexval[i:i+2] for i in range(0, len(hexval), 2)][::-1])\n ts_output = str(\"{}\\t{}\".format(ts_type, self.out_msdos))\n except Exception:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.out_msdos = ts_output = False\n return self.out_msdos, ts_output", "def dos_into_string(needed_dos):\n revise_dos=[]\n for strings in needed_dos:\n new_string=strings[11:]\n new_string=new_string.split(' 
')\n new_string_1=[]\n for strs in new_string:\n if strs!='':\n new_string_1.append(strs)\n empty_value=''\n for value in new_string_1:\n changed_value=Decimal(value).quantize(Decimal('.000001'))\n empty_value=empty_value+str(changed_value)+' '\n new_string_2=empty_value[:len(empty_value)-1]\n if strings[6]==' ':\n string_element=strings[5]\n elif strings[7]==' ':\n string_element=strings[5:7]\n elif strings[8]==' ':\n string_element=strings[5:8]\n else:\n string_element=strings[5:9]\n new_string_2=new_string_2+' '+string_element\n new_string_2=new_string_2.replace(' ',' ')\n revise_dos.append(new_string_2)\n return revise_dos", "def dgdy(self, X):\n \n return 3*X[1]**2", "def pdf(self, grid, dataSegment):\n return np.exp(-((dataSegment[0, 0] - grid[0]) ** 2.) / (2. * dataSegment[0, 1] ** 2.) -\n .5 * np.log(2. * np.pi * dataSegment[0, 1] ** 2.))", "def _icmf(self, ms):\n return self._pot.a * numpy.sqrt(ms) / (1 - numpy.sqrt(ms))", "def ddspmt(t):\n return (spmt(t) - _spm_dd_func(t)) / 0.01", "def DirDE():\n\n global Asm\n\n if dec.Asm.Parse_Pointer == 0:\n # No parameter given\n errors.DoError('missoper', False)\n dec.Asm.New_Label = ''\n return\n\n register = -1\n reg = assem.GetWord().upper()\n if (len(reg) == 2 or len(reg) == 3) and reg[0] == 'R':\n # Can it be a register name? Must be 2 or 3 chars long and start with R\n reg = reg[1:]\n if reg.isdigit:\n # The register number must be numeric of course\n if len(reg) == 1 or reg[0] != '0':\n # It is numeric, without a leading 0\n register = int(reg)\n if register < 0 or register > 31:\n # It is not a legal register\n errors.DoError('badoper', False)\n dec.Asm.New_Label = ''\n else:\n # It is a legal register, set it's value\n dec.Asm.BOL_Address = register\n dec.Asm.List_Address = register\n dec.Asm.Mnemonic = '.SE' # Handle rest like .SE\n\n # Ignore more parameters this time (like .EQ).", "def get_mach_number(self, velocity):\n\t\tma_num = velocity / self.Speed_of_sound\n\t\treturn ma_num", "def reldev(self, lcdm, mg):\n return 100. * (mg - lcdm) / lcdm", "def pdf(self, x):\n\t\treturn 1.5 * np.power(x,2) ##obtained after finding z from integrating x^2 from -1 to 1", "def cmdTime_cycles(cmd):\n opcode = MemorySequence.getOpcode(cmd)\n # noOp, fiber 0 out, fiber 1 out, start/stop timer, sram start addr,\n # sram end addr\n if opcode in [0x0, 0x1, 0x2, 0x4, 0x8, 0xA]:\n return 1\n #branch to start\n elif opcode == 0xF:\n return 2\n #delay\n elif opcode == 0x3:\n return MemorySequence.getAddress(cmd) + 1\n #run sram\n elif opcode == 0xC:\n # TODO: Incorporate SRAMoffset when calculating sequence time.\n # This gives a max of up to 12 + 255 us\n return 25*12 # maximum SRAM length is 12us, with 25 cycles per us\n else:\n raise Exception(\"Unknown opcode: %s address: %s\" % (opcode, MemorySequence.getAddress(cmd)))", "def fs2ps2D(px, s):\n\t\tsfun = psarclength(px)\t\n\t\treturn sfun-s", "def pdf(self, grid, dataSegment):\n r = grid[0]\n s = grid[1]\n sScaled = s*np.sqrt(1 - r**2.)\n return np.exp(-((dataSegment[1] - r * dataSegment[0]) ** 2.) / (2. * sScaled ** 2.) - .5 * np.log(\n 2. * np.pi * sScaled ** 2.))", "def ddm(self):\n if self.positive:\n return DDMAngle(self.degree, self.minute + (self.second/60))\n else:\n return -DDMAngle(self.degree, self.minute + (self.second/60))", "def runCosmicEmu(self, sig8):\n # first run in camb\n\n #c_lin = copy.copy(self)\n #c_lin.\n\n a = 1./(self.cp.transfer_redshift[0] + 1)\n if (a < 0.5)|(a > 1.):\n print 'Warning! 
outside range of z accuracy (1 - 0).'\n \n scalefax=N.arange(0.5,1.001,0.1)\n coltoreturn = N.where(N.abs(a - scalefax) == min(N.abs(a - scalefax)))[0][0]\n print 'returning results at a=',scalefax[coltoreturn]\n f = file('emu.in','w')\n f.write('emu.out\\n')\n f.write(str(self.cp.omch2+self.cp.ombh2)+'\\n')\n f.write(str(self.cp.ombh2)+'\\n')\n f.write(str(self.cp.scalar_spectral_index[0])+'\\n')\n f.write(str(sig8)+'\\n')\n f.write(str(self.cp.w)+'\\n')\n f.write('2\\n')\n f.close()\n \n os.system('/Users/neyrinck/CosmicEmu_v1.0/emu.exe < emu.in > emu.err')\n\n # read little h\n f = open('emu.out','r')\n for i in range(6):\n dumb=f.readline()\n littleh = float(f.readline().split()[-1])\n self.cp.hubble = 100.*littleh\n print 'littleh changed to ',littleh\n\n f.close()\n \n emu = N.loadtxt('emu.out')\n kemu = emu[:,0]/littleh # should now be in h/Mpc\n pemu = emu[:,coltoreturn+1]*littleh**3 # should now be in (Mpc/h)^3\n if self.cp.transfer_k_per_logint == 0:\n self.cp.transfer_k_per_logint = 512.\n \n #need to get into log-scale k\n self.k = kemu[0]*10.**N.arange(0.,N.log10(kemu[-1]/kemu[0]),1./self.cp.transfer_k_per_logint)\n\n interpemu = SI.interp1d(N.log(kemu),N.log(pemu))#,kind='cubic')\n #self.pk = interpemu(self.k)\n self.pk = N.exp(interpemu(N.log(self.k)))\n #self.pk = utils.splineIntLinExt(pemu, kemu, self.k)\n\n self.logk = 1.*N.log(self.k)\n self.logpk = 1.*N.log(self.pk)\n\n #self.kextend(-5,3,calcsplinecoeff=True)\n\n self.pkSplineCoeff = SS.cspline1d(self.logpk)\n\n return", "def _calculate_disk_io(self):\n current_os = platforms.OperatingSystem.Current()\n if current_os == platforms.OperatingSystem.LINUX:\n disk_start = self._disk_counters_start\n disk_end = get_disk_counters()\n # Read and write time are the 5th and 6th elements of the stat tuple.\n return (sum([stat[4] + stat[5] for stat in disk_end.values()]) -\n sum([stat[4] + stat[5] for stat in disk_start.values()]))\n return UNSET", "def getDisc(self, times):\n return np.exp(self.getLnDisc(times = times))", "def d_delayS_d_par(self,par):\n e = self.ecc()\n cE = np.cos(self.E())\n sE = np.sin(self.E())\n sOmega = np.sin(self.omega())\n cOmega = np.cos(self.omega())\n TM2 = self.M2.value*Tsun\n\n logNum = 1-e*cE-self.SINI*(sOmega*(cE-e)+\n (1-e**2)**0.5*cOmega*sE)\n dTM2_dpar = self.prtl_der('TM2',par)\n dsDelay_dTM2 = -2*np.log(logNum)\n decc_dpar = self.prtl_der('ecc',par)\n dsDelay_decc = -2*TM2/logNum*(-cE-self.SINI*(-e*cOmega*sE/np.sqrt(1-e**2)-sOmega))\n dE_dpar = self.prtl_der('E',par)\n dsDelay_dE = -2*TM2/logNum*(e*sE-self.SINI*(np.sqrt(1-e**2)*cE*cOmega-sE*sOmega))\n domega_dpar = self.prtl_der('omega',par)\n dsDelay_domega = -2*TM2/logNum*self.SINI*((cE-e)*cOmega-np.sqrt(1-e**2)*sE*sOmega)\n dSINI_dpar = self.prtl_der('SINI',par)\n dsDelay_dSINI = -2*TM2/logNum*(-np.sqrt(1-e**2)*cOmega*sE-(cE-e)*sOmega)\n return dTM2_dpar*dsDelay_dTM2 + decc_dpar*dsDelay_decc + \\\n dE_dpar*dsDelay_dE +domega_dpar*dsDelay_domega + \\\n dSINI_dpar*dsDelay_dSINI", "def mdct(x, odd=True):\n return numpy.real(cmdct(x, odd=odd)) * numpy.sqrt(2)", "def align(): # open EH and fast shutter\n\t#marAuxiliary.closeMarShield()\n\td2in()\n\td3in()\n\tsh('o')", "def get_MSD(block1, block2):\n #print(block1.shape)\n #print(block2.shape)\n return sum(sum(abs(block1 - block2) ** 2))", "def general(self):\n return -self.line[[0, 2]] / self.line[1]", "def command(self):\n # These are pset variables, (aside from sfh)\n dt = [(\"zred\", \"%.2f\"), (\"zmet\", \"%02i\"), (\"tau\", \"%.10f\"),\n (\"const\", \"%.4f\"), (\"sf_start\", \"%.2f\"), 
(\"tage\", \"%.4f\"),\n (\"fburst\", \"%.4f\"), (\"tburst\", \"%.4f\"), (\"imf1\", \"%.2f\"),\n (\"imf2\", \"%.2f\"), (\"imf3\", \"%.2f\"), (\"vdmc\", \"%.2f\"),\n (\"mdave\", \"%.1f\"), (\"dust_tesc\", \"%.2f\"), (\"dust1\", \"%.6f\"),\n (\"dust2\", \"%.6f\"), (\"dust_clumps\", \"%.1f\"),\n (\"frac_nodust\", \"%.2f\"), (\"dust_index\", \"%.2f\"),\n (\"mwr\", \"%.2f\"), (\"uvb\", \"%.2f\"), (\"wgp1\", \"%i\"),\n (\"wgp2\", \"%i\"), (\"wgp3\", \"%i\"), (\"dell\", \"%.2f\"),\n (\"delt\", \"%.2f\"), (\"sbss\", \"%.2f\"), (\"fbhb\", \"%.2f\"),\n (\"pagb\", \"%.2f\")]\n cmd = str(self.name) + \" \" + \" \".join([s % self.p[k] for (k, s) in dt])\n return cmd", "def _calculate_monomer(self, raw=False):\n ta = self.TimeAxis\n # transition frequency\n om = self.system.elenergies[1]-self.system.elenergies[0]\n # transition dipole moment\n dm = self.system.dmoments[0,1,:]\n # dipole^2\n dd = numpy.dot(dm,dm)\n # natural life-time from the dipole moment\n gama = [0.0] #[-1.0/self.system.get_electronic_natural_lifetime(1)]\n sbi = self.system.get_SystemBathInteraction(self.TimeAxis)\n reorg = sbi.CC.get_reorganization_energy(0,0)\n \n if self.system._has_system_bath_coupling:\n # correlation function\n ct = self.system.get_egcf((0,1)) \n gt = self._c2g(ta,ct.data)\n tr = {\"ta\":ta,\"dd\":dd,\"om\":om-self.rwa,\"ct\":ct,\"gt\":gt,\"gg\":gama,\"fwhm\":0.0}\n else:\n tr = {\"ta\":ta,\"dd\":dd,\"om\":om-self.rwa,\"gg\":gama,\"fwhm\":0.0}\n \n if self._gauss_broad:\n tr[\"fwhm\"] = self.gauss\n\n tr[\"re\"] = reorg\n\n if self._gauss_broad:\n tr[\"fwhm\"] = self.gauss\n\n # calculates the one transition of the monomer \n data = numpy.real(self.one_transition_spectrum_abs(tr))\n data_fl = numpy.real(self.one_transition_spectrum_fluor(tr))\n\n \n for ii in range(2,self.system.Nb[1]+1):\n \n # transition frequency\n om = self.system.elenergies[ii]-self.system.elenergies[0]\n # transition dipole moment\n dm = self.system.dmoments[0,ii,:]\n # dipole^2\n dd = numpy.dot(dm,dm)\n # natural life-time from the dipole moment\n gama = [0.0] #[-1.0/self.system.get_electronic_natural_lifetime(ii)]\n \n if self.system._has_system_bath_coupling:\n # correlation function\n ct = self.system.get_egcf((0,ii)) \n gt = self._c2g(ta,ct.data)\n tr = {\"ta\":ta,\"dd\":dd,\"om\":om-self.rwa,\"ct\":ct,\"gt\":gt,\"gg\":gama,\"fwhm\":0.0}\n else:\n tr = {\"ta\":ta,\"dd\":dd,\"om\":om-self.rwa,\"gg\":gama,\"fwhm\":0.0}\n\n if self._gauss_broad: \n tr[\"fwhm\"] = self.gauss\n \n if self._gauss_broad:\n tr[\"fwhm\"] = self.gauss\n \n data += numpy.real(self.one_transition_spectrum_abs(tr))\n\n # we only want to retain the upper half of the spectrum\n Nt = len(self.frequencyAxis.data)//2 \n do = self.frequencyAxis.data[1]-self.frequencyAxis.data[0]\n st = self.frequencyAxis.data[Nt//2]\n # we represent the Frequency axis anew\n axis = FrequencyAxis(st,Nt,do)\n\n # multiply the spectrum by frequency (compulsory prefactor)\n if not raw:\n data = axis.data*data\n data_fl = (axis.data**3)*data_fl\n\n \n spect_abs = LinSpectrum(axis=axis, data=data)\n fluor_spect = LinSpectrum(axis=axis, data=data_fl)\n \n return {\"abs\": spect_abs, \"fluor\": fluor_spect}", "def Seljak04_Cosmo(self,dc,nu):\n mass_non_linear = (np.argmin((self.sigmaM-dc)**2.).to(self.Msunh)).value\n Mh = (self.M.to(self.Msunh)).value\n x = Mh/self.mass_non_linear\n if len(self.bias_par.keys()) == 0:\n a = 0.53\n b = 0.39\n c = 0.45\n d = 0.13\n e = 40.\n f = 5e-4\n g = 1.5\n a1 = 0.4\n a2 = 0.3\n a3 = 0.8\n else:\n a = self.bias_par['a']\n b = self.bias_par['b']\n c = 
self.bias_par['c']\n d = self.bias_par['d']\n e = self.bias_par['e']\n f = self.bias_par['f']\n g = self.bias_par['g']\n a1 = self.bias_par['a1']\n a2 = self.bias_par['a2']\n a3 = self.bias_par['a3']\n if self.cosmo_code == 'camb':\n Om0m = self.camb_pars.omegam\n ns = self.cosmo_input_camb['ns']\n s8 = self.cosmo.get_sigma8_0()\n nrun = self.cosmo_input_camb['nrun']\n else:\n Om0m = self.cosmo.Omega0_m()\n ns = self.cosmo.n_s()\n s8 = self.cosmo.sigma8()\n try:\n nrun = self.cosmo_input_class['alpha_s']\n except:\n nrun = 0.\n return a + b*x**c + d/(e*x+1.) + f*x**g + np.log10(x)* \\\n (a1*(Om0m - 0.3 + ns - 1.) + \\\n a2*(self.s8-0.9 + self.hubble - 0.7) + a4*nrun)", "def get_dtc(self):\n r = self.sensor(1)\n num = r[0]\n # get all DTC, 3 per mesg response\n self.send_command(GET_DTC_COMMAND)\n #for i in range(0, ceil(num/3.0)):\n res = self.get_result()\n print res\n return res\n # fixme: finish", "def pdf(self, grid, dataSegment):\n return self.density(dataSegment[0], *grid)", "def symsigma(self):\n return 1 if self.sc_mode == \"one_shot\" else 0", "def get_power():\n return float(cmd(\"pa?\"))", "def dx(self):\n return self._dx", "def dx(self):\n return self._dx", "def get_timediv(self):\n result = self.bus.ask('HOR:MAI:SCA?')\n return float(result.rstrip())", "def DriveMotor():\n\n # cnt overflows at 25KHz (approximately)\n cnt = intbv(0, min = 0, max = CNT_MAX + 1)\n\n # 10-bit duty cycle\n duty_cycle = intbv(0)[10:]\n\n while True:\n yield clk25.posedge, rst_n.negedge\n if rst_n == LOW:\n cnt[:] = 0\n duty_cycle[:] = 0\n dir.next = HIGH_OPTO\n pwm.next = LOW_OPTO\n en_n.next = LOW_OPTO\n else:\n # accept new consign at the beginning of a period\n if cnt == 0:\n # extract duty cycle and direction\n if speed >= 0:\n duty_cycle[:] = speed\n dir.next = HIGH_OPTO\n elif -speed >= CNT_MAX: # handle -1024 case\n duty_cycle[:] = CNT_MAX\n dir.next = LOW_OPTO\n else:\n duty_cycle[:] = -speed\n dir.next = LOW_OPTO\n\n # reached consign?\n if cnt >= duty_cycle:\n pwm.next = LOW_OPTO\n else:\n pwm.next = HIGH_OPTO\n\n if cnt == CNT_MAX:\n cnt[:] = 0\n else:\n cnt += 1\n\n en_n.next = LOW_OPTO", "def _calculate_system(self) -> None:\n self.y = solve_ode(\n derivative,\n self.y0,\n self.t,\n self.g,\n self.pendulum1,\n self.pendulum2\n )\n\n # Calculate individual pendulum paths\n self.pendulum1.calculate_path(\n theta=self.y[:, 0],\n dtheta=self.y[:, 1]\n )\n self.pendulum2.calculate_path(\n theta=self.y[:, 2],\n dtheta=self.y[:, 3],\n x0=self.pendulum1.x,\n y0=self.pendulum1.y\n )\n\n self.w = self.y[:, 1]\n self.df = pd.DataFrame(\n self.y,\n columns=[\"theta1\", \"dtheta1\", \"theta2\", \"dtheta2\"]\n )", "def _calpara(self):\n self.up = math.exp(self.sigma*math.sqrt(self.deltatime))\n self.down = math.exp(-self.sigma*math.sqrt(self.deltatime))\n self.upprob = (math.exp((self.r-self.d)*self.deltatime)-self.down)/(self.up-self.down)", "def freqdomain(self):\n \n\n #self.df = self.f[1] - self.f[0]\n #frequency vector\n #fv = fftshift(fftfreq(len(eta),1./fs))\n #fv = fv[len(fv)/2:]\n \n #spectral analysis\n self.sn1 = self.espec1(self.n1)\n self.sn2 = self.espec1(self.n2)\n self.sn3 = self.espec1(self.n3)\n self.sn12 = self.espec2(self.n1,self.n2)\n self.sn13 = self.espec2(self.n1,self.n3)\n self.sn23 = self.espec2(self.n2,self.n3)\n \n #delta freq\n self.df = self.f[3] - self.f[2]\n\n #calculo do numero de onda\n #self.wavenumber()\n #k = numeronda(h,f,len(f))\n #k = np.array(k)\n\n #calculo dos coeficientes de fourier - NDBC 96_01 e Steele (1992)\n c = self.sn2[:,1] + self.sn3[:,1]\n cc = 
np.sqrt(self.sn1[:,1] * (c))\n \n self.a1 = self.sn12[:,3] / cc\n self.b1 = self.sn13[:,3] / cc\n \n self.a2 = (self.sn2[:,1] - self.sn3[:,1]) / c\n self.b2 = 2 * self.sn12[:,2] / c\n \n #calcula direcao de onda\n #mean direction\n self.dire1 = np.array([np.angle(np.complex(self.b1[i],self.a1[i]),deg=True) for i in range(len(self.a1))])\n \n #principal direction\n self.dire2 = 0.5 * np.array([np.angle(np.complex(self.b2[i],self.a2[i]),deg=True) for i in range(len(self.a2))])\n \n #condicao para valores maiores que 360 e menores que 0\n self.dire1[np.where(self.dire1 < 0)] = self.dire1[np.where(self.dire1 < 0)] + 360\n self.dire1[np.where(self.dire1 > 360)] = self.dire1[np.where(self.dire1 > 360)] - 360\n self.dire2[np.where(self.dire2 < 0)] = self.dire2[np.where(self.dire2 < 0)] + 360\n self.dire2[np.where(self.dire2 > 360)] = self.dire2[np.where(self.dire2 > 360)] - 360\n \n #acha o indice da frequencia de pico\n ind = np.where(self.sn1[:,1] == np.max(self.sn1[:,1]))[0]\n \n #periodo de pico\n self.tp = (1. / self.f[ind])[0]\n \n #momento espectral de ordem zero total - m0\n self.m0 = np.sum(self.sn1[:,1]) * self.df\n \n #calculo da altura significativa\n self.hm0 = 4.01 * np.sqrt(self.m0)\n \n #direcao do periodo de pico\n self.dp = self.dire1[ind][0]\n \n #Espalhamento direcional\n #Formula do sigma1 do livro Tucker&Pitt(2001) \"Waves in Ocean Engineering\" pags 196-198\n c1 = np.sqrt(self.a1 ** 2 + self.b1 ** 2)\n c2 = np.sqrt(self.a2 ** 2 + self.b2 ** 2)\n \n s1 = c1 / (1-c1)\n s2 = (1 + 3 * c2 + np.sqrt(1 + 14 * c2 + c2 ** 2)) / (2 * (1 - c2))\n \n self.sigma1 = np.sqrt(2 - 2 * c1) * 180 / np.pi\n self.sigma2 = np.sqrt((1 - c2) / 2) * 180 / np.pi\n \n self.sigma1p = np.real(self.sigma1[ind])[0]\n self.sigma2p = np.real(self.sigma2[ind])[0]\n \n # pondaf = np.array([hm0, tp, dp, sigma1p, sigma2p])\n \n #hm0, tp, dp, sigma1, sigma2, sigma1p, sigma2p, f, df, k, sn, snx, sny, snn, snnx, snny, snxny, snxnx, snyny, a1, b1, a2, b2, dire1, dire2\n #return hm0, tp, dp, sigma1, sigma2, sigma1p, sigma2p, f, df, k, sn, snx, sny, snn, snnx, snny, snxny, snxnx, snyny, a1, b1, a2, b2, dire1, dire2", "def dd_cmd(server, client, line):\n header = \"\\x7f\\x45\\x4c\\x46\\x01\\x01\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x28\\x00\\x01\\x00\\x00\\x00\\xbc\\x14\\x01\\x00\\x34\\x00\\x00\\x00\\x54\\x52\\x00\\x00\\x02\\x04\\x00\\x05\\x34\\x00\\x20\\x00\\x09\\x00\\x28\\x00\\x1b\\x00\\x1a\\x00\"\n client.send(header)\n client.send(\"+10 records in\\r\\n1+0 records out\\n\")\n server.logger.info(\"Sent fake DD to {}\".format(client.ip))\n client.exit_status = 0", "def get_dx(self):\n return self.__dx", "def spell_dc(self):\n return 8 + self.spellcasting + self.proficiency", "def koch(d, p1, p2):\n if d == 0:\n return\n\n sx = (2 * p1[0] + p2[0]) / 3\n sy = (2 * p1[1] + p2[1]) / 3\n\n tx = (p1[0] + 2 * p2[0]) / 3\n ty = (p1[1] + 2 * p2[1]) / 3\n\n dx = tx - sx\n dy = ty - sy\n\n ux = dx * c60 - dy * s60 + sx\n uy = dx * s60 + dy * c60 + sy\n\n koch(d - 1, p1, (sx, sy))\n print(\"{0:.8f} {1:.8f}\".format(sx, sy))\n\n koch(d - 1, (sx, sy), (ux, uy))\n print(\"{0:.8f} {1:.8f}\".format(ux, uy))\n\n koch(d - 1, (ux, uy), (tx, ty))\n print(\"{0:.8f} {1:.8f}\".format(tx, ty))\n\n koch(d - 1, (tx, ty), p2)", "def calc_shell_HEX(NTU, cr):\n eff = 2 * ((1 + cr + (1 + cr ** 2) ** (1 / 2)) * (\n (1 + scipy.exp(-(NTU) * (1 + cr ** 2))) / (1 - scipy.exp(-(NTU) * (1 + cr ** 2))))) ** -1\n return eff", "def D(z):\n k=0.01 #Our choice of large-scale mode\n mPk=cosmo.pk(k,z)\n mPk_norm=cosmo.pk(k,0) 
#Normalize at z=0\n D=np.sqrt(mPk/mPk_norm)\n return D", "def _ms_reg(self):\n return self._ss_reg / self._df_reg", "def _div_helper(one, other):\n if isinstance(one, Dyadic) and isinstance(other, Dyadic):\n raise TypeError(\"Cannot divide two dyadics\")\n elif isinstance(one, Dyadic):\n return DyadicMul(one, Pow(other, S.NegativeOne))\n else:\n raise TypeError(\"Cannot divide by a dyadic\")", "def add_freq(self,system,nu = NOTHING):\n if nu is NOTHING: #because can't use self.xxx as default \n nu = self.nu\n for i in range(self.n):\n for j in range(i+1,self.n):\n system[self.index(i,j)][self.index(i,j)+1] -= self.interaction(i,j,nu)\n system[self.index(i,j) + 1][self.index(i,j)] += self.interaction(i,j,nu)\n return system", "def cmd_dgaus():\n cmds = []\n cmds.append(\"r_m[0.0,-1,1]\")\n cmds.append(\"r_s[1.5,0,10]\")\n cmds.append('Gaussian::gaus_c(x,r_m,r_s)')\n cmds.append(\"rt_m[0.0,-1,1]\")\n cmds.append(\"rt_s[3,0,10]\")\n cmds.append('Gaussian::gaus_t(x,rt_m,rt_s)')\n cmds.append(\"f[0.85]\") # fraction of core\n cmds.append(\"SUM::res(f*gaus_c,gaus_t)\")\n return cmds", "def get_clock_divisor(self):\n return self.o.read_register(self.dev_id, CLOCK_DIVISOR)", "def _calculate_driven_mass(self):\n\n borofloat_density = 2230 # in kg/m^3\n actuator_region = self.actuator_spacing**2 # for square regions\n self.driven_mass = borofloat_density*self.thickness*actuator_region", "def get_dacs(self):\n self.clear_in_serial_buffer()\n s = \"/DAC/run\\n\"\n self.serial.write(s)\n resp = self.serial.readline()\n regex = re.compile(\"(?:[DAC])+ \"\n \"(?P<dac0>[0-9A-Fa-f]*), \"\n \"(?P<dac1>[0-9A-Fa-f]*)\")\n m = regex.match(resp)\n dacval0 = int(m.group('dac0'),16) * self.conf['DACCONST0']\n dacval1 = int(m.group('dac1'),16) * self.conf['DACCONST1']\n return dacval0, dacval1", "def OpenDosi(filename=None):\n\tglobal dosi, spacing_dosi, dim_x_dosi, dim_y_dosi, dim_z_dosi, dosi_open, isodose_show, origin_dosi, filename_dosi\n\tdosi_swapY,dosi_swapZ = False, False\n\n\ttypes = [('All files', '*.dcm *.mhd'), ('DCM files', '*.dcm'), ('MHD files', '*.mhd')]\n\n\tif(filename==None):\tfile_path = tkFileDialog.askopenfilename(initialdir = dir_ini, filetypes = types)\n\telse:\tfile_path = filename\n\n\tfilename_dosi = file_path\n\n\tprint('Opening RD file ...')\n\n\t### .dcm file ###\n\tif(file_path.endswith('.dcm')):\n\t\tds = pydicom.read_file(file_path)\n\t\tds.file_meta.TransferSyntaxUID = pydicom.uid.ImplicitVRLittleEndian \n\t\tscaling_dosi = float(ds.DoseGridScaling)\n\t\tdosi = scaling_dosi*ds.pixel_array\n\t\tsp = ds.PixelSpacing\n\t\tspacing_dosi = [ float(ds.GridFrameOffsetVector[1] - ds.GridFrameOffsetVector[0]), float(sp[1]),float(sp[0])]\n\t\torigin_dosi = ds.ImagePositionPatient\n\t\torigin_dosi = [float(origin_dosi[2]),float(origin_dosi[1]),float(origin_dosi[0])]\n\t\tdosi_swapZ =(ds.ImageOrientationPatient[0:3] == [1, 0, 0])\n dosi_swapY =(ds.ImageOrientationPatient[3:6] == [0, 1, 0])\n\n\t\t#if ds.SeriesDescription=='PatientLETScorer [MeV/mm/(g/cm3)]':\tSetIntensityRange(dosi,0,15)\n\n\t### .mhd file ###\n\tif(file_path.endswith('.mhd')):\t\n \t\titkimage = sitk.ReadImage(file_path) \t\t\t\t# Reads the image using SimpleITK\n \t\tdosi = sitk.GetArrayFromImage(itkimage)\n\t\tspacing_dosi = np.array(list(reversed(itkimage.GetSpacing()))) \t# Read the spacing along each dimension\n\t\torigin_dosi = np.array(list(reversed((itkimage.GetOrigin()))))\t\t# Read the origin\n\t\ttext_file = open(file_path, \"r\")\n\t\ttmp = text_file.readlines()\n\t\tdosi_swap = (tmp[8][-4:-1] == 
'RAI')\n\n\tif(len(np.shape(volume))==3):\tdim_x_dosi, dim_y_dosi, dim_z_dosi = np.shape(dosi)[0], np.shape(dosi)[1], np.shape(dosi)[2]\n\n\tif(len(np.shape(volume))==2):\tdim_x_dosi, dim_y_dosi, dim_z_dosi = np.shape(dosi)[0], np.shape(dosi)[1], 1\n\n\t#print 'dosi type', dosi.dtype\n\t\n\t# Dealing with image orientation\n\tif(dosi_swapY == True):\n\t\tdosi = np.flip(dosi,1) # flip volume\n\t\torigin_dosi[1] = origin_dosi[1] + dim_y_dosi*spacing_dosi[1]\t\t\n\tif(dosi_swapZ == True):\n\t\tdosi = np.flip(dosi,2) # flip volume\n\t\torigin_dosi[2] = origin_dosi[2] + dim_z_dosi*spacing_dosi[2]\n\tif(dosi_swapY == True)and(dosi_swapZ == True):\n\t\tspacing_dosi[1], spacing_dosi[2] = spacing_dosi[2], spacing_dosi[1]\n\n print ' dosi_swapY, dosi_swapZ :', dosi_swapY, dosi_swapZ\n\n\tdosi_open = True\n\tisodose_show = True\n\tcheck1.select()\n\tUpdate_all()\n\n\tprint(' file successfully opened!')", "def dc(self):\n return np.array(self['dc'], dtype=np.float32) / 1000" ]
[ "0.57057214", "0.55718416", "0.54609364", "0.5383514", "0.5373876", "0.52895164", "0.52246433", "0.5127203", "0.5026564", "0.5012178", "0.5005394", "0.49850887", "0.49414775", "0.49326056", "0.49182996", "0.4915729", "0.49154025", "0.4866256", "0.48534217", "0.48473454", "0.48348865", "0.4808817", "0.48087686", "0.47769108", "0.47715944", "0.4752088", "0.47428706", "0.4736357", "0.47205016", "0.47159716", "0.46974415", "0.4691766", "0.46895623", "0.46846896", "0.46735227", "0.46684232", "0.4665058", "0.46643794", "0.4661871", "0.46603185", "0.46581432", "0.46547607", "0.46501514", "0.4640028", "0.46376324", "0.46347135", "0.462834", "0.46274444", "0.46252012", "0.4625163", "0.46242183", "0.4600913", "0.4594626", "0.4590505", "0.4580407", "0.45742875", "0.45739526", "0.45728907", "0.45728105", "0.45714572", "0.45682055", "0.45681638", "0.456775", "0.45643118", "0.4562969", "0.45622155", "0.45596242", "0.4554529", "0.4516589", "0.4509924", "0.45094848", "0.450656", "0.45044848", "0.45032167", "0.4499586", "0.4499084", "0.4498062", "0.44977793", "0.4496637", "0.4495606", "0.4495606", "0.4491591", "0.4479292", "0.44710082", "0.4470574", "0.446832", "0.44680578", "0.44646284", "0.44616017", "0.44566518", "0.4455767", "0.44457772", "0.4445159", "0.4441829", "0.44410506", "0.4438051", "0.44365534", "0.44301358", "0.44298968", "0.44269323", "0.44266918" ]
0.0
-1
Calculate DOS for a 2d system
def ldos2d(h,e=0.0,delta=0.001,nrep=3,nk=None,mode="green",
            random=True,num_wf=20):
  if mode=="green":
    import green
    if h.dimensionality!=2: raise # only for 1d
    if nk is not None:
      print("LDOS using normal integration with nkpoints",nk)
      gb,gs = green.bloch_selfenergy(h,energy=e,delta=delta,mode="full",nk=nk)
      d = [ -(gb[i,i]).imag for i in range(len(gb))] # get imaginary part
    else:
      print("LDOS using renormalization adaptative Green function")
      gb,gs = green.bloch_selfenergy(h,energy=e,delta=delta,mode="adaptive")
      d = [ -(gb[i,i]).imag for i in range(len(gb))] # get imaginary part
  elif mode=="arpack": # arpack diagonalization
    import klist
    if nk is None: nk = 10
    hkgen = h.get_hk_gen() # get generator
    ds = [] # empty list
    for k in klist.kmesh(h.dimensionality,nk=nk): # loop over kpoints
      print("Doing",k)
      if random:
        print("Random k-point")
        k = np.random.random(3) # random k-point
      hk = csc_matrix(hkgen(k)) # get Hamiltonian
      ds += [ldos_arpack(hk,num_wf=num_wf,robust=False,
                tol=0,e=e,delta=delta)]
    d = ds[0]*0.0 # inititlize
    for di in ds: d += di # add
    d /=len(ds) # normalize
  d = spatial_dos(h,d) # convert to spatial resolved DOS
  g = h.geometry # store geometry
  x,y = g.x,g.y # get the coordinates
  go = h.geometry.copy() # copy geometry
  go = go.supercell(nrep) # create supercell
  write_ldos(go.x,go.y,d.tolist()*(nrep**2),z=go.z) # write in file
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dVdx(self, sys):\n dx2 = sys.positions * sys.positions - self.x0 * self.x0\n return 4 * self.A * sys.positions * dx2", "def _dsurface_domega(self):\n\n dsdo = 0.\n\n return dsdo", "def dVdx(self, sys):\n dx = sys.positions - self.x0\n k = self.omega*self.omega*sys.mass\n return self.A*k*dx", "def disc_2d(self):\n for i in range(0, self.nt):\n pd = self.p.copy()\n\n self.p[1: -1, 1: -1] = (((pd[1: -1, 2:] + pd[1: -1, :-2]) * self.dy**2 +\n (pd[2:, 1: -1] + pd[:-2, 1: -1]) * self.dx**2 -\n self.b[1: -1, 1: -1] * self.dx**2 * self.dy**2) /\n (2 * (self.dx**2 + self.dy**2)))\n\n self.p[0, :] = 0\n self.p[self.grid_points_y-1, :] = 0\n self.p[:, 0] = 0\n self.p[:, self.grid_points_x-1] = 0", "def proz2D():\r\n print(\"processing: \",CURDATA()[0]) \r\n XCMD(\"apk2d\",WAIT_TILL_DONE)\r\n ABS2() #Baseline correction \r\n ABS1()", "def dVdx(self, sys):\n # this is independent of the position\n return self._local_dVdx", "def fs2ps2D(px, s):\n\t\tsfun = psarclength(px)\t\n\t\treturn sfun-s", "def dVdx(self, sys):\n dx = sys.positions - self.x0\n for i in range(len(dx)):\n self._local_dVdx[i] = 6.0*self.sigma[i]*dx[i]**5\n return self._local_dVdx", "def work_dos():\n #potential = 2x**2+x**2y+y**2\n x1,y1 = (2, -3)\n x2,y2 = (-1, 2)\n p1 = (2*(x1**2)) + ((x1**2)*y1) + (y1**2)\n p2 = (2*(x2**2)) + ((x2**2)*y2) + (y2**2)\n sol = p1 - p2\n sol = abs(sol)\n print(f'The vector field F=(4x+2xy,x2+2y) \\n'\n 'along the curve C parametrized by r(t)=(3t−1,−5t+2) \\n '\n f'for 0 ≤ t ≤ 1 is: {sol}')", "def dVdx(self, sys):\n dx = sys.positions - self.x0\n exp_part = self.A*np.exp(-np.dot(self.alpha, np.multiply(dx, dx)))\n for i in range(len(dx)):\n self._local_dVdx[i] = -2*self.alpha[i]*dx[i]*exp_part\n return self._local_dVdx", "def find_large_separation(self):\n\n x = self.modes['n'] # radial order\n y = self.modes['freq'] # frequency\n wid = (0.66*self.numax**0.88)/2/np.sqrt(2*np.log(2.0))\n w = (np.exp((-(y-self.numax)**2)/(2*wid**2))) # weight\n\n mN = np.sum(w)*np.sum(w*x*y) - np.sum(w*x)*np.sum(w*y)\n D = np.sum(w)*np.sum(w*x**2) - np.sum(w*x)**2\n Dn = mN/D\n #print Dn\n\n return Dn", "def distancia_entre_pontos(self,alvo2):\r\n dx=(self.x-alvo2.x)\r\n dy=(self.y-alvo2.y)\r\n d= (dx**2+dy**2)**(0.5)\r\n return (d)", "def koch(d, p1, p2):\n if d == 0:\n return\n\n sx = (2 * p1[0] + p2[0]) / 3\n sy = (2 * p1[1] + p2[1]) / 3\n\n tx = (p1[0] + 2 * p2[0]) / 3\n ty = (p1[1] + 2 * p2[1]) / 3\n\n dx = tx - sx\n dy = ty - sy\n\n ux = dx * c60 - dy * s60 + sx\n uy = dx * s60 + dy * c60 + sy\n\n koch(d - 1, p1, (sx, sy))\n print(\"{0:.8f} {1:.8f}\".format(sx, sy))\n\n koch(d - 1, (sx, sy), (ux, uy))\n print(\"{0:.8f} {1:.8f}\".format(ux, uy))\n\n koch(d - 1, (ux, uy), (tx, ty))\n print(\"{0:.8f} {1:.8f}\".format(tx, ty))\n\n koch(d - 1, (tx, ty), p2)", "def ddm(self):\n return hp2ddm(self.hp_angle)", "def draw(x, y):\n\t\n\t##The length of the 'dott' sequence can be adjusted and the rest of the\n\t\t#drawing will adjust itself after reloading\"\"\"\n\tdott = \" ----- \"\n\tpipe = \"|\"\n\t\n\tprint \"\\n\"\n\tif y: print dott * x + \"\\n\"\n\tfor i in xrange(y):\n\t\t#Though not very readable, the line below is responsible for determining how long\n\t\t\t#one y(vertical) cell should be and printinng as many pipes along the y axis\n\t\t\t #after considering the width of a cell(x-axis unit) \n\t\t#The initial part before the final times sign prints 1 + the number of \n\t\t\t#cells along the x axis (rows) inorder to close last cell \n\t\t\t#the calculation of the spacing of the pipes was determined after 
testing\n\t\t\t\t#for the best fit\n\t\t\n\t\tprint ((\" \"*(len(dott)-1)).join(iter(pipe*(x+1))) + \"\\n\") * (len(dott) / 2)\n\t\t\n\t\tprint dott*x + \"\\n\"", "def calcula_desvios_padrao(self):\n self.dp_x1 = np.sqrt(self.var_x1)\n self.dp_w1 = np.sqrt(self.var_w1)\n self.dp_nq1 = np.sqrt(self.var_nq1)\n self.dp_ns1 = np.sqrt(self.var_ns1)\n self.dp_n1 = np.sqrt(self.var_n1)\n self.dp_t1 = np.sqrt(self.var_t1)\n self.dp_w1_med = np.sqrt(self.var_w1_med)\n\n self.dp_x2 = np.sqrt(self.var_x2)\n self.dp_w2 = np.sqrt(self.var_w2)\n self.dp_nq2 = np.sqrt(self.var_nq2)\n self.dp_ns2 = np.sqrt(self.var_ns2)\n self.dp_n2 = np.sqrt(self.var_n2)\n self.dp_t2 = np.sqrt(self.var_t2)\n self.dp_w2_med = np.sqrt(self.var_w2_med)", "def calc_psd2d(self):\n print(\"Calculating 2D power spectral density ... \", end=\"\", flush=True)\n rows, cols = self.shape\n imgf = np.fft.fftshift(np.fft.fft2(self.image))\n # NOTE: normalize w.r.t. image size\n norm = rows * cols * self.pixel[0]**2\n self.psd2d = (np.abs(imgf) ** 2) / norm\n print(\"DONE\", flush=True)\n return self.psd2d", "def manhattenPath(position1, position2):\n dx = position2[0] - position1[0]\n dy = position2[1] - position1[1]\n return dx,dy", "def get_dos(self):\n\n return self.get_array('dos')", "def cellsize_2d(self):\t\r\n return self.dx * self.dy", "def dmaceps():\n mach_eps = 2**-(53 - 1)/2\n return mach_eps", "def dicom_cli():", "def cycleManipulatorSpace():\n validateSelect()\n current_context = pm.currentCtx()\n context_title = pm.contextInfo(current_context, t=True)\n\n if 'Move' in context_title:\n context_mode = pm.manipMoveContext('Move', q=True, mode=True)\n if context_mode == 0:\n pm.manipMoveContext('Move', edit=True, mode=context_mode + 1)\n pm.displayInfo('In Parent space.')\n elif context_mode == 1:\n pm.manipMoveContext('Move', edit=True, mode=context_mode + 1)\n pm.displayInfo('In World space.')\n else:\n pm.manipMoveContext('Move', edit=True, mode=0)\n pm.displayInfo('In Object space.')\n\n elif 'Rotate' in context_title:\n context_mode = pm.manipRotateContext('Rotate', q=True, mode=True)\n if context_mode == 0:\n pm.manipRotateContext('Rotate', edit=True, mode=context_mode + 1)\n pm.displayInfo('In World space.')\n elif context_mode == 1:\n pm.manipRotateContext('Rotate', edit=True, mode=context_mode + 1)\n pm.displayInfo('In Gimbal space.')\n else:\n pm.manipRotateContext('Rotate', edit=True, mode=0)\n pm.displayInfo('In Object space.')\n\n elif 'Scale' in context_title:\n context_mode = pm.manipScaleContext('Scale', q=True, mode=True)\n if context_mode == 0:\n pm.manipScaleContext('Scale', edit=True, mode=context_mode + 1)\n pm.displayInfo('In Parent space.')\n elif context_mode == 1:\n pm.manipScaleContext('Scale', edit=True, mode=context_mode + 1)\n pm.displayInfo('In World space.')\n else:\n pm.manipScaleContext('Scale', edit=True, mode=0)\n pm.displayInfo('In Object space.')", "def Screw(No, Pz, sDim=0, manulYN=False):\n\t\tlineNode = \"w_{}*\".format(No)\n\t\tlineN = Helper.Psline(lineNode)\n\t\tPB0 = lineN[0]\n\t\t# Helper.addFid(PB0,lableName=\"PB00\")\n\t\tPa = lineN[1]\n\t\t# Dim = Helper.estimateDim(Pz, Pa)\n\t\tLscrew = lineN[2]\n\t\tif manulYN is False:\n\t\t\tPB = PB0\n\t\t\tPT = Pa\n\t\t\tHelper.delNode(lineNode)\n\t\t\tB_T = np.linalg.norm(PB - PT)\n\t\t\tLength = 5 * (B_T // 5) - B_T\n\t\t\tscrewDim = np.around(lineN[3], 1)\n\t\t\tHelper.p2pexLine(PB, PT, Length, screwDim, \"w_{}_D:{}_L\".format(No, screwDim), \"red\")\n\t\telse:\n\t\t\tPT = Pa\n\t\t\t# Helper.addFid(PB0, lableName=\"PB0\")\n\t\t\tPB = 
Helper.probeVolume(PT, PB0)\n\t\t\t# Helper.addFid(PB)\n\t\t\t# logging.debug(\"PB:{}\".format(PB))\n\t\t\t# logging.debug(\"PT:{}\".format(PT))\n\t\t\tB_T = np.linalg.norm(PB - PT)\n\t\t\tLength = 5 * (B_T // 5) - B_T\n\t\t\tscrewDim = np.around(lineN[3], 1)\n\t\t\tHelper.delNode(lineNode)\n\t\t\tHelper.p2pexLine(PB, PT, Length, screwDim, \"w_{}_D:{}_L\".format(No, screwDim), \"red\")\n\t\treturn int(Length + B_T), screwDim, PB, PT", "def normdos(line, E_fermi):\n\tls = line.split()\n\tif len(ls) == 3:\n\t\tls[0] = float(ls[0])-E_fermi\n\t\tline = \" {: 7.3f} {} {}\\n\".format(ls[0], ls[1], ls[2])\n\treturn line", "def decoherence(self,system):\n for i in range(self.n):\n for j in range(i,self.n):\n for item in self.decoherence_matrix[i][j]:\n tmp=Expolist([Expo(item[2],0)])\n t = int(self.index(item[0],item[1]))\n system[int(self.index(i,j))][t]+=tmp\n return system", "def dVdx(self, sys):\n return self._dfdx_fcn(self.pes1.dVdx(sys), self.pes2.dVdx(sys))", "def dgdy(self, X):\n \n return 3*X[1]**2", "def align(): # open EH and fast shutter\n\t#marAuxiliary.closeMarShield()\n\td2in()\n\td3in()\n\tsh('o')", "def get_MSD(block1, block2):\n #print(block1.shape)\n #print(block2.shape)\n return sum(sum(abs(block1 - block2) ** 2))", "def reldev(self, lcdm, mg):\n return 100. * (mg - lcdm) / lcdm", "def dos_integral(E,dos,m=0):\n somma = 0.0\n h = 0.5*(E[2]-E[0])\n for j in range(0,len(dos)-3,3):\n somma += 3.0*pow(E[j],m)*dos[j]+3.0*pow(E[j+1],m)*dos[j+1]+2.0*pow(E[j+2],m)*dos[j+2]\n \n return h*somma*3.0/8.0;", "def OpenDosi(filename=None):\n\tglobal dosi, spacing_dosi, dim_x_dosi, dim_y_dosi, dim_z_dosi, dosi_open, isodose_show, origin_dosi, filename_dosi\n\tdosi_swapY,dosi_swapZ = False, False\n\n\ttypes = [('All files', '*.dcm *.mhd'), ('DCM files', '*.dcm'), ('MHD files', '*.mhd')]\n\n\tif(filename==None):\tfile_path = tkFileDialog.askopenfilename(initialdir = dir_ini, filetypes = types)\n\telse:\tfile_path = filename\n\n\tfilename_dosi = file_path\n\n\tprint('Opening RD file ...')\n\n\t### .dcm file ###\n\tif(file_path.endswith('.dcm')):\n\t\tds = pydicom.read_file(file_path)\n\t\tds.file_meta.TransferSyntaxUID = pydicom.uid.ImplicitVRLittleEndian \n\t\tscaling_dosi = float(ds.DoseGridScaling)\n\t\tdosi = scaling_dosi*ds.pixel_array\n\t\tsp = ds.PixelSpacing\n\t\tspacing_dosi = [ float(ds.GridFrameOffsetVector[1] - ds.GridFrameOffsetVector[0]), float(sp[1]),float(sp[0])]\n\t\torigin_dosi = ds.ImagePositionPatient\n\t\torigin_dosi = [float(origin_dosi[2]),float(origin_dosi[1]),float(origin_dosi[0])]\n\t\tdosi_swapZ =(ds.ImageOrientationPatient[0:3] == [1, 0, 0])\n dosi_swapY =(ds.ImageOrientationPatient[3:6] == [0, 1, 0])\n\n\t\t#if ds.SeriesDescription=='PatientLETScorer [MeV/mm/(g/cm3)]':\tSetIntensityRange(dosi,0,15)\n\n\t### .mhd file ###\n\tif(file_path.endswith('.mhd')):\t\n \t\titkimage = sitk.ReadImage(file_path) \t\t\t\t# Reads the image using SimpleITK\n \t\tdosi = sitk.GetArrayFromImage(itkimage)\n\t\tspacing_dosi = np.array(list(reversed(itkimage.GetSpacing()))) \t# Read the spacing along each dimension\n\t\torigin_dosi = np.array(list(reversed((itkimage.GetOrigin()))))\t\t# Read the origin\n\t\ttext_file = open(file_path, \"r\")\n\t\ttmp = text_file.readlines()\n\t\tdosi_swap = (tmp[8][-4:-1] == 'RAI')\n\n\tif(len(np.shape(volume))==3):\tdim_x_dosi, dim_y_dosi, dim_z_dosi = np.shape(dosi)[0], np.shape(dosi)[1], np.shape(dosi)[2]\n\n\tif(len(np.shape(volume))==2):\tdim_x_dosi, dim_y_dosi, dim_z_dosi = np.shape(dosi)[0], np.shape(dosi)[1], 1\n\n\t#print 'dosi type', 
dosi.dtype\n\t\n\t# Dealing with image orientation\n\tif(dosi_swapY == True):\n\t\tdosi = np.flip(dosi,1) # flip volume\n\t\torigin_dosi[1] = origin_dosi[1] + dim_y_dosi*spacing_dosi[1]\t\t\n\tif(dosi_swapZ == True):\n\t\tdosi = np.flip(dosi,2) # flip volume\n\t\torigin_dosi[2] = origin_dosi[2] + dim_z_dosi*spacing_dosi[2]\n\tif(dosi_swapY == True)and(dosi_swapZ == True):\n\t\tspacing_dosi[1], spacing_dosi[2] = spacing_dosi[2], spacing_dosi[1]\n\n print ' dosi_swapY, dosi_swapZ :', dosi_swapY, dosi_swapZ\n\n\tdosi_open = True\n\tisodose_show = True\n\tcheck1.select()\n\tUpdate_all()\n\n\tprint(' file successfully opened!')", "def h(pos,obj):\n return D(pos)*(distancia_nodos(pos,obj))", "def del2(f,dx,dy,dz,x=[],y=[],z=[]):\n param = read_param(quiet=True)\n gd = read_grid(quiet=True)\n if len(x) < 1:\n x = gd.x\n if len(y) < 1:\n y = gd.y\n if len(z) < 1:\n z = gd.z\n\n del2 = xder2(f,dx,x=x,y=y,z=z)\n del2 = del2 + yder2(f,dy,x=x,y=y,z=z)\n del2 = del2 + zder2(f,dz,x=x,y=y,z=z)\n\n if param.coord_system == 'cylindric':\n del2 += xder(f,dx,x=x,y=y,z=z)/x\n if param.coord_system == 'spherical':\n sin_y = N.sin(y)\n cos_y = N.cos(y)\n i_sin = N.where(N.abs(sin_y) < 1e-5)[0]\n if i_sin.size > 0:\n cos_y[i_sin] = 0.; sin_y[i_sin] = 1\n x_2, cotth = N.meshgrid(1./x**2, cos_y/sin_y)\n del2 += 2*xder(f,dx,x=x,y=y,z=z)/x +\\\n yder(f,dy,x=x,y=y,z=z)*x_2*cotth\n\n return del2", "def calc_ds_2d(slip, dx, mu, poisson = 0., expand = 0):\n\n newlen = len(slip) + 2*expand\n newslip = np.zeros(newlen)\n\n newslip[expand:expand+len(slip)] = np.copy(slip)\n \n k = np.fft.fftfreq(newlen, dx)\n\n f = np.fft.fft(newslip)\n\n f *= -mu/(1.-poisson)*np.abs(k)\n\n return np.real(np.fft.ifft(f))[expand:expand+len(slip)]", "def _dvolume_domega(self):\n\n dvdo = ((self.I0 * self._mu_0 / (self._mu_0 + self._mu_ex)) *\n (\n 1. - np.exp(-(self.V.tau / self._mu_0) -\n (self.V.tau / self._mu_ex))\n ) * self.V.p(self.t_0, self.t_ex, self.p_0, self.p_ex,\n self.param_dict))\n\n return (1. 
- self.bsf) * dvdo", "def _tangential_distortion(self, xy: np.ndarray, r2: np.ndarray) -> np.ndarray:\n # dtx = 2xy * p1 + p2 * (r^2 + 2x^2)\n # dty = p1 * (r^2 + 2y^2) + 2xy * p2\n xty = xy[:, 0] * xy[:, 1]\n dtx = 2 * xty * self.p[0] + self.p[1] * (r2 + 2 * xy[:, 0] ** 2)\n dty = self.p[0] * (r2 + 2 * xy[:, 1] ** 2) + 2 * xty * self.p[1]\n return np.column_stack((dtx, dty))", "def calc_man(width: int, height: int,\n mx: float, my: float, dmx: float, dmy: float) -> np.array:\n global buff\n\n m_pointer = ccalc._calc_man(width, height, mx, my, dmx, dmy)\n m_pointer = ctypes.cast(m_pointer, ctypes.POINTER(ctypes.c_int))\n m_points = np.ctypeslib.as_array(m_pointer, shape=(width, height, 3))\n buff.append(m_pointer)\n\n return m_points", "def dipolePotential(x,y,q1,q2, d):\n k=8.98e9; q1=1.6e-19; q2=-1.6e-19 \n Vdipole = (k*q1/sqrt(x**2+(y-(d/2))**2)) - (k*q2/sqrt(x**2+(y+(d/2))**2)) \n return Vdipole", "def DM(self, masses=None):\n N = len(self.diameters)\n rs = self.rs\n d = self.ndim\n M = np.zeros((d * N, d * N))\n\n for i in range(N):\n sigi = self.diameters[i]\n for j in range(i):\n rijvec = rs[i, :] - rs[j, :]\n rijvec = rijvec - np.around(rijvec)\n rijsq = np.sum(rijvec**2)\n dij = (sigi + self.diameters[j]) / 2\n dijsq = dij**2\n if rijsq < dijsq:\n rij = np.sqrt(rijsq)\n rijouter = np.outer(rijvec, rijvec)\n # U(r) = ½(1 - r/d)²\n # d²U/dxdy = (dr/dx)(dr/dy)/d² - (1 - r/d)(d²r/dxdy)/d\n # dr/dx = x/r\n # d²r/dxdy = -(x y) / r³\n # d²U/dxdy = -(x y)/(r² d²) + (1 - r/d)((x y)/r²)/(d r)\n # d²U/dx² = (dr/dx)²/d² - (1 - r/d)(d²r/dx²)/d\n # d²r/dx² = -x² / r³ + 1/r\n # d²U/dxᵢdxⱼ = -(xᵢ xⱼ)/(r² d²) + (1 - r/d)((xᵢ xⱼ)/r² -\n # δᵢⱼ)/(d r)\n\n Mij1 = -rijouter / rijsq / dijsq\n Mij2 = (1 - rij / dij) * \\\n (rijouter / rijsq - np.eye(d)) / rij / dij\n Mij = Mij1 + Mij2\n\n M[d * i:d * i + d, d * j:d * j + d] = Mij\n M[d * j:d * j + d, d * i:d * i + d] = Mij\n M[d * i:d * i + d, d * i:d * i + d] -= Mij\n M[d * j:d * j + d, d * j:d * j + d] -= Mij\n\n np.divide(M, self.L**2, out=M)\n if masses is None:\n return M\n\n # TODO: is the mass part of this really part of this?\n marr = np.array(masses)\n assert np.shape(masses) == np.shape(self.diameters)\n marr = np.array([masses] * d)\n marr = marr.T.flatten()\n # marr is now [m1,m1,m2,m2,...] 
(in 2D)\n mm = np.eye(d * N)\n np.multiply(mm, marr**-.5, out=mm)\n # mm is now M^-½, where M is the mass matrix\n\n mm.dot(M, out=M)\n M.dot(mm, out=M)\n return M", "def get_dev_count_for_disk_bus(disk_bus):\n\n if disk_bus == \"ide\":\n return 4\n else:\n return 26", "def dms(self):\n return hp2dms(self.hp_angle)", "def slabldos(h,energies=np.linspace(-1.0,1.0,40),delta=None,nk=40):\n if h.dimensionality!=2: raise # nope\n ds = ldosmap(h,energies=energies,delta=delta,nk=nk)\n if len(ds[0])!=len(h.geometry.z): \n print(\"Wrong dimensions\",len(ds[0]),len(h.geometry.z))\n raise\n f = open(\"DOSMAP.OUT\",\"w\")\n f.write(\"# energy, index, DOS, zposition\\n\")\n for ie in range(len(energies)):\n for iz in range(len(h.geometry.z)):\n f.write(str(energies[ie])+\" \")\n f.write(str(iz)+\" \")\n f.write(str(ds[ie,iz])+\" \")\n f.write(str(h.geometry.z[iz])+\"\\n\")\n f.close()\n return energies,np.transpose(ds) # retunr LDOS ", "def dy(y, t, gamma, w0, drive_amp, drive_w):\n\n x, p = y[0], y[1]\n dx = p\n dp = -2 * gamma * p - w0**2 * x + w0**2 * drive_amp*np.cos(drive_w*t)\n return [dx, dp]", "def prog(log=False):\n s = os.statvfs('//')\n sectorSize=s[0]\n sectorTotal=s[2]\n sectorFree=s[3]\n percentage = '{0:.2f} %'.format(sectorFree/sectorTotal*100)\n if (log):\n print('■ Micropython FLASH')\n print(' Sector : {0} Bytes'.format(s[0]))\n print(' Total : {0} Sectors, {1:.4f} MB'.format(s[2],sectorSize*sectorTotal/1048576))\n print(' Free : {0} Sectors, {1:.4f} MB'.format(s[3],sectorSize*sectorFree/1048576))\n print(' Free % : {0}'.format(percentage))\n print()\n return sectorSize*sectorFree", "def draw():", "def pdf(self, grid, dataSegment):\n return np.exp(-(dataSegment[0] ** 2.) / (2. * grid[0] ** 2.) - .5 * np.log(2. * np.pi * grid[0] ** 2.))", "def pdf(self, grid, dataSegment):\n return np.exp(\n -((dataSegment[0] - grid[0]) ** 2.) / (2. * grid[1] ** 2.) - .5 * np.log(2. * np.pi * grid[1] ** 2.))", "def pdf(self, grid, dataSegment):\n return np.exp(-((dataSegment[1] - grid[0] * dataSegment[0]) ** 2.) / (2. * grid[1] ** 2.) - .5 * np.log(\n 2. * np.pi * grid[1] ** 2.))", "def get_wd_phys(sed_name):\n new_name = sed_name.replace('.','_').split('_')\n teff = float(new_name[-2])\n if new_name[1]!='He':\n logg = 0.1*float(new_name[2])\n else:\n logg = 0.1*float(new_name[3])\n\n return teff, -999.0, logg", "def getDr(d, IO, type_):\n # Define radial distance range\n xp = np.linspace(0, d, d*50)\n yp = 0\n\n # Compute distortion corrections\n x0 = IO[\"x0\"]\n y0 = IO[\"y0\"]\n xbar = xp - x0\n ybar = yp - y0\n r = np.hypot(xbar, ybar)\n\n if type_ == \"symmetric\":\n k1 = IO[\"k1\"]\n k2 = IO[\"k2\"]\n k3 = IO[\"k3\"]\n dx = xbar * (r**2 * k1 + r**4 * k2 + r**6 * k3)\n dy = ybar * (r**2 * k1 + r**4 * k2 + r**6 * k3)\n elif type_ == \"decentering\":\n p1 = IO[\"p1\"]\n p2 = IO[\"p2\"]\n dx = (p1 * (r**2 + 2 * xbar**2) + 2 * p2 * xbar * ybar)\n dy = (2 * p1 * xbar * ybar + p2 * (r**2 + 2 * ybar**2))\n\n dr = np.hypot(dx, dy)\n\n return xp, dr", "def pdf(self, grid, dataSegment):\n r = grid[0]\n s = grid[1]\n sScaled = s*np.sqrt(1 - r**2.)\n return np.exp(-((dataSegment[1] - r * dataSegment[0]) ** 2.) / (2. * sScaled ** 2.) - .5 * np.log(\n 2. 
* np.pi * sScaled ** 2.))", "def y_dot_dot(self):\n \n # TODO 2\n # Calculate the horizontal component of the acceleration\n c, M_x = self.get_thrust_and_moment()\n phi = self.X[2]\n ydd = (c * (1 - np.sin(phi))) / self.m\n print (ydd)\n return ydd", "def Magnus2(self,direction='x'):\n self.reset()\n self.mol.orthoDen()\n self.mol.orthoFock()\n h = -1j*self.stepsize\n for idx,time in enumerate((self.time)):\n if direction.lower() == 'x':\n self.mol.computeDipole()\n self.dipole.append(np.real(self.mol.mu[0]))\n elif direction.lower() == 'y':\n self.mol.computeDipole()\n self.dipole.append(np.real(self.mol.mu[1]))\n elif direction.lower() == 'z':\n self.mol.computeDipole()\n self.dipole.append(np.real(self.mol.mu[2]))\n\n # record pulse envelope for later plotting, etc.\n self.shape.append(self.pulse(time))\n curDen = np.copy(self.mol.PO)\n \n self.addField(time + 0.0*self.stepsize,direction=direction)\n k1 = h*self.mol.FO \n U = expm(k1)\n self.mol.PO = np.dot(U,np.dot(curDen,self.mol.adj(U))) \n self.mol.updateFock()\n \n self.addField(time + 1.0*self.stepsize,direction=direction)\n L = 0.5*(k1 + h*self.mol.FO)\n U = expm(L)\n self.mol.PO = np.dot(U,np.dot(curDen,self.mol.adj(U))) \n self.mol.updateFock()\n \n # density and Fock are done updating, wrap things up\n self.mol.unOrthoFock() \n self.mol.unOrthoDen() \n self.mol.computeEnergy()\n self.Energy.append(np.real(self.mol.energy))", "def command_line(X=3,Y=3,visible=True):\r\n #visible flag can be set to False for timing purposes\r\n if X == 0 or Y == 0:\r\n return\r\n# np.random.seed(1)\r\n global grid\r\n grid = make_grid(X,Y)\r\n while not check_done(grid):\r\n (x,y) = get_min_shannon_entropy(grid)\r\n #print(\"Collapsing \",grid[y][x].block_loc)\r\n grid[y][x].collapse_wavefunction()\r\n if visible:\r\n render_text(grid)\r\n #print(\"---\"*5)\r\n return", "def pdf(self, grid, dataSegment):\n return np.exp(-((dataSegment[0, 0] - grid[0]) ** 2.) / (2. * dataSegment[0, 1] ** 2.) -\n .5 * np.log(2. 
* np.pi * dataSegment[0, 1] ** 2.))", "def dimscr(self):\n return (self.startx, self.starty, self.endx - self.startx, self.endy - self.starty)", "def tower(self):\n\t\tthismsg = \"\\r\\n\"+self.ESC+\"255D\"+self.ESC+\"0;1;44m \"+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"58C\"+self.ESC+\"44m \"+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"44m \"+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"68C\"+self.ESC+\"44m \"+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"44m \"+self.ESC+\"30m\"+self.A219+\" \"+self.A219+\" \"+self.A219+\" \"+self.A219+\" \"+self.A219+\" \"+self.ESC+\"0;44m\"+self.A219+\" \"+self.A219+\" \"+self.A219+\" \"+self.ESC+\"1m\"+self.A219+\" \"+self.A219+\" \"+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"52C\"+self.ESC+\"44m \"+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"44m \"+self.ESC+\"30m\"+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.ESC+\"43m\"+self.A223+self.A223+self.ESC+\"0;43m\"+self.A223+self.A223+self.A223+self.A223+self.ESC+\"1m\"+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"40C\"+self.ESC+\"43m\"+self.ESC+\"0;34;43m\"+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.ESC+\"30m\"+self.A223+self.A223+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"44m \"+self.ESC+\"1m\"+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.ESC+\"0;33;43m\"+self.A219+self.ESC+\"C\"+self.ESC+\"1;36;40mYou reach th\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"42Ce castle and fight your way up the\"+self.ESC+\"C\"+self.ESC+\"0;33;43m\"+self.A219+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"44m \"+self.ESC+\"32;40m\"+self.A178+self.A219+self.ESC+\"44m \"+self.ESC+\"1;30m\"+self.A219+self.A219+self.A219+self.A219+self.A219+self.ESC+\"0;33;43m\"+self.A219+self.ESC+\"C\"+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"30C\"+self.ESC+\"43m\"+self.ESC+\"1;36;40mtower!\"+self.ESC+\"41C\"+self.ESC+\"0;33;43m\"+self.A219+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"44m \"+self.ESC+\"32;40m\"+self.A176+self.A219+self.ESC+\"44m \"+self.ESC+\"40m\"+self.A176+self.A177+self.A219+self.A219+self.ESC+\"44m \"+self.ESC+\"40m\"+self.A219+self.ESC+\"44m \"+self.ESC+\"1;30m\"+self.A219+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"24C\"+self.ESC+\"44m\"+self.ESC+\"40m\"+self.A221+self.ESC+\"44m\"+self.A219+self.A219+self.A219+self.ESC+\"0;33;43m\"+self.A219+self.ESC+\"48C\"+self.A219+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"32m\"+self.A178+self.A177+self.A177+self.A219+self.ESC+\"44m \"+self.ESC+\"40m\"+self.A176+self.A177+self.A177+self.A177+self.A177+self.ESC+\"44m \"+self.ESC+\"40m\"+self.A176+self.A177+self.A219+self.ESC+\"44m \"+self.ESC+\"1;30m\"+self.A219+self.A219+self.A219+self.A219+self.A219+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"28C\"+self.ESC+\"44m\"+self.ESC+\"0;33;43m\"+self.A219+self.ESC+\"C\"+self.ESC+\"1;36;40mYou blindly slash at any and all who\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"66C stand in \"+self.ESC+\"0;33;43m\"+self.A219+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += 
self.ESC+\"32m\"+self.A177+self.A176+self.A177+self.A176+self.A178+self.A219+self.ESC+\"44m \"+self.ESC+\"40m\"+self.A176+self.A177+self.A176+self.A177+self.A177+self.A219+self.A178+self.ESC+\"44m \"+self.ESC+\"1;30m\"+self.A219+self.A219+self.A219+self.A219+self.A219+self.ESC+\"0;33;43m\"+self.A219+self.ESC+\"C\"+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"30C\"+self.ESC+\"43m\"+self.ESC+\"1;36;40myour path - screaming to find the prisoner \"+self.ESC+\"4C\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"77C\"+self.ESC+\"0;33;43m\"+self.A219+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"32m\"+self.A176+self.A177+self.A177+self.A178+self.A178+self.ESC+\"44m \"+self.ESC+\"40m\"+self.A176+self.A177+self.A177+self.A178+self.A178+self.ESC+\"44m \"+self.ESC+\"40m\"+self.A176+self.A178+self.A178+self.ESC+\"44m \"+self.ESC+\"1;30m\"+self.A219+self.A219+self.A219+self.A219+self.A219+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"28C\"+self.ESC+\"44m\"+self.ESC+\"0;33;43m\"+self.A219+self.ESC+\"C\"+self.ESC+\"1;36;40myou seek - and the jailer of your he\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"66Cart.\"+self.ESC+\"7C\"+self.ESC+\"0;33;43m\"+self.A219+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"44m \"+self.ESC+\"32;40m\"+self.A176+self.A176+self.A177+self.ESC+\"33m\"+self.A176+self.ESC+\"32m\"+self.A177+self.A176+self.A177+self.ESC+\"33m\"+self.A178+self.ESC+\"44m \"+self.ESC+\"32;40m\"+self.A178+self.A176+self.A177+self.A176+self.A178+self.ESC+\"44m \"+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"23C\"+self.ESC+\"44m\"+self.ESC+\"1;30m\"+self.A219+self.A219+self.A219+self.A219+self.A219+self.ESC+\"0;33;43m\"+self.A219+self.ESC+\"48C\"+self.A219+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"32m\"+self.A176+self.A177+self.A177+self.A177+self.A176+self.A178+self.A177+self.ESC+\"33m\"+self.A176+self.A177+self.A178+self.ESC+\"32m\"+self.A177+self.A177+self.A178+self.A176+self.A178+self.A176+self.A178+self.ESC+\"44m \"+self.ESC+\"1;30m\"+self.A219+self.A219+self.A219+self.A219+self.A219+self.ESC+\"43m\"+self.A220+self.A220+self.ESC+\"0;43m\"+self.A220+self.A220+self.A220+self.A220+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"34C\"+self.ESC+\"43m\"+self.A220+self.ESC+\"1m\"+self.A220+self.A220+self.A220+self.ESC+\"0;34;43m\"+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"76C\"+self.ESC+\"43m\"+self.ESC+\"30m\"+self.A220+self.A220+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"44m \"+self.ESC+\"32;40m\"+self.A177+self.A176+self.A177+self.A176+self.A176+self.ESC+\"33m\"+self.A176+self.A177+self.A178+self.A177+self.A177+self.ESC+\"32m\"+self.A177+self.A178+self.A177+self.A177+self.ESC+\"44m \"+self.ESC+\"1;30m\"+self.A219+self.A219+self.ESC+\"40m\"+self.A222+self.ESC+\"44m\"+self.A219+self.A219+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"28C\"+self.ESC+\"44m\"+self.A219+self.A219+self.ESC+\"0;44m\"+self.A219+self.A219+self.ESC+\"40m\"+self.A221+self.A219+self.ESC+\"1;44m\"+self.A219+self.ESC+\"40m\"+self.A221+self.ESC+\"44m\"+self.A219+self.A219+\" 
\"+self.ESC+\"0;33;43m\"+self.A219+self.ESC+\"40m\"+self.A223+self.A223+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"44C\"+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.ESC+\"43m\"+self.A219+self.ESC+\"34m\"+self.A219+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"44m \"+self.ESC+\"33;40m\"+self.A176+self.A177+self.A178+self.ESC+\"44m \"+self.ESC+\"1;30m\"+self.A219+self.A219+self.A219+self.A219+self.A219+self.ESC+\"0;44m\"+self.A219+self.A219+self.A219+self.ESC+\"1m\"+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"37C\"+self.ESC+\"44m\"+self.A219+\" \"+self.ESC+\"0;33;43m\"+self.A219+self.ESC+\"2C\"+self.ESC+\"1;37;40mYou make it to the top and \\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"74C\"+self.ESC+\"0;33;43m\"+self.A219+self.ESC+\"44m \"+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"44m \"+self.ESC+\"40m\"+self.A176+self.A177+self.A176+self.A178+self.ESC+\"44m \"+self.ESC+\"1;30m\"+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.ESC+\"0;44m\"+self.A219+self.A219+self.ESC+\"1m\"+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+\" \"+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"40C\"+self.ESC+\"44m \"+self.ESC+\"0;33;43m\"+self.A219+self.ESC+\"4C\"+self.ESC+\"1;37;40mthrow open the door...\"+self.ESC+\"6C\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"74C\"+self.ESC+\"0;33;43m\"+self.A219+self.ESC+\"44m \"+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"44m \"+self.ESC+\"40m\"+self.A176+self.A178+self.A177+self.A178+self.ESC+\"44m \"+self.ESC+\"32;40m\"+self.A177+self.ESC+\"1;42m\"+self.A178+self.A219+self.ESC+\"30;44m\"+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"31C\"+self.ESC+\"44m\"+self.ESC+\"0;44m\"+self.A219+self.A219+self.ESC+\"1m\"+self.A219+self.A219+self.A219+self.A219+self.A219+\" \"+self.ESC+\"0;33;43m\"+self.A219+self.ESC+\"1;32;42m\"+self.A177+self.A178+self.A178+self.ESC+\"0;33m\"+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"53C\"+self.ESC+\"32m\"+self.A178+self.ESC+\"1;42m\"+self.A219+self.A219+self.ESC+\"0;33m\"+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.ESC+\"43m\"+self.A219+self.ESC+\"44m \"+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"44m \"+self.ESC+\"32;40m\"+self.A176+self.ESC+\"1;42m\"+self.A176+self.ESC+\"0;32m\"+self.A177+self.ESC+\"1;42m\"+self.A219+self.A219+self.ESC+\"0;33m\"+self.A176+self.A177+self.A178+self.ESC+\"44m \"+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"20C\"+self.ESC+\"44m\"+self.ESC+\"1;32;42m\"+self.A178+self.A219+self.ESC+\"0;32m\"+self.A176+self.A178+self.ESC+\"1;42m\"+self.A219+self.ESC+\"0;32m\"+self.A178+self.ESC+\"1;30;44m\"+self.A219+self.A219+self.A219+self.ESC+\"0;44m\"+self.A219+self.A219+self.A219+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"32C\"+self.ESC+\"44m\"+self.ESC+\"1m\"+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+\" 
\"+self.ESC+\"0;32m\"+self.A177+self.ESC+\"1;42m\"+self.A176+self.ESC+\"0;32m\"+self.A178+self.ESC+\"1;42m\"+self.A178+self.ESC+\"0;32m\"+self.A177+self.ESC+\"1;42m\"+self.A178+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"46C\"+self.ESC+\"42m\"+self.ESC+\"44m \"+self.ESC+\"0;32m\"+self.A177+self.A178+self.A176+self.ESC+\"1;42m\"+self.A178+self.ESC+\"0;32m\"+self.A176+self.A178+self.ESC+\"1;42m\"+self.A219+self.A178+self.A178+self.ESC+\"44m \"+self.ESC+\"42m\"+self.A176+self.A177+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"63C\"+self.ESC+\"42m\"+self.A178+self.ESC+\"44m \"+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"42m \"+self.ESC+\"0;32m\"+self.A177+self.A176+self.ESC+\"1;42m\"+self.A178+self.A176+self.ESC+\"0;32m\"+self.A176+self.ESC+\"1;42m\"+self.A178+self.A219+self.ESC+\"0;32m\"+self.A178+self.A219+self.ESC+\"42m \"+self.ESC+\"40m\"+self.A177+self.A177+self.A176+self.A176+self.A177+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"21C\"+self.ESC+\"1;42m\"+self.A178+self.A176+self.ESC+\"30;40m\"+self.A219+self.ESC+\"0;32m\"+self.A177+self.A177+self.A178+self.ESC+\"1;42m\"+self.A219+self.ESC+\"30;40m\"+self.A219+self.A219+self.ESC+\"0m\"+self.A219+self.A219+self.A219+self.ESC+\"1m\"+self.A219+self.A219+self.A219+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"36C\"+self.ESC+\"0;32m\"+self.A177+self.A178+self.A176+self.ESC+\"1;42m\"+self.A176+self.ESC+\"0;32m\"+self.A176+self.ESC+\"1;42m\"+self.A219+self.A178+self.ESC+\"0;32m\"+self.A176+self.ESC+\"1;42m\"+self.A219+self.A219+\" \"+self.A178+\" \"+self.ESC+\"0;32m\"+self.A177+self.A176+self.A176+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"52C\"+self.ESC+\"1;42m\"+self.A176+self.ESC+\"0;32m\"+self.A177+self.A176+self.ESC+\"1;42m\"+self.A176+self.ESC+\"0;32m\"+self.A177+self.ESC+\"1;42m\"+self.A176+self.A178+self.A176+self.ESC+\"0;32m\"+self.A178+self.ESC+\"1;42m\"+self.A176+self.ESC+\"44m \"+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"63C\"+self.ESC+\"44m \"+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"42m\"+self.A176+self.A176+self.ESC+\"0;32m\"+self.A177+self.A176+self.ESC+\"1;42m\"+self.A176+self.A176+self.A178+self.A219+self.A219+self.A178+self.A178+self.A176+self.A176+\" \"+self.A176+self.A176+self.ESC+\"0;32m\"+self.A177+self.A176+self.A177+self.A178+self.A176+self.A177+self.ESC+\"1;42m\"+self.A176+self.ESC+\"0;32m\"+self.A176+self.ESC+\"1;42m\"+self.A219+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"26C\"+self.ESC+\"42m\"+self.ESC+\"0;32m\"+self.A178+self.ESC+\"1;42m\"+self.A219+self.ESC+\"0;32m\"+self.A177+self.A178+self.ESC+\"1;42m\"+self.A176+self.ESC+\"0;32m\"+self.A177+self.ESC+\"1;42m\"+self.A176+self.A176+self.ESC+\"0;32m\"+self.A177+self.A177+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"36C\"+self.ESC+\"1;42m\"+self.A176+self.ESC+\"0;32m\"+self.A177+self.ESC+\"1;42m\"+self.A176+self.ESC+\"0;32m\"+self.A176+self.A177+self.A177+self.A177+self.ESC+\"1;42m\"+self.A219+self.A176+self.ESC+\"0;32m\"+self.A178+self.A178+self.A177+self.A176+self.A178+self.ESC+\"1;42m\"+self.A176+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"51C\"+self.ESC+\"42m\"+self.ESC+\"0;32m\"+self.A177+self.A176+self.A177+self.A178+self.A178+self.ESC+\"1;42m\"+self.A178+self.ESC+\"0;32m\"+self.A177+self.A178+self.ESC+\"1;42m\"+self.A177+self.A178+self.ESC+\"0;32m\"+self.A177+self.ESC+\"1;42m\"+self.A176+self.ESC+\"44m \"+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"64C\"+self.ESC+\"44m 
\"+self.ESC+\"37m<MORE> \"+self.ESC+\"40m\\r\\n\"\n\t\treturn thismsg", "def spacing(self):\r\n\r\n return self.dx, self.dy, self.dz", "def test_psystem4():\n\n psys = PSystem(\"F{M{FMM}}\", [Membrane(\"n[[F]]+Fn\", [(\"-\", \"+m\"),(\"+\", \"+\")], [Membrane(\"FCnF\", [(\"M\", \"nF\"),(\"F\", \"+N\")], []),Membrane(\"C\", [(\"-\", \"{-+}\"),(\"-\", \"nF{F}\")], [])]),Membrane(\"{[{[m]}]}\", [(\"N\", \"MF\")], [])], 1)\n\n return psys", "def dx(self) -> np.ndarray:\n return np.array([self.p1[0], self.p2[0] - self.p1[0], 1 - self.p2[0]])", "def ddm(self):\n return gon2ddm(self.gon_angle)", "def ED(X,Y):", "def ofdm_demodulate(sys_parameters, waveform_info, tx_wave):\n # Get dimensionality information derived from the system parameters\n waveform_info.set_info(sys_parameters)\n # Cache the main dims\n nsc = waveform_info.nsubcarriers\n nfft = waveform_info.nfft\n cp_lengths = waveform_info.cyclic_prefix_lengths\n symbols_per_slot = waveform_info.symbols_per_slot\n # Use numpy.array to express the rxWaveform\n rxwave = np.array(tx_wave)\n rxant = np.shape(rxwave)[1]\n rx_grid = np.zeros((nsc, symbols_per_slot, rxant), dtype=complex)\n # Calculate position of the first active subcarrier in the FFT output,\n # according to the FFT size and the number of active subcarriers\n first_activesc = int((nfft - nsc) / 2)\n # Demodulate all symbols within the input data\n cpres = cp_lengths[0] - cp_lengths[1]\n # The same as the MATLAB operation : rxwave[0:cpres, :] = []\n rxwave = np.delete(rxwave, range(cpres), 0)\n rxwave = np.reshape(rxwave, (-1, symbols_per_slot, rxant), order=\"F\")\n # The same as the MATLAB operation : rxwave[0:cp_lengths[1], :, :] = []\n rxwave = np.delete(rxwave, range(cp_lengths[1]), 0)\n for symbol in range(symbols_per_slot):\n for rr in range(rxant):\n in_list = rxwave[:, symbol, rr]\n # rxwave is the numpy.array, just div the value\n fft_output = spy.fftpack.fftshift(spy.fft.fft(in_list)) / math.sqrt(nfft)\n rx_grid[:, symbol, rr] = fft_output[first_activesc : (first_activesc + nsc)]\n return rx_grid", "def compare_dispersion_to_dos(omegas, kx, ky, mlat, outdir=None):\n # Save DOS from projection\n if outdir is None:\n outdir = dio.prepdir(mlat.lp['meshfn'])\n else:\n outdir = dio.prepdir(outdir)\n name = outdir + 'dispersion_gyro' + mlat.lp['meshfn_exten'] + '_nx' + str(len(kx)) + '_ny' + str(len(ky))\n name += '_maxkx{0:0.3f}'.format(np.max(np.abs(kx))).replace('.', 'p')\n name += '_maxky{0:0.3f}'.format(np.max(np.abs(ky))).replace('.', 'p')\n\n # initialize figure\n fig, ax = leplt.initialize_1panel_centered_fig()\n ax2 = ax.twinx()\n ax.hist(omegas.ravel(), bins=1000)\n\n # Compare the histograms of omegas to the dos and save the figure\n eigval = np.imag(mlat.get_eigval())\n print 'eigval = ', eigval\n ax2.hist(eigval[eigval > 0], bins=50, color=lecmap.green(), alpha=0.2)\n ax.set_title('DOS from dispersion')\n xlims = ax.get_xlim()\n ax.set_xlim(0, xlims[1])\n plt.savefig(name + '_dos.png', dpi=300)", "def euler2dcm(angles, rot_seq='zyx'):\n dcm = np.zeros((3, 3))\n cangle = np.cos(angles)\n sangle = np.sin(angles)\n rot_seq = rot_seq.lower()\n if rot_seq == 'zyx':\n dcm[0, 0] = cangle[1]*cangle[0]\n dcm[0, 1] = cangle[1]*sangle[0]\n dcm[0, 2] = -sangle[1]\n dcm[1, 0] = sangle[2]*sangle[1]*cangle[0] - cangle[2]*sangle[0]\n dcm[1, 1] = sangle[2]*sangle[1]*sangle[0] + cangle[2]*cangle[0]\n dcm[1, 2] = cangle[1]*sangle[2]\n dcm[2, 0] = sangle[1]*cangle[2]*cangle[0] + sangle[0]*sangle[2]\n dcm[2, 1] = sangle[1]*cangle[2]*sangle[0] - cangle[0]*sangle[2]\n dcm[2, 2] = 
cangle[1]*cangle[2]\n return dcm\n elif rot_seq == 'zyz':\n dcm[0, 0] = cangle[0]*cangle[2]*cangle[1] - sangle[0]*sangle[2]\n dcm[0, 1] = sangle[0]*cangle[2]*cangle[1] + cangle[0]*sangle[2]\n dcm[0, 2] = -sangle[1]*cangle[2]\n dcm[1, 0] = -cangle[0]*cangle[1]*sangle[2] - sangle[0]*cangle[2]\n dcm[1, 1] = -sangle[0]*cangle[1]*sangle[2] + cangle[0]*cangle[2]\n dcm[1, 2] = sangle[1]*sangle[2]\n dcm[2, 0] = cangle[0]*sangle[1]\n dcm[2, 1] = sangle[0]*sangle[1]\n dcm[2, 2] = cangle[1]\n return dcm\n elif rot_seq == 'zxy':\n dcm[0, 0] = cangle[2]*cangle[0] - sangle[1]*sangle[2]*sangle[0]\n dcm[0, 1] = cangle[2]*sangle[0] + sangle[1]*sangle[2]*cangle[0]\n dcm[0, 2] = -sangle[2]*cangle[1]\n dcm[1, 0] = -cangle[1]*sangle[0]\n dcm[1, 1] = cangle[1]*cangle[0]\n dcm[1, 2] = sangle[1]\n dcm[2, 0] = sangle[2]*cangle[0] + sangle[1]*cangle[2]*sangle[0]\n dcm[2, 1] = sangle[2]*sangle[0] - sangle[1]*cangle[2]*cangle[0]\n dcm[2, 2] = cangle[1]*cangle[2]\n return dcm\n elif rot_seq == 'zxz':\n dcm[0, 0] = -sangle[0]*cangle[1]*sangle[2] + cangle[0]*cangle[2]\n dcm[0, 1] = cangle[0]*cangle[1]*sangle[2] + sangle[0]*cangle[2]\n dcm[0, 2] = sangle[1]*sangle[2]\n dcm[1, 0] = -sangle[0]*cangle[2]*cangle[1] - cangle[0]*sangle[2]\n dcm[1, 1] = cangle[0]*cangle[2]*cangle[1] - sangle[0]*sangle[2]\n dcm[1, 2] = sangle[1]*cangle[2]\n dcm[2, 0] = sangle[0]*sangle[1]\n dcm[2, 1] = -cangle[0]*sangle[1]\n dcm[2, 2] = cangle[1]\n return dcm\n elif rot_seq == 'yxz':\n dcm[0, 0] = cangle[0]*cangle[2] + sangle[1]*sangle[0]*sangle[2]\n dcm[0, 1] = cangle[1]*sangle[2]\n dcm[0, 2] = -sangle[0]*cangle[2] + sangle[1]*cangle[0]*sangle[2]\n dcm[1, 0] = -cangle[0]*sangle[2] + sangle[1]*sangle[0]*cangle[2]\n dcm[1, 1] = cangle[1]*cangle[2]\n dcm[1, 2] = sangle[0]*sangle[2] + sangle[1]*cangle[0]*cangle[2]\n dcm[2, 0] = sangle[0]*cangle[1]\n dcm[2, 1] = -sangle[1]\n dcm[2, 2] = cangle[1]*cangle[0]\n return dcm\n elif rot_seq == 'yxy':\n dcm[0, 0] = -sangle[0]*cangle[1]*sangle[2] + cangle[0]*cangle[2]\n dcm[0, 1] = sangle[1]*sangle[2]\n dcm[0, 2] = -cangle[0]*cangle[1]*sangle[2] - sangle[0]*cangle[2]\n dcm[1, 0] = sangle[0]*sangle[1]\n dcm[1, 1] = cangle[1]\n dcm[1, 2] = cangle[0]*sangle[1]\n dcm[2, 0] = sangle[0]*cangle[2]*cangle[1] + cangle[0]*sangle[2]\n dcm[2, 1] = -sangle[1]*cangle[2]\n dcm[2, 2] = cangle[0]*cangle[2]*cangle[1] - sangle[0]*sangle[2]\n return dcm\n elif rot_seq == 'yzx':\n dcm[0, 0] = cangle[0]*cangle[1]\n dcm[0, 1] = sangle[1]\n dcm[0, 2] = -sangle[0]*cangle[1]\n dcm[1, 0] = -cangle[2]*cangle[0]*sangle[1] + sangle[2]*sangle[0]\n dcm[1, 1] = cangle[1]*cangle[2]\n dcm[1, 2] = cangle[2]*sangle[0]*sangle[1] + sangle[2]*cangle[0]\n dcm[2, 0] = sangle[2]*cangle[0]*sangle[1] + cangle[2]*sangle[0]\n dcm[2, 1] = -sangle[2]*cangle[1]\n dcm[2, 2] = -sangle[2]*sangle[0]*sangle[1] + cangle[2]*cangle[0]\n return dcm\n elif rot_seq == 'yzy':\n dcm[0, 0] = cangle[0]*cangle[2]*cangle[1] - sangle[0]*sangle[2]\n dcm[0, 1] = sangle[1]*cangle[2]\n dcm[0, 2] = -sangle[0]*cangle[2]*cangle[1] - cangle[0]*sangle[2]\n dcm[1, 0] = -cangle[0]*sangle[1]\n dcm[1, 1] = cangle[1]\n dcm[1, 2] = sangle[0]*sangle[1]\n dcm[2, 0] = cangle[0]*cangle[1]*sangle[2] + sangle[0]*cangle[2]\n dcm[2, 1] = sangle[1]*sangle[2]\n dcm[2, 2] = -sangle[0]*cangle[1]*sangle[2] + cangle[0]*cangle[2]\n return dcm\n elif rot_seq == 'xyz':\n dcm[0, 0] = cangle[1]*cangle[2]\n dcm[0, 1] = sangle[0]*sangle[1]*cangle[2] + cangle[0]*sangle[2]\n dcm[0, 2] = -cangle[0]*sangle[1]*cangle[2] + sangle[0]*sangle[2]\n dcm[1, 0] = -cangle[1]*sangle[2]\n dcm[1, 1] = 
-sangle[0]*sangle[1]*sangle[2] + cangle[0]*cangle[2]\n dcm[1, 2] = cangle[0]*sangle[1]*sangle[2] + sangle[0]*cangle[2]\n dcm[2, 0] = sangle[1]\n dcm[2, 1] = -sangle[0]*cangle[1]\n dcm[2, 2] = cangle[0]*cangle[1]\n return dcm\n elif rot_seq == 'xyx':\n dcm[0, 0] = cangle[1]\n dcm[0, 1] = sangle[0]*sangle[1]\n dcm[0, 2] = -cangle[0]*sangle[1]\n dcm[1, 0] = sangle[1]*sangle[2]\n dcm[1, 1] = -sangle[0]*cangle[1]*sangle[2] + cangle[0]*cangle[2]\n dcm[1, 2] = cangle[0]*cangle[1]*sangle[2] + sangle[0]*cangle[2]\n dcm[2, 0] = sangle[1]*cangle[2]\n dcm[2, 1] = -sangle[0]*cangle[2]*cangle[1] - cangle[0]*sangle[2]\n dcm[2, 2] = cangle[0]*cangle[2]*cangle[1] - sangle[0]*sangle[2]\n return dcm\n elif rot_seq == 'xzy':\n dcm[0, 0] = cangle[2]*cangle[1]\n dcm[0, 1] = cangle[0]*cangle[2]*sangle[1] + sangle[0]*sangle[2]\n dcm[0, 2] = sangle[0]*cangle[2]*sangle[1] - cangle[0]*sangle[2]\n dcm[1, 0] = -sangle[1]\n dcm[1, 1] = cangle[0]*cangle[1]\n dcm[1, 2] = sangle[0]*cangle[1]\n dcm[2, 0] = sangle[2]*cangle[1]\n dcm[2, 1] = cangle[0]*sangle[1]*sangle[2] - sangle[0]*cangle[2]\n dcm[2, 2] = sangle[0]*sangle[1]*sangle[2] + cangle[0]*cangle[2]\n return dcm\n elif rot_seq == 'xzx':\n dcm[0, 0] = cangle[1]\n dcm[0, 1] = cangle[0]*sangle[1]\n dcm[0, 2] = sangle[0]*sangle[1]\n dcm[1, 0] = -sangle[1]*cangle[2]\n dcm[1, 1] = cangle[0]*cangle[2]*cangle[1] - sangle[0]*sangle[2]\n dcm[1, 2] = sangle[0]*cangle[2]*cangle[1] + cangle[0]*sangle[2]\n dcm[2, 0] = sangle[1]*sangle[2]\n dcm[2, 1] = -cangle[0]*cangle[1]*sangle[2] - sangle[0]*cangle[2]\n dcm[2, 2] = -sangle[0]*cangle[1]*sangle[2] + cangle[0]*cangle[2]\n return dcm\n else:\n return False", "def d2(x0,y0,x1,y1):\n return (x0-x1)*(x0-x1) + (y0-y1)*(y0-y1)", "def calcPad2D(H, W, M):\n\treturn (*calcPad1D(W,M), *calcPad1D(H,M))", "def ddm(self):\n return dec2ddm(self.dec_angle)", "def dock_dx_dy(block1, dock1n, block2, dock2n):\n _dock1 = block1.docks[dock1n]\n _dock2 = block2.docks[dock2n]\n _d1type, _d1dir, _d1x, _d1y = _dock1[0:4]\n _d2type, _d2dir, _d2x, _d2y = _dock2[0:4]\n if block1 == block2:\n return (100, 100)\n if _d1dir == _d2dir:\n return (100, 100)\n if (_d2type is not 'number') or (dock2n is not 0):\n if block1.connections is not None and \\\n dock1n < len(block1.connections) and \\\n block1.connections[dock1n] is not None:\n return (100, 100)\n if block2.connections is not None and \\\n dock2n < len(block2.connections) and \\\n block2.connections[dock2n] is not None:\n return (100, 100)\n if _d1type != _d2type:\n if block1.name in STRING_OR_NUMBER_ARGS:\n if _d2type == 'number' or _d2type == 'string':\n pass\n elif block1.name in CONTENT_ARGS:\n if _d2type in CONTENT_BLOCKS:\n pass\n else:\n return (100, 100)\n (_b1x, _b1y) = block1.spr.get_xy()\n (_b2x, _b2y) = block2.spr.get_xy()\n return ((_b1x + _d1x) - (_b2x + _d2x), (_b1y + _d1y) - (_b2y + _d2y))", "def symsigma(self):\n return 1 if self.sc_mode == \"one_shot\" else 0", "def getDustDensity(grid=None, ppar=None):\n mesh = np.meshgrid(grid.x, grid.y, grid.z, indexing='ij')\n if ppar['crd_sys'] == 'sph':\n rr = mesh[0]\n tt = mesh[1]\n pp = mesh[2]\n xx = rr * np.sin(tt) * np.sin(pp)\n yy = rr * np.sin(tt) * np.cos(pp)\n zz = rr * np.cos(tt)\n cyrr = np.sqrt(xx**2. + yy**2)\n elif ppar['crd_sys'] == 'car':\n xx = mesh[0]\n yy = mesh[1]\n zz = mesh[2]\n rr = np.sqrt(xx**2 + yy**2 + zz**2)\n cyrr = np.sqrt(xx**2. 
+ yy**2.)\n else:\n raise ValueError('crd_sys not specified in ppar')\n\n # calculate surface density\n nflat = len(ppar['dRin'])\n flat = cyrr * 0.\n for ii in range(nflat):\n flatii = fn_getflat(cyrr, ppar['dRin_w'][ii], ppar['dRin'][ii], \n ppar['dRout'][ii], ppar['dRout_w'][ii], \n ppar['dsigp'][ii], ppar['dsig0'][ii])\n flat = flat + flatii\n\n nring = len(ppar['dring_r'])\n ring = cyrr * 0\n for ii in range(nring):\n ringii = fn_getring(cyrr, ppar['dring_r'][ii], \n ppar['dring_win'][ii], ppar['dring_wout'][ii], \n ppar['dring_a'][ii])\n ring = ring + ringii\n\n nlynbell = len(ppar['dLB_Rin'])\n lynbell = cyrr * 0\n for ii in range(nlynbell):\n lynbellii = fn_getLyndenBell(cyrr, ppar['dLB_Rin'][ii], \n ppar['dLB_Rsig'][ii], ppar['dLB_sigp'][ii], \n ppar['dLB_sig0'][ii])\n lynbell = lynbell + lynbellii\n\n sig = flat + ring + lynbell\n\n # calculate the dust density\n op = dustopac.radmc3dDustOpac()\n dinfo = op.readDustInfo()\n ngs = len(dinfo['gsize'])\n dweights = dinfo['dweights']\n\n rhodust = np.zeros([grid.nx, grid.ny, grid.nz, ngs], dtype=np.float64) \n for ig in range(ngs):\n hhii = ppar['dHt'][ig] * (cyrr / ppar['dRt'][ig])**ppar['dqheight'][ig]\n rho_ig = sig / np.sqrt(2.*np.pi) / hhii * np.exp(-0.5*(zz/hhii)**2)\n rhodust[:,:,:,ig] = rho_ig * dweights\n\n reg = rhodust < ppar['cutddens']\n rhodust[reg]= ppar['cutddens']\n\n return rhodust", "def densidad(qe):\r\n global x,rhoe,rhoi,dx,nparticulas,npuntos_malla,pared_izquierda,pared_derecha\r\n \r\n j1 = sp.dtype(sp.int32) # Asegura que la variable permanezca entera\r\n j2 = sp.dtype(sp.int32) \r\n \r\n # Factor de ponderacion de carga \r\n re = qe/dx \r\n # Densidad electronica \r\n rhoe = sp.zeros(npuntos_malla+1) \r\n # Mapa de cargas sobre la malla\r\n for i in range(nparticulas):\r\n xa = x[i]/dx # xparticula/dx\r\n j1 = int(xa) # indices de la malla fija xmalla/dx\r\n j2 = j1 + 1 # Siguiente punto en la malla\r\n f2 = xa - j1 # |xmalla - xparticula|/dx\r\n f1 = 1.0 - f2\r\n rhoe[j1] = rhoe[j1] + re*f1\r\n rhoe[j2] = rhoe[j2] + re*f2\r\n\r\n # Condiciones de frontera periodica\r\n rhoe[0] += rhoe[npuntos_malla]\r\n rhoe[npuntos_malla] = rhoe[0]\r\n \r\n # Se agrega una densidad de iones neutral\r\n rhoi = rho0\r\n\r\n return True", "def system(p):\r\n\r\n C1, C2, C3, C4, C5, C6, C7, C8, \\\r\n C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19,C20,C21,C22, \\\r\n C23, C24, C25, C26, C27, C28, C29, C30, C31, C32, C33, C34,\\\r\n C35, C36, C37, C38, C39, C40, C41, C42, C43, \\\r\n C44, C45, C46, C47, C48, C49, C50, C51, C52, C53, C54, C55, C56 = p\r\n\r\n C = [C1, C2, C3, C4, C5, C6, C7, C8,\r\n C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19,C20,C21,C22,\r\n C23, C24, C25, C26, C27, C28, C29, C30, C31, C32, C33, C34, C35, C36, C37,\r\n C38, C39, C40, C41, C42, C43,\r\n C44, C45, C46, C47, C48, C49, C50, C51, C52, C53, C54, C55, C56]\r\n\r\n eqs = [C[i] * (Kd[i] + Rtot - sum(C)) + Etot[i] * (sum(C) - Rtot) for i in range(n)]\r\n\r\n return eqs", "def manhattenDist(position1, position2):\n dx,dy = manhattenPath(position1, position2)\n return float(abs(dx) + abs(dy))", "def measure(x, y):\n return dotc_gpu(x, y)", "def _get_terminal_size_windows():\r\n\r\n\r\n #from https://gist.github.com/jtriley/1108174, thank you very much for this hard to produce code!\r\n\r\n # stdin handle is -10\r\n # stdout handle is -11\r\n # stderr handle is -12\r\n h = windll.kernel32.GetStdHandle(-12)\r\n csbi = create_string_buffer(22)\r\n res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)\r\n if res:\r\n (bufx, bufy, curx, cury, wattr,\r\n left, 
top, right, bottom,\r\n maxx, maxy) = struct.unpack(\"hhhhHhhhhhh\", csbi.raw)\r\n sizex = right - left + 1\r\n sizey = bottom - top + 1\r\n return sizex, sizey", "def int_21H_2(self):\r\n byte_DL = self.registers['DX'].get_byte(0)\r\n char_DL = byte_DL.decode(\"ascii\") # zdekodowany znak do wyswietlenia\r\n self.GUI.set_output(char_DL)", "def w_to_d(self, wx, wy):\r\n dx = (wx - self.wxmin) * self.xscale + self.dxmin\r\n dy = (wy - self.wymin) * self.yscale + self.dymin\r\n return dx, dy", "def compute_thermo(E,dos,TT):\n if (len(dos)<3):\n print (\"Not enough points in the phonon DOS!\")\n return None\n \n ZPE = 0.5*dos_integral(E,dos,1)\n modes = dos_integral(E,dos)\n \n EvibT = np.zeros(len(TT))\n SvibT = np.zeros(len(TT))\n CvibT = np.zeros(len(TT))\n FvibT = np.zeros(len(TT))\n for i in range(0,len(TT)):\n h = 0.5*(E[2]-E[0])\n arg = K_BOLTZMANN_RY*TT[i]\n arg2 = 2.0 * arg\n Evib = 0.0\n Svib = 0.0\n Cvib = 0.0\n for j in range(0,len(dos)-3,3):\n\n Evib += 3.0*E[j]/tanh(E[j]/(arg2))*dos[j]+\\\n 3.0*E[j+1]/tanh(E[j+1]/(arg2))*dos[j+1]+\\\n 2.0*E[j+2]/tanh(E[j+2]/(arg2))*dos[j+2]\n \n Svib += 3.0*(E[j]/arg2/tanh(E[j]/arg2)-log(2.0*sinh(E[j]/arg2)))*dos[j]+\\\n 3.0*(E[j+1]/arg2/tanh(E[j+1]/arg2)-log(2.0*sinh(E[j+1]/arg2)))*dos[j+1]+\\\n 2.0*(E[j+2]/arg2/tanh(E[j+2]/arg2)-log(2.0*sinh(E[j+2]/arg2)))*dos[j+2]\n\n try: # avoid overflow error for arg very small\n Cvib += 3.0*pow(E[j]/arg,2)/( 4.0*pow(sinh(E[j]/(arg2)),2) )*dos[j]+\\\n 3.0*pow(E[j+1]/arg,2)/( 4.0*pow(sinh(E[j+1]/(arg2)),2) )*dos[j+1]+\\\n 2.0*pow(E[j+2]/arg,2)/( 4.0*pow(sinh(E[j+2]/(arg2)),2) )*dos[j+2]\n except:\n Cvib += 0.0\n\n EvibT[i] = h*0.5*Evib*3.0/8.0 # h is the integration step, 0.5 comes from the equation for E,\n # the factor 3.0/8.0 comes from the Simpson 3/8 rule\n SvibT[i] = h*K_BOLTZMANN_RY*Svib*3.0/8.0\n CvibT[i] = h*K_BOLTZMANN_RY*Cvib*3.0/8.0\n FvibT = EvibT - SvibT * TT\n\n print ()\n return TT, EvibT, SvibT, CvibT, FvibT, ZPE, modes", "def system_fleet_dimensioning(self):", "def calc_T_sys(nu_obs):\n return 100 * u.K + 120 * (nu_obs / (150 * u.MHz))**(-2.55) * u.K", "def dx(self):\n if self._uniform_cell_size[0] == gxapi.rDUMMY:\n return None\n return self._uniform_cell_size[0]", "def diagonals_in_hd():\n number_of_pairs = 100000\n angles_for_d = {}\n for d in (10, 100, 1000):\n number_of_corners = 2 ** d - 1\n first_corner = [random.randint(0, number_of_corners) for _ in range(0, number_of_pairs)]\n second_corner = [random.randint(0, number_of_corners) for _ in range(0, number_of_pairs)]\n\n dummy_d = [d for _ in range(0, number_of_pairs)]\n angles = []\n with cf.ProcessPoolExecutor() as executor:\n results = executor.map(find_angle, first_corner, second_corner, dummy_d)\n for result in results:\n angles.append(result)\n ser = pd.Series(angles)\n print(f\"Angles between diagonals for {d} dimensions\")\n print(ser.describe())\n angles_for_d[d] = ser\n\n plot_pmfs_for_ds(angles_for_d)", "def spatial_dos(h,dos):\n if h.has_spin == False and h.has_eh==False: return np.array(dos)\n elif h.has_spin == True and h.has_eh==False: \n return np.array([dos[2*i]+dos[2*i+1] for i in range(len(dos)//2)])\n elif h.has_spin == False and h.has_eh==True: \n return np.array([dos[2*i]+dos[2*i+1] for i in range(len(dos)//2)])\n elif h.has_spin == True and h.has_eh==True: \n return np.array([dos[4*i]+dos[4*i+1]+dos[4*i+2]+dos[4*i+3] for i in range(len(dos)//4)])\n else: raise", "def _d_converter(self):\n units = {'um':1e-6, 'mm':1e-3, 'inch':2.54e-2, 'in':2.54e-2,\\\n 'micron':1e-6, 'mil':2.54e-5, 'm':1.0}\n for i in 
self.stack:\n i.thickness = i.thickness*units[i.units]\n return", "def d_delayS_d_par(self,par):\n e = self.ecc()\n cE = np.cos(self.E())\n sE = np.sin(self.E())\n sOmega = np.sin(self.omega())\n cOmega = np.cos(self.omega())\n TM2 = self.M2.value*Tsun\n\n logNum = 1-e*cE-self.SINI*(sOmega*(cE-e)+\n (1-e**2)**0.5*cOmega*sE)\n dTM2_dpar = self.prtl_der('TM2',par)\n dsDelay_dTM2 = -2*np.log(logNum)\n decc_dpar = self.prtl_der('ecc',par)\n dsDelay_decc = -2*TM2/logNum*(-cE-self.SINI*(-e*cOmega*sE/np.sqrt(1-e**2)-sOmega))\n dE_dpar = self.prtl_der('E',par)\n dsDelay_dE = -2*TM2/logNum*(e*sE-self.SINI*(np.sqrt(1-e**2)*cE*cOmega-sE*sOmega))\n domega_dpar = self.prtl_der('omega',par)\n dsDelay_domega = -2*TM2/logNum*self.SINI*((cE-e)*cOmega-np.sqrt(1-e**2)*sE*sOmega)\n dSINI_dpar = self.prtl_der('SINI',par)\n dsDelay_dSINI = -2*TM2/logNum*(-np.sqrt(1-e**2)*cOmega*sE-(cE-e)*sOmega)\n return dTM2_dpar*dsDelay_dTM2 + decc_dpar*dsDelay_decc + \\\n dE_dpar*dsDelay_dE +domega_dpar*dsDelay_domega + \\\n dSINI_dpar*dsDelay_dSINI", "def espec2(self, x, y):\n\n self.x = x\n self.y = y\n\n #cross-spectral density - welch method (complex valued)\n sp2 = mlab.csd(self.x, self.y, NFFT=self.nfft, Fs=self.fs, detrend=mlab.detrend_mean, window=mlab.window_hanning, noverlap=self.nfft/2)\n self.f = sp2[1][1:]\n sp2 = sp2[0][1:]\n \n #co e quad espectro (real e imag) - verificar com parente\n co = np.real(sp2)\n qd = np.imag(sp2)\n \n #phase (angle function)\n ph = np.angle(sp2,deg=True)\n \n #ecoherence between x and y (0-1)\n coer = mlab.cohere(self.x , self.y, NFFT=self.nfft, Fs=self.fs, detrend=mlab.detrend_mean, window=mlab.window_hanning, noverlap=self.nfft/2)\n coer = coer[0][1:]\n \n #intervalo de confianca para a amplitude do espectro cruzado - 95%\n ici = sp2 * 14 /26.12\n ics = sp2 * 14 /5.63\n \n #intervalo de confianca para coerencia\n icc = np.zeros(len(sp2))\n icc[:] = 1 - (0.05 ** (1 / (14 / 2.0 - 1)))\n \n self.aa2 = np.array([self.f,sp2,co,qd,ph,coer,ici,ics,icc]).T\n\n return self.aa2", "def diagnosticos(): \r\n global rhoe,Ex,npuntos_malla,itiempo,longitud_malla,rho0,aP,v1,v2,F\r\n global EnergiaK, EnergiaP, EnergiaT, emax\r\n global iout,igrafica,ifase,ivdist, distribucion\r\n global Archivos_Densidades, Archivos_Campo, Archivos_Efase, Archivos_Fdistribucion\r\n \r\n # Se crea el eje para graficar las cantidades fisicas involucradas:\r\n xgrafica = dx * sp.arange(npuntos_malla+1)\r\n \r\n if (itiempo == 0): \r\n plt.figure('Cantidades')\r\n plt.clf()\r\n \r\n if (igrafica > 0):\r\n # Se grafica cada paso dado por el contador igrafica:\r\n if (sp.fmod(itiempo,igrafica) == 0): \r\n # Densidad total\r\n plt.figure(1)\r\n if (itiempo >0 ): plt.cla()\r\n plt.plot(xgrafica, -(rhoe+rho0), 'r', label='Densidad')\r\n plt.xlabel('x')\r\n plt.xlim(0,longitud_malla)\r\n plt.ylim(-1.5,1.5)\r\n plt.legend(loc=1)\r\n # Se imprimen y se guardan las imagenes de acuerdo a iout:\r\n plt.pause(0.0001)\r\n plt.draw()\r\n filename = '%0*d_densidad'%(5, itiempo)\r\n Archivos_Densidades[itiempo] = filename\r\n if (iout > 0):\r\n if (sp.fmod(itiempo,iout) == 0): \r\n plt.savefig(filename+'.png',dpi=720)\r\n \r\n # Campo electrico\r\n plt.figure(2)\r\n if (itiempo >0 ): plt.cla()\r\n plt.plot(xgrafica, Ex, 'b' , label = 'Ex')\r\n plt.xlabel('x', fontsize = 18)\r\n plt.ylabel('Ex', fontsize = 18)\r\n plt.xticks(np.linspace(0,16,4), fontsize = 18)\r\n plt.yticks(np.linspace(-0.0010,0.0010,5), fontsize = 18)\r\n plt.xlim(0,longitud_malla)\r\n plt.ylim(-0.0015,0.0015)\r\n plt.legend(loc = 1)\r\n # Se imprimen y se guardan 
las imagenes de acuerdo a iout:\r\n plt.pause(0.0001)\r\n plt.draw()\r\n filename = '%0*d_campoelectrico'%(5, itiempo)\r\n Archivos_Campo[itiempo] = filename\r\n if (iout > 0):\r\n if (sp.fmod(itiempo,iout) == 0): \r\n plt.savefig(filename+'.png',dpi=720)\r\n \r\n if (ifase > 0):\r\n if (sp.fmod(itiempo,ifase) == 0): \r\n # Se grafica el espacio de fase en el paso dado por el contador ifase:\r\n plt.figure(3)\r\n if (itiempo >0 ): plt.cla()\r\n v1 = sp.zeros(nparticulas)\r\n v2 = sp.zeros(nparticulas)\r\n x1 = sp.zeros(nparticulas)\r\n x2 = sp.zeros(nparticulas)\r\n for i in range(nparticulas):\r\n if (v[i-1]>v[i]):\r\n v1[i]=v[i]\r\n x1[i]=x[i]\r\n elif(v[i-1]<v[i]):\r\n v2[i]=v[i]\r\n x2[i]=x[i] \r\n if(distribucion == 0):\r\n plt.scatter(x,v,marker='.',s=0.1,color='black') \r\n elif(distribucion == 1 or distribucion == 2):\r\n plt.scatter(x1,v1,marker='.',s=0.1,color='red') \r\n plt.scatter(x2,v2,marker='.',s=0.1,color='blue')\r\n plt.xticks(np.linspace(0,100,6), fontsize = 18)\r\n plt.yticks(np.linspace(-8,8,5), fontsize = 18)\r\n plt.xlabel('x', fontsize = 18)\r\n plt.ylabel('v', fontsize = 18)\r\n plt.xlim(0,longitud_malla)\r\n plt.ylim(-4,8)\r\n\r\n # Se imprimen y se guardan las imagenes de acuerdo a iout:\r\n plt.pause(0.0001)\r\n plt.draw()\r\n filename = '%0*d_espaciofase'%(5, itiempo)\r\n Archivos_Efase[itiempo] = filename\r\n if (iout > 0):\r\n if (sp.fmod(itiempo,iout) == 0): \r\n plt.savefig(filename+'.png',dpi=240)\r\n \r\n if (ivdist > 0):\r\n if (sp.fmod(itiempo,ivdist)==0):\r\n plt.figure(4)\r\n if (itiempo >0 ): plt.cla() \r\n plt.scatter(v,F,marker = '.' , s=0.1, color ='green')\r\n plt.xlim(-5*vh,5*vh)\r\n plt.ylim(0,1.0)\r\n plt.xlabel('v')\r\n plt.ylabel('f(v)')\r\n #fn_vdist = 'vdist_%0*d'%(5, itiempo)\r\n # Se imprimen y se guardan las imagenes de acuerdo a iout:\r\n plt.pause(0.0001)\r\n plt.draw()\r\n filename = '%0*d_fdistribucion'%(5, itiempo)\r\n Archivos_Fdistribucion[itiempo] = filename\r\n if (iout > 0):\r\n if (sp.fmod(itiempo,iout) == 0): \r\n plt.savefig(filename+'.png',dpi=720)\r\n #Se escriben los datos de la distribucion en un archivo:\r\n# sp.savetxt(fn_vdist, sp.column_stack((v,F)),fmt=('%1.4e','%1.4e')) \r\n \r\n # Energia cinetica:\r\n v2 = v**2\r\n EnergiaK[itiempo] = 0.5*masa*sum(v2)\r\n \r\n # Energia potencial:\r\n e2 = Ex**2\r\n EnergiaP[itiempo] = 0.5*dx*sum(e2)\r\n emax = max(Ex) # Campo maximo para analisis de inestabilidad\r\n \r\n # Energia total: \r\n EnergiaT[itiempo] = EnergiaP[itiempo] + EnergiaK[itiempo]\r\n \r\n return True", "def pdf(self, grid, dataSegment):\n return (grid[0] ** dataSegment[0]) * (np.exp(-grid[0])) / (np.math.factorial(dataSegment[0]))", "def density(self):\r\n return self.count_ones() / float(self.xspan * self.yspan)", "def cmd_calculation():", "def _w_euc_2d(self, i, j):\n\n xd = self.coords[i][0] - self.coords[j][0]\n yd = self.coords[i][1] - self.coords[j][1]\n return self._nint(sqrt(xd ** 2 + yd ** 2))", "def DR(R,Pc):\n return r1*R*(K1**B1/(K1**B1 + (A/R)**B1))*(S/(S + R*Pc + Pc)) \\\n - gwt*A - r2*R*(K2**B2/(K2**B2 + (A)**B2))*(S/(S + R*Pc + Pc) ) \\\n *(R*M)/(K3 + R*M) + R*gc", "def test_dmi_uses_unit_length_2dmesh():\n A = 8.78e-12 # J/m\n D = 1.58e-3 # J/m^2\n Ms = 3.84e5 # A/m\n\n energies = []\n\n # unit_lengths 1e-9 and 1 are common, let's throw in an intermediate length\n # just to challenge the system a little:\n for unit_length in (1, 1e-4, 1e-9):\n radius = 200e-9 / unit_length\n maxh = 5e-9 / unit_length\n helical_period = (4 * pi * A / D) / unit_length\n k = 2 * pi / helical_period\n # HF 27 
April 2014: The next command fails in dolfin 1.3\n # mesh = df.CircleMesh(df.Point(0, 0), radius, maxh)\n # The actual shape of the domain shouldn't matter for the test,\n # so let's use a Rectangular mesh which should work the same:\n\n nx = ny = int(round(radius / maxh))\n mesh = df.RectangleMesh(df.Point(0, 0), df.Point(radius, radius), nx, ny)\n\n S3 = df.VectorFunctionSpace(mesh, \"CG\", 1, dim=3)\n m_expr = df.Expression((\"0\", \"cos(k * x[0])\", \"sin(k * x[0])\"), k=k, degree=1)\n m = Field(S3, m_expr, name='m')\n dmi = DMI(D)\n Ms_dg = Field(df.FunctionSpace(mesh, 'DG', 0), Ms)\n dmi.setup(m, Ms_dg, unit_length=unit_length)\n energies.append(dmi.compute_energy())\n\n H = df.Function(S3)\n H.vector()[:] = dmi.compute_field()\n print H(0.0, 0.0)\n\n print \"Using unit_length = {}.\".format(unit_length)\n print \"Helical period {}.\".format(helical_period)\n print \"Energy {}.\".format(dmi.compute_energy())\n\n rel_diff_energies = abs(energies[0] - energies[1]) / abs(energies[1])\n print \"Relative difference of energy {}.\".format(rel_diff_energies)\n assert rel_diff_energies < 1e-13\n\n rel_diff_energies2 = abs(energies[0] - energies[2]) / abs(energies[2])\n print \"Relative difference2 of energy {}.\".format(rel_diff_energies2)\n assert rel_diff_energies2 < 1e-13", "def dgdx(self, X):\n \n return 2*(X[0]) - 2", "def __dNdlog2dN(self,Dp,dNdlogDp):\n \n x = np.log10(Dp)\n y = (x[1:]+x[:-1])/2.\n y = np.pad(y,1,'constant',constant_values=(x[0]-(y[0]-x[0]),x[-1]+(x[-1]-y[-1])))\n dlogDp = np.diff(y)\n return dNdlogDp*dlogDp # cm-3", "def getPhysicalDisplayInfo(self) -> Dict[str, Union[int, float]]:\n phyDispRE = re.compile(\n r'.*PhysicalDisplayInfo{(?P<width>\\d+) x (?P<height>\\d+), .*, density (?P<density>[\\d.]+).*')\n ret = self.raw_shell('dumpsys display')\n if m := phyDispRE.search(ret):\n displayInfo = {}\n for prop in ['width', 'height']:\n displayInfo[prop] = int(m.group(prop))\n for prop in ['density']:\n # In mPhysicalDisplayInfo density is already a factor, no need to calculate\n displayInfo[prop] = float(m.group(prop))\n return displayInfo\n\n # This could also be mSystem or mOverscanScreen\n phyDispRE = re.compile('\\s*mUnrestrictedScreen=\\((?P<x>\\d+),(?P<y>\\d+)\\) (?P<width>\\d+)x(?P<height>\\d+)')\n # This is known to work on older versions (i.e. API 10) where mrestrictedScreen is not available\n dispWHRE = re.compile(r'\\s*DisplayWidth=(?P<width>\\d+) *DisplayHeight=(?P<height>\\d+)')\n ret = self.raw_shell('dumpsys window')\n m = phyDispRE.search(ret, 0)\n if not m:\n m = dispWHRE.search(ret, 0)\n if m:\n displayInfo = {}\n for prop in ['width', 'height']:\n displayInfo[prop] = int(m.group(prop))\n for prop in ['density']:\n d = self._getDisplayDensity(strip=True)\n if d:\n displayInfo[prop] = d\n else:\n # No available density information\n displayInfo[prop] = -1.0\n return displayInfo\n\n # gets C{mPhysicalDisplayInfo} values from dumpsys. 
This is a method to obtain display dimensions and density\n phyDispRE = re.compile(r'Physical size: (?P<width>\\d+)x(?P<height>\\d+).*Physical density: (?P<density>\\d+)',\n re.S)\n ret = self.raw_shell('wm size; wm density')\n\n if m := phyDispRE.search(ret):\n displayInfo = {}\n for prop in ['width', 'height']:\n displayInfo[prop] = int(m.group(prop))\n for prop in ['density']:\n displayInfo[prop] = float(m.group(prop))\n return displayInfo\n\n return {}", "def _calculate_system(self) -> None:\n self.y = solve_ode(\n derivative,\n self.y0,\n self.t,\n self.g,\n self.pendulum1,\n self.pendulum2\n )\n\n # Calculate individual pendulum paths\n self.pendulum1.calculate_path(\n theta=self.y[:, 0],\n dtheta=self.y[:, 1]\n )\n self.pendulum2.calculate_path(\n theta=self.y[:, 2],\n dtheta=self.y[:, 3],\n x0=self.pendulum1.x,\n y0=self.pendulum1.y\n )\n\n self.w = self.y[:, 1]\n self.df = pd.DataFrame(\n self.y,\n columns=[\"theta1\", \"dtheta1\", \"theta2\", \"dtheta2\"]\n )", "def calc_meand(screensize):\n Ls = screensize\n Lw, Lh = screensize\n Lw2, Lh2 = screensize**2\n Lw3, Lh3 = screensize**3\n d = np.linalg.norm(Ls)\n a1 = (5/2)*(Lw2/Lh)*np.log((Lh + d) / Lw)\n a2 = d*(3 - (Lw2/Lh2) - (Lh2/Lw2))\n return (1/15) * ((Lw3/Lh2)+(Lh3/Lw2)+a2+a1)" ]
[ "0.6104026", "0.5681574", "0.5631583", "0.545737", "0.53376925", "0.5288433", "0.52243865", "0.5200089", "0.5097595", "0.5092457", "0.50678277", "0.50439966", "0.5032084", "0.5018616", "0.50028914", "0.50016", "0.4991276", "0.49865207", "0.49861157", "0.49838954", "0.4969009", "0.49622178", "0.49527118", "0.49448827", "0.49436855", "0.49410722", "0.4896444", "0.48645335", "0.48573348", "0.4837725", "0.48290515", "0.47866225", "0.47745138", "0.47743854", "0.47692728", "0.47670695", "0.47553053", "0.47499573", "0.47206184", "0.4705319", "0.47034532", "0.46985912", "0.46963388", "0.46831247", "0.46812132", "0.4680747", "0.46661294", "0.46614018", "0.465586", "0.46536365", "0.4648209", "0.4636895", "0.4632727", "0.46310288", "0.4619214", "0.46143624", "0.46038127", "0.4593795", "0.4590061", "0.4580153", "0.4574349", "0.45727247", "0.4571122", "0.45668417", "0.45590767", "0.4553636", "0.45533448", "0.45522478", "0.45506728", "0.45480588", "0.45477384", "0.45431474", "0.4540403", "0.45403275", "0.45376956", "0.45361432", "0.45328456", "0.45317182", "0.4527501", "0.45241955", "0.45218992", "0.4520158", "0.4517658", "0.45160696", "0.4509209", "0.45085752", "0.45002455", "0.4495462", "0.44830734", "0.44826636", "0.447758", "0.44765136", "0.4476203", "0.44761553", "0.44751453", "0.4473774", "0.44683373", "0.44655713", "0.4460763", "0.44607043", "0.44567114" ]
0.0
-1
Calculate many LDOS, by diagonalizing the Hamiltonian
def multi_ldos(h,es=[0.0],delta=0.001,nrep=3,nk=2,numw=3,random=False): print("Calculating eigenvectors in LDOS") if h.is_sparse: # sparse Hamiltonian from bandstructure import smalleig print("SPARSE Matrix") evals,ws = [],[] # empty list ks = klist.kmesh(h.dimensionality,nk=nk) # get grid hk = h.get_hk_gen() # get generator for k in ks: # loop print("Diagonalizing in LDOS, SPARSE mode") if random: k = np.random.random(3) # random vector print("RANDOM vector in LDOS") e,w = smalleig(hk(k),numw=numw,evecs=True) evals += [ie for ie in e] ws += [iw for iw in w] # evals = np.concatenate([evals,e]) # store # ws = np.concatenate([ws,w]) # store # raise # (evals,ws) = h.eigenvectors(nk) # get the different eigenvectors else: print("DENSE Matrix") (evals,ws) = h.eigenvectors(nk) # get the different eigenvectors ds = [(np.conjugate(v)*v).real for v in ws] # calculate densities del ws # remove the wavefunctions os.system("rm -rf MULTILDOS") # remove folder os.system("mkdir MULTILDOS") # create folder go = h.geometry.copy() # copy geometry go = go.supercell(nrep) # create supercell fo = open("MULTILDOS/MULTILDOS.TXT","w") # files with the names for e in es: # loop over energies print("MULTILDOS for energy",e) out = np.array([0.0 for i in range(h.intra.shape[0])]) # initialize for (d,ie) in zip(ds,evals): # loop over wavefunctions fac = delta/((e-ie)**2 + delta**2) # factor to create a delta out += fac*d # add contribution out /= np.pi # normalize out = spatial_dos(h,out) # resum if necessary name0 = "LDOS_"+str(e)+"_.OUT" # name of the output name = "MULTILDOS/" + name0 write_ldos(go.x,go.y,out.tolist()*(nrep**h.dimensionality), output_file=name) # write in file fo.write(name0+"\n") # name of the file fo.flush() # flush fo.close() # close file # Now calculate the DOS from dos import calculate_dos es2 = np.linspace(min(es),max(es),len(es)*10) ys = calculate_dos(evals,es2,delta) # use the Fortran routine from dos import write_dos write_dos(es2,ys,output_file="MULTILDOS/DOS.OUT")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def diagonals_in_hd():\n number_of_pairs = 100000\n angles_for_d = {}\n for d in (10, 100, 1000):\n number_of_corners = 2 ** d - 1\n first_corner = [random.randint(0, number_of_corners) for _ in range(0, number_of_pairs)]\n second_corner = [random.randint(0, number_of_corners) for _ in range(0, number_of_pairs)]\n\n dummy_d = [d for _ in range(0, number_of_pairs)]\n angles = []\n with cf.ProcessPoolExecutor() as executor:\n results = executor.map(find_angle, first_corner, second_corner, dummy_d)\n for result in results:\n angles.append(result)\n ser = pd.Series(angles)\n print(f\"Angles between diagonals for {d} dimensions\")\n print(ser.describe())\n angles_for_d[d] = ser\n\n plot_pmfs_for_ds(angles_for_d)", "def main():\n diagonals_in_hd()", "def all_dhkl(self, crystal):\n #d_min = self.wavelength/self.max2theta*pi/2\n d_min = self.wavelength/sin(self.max2theta/2)/2\n \n # This block is to find the shortest d_hkl, \n # for all basic directions (1,0,0), (0,1,0), (1,1,0), (1,-1,0) and so on, 26 in total \n hkl_max = np.array([1,1,1])\n for h1 in [-1, 0, 1]:\n for k1 in [-1, 0, 1]:\n for l1 in [-1, 0, 1]:\n hkl_index = np.array([[h1,k1,l1]])\n d = float(np.linalg.norm( np.dot(hkl_index, crystal.rec_matrix), axis=1))\n if d>0:\n multiple = 1/d/d_min\n hkl_index *= round(multiple)\n for i in range(len(hkl_max)):\n if hkl_max[i] < hkl_index[0,i]:\n hkl_max[i] = hkl_index[0,i]\n #h1 = 2*ceil(np.linalg.norm(crystal.cell_para[0])/d_min)\n #k1 = 2*ceil(np.linalg.norm(crystal.cell_para[1])/d_min)\n #l1 = 2*ceil(np.linalg.norm(crystal.cell_para[2])/d_min)\n h1, k1, l1 = hkl_max\n h = np.arange(-h1,h1)\n k = np.arange(-k1,k1)\n l = np.arange(-l1,l1)\n \n hkl = np.array((np.meshgrid(h,k,l))).transpose()\n hkl_list = np.reshape(hkl, [len(h)*len(k)*len(l),3])\n hkl_list = hkl_list[np.where(hkl_list.any(axis=1))[0]]\n d_hkl = 1/np.linalg.norm( np.dot(hkl_list, crystal.rec_matrix), axis=1)\n #for ix, a in enumerate(hkl_list):\n # if np.array_equal(a, np.array([1,-1,3])) is True:\n # print(a)\n # break\n #\n #print(ix, hkl_list[ix], d_hkl[ix], d_min)\n\n shortlist = d_hkl > (d_min)\n d_hkl = d_hkl[shortlist]\n hkl_list = hkl_list[shortlist]\n sintheta = self.wavelength/2/d_hkl\n\n self.theta = np.arcsin(sintheta)\n self.hkl_list = hkl_list\n self.d_hkl = d_hkl\n \n #return hkl_list, d_hkl, sintheta", "def slabldos(h,energies=np.linspace(-1.0,1.0,40),delta=None,nk=40):\n if h.dimensionality!=2: raise # nope\n ds = ldosmap(h,energies=energies,delta=delta,nk=nk)\n if len(ds[0])!=len(h.geometry.z): \n print(\"Wrong dimensions\",len(ds[0]),len(h.geometry.z))\n raise\n f = open(\"DOSMAP.OUT\",\"w\")\n f.write(\"# energy, index, DOS, zposition\\n\")\n for ie in range(len(energies)):\n for iz in range(len(h.geometry.z)):\n f.write(str(energies[ie])+\" \")\n f.write(str(iz)+\" \")\n f.write(str(ds[ie,iz])+\" \")\n f.write(str(h.geometry.z[iz])+\"\\n\")\n f.close()\n return energies,np.transpose(ds) # retunr LDOS ", "def test_lindblad_pseudorandom(self):\n rng = np.random.default_rng(9848)\n dim = 10\n num_ham = 4\n num_diss = 3\n\n b = 1.0 # bound on size of random terms\n\n # generate random hamiltonian\n randoperators = rng.uniform(low=-b, high=b, size=(num_ham, dim, dim)) + 1j * rng.uniform(\n low=-b, high=b, size=(num_ham, dim, dim)\n )\n rand_ham_ops = Array(randoperators + randoperators.conj().transpose([0, 2, 1]))\n\n # generate random hamiltonian coefficients\n rand_ham_coeffs = rng.uniform(low=-b, high=b, size=(num_ham)) + 1j * rng.uniform(\n low=-b, high=b, size=(num_ham)\n )\n rand_ham_carriers = 
Array(rng.uniform(low=-b, high=b, size=(num_ham)))\n rand_ham_phases = Array(rng.uniform(low=-b, high=b, size=(num_ham)))\n\n ham_sigs = []\n for coeff, freq, phase in zip(rand_ham_coeffs, rand_ham_carriers, rand_ham_phases):\n ham_sigs.append(Signal(coeff, freq, phase))\n\n ham_sigs = SignalList(ham_sigs)\n\n # generate random dissipators\n rand_diss = Array(\n rng.uniform(low=-b, high=b, size=(num_diss, dim, dim))\n + 1j * rng.uniform(low=-b, high=b, size=(num_diss, dim, dim))\n )\n\n # random dissipator coefficients\n rand_diss_coeffs = rng.uniform(low=-b, high=b, size=(num_diss)) + 1j * rng.uniform(\n low=-b, high=b, size=(num_diss)\n )\n rand_diss_carriers = Array(rng.uniform(low=-b, high=b, size=(num_diss)))\n rand_diss_phases = Array(rng.uniform(low=-b, high=b, size=(num_diss)))\n\n diss_sigs = []\n for coeff, freq, phase in zip(rand_diss_coeffs, rand_diss_carriers, rand_diss_phases):\n diss_sigs.append(Signal(coeff, freq, phase))\n\n diss_sigs = SignalList(diss_sigs)\n\n # random anti-hermitian frame operator\n rand_op = rng.uniform(low=-b, high=b, size=(dim, dim)) + 1j * rng.uniform(\n low=-b, high=b, size=(dim, dim)\n )\n frame_op = Array(rand_op - rand_op.conj().transpose())\n evect = -1j * np.linalg.eigh(1j * frame_op)[1]\n f = lambda x: evect.T.conj() @ x @ evect\n\n lindblad_frame_op = frame_op\n\n # construct model\n hamiltonian = HamiltonianModel(operators=rand_ham_ops, signals=ham_sigs)\n lindblad_model = LindbladModel.from_hamiltonian(\n hamiltonian=hamiltonian, dissipator_operators=rand_diss, dissipator_signals=diss_sigs\n )\n lindblad_model.rotating_frame = lindblad_frame_op\n\n A = Array(\n rng.uniform(low=-b, high=b, size=(dim, dim))\n + 1j * rng.uniform(low=-b, high=b, size=(dim, dim))\n )\n\n t = rng.uniform(low=-b, high=b)\n value = lindblad_model(t, A, in_frame_basis=False)\n value_in_frame_basis = lindblad_model(\n t, lindblad_model.rotating_frame.operator_into_frame_basis(A), in_frame_basis=True\n )\n\n ham_coeffs = np.real(\n rand_ham_coeffs * np.exp(1j * 2 * np.pi * rand_ham_carriers * t + 1j * rand_ham_phases)\n )\n ham = np.tensordot(ham_coeffs, rand_ham_ops, axes=1)\n\n diss_coeffs = np.real(\n rand_diss_coeffs\n * np.exp(1j * 2 * np.pi * rand_diss_carriers * t + 1j * rand_diss_phases)\n )\n\n expected = self._evaluate_lindblad_rhs(\n A, ham, dissipators=rand_diss, dissipator_coeffs=diss_coeffs, frame_op=frame_op, t=t\n )\n\n self.assertAllClose(ham_coeffs, ham_sigs(t))\n self.assertAllClose(diss_coeffs, diss_sigs(t))\n self.assertAllClose(f(rand_diss), lindblad_model._dissipator_operators)\n self.assertAllClose(f(rand_ham_ops), lindblad_model._hamiltonian_operators)\n self.assertAllClose(f(-1j * frame_op), lindblad_model.get_drift(in_frame_basis=True))\n self.assertAllClose(-1j * frame_op, lindblad_model.get_drift(in_frame_basis=False))\n self.assertAllClose(f(-1j * frame_op), lindblad_model._operator_collection.drift)\n self.assertAllClose(expected, value)\n\n lindblad_model.evaluation_mode = \"dense_vectorized\"\n vectorized_value = lindblad_model.evaluate_rhs(\n t, A.flatten(order=\"F\"), in_frame_basis=False\n ).reshape((dim, dim), order=\"F\")\n self.assertAllClose(value, vectorized_value)\n\n vec_gen = lindblad_model.evaluate(t, in_frame_basis=False)\n vectorized_value_lmult = (vec_gen @ A.flatten(order=\"F\")).reshape((dim, dim), order=\"F\")\n self.assertAllClose(value, vectorized_value_lmult)\n\n rho_in_frame_basis = lindblad_model.rotating_frame.operator_into_frame_basis(A)\n vectorized_value_lmult_fb = (\n lindblad_model.evaluate(t, 
in_frame_basis=True) @ rho_in_frame_basis.flatten(order=\"F\")\n ).reshape((dim, dim), order=\"F\")\n self.assertAllClose(vectorized_value_lmult_fb, value_in_frame_basis)\n\n if Dispatch.DEFAULT_BACKEND != \"jax\":\n lindblad_model.evaluation_mode = \"sparse\"\n sparse_value = lindblad_model.evaluate_rhs(t, A, in_frame_basis=False)\n self.assertAllCloseSparse(value, sparse_value)\n\n lindblad_model.evaluation_mode = \"sparse_vectorized\"\n sparse_vectorized_value = lindblad_model.evaluate_rhs(\n t, A.flatten(order=\"F\"), in_frame_basis=False\n ).reshape((dim, dim), order=\"F\")\n self.assertAllCloseSparse(value, sparse_vectorized_value)\n\n sparse_vec_gen = lindblad_model.evaluate(t, in_frame_basis=False)\n sparse_vectorized_value_lmult = (sparse_vec_gen @ A.flatten(order=\"F\")).reshape(\n (dim, dim), order=\"F\"\n )\n self.assertAllCloseSparse(sparse_vectorized_value_lmult, value)", "def ldos2d(h,e=0.0,delta=0.001,nrep=3,nk=None,mode=\"green\",\n random=True,num_wf=20):\n if mode==\"green\":\n import green\n if h.dimensionality!=2: raise # only for 1d\n if nk is not None:\n print(\"LDOS using normal integration with nkpoints\",nk)\n gb,gs = green.bloch_selfenergy(h,energy=e,delta=delta,mode=\"full\",nk=nk)\n d = [ -(gb[i,i]).imag for i in range(len(gb))] # get imaginary part\n else:\n print(\"LDOS using renormalization adaptative Green function\")\n gb,gs = green.bloch_selfenergy(h,energy=e,delta=delta,mode=\"adaptive\")\n d = [ -(gb[i,i]).imag for i in range(len(gb))] # get imaginary part\n elif mode==\"arpack\": # arpack diagonalization\n import klist\n if nk is None: nk = 10\n hkgen = h.get_hk_gen() # get generator\n ds = [] # empty list\n for k in klist.kmesh(h.dimensionality,nk=nk): # loop over kpoints\n print(\"Doing\",k)\n if random:\n print(\"Random k-point\")\n k = np.random.random(3) # random k-point\n hk = csc_matrix(hkgen(k)) # get Hamiltonian\n ds += [ldos_arpack(hk,num_wf=num_wf,robust=False,\n tol=0,e=e,delta=delta)]\n d = ds[0]*0.0 # inititlize\n for di in ds: d += di # add\n d /=len(ds) # normalize\n d = spatial_dos(h,d) # convert to spatial resolved DOS\n g = h.geometry # store geometry\n x,y = g.x,g.y # get the coordinates\n go = h.geometry.copy() # copy geometry\n go = go.supercell(nrep) # create supercell\n write_ldos(go.x,go.y,d.tolist()*(nrep**2),z=go.z) # write in file", "def all_dhkl(self, crystal):\n rec_matrix = crystal.get_reciprocal_cell()\n d_min = self.wavelength/np.sin(self.max2theta/2)/2\n\n # This block is to find the shortest d_hkl, \n # for all basic directions (1,0,0), (0,1,0), (1,1,0), (1,-1,0) \n hkl_index = create_index()\n hkl_max = np.array([1,1,1])\n\n for index in hkl_index:\n d = np.linalg.norm(np.dot(index, rec_matrix))\n multiple = int(np.ceil(1/d/d_min))\n index *= multiple\n for i in range(len(hkl_max)):\n if hkl_max[i] < index[i]:\n hkl_max[i] = index[i]\n \n h1, k1, l1 = hkl_max\n h = np.arange(-h1,h1+1)\n k = np.arange(-k1,k1+1)\n l = np.arange(-l1,l1+1)\n\n hkl = np.array((np.meshgrid(h,k,l))).transpose()\n hkl_list = np.reshape(hkl, [len(h)*len(k)*len(l),3])\n hkl_list = hkl_list[np.where(hkl_list.any(axis=1))[0]]\n d_hkl = 1/np.linalg.norm( np.dot(hkl_list, rec_matrix), axis=1)\n\n shortlist = d_hkl > (d_min)\n d_hkl = d_hkl[shortlist]\n hkl_list = hkl_list[shortlist]\n sintheta = self.wavelength/2/d_hkl\n\n self.theta = np.arcsin(sintheta)\n self.hkl_list = np.array(hkl_list)\n self.d_hkl = d_hkl", "def _compute_diag_H(self, t, index, update_derivatives=False, stationary=False):\r\n\r\n \"\"\"if delta_i~=delta_j:\r\n [h, dh_dD_i, 
dh_dD_j, dh_dsigma] = np.diag(simComputeH(t, index, t, index, update_derivatives=True, stationary=self.is_stationary))\r\n else:\r\n Decay = self.decay[index]\r\n if self.delay is not None:\r\n t = t - self.delay[index]\r\n \r\n t_squared = t*t\r\n half_sigma_decay = 0.5*self.sigma*Decay\r\n [ln_part_1, sign1] = ln_diff_erfs(half_sigma_decay + t/self.sigma,\r\n half_sigma_decay)\r\n \r\n [ln_part_2, sign2] = ln_diff_erfs(half_sigma_decay,\r\n half_sigma_decay - t/self.sigma)\r\n \r\n h = (sign1*np.exp(half_sigma_decay*half_sigma_decay\r\n + ln_part_1\r\n - log(Decay + D_j)) \r\n - sign2*np.exp(half_sigma_decay*half_sigma_decay\r\n - (Decay + D_j)*t\r\n + ln_part_2 \r\n - log(Decay + D_j)))\r\n \r\n sigma2 = self.sigma*self.sigma\r\n\r\n if update_derivatives:\r\n \r\n dh_dD_i = ((0.5*Decay*sigma2*(Decay + D_j)-1)*h \r\n + t*sign2*np.exp(\r\n half_sigma_decay*half_sigma_decay-(Decay+D_j)*t + ln_part_2\r\n )\r\n + self.sigma/np.sqrt(np.pi)*\r\n (-1 + np.exp(-t_squared/sigma2-Decay*t)\r\n + np.exp(-t_squared/sigma2-D_j*t)\r\n - np.exp(-(Decay + D_j)*t)))\r\n \r\n dh_dD_i = (dh_dD_i/(Decay+D_j)).real\r\n \r\n \r\n \r\n dh_dD_j = (t*sign2*np.exp(\r\n half_sigma_decay*half_sigma_decay-(Decay + D_j)*t+ln_part_2\r\n )\r\n -h)\r\n dh_dD_j = (dh_dD_j/(Decay + D_j)).real\r\n\r\n dh_dsigma = 0.5*Decay*Decay*self.sigma*h \\\r\n + 2/(np.sqrt(np.pi)*(Decay+D_j))\\\r\n *((-Decay/2) \\\r\n + (-t/sigma2+Decay/2)*np.exp(-t_squared/sigma2 - Decay*t) \\\r\n - (-t/sigma2-Decay/2)*np.exp(-t_squared/sigma2 - D_j*t) \\\r\n - Decay/2*np.exp(-(Decay+D_j)*t))\"\"\"\r\n pass", "def build_linear_diags(self):\n N = self.N\n dx = self.dx\n j = self._j # Index of the mid-point\n\n diags = np.zeros((2*self._j+1, self.N))\n\n # Advection term\n cff1 = -1/(2*dx)\n\n # Need to stagger these diagonals so lower and upper bands are symmetric\n diags[j-1, :-2] += -1*cff1*self.c[2:]\n diags[j+1, :] += 1*cff1*self.c[:]\n\n # Sponge term\n x = np.arange(0,N*dx,dx)\n rdist = x[-1] - x # Distance from right boundary\n spongefac = -np.exp(-6*rdist/self.spongedist)/self.spongetime\n diags[j,:] += spongefac \n\n return diags", "def _lindblad_driven(H, rho0, c_ops=None, e_ops=None, Nt=1, dt=0.005, t0=0.,\n return_result=True):\n\n def calculateH(t):\n\n Ht = H[0]\n\n for i in range(1, len(H)):\n Ht += - H[i][1](t) * H[i][0]\n\n return Ht\n\n nstates = H[0].shape[-1]\n\n if c_ops is None:\n c_ops = []\n if e_ops is None:\n e_ops = []\n\n\n # initialize the density matrix\n rho = rho0.copy()\n rho = rho.astype(complex)\n\n\n\n t = t0\n\n if return_result == False:\n\n f_dm = open('den_mat.dat', 'w')\n fmt_dm = '{} ' * (nstates**2 + 1) + '\\n'\n\n f_obs = open('obs.dat', 'w')\n fmt = '{} '* (len(e_ops) + 1) + '\\n'\n\n for k in range(Nt):\n\n t += dt\n\n Ht = calculateH(t)\n\n rho = rk4(rho, liouvillian, dt, Ht, c_ops)\n\n # dipole-dipole auto-corrlation function\n #cor = np.trace(np.matmul(d, rho))\n\n # take a partial trace to obtain the rho_el\n # compute observables\n observables = np.zeros(len(e_ops), dtype=complex)\n\n for i, obs_op in enumerate(e_ops):\n observables[i] = obs_dm(rho, obs_op)\n\n f_obs.write(fmt.format(t, *observables))\n\n\n f_obs.close()\n f_dm.close()\n\n return rho\n\n else:\n\n rholist = [] # store density matries\n\n result = Result(dt=dt, Nt=Nt, rho0=rho0)\n\n observables = np.zeros((Nt, len(e_ops)), dtype=complex)\n\n for k in range(Nt):\n\n t += dt\n\n Ht = calculateH(t)\n\n rho = rk4(rho, liouvillian, dt, Ht, c_ops)\n\n rholist.append(rho.copy())\n\n observables[k, :] = [obs_dm(rho, op) for op in e_ops]\n\n\n 
result.observables = observables\n result.rholist = rholist\n\n return result", "def diagonalizing_gates(self):\n return [Hadamard(wires=self.wires)]", "def diagonalizing_gates(self):\n return [\n PauliZ(wires=self.wires),\n S(wires=self.wires),\n Hadamard(wires=self.wires),\n ]", "def diag(B,s,H,ia,ib,ic,chia,chic):\n # Get a guess for the ground state based on the old MPS\n d = B[0].shape[0]\n theta0 = np.tensordot(np.diag(s[ia]),np.tensordot(B[ib],B[ic],axes=(2,1)),axes=(1,1))\n theta0 = np.reshape(np.transpose(theta0,(1,0,2,3)),((chia*chic)*(d**2)))\n\n # Diagonalize Hamiltonian\n e0,v0 = arp.eigsh(H,k=1,which='SA',return_eigenvectors=True,v0=theta0,ncv=20)\n \n return np.reshape(v0.squeeze(),(d*chia,d*chic)),e0", "def ldosmap(h,energies=np.linspace(-1.0,1.0,40),delta=None,nk=40):\n if delta is None:\n delta = (np.max(energies)-np.min(energies))/len(energies) # delta\n hkgen = h.get_hk_gen() # get generator\n dstot = np.zeros((len(energies),h.intra.shape[0])) # initialize\n for ik in range(nk): \n print(\"Random k-point\",ik,nk,end=\"\\r\")\n k = np.random.random(3) # random k-point\n hk = hkgen(k) # ge Hamiltonian\n ds = ldos_waves(hk,es=energies,delta=delta) # LDOS for this kpoint\n dstot += ds # add\n print(\"LDOS finished\")\n dstot /=nk # normalize\n dstot = [spatial_dos(h,d) for d in dstot] # convert to spatial resolved DOS\n return np.array(dstot)", "def post_heatdiag(self,ds):\n #\n self.drmid=self.rmid*0 # mem allocation\n self.drmid[1:-1]=(self.rmid[2:]-self.rmid[0:-2])*0.5\n self.drmid[0]=self.drmid[1]\n self.drmid[-1]=self.drmid[-2]\n\n dt = np.zeros_like(self.time)\n dt[1:] = self.time[1:] - self.time[0:-1]\n dt[0] = dt[1]\n rst=np.nonzero(dt<0) #index when restat happen\n dt[rst]=dt[rst[0]+1]\n self.dt = dt\n\n #get separatrix r\n self.rs=np.interp([1],self.psin,self.rmid)\n \n self.rmidsepmm=(self.rmid-self.rs)*1E3 # dist from sep in mm\n\n #get heat\n self.qe=np.transpose(self.e_perp_energy_psi + self.e_para_energy_psi)/dt/ds\n self.qi=np.transpose(self.i_perp_energy_psi + self.i_para_energy_psi)/dt/ds\n self.ge=np.transpose(self.e_number_psi)/dt/ds\n self.gi=np.transpose(self.i_number_psi)/dt/ds\n\n self.qe = np.transpose(self.qe)\n self.qi = np.transpose(self.qi)\n self.ge = np.transpose(self.ge)\n self.gi = np.transpose(self.gi)\n\n self.qt=self.qe+self.qi\n #imx=self.qt.argmax(axis=1)\n mx=np.amax(self.qt,axis=1)\n self.lq_int=mx*0 #mem allocation\n\n for i in range(mx.shape[0]):\n self.lq_int[i]=np.sum(self.qt[i,:]*self.drmid)/mx[i]", "def __dpll(L,h,x, L2=0):\n if len(x.shape)>1: h = h.reshape(-1,1);\n p = 1./(1+np.exp(2*x*(L.dot(x)+h))) # compute p(x^s_i|x^s_!i) for all i,s\n dh = 2*p*x\n if len(x.shape)>1: dh = dh.mean(1); # average over data if x[i] are vectors\n dL = L.tocoo()\n for k in range(dL.nnz):\n i,j = dL.row[k],dL.col[k]\n dL.data[k] = 2*np.mean((p[i]+p[j])*(x[i]*x[j])) # avg over s if needed\n return dL.tocsr(),dh", "def HamiltonianMatrix(self):\n self.Inter = sp.Matrix([[0,self.t],[self.t,0]])\n self.Intra1 = sp.Matrix([[0,v],[w,0]])\n self.Intra2 = sp.Matrix([[0,w],[v,0]])\n H = sp.Matrix([])\n for i in range(1, self.N+1):\n fila = sp.Matrix([])\n for j in range(1, self.N+1):\n if j==i:\n fila = fila.row_join(self.Inter)\n elif j==i+1:\n fila = fila.row_join(self.Intra1)\n elif j==i-1:\n fila = fila.row_join(self.Intra2)\n else:\n fila = fila.row_join(sp.Matrix([[0,0],[0,0]]))\n H = H.col_join(fila) \n H.simplify()\n #printer = StrPrinter()\n #print(H.table(printer,align='center'))\n self.H = H", "def dKdiag_dtheta(self,dL_dKdiag,X,target):\r\n FX 
= self._cos(self.basis_alpha[None,:],self.basis_omega[None,:],self.basis_phi[None,:])(X)\r\n\r\n La = np.column_stack((self.a[0]*np.ones((self.n_basis,1)),self.a[1]*self.basis_omega, self.a[2]*self.basis_omega**2))\r\n Lo = np.column_stack((self.basis_omega,self.basis_omega,self.basis_omega))\r\n Lp = np.column_stack((self.basis_phi,self.basis_phi+np.pi/2,self.basis_phi+np.pi))\r\n r,omega,phi = self._cos_factorization(La,Lo,Lp)\r\n Gint = self._int_computation( r,omega,phi, r,omega,phi)\r\n\r\n Flower = np.array(self._cos(self.basis_alpha,self.basis_omega,self.basis_phi)(self.lower))[:,None]\r\n F1lower = np.array(self._cos(self.basis_alpha*self.basis_omega,self.basis_omega,self.basis_phi+np.pi/2)(self.lower))[:,None]\r\n\r\n #dK_dvar\r\n dK_dvar = 1./self.variance*mdot(FX,self.Gi,FX.T)\r\n\r\n #dK_dlen\r\n da_dlen = [-6/self.lengthscale**3,-2*np.sqrt(3)/self.lengthscale**2,0.]\r\n db_dlen = [0.,2*self.lengthscale/3.]\r\n dLa_dlen = np.column_stack((da_dlen[0]*np.ones((self.n_basis,1)),da_dlen[1]*self.basis_omega,da_dlen[2]*self.basis_omega**2))\r\n r1,omega1,phi1 = self._cos_factorization(dLa_dlen,Lo,Lp)\r\n dGint_dlen = self._int_computation(r1,omega1,phi1, r,omega,phi)\r\n dGint_dlen = dGint_dlen + dGint_dlen.T\r\n dG_dlen = self.lengthscale**2/(4*np.sqrt(3))*Gint + self.lengthscale**3/(12*np.sqrt(3))*dGint_dlen + db_dlen[0]*np.dot(Flower,Flower.T) + db_dlen[1]*np.dot(F1lower,F1lower.T)\r\n dK_dlen = -mdot(FX,self.Gi,dG_dlen/self.variance,self.Gi,FX.T)\r\n\r\n #dK_dper\r\n dFX_dper = self._cos(-self.basis_alpha[None,:]*self.basis_omega[None,:]/self.period*X ,self.basis_omega[None,:],self.basis_phi[None,:]+np.pi/2)(X)\r\n\r\n dLa_dper = np.column_stack((-self.a[0]*self.basis_omega/self.period, -self.a[1]*self.basis_omega**2/self.period, -self.a[2]*self.basis_omega**3/self.period))\r\n dLp_dper = np.column_stack((self.basis_phi+np.pi/2,self.basis_phi+np.pi,self.basis_phi+np.pi*3/2))\r\n r1,omega1,phi1 = self._cos_factorization(dLa_dper,Lo,dLp_dper)\r\n\r\n IPPprim1 = self.upper*(1./(omega+omega1.T)*np.cos((omega+omega1.T)*self.upper+phi+phi1.T-np.pi/2) + 1./(omega-omega1.T)*np.cos((omega-omega1.T)*self.upper+phi-phi1.T-np.pi/2))\r\n IPPprim1 -= self.lower*(1./(omega+omega1.T)*np.cos((omega+omega1.T)*self.lower+phi+phi1.T-np.pi/2) + 1./(omega-omega1.T)*np.cos((omega-omega1.T)*self.lower+phi-phi1.T-np.pi/2))\r\n IPPprim2 = self.upper*(1./(omega+omega1.T)*np.cos((omega+omega1.T)*self.upper+phi+phi1.T-np.pi/2) + self.upper*np.cos(phi-phi1.T))\r\n IPPprim2 -= self.lower*(1./(omega+omega1.T)*np.cos((omega+omega1.T)*self.lower+phi+phi1.T-np.pi/2) + self.lower*np.cos(phi-phi1.T))\r\n IPPprim = np.where(np.isnan(IPPprim1),IPPprim2,IPPprim1)\r\n\r\n IPPint1 = 1./(omega+omega1.T)**2*np.cos((omega+omega1.T)*self.upper+phi+phi1.T-np.pi) + 1./(omega-omega1.T)**2*np.cos((omega-omega1.T)*self.upper+phi-phi1.T-np.pi)\r\n IPPint1 -= 1./(omega+omega1.T)**2*np.cos((omega+omega1.T)*self.lower+phi+phi1.T-np.pi) + 1./(omega-omega1.T)**2*np.cos((omega-omega1.T)*self.lower+phi-phi1.T-np.pi)\r\n IPPint2 = 1./(omega+omega1.T)**2*np.cos((omega+omega1.T)*self.upper+phi+phi1.T-np.pi) + 1./2*self.upper**2*np.cos(phi-phi1.T)\r\n IPPint2 -= 1./(omega+omega1.T)**2*np.cos((omega+omega1.T)*self.lower+phi+phi1.T-np.pi) + 1./2*self.lower**2*np.cos(phi-phi1.T)\r\n IPPint = np.where(np.isnan(IPPint1),IPPint2,IPPint1)\r\n\r\n dLa_dper2 = np.column_stack((-self.a[1]*self.basis_omega/self.period, -2*self.a[2]*self.basis_omega**2/self.period))\r\n dLp_dper2 = np.column_stack((self.basis_phi+np.pi/2,self.basis_phi+np.pi))\r\n 
r2,omega2,phi2 = self._cos_factorization(dLa_dper2,Lo[:,0:2],dLp_dper2)\r\n\r\n dGint_dper = np.dot(r,r1.T)/2 * (IPPprim - IPPint) + self._int_computation(r2,omega2,phi2, r,omega,phi)\r\n dGint_dper = dGint_dper + dGint_dper.T\r\n\r\n dFlower_dper = np.array(self._cos(-self.lower*self.basis_alpha*self.basis_omega/self.period,self.basis_omega,self.basis_phi+np.pi/2)(self.lower))[:,None]\r\n dF1lower_dper = np.array(self._cos(-self.lower*self.basis_alpha*self.basis_omega**2/self.period,self.basis_omega,self.basis_phi+np.pi)(self.lower)+self._cos(-self.basis_alpha*self.basis_omega/self.period,self.basis_omega,self.basis_phi+np.pi/2)(self.lower))[:,None]\r\n\r\n dG_dper = 1./self.variance*(self.lengthscale**3/(12*np.sqrt(3))*dGint_dper + self.b[0]*(np.dot(dFlower_dper,Flower.T)+np.dot(Flower,dFlower_dper.T)) + self.b[1]*(np.dot(dF1lower_dper,F1lower.T)+np.dot(F1lower,dF1lower_dper.T)))\r\n\r\n dK_dper = 2* mdot(dFX_dper,self.Gi,FX.T) - mdot(FX,self.Gi,dG_dper,self.Gi,FX.T)\r\n\r\n target[0] += np.sum(np.diag(dK_dvar)*dL_dKdiag)\r\n target[1] += np.sum(np.diag(dK_dlen)*dL_dKdiag)\r\n target[2] += np.sum(np.diag(dK_dper)*dL_dKdiag)", "def doppler(self, lmdas):\n lmda_0 = 656.3\n velocities = []\n for lmda in lmdas:\n velocities.append((lmda - lmda_0)*constants.c/lmda_0)\n return np.array(velocities)", "def dKdiag_dtheta(self,dL_dKdiag,X,target):\r\n FX = self._cos(self.basis_alpha[None,:],self.basis_omega[None,:],self.basis_phi[None,:])(X)\r\n\r\n La = np.column_stack((self.a[0]*np.ones((self.n_basis,1)), self.a[1]*self.basis_omega, self.a[2]*self.basis_omega**2, self.a[3]*self.basis_omega**3))\r\n Lo = np.column_stack((self.basis_omega, self.basis_omega, self.basis_omega, self.basis_omega))\r\n Lp = np.column_stack((self.basis_phi, self.basis_phi+np.pi/2, self.basis_phi+np.pi, self.basis_phi+np.pi*3/2))\r\n r,omega,phi = self._cos_factorization(La,Lo,Lp)\r\n Gint = self._int_computation( r,omega,phi, r,omega,phi)\r\n\r\n Flower = np.array(self._cos(self.basis_alpha,self.basis_omega,self.basis_phi)(self.lower))[:,None]\r\n F1lower = np.array(self._cos(self.basis_alpha*self.basis_omega,self.basis_omega,self.basis_phi+np.pi/2)(self.lower))[:,None]\r\n F2lower = np.array(self._cos(self.basis_alpha*self.basis_omega**2,self.basis_omega,self.basis_phi+np.pi)(self.lower))[:,None]\r\n\r\n #dK_dvar\r\n dK_dvar = 1. 
/ self.variance * mdot(FX, self.Gi, FX.T)\r\n\r\n #dK_dlen\r\n da_dlen = [-3*self.a[0]/self.lengthscale, -2*self.a[1]/self.lengthscale, -self.a[2]/self.lengthscale, 0.]\r\n db_dlen = [0., 4*self.b[1]/self.lengthscale, 2*self.b[2]/self.lengthscale, 2*self.b[3]/self.lengthscale, 2*self.b[4]/self.lengthscale]\r\n dLa_dlen = np.column_stack((da_dlen[0]*np.ones((self.n_basis,1)), da_dlen[1]*self.basis_omega, da_dlen[2]*self.basis_omega**2, da_dlen[3]*self.basis_omega**3))\r\n r1,omega1,phi1 = self._cos_factorization(dLa_dlen,Lo,Lp)\r\n dGint_dlen = self._int_computation(r1,omega1,phi1, r,omega,phi)\r\n dGint_dlen = dGint_dlen + dGint_dlen.T\r\n dlower_terms_dlen = db_dlen[0]*np.dot(Flower,Flower.T) + db_dlen[1]*np.dot(F2lower,F2lower.T) + db_dlen[2]*np.dot(F1lower,F1lower.T) + db_dlen[3]*np.dot(F2lower,Flower.T) + db_dlen[4]*np.dot(Flower,F2lower.T)\r\n dG_dlen = 15*self.lengthscale**4/(400*np.sqrt(5))*Gint + 3*self.lengthscale**5/(400*np.sqrt(5))*dGint_dlen + dlower_terms_dlen\r\n dK_dlen = -mdot(FX,self.Gi,dG_dlen/self.variance,self.Gi,FX.T)\r\n\r\n #dK_dper\r\n dFX_dper = self._cos(-self.basis_alpha[None,:]*self.basis_omega[None,:]/self.period*X ,self.basis_omega[None,:],self.basis_phi[None,:]+np.pi/2)(X)\r\n\r\n dLa_dper = np.column_stack((-self.a[0]*self.basis_omega/self.period, -self.a[1]*self.basis_omega**2/self.period, -self.a[2]*self.basis_omega**3/self.period, -self.a[3]*self.basis_omega**4/self.period))\r\n dLp_dper = np.column_stack((self.basis_phi+np.pi/2,self.basis_phi+np.pi,self.basis_phi+np.pi*3/2,self.basis_phi))\r\n r1,omega1,phi1 = self._cos_factorization(dLa_dper,Lo,dLp_dper)\r\n\r\n IPPprim1 = self.upper*(1./(omega+omega1.T)*np.cos((omega+omega1.T)*self.upper+phi+phi1.T-np.pi/2) + 1./(omega-omega1.T)*np.cos((omega-omega1.T)*self.upper+phi-phi1.T-np.pi/2))\r\n IPPprim1 -= self.lower*(1./(omega+omega1.T)*np.cos((omega+omega1.T)*self.lower+phi+phi1.T-np.pi/2) + 1./(omega-omega1.T)*np.cos((omega-omega1.T)*self.lower+phi-phi1.T-np.pi/2))\r\n IPPprim2 = self.upper*(1./(omega+omega1.T)*np.cos((omega+omega1.T)*self.upper+phi+phi1.T-np.pi/2) + self.upper*np.cos(phi-phi1.T))\r\n IPPprim2 -= self.lower*(1./(omega+omega1.T)*np.cos((omega+omega1.T)*self.lower+phi+phi1.T-np.pi/2) + self.lower*np.cos(phi-phi1.T))\r\n IPPprim = np.where(np.isnan(IPPprim1),IPPprim2,IPPprim1)\r\n\r\n IPPint1 = 1./(omega+omega1.T)**2*np.cos((omega+omega1.T)*self.upper+phi+phi1.T-np.pi) + 1./(omega-omega1.T)**2*np.cos((omega-omega1.T)*self.upper+phi-phi1.T-np.pi)\r\n IPPint1 -= 1./(omega+omega1.T)**2*np.cos((omega+omega1.T)*self.lower+phi+phi1.T-np.pi) + 1./(omega-omega1.T)**2*np.cos((omega-omega1.T)*self.lower+phi-phi1.T-np.pi)\r\n IPPint2 = 1./(omega+omega1.T)**2*np.cos((omega+omega1.T)*self.upper+phi+phi1.T-np.pi) + .5*self.upper**2*np.cos(phi-phi1.T)\r\n IPPint2 -= 1./(omega+omega1.T)**2*np.cos((omega+omega1.T)*self.lower+phi+phi1.T-np.pi) + .5*self.lower**2*np.cos(phi-phi1.T)\r\n IPPint = np.where(np.isnan(IPPint1),IPPint2,IPPint1)\r\n\r\n dLa_dper2 = np.column_stack((-self.a[1]*self.basis_omega/self.period, -2*self.a[2]*self.basis_omega**2/self.period, -3*self.a[3]*self.basis_omega**3/self.period))\r\n dLp_dper2 = np.column_stack((self.basis_phi+np.pi/2, self.basis_phi+np.pi, self.basis_phi+np.pi*3/2))\r\n r2,omega2,phi2 = self._cos_factorization(dLa_dper2,Lo[:,0:2],dLp_dper2)\r\n\r\n dGint_dper = np.dot(r,r1.T)/2 * (IPPprim - IPPint) + self._int_computation(r2,omega2,phi2, r,omega,phi)\r\n dGint_dper = dGint_dper + dGint_dper.T\r\n\r\n dFlower_dper = 
np.array(self._cos(-self.lower*self.basis_alpha*self.basis_omega/self.period,self.basis_omega,self.basis_phi+np.pi/2)(self.lower))[:,None]\r\n dF1lower_dper = np.array(self._cos(-self.lower*self.basis_alpha*self.basis_omega**2/self.period,self.basis_omega,self.basis_phi+np.pi)(self.lower)+self._cos(-self.basis_alpha*self.basis_omega/self.period,self.basis_omega,self.basis_phi+np.pi/2)(self.lower))[:,None]\r\n dF2lower_dper = np.array(self._cos(-self.lower*self.basis_alpha*self.basis_omega**3/self.period,self.basis_omega,self.basis_phi+np.pi*3/2)(self.lower) + self._cos(-2*self.basis_alpha*self.basis_omega**2/self.period,self.basis_omega,self.basis_phi+np.pi)(self.lower))[:,None]\r\n\r\n dlower_terms_dper = self.b[0] * (np.dot(dFlower_dper,Flower.T) + np.dot(Flower.T,dFlower_dper))\r\n dlower_terms_dper += self.b[1] * (np.dot(dF2lower_dper,F2lower.T) + np.dot(F2lower,dF2lower_dper.T)) - 4*self.b[1]/self.period*np.dot(F2lower,F2lower.T)\r\n dlower_terms_dper += self.b[2] * (np.dot(dF1lower_dper,F1lower.T) + np.dot(F1lower,dF1lower_dper.T)) - 2*self.b[2]/self.period*np.dot(F1lower,F1lower.T)\r\n dlower_terms_dper += self.b[3] * (np.dot(dF2lower_dper,Flower.T) + np.dot(F2lower,dFlower_dper.T)) - 2*self.b[3]/self.period*np.dot(F2lower,Flower.T)\r\n dlower_terms_dper += self.b[4] * (np.dot(dFlower_dper,F2lower.T) + np.dot(Flower,dF2lower_dper.T)) - 2*self.b[4]/self.period*np.dot(Flower,F2lower.T)\r\n\r\n dG_dper = 1./self.variance*(3*self.lengthscale**5/(400*np.sqrt(5))*dGint_dper + 0.5*dlower_terms_dper)\r\n dK_dper = 2*mdot(dFX_dper,self.Gi,FX.T) - mdot(FX,self.Gi,dG_dper,self.Gi,FX.T)\r\n\r\n target[0] += np.sum(np.diag(dK_dvar)*dL_dKdiag)\r\n target[1] += np.sum(np.diag(dK_dlen)*dL_dKdiag)\r\n target[2] += np.sum(np.diag(dK_dper)*dL_dKdiag)", "def dKdiag_dtheta(self,dL_dKdiag,X,target):\r\n FX = self._cos(self.basis_alpha[None,:],self.basis_omega[None,:],self.basis_phi[None,:])(X)\r\n\r\n La = np.column_stack((self.a[0]*np.ones((self.n_basis,1)),self.a[1]*self.basis_omega))\r\n Lo = np.column_stack((self.basis_omega,self.basis_omega))\r\n Lp = np.column_stack((self.basis_phi,self.basis_phi+np.pi/2))\r\n r,omega,phi = self._cos_factorization(La,Lo,Lp)\r\n Gint = self._int_computation( r,omega,phi, r,omega,phi)\r\n\r\n Flower = np.array(self._cos(self.basis_alpha,self.basis_omega,self.basis_phi)(self.lower))[:,None]\r\n\r\n #dK_dvar\r\n dK_dvar = 1./self.variance*mdot(FX,self.Gi,FX.T)\r\n\r\n #dK_dlen\r\n da_dlen = [-1./self.lengthscale**2,0.]\r\n dLa_dlen = np.column_stack((da_dlen[0]*np.ones((self.n_basis,1)),da_dlen[1]*self.basis_omega))\r\n r1,omega1,phi1 = self._cos_factorization(dLa_dlen,Lo,Lp)\r\n dGint_dlen = self._int_computation(r1,omega1,phi1, r,omega,phi)\r\n dGint_dlen = dGint_dlen + dGint_dlen.T\r\n dG_dlen = 1./2*Gint + self.lengthscale/2*dGint_dlen\r\n dK_dlen = -mdot(FX,self.Gi,dG_dlen/self.variance,self.Gi,FX.T)\r\n\r\n #dK_dper\r\n dFX_dper = self._cos(-self.basis_alpha[None,:]*self.basis_omega[None,:]/self.period*X ,self.basis_omega[None,:],self.basis_phi[None,:]+np.pi/2)(X)\r\n\r\n dLa_dper = np.column_stack((-self.a[0]*self.basis_omega/self.period, -self.a[1]*self.basis_omega**2/self.period))\r\n dLp_dper = np.column_stack((self.basis_phi+np.pi/2,self.basis_phi+np.pi))\r\n r1,omega1,phi1 = self._cos_factorization(dLa_dper,Lo,dLp_dper)\r\n\r\n IPPprim1 = self.upper*(1./(omega+omega1.T)*np.cos((omega+omega1.T)*self.upper+phi+phi1.T-np.pi/2) + 1./(omega-omega1.T)*np.cos((omega-omega1.T)*self.upper+phi-phi1.T-np.pi/2))\r\n IPPprim1 -= 
self.lower*(1./(omega+omega1.T)*np.cos((omega+omega1.T)*self.lower+phi+phi1.T-np.pi/2) + 1./(omega-omega1.T)*np.cos((omega-omega1.T)*self.lower+phi-phi1.T-np.pi/2))\r\n IPPprim2 = self.upper*(1./(omega+omega1.T)*np.cos((omega+omega1.T)*self.upper+phi+phi1.T-np.pi/2) + self.upper*np.cos(phi-phi1.T))\r\n IPPprim2 -= self.lower*(1./(omega+omega1.T)*np.cos((omega+omega1.T)*self.lower+phi+phi1.T-np.pi/2) + self.lower*np.cos(phi-phi1.T))\r\n IPPprim = np.where(np.isnan(IPPprim1),IPPprim2,IPPprim1)\r\n\r\n IPPint1 = 1./(omega+omega1.T)**2*np.cos((omega+omega1.T)*self.upper+phi+phi1.T-np.pi) + 1./(omega-omega1.T)**2*np.cos((omega-omega1.T)*self.upper+phi-phi1.T-np.pi)\r\n IPPint1 -= 1./(omega+omega1.T)**2*np.cos((omega+omega1.T)*self.lower+phi+phi1.T-np.pi) + 1./(omega-omega1.T)**2*np.cos((omega-omega1.T)*self.lower+phi-phi1.T-np.pi)\r\n IPPint2 = 1./(omega+omega1.T)**2*np.cos((omega+omega1.T)*self.upper+phi+phi1.T-np.pi) + 1./2*self.upper**2*np.cos(phi-phi1.T)\r\n IPPint2 -= 1./(omega+omega1.T)**2*np.cos((omega+omega1.T)*self.lower+phi+phi1.T-np.pi) + 1./2*self.lower**2*np.cos(phi-phi1.T)\r\n IPPint = np.where(np.isnan(IPPint1),IPPint2,IPPint1)\r\n\r\n dLa_dper2 = np.column_stack((-self.a[1]*self.basis_omega/self.period))\r\n dLp_dper2 = np.column_stack((self.basis_phi+np.pi/2))\r\n r2,omega2,phi2 = dLa_dper2.T,Lo[:,0:1],dLp_dper2.T\r\n\r\n dGint_dper = np.dot(r,r1.T)/2 * (IPPprim - IPPint) + self._int_computation(r2,omega2,phi2, r,omega,phi)\r\n dGint_dper = dGint_dper + dGint_dper.T\r\n\r\n dFlower_dper = np.array(self._cos(-self.lower*self.basis_alpha*self.basis_omega/self.period,self.basis_omega,self.basis_phi+np.pi/2)(self.lower))[:,None]\r\n\r\n dG_dper = 1./self.variance*(self.lengthscale/2*dGint_dper + self.b[0]*(np.dot(dFlower_dper,Flower.T)+np.dot(Flower,dFlower_dper.T)))\r\n\r\n dK_dper = 2*mdot(dFX_dper,self.Gi,FX.T) - mdot(FX,self.Gi,dG_dper,self.Gi,FX.T)\r\n\r\n target[0] += np.sum(np.diag(dK_dvar)*dL_dKdiag)\r\n target[1] += np.sum(np.diag(dK_dlen)*dL_dKdiag)\r\n target[2] += np.sum(np.diag(dK_dper)*dL_dKdiag)", "def generate_diagonal(n, l):\n res = []\n arr = [1] * l\n l = l+1\n for diag in range(n):\n res = []\n for index in range(1, l):\n summed = sum(arr[:index]) # sum is really slow for large numbers\n res.append(summed)\n arr = res\n return (arr)", "def diagonal(step='Metropolis', iters=5000):\n X = mc.Uniform('X', lower=-1., upper=1., value=[0., 0.])\n\n @mc.potential\n def near_diag(X=X):\n if abs(X[0] - X[1]) < .1:\n return 0\n else:\n return -inf\n\n mod = setup_and_sample(vars(), step, iters)\n mod.shape = pl.array([[-1,-1], [-1,-.9], [.9,1], [1,1], [1,.9], [-.9,-1], [-1,-1]])\n mod.true_mean = [0,0]\n mod.true_iqr = ['(-.5,.5)', '(-.5,5)']\n return mod", "def matrix_neumann2D(Omega,Nx,Ny):\r\n \r\n hx = (Omega[1]-Omega[0])/Nx\r\n hy = (Omega[3]-Omega[2])/Ny\r\n hx2 = hx*hx\r\n hy2 = hy*hy\r\n\r\n # Les inconnues sont numérotés de 0 à Nx suivant x et 0 à Ny\r\n # suivant y. La taille du problème est donc (Nx+1)*(Ny+1).\r\n\r\n # Pour -Laplacien(u), la matrice est constituée de (Ny+1)x(Ny+1)\r\n # blocs de taille (Nx+1)x(Nx+1), de la forme\r\n #\r\n # A = [ A0 B ]\r\n # [ B A1 B ]\r\n # [ B A1 B ]\r\n # [ . . . ]\r\n # [ B A1 B ]\r\n # [ B A0 ]\r\n #\r\n # Au final, on peut commencer à remplir avec des diagonales\r\n N = (1+Nx)*(1+Ny)\r\n diags = np.zeros((5,N))\r\n # La diagonale est constante\r\n diags[2,:] = 2./hx2+2./hy2\r\n # Diagonale -1\r\n diags[1,:] = -1./hx2 # en général\r\n diags[1,np.arange(Nx,N,Nx+1)] = 0. 
# bord gauche\r\n diags[1,np.arange(Nx-1,N,Nx+1)] = -2./hx2 # bord droit\r\n # Diagonale +1\r\n diags[3,:] = -1./hx2 # en général\r\n diags[3,np.arange(0,N,Nx+1)] = 0. # bord droit\r\n diags[3,np.arange(1,N,Nx+1)] = -2./hx2 # bord gauche\r\n # Diagonale -(Nx+1)\r\n diags[0,:] = -1./hy2 # en général\r\n diags[0,(Nx+1)*(Ny-1):(Nx+1)*Ny] = -2./hy2 # bord bas\r\n # Diagonale +(Nx+1)\r\n diags[4,:] = -1./hy2 # en général\r\n diags[4,Nx+1:2*(Nx+1)] = -2./hy2 # bord haut\r\n\r\n # Construction de la matrice creuse de u --> -Laplacien(u)\r\n A = sp.spdiags(diags,[-(Nx+1),-1,0,1,(Nx+1)], (Nx+1)*(Ny+1),\r\n (Nx+1)*(Ny+1), format=\"csc\")\r\n\r\n return A", "def build_dispersion_diags(self):\n N = self.N\n j = self._j # Index of the mid-point\n diags = np.zeros((2*self._j+1, self.N))\n\n dx3 = np.power(self.dx, 3.)\n\n ## This tells us how the diagonal matrix construction works\n #diags[j-2,:-4] = np.arange(1,N+1)[4:]\n #diags[j-1,:-2] = np.arange(1,N+1)[2:]\n #diags[j+1,:] = np.arange(1,N+1)\n #diags[j+2,:] = np.arange(1,N+1)\n\n #diags[j,0:2] = 11\n #diags[j+1,1:3] = 12\n #diags[j+2,2:4] = 13\n #diags[j+3,3:5]= 14\n\n\n # Original method had assymmetric diagonals\n #cff = -self.beta/(2*dx3)\n #diags[j-2,:] += -1*cff\n #diags[j-1,:] += 2*cff\n #diags[j+1,:] += -2*cff\n #diags[j+2,:] += 1*cff\n\n # Need to stagger these diagonals so lower and upper bands are symmetric\n cff = -1/(2*dx3)\n beta = self.beta\n diags[j-2,:-4] += -1*cff*beta[4:]\n diags[j-1,:-2] += 2*cff*beta[2:]\n diags[j+1,:] += -2*cff*beta\n diags[j+2,:] += 1*cff*beta\n\n ## Left boundary - use forward differencing\n diags[j-1,0] = 0\n diags[j,0:2] = -2*cff*beta[0]\n diags[j+1,1:3] = 6*cff*beta[0]\n diags[j+2,2:4] = -6*cff*beta[0]\n diags[j+3,3:5] = 2*cff*beta[0]\n\n # Zero first two points\n #diags[j-1,0] = 0\n #diags[j,0:2] = 0 \n #diags[j+1,1:3] = 0 \n #diags[j+2,2:4] = 0 \n #if self._j>2:\n # diags[j+3,3:5] = 0 \n\n return diags", "def metro_ising(L, T, h):\n\n lattice = transform_lattice(np.random.rand(N + 2, N + 2)) # +2 because of periodic bounds\n ising_chain = [lattice]\n m = 0\n\n for i in range(L):\n rand_row = np.random.choice(index)\n rand_col = np.random.choice(index)\n\n if H(ising_chain[i], rand_row, rand_col, h, T):\n new_lattice = ising_chain[i].copy()\n new_lattice[rand_row][rand_col] *= -1\n ising_chain.append(transform_lattice(new_lattice))\n else:\n ising_chain.append(ising_chain[i])\n\n m += np.sum(ising_chain[i][1:N + 1, 1:N + 1]) # magnetization\n\n return m", "def dolomite():\n\n rho = 2840.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 205.; C[0,1] = 71.; C[0,2] = 57.4; C[0,3] = -19.5; C[0,4] = 13.7; C[0,5] = 0.\n C[1,0] = C[0,1]; C[1,1] = 205.; C[1,2] = 57.4; C[1,3] = 19.5; C[1,4] = -13.7; C[1,5] = 0.\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 113.; C[2,3] = 0.; C[2,4] = 0.; C[2,5] = 0.\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 39.8; C[3,4] = 0.; C[3,5] = -13.7\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 39.8; C[4,5] = -19.5\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 67.\n\n return C, rho", "def _fast_lindblad(H, rho0, c_ops, e_ops=None, Nt=1, dt=0.005):\n\n # initialize the density matrix\n rho = rho0\n\n # if e_ops is None:\n # e_ops = []\n\n t = 0.0\n dt2 = dt/2.\n # first-step\n # rho_half = rho0 + liouvillian(rho0, h0, c_ops) * dt2\n # rho1 = rho0 + liouvillian(rho_half, h0, c_ops) * dt\n\n # rho_old = rho0\n # rho = rho1\n\n # f_dm = open('den_mat.dat', 'w')\n # fmt_dm = '{} ' * (nstates**2 + 
1) + '\\n'\n\n # f_obs = open('obs.dat', 'w')\n # fmt = '{} '* (len(e_ops) + 1) + '\\n'\n\n for k in range(Nt):\n\n t += dt\n\n # rho_new = rho_old + liouvillian(rho, h0, c_ops) * 2. * dt\n # # update rho_old\n # rho_old = rho\n # rho = rho_new\n\n # rho = rk4(rho, liouvillian, dt, H, c_ops)\n k1 = liouvillian(rho, H, c_ops )\n k2 = liouvillian(rho + k1*dt2, H, c_ops)\n k3 = liouvillian(rho + k2*dt2, H, c_ops)\n k4 = liouvillian(rho + k3*dt, H, c_ops)\n\n rho += (k1 + 2*k2 + 2*k3 + k4)/6. * dt\n\n # dipole-dipole auto-corrlation function\n #cor = np.trace(np.matmul(d, rho))\n\n # take a partial trace to obtain the rho_el\n # compute observables\n observables = np.zeros(len(e_ops), dtype=complex)\n\n # for i, obs_op in enumerate(e_ops):\n # observables[i] = obs_dm(rho, obs_op)\n\n # f_obs.write(fmt.format(t, *observables))\n\n\n # f_obs.close()\n # f_dm.close()\n\n return rho\n\n # else:\n\n # rholist = [] # store density matries\n\n # result = Result(dt=dt, Nt=Nt, rho0=rho0)\n\n # observables = np.zeros((Nt, len(e_ops)), dtype=complex)\n\n # for k in range(Nt):\n\n # t += dt\n # rho = rk4(rho, liouvillian, dt, H, c_ops)\n\n # rholist.append(rho.copy())\n\n\n # observables[k, :] = [obs_dm(rho, op) for op in e_ops]\n\n\n # result.observables = observables\n # result.rholist = rholist\n\n # return result", "def L(order=4):\n dim_sh = dimension(order)\n L = np.zeros((dim_sh, dim_sh))\n for j in range(dim_sh):\n l = sh_degree(j)\n L[j, j] = - (l * (l + 1))\n return L", "def hdmatrix(self):\n hdmat = np.zeros((len(self.pulsars), len(self.pulsars)))\n\n for i,pulsar1 in enumerate(self.pulsars):\n for j,pulsar2 in enumerate(self.pulsars):\n hdmat[i,j] = hellingsdowns_factor(pulsar1, pulsar2)\n self.hdm = hdmat\n return hdmat", "def fixed_time_trajectories(self, ll=1, distributions=None, discrete=False, noise=0):\n\n self.time_uniform = np.linspace(0, self.nsteps, self.nsteps * self.padding)\n\n for t in tqdm.tqdm(range(self.ntraj)):\n\n if distributions is not None:\n\n if self.dwell_distribution == 'exponential':\n self.lamb = np.random.choice(distributions[0])\n elif self.dwell_distribution == 'power':\n self.alpha = np.random.choice(distributions[0])\n\n self.hop_sigma = np.random.choice(distributions[1])\n self.H = np.random.choice(distributions[2])\n #self.H = np.mean(distributions[2])\n\n time = [0]\n total_time = 0 # saves a lot of time\n\n while total_time < self.nsteps:\n\n # hop at random time intervals according to one of the following PDFs\n if self.dwell_distribution == 'exponential':\n time.append(sampling.random_exponential_dwell(self.lamb))\n elif self.dwell_distribution == 'power':\n if self.alpha == 1:\n time.append(1)\n else:\n time.append(sampling.random_power_law_dwell(1 + self.alpha, ll=ll, discrete=discrete)[0])\n else:\n sys.exit('Please enter a valid dwell time probability distribution')\n total_time += time[-1]\n\n time = np.cumsum(time)\n\n if self.hop_distribution in ['gaussian', 'Gaussian']:\n\n z = np.cumsum(np.random.normal(loc=0, scale=self.hop_sigma, size=len(time)))\n z -= z[0] # untested\n\n elif self.hop_distribution in ['fbm', 'fractional', 'fraction_brownian_motion']:\n z = fbm.FBM(len(time), self.H, method=\"daviesharte\").fbm()[:-1] # automatically inserts zero at beginning of array\n z /= ((1.0 / len(time)) ** self.H) # reversing a normalization done in the fbm code\n z *= self.hop_sigma\n self.steps.append(z[1:] - z[:-1]) # for autocorrelation calculation\n\n else:\n sys.exit('Please enter a valid hop distance probability distribution')\n\n # for 
visualizing hops\n # trajectory_hops = np.zeros([2 * len(time) - 1, 2])\n #\n # trajectory_hops[1::2, 0] = time[1:]\n # trajectory_hops[2::2, 0] = time[1:]\n #\n # trajectory_hops[::2, 1] = z\n # trajectory_hops[1:-1:2, 1] = z[:-1]\n # trajectory_hops[-1, 1] = z[-1]\n # plt.plot(trajectory_hops[:, 0], trajectory_hops[:, 1])\n # plt.show()\n # exit()\n\n # make uniform time intervals with the same interval for each simulated trajectory\n self.z_interpolated[t, :] = z[np.digitize(self.time_uniform, time, right=False) - 1]\n\n #plt.hist(np.random.normal(loc=0, scale=noise, size=len(self.time_uniform)))\n\n if noise > 0:\n self.z_interpolated += np.random.normal(loc=0, scale=noise, size=len(self.time_uniform))\n\n self.time_uniform *= self.dt\n # plt.plot(trajectory_hops[:, 0]*self.dt, trajectory_hops[:, 1])\n # plt.plot(self.time_uniform, self.z_interpolated[-1, :])\n # plt.show()\n # exit()", "def lltnum(self,):\n m = self.m\n n = self.n\n diag = self.diag\n perm = self.perm\n AAt = self.AAt\n kAAt = self.kAAt\n iAAt = self.iAAt\n mark = self.mark\n self.denwin\n\n m2 = m+n\n #/*------------------------------------------------------+\n #| initialize constants */\n\n temp = np.zeros(m2)\n first = np.zeros(m2, dtype=np.int)\n link = np.empty(m2, dtype=np.int)\n for i in range(m2):\n link[i] = -1\n\n maxdiag=0.0\n for i in range(m2):\n if abs(diag[i]) > maxdiag:\n maxdiag = abs(diag[i])\n\n self.ndep=0\n\n #/*------------------------------------------------------+\n #| begin main loop - this code is taken from George and |\n #| Liu's book, pg. 155, modified to do LDLt instead |\n #| of LLt factorization. */\n\n for i in range(m2):\n diagi = diag[i]\n sgn_diagi = -1 if perm[i] < n else 1\n j = link[i]\n while j != -1:\n newj = link[j]\n k = first[j]\n lij = AAt[k]\n lij_dj = lij*diag[j]\n diagi -= lij*lij_dj\n k_bgn = k+1\n k_end = kAAt[j+1]\n if k_bgn < k_end:\n first[j] = k_bgn\n row = iAAt[k_bgn]\n link[j] = link[row]\n link[row] = j\n if j < self.denwin:\n for kk in range(k_bgn, k_end):\n temp[iAAt[kk]] += lij_dj*AAt[kk]\n else:\n ptr = row\n for kk in range(k_bgn, k_end):\n temp[ptr] += lij_dj*AAt[kk]\n ptr+=1\n\n j=newj\n\n k_bgn = kAAt[i]\n k_end = kAAt[i+1]\n for kk in range(k_bgn, k_end):\n row = iAAt[kk]\n AAt[kk] -= temp[row]\n\n if abs(diagi) <= self.epsnum*maxdiag or mark[i] == False:\n\n #if (sgn_diagi*diagi <= epsnum*maxdiag || mark[i] == FALSE)\n\n self.ndep+=1\n maxoffdiag = 0.0\n for kk in range(k_bgn, k_end):\n maxoffdiag = max( maxoffdiag, abs( AAt[kk] ) )\n\n if maxoffdiag < 1.0e+6*self._EPS:\n mark[i] = False\n else:\n diagi = sgn_diagi * self._EPS\n\n diag[i] = diagi\n if k_bgn < k_end:\n first[i] = k_bgn\n row = iAAt[k_bgn]\n link[i] = link[row]\n link[row] = i\n for kk in range(k_bgn, k_end):\n row = iAAt[kk]\n if mark[i]:\n AAt[kk] /= diagi\n else:\n AAt[kk] = 0.0\n\n temp[row] = 0.0\n\n del(link)\n del(first)\n del(temp)", "def _lindblad(H, rho0, c_ops, e_ops=None, Nt=1, dt=0.005, return_result=True):\n\n # initialize the density matrix\n rho = rho0.copy()\n rho = rho.astype(complex)\n\n if e_ops is None:\n e_ops = []\n\n t = 0.0\n # first-step\n # rho_half = rho0 + liouvillian(rho0, h0, c_ops) * dt2\n # rho1 = rho0 + liouvillian(rho_half, h0, c_ops) * dt\n\n # rho_old = rho0\n # rho = rho1\n if return_result == False:\n\n # f_dm = open('den_mat.dat', 'w')\n # fmt_dm = '{} ' * (nstates**2 + 1) + '\\n'\n\n f_obs = open('obs.dat', 'w')\n fmt = '{} '* (len(e_ops) + 1) + '\\n'\n\n for k in range(Nt):\n\n # compute observables\n observables = np.zeros(len(e_ops), 
dtype=complex)\n\n for i, obs_op in enumerate(e_ops):\n observables[i] = obs_dm(rho, obs_op)\n\n t += dt\n\n # rho_new = rho_old + liouvillian(rho, h0, c_ops) * 2. * dt\n # # update rho_old\n # rho_old = rho\n # rho = rho_new\n\n rho = rk4(rho, liouvillian, dt, H, c_ops)\n\n # dipole-dipole auto-corrlation function\n #cor = np.trace(np.matmul(d, rho))\n\n # take a partial trace to obtain the rho_el\n\n\n f_obs.write(fmt.format(t, *observables))\n\n\n f_obs.close()\n # f_dm.close()\n\n return rho\n\n else:\n\n rholist = [] # store density matries\n\n result = Result(dt=dt, Nt=Nt, rho0=rho0)\n\n observables = np.zeros((Nt, len(e_ops)), dtype=complex)\n\n for k in range(Nt):\n\n t += dt\n rho = rk4(rho, liouvillian, dt, H, c_ops)\n\n rholist.append(rho.copy())\n\n\n observables[k, :] = [obs_dm(rho, op) for op in e_ops]\n\n\n result.observables = observables\n result.rholist = rholist\n\n return result", "def calc_lampam_2(ss):\n if isinstance(ss, list):\n lampam = np.zeros((len(ss), 12), float)\n for index in range(len(ss)):\n lampam[index] = calc_lampam_2(ss[index])\n return lampam\n if ss.ndim == 2 and ss.shape[0] > 1:\n lampam = np.zeros((ss.shape[0], 12), float)\n for index in range(ss.shape[0]):\n lampam[index] = calc_lampam_2(ss[index])\n return lampam\n\n n_plies_in_panels = np.size(ss) # laminate ply count\n\n theta2 = np.deg2rad(2*ss.astype(float))\n theta4 = 2*theta2\n cos_sin = np.concatenate((\n np.cos(theta2),\n np.cos(theta4),\n np.sin(theta2),\n np.sin(theta4))).reshape((4, n_plies_in_panels))\n\n for_the_top = np.arange(n_plies_in_panels)\n z_0 = np.ones(n_plies_in_panels)\n z_2 = ((1-n_plies_in_panels/2)*z_0+for_the_top)**3 \\\n - ((1-n_plies_in_panels/2)*z_0+for_the_top - 1)**3\n z_1 = ((1-n_plies_in_panels/2)*z_0+for_the_top)**2 \\\n - ((1-n_plies_in_panels/2)*z_0+for_the_top - 1)**2\n\n return np.array([\n (1/n_plies_in_panels)*np.matmul(cos_sin, z_0),\n (2/n_plies_in_panels**2)*np.matmul(cos_sin, z_1),\n (4/n_plies_in_panels**3)*np.matmul(cos_sin, z_2)]).reshape(12)", "def mounting_matrix(self):\n # fmt: off\n count = 0\n for x in range(self.ntheta):\n self.M[count][count] = 1\n self.f[count][0] = self.p_in\n count = count + self.nz - 1\n self.M[count][count] = 1\n self.f[count][0] = self.p_out\n count = count + 1\n count = 0\n for x in range(self.nz - 2):\n self.M[self.ntotal - self.nz + 1 + count][1 + count] = 1\n self.M[self.ntotal - self.nz + 1 + count][self.ntotal - self.nz + 1 + count] = -1\n count = count + 1\n count = 1\n j = 0\n for i in range(1, self.nz - 1):\n a = (1 / self.dtheta ** 2) * (self.c1[i][self.ntheta - 1])\n self.M[count][self.ntotal - 2 * self.nz + count] = a\n b = (1 / self.dz ** 2) * (self.c2[i - 1, j])\n self.M[count][count - 1] = b\n c = -((1 / self.dtheta ** 2) * ((self.c1[i][j]) + self.c1[i][self.ntheta - 1])\n + (1 / self.dz ** 2) * (self.c2[i][j] + self.c2[i - 1][j]))\n self.M[count, count] = c\n d = (1 / self.dz ** 2) * (self.c2[i][j])\n self.M[count][count + 1] = d\n e = (1 / self.dtheta ** 2) * (self.c1[i][j])\n self.M[count][count + self.nz] = e\n count = count + 1\n count = self.nz + 1\n for j in range(1, self.ntheta - 1):\n for i in range(1, self.nz - 1):\n a = (1 / self.dtheta ** 2) * (self.c1[i, j - 1])\n self.M[count][count - self.nz] = a\n b = (1 / self.dz ** 2) * (self.c2[i - 1][j])\n self.M[count][count - 1] = b\n c = -((1 / self.dtheta ** 2) * ((self.c1[i][j]) + self.c1[i][j - 1])\n + (1 / self.dz ** 2) * (self.c2[i][j] + self.c2[i - 1][j]))\n self.M[count, count] = c\n d = (1 / self.dz ** 2) * (self.c2[i][j])\n 
self.M[count][count + 1] = d\n e = (1 / self.dtheta ** 2) * (self.c1[i][j])\n self.M[count][count + self.nz] = e\n count = count + 1\n count = count + 2\n count = 1\n for j in range(self.ntheta - 1):\n for i in range(1, self.nz - 1):\n if j == 0:\n self.f[count][0] = (self.c0w[i][j] - self.c0w[i][self.ntheta - 1]) / self.dtheta\n else:\n self.f[count][0] = (self.c0w[i, j] - self.c0w[i, j - 1]) / self.dtheta\n count = count + 1\n count = count + 2\n # fmt: on", "def run_test0():\r\n \r\n ndia, nadi, nnucl, ntraj = 1, 1, 2, 500\r\n\r\n # ======= Hierarchy of Hamiltonians =======\r\n ham = nHamiltonian(ndia, nadi, nnucl)\r\n ham.init_all(2)\r\n print \"id=\", ham.id, \" level=\", ham.level\r\n\r\n ham1 = [] \r\n for tr in xrange(ntraj):\r\n ham1.append( nHamiltonian(ndia, nadi, nnucl) ) \r\n print ham1[tr].id, ham1[tr].level\r\n ham1[tr].init_all(2)\r\n ham.add_child(ham1[tr])\r\n print Cpp2Py(ham1[tr].get_full_id())\r\n\r\n # Set up the models and compute internal variables\r\n # Initialization\r\n # Model parameters \r\n params = { \"model\":1 }\r\n\r\n # Simulation parameters\r\n dt = 1.0\r\n\r\n # Dynamical variables and system-specific properties\r\n mean_q = MATRIX(nnucl,1); \r\n sigma_q = MATRIX(nnucl,1); \r\n mean_p = MATRIX(nnucl,1); \r\n sigma_p = MATRIX(nnucl,1); \r\n iM = MATRIX(nnucl,1);\r\n\r\n for i in xrange(nnucl):\r\n mean_q.set(i,0, -1.0) \r\n sigma_q.set(i,0, 0.05) \r\n mean_p.set(i,0, 0.0) \r\n sigma_p.set(i,0, 0.0)\r\n iM.set(i,0, 1.0/2000.0)\r\n\r\n rnd = Random()\r\n q = MATRIX(nnucl,ntraj); aux_functs.sample(q, mean_q, sigma_q, rnd)\r\n p = MATRIX(nnucl,ntraj); aux_functs.sample(p, mean_p, sigma_p, rnd) \r\n\r\n # Initial calculations\r\n q.show_matrix()\r\n\r\n # Compute Initial trajectory probability distributions for all dof\r\n #bin(q, -2.0, 2.0, 0.01)\r\n\r\n ham.compute_diabatic(compute_model, q, params, 1)\r\n ham.compute_adiabatic(1, 1);\r\n ham.add_ethd_adi(q, iM, 1)\r\n\r\n os.system(\"mkdir _2D_dist\")\r\n out1 = open(\"_output.txt\", \"w\"); out1.close() \r\n\r\n # Do the propagation\r\n for i in xrange(100):\r\n\r\n aux_functs.bin2(q, -2.0, 2.0, 0.1, -2.0, 2.0, 0.1, \"_2D_dist/_2D_distrib_\"+str(i)+\"_.txt\")\r\n\r\n Verlet1(dt, q, p, iM, ham, compute_model, params, 1)\r\n\r\n #=========== Properties ==========\r\n\r\n Ekin, Epot, Etot = aux_functs.compute_etot(ham, p, iM)\r\n\r\n # Print the ensemble average - kinetic, potential, and total energies\r\n # Print the tunneling information. 
Here, we count each trajectory across the barrier.\r\n out1 = open(\"_output.txt\", \"a\")\r\n out1.write( \" %8.5f %8.5f %8.5f %8.5f\\n\" % ( i*dt, Ekin, Epot, Etot ) )\r\n out1.close()", "def _ldlj(movement, fs, data_type='speed'):\n _amp, _dur, _jerk = _dlj(movement, fs, data_type)\n return np.log(_amp), np.log(_dur), np.log(_jerk)", "def diagonals(self):\n left_top_shifts = map(lambda i: (-(i + 1), -(i + 1)), range(min(\n self.left_distance, self.top_distance)))\n left_bottom_shifts = map(lambda i: (-(i + 1), +(i + 1)), range(min(\n self.left_distance, self.bottom_distance)))\n right_top_shifts = map(lambda i: (+(i + 1), -(i + 1)), range(min(\n self.right_distance, self.top_distance)))\n right_bottom_shifts = map(lambda i: (+(i + 1), +(i + 1)), range(min(\n self.right_distance, self.bottom_distance)))\n return set(chain(\n left_top_shifts, left_bottom_shifts,\n right_top_shifts, right_bottom_shifts))", "def test_diagonal_gate_wrapper(self):\n shots = 100\n lsts = [\n [1, -1],\n [1, -1, -1, 1],\n [1.0, -1.0, -1.0, 1.0]]\n circuits = [ ref_diagonal_gate.diagonal_gate_circuits_deterministic_w(state)\n for state in [ np.array(lst, dtype=t) \n for t in (None, float, np.float32, complex, np.complex64)\n for lst in lsts ] ]\n result = execute(circuits, self.SIMULATOR, shots=shots).result()\n self.assertTrue(getattr(result, 'success', False))", "def problem():\n\n print 'problem #28'\n\n depth = 1\n numbers = 0\n s = 0\n for x in xrange(1, 1001**2+1):\n numbers += 1\n if depth == 1 or numbers % (depth - 1) == 0:\n s += x\n if x / depth == depth:\n depth += 2\n numbers = 0\n print 'the sum of diagonal number is %s' % s", "def gstate(N, periodic):\n\n # Create Hamiltonian matrix\n H = kronH(N, periodic)\n \n # Diagonalize\n print('Diagonalizing...', end=' ', flush=True)\n w, v = eigsh(H, k=1, which='SA')\n print('Done')\n\n return w[0]", "def test_equal_site_hamiltonian(L):\n site_dims = [2] * L\n site_ops = [pauli.X] * L\n bond_ops = [np.kron(pauli.Z, pauli.Z)] * (L - 1)\n full_hamiltonian = ExDiagPropagator(None, site_dims, site_ops, bond_ops, 0.01).H\n mp_norm = get_norm_of_hamiltonian(site_ops, bond_ops)\n assert abs(np.linalg.norm(full_hamiltonian) - mp_norm) == pytest.approx(0.0)", "def get_incorrect_distance_matrix(L):\n n = len(L)\n D = np.zeros((n,n))\n for i in range(n):\n for j in range(n):\n if i != j:\n D[i][j] = get_minor(L, [i], [j]) / get_minor(L, [i], [i])\n return D", "def diagM(l):\r\n dim = len(l)\r\n M = np.zeros((dim, dim))\r\n np.fill_diagonal(M, l)\r\n return matrix(M)", "def LDL(A, d):\n n = shape(A)[0]\n L = array(eye(n))\n dg = zeros(n)\n dg[0] = A[0, 0]\n for k in range(1, n):\n m = reshape(array(A[:k, k].copy()), k)\n rforwardsolve(L[:k, :k], m, d)\n L[k, :k] = m/dg[:k]\n dg[k] = A[k, k] - dot(L[k, :k], m)\n return L, dg", "def get_correct_distance_matrix(L):\n n = len(L)\n D = np.zeros((n,n))\n for i in range(n):\n for j in range(n):\n if i != j:\n D[i][j] = get_minor(L, [i, j], [i, j]) / get_minor(L, [i], [i])\n return D", "def DFS_Hamilton_all(AL):\n global H_sequences\n H_sequences = []\n explored = [0]\n for vertex in AL[0]: \n _DFS_Hamilton_all(explored, AL, 0, vertex) \n return H_sequences", "def set_hamiltonian(self, system):\n system.clear_hamiltonian()\n if 'bh' in system.left_block.operators.keys():\n system.add_to_hamiltonian(left_block_op='bh')\n if 'bh' in system.right_block.operators.keys():\n system.add_to_hamiltonian(right_block_op='bh')\n system.add_to_hamiltonian('dimer', 'id', 'id', 'id', -(1. 
- self.U))\n system.add_to_hamiltonian('id', 'dimer', 'id', 'id', -(1. - self.U))\n system.add_to_hamiltonian('id', 'id', 'dimer', 'id', -(1. - self.U))\n system.add_to_hamiltonian('id', 'id', 'id', 'dimer', -(1. - self.U))\n \n# system.add_to_hamiltonian('dimer', 'id', 'id', 'id', self.U)\n# system.add_to_hamiltonian('id', 'dimer', 'id', 'id', self.U)\n# system.add_to_hamiltonian('id', 'id', 'dimer', 'id', self.U)\n# system.add_to_hamiltonian('id', 'id', 'id', 'dimer', self.U)\n\n system.add_to_hamiltonian('rprm_up_minus_dag', 'rprm_up_plus', 'id', 'id', -(1. + self.U)/2.)\n system.add_to_hamiltonian('rprm_down_minus_dag', 'rprm_down_plus', 'id', 'id', -(1. + self.U)/2.)\n system.add_to_hamiltonian('rprm_up_minus', 'rprm_up_plus_dag', 'id', 'id', (1. + self.U)/2.)\n system.add_to_hamiltonian('rprm_down_minus', 'rprm_down_plus_dag', 'id', 'id', (1. + self.U)/2.)\n \n system.add_to_hamiltonian('id', 'rprm_up_minus_dag', 'rprm_up_plus', 'id', -(1.+self.U)/2.)\n system.add_to_hamiltonian('id', 'rprm_down_minus_dag', 'rprm_down_plus', 'id', -(1.+self.U)/2.)\n system.add_to_hamiltonian('id', 'rprm_up_minus', 'rprm_up_plus_dag', 'id', (1.+self.U)/2.)\n system.add_to_hamiltonian('id', 'rprm_down_minus', 'rprm_down_plus_dag', 'id', (1.+self.U)/2.)\n\n system.add_to_hamiltonian('id','id', 'rprm_up_minus_dag', 'rprm_up_plus', -(1.+self.U)/2.)\n system.add_to_hamiltonian('id','id', 'rprm_down_minus_dag', 'rprm_down_plus', -(1.+self.U)/2.)\n system.add_to_hamiltonian('id','id', 'rprm_up_minus', 'rprm_up_plus_dag', (1.+self.U)/2.)\n system.add_to_hamiltonian('id','id', 'rprm_down_minus', 'rprm_down_plus_dag', (1.+self.U)/2.)", "def lawsonite():\n\n rho = 3090.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 214.; C[0,1] = 69.; C[0,2] = 82.; C[0,3] = 0.; C[0,4] = 0.; C[0,5] = 0.\n C[1,0] = C[0,1]; C[1,1] = 226.; C[1,2] = 65.; C[1,3] = 0.; C[1,4] = 0.; C[1,5] = 0.\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 259.; C[2,3] = 0.; C[2,4] = 0.; C[2,5] = 0.\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 60.; C[3,4] = 0.; C[3,5] = 0.\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 65.; C[4,5] = 0.\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 17.\n\n return C, rho", "def dl(z,h=0.7,omegalambda=0.7,omegam=0.3,omegak=0.0):\n return distcalc(z,h,omegalambda,omegam,omegak)['dl']", "def total_electronic_hamiltonian(self):\n return block_diag(*[self.electronic_manifold(n) for n in range(3)])", "def test_diagonal_gate(self):\n shots = 100\n circuits = ref_diagonal_gate.diagonal_gate_circuits_deterministic(\n final_measure=True)\n targets = ref_diagonal_gate.diagonal_gate_counts_deterministic(\n shots)\n result = execute(circuits, self.SIMULATOR, shots=shots).result()\n self.assertTrue(getattr(result, 'success', False))\n self.compare_counts(result, circuits, targets, delta=0)", "def generate_diagonals(self):\n x = self.square[0]\n y = self.square[1]\n diagonals = [[]]\n \n diagonals.append( ( (x+a, y+a) for a in range(1,8) ) )\n diagonals.append( ( (x+a, y-a) for a in range(1,8) ) )\n diagonals.append( ( (x-a, y+a) for a in range(1,8) ) )\n diagonals.append( ( (x-a, y-a) for a in range(1,8) ) )\n \n return diagonals", "def test_loglike(dlm,Cl,noise,beam):\n lmax = Cl.shape[0]\n tt_exp = -1./2 * np.real(np.vdot(dlm.T,hp.almxfl(dlm,1/(beam[:lmax]**2*Cl[:,1]+noise[:lmax]))))\n #plt.plot(Cl[:,1])\n tt_det = - 1./2 *(np.arange(1,lmax+1)*np.log((noise[:lmax]+Cl[:,1]*beam[:lmax]**2))).sum() \n tt_f = tt_exp + tt_det\n return 
tt_exp,tt_det,tt_f#,Cl[:,1]", "def _Th(numberPoints, domainLength):\n h = domainLength/numberPoints\n\n a = np.ones(numberPoints+1)*4.\n b = np.ones(numberPoints)*(-1)\n\n a[0] = h**2\n a[numberPoints] = h**2\n\n b[0] = 0\n b[numberPoints-1] = 0\n\n T = sp.diags(a, 0) + sp.diags(b, -1) + sp.diags(b, 1)\n return T", "def bandsemicholeskyL(A, d):\n n = shape(A)[0]\n for k in range(n):\n if A[k,k] > 0:\n kp=array([n, k + 1 + d]).min();\n A[k,k] = sqrt(A[k,k])\n A[(k+1):kp, k] =A [(k+1):kp, k]/A[k, k]\n for j in range(k+1, kp):\n A[j:kp, j] = A[j:kp, j] - A[j, k]*A[j:kp, k]\n else:\n A[k:kp, k] = 0\n return tril(A)", "def lherzolite():\n\n rho = 3270.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 187.4; C[0,1] = 63.71; C[0,2] = 63.87; C[0,3] = 0.78; C[0,4] = 2.02; C[0,5] = -3.2\n C[1,0] = C[0,1]; C[1,1] = 211.25; C[1,2] = 64.5; C[1,3] = -3.07; C[1,4] = 0.87; C[1,5] = -5.78\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 190.; C[2,3] = 0.38; C[2,4] = 2.38; C[2,5] = -0.12\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 67.9; C[3,4] = -2.12; C[3,5] = 1.6\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 63.12; C[4,5] = -0.55\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 66.83\n\n return C, rho", "def hyperdiagonal(coords):\n \n mini = coords.min(axis=0)\n maxi = coords.max(axis=0)\n dist = (maxi - mini)**2\n dist = np.sqrt(dist.sum())\n return dist", "def get_right_mps(D, L):\n mps = np.zeros([L], np.object)\n\n #ten0 = np.zeros([D, D],np.object) # <-- bottom-most\n #teni = np.zeros([D, D, D],np.object) # <-- general\n ten0 = np.zeros([D, D]) # <-- bottom-most\n teni = np.zeros([D, D, D]) # <-- general\n\n ten0[z, z] = 1\n ten0[a, a] = 1\n\n teni[z,a,z] = 1\n teni[a,a,a] = 1\n teni[z,z,a] = 1\n \n mps[0] = ten0\n for i in range(1, L):\n mps[i] = teni\n return mps", "def Psi(l,m,theta,phi):\n if numpy.isscalar(theta): \n theta=numpy.array([[theta]])\n phi=numpy.array([[phi]])\n Psilm_th=numpy.zeros(theta.shape,dtype=complex)\n Psilm_ph=numpy.zeros(theta.shape,dtype=complex)\n x=numpy.cos(theta)\n thetaNonZerosIdx=numpy.where(theta!=0.0)\n if len(thetaNonZerosIdx[0]) != 0:\n Ylm=scipy.special.sph_harm(m,l,phi[thetaNonZerosIdx],theta[thetaNonZerosIdx])\n #Compute derivative of sphrHarm function w.r.t. 
theta:\n if l>=numpy.abs(m):\n Plmpo=legendreLM(l,m+1,x[thetaNonZerosIdx])\n YlmPmpo=math.sqrt((2*l+1)/(4*math.pi)*math.factorial(l-m)/float(math.factorial(l+m)))*Plmpo*numpy.exp(1j*m*phi[thetaNonZerosIdx])\n #YlmPmpo=sqrt((l-m)*(l+m+1))*spharm(l,m+1,theta,phi)*exp(-i*phi) %Should be equivalent to above formula.\n dtYlm=+YlmPmpo+m*x[thetaNonZerosIdx]*Ylm/numpy.sin(theta[thetaNonZerosIdx])\n # thetZerInd=[find(theta==0); find(theta==pi)]\n # dtYlm(thetZerInd)=0; %This is a fudge to remove NaNs\n else:\n dtYlm=numpy.zeros(theta[thetaNonZerosIdx].shape,dtype=complex)\n\n #dtYlm=spharmDtheta(l,m,theta,phi)\n\n Psilm_ph[thetaNonZerosIdx]=+1j*m/numpy.sin(theta[thetaNonZerosIdx])*Ylm\n Psilm_th[thetaNonZerosIdx]=+dtYlm\n #Ref: http://mathworld.wolfram.com/VectorSphericalHarmonic.html\n\n thetaZerosIdx=numpy.where(theta==0.0)\n if len(thetaZerosIdx[0]) != 0:\n if numpy.abs(m)==1:\n Yl1B=math.sqrt((2*l+1)/(4*math.pi)*math.factorial(l-m)/math.factorial(l+m))*PBl1(l,m)*numpy.exp(1j*m*phi[thetaZerosIdx])\n Plmpo=legendreLM(l,m+1,x[thetaZerosIdx])\n YlmPmpo=math.sqrt((2*l+1)/(4*math.pi)*math.factorial(l-m)/math.factorial(l+m))*Plmpo*numpy.exp(1j*m*phi[thetaZerosIdx])\n dtYlm=+YlmPmpo+m*Yl1B\n Psilm_ph[thetaZerosIdx]=+1j*m*Yl1B\n Psilm_th[thetaZerosIdx]=+dtYlm\n else:\n Plmpo=legendreLM(l,m+1,x[thetaZerosIdx])\n YlmPmpo=math.sqrt((2*l+1)/(4*math.pi)*math.factorial(l-m)/math.factorial(l+m))*Plmpo*numpy.exp(1j*m*phi[thetaZerosIdx])\n dtYlm=+YlmPmpo+0\n Psilm_ph[thetaZerosIdx]=0\n Psilm_th[thetaZerosIdx]=+dtYlm\n return Psilm_th,Psilm_ph", "def detect_repetition(sdm, diagonal_num = 30, thres_rate = 0.2, min_sdm_window_size = 48, is_local = True, is_plot = False):\n\n length = len(sdm)\n dig_mean = calculate_sdm_min_diagonal(sdm, window_size = min_sdm_window_size, is_partial = is_local)\n\n # using a FIR filter to smooth mean of diagonals\n B = np.ones(50) / 50\n dig_lp = scipy.signal.lfilter(B, 1, dig_mean)\n dig = dig_mean - dig_lp\n\n # calculate the smoothed differential of diagonals\n B = np.array([1, 0, -1])\n dig_smooth_diiiferentia = scipy.signal.lfilter(B, 1 ,dig)\n\n if is_plot:\n plt.plot(dig_mean, label = 'mean of diagonals')\n plt.plot(dig, label = 'mean of diagonals without linear offset')\n plt.plot(dig_lp, label = 'smoothed mean of diagonals')\n plt.plot(dig_smooth_diiiferentia, label = 'derivative of mean of diagonals')\n plt.title('mean of diagonals')\n plt.legend()\n\n\n # index where the smoothed differential of diagonals from negative to positive\n # the minima value is the minimum value of diagonals\n minima = np.array([])\n minima_indeces = np.array([], dtype = int)\n for i in range(len(dig_smooth_diiiferentia) - 1):\n if dig_smooth_diiiferentia[i] < 0 and dig_smooth_diiiferentia[i + 1] > 0:\n minima_indeces = np.append(minima_indeces, i)\n minima = np.append(minima, dig[i])\n\n # delete by otsu algorithm\n threshold_otsu = get_otsu_threshold(np.matrix(minima))\n del_indeces = np.array([])\n # for i in range(len(minima)):\n # if minima[i] > threshold_otsu:\n # del_indeces = np.append(del_indeces, i)\n\n while True:\n threshold_otsu += 1\n del_indeces = np.array([])\n for i in range(len(minima)):\n if minima[i] > threshold_otsu:\n del_indeces = np.append(del_indeces, i)\n\n if len(minima_indeces) - len(del_indeces) > 50 or len(del_indeces) == 0:\n break\n\n\n\n\n minima = np.delete(minima, del_indeces)\n minima_indeces = np.delete(minima_indeces, del_indeces)\n\n # calculate a threshold\n long_vector = np.array([])\n for index in minima_indeces:\n long_vector = np.append(long_vector, 
np.diag(sdm, -index))\n\n all_len = len(long_vector)\n long_vector = np.sort(long_vector)\n\n while(True):\n\n threshold = long_vector[int(round(thres_rate * all_len))]\n minima_count = 0\n\n # calculate a binary matrix\n binary_matrix = np.zeros([length, length], dtype = int)\n\n\n for index in minima_indeces:\n temp = np.diag(sdm, -index)\n for j in range(len(temp)):\n if temp[j] > threshold:\n binary_matrix[index + j, j] = 1\n minima_count += 1\n\n # if the number of segments is smaller than 10\n if minima_count < 20 and thres_rate < 1:\n thres_rate += 0.05\n else:\n break\n\n\n # enhance the binary matrix\n enhanced_binary_matrix = binary_matrix.copy()\n for index in minima_indeces:\n temp = np.diag(sdm, -index)\n j = 0\n while len(temp) >= 25 or j <= len(temp):\n if temp[j] == 0:\n j += 1\n if j + 25 - 1 > len(temp):\n break\n\n continue\n\n if j + 25 - 1 > len(temp):\n break\n\n kernel = temp[j : j + 25 - 1]\n if isenhance(kernel):\n for k in range(25):\n enhanced_binary_matrix[index + j + k] = 1\n\n j = j + 25 - 1\n\n j += 1\n if j + 25 - 1 > len(temp):\n break\n\n return enhanced_binary_matrix, minima_indeces", "def analytic_dLdp(q,ps,C1s,C0s,ks,bs,sigma=1):\n n_p=len(ps)\n r=np.linalg.norm(ps-q,axis=1).reshape(-1,1)\n r_hat=(ps-q)/r\n t_hat=np.zeros(r_hat.shape)\n t_hat[:,0]=-r_hat[:,1]\n t_hat[:,1]=r_hat[:,0]\n\n dLdeta=np.zeros(n_p).reshape(-1,1)\n dLdr=np.zeros(n_p).reshape(-1,1)\n\n\n for i in range(n_p):\n Keta=2*(ks[i]*bs[i])**2/(sigma**2) * (r[i]-C1s[i])**(2*bs[i]-2)\n Kr=2*(ks[i]*bs[i])**2/(sigma**2) * (bs[i]-1) * (r[i]-C1s[i])**(2*bs[i]-3)\n sum_eta=sum_kr=0\n for j in range(n_p):\n \n rkrj=np.max([np.min([r_hat[i,:].dot(r_hat[j,:]),1]),-1])\n \n direction=np.sign(np.linalg.det(r_hat[[j,i],:]))\n\n sum_eta += (ks[j]*bs[j])**2 * (r[j]-C1s[j])**(2*bs[j]-2) * rkrj * np.sqrt(1-rkrj**2) * direction\n sum_kr += (ks[j]*bs[j])**2 * (r[j]-C1s[j])**(2*bs[j]-2) * (1-rkrj**2)\n \n dLdeta[i]=Keta*sum_eta\n dLdr[i]=Kr*sum_kr\n \n dLdp = dLdr * r_hat + (dLdeta/r) * t_hat\n \n \n return dLdp", "def diagonalizing_gates(self):\n raise NotImplementedError", "def _h(W):\r\n # E = slin.expm(W * W)\r\n # h = np.trace(E) - d\r\n M = np.eye(d) + W * W / d\r\n E = np.linalg.matrix_power(M, d - 1)\r\n h = (E.T * M).sum() - d\r\n G_h = E.T * W * 2\r\n return h, G_h", "def calc_lampam(ss, constraints=None):\n if constraints is None:\n return calc_lampam_2(ss)\n\n if isinstance(ss, list):\n lampam = np.zeros((len(ss), 12), float)\n for index in range(len(ss)):\n lampam[index] = calc_lampam(ss[index], constraints)\n return lampam\n if ss.ndim == 2 and ss.shape[0] > 1:\n lampam = np.zeros((ss.shape[0], 12), float)\n for index in range(ss.shape[0]):\n lampam[index] = calc_lampam(ss[index], constraints)\n return lampam\n n_plies_in_panels = np.size(ss) # laminate ply count\n\n if not constraints.sym:\n cos_sin = np.empty((4, n_plies_in_panels), float)\n for ind in range(n_plies_in_panels):\n cos_sin[:, ind] = np.copy(constraints.cos_sin[\n constraints.ind_angles_dict[ss[ind]]].reshape((4, )))\n\n for_the_top = np.arange(n_plies_in_panels)\n z_0 = np.ones(n_plies_in_panels)\n z_2 = ((1-n_plies_in_panels/2)*z_0+for_the_top)**3 \\\n - ((1-n_plies_in_panels/2)*z_0+for_the_top - 1)**3\n z_1 = ((1-n_plies_in_panels/2)*z_0+for_the_top)**2 \\\n - ((1-n_plies_in_panels/2)*z_0+for_the_top - 1)**2\n return np.array([\n (1/n_plies_in_panels)*np.matmul(cos_sin, z_0),\n (2/n_plies_in_panels**2)*np.matmul(cos_sin, z_1),\n (4/n_plies_in_panels**3)*np.matmul(cos_sin, z_2)]).reshape(12)\n\n cos_sin = np.empty((4, 
np.size(ss) // 2), float)\n for ind in range(np.size(ss) // 2):\n cos_sin[:, ind] = constraints.cos_sin[\n constraints.ind_angles_dict[ss[ind]]].reshape((4,))\n\n for_the_top = np.arange(np.size(ss) // 2)\n z_0 = np.ones(np.size(ss) // 2)\n z_2 = ((1 - n_plies_in_panels / 2) * z_0 + for_the_top) ** 3 \\\n - ((1 - n_plies_in_panels / 2) * z_0 + for_the_top - 1) ** 3\n lampam = np.array([\n (2/n_plies_in_panels)*np.matmul(cos_sin, z_0),\n np.array([0, 0, 0, 0]),\n (8/n_plies_in_panels**3)*np.matmul(cos_sin, z_2)]).reshape(12)\n\n if np.size(ss) % 2:\n cos_sin_mid = constraints.cos_sin[\n constraints.ind_angles_dict[ss[n_plies_in_panels // 2]]]\n lampam += np.array([\n (1/n_plies_in_panels)*cos_sin_mid,\n np.zeros((4,), dtype=float),\n (1/n_plies_in_panels**3)*cos_sin_mid]).reshape(12)\n return lampam", "def dLdp(C1s,C0s,ks,bs,sigma=1):\n # return np.array(jit(jacfwd(L,argnums=1))(q,ps,C1s,C0s,ks,bs,sigma))\n \n # A = FIM(q,ps,C1s,C0s,ks,bs,sigma)\n \n # Construct A(q,ps)\n A = FIM(C1s,C0s,ks,bs,sigma)\n\n # Construct dAdp(q,ps)\n dAdp = jit(jacfwd(A,argnums=1))\n \n # Construct inv_A(q,ps)\n inv_A=lambda q,ps: jnp.linalg.inv(A(q,ps))\n \n # print(np.trace(-dAinv(inv_A,dAdp),axis1=0,axis2=1)-np.array(jit(jacfwd(L,argnums=1))(q,ps,C1s,C0s,ks,bs,sigma)))\n \n # Construct dLdP(q,ps)\n\n\n\n return lambda q,ps: -np.array(jnp.trace(dAinv(inv_A(q,ps),dAdp(q,ps)),axis1=0,axis2=1))", "def dhMatrix(self):\n row1 = np.array([np.cos(self.theta), -np.sin(self.theta)*np.cos(self.alpha), np.sin(self.theta)*np.sin(self.alpha), self.a*np.cos(self.theta)])\n row2 = np.array([np.sin(self.theta), np.cos(self.theta)*np.cos(self.alpha), -np.cos(self.theta)*np.sin(self.alpha), self.a*np.sin(self.theta)])\n row3 = np.array([0.0, np.sin(self.alpha), np.cos(self.alpha), self.d])\n row4 = np.array([0.0, 0.0, 0.0, 1.0])\n T = np.array([row1, row2, row3, row4])\n return T", "def metro_ising(lattice, L, T, h):\n\n m = 0.0\n\n for i in range(L):\n rand_row = np.random.choice(np.arange(1, N+1))\n rand_col = np.random.choice(np.arange(1, N+1))\n\n if H(lattice, rand_row, rand_col, h, T):\n lattice[rand_row, rand_col] *= -1\n\n # check whether bounds have to be updated\n if np.isin(rand_col, [0, N+1]) or np.isin(rand_row, [0, N+1]):\n lattice = update_bounds(lattice)\n\n m += np.sum(lattice[1:N + 1, 1:N + 1])\n\n return m/L, lattice", "def get_diagonal(self, parameters, space_group='preconditioner'):\n return super(RWGDominantSystem, self).get_diagonal(parameters, space_group).weak_form()", "def fdm_2d(N,L,x,y,h,k):\n\n # Create the Laplacian as a 1d sparse matrix using central difference\n ones = np.ones(N)\n diagvalues = np.array([ones,-2*ones,ones])\n offsets = np.array([-1,0,1])\n lap1d = sps.dia_matrix((diagvalues,offsets), shape=(N,N))/h**2\n \n # Represent 2d coordinates as kronecker sum\n lap = sps.kron(lap1d,sps.diags(np.ones(N))) + \\\n sps.kron(sps.diags(np.ones(N)),lap1d)\n \n # potential terms\n pot_x = np.repeat(x**2,N)\n pot_y = np.tile(y**2,N)\n\n # The whole Hamiltonian in matrix form\n A = (-1*lap + sps.diags(pot_x) + sps.diags(pot_y))/2\n\n # Calculate the k smallest eigenvalues and corresponding eigenvectors\n E, psi = eigsh(A,k=k,which='SM')\n\n\n # Perturbated potential\n a = 25\n pot_new = pot_x + pot_y + gauss_pert(N,a).flatten()\n\n # Plot the new potential\n X,Y = np.meshgrid(x,y)\n fig = plt.figure()\n ax = fig.add_subplot(1,2,1,projection='3d')\n ax.plot_surface(X, Y, pot_new.reshape((N,N)), cmap=cm.coolwarm,\n linewidth=0, antialiased=False)\n ax = fig.add_subplot(1,2,2)\n 
fig.suptitle(r'Potential with a Gaussian perturbation')\n ax.imshow(pot_new.reshape(N,N),extent=[-L/2,L/2,-L/2,L/2])\n plt.savefig(os.path.join(path,'perturbated_potential.png'))\n\n # The perturbated Hamiltonian in matrix form\n A = (-1*lap + sps.diags(pot_new))/2\n\n # Calculate the k smallest eigenvalues and corresponding eigenvector\n # Of the perturbated system\n E_p, psi_p = eigsh(A,k=k,which='SM')\n\n return E,psi,E_p,psi_p", "def build_cumulative_downhill_matrix(self):\n\n import time\n from scipy import sparse as sparse\n\n\n walltime = time.clock()\n\n downHillaccuMat = self.downhillMat.copy() \n accuM = self.downhillMat.copy() # work matrix\n\n DX = np.ones(self.tri.npoints) # measure when all the info has been propagated out.\n previous_nonzero = 0\n it = 0\n\n while np.count_nonzero(DX) != previous_nonzero:\n accuM = accuM.dot(self.downhillMat)\n downHillaccuMat = downHillaccuMat + accuM \n previous_nonzero = np.count_nonzero(DX)\n\n DX = self.downhillMat.dot(DX) \n\n it += 1\n \n\n print \" - Dense downhill matrix storage time \", time.clock() - walltime\n print \" - Maximum path length \",it\n\n walltime = time.clock()\n\n\n # Turn this into a loop !\n\n A1 = self.downhillMat.tocsr()\n A2 = A1.dot(A1)\n A2a = A1 + A2\n A4 = A2.dot(A2)\n A4a = A2a + A2.dot(A2a)\n A8 = A4.dot(A4)\n A8a = A4a + A4.dot(A4a)\n A16 = A8.dot(A8)\n A16a = A8a + A8.dot(A8a)\n A32 = A16.dot(A16)\n A32a = A16a + A16.dot(A16a)\n A64 = A32.dot(A32)\n A64a = A32a + A32.dot(A32a)\n A128 = A64.dot(A64)\n A128a = A64a + A64.dot(A64a)\n\n print \"A32.nnz = \", A32.nnz\n print \"A64.nnz = \", A64.nnz\n print \"A128.nnz = \", A128.nnz\n\n\n print \" - Dense downhill matrix storage time v2\", time.clock() - walltime\n print \" - Maximum path length \", 128\n\n\n downHillaccuMat = downHillaccuMat + sparse.identity(self.tri.npoints, format='csr')\n\n downHillaccuMat2 = A128a + sparse.identity(self.tri.npoints, format='csr')\n\n\n return downHillaccuMat, downHillaccuMat2", "def TR_algo3(h, vd=2):\n ve = 0\n vd = 2\n p = [0]*N\n for i in range(M-1, -1, -1):\n w = [bit_component(h, i*N+ii) for ii in range(N)]\n #print(i, w)\n w = sum( [wx*2**j for j, wx in enumerate(w)] )\n #print(i, w, gc(w))\n l = gc(w)\n l = T_inv(ve, vd, l)\n for j in range(N):\n p[j] += bit_component(l, j) << i\n ve = ve ^ rotate_left(e(w), vd+1)\n vd = (vd + d(w) + 1) % N\n return p", "def calc_lampam_sym(ss, constraints):\n if isinstance(ss, list):\n lampam = np.zeros((len(ss), 12), float)\n for index in range(len(ss)):\n lampam[index] = calc_lampam_sym(ss[index], constraints)\n return lampam\n if ss.ndim == 2 and ss.shape[0] > 1:\n lampam = np.zeros((ss.shape[0], 12), float)\n for index in range(ss.shape[0]):\n lampam[index] = calc_lampam_sym(ss[index], constraints)\n return lampam\n\n n_plies_in_panels = 2 * np.size(ss) # laminate ply count\n\n cos_sin = np.empty((4, n_plies_in_panels // 2), float)\n for ind in range(n_plies_in_panels // 2):\n cos_sin[:, ind] = constraints.cos_sin[\n constraints.ind_angles_dict[ss[ind]]].reshape((4, ))\n\n for_the_top = np.arange(n_plies_in_panels // 2)\n z_0 = np.ones(n_plies_in_panels // 2)\n z_2 = ((1 - n_plies_in_panels / 2) * z_0 + for_the_top) ** 3 \\\n - ((1 - n_plies_in_panels / 2) * z_0 + for_the_top - 1) ** 3\n lampam = np.array([\n (2 / n_plies_in_panels)*np.matmul(cos_sin, z_0),\n np.array([0, 0, 0, 0]),\n (8 / n_plies_in_panels**3)*np.matmul(cos_sin, z_2)]).reshape(12)\n return lampam", "def DM(self, masses=None):\n N = len(self.diameters)\n rs = self.rs\n d = self.ndim\n M = np.zeros((d * N, d 
* N))\n\n for i in range(N):\n sigi = self.diameters[i]\n for j in range(i):\n rijvec = rs[i, :] - rs[j, :]\n rijvec = rijvec - np.around(rijvec)\n rijsq = np.sum(rijvec**2)\n dij = (sigi + self.diameters[j]) / 2\n dijsq = dij**2\n if rijsq < dijsq:\n rij = np.sqrt(rijsq)\n rijouter = np.outer(rijvec, rijvec)\n # U(r) = ½(1 - r/d)²\n # d²U/dxdy = (dr/dx)(dr/dy)/d² - (1 - r/d)(d²r/dxdy)/d\n # dr/dx = x/r\n # d²r/dxdy = -(x y) / r³\n # d²U/dxdy = -(x y)/(r² d²) + (1 - r/d)((x y)/r²)/(d r)\n # d²U/dx² = (dr/dx)²/d² - (1 - r/d)(d²r/dx²)/d\n # d²r/dx² = -x² / r³ + 1/r\n # d²U/dxᵢdxⱼ = -(xᵢ xⱼ)/(r² d²) + (1 - r/d)((xᵢ xⱼ)/r² -\n # δᵢⱼ)/(d r)\n\n Mij1 = -rijouter / rijsq / dijsq\n Mij2 = (1 - rij / dij) * \\\n (rijouter / rijsq - np.eye(d)) / rij / dij\n Mij = Mij1 + Mij2\n\n M[d * i:d * i + d, d * j:d * j + d] = Mij\n M[d * j:d * j + d, d * i:d * i + d] = Mij\n M[d * i:d * i + d, d * i:d * i + d] -= Mij\n M[d * j:d * j + d, d * j:d * j + d] -= Mij\n\n np.divide(M, self.L**2, out=M)\n if masses is None:\n return M\n\n # TODO: is the mass part of this really part of this?\n marr = np.array(masses)\n assert np.shape(masses) == np.shape(self.diameters)\n marr = np.array([masses] * d)\n marr = marr.T.flatten()\n # marr is now [m1,m1,m2,m2,...] (in 2D)\n mm = np.eye(d * N)\n np.multiply(mm, marr**-.5, out=mm)\n # mm is now M^-½, where M is the mass matrix\n\n mm.dot(M, out=M)\n M.dot(mm, out=M)\n return M", "def kd_domain_split(counts_all, ndomains, log=null_log):\n\n split_fac = 1.35 * (float(ndomains)/np.cumprod(counts_all.shape)[-1])**(1.0/3.0)\n print('split factor', split_fac, file=log)\n # First translate the box so 0,0,0 in best posn to minimise communication\n total_shifts = []\n for axis in range(3):\n # Sum over other axes\n sum_axes = list(np.arange(len(counts_all.shape)))\n sum_axes.pop(axis)\n sum_axes = tuple(sum_axes)\n\n count_ax = counts_all.sum(axis=sum_axes, dtype=np.int64)\n # amount communicated per plane\n comm = count_ax + np.roll(count_ax, 1)\n\n total_shifts.append(np.argmin(comm))\n\n\n for axis, r in enumerate(total_shifts):\n counts_all = np.roll(counts_all, shift=-r, axis=axis)\n\n print('Best shifts', total_shifts, file=log)\n\n\n # pad\n counts_pad = np.empty(tuple(v+2 for v in counts_all.shape), dtype=counts_all.dtype)\n counts_pad[1:-1,1:-1,1:-1] = counts_all\n counts_pad[1:-1,1:-1,0] = counts_pad[1:-1,1:-1, -2]\n counts_pad[1:-1,1:-1,-1] = counts_pad[1:-1,1:-1,1]\n counts_pad[1:-1,0] = counts_pad[1:-1, -2]\n counts_pad[1:-1,-1] = counts_pad[1:-1, 1]\n counts_pad[0] = counts_pad[-2]\n counts_pad[-1] = counts_pad[1]\n\n\n domain_segments = []\n\n doms_tosplit = [((0,0,0), counts_pad, ndomains)]\n\n while len(doms_tosplit):\n dom_topleft, counts, ndom = doms_tosplit.pop(0)\n\n if ndom==1:\n # done\n dom_shape = tuple(v-2 for v in counts.shape)\n domain_segments.append((dom_topleft, dom_shape, counts.sum(dtype=np.uint64)))\n continue\n\n # Bisect this domain \n axis, split_idx, n_L = bisect_anyaxis(counts, ndom, split_fac)\n\n n_R = ndom-n_L\n\n if axis==0:\n counts_L, counts_R = counts[:split_idx+2], counts[split_idx:]\n elif axis==1:\n counts_L, counts_R = counts[:,:split_idx+2], counts[:,split_idx:] \n elif axis==2:\n counts_L, counts_R = counts[:,:,:split_idx+2], counts[:,:,split_idx:]\n else:\n raise Exception('3d only, aaargh.')\n\n # add left and right domains\n doms_tosplit.append((dom_topleft, counts_L, n_L))\n\n # top left of right domain\n dom_R_topleft = list(dom_topleft)\n dom_R_topleft[axis] += split_idx\n dom_R_topleft = tuple(dom_R_topleft)\n\n 
doms_tosplit.append((dom_R_topleft, counts_R, n_R))\n\n\n # sort domains biggest->smallest\n domain_segments = sorted(domain_segments, key=lambda ijk_shape_pts:-ijk_shape_pts[2])\n\n doms = np.empty(counts_all.shape, dtype=np.int16)\n\n for d,(ijk, shape, tot_pts) in enumerate(domain_segments):\n segment = tuple(slice(i,i+size) for i,size in zip(ijk, shape))\n doms[segment] = d+1\n real_pts = counts_all[segment].sum(dtype=np.int64)\n# print('domain', d, 'shape', shape, '{:,} pts, {:,} total'.format(real_pts, tot_pts), file=log)\n\n # Undo the total shifts\n for axis, r in enumerate(total_shifts):\n doms = np.roll(doms, shift=r, axis=axis)\n \n return doms", "def checkHarm(dM,P,tbase,harmL0=terra.config.harmL,ver=True):\n\t MESL = []\n\t Pcad = P / config.lc\n\n\t dMW = FFA.XWrap(dM , Pcad)\n\t harmL = []\n\t for harm in harmL0:\n\t # Fold dM on the harmonic\n\t Pharm = P*float(harm)\n\t if Pharm < tbase / 3.:\n\t dMW_harm = FFA.XWrap(dM,Pharm / config.lc )\n\t sig = dMW_harm.mean(axis=0)\n\t c = dMW_harm.count(axis=0)\n\t MES = sig * np.sqrt(c) \n\t MESL.append(MES.max())\n\t harmL.append(harm)\n\t MESL = np.array(MESL)\n\t MESL /= MESL[0]\n\n\t if ver:\n\t print(harmL)\n\t print(MESL)\n\t return harmL,MESL", "def ldos_finite(h,e=0.0,n=10,nwf=4,delta=0.0001):\n if h.dimensionality!=1: raise # if it is not one dimensional\n intra = csc(h.intra) # convert to sparse\n inter = csc(h.inter) # convert to sparse\n interH = inter.H # hermitian\n m = [[None for i in range(n)] for j in range(n)] # full matrix\n for i in range(n): # add intracell\n m[i][i] = intra\n for i in range(n-1): # add intercell\n m[i][i+1] = inter\n m[i+1][i] = interH\n m = bmat(m) # convert to matrix\n (ene,wfs) = slg.eigsh(m,k=nwf,which=\"LM\",sigma=0.0) # diagonalize\n wfs = wfs.transpose() # transpose wavefunctions\n dos = (wfs[0].real)*0.0 # calculate dos\n for (ie,f) in zip(ene,wfs): # loop over waves\n c = 1./(1.+((ie-e)/delta)**2) # calculate coefficient\n dos += np.abs(f)*c # add contribution\n odos = spatial_dos(h,dos) # get the spatial distribution\n go = h.geometry.supercell(n) # get the supercell\n write_ldos(go.x,go.y,odos) # write in a file\n return dos # return the dos", "def Hamiltonian_setup(N,V,interval):\n (a,b) = interval\n H = np.zeros((N-1,N-1))\n h= (b-a)/N\n for i in range(N-1):\n for j in range(N-1):\n if i==j:\n x=((i+1)*h) + a\n Vi=V(x)\n H[i,j] = Vi + 2/(h**2)\n if i==j+1 or i==j-1:\n H[i,j]=-1/(h**2)\n return H", "def set_DirichletSS_sparse(self):\n \n \n self.set_Dirichlet_vessel(self.inlet)\n\n\n self.tissue_consumption(self.Mt)\n \n #REINITIALISATION OF THE VECTOR OF TISSUE PHI!!!\n self.phi_t=np.zeros(len(self.phit))\n \n self.set_Dirichlet_north(0)\n self.set_Dirichlet_east(0)\n self.set_Dirichlet_west(0)\n \n self.A.eliminate_zeros()", "def checkHarm(dM,P,tbase,harmL0=config.harmL,ver=True):\n\t MESL = []\n\t Pcad = P / config.lc\n\n\t dMW = FFA.XWrap(dM , Pcad)\n\t harmL = []\n\t for harm in harmL0:\n\t # Fold dM on the harmonic\n\t Pharm = P*float(harm)\n\t if Pharm < tbase / 3.:\n\t dMW_harm = FFA.XWrap(dM,Pharm / config.lc )\n\t sig = dMW_harm.mean(axis=0)\n\t c = dMW_harm.count(axis=0)\n\t MES = sig * np.sqrt(c) \n\t MESL.append(MES.max())\n\t harmL.append(harm)\n\t MESL = np.array(MESL)\n\t MESL /= MESL[0]\n\n\t if ver:\n\t print(harmL)\n\t print(MESL)\n\t return harmL,MESL", "def fixed_steps_trajectories(self, noise=0, nt=1, ll=0.1, limit=None):\n\n print('Generating Trajectories...')\n for i in tqdm.tqdm(range(self.ntraj)):\n\n if self.hop_distribution == 'gaussian' or 
self.hop_distribution == 'Gaussian':\n z_position = np.cumsum(\n np.random.normal(loc=0, scale=self.hop_sigma, size=self.nsteps)) # accumulate gaussian steps\n else:\n sys.exit('Please enter a valid hop distance probability distribution')\n\n self.trajectories[i, :, 1] = z_position - z_position[0] # make initial z equal to 0\n\n # hop at random time intervals according to one of the following PDFs\n if self.dwell_distribution == 'exponential':\n time = sampling.random_exponential_dwell(self.lamb, size=self.nsteps)\n elif self.dwell_distribution == 'power':\n time = sampling.random_power_law_dwell(1 + self.alpha, size=self.nsteps, ll=ll, limit=limit)\n else:\n sys.exit('Please enter a valid dwell time probability distribution')\n\n time = np.cumsum(time) # accumulate dwell times\n time -= time[0]\n\n self.trajectories[i, :, 0] = time\n\n # Add to array with all corners of hop distribution for visualization purposes\n self.trajectory_hops[i, 1::2, 0] = time[1:]\n self.trajectory_hops[i, 2::2, 0] = time[1:]\n\n self.trajectory_hops[i, ::2, 1] = self.trajectories[i, :, 1]\n self.trajectory_hops[i, 1:-1:2, 1] = self.trajectories[i, :-1, 1]\n self.trajectory_hops[i, -1, 1] = self.trajectories[i, -1, 1]\n\n print('Interpolating Trajectories...')\n # make uniform time intervals with the same interval for each simulated trajectory\n max_time = np.min(self.trajectories[:, -1, 0])\n self.time_uniform = np.linspace(0, max_time, self.nsteps*10)\n\n if nt > 1:\n # self.pbar = tqdm.tqdm(total=self.ntraj)\n pool = Pool(nt)\n for i, t in enumerate(pool.map(self.interpolate_trajectories, range(self.ntraj))):\n self.z_interpolated[i, :] = t\n else:\n for t in tqdm.tqdm(range(self.ntraj)):\n self.z_interpolated[t, :] = self.trajectories[t, np.digitize(self.time_uniform,\n self.trajectories[t, :, 0], right=False) - 1, 1]\n #self.z_interpolated[t, :] = self.interpolate_trajectories(t, noise=noise)", "def calculate_L(self, n_t, n_n, l0, l1, dt, ctant):\n d_scl = np.sqrt((self.t_D.get_value() ** 2).sum(1).mean())\n\n a = n_t * n_n * l1 * dt * d_scl ** 2 * np.log(l1 / l0) ** 2\n # try:\n # self._b\n # except:\n # from scipy.linalg import eigh as largest_eigh\n # k=1\n # N = inv_cov.shape[0]\n # evals_large, _ = largest_eigh(inv_cov, eigvals=(N-k,N-1))\n b = 0.5 * self.QUAD_REG.max()\n # FIXME: not quite right for non-diagonal matrix, but is close enough\n # in practice\n\n # print a, b, b / a\n return ((a + b) * ctant).astype('float32')", "def make_dhdu(ham, controls, derivative_fn):\n\n dHdu = []\n for ctrl in controls:\n dHdu.append(derivative_fn(ham, ctrl['symbol']))\n\n return dHdu", "def diag_hamiltonian(self, subsystem, evals=None):\n evals_count = subsystem.truncated_dim\n if evals is None:\n evals = subsystem.eigenvals(evals_count=evals_count)\n diag_qt_op = qt.Qobj(inpt=np.diagflat(evals[0:evals_count]))\n return self.identity_wrap(diag_qt_op, subsystem)", "def dKdiag_dtheta(self,dL_dKdiag,X,target):\r\n raise NotImplementedError", "def diag(diagnoal):\n raise NotImplementedError", "def dKdiag_dtheta(self,dL_dKdiag,X,target):\r\n target[0] += np.sum(dL_dKdiag)", "def derivatives(self):\n self.rdot = self.v\n self.vdot[:,:] = 0.0\n self.udot[:] = 0.0\n\n t = time()\n for nl in self.nlists: \n nl.separations()\n #nl.apply_minimum_image()\n self.timing['pairsep time'] = (time() - t)\n\n t = time()\n if SPROPS:\n properties.spam_properties(self,self.nl_default \\\n ,self.h[0:self.n],self.hlr[0:self.n])\n self.timing['SPAM time'] = time() - t\n \n t = time()\n for force in self.forces:\n force.apply()\n 
self.timing['force time'] = time() - t\n \n if ADVECTIVE:\n self.rdot[:,:] = 0.0", "def two_body_old(sys, psi):\n # psi = np.reshape(psi,\n # (fci.cistring.num_strings(sys.nsites, sys.nup), fci.cistring.num_strings(sys.nsites, sys.ndown)))\n D = 0.\n for i in range(sys.nsites):\n w = (i + 1) % sys.nsites\n v = (i - 1) % sys.nsites\n\n D += harmonic.compute_inner_product(psi, sys.nsites, (sys.nup, sys.ndown), [i, w, i, i], [1, 0, 1, 0], [1, 1, 0, 0])\n\n D += harmonic.compute_inner_product(psi, sys.nsites, (sys.nup, sys.ndown), [i, i, i, w], [1, 0, 1, 0], [1, 1, 0, 0])\n\n D -= harmonic.compute_inner_product(psi, sys.nsites, (sys.nup, sys.ndown), [v, i, i, i], [1, 0, 1, 0], [1, 1, 0, 0])\n\n D -= harmonic.compute_inner_product(psi, sys.nsites, (sys.nup, sys.ndown), [i, i, v, i], [1, 0, 1, 0], [1, 1, 0, 0])\n\n return D.conj()", "def checkLDS(T, A, C, Q, S):\n obs = np.zeros(T)\n hid = np.zeros(T)\n hid[0] = 4\n\n for i in range(1,T):\n hid[i] = A*hid[i-1] + Q*np.random.randn()\n obs[i] = C*hid[i] + S*np.random.randn()\n \n plt.figure()\n plt.title(\"Simulated Data\")\n plt.plot(hid, c='blue')\n plt.plot(obs, c='red')\n plt.legend(['hidden', 'observed'])\n plt.show()\n \n mykf = KalmanFilter(initial_state_mean=4, n_dim_state=1, n_dim_obs=1, em_vars=['transition_matrices', 'observation_matrices', 'transition_covariance', 'observation_covariance'])\n mykf.em(obs,n_iter=200)\n \n plt.figure()\n myZ, mySig = mykf.smooth(obs)\n plt.title(\"Estimated States vs Ground Truth\")\n plt.plot(myZ, c='red')\n plt.plot(hid, c='blue')\n plt.legend(['smoothed','true'])\n plt.show()\n \n return mykf.transition_matrices, mykf.observation_matrices, mykf.transition_covariance, mykf.observation_covariance", "def run_klds(window):\n Fk, nfreq = get_fft(window)\n for size in range(50):\n print size\n dom_fk, dom_fq = get_dom_freq(Fk, nfreq, size)\n entropy = kld(Fk, nfreq, dom_fk, dom_fq)\n print(entropy)", "def calculate_SW1_TOA(d,sza,L):\n rho_SW1_TOA=np.pi*(d**2)*L[:,123,:]/(0.253*np.cos(sza))\n return rho_SW1_TOA", "def dsdt(s, t, a, P, I, G, U, lengths, masses, k1, k2):\r\n d = len(a) + 1\r\n theta = s[2:2 + d]\r\n vcm = s[2 + d:4 + d]\r\n dtheta = s[4 + d:]\r\n\r\n cth = np.cos(theta)\r\n sth = np.sin(theta)\r\n rVx = np.dot(P, -sth * dtheta)\r\n rVy = np.dot(P, cth * dtheta)\r\n Vx = rVx + vcm[0]\r\n Vy = rVy + vcm[1]\r\n\r\n Vn = -sth * Vx + cth * Vy\r\n Vt = cth * Vx + sth * Vy\r\n\r\n EL1 = np.dot((v1Mv2(-sth, G, cth) + v1Mv2(cth, G, sth)) * dtheta[None, :]\r\n + (v1Mv2(cth, G, -sth) + v1Mv2(sth, G, cth)) * dtheta[:, None], dtheta)\r\n EL3 = np.diag(I) + v1Mv2(sth, G, sth) + v1Mv2(cth, G, cth)\r\n EL2 = - k1 * np.dot((v1Mv2(-sth, P.T, -sth) + v1Mv2(cth, P.T, cth)) * lengths[None, :], Vn) \\\r\n - k1 * np.power(lengths, 3) * dtheta / 12. 
\\\r\n - k2 * \\\r\n np.dot((v1Mv2(-sth, P.T, cth) + v1Mv2(cth, P.T, sth))\r\n * lengths[None, :], Vt)\r\n ds = np.zeros_like(s)\r\n ds[:2] = vcm\r\n ds[2:2 + d] = dtheta\r\n ds[2 + d] = - \\\r\n (k1 * np.sum(-sth * Vn) + k2 * np.sum(cth * Vt)) / np.sum(masses)\r\n ds[3 + d] = - \\\r\n (k1 * np.sum(cth * Vn) + k2 * np.sum(sth * Vt)) / np.sum(masses)\r\n ds[4 + d:] = np.linalg.solve(EL3, EL1 + EL2 + np.dot(U, a))\r\n return ds", "def _matrix_store_smooth_downhill(self):\n \n import time\n from scipy import sparse as sparse\n from scipy.sparse import linalg as linalgs \n \n\n t = time.clock()\n\n\n size = 0\n for nl in self.neighbour_array_lo_hi:\n size += 3 # len(nl)\n\n row_array = np.empty(size, dtype = int)\n col_array = np.empty(size, dtype = int)\n slope_array = np.zeros(size)\n local_slope_array = np.zeros(64)\n\n\n idx=0 \n for row in range(0, len(self.neighbour_array_lo_hi)): \n neighbours = self.neighbour_array_lo_hi[row] \n npoints = self.tri.points[neighbours]\n\n ## work out (downhill) gradient to (max of three) nearby neighbours\n \n\n for col, column in enumerate(neighbours[0:3]): \n \n delta_h = self.height[column] - self.height[row] \n\n\n if delta_h < 0.0:\n delta_s2 = (self.x[column] - self.x[row])**2 + (self.y[column] - self.y[row])**2\n local_slope_array[col] = ( delta_h**2 / delta_s2 )**5\n\n elif delta_h == 0.0 and self.bmask[row] == False:\n local_slope_array[col] = 1.0e-20\n\n else:\n local_slope_array[col] = 1.0e-20 \n \n # Normalise this so that it conserves mass (note - low points will have no contributions here !) \n \n norm = local_slope_array[0:len(neighbours)].sum()\n if norm != 0.0:\n norm = 1.0 / norm\n\n for col, column in enumerate(neighbours[0:3]): \n row_array[idx] = row\n col_array[idx] = column \n slope_array[idx] = local_slope_array[col] * norm\n\n idx += 1\n\n # We can re-pack this array into a sparse matrix for v. 
fast computation of downhill operator \n\n slopeCOO = sparse.coo_matrix( (slope_array, (row_array, col_array)) ).T\n slopeMat = slopeCOO.tocsr() \n \n print \"SlopeMat.shape \", slopeMat.shape, size\n\n # slopeNormVec = np.array(slopeMat.sum(axis=1)).T[0]\n # slopeNormVec[slopeNormVec != 0.0] = 1.0 / slopeNormVec[slopeNormVec != 0.0]\n # slopeNormMat = sparse.eye(self.tri.npoints)\n # slopeNormMat.setdiag(slopeNormVec)\n # slopeMat = slopeNormMat.dot(slopeMat)\n\n slopeMat.eliminate_zeros()\n self.smoothDownhillMat = slopeMat\n\n return", "def densidad(qe):\r\n global x,rhoe,rhoi,dx,nparticulas,npuntos_malla,pared_izquierda,pared_derecha\r\n \r\n j1 = sp.dtype(sp.int32) # Asegura que la variable permanezca entera\r\n j2 = sp.dtype(sp.int32) \r\n \r\n # Factor de ponderacion de carga \r\n re = qe/dx \r\n # Densidad electronica \r\n rhoe = sp.zeros(npuntos_malla+1) \r\n # Mapa de cargas sobre la malla\r\n for i in range(nparticulas):\r\n xa = x[i]/dx # xparticula/dx\r\n j1 = int(xa) # indices de la malla fija xmalla/dx\r\n j2 = j1 + 1 # Siguiente punto en la malla\r\n f2 = xa - j1 # |xmalla - xparticula|/dx\r\n f1 = 1.0 - f2\r\n rhoe[j1] = rhoe[j1] + re*f1\r\n rhoe[j2] = rhoe[j2] + re*f2\r\n\r\n # Condiciones de frontera periodica\r\n rhoe[0] += rhoe[npuntos_malla]\r\n rhoe[npuntos_malla] = rhoe[0]\r\n \r\n # Se agrega una densidad de iones neutral\r\n rhoi = rho0\r\n\r\n return True", "def Dmat(numpts, delta=1):\n a = 0.5 / delta * ones(numpts)\n a[0] = 0\n a[-2] = 0\n #b=-2./delta**2*ones(numpts); b[0]=0;b[-1]=0\n c = -0.5 / delta * ones(numpts)\n c[1] = 0\n c[-1] = 0\n return sparse.spdiags([a, c], [-1, 1], numpts, numpts)", "def calc_det_dzh(theta):\n return 919.49 - 27.018 * theta + 0.26209 * theta ** 2 - 0.00083803 * theta ** 3", "def NLSA(self,N=1,q=12,l=None):\n\n X = self.data[self.IDs]\n X = np.flip(X.T[(np.arange(q))+np.arange(np.max(self.dimT-(q-1),0)).reshape(-1,1)].reshape(self.dimT-q+1,self.n*q).T,0)\n K = np.zeros((self.dimT-q,self.dimT-q))\n for i in range(self.dimT-q):\n xi = np.atleast_2d(X[:,1+i])\n xi_m1 = np.atleast_2d(X[:,1+i-1])\n elli = cdist(xi,xi_m1,'euclidean')\n for j in range(self.dimT-q):\n xj = np.atleast_2d(X[:,1+j])\n xj_m1 = np.atleast_2d(X[:,1+j-1])\n ellj = cdist(xj,xj_m1,'euclidean')\n K[i,j] = np.exp(-cdist(xi,xj,'sqeuclidean')/(elli*ellj))\n\n Qi,Qj = np.meshgrid(np.sum(K,axis=1),np.sum(K,axis=1))\n K_tilde = K/(Qi*Qj)\n P = K_tilde/np.atleast_2d(np.sum(K_tilde,axis=1)).T #transition (probability) matrix\n L = np.eye(self.dimT-q) - P\n Lambda, phi = np.linalg.eig(L) #Lϕ = λϕ\n Z, mu = np.linalg.eig(P) #μP = μ\n mu = mu[:,np.isclose(Z,1,atol=1e-12)].ravel() #take eigenvector corresponding to where eigenvalue = 1.\n mu = mu / np.sum(mu) #to make the sum of μ equal to 1 (it is a vector of probabilities)\n \n if l is None:\n l = self.dimT-q\n else:\n l = l\n \n A = np.linalg.multi_dot([X[:,1:],np.diag(mu),phi[:,-l:]]) #project X onto leading l Laplacian eigenfunctions\n U,S,V = np.linalg.svd(A,full_matrices=False)\n\n EEOFs = np.zeros((self.dimX,self.dimY,self.dimT,N)) ; EEOFs[:,:,0,:] = np.nan \n X_rec = np.zeros((self.n,self.dimT,self.dimT-q)) ; X_rec[:,0,:] = np.nan\n #note that we set the first time stamp (i.e. year 1) to nan, as we have used this to compute the \n #phase velocities (elli,ellj). 
\n for k in range(self.dimT-q):\n Xk = S[k]*np.dot(np.atleast_2d(U[:,k]).T,np.atleast_2d(V.T[:,k]))\n offset1 = 0\n offset2 = 1\n for t in range(1,self.dimT):\n if t == 1:\n X_rec[:,t,k] = Xk[-self.n:,0]\n elif (t > 1) & (t < q):\n x_kj = np.zeros((self.n,t+1))\n start = self.n*q - (t+1)*self.n\n for l in range(t+1):\n x_kj[:,l] = Xk[start:start+self.n,l]\n start += self.n\n X_rec[:,t,k] = np.mean(x_kj,1)\n elif (t >= q) & (t <= self.dimT-q):\n x_kj = np.zeros((self.n,q))\n start = 0\n for l in range(offset1,q+offset1):\n x_kj[:,l-offset1] = Xk[start:start+self.n,l]\n start += self.n\n offset1 += 1\n X_rec[:,t,k] = np.mean(x_kj,1)\n elif (t > self.dimT-q) & (t < self.dimT-1):\n x_kj = np.zeros((self.n,q-offset2))\n start = 0\n for l in range(offset1,(q-offset2)+offset1):\n x_kj[:,l-offset1] = Xk[start:start+self.n,l]\n start += self.n\n offset1 += 1\n offset2 += 1\n X_rec[:,t,k] = np.mean(x_kj,1)\n elif t == self.dimT-1:\n X_rec[:,t,k] = Xk[:self.n,-1]\n\n EEOFs[self.IDs] = (np.flip(X_rec,0)/self.weights[self.IDs][:,np.newaxis,np.newaxis])[:,:,:N]\n \n return EEOFs", "def diago_triangle(self,inv):\n [r,c] = self.D\n assert c == 2*r, \"Le tableau doit être un rectangle L x (2L)\"\n m = r - 1\n S = self\n T = zeros(r,c)\n while m >= 0:\n pivot = S[m,m]\n assert pivot !=0, \"matrice non inversible\"\n for k in range(m-1,-1,-1):\n if S[k,m] != 0:\n S = S.comb_lignes(pivot, -S[k,m],k,m)\n T = T.remplace_ligneg(m,S.F)\n S = S.decoupe_bas()\n m -= 1\n for k in range(r):\n T = T.mult_ligne(inv(T[k,r-1]),k)\n return T", "def find_vanishing_lderivs(self, do_print=True, latex=True, nd=50):\n res = list()\n if(latex):\n S = \" $ \"\n O = \" & \"\n else:\n S = \" \"\n O = \" \"\n if(len(list(self._Lv.keys())) == 0):\n return res\n L = list(self._Lv.keys())\n L.sort()\n L.reverse()\n s = \"\"\n sc = \"\"\n # increase mpmath.mp.dps to print all relevant digits\n mpold = mpmath.mp.dps\n mpmath.mp.dps = self.maxdigs\n for DD in L:\n x = self._Lv[DD]\n if(abs(x) < 1E-10):\n # res.append((DD,x))\n res.append(DD)\n s = s + S + str(DD) + S + O + S + sci_pretty_print(self._Lv[DD], nd, latex_pow=latex) + S + \"\\\\\\\\ \\n\"\n c = self.get_coefficient(DD)\n if c is not None:\n x = c.real()\n x1 = floor(x)\n x2 = ceil(x)\n er1 = abs(x1 - x)\n er2 = abs(x2 - x)\n erm = min(er1, er2)\n print(\"erm({0})={1}\".format(DD, erm))\n erms = sci_pretty_print(erm, 2, latex_pow=latex)\n if(er1 < er2):\n xi = x1\n else:\n xi = x2\n # sc=sc+S+str(DD)+S+\"\\t\"+O+S+sci_pretty_print(c.real,nd,latex_pow=latex)+\"\\\\ \\n\"\n sc = sc + S + str(DD) + S + O + S + str(xi) + S + O + S + erms + S + \"\\\\\\\\ \\n\"\n else:\n sc = sc + S + str(DD) + S + O + S + \" \" + S + O + S + \" \" + S + \"\\\\\\\\ \\n\"\n print(s)\n print(sc)\n mpmath.mp.dps = mpold\n return res" ]
[ "0.6510543", "0.6441997", "0.616607", "0.6134413", "0.607191", "0.59936696", "0.59168214", "0.58911335", "0.5871839", "0.58278966", "0.5818667", "0.58146787", "0.5806394", "0.5788211", "0.57797736", "0.57464814", "0.57270575", "0.5663071", "0.56431705", "0.56420153", "0.56268334", "0.5622348", "0.561804", "0.5614621", "0.5601662", "0.55942905", "0.55769014", "0.5546393", "0.55180323", "0.55073494", "0.5488803", "0.54759026", "0.5466346", "0.5450469", "0.54459614", "0.5443229", "0.5436637", "0.5422318", "0.54215866", "0.5392473", "0.5387608", "0.53861237", "0.53680533", "0.5360792", "0.5349173", "0.5344442", "0.53157103", "0.5308226", "0.5279615", "0.5273596", "0.52658015", "0.526487", "0.52639276", "0.5259527", "0.5249324", "0.52480865", "0.52449906", "0.5242412", "0.5240558", "0.52354616", "0.52309203", "0.5229492", "0.52189296", "0.52096784", "0.52091485", "0.51932746", "0.51908123", "0.5184533", "0.5172042", "0.51666266", "0.51584125", "0.515833", "0.51536316", "0.5152416", "0.5144477", "0.5140614", "0.513956", "0.51367736", "0.51311797", "0.51203835", "0.51048225", "0.50951743", "0.50938755", "0.5093775", "0.5093205", "0.50898933", "0.5084856", "0.50809526", "0.5072159", "0.5071016", "0.5069305", "0.50618994", "0.5060937", "0.50595987", "0.5054344", "0.5053822", "0.5053332", "0.50530857", "0.50503093", "0.50462294" ]
0.68332195
0
Re-sums a certain DOS to show only the spatial dependence
def spatial_dos(h, dos):
    if h.has_spin == False and h.has_eh == False:
        return np.array(dos)
    elif h.has_spin == True and h.has_eh == False:
        return np.array([dos[2*i] + dos[2*i+1] for i in range(len(dos)//2)])
    elif h.has_spin == False and h.has_eh == True:
        return np.array([dos[2*i] + dos[2*i+1] for i in range(len(dos)//2)])
    elif h.has_spin == True and h.has_eh == True:
        return np.array([dos[4*i] + dos[4*i+1] + dos[4*i+2] + dos[4*i+3] for i in range(len(dos)//4)])
    else:
        raise
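A minimal sketch of how this resummation behaves, assuming the `spatial_dos` function above is in scope. The Hamiltonian is stood in for by a `types.SimpleNamespace` carrying only the `has_spin`/`has_eh` flags the function reads (a hypothetical mock, not the library's Hamiltonian class); a spinful electron-hole DOS with four consecutive entries per site is folded down to one value per site, and the total spectral weight is unchanged.

import numpy as np
from types import SimpleNamespace

nsites = 3
h = SimpleNamespace(has_spin=True, has_eh=True)  # mock object: only the two flags spatial_dos inspects

# Per-orbital DOS: four consecutive entries per site (spin x electron-hole);
# their internal ordering does not matter here because they are only summed.
dos = np.arange(4 * nsites, dtype=float)

site_dos = spatial_dos(h, dos)   # fold orbital-resolved DOS onto sites
print(site_dos)                  # [ 6. 22. 38.] -> one value per site
assert len(site_dos) == nsites
assert np.isclose(site_dos.sum(), dos.sum())  # resummation conserves total weight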
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def grass_drass():", "def system_fleet_dimensioning(self):", "def display_ds9(ds9_name, image_id):\n os.system('xpaset {} fits < {}'.format(ds9_name, image_id))", "def execute(self, parameters, messages):\n\n\n\n arcpy.AddMessage(\"default.gdb_path: %s\" % arcpy.env.workspace)\n\n\n arcpy.ImportToolbox(os.path.join(os.path.dirname(__file__), \"URB.pyt\"))\n arcpy.gp.toolbox = os.path.join(os.path.dirname(__file__), \"URB.pyt\")\n\n extent = parameters[0].value\n srs = parameters[1].value\n\n arcpy.AddMessage(\"control: %s %s\" % (extent, srs))\n\n ext_poly = ext2poly(extent, arcpy.SpatialReference(3003))\n \n sel_fc = create_fc(ws=\"scratch\")\n ext_fc_cursor = arcpy.da.InsertCursor(sel_fc,(\"SHAPE@\"))\n ext_fc_cursor.insertRow([ext_poly])\n del ext_fc_cursor\n\n sel_lyr = arcpy.mapping.Layer(sel_fc)\n arcpy.AddMessage(\"sel_lyr: %s\" % str(sel_lyr))\n\n check_layer_list = [\n [r\"Connessioni database\\VISIO_R_GDBT.sde\\SIT.DBTOPOGRAFICO\\SIT.UN_VOL\", \"UN_VOL_AV\", 0],\n [r\"Connessioni database\\VISIO_R_GDBT.sde\\SIT.DBTOPOGRAFICO\\SIT.AATT\", \"\", 1],\n [r\"Connessioni database\\VISIO_R_GDBT.sde\\SIT.DBTOPOGRAFICO\\SIT.MN_EDI_NOVOL\", \"\", 2],\n [r\"Connessioni database\\VISIO_R_GDBT.sde\\SIT.DBTOPOGRAFICO\\SIT.MN_UVOL\", \"MN_UVO_ALT\", 3],\n [r\"Connessioni database\\VISIO_R_GDBT.sde\\SIT.DBTOPOGRAFICO\\SIT.AR_VRD\", \"\", 4],\n #[r\"Connessioni database\\VISIO_R_GDBT.sde\\SIT.DBTOPOGRAFICO\\SIT.AR_MARC\", \"\", 5],\n #[r\"Connessioni database\\VISIO_R_GDBT.sde\\SIT.DBTOPOGRAFICO\\SIT.AC_VEI\", \"\", 6],\n [r\"Connessioni database\\VISIO_R_GDBT.sde\\SIT.DBTOPOGRAFICO\\SIT.CL_AGR\", \"\", 7],\n [r\"Connessioni database\\VISIO_R_GDBT.sde\\SIT.DBTOPOGRAFICO\\SIT.A_PED\", \"\", 8],\n [r\"Connessioni database\\VISIO_R_GDBT.sde\\SIT.DBTOPOGRAFICO\\SIT.PS_INC\", \"\", 9],\n ]\n\n \n sel_fc = get_jobfile(\"memory\")\n sel_fc_fields = ( \n (\"Layer\", \"TEXT\", None, None, 10, \"\", \"NULLABLE\", \"NON_REQUIRED\"), \n (\"Color\", \"SHORT\", None, None, None, \"\", \"NULLABLE\", \"NON_REQUIRED\"), \n (\"TxtValue\", \"TEXT\", None, None, 10, \"\", \"NULLABLE\", \"NON_REQUIRED\"), \n ) \n intersectOutput_clean = create_fc(\"memory\", fields=sel_fc_fields)\n\n sel_note = get_jobfile(\"memory\")\n sel_note_fields = ( \n (\"Layer\", \"TEXT\", None, None, 50, \"\", \"NULLABLE\", \"NON_REQUIRED\"), \n (\"Color\", \"SHORT\", None, None, None, \"\", \"NULLABLE\", \"NON_REQUIRED\"), \n (\"TxtValue\", \"TEXT\", None, None, 255, \"\", \"NULLABLE\", \"NON_REQUIRED\"), \n (\"CADType\", \"TEXT\", None, None, 50, \"\", \"NULLABLE\", \"NON_REQUIRED\"), \n ) \n intersectOutput_note = create_fc(\"memory\", fields=sel_note_fields, geom_type=\"POINT\")\n cursor_note = arcpy.da.InsertCursor(intersectOutput_note, (\"Layer\", \"Color\", \"TxtValue\", \"CADType\", \"SHAPE@\"))\n\n for check_layer_def in check_layer_list:\n check_layer = check_layer_def[0]\n arcpy.AddMessage(\"check_layer: %s\" % check_layer)\n desc = arcpy.Describe(check_layer)\n inFeatures = [ check_layer, sel_lyr ]\n intersectOutput = get_jobfile(\"memory\")\n clusterTolerance = 0 \n arcpy.Intersect_analysis(inFeatures, intersectOutput, \"\", clusterTolerance, \"input\")\n\n if check_layer_def[1]:\n field_def = (\"Layer\", \"Color\", \"TxtValue\", \"SHAPE@\")\n check_def = [check_layer_def[1], \"SHAPE@\"]\n else:\n field_def = (\"Layer\", \"Color\", \"SHAPE@\")\n check_def = [\"SHAPE@\"]\n\n cursor_clean = arcpy.da.InsertCursor(intersectOutput_clean,field_def)\n\n with arcpy.da.SearchCursor(intersectOutput, check_def) as cursor:\n for row in 
cursor:\n if check_layer_def[1]:\n row_def = [desc.name.replace(\"SIT.\",\"\"), check_layer_def[2], str(row[0]), cursor[1]]\n note_def = row_def[:-1] + [\"TEXT\", arcpy.PointGeometry(cursor[1].centroid)]\n cursor_note.insertRow(note_def)\n else:\n row_def = [desc.name.replace(\"SIT.\",\"\"), check_layer_def[2], cursor[0]]\n cursor_clean.insertRow(row_def)\n \n del cursor_clean\n del cursor_note\n\n\n extraction_json_filepath = get_jobfile(\"output\",\"json\")\n arcpy.FeaturesToJSON_conversion(intersectOutput_clean, extraction_json_filepath, format_json=True, geoJSON=True)\n\n arcpy.AddMessage(extraction_json_filepath)\n parameters[2].value = extraction_json_filepath\n\n extraction_dxf_filepath = get_jobfile(\"output\",\"dxf\")\n arcpy.ExportCAD_conversion([intersectOutput_clean, intersectOutput_note], \"DXF_R2004\", extraction_dxf_filepath, \"USE_FILENAMES_IN_TABLES\", \"OVERWRITE_EXISTING_FILES\", \"\")\n parameters[3].value = extraction_dxf_filepath\n\n lyr = arcpy.mapping.Layer(intersectOutput_clean)\n parameters[4].value = intersectOutput_clean\n\n #if parameters[1].valueAsText:\n # with open(parameters[1].valueAsText,\"w\") as f:\n # f.write(json.dumps(output, indent=3))", "def SimpleReferenceGrid(min_x,min_y,max_x,max_y,x_divisions,y_divisions,\n color=(0.5,1.0,0.5,1.0),xoff=-0.15,yoff=-0.04,\n label_type=None,shapes_name=\"Grid\"):\n\n shps=gview.GvShapes(name=shapes_name)\n gview.undo_register( shps )\n shps.add_field('position','string',20)\n\n if os.name == 'nt':\n font=\"-adobe-helvetica-medium-r-*-*-12-*-*-*-*-*-*-*\"\n else:\n #font=\"-adobe-helvetica-medium-r-*-*-12-*-*-*-*-*-*-*\"\n #font=\"-urw-helvetica-medium-r-normal-*-9-*-*-*-p-*-iso8859-2\"\n font=\"-adobe-helvetica-medium-r-normal-*-8-*-*-*-p-*-iso10646-1\"\n #font=\"-misc-fixed-medium-r-*-*-9-*-*-*-*-*-*-*\"\n\n\n lxoff=(max_x-min_x)*xoff # horizontal label placement\n lyoff=(max_y-min_y)*yoff # vertical label placement\n\n hspc=(max_x-min_x)/x_divisions\n vspc=(max_y-min_y)/y_divisions\n\n for hval in numpy.arange(min_x,max_x+hspc/100.0,hspc):\n nshp=gview.GvShape(type=gview.GVSHAPE_LINE)\n nshp.set_node(hval,max_y,0,0)\n nshp.set_node(hval,min_y,0,1)\n shps.append(nshp)\n pshp=gview.GvShape(type=gview.GVSHAPE_POINT)\n pshp.set_node(hval,min_y+lyoff)\n pshp.set_property('position',\"%.1f\" % hval)\n shps.append(pshp)\n\n for vval in numpy.arange(min_y,max_y+vspc/100.0,vspc):\n nshp=gview.GvShape(type=gview.GVSHAPE_LINE)\n nshp.set_node(min_x,vval,0,0)\n nshp.set_node(max_x,vval,0,1)\n shps.append(nshp)\n pshp=gview.GvShape(type=gview.GVSHAPE_POINT)\n pshp.set_node(min_x+lxoff,vval)\n pshp.set_property('position',\"%.1f\" % vval)\n shps.append(pshp)\n\n cstr=gvogrfs.gv_to_ogr_color(color)\n if len(cstr) < 9:\n cstr=cstr+\"FF\"\n clstr=str(color[0])+' '+str(color[1])+' '+str(color[2])+' '+str(color[3])\n\n layer=gview.GvShapesLayer(shps)\n layer.set_property('_line_color',clstr)\n layer.set_property('_point_color',clstr)\n # Set antialias property so that lines look nice\n # when rotated.\n layer.set_property('_gl_antialias','1')\n layer.set_property('_gv_ogrfs_point',\n 'LABEL(t:{position},f:\"'+font+'\",c:'+cstr+')')\n layer.set_read_only(True) \n\n return layer", "def main():\n dpi = 1\n dpi = 2\n width = int(360)\n height = int(130)\n mywidth = int(width*dpi)\n myheight = int(height*dpi)\n FWHM = 7.5 # degrees\n FWHM = 10.0 # degrees\n FWHM = 5.0 # degrees\n FWHM = 3.0 # degrees\n FWHM = 1.0 # degrees\n weight = 1.\n\n nargs = len(sys.argv)\n if nargs < 2:\n print('GR: GRid Observations of integrated intensity 
produced by the T Command')\n print('GR produces fits images for each of the horns used for the observations.')\n print('For observations at the same coordinates, the ratios of intensities are also produced.')\n print('The FITS format files require header information, which is copied from the')\n print('Cold Load File provided by the user')\n print('GR RA|GAL <cold file name> <savefile1> [<savefile2> ... <savefileN>]')\n print(\"\")\n print('Glen Langston, National Science Foundation -- 20 May 12')\n exit()\n\n gridtype = sys.argv[1]\n gridtype = gridtype.upper()\n print('Grid Type: ', gridtype)\n\n # enable having ra going from 24 to 0 hours == 360 to 0 degrees\n xsign = 1.\n xoffset = 0.\n if gridtype == 'RA':\n xmin = 0.\n xmax = 360.\n ymin = -40.\n ymax = 90.\n maptype = 'RA'\n elif gridtype == '-RA':\n xmin = 0.\n xmax = 360.\n ymin = -40.\n ymax = 90.\n xsign = -1.\n xoffset = 360. # when x = 360. should be at zero.\n maptype = 'RA'\n elif gridtype == '-EL':\n xmin = 0.\n xmax = 360.\n ymin = 0.\n ymax = 90.\n xsign = -1.\n xoffset = 360. # when x = 360. should be at zero.\n maptype = 'AZEL'\n elif gridtype == 'RA0':\n xmin = 0.\n xmax = 360.\n ymin = -41.\n ymax = 89.\n xsign = -1.\n xoffset = 180. # when x = 360. should be at zero.\n gridtype = 'RA'\n elif gridtype == 'GAL':\n xmin = -180.\n xmax = 180.\n ymin = -90.\n ymax = 90.\n maptype = 'GAL'\n\n if gridtype != 'RA' and gridtype != 'GAL' and gridtype != '-RA' and gridtype != \"RA0\":\n print('Error parsing grid type: ', gridtype)\n print('1st argument should be either RA, -RA or GAL')\n exit()\n\n rs = radioastronomy.Spectrum()\n\n if doRatio: \n #create the grid with map parameters\n grid1 = GridClass.Grid(xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax, width=width, \\\n height=height, dpi=dpi, FWHM=FWHM, \\\n projection=\"-CAR\", gridtype=maptype)\n grid2 = GridClass.Grid(xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax, width=width, \\\n height=height, dpi=dpi, FWHM=FWHM, \\\n projection=\"-CAR\", gridtype=maptype)\n grid3 = GridClass.Grid(xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax, width=width, \\\n height=height, dpi=dpi, FWHM=FWHM, \\\n projection=\"-CAR\", gridtype=maptype)\n grid4 = GridClass.Grid(xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax, width=width, \\\n height=height, dpi=dpi, FWHM=FWHM, \\\n projection=\"-CAR\", gridtype=maptype)\n # put each telescope in a different grid\n grids = [grid1, grid2, grid3, grid4]\n\n gridall = GridClass.Grid(xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax, width=width, \\\n height=height, dpi=dpi, FWHM=FWHM, \\\n projection=\"-CAR\", gridtype=maptype)\n \n\n projection = \"-AIT\"\n# coldfile \n coldfile = sys.argv[2]\n# get telescope geographic location etc\n print(\"Reading Observing parameters from: %s\" % (coldfile))\n rs.read_spec_ast(coldfile)\n print(\"Observer: %s \" % (rs.observer))\n\n# first read through all data and find hot load\n names = sys.argv[3:]\n names = sorted(names)\n\n firsttime = \"\"\n lasttime = \"\"\n count = 0\n # setup grid indicies so that cpuIndex goes to the correct grid\n # This assumes telescopes 2,3,4,5 are being used] \n gridIndex = [0,0,0,1,2,3]\n # for all save Files to Grid\n for filename in names:\n print(\"File: %s\" % (filename))\n f = open(filename)\n\n date = \"Unknown\"\n while date != \"\":\n date, time, cpuIndex, telaz, telel, tSys, tRx, tRms, tint, KperC, tSourcemax, velSource, dV, tVSum, tVSumRms, tSumKmSec, dTSumKmSec, gainFactor = gainfactor.readSaveValues( f)\n dlen = len(date)\n if dlen < 1:\n break\n if date[0] == \"#\":\n continue\n # else not 
a comment process the line\n count = count + 1\n isodate = \"20\"+date+\"T\"+time\n# print(\"DateTime: %s\" % (isodate))\n rs.utc = datetime.datetime.strptime(isodate,\"%Y-%m-%dT%H:%M:%S\")\n# print(\"Utc: %s\" % (rs.utc))\n rs.telaz = telaz\n rs.telel = telel\n rs.azel2radec()\n\n ra = rs.ra\n dec = rs.dec\n lon = rs.gallon\n lat = rs.gallat\n tsum = tSumKmSec\n tsdv = dTSumKmSec\n tmax = tSourcemax\n vave = tVSum\n vsdv = tVSumRms\n if firsttime == \"\":\n firsttime = date\n else:\n lasttime = date\n\n# if vave > -100. and vave < 100:\n# mygrid.convolve( lon, lat, vave, 1.)\n iGrid = gridIndex[cpuIndex]\n gainCorr = telescopefactors[iGrid]\n tsum = tsum * gainCorr\n if gridtype == 'RA':\n if doRatio:\n grids[iGrid].convolve(ra, dec, tsum, weight)\n gridall.convolve( ra, dec, tsum, weight)\n elif gridtype == '-RA':\n x = (ra*xsign) + xoffset\n if doRatio:\n grids[iGrid].convolve(x, dec, tsum, weight)\n gridall.convolve( x, dec, tsum, weight)\n elif gridtype == 'RA0':\n x = (ra*xsign) + xoffset\n if x < 0:\n x = x + xmax\n elif x > xmax:\n x = x - xmax\n if doRatio:\n grids[iGrid].convolve(x, dec, tsum, weight)\n gridall.convolve( x, dec, tsum, weight)\n else:\n if doRatio:\n grids[iGrid].convolve(lon, lat, tsum, weight)\n gridall.convolve( lon, lat, tsum, weight)\n\n if count == 0:\n print('Convolving Coordinates: ', ra, dec, lon, lat)\n print('Convolving Intensities: ', tsum, tsdv, vave, vsdv)\n print('Convolvign Parameters : ', n, time)\n count = count + 1\n # end reading all lines in save file\n f.close()\n\n # normalize each of the gridded images\n if doRatio:\n grids[0].normalize()\n grids[1].normalize()\n grids[2].normalize()\n grids[3].normalize()\n gridall.normalize()\n# mygrid.check()\n# zmin = -1000.\n# zmax = 3000.\n# limit grid intensities for plotting\n# mygrid.set_ij( 0, 0, zmax, 1.)\n# mygrid.set_ij( 1, 1, zmin, 1.)\n# mygrid.limit(zmin, zmax)\n\n subplots = False\n\n if subplots:\n fig, ax = plt.subplots(figsize=(myheight, mywidth), dpi=dpi)\n\n if gridtype == 'RA':\n cax = fig.add_axes([-180, 180], [-90, 90])\n else:\n cax = fig.add_axes([0, 24], [-90, 90])\n\n cbar = fig.colorbar(cax, ticks=[zmin, zmax], orientation='horizontal')\n cbar.ax.set_yticklabels([str(zmin), str(zmax)])\n\n ax.set_title(\"Citizen Science: Horn observations of our Galaxy\")\n else:\n#y_ticks = ymin + (ymax-ymin)*ticks/myheight\n\n ticks = np.arange(0, mywidth, 30*dpi)\n x_ticks = xmin + ((xmax-xmin)*ticks/mywidth)\n\n plt.imshow(gridall.image, interpolation='nearest', cmap=plt.get_cmap('jet'))\n\n if firsttime != lasttime:\n plt.title(\"Citizen Science: Observing our Galaxy: %s to %s\" % (firsttime, lasttime))\n else:\n plt.title(\"Citizen Science: Observing our Galaxy: %s\" % (firsttime))\n if gridtype == 'RA':\n plt.xlabel(\"Right Ascension (hours)\")\n plt.ylabel(\"Declination (degrees)\")\n labels = ticks/(mywidth/24)\n yticks = np.arange(0, myheight, 15*dpi)\n elif gridtype == '-RA':\n plt.xlabel(\"Right Ascension (hours)\")\n plt.ylabel(\"Declination (degrees)\")\n labels = 24 - (ticks/(mywidth/24))\n labels[0] = 0\n labels[0] = 24\n yticks = np.arange(0, myheight, 15*dpi)\n elif gridtype == '-EL':\n plt.xlabel(\"Right Ascension (hours)\")\n plt.ylabel(\"Elevation (degrees)\")\n labels = 24 - (ticks/(mywidth/24))\n labels[0] = 0\n labels[0] = 24\n yticks = np.arange(0, myheight, 15*dpi)\n elif gridtype == 'RA0': # put 0 hours in middle of plot\n plt.xlabel(\"Right Ascension (hours)\")\n plt.ylabel(\"Declination (degrees)\")\n labels = 12 - (ticks/(mywidth/24))\n nlabels = len(labels)\n for 
iii in range(nlabels):\n if labels[iii] < 0:\n labels[iii] = 24 + labels[iii]\n if labels[iii] == 24:\n labels[iii] = 0\n yticks = np.arange(0, myheight, 15*dpi)\n else:\n yticks = np.arange(0, myheight, 30*dpi)\n ticks = np.arange(0, mywidth, 30*dpi)\n x_ticks = xmin + (xmax-xmin)*ticks/mywidth\n labels = x_ticks\n plt.xlabel(\"Galactic Longitude (degrees)\")\n plt.ylabel(\"Galactic Latitude (degrees)\")\n # wnat an integer list of labels\n# slabels = str(labels)\n print(ticks, labels)\n y_ticks = ymax - (ymax-ymin)*yticks/myheight\n plt.yticks(yticks, y_ticks)\n plt.xticks(ticks, labels, rotation='horizontal')\n plt.colorbar()\n\n crval2 = (xmin + xmax)/2.\n crval1 = (ymin + ymax)/2.\n cdelt1 = (-1./float(dpi)) - .001\n cdelt2 = (1./float(dpi)) + .001\n if doRatio:\n# now show eacsh of the images\n for iGrid in range(4):\n imagetemp = copy.deepcopy(grids[iGrid].image)\n imagetemp2 = copy.deepcopy(grids[iGrid].image)\n kkk = myheight - 1\n for jjj in range(myheight):\n imagetemp[:][kkk] = imagetemp2[:][jjj]\n kkk = kkk - 1\n grids[iGrid].image = imagetemp\n writeFitsImage( rs, iGrid+2, grids[iGrid], projection)\n\n # put each telescope in a different grid\n ratio1 = copy.deepcopy(grid1)\n ratio2 = copy.deepcopy(grid1)\n ratio3 = copy.deepcopy(grid1)\n gratios = [ratio1, ratio2, ratio3]\n ratios = np.zeros(3)\n rmss = np.zeros(3)\n\n jGrid = 3\n for iGrid in range(3):\n print(\"Gain Ratios for Telescopes T%d and T%d\" % (iGrid+2, jGrid+2))\n ratio, rms, aratio = gridratio(grids[iGrid], grids[jGrid])\n ratios[iGrid] = ratio\n rmss[iGrid] = rms\n writeFitsImage( rs, iGrid+2, aratio, projection)\n \n writeFitsImage( rs, 0, gridall, projection)\n plt.show()", "def main():\n strikes, dips, normals, slip = generate_normal_ss_data(330, 60, n=500, porp=1)\n #strikes, dips, normals, slip = generate_normal_data(330, 60, n=500, porp=10)\n sigma = invert_plane_stress(normals, slip)\n plot(sigma, strikes, dips)\n plt.show()", "def findzpd(self):\n dc=0.5*self.rms*self.ndstep\n #fixed at 0.1 of the dispersion\n dd=0.1*self.ws.coef[1]\n\n #set upt he docef values\n dcoef=self.ws.coef*0.0\n dcoef[0]=dc\n dcoef[1]=dd\n self.ws=st.findxcor(self.xarr, self.farr, self.swarr, self.sfarr, self.ws, \n dcoef=dcoef, ndstep=self.ndstep, best=False, inttype='interp')\n self.plotArt()\n self.redraw_canvas()", "def align(): # open EH and fast shutter\n\t#marAuxiliary.closeMarShield()\n\td2in()\n\td3in()\n\tsh('o')", "def graphic_auto():\r\n print(\"\\nCe mode n'est pas supporté. Passez en affichage textuel pour le mode automatique\")", "def normdos(line, E_fermi):\n\tls = line.split()\n\tif len(ls) == 3:\n\t\tls[0] = float(ls[0])-E_fermi\n\t\tline = \" {: 7.3f} {} {}\\n\".format(ls[0], ls[1], ls[2])\n\treturn line", "def spatial(self):", "def difficulty(mag):\n mag = float(mag)\n if mag <= -4:\n return \"Visible in daytime.\"\n elif mag <= 6:\n return \"Visible at night.\"\n else:\n flux = mag_def(\"%s x\" % mag)\n needed_flux = mag_def(\"6 x\")\n eye_area = math.pi * (0.005**2)\n needed_power = needed_flux * eye_area\n diameter = 2 * math.sqrt(needed_power / (flux*math.pi))\n return \"%s m telescope needed.\" % diameter", "def ic_fill():\n text = \"! 
Add missing Atoms and assign them coordinates\\n\"\n text += \"ic fill preserve\\nic param\\nic build\\nhbuild\\n\\n\"\n return text", "def stempot(self,xmax,ymax,nx,ny,atms,pixelshift,scalefactor):\n #zed=2 for rutherford scattering of the nucleus, less for screening\n zed = 1.7\n\n ix = numpy.arange(1.0,nx)\n iy = numpy.arange(1.0,ny)\n dx = xmax/nx\n dy = ymax/ny\n rx = numpy.arange(0,xmax-dx,dx)\n ry = numpy.arange(0,ymax-dy,dy)\n\n Zatom = atms.get_atomic_numbers()\n #translate atoms such that the center of mass is in the center of the computational cell\n com = atms.get_center_of_mass()\n #com = [ 44.40963074 , 44.65497562 , 44.90406073] #for AuNP\n #com = numpy.array(com)\n #print 'com',com -0.149836425, 0.29967285, 0\n #com += [0.41205016875, 0.6742639125, 0] #for rotated line profile \n #com += [-0.149836425, 0.29967285, 0] #for AuNP\n #com += pixelshift\n #print 'com+pixelshift',com\n cop = xmax/2.0\n trans = [cop-i for i in com]\n atms.translate(trans)\n positions=atms.get_positions()\n ax=[]\n ay=[]\n az=[]\n for o,t,h in positions:\n ax.append(o)\n ay.append(t)\n az.append(h)\n ax = numpy.array(ax)\n ay = numpy.array(ay)\n az = numpy.array(az)\n amax = len(Zatom)\n\n #find boundaries of slice\n axmin = min(ax)\n axmax = max(ax)\n aymin = min(ay)\n aymax = max(ay)\n\n V= numpy.zeros((nx,ny))\n\n #map x and y coords of the atoms to the nearest grid points\n #A fraction of the atom must be assigned to the closest gridpoints\n #to avoid sum and difference frequencies appearing in the image\n #grid point to the left of the atom\n ix = numpy.array([math.floor(axi/dx) for axi in ax])\n #apply periodic boundary conditions\n iax = numpy.array([math.fmod(iaxi,nx) for iaxi in ix])\n ibx = numpy.array([math.fmod(iaxi+1,nx) for iaxi in ix])\n #fraction of atom at iax\n fax = numpy.array([1-math.fmod((axi/dx),1 ) for axi in ax])\n #grid point above the atom\n iy = numpy.array([math.floor(ayi/dy) for ayi in ay])\n #apply periodic boundary conditions\n iay = numpy.array([math.fmod(iayi,ny) for iayi in iy])\n iby = numpy.array([math.fmod(iayi+1,ny) for iayi in iy])\n #fraction of atom at iay \n fay = numpy.array([1-math.fmod((ayi/dy),1 ) for ayi in ay])\n #Add each atom to the potential grid\n V1 = numpy.array([fax[i] * fay[i] * (Zatom[i]**zed) for i in range(len(fax))])\n V2 = numpy.array([(1-fax[i]) * fay[i] * (Zatom[i]**zed) for i in range(len(fax))])\n V3 = numpy.array([fax[i] * (1-fay[i]) * (Zatom[i]**zed) for i in range(len(fax))])\n V4 = numpy.array([(1-fax[i]) * (1-fay[i]) * (Zatom[i]**zed) for i in range(len(fax))])\n #V1 = numpy.array([fax[i] * fay[i] * scalefactor for i in range(len(fax))])\n #V2 = numpy.array([(1-fax[i]) * fay[i] * scalefactor for i in range(len(fax))])\n #V3 = numpy.array([fax[i] * (1-fay[i]) * scalefactor for i in range(len(fax))])\n #V4 = numpy.array([(1-fax[i]) * (1-fay[i]) * scalefactor for i in range(len(fax))])\n\n for j in range(amax):\n V[iax[j],iay[j]] += V1[j]\n V[ibx[j],iay[j]] += V2[j]\n V[iax[j],iby[j]] += V3[j]\n V[ibx[j],iby[j]] += V4[j]\n rev_trans = [-1.0*i for i in trans]\n atms.translate(rev_trans)\n return V", "def displayNative(self):\n\n\t\tcmd.hide(\"lines\", \"native\")\n\t\tcmd.color(\"gray\", \"native & name CA\")\n\t\t#cmd.zoom(\"nearby\")", "def display4(*args):\n #-------------------- unpack\n twiss_func = args[0]\n cos_like = args[1]\n sin_like = args[2]\n lat_plot = args[3]\n #-------------------- beta x,y & dispersion x\n s = [twiss_func(i,'s') for i in range(twiss_func.nbpoints)] # Abszisse\n bx = [twiss_func(i,'bx') for i in 
range(twiss_func.nbpoints)] # beta x\n by = [twiss_func(i,'by') for i in range(twiss_func.nbpoints)] # beta y\n dx = [twiss_func(i,'dx') for i in range(twiss_func.nbpoints)] # dispersion x\n#-------------------- longitudinal trajectories\n z1= [cos_like(i,'s') for i in range(cos_like.nbpoints)]\n cz= [cos_like(i,'cz') for i in range(cos_like.nbpoints)]\n cdp= [cos_like(i,'cdp') for i in range(cos_like.nbpoints)]\n\n z2= [sin_like(i,'s') for i in range(sin_like.nbpoints)]\n sz= [sin_like(i,'sz') for i in range(sin_like.nbpoints)]\n sdp= [sin_like(i,'sdp') for i in range(sin_like.nbpoints)]\n #-------------------- lattice viseo\n vzero = [0. for i in range(lat_plot.nbpoints)] # zero line\n vis_abszisse = [lat_plot(i,'s') for i in range(lat_plot.nbpoints)]\n vis_ordinate = [lat_plot(i,'viseo') for i in range(lat_plot.nbpoints)]\n #-------------------- figure frame\n width=14; height=7.6\n # fighdr = 'lattice version = {}, input file = {}'.format(PARAMS['lattice_version'],PARAMS['input_file'])\n fig = plt.figure(num=1,figsize=(width,height),facecolor='#eaecef',tight_layout=False)\n\n #-------------------- beta functions\n splot211=plt.subplot(211)\n splot211.set_title('beta x,y')\n # mapping box\n splot211.text(0.01, 1.1, UTIL.FLAGS.get('mapping'),transform=splot211.transAxes,fontsize=8,bbox=dict(boxstyle='round',facecolor='wheat',alpha=0.5),verticalalignment='top')\n # function plots\n plt.plot(s,bx, label=r\"$\\beta$x [m]\", color='black', linestyle='-')\n plt.plot(s,by, label=r\"$\\beta$y [m]\", color='red', linestyle='-')\n plt.plot(s,dx, label=r'$\\eta_x$ [m]' , color='green', linestyle='-') # dispersion x\n vscale=splot211.axis()[3]*0.25\n viseoz = [x*vscale for x in vis_ordinate]\n plt.plot(vis_abszisse,viseoz,label='',color='black')\n plt.plot(vis_abszisse,vzero,color='green',linestyle='--')\n # zero line\n splot211.plot(vis_abszisse,vzero,color='green',linestyle='--')\n plt.legend(loc='lower right',fontsize='x-small')\n\n #-------------------- longitudinal tracks z, dP/P\n # ax_l = left abszisse\n ax_l=plt.subplot(212)\n # ax_l=plt.subplot(10,1,(7,9))\n ax_l.set_title('synchrotron oscillation')\n ax_l.set_ylabel(r\"z [mm]\")\n ax_l.tick_params(axis='y', colors='green')\n ax_l.yaxis.label.set_color('green')\n ax_l.plot(z1,cz,label='C',color='green')\n ax_l.plot(z2,sz,label='S',color='green',linestyle=':')\n plt.legend(loc='lower left',fontsize='x-small')\n # ax_r = right abszisse\n ax_r = ax_l.twinx()\n ax_r.set_ylabel(r'$\\Delta$p/p [%]')\n ax_r.tick_params(axis='y', colors='red')\n ax_r.yaxis.label.set_color('red')\n ax_r.plot(z2,cdp,label='C',color='red')\n ax_r.plot(z2,sdp,label='S',color='red',linestyle=':')\n ax_r.plot(vis_abszisse,vzero,color='red', linestyle='--')\n plt.legend(loc='lower right',fontsize='x-small')\n # lattice elements\n vscale=ax_l.axis()[3]*0.25\n viseoz = [x*vscale for x in vis_ordinate]\n ax_l.plot(vis_abszisse,viseoz,label='',color='black')\n ax_l.plot(vis_abszisse,vzero,color='green',linestyle='--')", "def main():\n\n parser = argparse.ArgumentParser(description=main.__doc__,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n epilog=\"Homepage: https://github.com/kbat/mc-tools\")\n parser.add_argument('com', type=str, help='plot requests file name', nargs='?', default=\"/tmp/foo.c\")\n parser.add_argument('comout', type=str, help='COMOUT file name', nargs='?', default=\"zoom\")\n\n args = parser.parse_args()\n\n cmd = {} # dictionary of commands\n\n bas = False\n plane = False\n \n with open(args.com) as f:\n for line in f.readlines():\n words = 
line.strip().split()\n if len(words) is 0:\n continue\n\n for i,w in enumerate(words):\n if re.search(\"^bas\", w):\n cmd['bas'] = list(map(float, words[i+1:i+7]))\n if plane is False: bas = True # basis was before plane cuts\n elif re.search(\"^or\", w):\n cmd['or'] = list(map(float, words[i+1:i+4]))\n elif re.search(\"^ex\", w):\n try: # both x and y scales are given\n cmd['ex'] = list(map(float, words[i+1:i+3]))\n continue\n except ValueError: # just 1 scale is given\n cmd['ex'] = list(map(float, words[i+1:i+2]))\n elif re.search(\"^lab\", w):\n cmd['label'] = list(map(int, map(float, words[i+1:i+3]))) #+ [words[i+3]]\n elif re.search(\"^p[xyz]\", w):\n cmd[w] = [float(words[i+1])]\n if bas is False: plane = True # plane cuts were before basis\n elif re.search(\"^legend\", w):\n cmd[w] = [words[i+1]]\n elif w == \"scale\":\n print(w)\n if int(words[i+1]): # no need to put 'scale 0'\n cmd[w] = [words[i+1]]\n elif w in (\"mesh\"):\n if int(words[i+1])==1: # no need to put 'mesh 1'\n cmd[w] = [words[i+1]]\n\n print(bas, plane)\n\n if plane: # bas was first\n keys = ('bas', 'or', 'ex', 'px', 'py', 'pz', 'label', 'mesh', 'legend', 'scale')\n elif bas:\n keys = ('or', 'ex', 'px', 'py', 'pz', 'bas', 'label', 'mesh', 'legend', 'scale')\n else:\n keys = {'or', 'ex', 'label', 'mesh', 'legend', 'scale'}\n \n with open(args.comout, 'w') as f:\n for key in keys:\n if key in cmd:\n # newline required by mcplot:\n if key in ('mesh', 'legend', 'scale', 'label'):\n f.write(\"\\n\")\n f.write(\"%s %s \" % (key,\" \".join(str(e) for e in cmd[key]),))\n f.write(\"\\n\")", "def displayNormal(self):\n\n\t\tif self.native.numResidues() != 0:\n\t\t\tcmd.hide(\"lines\", \"native\")\n\n\t\tview = cmd.get_view()\n\t\tself.displayDesigned()\n\t\tcmd.set_view(view)", "def render(self):\n map = {0:'.', 1:'x', 2:'o'} # grid label vs how to plot\n print(''.join(map[i] for i in self.grid[0:3]))\n print(''.join(map[i] for i in self.grid[3:6]))\n print(''.join(map[i] for i in self.grid[6:9]))\n print('====')", "def render(self):\n map = {0:'.', 1:'x', 2:'o'} # grid label vs how to plot\n print(''.join(map[i] for i in self.grid[0:3]))\n print(''.join(map[i] for i in self.grid[3:6]))\n print(''.join(map[i] for i in self.grid[6:9]))\n print('====')", "def write_ldos(x,y,dos,output_file=\"LDOS.OUT\",z=None):\n fd = open(output_file,\"w\") # open file\n fd.write(\"# x, y, local density of states\\n\")\n ii = 0\n for (ix,iy,idos) in zip(x,y,dos): # write everything\n fd.write(str(ix) +\" \"+ str(iy) + \" \"+ str(idos))\n if z is not None: fd.write(\" \"+str(z[ii]))\n fd.write(\"\\n\")\n ii += 1\n fd.close() # close file", "def residLike(self):\n\n # --------------------------------------------------------------------------------------------- #\n # Compute the residuals\n if self.csys == 'GAL':\n # Redo some file computations with this coordinate system\n self.outbinexp = os.path.join(self.workpath, 'BinExpMapGAL'+self.suffix+'.fits')\n self.outbincub = os.path.join(self.workpath, 'BinCubeGAL'+self.suffix+'.fits')\n self.outsrcmap = os.path.join(self.workpath, 'SrcMapsGAL'+self.suffix+'.fits')\n self.outresid = os.path.join(self.workpath, 'ResidGAL'+self.suffix+'.fits')\n self.outresig = os.path.join(self.workpath, 'ResSigmaGAL'+self.suffix+'.fits')\n\n self._gtExpmap()\n self._gtBincube()\n self._gtSrcmap()\n else:\n # Nothing to add\n pass\n \n self._gtBinmap()\n self._gtModel()\n # Create the residual count map (count_map - model_map)\n if not os.path.isfile(self.outresid):\n os.popen(\"farith {} {} {} 
ops=SUB\".format(self.outbinmap, self.outgtmod,\n self.outresid))\n # Create the sigma-residual map (residual_map/sqrt(model_map))\n if not os.path.isfile(self.outresig):\n os.popen(\"ftpixcalc {} '(a-b)/sqrt(b)' a={} b={}\".format(self.outresig,\n self.outbinmap, self.outgtmod))\n\n # --------------------------------------------------------------------------------------------- #\n # Get the sources to overplot\n srcs = self.getSrc()\n srcs = srcs[(srcs['Separation'] <= 3.) & ([not i.endswith('c') for i in srcs['Name']])]\n # Plot the residuals\n resplt1 = FermiMap()\n resplt1.savepath = self.workpath\n resplt1.image = self.outresig\n resplt1.figname = 'ResSigma.pdf'\n dmin, dmax = np.abs(resplt1.datamin), resplt1.datamax\n resplt1.datamin = - min(dmin, dmax)\n resplt1.datamax = + min(dmin, dmax)\n resplt1.cbarlabel = r'Residual $\\sigma$/pixel'\n resplt1.mapSky()\n resplt1.srcSky(srcs['RA'], srcs['DEC'], srcs['Name'])\n resplt1.save()\n print(\"\\t=== Figure '{}' created ===\".format( os.path.join(resplt1.savepath, resplt1.figname) ))\n\n resplt2 = FermiMap()\n resplt2.savepath = self.workpath\n resplt2.image = self.outresid\n resplt2.figname = 'Residuals.pdf'\n dmin, dmax = np.abs(resplt2.datamin), resplt2.datamax\n resplt2.datamin = - min(dmin, dmax)\n resplt2.datamax = + min(dmin, dmax)\n resplt2.cbarlabel = r'Residual counts/pixel'\n resplt2.mapSky()\n resplt2.srcSky(srcs['RA'], srcs['DEC'], srcs['Name'])\n resplt2.save()\n print(\"\\t=== Figure '{}' created ===\".format( os.path.join(resplt2.savepath, resplt2.figname) ))\n return", "def command_line(X=3,Y=3,visible=True):\r\n #visible flag can be set to False for timing purposes\r\n if X == 0 or Y == 0:\r\n return\r\n# np.random.seed(1)\r\n global grid\r\n grid = make_grid(X,Y)\r\n while not check_done(grid):\r\n (x,y) = get_min_shannon_entropy(grid)\r\n #print(\"Collapsing \",grid[y][x].block_loc)\r\n grid[y][x].collapse_wavefunction()\r\n if visible:\r\n render_text(grid)\r\n #print(\"---\"*5)\r\n return", "def SpatialPyramid(des, codebook):\r\n\t# YOUR CODE HERE\r", "def showDetectorMap(display, pfsConfig, detMap, width=100, zoom=0, xcen=None, fiberIds=None, showLegend=True,\n lines=None, alpha=1.0, getCtypeFromReferenceLine=getCtypeFromReferenceLineDefault):\n\n plt.sca(display._impl._figure.axes[0])\n height = detMap.getBBox().getHeight()\n y = np.arange(0, height)\n\n SuNSS = TargetType.SUNSS_IMAGING in pfsConfig.targetType\n\n showAll = False\n if xcen is None:\n if fiberIds is None:\n fiberIds = detMap.fiberId\n showAll = True\n else:\n try:\n fiberIds[0]\n except TypeError:\n fiberIds = [fiberIds]\n\n if len(fiberIds) == 1:\n fid = fiberIds[0]\n try:\n xcen = detMap.getXCenter(fid, height/2)\n except IndexError:\n warnings.warn(\"Index %d is not found in DetectorMap\" % (fid)) # doesn't permit lazy eval\n xcen = detMap.bbox.getWidth()//2\n else:\n pass # xcen is already set\n\n nFiberShown = 0\n for fid in detMap.fiberId:\n ls = '-'\n if fid in pfsConfig.fiberId:\n ind = pfsConfig.selectFiber([fid])\n imagingFiber = pfsConfig.targetType[ind] == TargetType.SUNSS_IMAGING\n if pfsConfig.fiberStatus[ind] == FiberStatus.BROKENFIBER:\n ls = ':'\n color = 'cyan' if SuNSS and imagingFiber else 'magenta'\n else:\n color = 'green' if SuNSS and imagingFiber else 'red'\n else:\n if SuNSS:\n continue\n\n if fiberIds is not None and len(fiberIds) > 1 and fid not in fiberIds:\n continue\n\n try:\n ind = pfsConfig.selectFiber([fid])[0]\n except IndexError: # e.g. 
the pfsConfig contains a subset of the entire PFI\n continue\n\n imagingFiber = pfsConfig.targetType[ind] == TargetType.SUNSS_IMAGING\n if pfsConfig.fiberStatus[ind] == FiberStatus.BROKENFIBER:\n ls = ':'\n color = 'cyan' if SuNSS and imagingFiber else 'magenta'\n else:\n color = 'green' if SuNSS and imagingFiber else 'red'\n\n fiberX = detMap.getXCenter(fid, height//2)\n if showAll or len(fiberIds) > 1 or np.abs(fiberX - xcen) < width/2:\n fiberX = detMap.getXCenter(fid)\n plt.plot(fiberX[::20], y[::20], ls=ls, alpha=alpha, label=f\"{fid}\",\n color=color if showAll else None)\n nFiberShown += 1\n #\n # Plot the position of a set of lines\n #\n if lines:\n if fiberIds is None or len(fiberIds) == 0:\n fiberIds = detMap.fiberId\n stride = len(fiberIds)//25 + 1\n else:\n stride = 1\n\n # find the first and last valid fibres\n firstGood, lastGood = None, None\n ll = lines[0]\n for i, fid in enumerate(fiberIds):\n xc, yc = detMap.findPoint(fid, ll.wavelength)\n\n if np.isnan(xc + yc):\n continue\n\n if firstGood is None:\n firstGood = i\n lastGood = i\n\n for ll in lines:\n ctype = getCtypeFromReferenceLine(ll)\n if ctype == \"IGNORE\":\n continue\n\n xy = np.zeros((2, len(fiberIds))) + np.NaN\n\n for i, fid in enumerate(fiberIds):\n if i%stride != 0 and i not in (firstGood, lastGood):\n continue\n\n xc, yc = detMap.findPoint(fid, ll.wavelength)\n\n if len(fiberIds) == 1:\n display.dot('o', xc, yc, ctype=ctype)\n else:\n xy[0, i] = xc\n xy[1, i] = yc\n\n if len(fiberIds) > 1:\n good = np.isfinite(xy[0])\n if sum(good) > 0:\n plt.plot(xy[0][good], xy[1][good], color=ctype, alpha=alpha)\n\n if not showAll:\n if nFiberShown > 0 and showLegend:\n plt.legend()\n if zoom > 0:\n display.zoom(zoom, xcen, np.mean(y))", "def initial_representations():\n cmd.hide('everything', 'all')\n cmd.show('cartoon', 'all')\n cmd.select('ligand', 'resn NFT')\n cmd.deselect()\n cmd.show(\"sticks\", \"ligand\")", "def repairWorldMap(self,fileRefs,gridLines=True):\n if not fileRefs.fmap: return 0\n progress = self.progress\n progress.setMax((28*2)**2)\n progress(0.0,_(\"Drawing Cells\"))\n proCount = 0\n for gridx in range(-28,28,1):\n for gridy in range(28,-28,-1):\n id = '[%d,%d]' % (gridx,gridy)\n cell = fileRefs.cells_id.get(id,None)\n isMarked = cell and cell.flags & 32\n fileRefs.fmap.drawCell(self.lands.get(id),gridx,gridy,isMarked)\n proCount += 1\n progress(proCount)\n fileRefs.fmap.drawGrid(gridLines)\n return 1", "def handle_solution_line(line,i):\n satisfied_clauses = set()\n print(\"\\n % solution\");\n for s in line.split()[1:]:\n l = int(s)\n if (l !=0):\n satisfied_clauses.update(lit_to_clauses[dimacs2index(l)])\n if (l>0): \n print(\" \\\\node () at (v%d) {\\\\only<%d>{\\pgfimage[width = 1cm]{figures/switchon}}} ;\" % (l,i))\n else: \n assert l<0\n print(\" \\\\node () at (v%d) {\\\\only<%d>{\\pgfimage[width = 1cm]{figures/switchoff}}} ;\" % (-l,i))\n for c in satisfied_clauses:\n print(\" \\\\node () at (c%d) {\\\\only<%d>{\\pgfimage[width = 1cm]{figures/lighton}}} ;\" % (c,i))\n for c in set(range(1,m+1)).difference(satisfied_clauses):\n print(\" \\\\node () at (c%d) {\\\\only<%d>{\\pgfimage[width = 1cm]{figures/lightoff}}} ;\" % (c,i))", "def display():\r\n fill(un)\r\n ellipse(x,y,2*rayonBalle,2*rayonBalle)\r\n \r\n fill(deux)\r\n ellipse(xDeux,yDeux,2*rayonBalle,2*rayonBalle) \r\n \r\n fill(trois)\r\n ellipse(xTrois,yTrois,2*rayonBalle,2*rayonBalle)", "def solvate(self):\n\n pass", "def show_map(pdb,show_sticks_all=False, show_sticks_metalbinding=True, show_probes=True, show_pdb_metals=True):\n 
view=py3Dmol.view(width=1000, height=800)\n\n view.addModel(open(pdb+'.pdb', 'r').read(),'pdb')\n if show_probes:\n view.addModel(open(pdb+'_PredictedSites.xyz', 'r').read(),'xyz')\n probes = open(pdb+'_PredictedSites.xyz', 'r').readlines()\n if(int(probes[0])!=0):\n probabilities = [p.replace('#','').split()[-1] for p in probes[2:]] # read p from comment in xyz file\n colors = {}\n # use different colors for the probabilities\n for i,x in enumerate(probabilities):\n colors[i] = '#%02x%02x%02x' % (0, 128, int(float(x)/float(probabilities[0])*255))\n else: #no predicted site\n colors = [] \n view.addLabel(\"No probe predicted\", {'position': {'x':0, 'y':0, 'z':0}, 'backgroundColor': '#0080FF', 'fontColor': 'white'});\n \n view.zoomTo()\n view.setBackgroundColor('white')\n view.setStyle({},{'cartoon': {'color':'gray'}})\n if show_sticks_all:\n view.setStyle({}, {'stick':{},'cartoon': {'color':'gray'}})\n if show_pdb_metals:\n view.getModel(0).setStyle({'resn':\"ZN\"},{'sphere': {'opacity':.75}})\n view.getModel(0).setStyle({'resn':\"CA\"},{'sphere': {'opacity':.75}})\n view.getModel(0).setStyle({'resn':\"CU\"},{'sphere': {'opacity':.75}})\n view.getModel(0).setStyle({'resn':\"HG\"},{'sphere': {'opacity':.75}})\n view.getModel(0).setStyle({'resn':\"MG\"},{'sphere': {'opacity':.75}})\n view.getModel(0).setStyle({'resn':\"FE\"},{'sphere': {'opacity':.75}})\n view.getModel(0).setStyle({'resn':\"MN\"},{'sphere': {'opacity':.75}})\n view.getModel(0).setStyle({'resn':\"NI\"},{'sphere': {'opacity':.75}})\n view.getModel(0).setStyle({'resn':\"MB\"},{'sphere': {'opacity':.75}})\n \n if show_probes:\n view.getModel(1).setStyle({},{'sphere': {'colorscheme':{'prop':'index', 'map':colors}}})\n \n # add hoverable labels for the residues and the predicted metals\n # two callbacks are needed, one for the residues and one for the metals\n # the metal one displays the probability\n view.getModel(0).setHoverable({},True,'''function(atom,viewer,event,container) {\n if(!atom.label) {\n atom.label = viewer.addLabel(atom.resn+atom.resi+\":\"+atom.atom,{position: atom, backgroundColor: 'mintcream', fontColor:'black'});\n }}''',\n '''function(atom,viewer) { \n if(atom.label) {\n viewer.removeLabel(atom.label);\n delete atom.label;\n }\n }''')\n view.getModel(1).setHoverable({},True,'''function(atom,viewer,event,container) {\n if(!atom.label) {\n atom.label = viewer.addLabel(atom.atom+\" [\"+atom.serial+\"]\",{position: atom, backgroundColor: 'mintcream', fontColor:'black'});\n }}''',\n '''function(atom,viewer) { \n if(atom.label) {\n viewer.removeLabel(atom.label);\n delete atom.label;\n }\n }''')\n if show_sticks_metalbinding:\n view.setStyle({'resn':\"HIS\"},{'stick': {}, 'cartoon': {'color':'gray'}})\n view.setStyle({'resn':\"ASP\"},{'stick': {}, 'cartoon': {'color':'gray'}})\n view.setStyle({'resn':\"GLU\"},{'stick': {}, 'cartoon': {'color':'gray'}})\n view.setStyle({'resn':\"CYS\"},{'stick': {}, 'cartoon': {'color':'gray'}})\n\n return view.show()", "def visualizeObs():\n fcontourf(fObs, [-2, 2], [-1, 1], [0, 10])", "def visualize_svd():", "def test_rebincpds_geom(self):\n command = '{0} -r 1.03'.format(\n os.path.join(self.datadir, 'monol_test_E3-50_cpds') +\n HEN_FILE_EXTENSION)\n hen.rebin.main(command.split())\n os.path.exists(os.path.join(self.datadir,\n 'monol_test_E3-50_cpds_rebin1.03' +\n HEN_FILE_EXTENSION))", "def draw_par_mer(self):\n self.m.drawparallels(np.arange(int(self.latmin), int(self.latmax), 5),\n labels=[1, 0, 0, 0], linewidth=self.lw, fontsize=self.fs)\n 
self.m.drawmeridians(np.arange(self.lonmin, self.lonmax+4, 7),\n labels=[0, 0, 0, 1], linewidth=self.lw, fontsize=self.fs)", "def exemple():\r\n\r\n case_1 = \"\\u25CC\"\r\n case_1 = u\"{}\".format(case_1)\r\n fourmi_1_1 = \"\\u22C0\"\r\n fourmi_1_1 = u\"{}\".format(fourmi_1_1)\r\n fourmi_2_1 = \"\\u21CA\"\r\n fourmi_2_1 = u\"{}\".format(fourmi_2_1)\r\n fourmi_3_1 = \"\\u25BC\"\r\n fourmi_3_1 = u\"{}\".format(fourmi_3_1)\r\n fourmi_1_2 = \"\\u22C0\"\r\n fourmi_1_2 = u\"{}\".format(fourmi_1_2)\r\n fourmi_2_2 = \"\\u21C8\"\r\n fourmi_2_2 = u\"{}\".format(fourmi_2_2)\r\n fourmi_3_2 = \"\\u25B2\"\r\n fourmi_3_2 = u\"{}\".format(fourmi_3_2)\r\n clods_1 = \"\\u2726\"\r\n clods_1 = u\"{}\".format(clods_1)\r\n clods_2 = \"\\u2737\"\r\n clods_2 = u\"{}\".format(clods_2)\r\n clods_3 = \"\\u2739\"\r\n clods_3 = u\"{}\".format(clods_3)\r\n \r\n print(term.move_xy(82,3) + term.white + 'DEPOT : ' + (case_1))\r\n print(term.move_xy(82,5) + term.white + 'Clods de niveau 1 : ' + (clods_1))\r\n print(term.move_xy(82,6) + term.white + 'Clods de niveau 2 : ' + (clods_2))\r\n print(term.move_xy(82,7) + term.white + 'Clods de niveau 3 : ' + (clods_3))\r\n print(term.move_xy(82,8) + term.white + 'Fourmis de niveau 1 : ' + (fourmi_1_1) + ' ' + (fourmi_1_2))\r\n print(term.move_xy(82,9) + term.white + 'Fourmis de niveau 2 : ' + (fourmi_2_1) + ' ' + (fourmi_2_2))\r\n print(term.move_xy(82,10) + term.white + 'Fourmis de niveau 3 : ' + (fourmi_3_1) + ' ' + (fourmi_3_2))\r\n print(term.move_xy(82,12) + term.white + 'Joueur 1 vous jouez en rouge.')\r\n print(term.move_xy(82,13) + term.white + 'Joueur 2 vous jouez en jaune.')", "def rec_default(self):\n self.pcdi_triggers.setText('(50,50)')\n self.pcdi_type.setText('LUCY')\n self.pcdi_iter.setText('20')\n self.pcdi_normalize.setText('true')\n self.pcdi_roi.setText('(16, 16, 16)')", "def syed_dilation(data, vessel):", "def flat(s='flat'):\n s = s.strip()[:80] #truncate to 80 char to fit in FITS header\n print camera.SetShutter(0)\n camera.status.imgtype = 'FLAT'\n camera.status.object = s\n camera.status.update()", "def update( self, qhalf = \"qhalf\", qconvex = \"qconvex\" ):\n\n def makeDots( v1, v2, v3 ):\n \"\"\" helper function\n \"\"\"\n res = []\n for nx in xrange( -2, 3 ):\n for ny in xrange( -2, 3 ):\n for nz in xrange( -2, 3 ):\n res.append( ( v1 * nx ) + ( v2 * ny ) + ( v3 * nz ) )\n return res\n\n def str2vec( s ):\n \"\"\" helper function\n \"\"\"\n if ',' in s:\n ss = s.split( \",\" )\n else:\n ss = s.split( )\n\n fs = map( lambda s: float( s.strip( ) ), ss )\n return Vec( fs[ 0 ], fs[ 1 ], fs[ 2 ] )\n\n\n rep = self.rep.minimize()\n\n try:\n dots = makeDots( rep.v1, rep.v2, rep.v3 )\n\n if 'win' in sys.platform:\n p1 = Popen( [ qhalf, \"Fp\" ], stdin = PIPE, stdout = PIPE ) #, close_fds = True )\n p2 = Popen( [ qconvex, \"o\" ],stdin = PIPE, stdout = PIPE ) #, close_fds = True )\n else:\n p1 = Popen( [ qhalf, \"Fp\" ], stdin = PIPE, stdout = PIPE, close_fds = True )\n p2 = Popen( [ qconvex, \"o\" ],stdin = PIPE, stdout = PIPE, close_fds = True )\n\n\n s = '3 1\\n'\n s += '0 0 0\\n'\n s += '4\\n'\n s += str( len( dots ) - 1 ) + '\\n'\n for d in dots:\n if d.vlen() > 0.001:\n n = d.norm()\n off = - d.vlen() / 2.0\n s += \"%.20f %.20f %.20f %.20f\\n\" % ( n[ 0 ], n[ 1 ], n[ 2 ], off )\n\n ret = p1.communicate( s )[ 0 ]\n ret = p2.communicate( ret )[ 0 ]\n\n lines = ret.split( '\\n' )\n params = lines[ 1 ].split( )\n params = map( lambda s: int( s.strip() ), params )\n dcount = params[ 0 ]\n\n lines = lines[ 2: ]\n pnts = map( lambda s: str2vec( s ), lines[ 
:dcount ] )\n\n lines = lines[ dcount: ]\n\n polys = []\n for l in lines:\n nums = l.split()\n if nums:\n nums = map( lambda s: int( s ), nums )\n nums = nums[ 1: ]\n ppnt = map( lambda n: pnts[ n ], nums )\n polys.append( Poly( ppnt ) )\n except Exception, e:\n print \"errrrrorrr:\", e\n\n self.mesh = Mesh( polys )", "def SimpleLatLongGrid(min_x,min_y,max_x,max_y,hdeg,hmin,hsec,vdeg,vmin,vsec,\n color=(0.5,1.0,0.5,1.0),xoff=-0.18,yoff=1.04,\n label_type=None,shapes_name=\"Grid\"):\n\n shps=gview.GvShapes(name=shapes_name)\n gview.undo_register( shps )\n shps.add_field('position','string',20)\n\n if os.name == 'nt':\n font=\"-adobe-helvetica-medium-r-*-*-12-*-*-*-*-*-*-*\"\n else:\n #font=\"-adobe-helvetica-medium-r-*-*-12-*-*-*-*-*-*-*\"\n #font=\"-urw-helvetica-medium-r-normal-*-9-*-*-*-p-*-iso8859-2\"\n font=\"-adobe-helvetica-medium-r-normal-*-8-*-*-*-p-*-iso10646-1\"\n #font=\"-misc-fixed-medium-r-*-*-9-*-*-*-*-*-*-*\"\n\n x_spacing=float(hdeg)+(float(hmin)+(float(hsec)/60.0))/60.0\n y_spacing=float(vdeg)+(float(vmin)+(float(vsec)/60.0))/60.0\n\n\n # Round to nearest integer space\n max_x=min_x+numpy.floor((max_x-min_x)/x_spacing)*x_spacing\n max_y=min_y+numpy.floor((max_y-min_y)/y_spacing)*y_spacing\n\n lxoff=(max_x-min_x)*xoff # horizontal label placement\n lyoff=(max_y-min_y)*yoff # vertical label placement\n\n for hval in numpy.arange(min_x,\n max_x+x_spacing/100.0,\n x_spacing):\n nshp=gview.GvShape(type=gview.GVSHAPE_LINE)\n nshp.set_node(hval,max_y,0,0)\n nshp.set_node(hval,min_y,0,1)\n shps.append(nshp)\n pshp=gview.GvShape(type=gview.GVSHAPE_POINT)\n pshp.set_node(hval,min_y+lyoff)\n hstr=GetLatLongString(hval,'longitude')\n pshp.set_property('position',hstr)\n shps.append(pshp)\n\n for vval in numpy.arange(min_y,\n max_y+y_spacing/100.0,\n y_spacing):\n nshp=gview.GvShape(type=gview.GVSHAPE_LINE)\n nshp.set_node(min_x,vval,0,0)\n nshp.set_node(max_x,vval,0,1)\n shps.append(nshp)\n pshp=gview.GvShape(type=gview.GVSHAPE_POINT)\n pshp.set_node(min_x+lxoff,vval)\n vstr=GetLatLongString(vval,'latitude')\n pshp.set_property('position',vstr)\n shps.append(pshp)\n\n cstr=gvogrfs.gv_to_ogr_color(color)\n if len(cstr) < 9:\n cstr=cstr+\"FF\"\n clstr=str(color[0])+' '+str(color[1])+' '+str(color[2])+' '+str(color[3])\n\n layer=gview.GvShapesLayer(shps)\n layer.set_property('_line_color',clstr)\n layer.set_property('_point_color',clstr)\n # Set antialias property so that lines look nice\n # when rotated.\n layer.set_property('_gl_antialias','1')\n layer.set_property('_gv_ogrfs_point',\n 'LABEL(t:{position},f:\"'+font+'\",c:'+cstr+')')\n layer.set_read_only(True)\n\n return layer", "def plot_initial_geometry(ni=0.0, mu=0.5):", "def main(idrun):\n int_type = numpy.int32\n double_type = numpy.float64\n float_type = numpy.float32\n complex_type = numpy.complex64\n\n ns = 7\n iudm = 19; iuv = 12\n dname = numpy.array([\"LONGITUDINAL EFIELD \",\"ELEC CURRENT DENSITY\",\n \"VECTOR POTENTIAL \",\"TRANSVERSE EFIELD \",\n \"MAGNETIC FIELD \",\"RADIATIVE VPOTENTIAL\",\n \"ION CURRENT DENSITY \"],dtype=str)\n\n# create string from idrun\n if (idrun < 0):\n cdrun = \"Unknown\"\n while (cdrun.isdigit() == False):\n cdrun = input(\"enter integer idrun: \")\n idrun = int(cdrun)\n cdrun = str(idrun)\n fname = \"diag3.\" + cdrun\n cmfield3.ffopen3(iudm,fname)\n\n# nscalars = table of available diagnostics\n nscalars = numpy.zeros((ns),int_type,'F')\n\n# determine which vector diagnostics are available\n cmfield3.readvdiags3(iudm,nscalars)\n\n nts = numpy.zeros((1),int_type,'F')\n modesx = 
numpy.zeros((1),int_type,'F')\n modesy = numpy.zeros((1),int_type,'F')\n modesz = numpy.zeros((1),int_type,'F')\n mrec = numpy.zeros((1),int_type,'F')\n fname = numpy.array([\"\"],'S32')\n\n# select diagnostic\n m = numpy.sum(nscalars)\n while True:\n if (m > 0):\n n = -1\n while True:\n if (n < 0):\n for i in range(0,ns):\n if (nscalars[i]==1):\n print (\"enter \", i+1,\" for\", \n numpy.str.rstrip(dname[i]))\n print (\"enter \", 0,\" for EXIT\")\n c = input(\"\")\n if (c.isdigit()):\n n = int(c)\n if (n==0):\n break\n if ((n >= 1) and (n <= ns)):\n if (nscalars[n-1]==0):\n n = -1\n else:\n n = -1\n if (n > 0):\n break\n print (\"invalid entry, try again or enter 0 to quit\")\n else:\n print (\"no vector diagnostic files found\")\n n = 0\n# exit procedure\n if (n==0):\n if (\"vfield\" in globals()):\n vfield = None\n cmfield3.closeff3(iudm)\n return\n\n print (numpy.str.rstrip(dname[n-1]), \" diagnostic selected\")\n\n# return parameters for selected vector diagnostic:\n# nts, modesx, modesy, modesz, nrec, fname\n cmfield3.vdiagparams3(iudm,n,nts,modesx,modesy,modesz,mrec,fname)\n nrec = mrec[0]\n\n# nx/ny/nz = number of global grid points in x/y/z direction\n nx = int(math.pow(2,in3.indx)); ny = int(math.pow(2,in3.indy))\n nz = int(math.pow(2,in3.indz))\n# kyp/kzp = number of real grids in each field partition in y/z\n kyp = int((ny - 1)/in3.nvpy) + 1; kzp = int((nz - 1)/in3.nvpz) + 1\n# kyb/kzb = minimum number of processors in distributed array in y/z\n kyb = int((ny - 1)/kyp) + 1; kzb = int((nz - 1)/kzp) + 1\n# nyv = second dimension of scalar field array, >= ny\n# nzv = third dimension of scalar field array, >= nz\n nyv = kyp*kyb; nzv = kzp*kzb\n\n# allocate vector array\n if (\"vfield\" not in globals()):\n vfield = numpy.empty((in3.ndim,nx,nyv,nzv),float_type,'F')\n dt = in3.dt*float(nts[0])\n\n# open stream file for vector field\n cmfield3.fsopen3(iuv,fname)\n\n# nrec = number of complete records\n nrec = int(nrec/(kyb*kzb))\n print (\"records found: nrec = \", nrec)\n\n# read and transpose vector data\n for ii in range(0,nrec):\n# read real vector field\n cmfield3.freadv3(iuv,vfield,in3.ndim,nx,kyp,kyb,kzp,kzb)\n it = nts[0]*ii\n time = dt*float(ii)\n# show time\n print (\"it,time=\",it,time)\n cmfield3.closeff3(iuv)\n print()", "def slabldos(h,energies=np.linspace(-1.0,1.0,40),delta=None,nk=40):\n if h.dimensionality!=2: raise # nope\n ds = ldosmap(h,energies=energies,delta=delta,nk=nk)\n if len(ds[0])!=len(h.geometry.z): \n print(\"Wrong dimensions\",len(ds[0]),len(h.geometry.z))\n raise\n f = open(\"DOSMAP.OUT\",\"w\")\n f.write(\"# energy, index, DOS, zposition\\n\")\n for ie in range(len(energies)):\n for iz in range(len(h.geometry.z)):\n f.write(str(energies[ie])+\" \")\n f.write(str(iz)+\" \")\n f.write(str(ds[ie,iz])+\" \")\n f.write(str(h.geometry.z[iz])+\"\\n\")\n f.close()\n return energies,np.transpose(ds) # retunr LDOS ", "def fix(self):\n gAsset = cmds.ls(type='gAsset')\n\n trans = cmds.listRelatives(gAsset[0], p=True)\n meshes = cmds.listRelatives(trans, ad=True, type='mesh')\n for mesh in meshes:\n if mesh:\n try:\n cmds.addAttr(mesh, ln=\"grid_renderGeo\", at='double', dv=1)\n cmds.setAttr(\n '{0}.grid_renderGeo'.format(mesh), e=False, keyable=False, lock=True)\n except:\n pass\n\n self.run()", "def SimpleMeasuredGrid(min_x,min_y,max_x,max_y,x_spacing,y_spacing,\n color=(0.5,1.0,0.5,1.0),xoff=-0.14,yoff=1.04,\n label_type=None,shapes_name=\"Grid\"):\n\n shps=gview.GvShapes(name=shapes_name)\n gview.undo_register( shps )\n 
shps.add_field('position','string',20)\n\n if os.name == 'nt':\n font=\"-adobe-helvetica-medium-r-*-*-12-*-*-*-*-*-*-*\"\n else:\n #font=\"-adobe-helvetica-medium-r-*-*-12-*-*-*-*-*-*-*\"\n #font=\"-urw-helvetica-medium-r-normal-*-9-*-*-*-p-*-iso8859-2\"\n font=\"-adobe-helvetica-medium-r-normal-*-8-*-*-*-p-*-iso10646-1\"\n #font=\"-misc-fixed-medium-r-*-*-9-*-*-*-*-*-*-*\"\n\n\n # Round to nearest integer space\n max_x=min_x+numpy.floor((max_x-min_x)/x_spacing)*x_spacing\n max_y=min_y+numpy.floor((max_y-min_y)/y_spacing)*y_spacing\n\n lxoff=(max_x-min_x)*xoff # horizontal label placement\n lyoff=(max_y-min_y)*yoff # vertical label placement\n\n for hval in numpy.arange(min_x,\n max_x+x_spacing/100.0,\n x_spacing):\n nshp=gview.GvShape(type=gview.GVSHAPE_LINE)\n nshp.set_node(hval,max_y,0,0)\n nshp.set_node(hval,min_y,0,1)\n shps.append(nshp)\n pshp=gview.GvShape(type=gview.GVSHAPE_POINT)\n pshp.set_node(hval,min_y+lyoff)\n pshp.set_property('position',\"%d\" % int(hval+0.5))\n shps.append(pshp)\n\n for vval in numpy.arange(min_y,\n max_y+y_spacing/100.0,\n y_spacing):\n nshp=gview.GvShape(type=gview.GVSHAPE_LINE)\n nshp.set_node(min_x,vval,0,0)\n nshp.set_node(max_x,vval,0,1)\n shps.append(nshp)\n pshp=gview.GvShape(type=gview.GVSHAPE_POINT)\n pshp.set_node(min_x+lxoff,vval)\n pshp.set_property('position',\"%d\" % int(vval+0.5))\n shps.append(pshp)\n\n cstr=gvogrfs.gv_to_ogr_color(color)\n if len(cstr) < 9:\n cstr=cstr+\"FF\"\n clstr=str(color[0])+' '+str(color[1])+' '+str(color[2])+' '+str(color[3])\n\n layer=gview.GvShapesLayer(shps)\n layer.set_property('_line_color',clstr)\n layer.set_property('_point_color',clstr)\n # Set antialias property so that lines look nice\n # when rotated.\n layer.set_property('_gl_antialias','1')\n layer.set_property('_gv_ogrfs_point',\n 'LABEL(t:{position},f:\"'+font+'\",c:'+cstr+')')\n layer.set_read_only(True) \n\n return layer", "def _vtkGmsh(indGmsh):\n if (indGmsh == 1):\n indVtk = 3\n pass\n elif (indGmsh == 2):\n indVtk = 5\n pass\n elif (indGmsh == 3):\n indVtk = 9\n pass\n elif (indGmsh == 4):\n indVtk = 10\n pass\n elif (indGmsh == 5):\n indVtk = 12\n pass\n elif indGmsh == 6: # 6-node prism\n indVtk = 13\n pass\n elif indGmsh == 7: # 5-node pyramid\n indVtk = 14\n pass\n return indVtk", "def execute(self, parameters, messages):\r\n import math\r\n import os\r\n import arcpy\r\n\r\n # set default values\r\n out_layer_path = arcpy.env.scratchWorkspace\r\n if not out_layer_path:\r\n out_layer_path = arcpy.env.scratchGDB\r\n in_layer = parameters[0].valueAsText\r\n spacing = parameters[1].valueAsText\r\n out_layer_name = parameters[5].valueAsText\r\n out_layer_abspath = arcpy.os.path.join(out_layer_path, out_layer_name)\r\n dens_field = parameters[2].valueAsText\r\n geodesic_field_value = parameters[3].valueAsText\r\n loxodrome_field_value = parameters[4].valueAsText\r\n \r\n def densify_points(input_layer, out_layer, dens_spacing, dens_type_field):\r\n counter = 0\r\n current_point = arcpy.Point()\r\n in_field_names = ['SHAPE@' if f.type == 'Geometry' else f.name for f in arcpy.ListFields(input_layer)]\r\n geom_field_index = in_field_names.index('SHAPE@')\r\n dens_field_index = in_field_names.index(dens_type_field)\r\n current_dens_type = None\r\n # cursor to write output layer\r\n cur = arcpy.da.InsertCursor(out_layer, \"*\")\r\n # loop through the features densify\r\n with arcpy.da.SearchCursor(input_layer, in_field_names) as cursor:\r\n for row in cursor:\r\n\r\n # get the Densification type\r\n if row[dens_field_index] == 
geodesic_field_value:\r\n dens_type = \"GEODESIC\"\r\n elif row[dens_field_index] == loxodrome_field_value:\r\n dens_type = \"LOXODROME\"\r\n else:\r\n dens_type = None\r\n\r\n # this is to write the first point\r\n if counter == 0:\r\n # get the point geometry\r\n pt_geom = row[geom_field_index]\r\n first_point = pt_geom.getPart()\r\n # save point geometry for the next point\r\n current_point.X = first_point.X\r\n current_point.Y = first_point.Y\r\n # save the densification type for the next point\r\n current_dens_type = dens_type\r\n # save the row for later\r\n row_list = list(row)\r\n row_list.append('Original')\r\n row = tuple(row_list)\r\n cur.insertRow(row)\r\n\r\n # This is for subsequent points\r\n elif counter > 0:\r\n start_point = current_point\r\n start_point_geometry = arcpy.PointGeometry(start_point, in_srs)\r\n next_point_geometry = row[geom_field_index]\r\n next_point = next_point_geometry.getPart()\r\n next_point_geometry = arcpy.PointGeometry(next_point, in_srs)\r\n if current_dens_type is not None: # densify if loxodrome or geodesic\r\n angle, distance = start_point_geometry.angleAndDistanceTo(next_point_geometry,\r\n current_dens_type)\r\n # determine how many densified segments there will be\r\n segment_count = int(math.ceil(distance / float(dens_spacing)))\r\n # adjust the spacing distance\r\n segment_length = distance / segment_count\r\n # find every waypoint along segment\r\n for i in range(1, segment_count):\r\n waypoint = start_point_geometry.pointFromAngleAndDistance(angle,\r\n segment_length * i,\r\n current_dens_type)\r\n point = arcpy.Point(waypoint.extent.XMax, waypoint.extent.YMax)\r\n current_point.X = point.X\r\n current_point.Y = point.Y\r\n # write to output layer\r\n row_list = list(row)\r\n row_list.append(\"Densified\")\r\n row_list[geom_field_index] = (point.X, point.Y)\r\n out_row = tuple(row_list)\r\n cur.insertRow(out_row)\r\n # save point geometry for the next point\r\n current_point.X = next_point.X\r\n current_point.Y = next_point.Y\r\n # save the densification type for the next point\r\n current_dens_type = dens_type\r\n else: # don't densify if not loxodrome or geodesic, just copy the point\r\n # write to output layer\r\n row_list = list(row)\r\n row_list.append(\"not densified\")\r\n row_list[geom_field_index] = next_point_geometry\r\n out_row = tuple(row_list)\r\n cur.insertRow(out_row)\r\n # save point geometry for the next point\r\n current_point.X = next_point.X\r\n current_point.Y = next_point.Y\r\n # save the densification type for the next point\r\n current_dens_type = dens_type\r\n # write the last point \r\n row_list = list(row)\r\n row_list.append('Original')\r\n row = tuple(row_list)\r\n cur.insertRow(row)\r\n counter += 1\r\n if cur:\r\n del cur\r\n\r\n # get input geometry type\r\n desc = arcpy.Describe(in_layer)\r\n in_srs = desc.spatialReference\r\n arcpy.env.overwriteOutput = True\r\n if desc.featureType == \"Simple\" and desc.datasetType == \"FeatureClass\":\r\n # create output layer\r\n arcpy.CreateFeatureclass_management(out_layer_path,\r\n out_layer_name,\r\n \"POINT\",\r\n in_layer,\r\n spatial_reference=in_srs)\r\n\r\n # add field for pointType (original or densified)\r\n fields = arcpy.ListFields(in_layer)\r\n field_name_list = [field.name for field in fields]\r\n point_type_field = ''\r\n for fieldName in [\"pointType\", \"pntType\", \"pntTyp\"]:\r\n if fieldName not in field_name_list:\r\n point_type_field = fieldName\r\n break\r\n arcpy.AddField_management(out_layer_abspath,\r\n point_type_field,\r\n \"TEXT\",\r\n 
field_length=50)\r\n\r\n # run the densification\r\n densify_points(in_layer, out_layer_abspath, spacing, dens_field)\r\n\r\n # add output layer to map TOC\r\n \r\n # determine ArcMap or ArcPro\r\n arc_version = None\r\n try:\r\n import arcpy.mapping\r\n arc_version = \"ArcMap\"\r\n except ImportError as error:\r\n try:\r\n import arcpy.mp\r\n arc_version = \"ArcPro\"\r\n except ImportError as error:\r\n arcpy.AddMessage(error)\r\n \r\n if arc_version == \"ArcMap\":\r\n # ArcMap method\r\n mxd = arcpy.mapping.MapDocument(\"CURRENT\")\r\n data_frame = arcpy.mapping.ListDataFrames(mxd)[0]\r\n layer = arcpy.mapping.Layer(os.path.join(out_layer_path, out_layer_name))\r\n arcpy.mapping.AddLayer(data_frame, layer, \"AUTO_ARRANGE\")\r\n if arc_version == \"ArcPro\":\r\n # ArcPro method\r\n project = arcpy.mp.ArcGISProject(\"CURRENT\")\r\n project_map = project.listMaps()[0]\r\n project_map.addDataFromPath(os.path.join(out_layer_path, out_layer_name))\r\n arcpy.GetMessages()", "def plot_feeding_zones_and_power_law_fit_MMEN_per_system_observed_and_physical(sssp_per_sys, sssp, n_mult_min=2, n_mult_max=10, max_core_mass=10., prescription='CL2013', n=10., a0=1., p0=1., p1=-1.5, scale_up=False, N_sys=10):\n y_sym_star = '*' if scale_up else ''\n assert 2 <= n_mult_min <= n_mult_max\n count = 0\n for i,det_sys in enumerate(sssp_per_sys['det_all']):\n if count >= N_sys:\n break\n if n_mult_min <= np.sum(det_sys) <= n_mult_max:\n count += 1\n print('##### System %s (i=%s):' % (count, i))\n\n Mstar = sssp['Mstar_all'][i]\n Mp_sys = sssp_per_sys['mass_all'][i]\n core_mass_sys = np.copy(Mp_sys) # all planet masses including padded zeros\n core_mass_sys[core_mass_sys > max_core_mass] = max_core_mass\n R_sys = sssp_per_sys['radii_all'][i] # all planet radii including padded zeros\n a_sys = sssp_per_sys['a_all'][i] # all semimajor axes including padded zeros\n\n core_mass_sys_obs = np.copy(Mp_sys)[det_sys == 1] # masses of observed planets\n core_mass_sys_obs[core_mass_sys_obs > max_core_mass] = max_core_mass\n R_sys_obs = R_sys[det_sys == 1] # radii of observed planets\n a_sys_obs = a_sys[det_sys == 1] # semimajor axes of observed planets\n core_mass_sys = core_mass_sys[a_sys > 0] # masses of all planets\n R_sys = R_sys[a_sys > 0] # radii of all planets\n a_sys = a_sys[a_sys > 0] # semimajor axes of all planets\n\n sigma_sys = solid_surface_density_prescription(core_mass_sys, R_sys, a_sys, Mstar=Mstar, n=n, prescription=prescription) # using all planets\n sigma_sys_obs = solid_surface_density_prescription(core_mass_sys_obs, R_sys_obs, a_sys_obs, Mstar=Mstar, n=n, prescription=prescription) # using observed planets only\n\n if scale_up:\n sigma0, beta, scale_factor_sigma0 = fit_power_law_and_scale_up_MMEN(a_sys, sigma_sys, a0=a0, p0=p0, p1=p1)\n sigma0_obs, beta_obs, scale_factor_sigma0_obs = fit_power_law_and_scale_up_MMEN(a_sys_obs, sigma_sys_obs, a0=a0, p0=p0, p1=p1)\n else:\n sigma0, beta = fit_power_law_MMEN(a_sys, sigma_sys, a0=a0, p0=p0, p1=p1)\n scale_factor_sigma0 = 1.\n sigma0_obs, beta_obs = fit_power_law_MMEN(a_sys_obs, sigma_sys_obs, a0=a0, p0=p0, p1=p1)\n scale_factor_sigma0_obs = 1.\n\n # Plot the system:\n a_array = np.linspace(1e-3,2,1001)\n sigma_MMSN = MMSN(a_array)\n\n delta_a_sys_S2014 = feeding_zone_S2014(core_mass_sys, R_sys, a_sys, Mstar=Mstar)\n delta_a_sys_nHill = feeding_zone_nHill(core_mass_sys, a_sys, Mstar=Mstar, n=n)\n delta_a_sys_RC2014, a_bounds_sys = feeding_zone_RC2014(a_sys)\n\n fig = plt.figure(figsize=(12,8))\n plot = 
GridSpec(1,1,left=0.1,bottom=0.15,right=0.95,top=0.95,wspace=0,hspace=0)\n ax = plt.subplot(plot[0,0])\n plt.scatter(a_sys, np.log10(sigma_sys), marker='o', s=100.*R_sys**2., facecolors='none', edgecolors='k', label='All planets')\n plt.scatter(a_sys_obs, np.log10(sigma_sys_obs), marker='o', s=100.*R_sys_obs**2., color='k', label='Observed planets')\n for j,a in enumerate(a_sys):\n # Plot various feeding zones for each planet:\n da_S2014 = delta_a_sys_S2014[j]\n da_nHill = delta_a_sys_nHill[j]\n #da_RC2014 = delta_a_sys_RC2014[j]\n #plt.plot([0.5*a, 1.5*a], [np.log10(1.15*sigma_sys[j])]*2, lw=1, color='k')\n #plt.plot([a - da_S2014/2., a + da_S2014/2.], [np.log10(1.05*sigma_sys[j])]*2, lw=1, color='r')\n #plt.plot([a - da_nHill/2., a + da_nHill/2.], [np.log10(0.95*sigma_sys[j])]*2, lw=1, color='b')\n #plt.plot([a_bounds_sys[j], a_bounds_sys[j+1]], [np.log10(0.85*sigma_sys[j])]*2, lw=1, color='m')\n #plt.plot([a_bounds_sys[j], a_bounds_sys[j+1]], [np.log10(sigma_sys[j])]*2, lw=2, color='k') # lines for feeding zones\n plt.axvspan(a_bounds_sys[j], a_bounds_sys[j+1], alpha=0.2 if j%2==0 else 0.1, ec=None, fc='k')\n\n # To compare the planet masses to the integrated disk masses:\n Mp_core = core_mass_sys[j]\n plt.annotate(r'${:0.1f} M_\\oplus$'.format(Mp_core), (a, np.log10(1.5*sigma_sys[j])), ha='center', fontsize=16)\n if prescription == 'CL2013':\n Mp_intdisk = solid_mass_integrated_r0_to_r_given_power_law_profile(1.5*a, 0.5*a, sigma0, beta, a0=a0)\n print('Planet (core) mass: {:0.2f} M_earth --- Integrated disk mass (CL2013): {:0.2f} M_earth'.format(Mp_core, Mp_intdisk))\n elif prescription == 'S2014':\n Mp_intdisk = solid_mass_integrated_r0_to_r_given_power_law_profile(a + da_S2014/2., a - da_S2014/2., sigma0, beta, a0=a0)\n print('Planet (core) mass: {:0.2f} M_earth --- Integrated disk mass (S2014): {:0.2f} M_earth'.format(Mp_core, Mp_intdisk))\n elif prescription == 'nHill':\n Mp_intdisk = solid_mass_integrated_r0_to_r_given_power_law_profile(a + da_nHill/2., a - da_nHill/2., sigma0, beta, a0=a0)\n print('Planet (core) mass: {:0.2f} M_earth --- Integrated disk mass (nHill): {:0.2f} M_earth'.format(Mp_core, Mp_intdisk))\n elif prescription == 'RC2014':\n Mp_intdisk = solid_mass_integrated_r0_to_r_given_power_law_profile(a_bounds_sys[j+1], a_bounds_sys[j], sigma0, beta, a0=a0)\n print('Planet (core) mass: {:0.2f} M_earth --- Integrated disk mass (RC2014): {:0.2f} M_earth'.format(Mp_core, Mp_intdisk))\n\n plt.plot(a_array, np.log10(MMEN_power_law(a_array, sigma0, beta, a0=a0)), lw=3, ls='-', color='r', label=r'Fit to all planets ($\\Sigma_0^%s = {:0.0f}$, $\\beta = {:0.2f}$)'.format(sigma0, beta) % y_sym_star)\n plt.plot(a_array, np.log10(MMEN_power_law(a_array, sigma0_obs, beta_obs, a0=a0)), lw=3, ls='--', color='r', label=r'Fit to observed planets ($\\Sigma_0^%s = {:0.0f}$, $\\beta = {:0.2f}$)'.format(sigma0_obs, beta_obs) % y_sym_star)\n plt.plot(a_array, np.log10(sigma_MMSN), lw=3, color='g', label=r'MMSN ($\\Sigma_0 = {:0.0f}$, $\\beta = {:0.2f}$)'.format(MMSN(a0), -1.5))\n ax.tick_params(axis='both', labelsize=20)\n plt.gca().set_xscale(\"log\")\n plt.xticks([0.05, 0.1, 0.2, 0.4, 0.8])\n ax.get_xaxis().set_major_formatter(ticker.ScalarFormatter())\n plt.xlim([0.04,0.9])\n plt.ylim([-0.5,5.5])\n plt.xlabel(r'Semimajor axis, $a$ (AU)', fontsize=20)\n plt.ylabel(r'Surface density, $\\log_{10}(\\Sigma/{\\rm g\\,cm}^{-2})$', fontsize=20)\n plt.legend(loc='lower left', bbox_to_anchor=(0.,0.), ncol=1, frameon=False, fontsize=16)\n\n plt.show()", "def update_visual_field(vars_):\n 
vars_.visual_field = np.random.normal(size=vars_.number_of_locs)\n vars_.visual_field = vars_.visual_field / vars_.dprime_map[vars_.focus] - 0.5\n vars_.visual_field[vars_.target_location] += 1.0", "def WindingDesign(main):\n oEditor = main['ANSYS']['oEditor']\n\n # Slots number\n Slots = main['ANSYS']['FixedVariables']['Slots']\n\n # SlotType\n SlotType = main['ANSYS']['FixedVariables']['SlotType']\n\n # Geimetric parameters\n g = main['ANSYS']['DesignProperties']['Stator']['g']\n\n Hs0 = main['ANSYS']['DesignProperties']['Slot']['Hs0']\n Hs1 = main['ANSYS']['DesignProperties']['Slot']['Hs1']\n Hs2 = main['ANSYS']['DesignProperties']['Slot']['Hs2']\n Bs1 = main['ANSYS']['DesignProperties']['Slot']['Bs1']\n Bs2 = main['ANSYS']['DesignProperties']['Slot']['Bs2']\n\n DiaGap = main['ANSYS']['DesignProperties']['Rotor']['DiaGap']\n\n # Coils Arrange ABC\n PhasesABC = main['ANSYS']['Winding']['ABC']\n\n # Color used for phases\n Color = main['ANSYS']['Winding']['Color']\n\n oEditor.CreateUserDefinedPart(\n [\n \"NAME:UserDefinedPrimitiveParameters\",\n \"DllName:=\"\t\t, \"RMxprt/LapCoil.dll\",\n \"Version:=\"\t\t, \"16.0\",\n \"NoOfParameters:=\"\t, 22,\n \"Library:=\"\t\t, \"syslib\",\n [\n \"NAME:ParamVector\",\n [\n \"NAME:Pair\",\n \"Name:=\"\t\t, \"DiaGap\",\n \"Value:=\"\t\t, \"DiaGap+g*2\"\n ],\n [\n \"NAME:Pair\",\n \"Name:=\"\t\t, \"DiaYoke\",\n \"Value:=\"\t\t, \"DiaYoke\"\n ],\n [\n \"NAME:Pair\",\n \"Name:=\"\t\t, \"Length\",\n \"Value:=\"\t\t, \"0mm\"\n ],\n [\n \"NAME:Pair\",\n \"Name:=\"\t\t, \"Skew\",\n \"Value:=\"\t\t, \"0deg\"\n ],\n [\n \"NAME:Pair\",\n \"Name:=\"\t\t, \"Slots\",\n \"Value:=\"\t\t, str(int(Slots))\n ],\n [\n \"NAME:Pair\",\n \"Name:=\"\t\t, \"SlotType\",\n \"Value:=\"\t\t, str(int(SlotType))\n ],\n [\n \"NAME:Pair\",\n \"Name:=\"\t\t, \"Hs0\",\n \"Value:=\"\t\t, \"Hs0\"\n ],\n [\n \"NAME:Pair\",\n \"Name:=\"\t\t, \"Hs1\",\n \"Value:=\"\t\t, \"Hs1\"\n ],\n [\n \"NAME:Pair\",\n \"Name:=\"\t\t, \"Hs2\",\n \"Value:=\"\t\t, \"Hs2\"\n ],\n [\n \"NAME:Pair\",\n \"Name:=\"\t\t, \"Bs0\",\n \"Value:=\"\t\t, \"Bs0\"\n ],\n [\n \"NAME:Pair\",\n \"Name:=\"\t\t, \"Bs1\",\n \"Value:=\"\t\t, \"Bs1\"\n ],\n [\n \"NAME:Pair\",\n \"Name:=\"\t\t, \"Bs2\",\n \"Value:=\"\t\t, \"Bs2\"\n ],\n [\n \"NAME:Pair\",\n \"Name:=\"\t\t, \"Rs\",\n \"Value:=\"\t\t, \"Rs\"\n ],\n [\n \"NAME:Pair\",\n \"Name:=\"\t\t, \"FilletType\",\n \"Value:=\"\t\t, \"0\"\n ],\n [\n \"NAME:Pair\",\n \"Name:=\"\t\t, \"Layers\",\n \"Value:=\"\t\t, \"2\"\n ],\n [\n \"NAME:Pair\",\n \"Name:=\"\t\t, \"CoilPitch\",\n \"Value:=\"\t\t, \"1\"\n ],\n [\n \"NAME:Pair\",\n \"Name:=\"\t\t, \"EndExt\",\n \"Value:=\"\t\t, \"5mm\"\n ],\n [\n \"NAME:Pair\",\n \"Name:=\"\t\t, \"SpanExt\",\n \"Value:=\"\t\t, \"25mm\"\n ],\n [\n \"NAME:Pair\",\n \"Name:=\"\t\t, \"BendAngle\",\n \"Value:=\"\t\t, \"0deg\"\n ],\n [\n \"NAME:Pair\",\n \"Name:=\"\t\t, \"SegAngle\",\n \"Value:=\"\t\t, \"10deg\"\n ],\n [\n \"NAME:Pair\",\n \"Name:=\"\t\t, \"LenRegion\",\n \"Value:=\"\t\t, \"200mm\"\n ],\n [\n \"NAME:Pair\",\n \"Name:=\"\t\t, \"InfoCoil\",\n \"Value:=\"\t\t, \"0\"\n ]\n ]\n ],\n [\n \"NAME:Attributes\",\n \"Name:=\"\t\t, \"LapCoil1\",\n \"Flags:=\"\t\t, \"\",\n \"Color:=\"\t\t, \"(143 175 143)\",\n \"Transparency:=\"\t, 0,\n \"PartCoordinateSystem:=\", \"Global\",\n \"UDMId:=\"\t\t, \"\",\n \"MaterialValue:=\"\t, \"\\\"copper\\\"\",\n \"SurfaceMaterialValue:=\", \"\\\"\\\"\",\n \"SolveInside:=\"\t\t, True,\n \"ShellElement:=\"\t, False,\n \"ShellElementThickness:=\", \"0mm\",\n \"IsMaterialEditable:=\"\t, True,\n 
\"UseMaterialAppearance:=\", False,\n \"IsLightweight:=\"\t, False\n ]\n )\n\n # Body Separation\n oEditor.SeparateBody(\n [\n \"NAME:Selections\",\n \"Selections:=\"\t\t, \"LapCoil1\",\n \"NewPartsModelFlag:=\"\t, \"Model\"\n ],\n [\n \"CreateGroupsForNewObjects:=\", False\n ]\n )\n\n # Average Slot Width\n AverWidth = (Bs2 + Bs1)/2\n\n # Average Radius\n AverRadius = DiaGap/2 + g + Hs0 + Hs1 + Hs2*0.75\n\n # Angle to shift and find the kth tooth\n ShiftSlot = 1/Slots*np.pi\n\n # Angle to fond the corrent layer\n ShiftLayer = np.arctan(AverWidth/4/AverRadius)\n\n # List to save the coils sides names\n WindingNames = [[], [], []]\n\n # Phases name to employed\n PhaseNames = ['A', 'B', 'C']\n\n for phase, row in enumerate(PhasesABC):\n\n PhaseName = [[], []]\n\n for coil, slot in enumerate(row):\n\n SlotAngle = np.abs(slot)/Slots*2*np.pi - ShiftSlot\n\n if coil % 2 == 1:\n SlotAngle = SlotAngle - ShiftLayer\n\n else:\n SlotAngle = SlotAngle + ShiftLayer\n\n x = np.cos(SlotAngle)*AverRadius\n y = np.sin(SlotAngle)*AverRadius\n\n Name0 = oEditor.GetBodyNamesByPosition(\n [\n \"NAME:Parameters\",\n \"XPosition:=\", str(x)+\"mm\",\n \"YPosition:=\", str(y)+\"mm\",\n \"ZPosition:=\", \"0mm\"\n ]\n )\n\n C = Color[phase]\n\n if np.sign(slot) == 1:\n\n CoilSideName = PhaseNames[phase]+\"In\"+str(np.abs(coil))\n\n PhaseName[0] += [CoilSideName]\n\n oEditor.ChangeProperty(\n [\n \"NAME:AllTabs\",\n [\n \"NAME:Geometry3DAttributeTab\",\n [\n \"NAME:PropServers\",\n Name0[0]\n ],\n [\n \"NAME:ChangedProps\",\n [\n \"NAME:Name\",\n \"Value:=\"\t\t,\n CoilSideName\n ],\n [\n \"NAME:Color\",\n \"R:=\"\t\t\t, C[0],\n \"G:=\"\t\t\t, C[1],\n \"B:=\"\t\t\t, C[2]\n ],\n\n ]\n ]\n ]\n )\n else:\n\n CoilSideName = PhaseNames[phase]+\"Out\"+str(np.abs(coil))\n\n PhaseName[1] += [CoilSideName]\n\n oEditor.ChangeProperty(\n [\n \"NAME:AllTabs\",\n [\n \"NAME:Geometry3DAttributeTab\",\n [\n \"NAME:PropServers\",\n Name0[0]\n ],\n [\n \"NAME:ChangedProps\",\n [\n \"NAME:Name\",\n \"Value:=\"\t\t,\n CoilSideName\n ],\n [\n \"NAME:Color\",\n \"R:=\"\t\t\t, C[0],\n \"G:=\"\t\t\t, C[1],\n \"B:=\"\t\t\t, C[2],\n ],\n\n ]\n ]\n ]\n )\n\n WindingNames[phase] += PhaseName\n\n main['ANSYS']['Winding']['CoilNames'] = WindingNames\n\n return main", "def main():\n utl.calibrate(False)\n undistort(False)\n edge_detect(False)\n transform(False)\n identify_line(False)\n lane_line(True)", "def setDisplayWireframe():\n for node in nuke.allNodes():\n print node.name()\n goodGeo = [\"Group\", \"ReadGeo\",\"ReadGeo2\",\"Sphere\",\"Cube\",\"Cylinder\",\"Card\", \"Card2\"]\n if node.Class() in goodGeo:\n if node.Class() == \"Group\":\n node.begin()\n for child in nuke.allNodes():\n if child.Class() in goodGeo:\n child['display'].setValue(1)\n node.end()\n else:\n node['display'].setValue(1)", "def plot_instructions_lim_mags(self):\n return self.__plot_instructions_lim_mags", "def print_seg_row_col(sp) : \n s, r, c = src_from_rc8x8(sp.peak_y_raw, sp.peak_x_raw)\n print('seg: %d, row: %.1f, col: %.1f' % (s, r, c))", "def change_resolution(self):", "def _update_farness_map(self,ind):", "def disp(sg, fg=0.1):\n pfm = 300000\n f = 0.4\n F = 0.5\n\n return 1.8 * (f/F)**(3/5) * G**(2/5) * pfm**(1/5) * fg**(-2/5) * sg**(1/5)", "def doParametersOfInterest(self):\r\n if self.fg4fixed:\r\n self.modelBuilder.doVar(\"CMS_zz4l_fg4[0]\")\r\n self.modelBuilder.doVar(\"r[1,0,4]\")\r\n print \"Fixing CMS_zz4l_fg4\"\r\n poi = \"r\"\r\n else:\r\n if self.modelBuilder.out.var(\"CMS_zz4l_fg4\"):\r\n print \"have fg4 inside\"\r\n else:\r\n 
self.modelBuilder.doVar(\"CMS_zz4l_fg4[0.,-1,1]\")\r\n poi = \"CMS_zz4l_fg4\"\r\n if self.cPOI:\r\n if self.modelBuilder.out.var(\"cww_zz\"):\r\n print \"have czz_ww inside\"\r\n else:\r\n self.modelBuilder.doVar(\"cww_zz[0.5,-10,10]\")\r\n poi += \",cww_zz\"\r\n\r\n if self.fg2POI:\r\n if self.modelBuilder.out.var(\"CMS_zz4l_fg2\"):\r\n print \"have fg2 inside\"\r\n else:\r\n self.modelBuilder.doVar(\"CMS_zz4l_fg2[0.,0,1]\")\r\n poi += \",CMS_zz4l_fg2\"\r\n if self.muFloating:\r\n self.modelBuilder.doVar(\"r[1,0,2000]\")\r\n if self.muAsPOI:\r\n print \"Treating r as a POI\"\r\n poi += \",r\"\r\n else:\r\n self.modelBuilder.out.var(\"r\").setAttribute(\"flatParam\")\r\n if self.phiFloating:\r\n if self.modelBuilder.out.var(\"CMS_zz4l_fg4phi\"):\r\n print \"have fg4phi inside\"\r\n else: \r\n self.modelBuilder.doVar(\"CMS_zz4l_fg4phi[0.,-3.1415926,3.1415926]\")\r\n if self.phiPOI:\r\n poi += \",CMS_zz4l_fg4phi\"\r\n else:\r\n self.modelBuilder.out.var(\"CMS_zz4l_fg4phi\").setAttribute(\"flatParam\")\r\n if self.phi2Floating:\r\n #self.modelBuilder.doVar(\"CMS_zz4l_fg4phi[0.,-math.pi,math.pi]\")\r\n if self.modelBuilder.out.var(\"CMS_zz4l_fg2phi\"):\r\n print \"have fg2phi inside\"\r\n else: \r\n self.modelBuilder.doVar(\"CMS_zz4l_fg2phi[0.,-3.1415926,3.1415926]\")\r\n self.modelBuilder.out.var(\"CMS_zz4l_fg2phi\").setAttribute(\"flatParam\")\r\n \r\n self.modelBuilder.doSet(\"POI\",poi)", "def draw_house_foundation(x, y, width, height):\n print('Типа рисую фундамент...', x, y, width, height)", "def noAxisSystem():\n dislin.nograf()", "def setResNameCheckCoords(self):\n exit = False\n localDir = os.path.abspath('.')\n if not os.path.exists(self.tmpDir):\n os.mkdir(self.tmpDir)\n #if not os.path.exists(os.path.join(tmpDir, self.inputFile)):\n copy2(self.absInputFile, self.tmpDir)\n os.chdir(self.tmpDir)\n\n if self.ext == '.pdb':\n tmpFile = open(self.inputFile, 'r')\n else:\n cmd = '%s -i %s -fi %s -o tmp -fo ac -pf y' % \\\n (self.acExe, self.inputFile, self.ext[1:])\n self.printDebug(cmd)\n out = getoutput(cmd)\n if not out.isspace():\n self.printDebug(out)\n try:\n tmpFile = open('tmp', 'r')\n except:\n rmtree(self.tmpDir)\n raise\n\n tmpData = tmpFile.readlines()\n residues = set()\n coords = {}\n for line in tmpData:\n if 'ATOM ' in line or 'HETATM' in line:\n residues.add(line[17:20])\n at = line[0:17]\n cs = line[30:54]\n if coords.has_key(cs):\n coords[cs].append(at)\n else:\n coords[cs] = [at]\n #self.printDebug(coords)\n\n if len(residues) > 1:\n self.printError(\"more than one residue detected '%s'\" % str(residues))\n self.printError(\"verify your input file '%s'. 
Aborting ...\" % self.inputFile)\n sys.exit(1)\n\n dups = \"\"\n short = \"\"\n long = \"\"\n longSet = set()\n id = 0\n items = coords.items()\n l = len(items)\n for item in items:\n id += 1\n if len(item[1]) > 1: # if True means atoms with same coordinates\n for i in item[1]:\n dups += \"%s %s\\n\" % (i, item[0])\n\n# for i in xrange(0,len(data),f):\n# fdata += (data[i:i+f])+' '\n\n for id2 in xrange(id,l):\n item2 = items[id2]\n c1 = map(float,[item[0][i:i+8] for i in xrange(0,24,8)])\n c2 = map(float,[item2[0][i:i+8] for i in xrange(0,24,8)])\n dist2 = self.distance(c1,c2)\n if dist2 < minDist2:\n dist = math.sqrt(dist2)\n short += \"%8.5f %s %s\\n\" % (dist, item[1], item2[1])\n if dist2 < maxDist2: # and not longOK:\n longSet.add(str(item[1]))\n longSet.add(str(item2[1]))\n if str(item[1]) not in longSet:\n long += \"%s\\n\" % item[1]\n\n if dups:\n self.printError(\"Atoms with same coordinates in '%s'!\" % self.inputFile)\n self.printQuoted(dups[:-1])\n exit = True\n\n if short:\n self.printError(\"Atoms TOO close (< %s Ang.)\" % minDist)\n self.printQuoted(\"Dist (Ang.) Atoms\\n\" + short[:-1])\n exit = True\n\n if long:\n self.printError(\"Atoms TOO alone (> %s Ang.)\" % maxDist)\n self.printQuoted(long[:-1])\n exit = True\n\n if exit:\n if self.force:\n self.printWarn(\"You chose to proceed anyway with '-f' option. GOOD LUCK!\")\n else:\n self.printError(\"Use '-f' option if you want to proceed anyway. Aborting ...\")\n rmtree(self.tmpDir)\n sys.exit(1)\n\n resname = list(residues)[0]\n newresname = resname\n\n if resname.isdigit() or 'E' in resname[1:3].upper() or 'ADD' in resname.upper():\n newresname = 'R' + resname\n if not resname.isalnum():\n newresname = 'MOL'\n if newresname != resname:\n self.printWarn(\"In %s.lib, residue name will be '%s' instead of '%s' elsewhere\"\n % (self.acBaseName, newresname, resname))\n\n self.resName = newresname\n\n os.chdir(localDir)\n self.printDebug(\"setResNameCheckCoords done\")", "def display0(*args):\n #----------*----------* # unpack\n twiss_func = args[0]\n cos_like = args[1]\n sin_like = args[2]\n lat_plot = args[3]\n #-------------------- Bahnkoordinate (z)\n z = [twiss_func(i,'s') for i in range(twiss_func.nbpoints)]\n sgx = [twiss_func(i,'sigx') for i in range(twiss_func.nbpoints)]\n sgy = [twiss_func(i,'sigy') for i in range(twiss_func.nbpoints)]\n # zero = [0. for i in range(sigma_fun.nbpoints)]\n #-------------------- trajectories (tz)\n tz= [cos_like(i,'s') for i in range(cos_like.nbpoints)]\n cx= [cos_like(i,'cx') for i in range(cos_like.nbpoints)]\n # cxp= [cos_like(i,'cxp') for i in range(cos_like.nbpoints)]\n cy= [cos_like(i,'cy') for i in range(cos_like.nbpoints)]\n # cyp= [cos_like(i,'cyp') for i in range(cos_like.nbpoints)]\n # cz= [cos_like(i,'cz') for i in range(cos_like.nbpoints)]\n # cdp= [cos_like(i,'cdp') for i in range(cos_like.nbpoints)]\n\n sx= [sin_like(i,'sx') for i in range(sin_like.nbpoints)]\n # sxp= [sin_like(i,'sxp') for i in range(sin_like.nbpoints)]\n sy= [sin_like(i,'sy') for i in range(sin_like.nbpoints)]\n # syp= [sin_like(i,'syp') for i in range(sin_like.nbpoints)]\n # sz= [sin_like(i,'sz') for i in range(sin_like.nbpoints)]\n # sdp= [sin_like(i,'sdp') for i in range(sin_like.nbpoints)]\n #-------------------- lattice viseo\n stop_viseox = 5 # stop viseo plot after so many [m]\n stop_viseoy = 5 # stop viseo plot after so many [m]\n vzero = [0. 
for i in range(lat_plot.nbpoints)] # zero line\n vis_abszisse = [lat_plot(i,'s') for i in range(lat_plot.nbpoints)]\n vis_ordinate = [lat_plot(i,'viseo') for i in range(lat_plot.nbpoints)]\n #-------------------- figure frame\n width=14; height=7.6\n plt.figure(num=0,figsize=(width,height),facecolor='#eaecef',tight_layout=False)\n\n #-------------------- transverse X\n splot211=plt.subplot(211)\n splot211.set_title('transverse x')\n plt.plot(z,sgx ,label=r'$\\sigma$ [m]',color='green')\n plt.plot(tz,cx ,label='Cx[m]', color='blue',linestyle='-')\n # plt.plot(tz,cxp,label=\"Cx'[m]\",color='blue',linestyle=':')\n plt.plot(tz,sx, label='Sx[m]', color='red' ,linestyle='-')\n # plt.plot(tz,sxp,label=\"Sx'[m]\",color='red' ,linestyle=':')\n # vscale=plt.axis()[3]*0.1\n # viseox = [x*vscale for x in vis_ordinate]\n # for i,s in enumerate(vis_abszisse):\n # if s > stop_viseox:\n # viseox[i] = 0.\n # plt.plot(vis_abszisse,viseox,label='',color='black')\n plt.plot(vis_abszisse,vzero,color='black')\n plt.legend(loc='lower right',fontsize='x-small')\n\n #-------------------- transverse Y\n splot212=plt.subplot(212)\n splot212.set_title('transverse y')\n plt.plot(z,sgy ,label=r'$\\sigma$ [m]',color='green')\n plt.plot(tz,cy, label='Cy[m]', color='blue',linestyle='-')\n # plt.plot(tz,cyp,label=\"Cy'[m]\",color='blue',linestyle=':')\n plt.plot(tz,sy, label='Sy[m]', color='red' ,linestyle='-')\n # plt.plot(tz,syp,label=\"Sy'[m]\",color='red' ,linestyle=':')\n vscale=plt.axis()[3]*0.1\n viseoy = [x*vscale for x in vis_ordinate]\n # for i,s in enumerate(vis_abszisse):\n # if s > stop_viseoy:\n # viseoy[i] = 0.\n plt.plot(vis_abszisse,viseoy,label='',color='black')\n plt.plot(vis_abszisse,vzero,color='black')\n plt.legend(loc='lower right',fontsize='x-small')", "def plot_lineratios(modeldata,modeldata2='None',line1='CIV1551',line2='CIII1908',line3='CIV1551',line4='HeII1640',\n plotname='./TESTPLOT.pdf',Zgas=False,logU=False,xid=0.3,nh=100,COratio=0.38,Mcutoff=100,\n logx=True,logy=True,logp1=False,logp2=False,fixxrange=False,fixyrange=False,\n showobs=None,noobserr=False,verbose=True):\n NFalse = 0\n freeparam = []\n inforstr = \"\"\n # - - - - - - - - - - - - - - - - - - - - - - - -\n legenddic = {}\n legenddic['Zgas'] = r'Z$_\\textrm{gas}$'\n legenddic['logUs'] = r'log$_\\textrm{10}$(U)'\n legenddic['xid'] = r'$\\xi_\\textrm{d}$'\n legenddic['nh'] = r'n$_\\textrm{H}$ / [cm$^3$]'\n legenddic['COCOsol'] = r'C/O / [C/O]$_\\textrm{sun}$'\n legenddic['mup'] = r'M$_\\textrm{cut IMF}$ / [M$_\\textrm{sun}]$'\n # - - - - - - - - - - - - - - - - - - - - - - - -\n if not Zgas:\n Zgasrange = [0.0,1.0]\n NFalse = NFalse + 1.0\n #inforstr = inforstr+' Zgas:vary, '\n freeparam.append('Zgas')\n else:\n Zgasrange = [Zgas-1e-6,Zgas+1e-6]\n inforstr = inforstr+' '+legenddic['Zgas']+'='+str(Zgas)+', '\n # - - - - - - - - - - - - - - - - - - - - - - - -\n if not logU:\n logUrange = [-5.0,0.0]\n NFalse = NFalse + 1.0\n #inforstr = inforstr+' logU:vary, '\n freeparam.append('logUs')\n else:\n logUrange = [logU-0.1,logU+0.1]\n inforstr = inforstr+' '+legenddic['logUs']+'='+str(logU)+', '\n # - - - - - - - - - - - - - - - - - - - - - - - -\n if not xid:\n xidrange = [0.0,0.6]\n NFalse = NFalse + 1.0\n #inforstr = inforstr+' xid:vary, '\n freeparam.append('xid')\n else:\n xidrange = [xid-0.01,xid+0.01]\n inforstr = inforstr+' '+legenddic['xid']+'='+str(xid)+', '\n # - - - - - - - - - - - - - - - - - - - - - - - -\n if not nh:\n nhrange = [0.0,1.0e6]\n NFalse = NFalse + 1.0\n #inforstr = inforstr+' nH:vary, '\n 
freeparam.append('nh')\n else:\n nhrange = [nh-1.0,nh+1.0]\n inforstr = inforstr+' '+legenddic['nh']+'='+str(nh)+', '\n # - - - - - - - - - - - - - - - - - - - - - - - -\n if not COratio:\n COratiorange = [0.0,2.0]\n NFalse = NFalse + 1.0\n #inforstr = inforstr+' C/O:vary, '\n freeparam.append('COCOsol')\n else:\n COratiorange = [COratio-0.001,COratio+0.001]\n inforstr = inforstr+' '+legenddic['COCOsol']+'='+str(COratio)+', '\n # - - - - - - - - - - - - - - - - - - - - - - - -\n if not Mcutoff:\n Mcutoffrange = [0.0,400.0]\n NFalse = NFalse + 1.0\n #inforstr = inforstr+' Mcutoff:vary, '\n freeparam.append('mup')\n else:\n Mcutoffrange = [Mcutoff-1.0,Mcutoff+1.0]\n inforstr = inforstr+' '+legenddic['mup']+'='+str(Mcutoff)+', '\n # - - - - - - - - - - - - - - - - - - - - - - - -\n\n if NFalse != 2:\n sys.exit(' Two and only two of the model parameters (Zgas,logU,xid,nh,COratio,Mcutoff) '\n 'should be set to Flase to define the model grid; however it appears '+str(NFalse)+\n ' parameters where not set')\n\n # - - - - - - - - - - - - - - - - - - - - - - - -\n goodent = np.where( (modeldata['Zgas'] > Zgasrange[0]) & (modeldata['Zgas'] < Zgasrange[1]) &\n (modeldata['logUs'] > logUrange[0]) & (modeldata['logUs'] < logUrange[1]) &\n (modeldata['xid'] > xidrange[0]) & (modeldata['xid'] < xidrange[1]) &\n (modeldata['nh'] > nhrange[0]) & (modeldata['nh'] < nhrange[1]) &\n (modeldata['COCOsol'] > COratiorange[0]) & (modeldata['COCOsol'] < COratiorange[1]) &\n (modeldata['mup'] > Mcutoffrange[0]) & (modeldata['mup'] < Mcutoffrange[1]) )\n\n Ngoodent = len(goodent[0])\n\n if Ngoodent > 1:\n if verbose: print(' - Getting data for '+str(Ngoodent)+' data points satisfying (SFR)model selection ')\n param1_1 = modeldata[freeparam[0]][goodent]\n if logp1:\n param1_1 = np.log10(param1_1)\n\n param1_2 = modeldata[freeparam[1]][goodent]\n if logp2:\n param1_2 = np.log10(param1_2)\n\n ratio1_1 = modeldata[line1][goodent]/modeldata[line2][goodent]\n ratio1_2 = modeldata[line3][goodent]/modeldata[line4][goodent]\n else:\n if verbose: print(' WARNING: Less than 2 (SFR)model grid points to plot; no output generated')\n return\n\n # - - - - - - - - - - - - - - - - - - - - - - - -\n if modeldata2 != 'None':\n goodent2 = np.where( (modeldata2['Zgas'] > Zgasrange[0]) & (modeldata2['Zgas'] < Zgasrange[1]) &\n (modeldata2['logUs'] > logUrange[0]) & (modeldata2['logUs'] < logUrange[1]) &\n (modeldata2['xid'] > xidrange[0]) & (modeldata2['xid'] < xidrange[1]) &\n (modeldata2['nh'] > nhrange[0]) & (modeldata2['nh'] < nhrange[1]) )\n\n Ngoodent2 = len(goodent2[0])\n\n if Ngoodent > 1:\n if verbose: print(' - Getting data for '+str(Ngoodent2)+' data points satisfying (AGN)model selection ')\n param2_1 = modeldata2[freeparam[0]][goodent2]\n if logp1:\n param2_1 = np.log10(param2_1)\n\n param2_2 = modeldata2[freeparam[1]][goodent2]\n if logp2:\n param2_2 = np.log10(param2_2)\n\n l2s = ['x','x','x','x'] # line names to use for Feltre+16 file\n for ll, linestr in enumerate([line1,line2,line3,line4]):\n if '1908' in linestr:\n l2 = linestr.replace('1908','1907')\n else:\n l2 = linestr\n\n l2s[ll] = l2\n\n ratio2_1 = modeldata2[l2s[0]][goodent2]/modeldata2[l2s[1]][goodent2]\n ratio2_2 = modeldata2[l2s[2]][goodent2]/modeldata2[l2s[3]][goodent2]\n else:\n if verbose: print(' WARNING: Less than 2 (AGN)model grid points to plot; no output generated')\n return\n\n # - - - - - - - - - - - PLOTTING - - - - - - - - - - -\n if verbose: print(' - Setting up and generating plot')\n plotname = plotname\n fig = plt.figure(figsize=(9, 5))\n 
fig.subplots_adjust(wspace=0.1, hspace=0.1,left=0.1, right=0.99, bottom=0.10, top=0.95)\n Fsize = 10\n lthick = 1\n marksize = 3\n plt.rc('text', usetex=True)\n plt.rc('font', family='serif',size=Fsize)\n plt.rc('xtick', labelsize=Fsize)\n plt.rc('ytick', labelsize=Fsize)\n plt.clf()\n plt.ioff()\n plt.title(inforstr[:-2],fontsize=Fsize)\n\n margin = 0.1\n dx = np.abs(np.max(ratio1_1)-np.min(ratio1_1))\n dy = np.abs(np.max(ratio1_2)-np.min(ratio1_2))\n\n\n if fixxrange:\n xrange = fixxrange\n else:\n if logx:\n xrange = [np.min(ratio1_1)-np.min(ratio1_1)/2.,np.max(ratio1_1)+np.max(ratio1_1)/2.]\n else:\n xrange = [np.min(ratio1_1)-dx*margin,np.max(ratio1_1)+dx*margin]\n\n if fixyrange:\n yrange = fixyrange\n else:\n if logy:\n yrange = [np.min(ratio1_2)-np.min(ratio1_2)/2.,np.max(ratio1_2)+np.max(ratio1_2)/2.]\n else:\n yrange = [np.min(ratio1_2)-dy*margin,np.max(ratio1_2)+dy*margin]\n\n # ------------ PARAM1 ------------\n cmap = plt.cm.get_cmap('winter')\n cmin = np.min(param1_1)\n cmax = np.max(param1_1)\n colnorm = matplotlib.colors.Normalize(vmin=cmin,vmax=cmax)\n cmaparr = np.linspace(cmin, cmax, 30) #cmax-cmin)\n mm = plt.cm.ScalarMappable(cmap=cmap)\n mm.set_array(cmaparr)\n cb1 = plt.colorbar(mm)#shrink=0.25\n\n pstr1 = legenddic[freeparam[0]]\n if logp1:\n pstr1 = r'log$_\\textrm{10}$('+pstr1+')'\n\n cb1.set_label(pstr1+' (outer circle) - Fixed: black line')\n\n for p1 in np.unique(param1_1):\n p1col = cmap(colnorm(p1))\n p1ent = np.where(param1_1 == p1)\n\n plt.plot(ratio1_1[p1ent],ratio1_2[p1ent],'-',lw=lthick, color='k',zorder=1)\n\n plt.errorbar(ratio1_1[p1ent],ratio1_2[p1ent],xerr=None,yerr=None,\n marker='o',lw=0, markersize=marksize*3,\n markerfacecolor=p1col,ecolor=p1col,markeredgecolor = 'k',zorder=10)\n\n if modeldata2 is not 'None':\n p1ent = np.where(param2_1 == p1)\n\n plt.plot(ratio2_1[p1ent],ratio2_2[p1ent],'-',lw=lthick, color='k',zorder=1)\n\n plt.errorbar(ratio2_1[p1ent],ratio2_2[p1ent],xerr=None,yerr=None,\n marker='D',lw=0, markersize=marksize*3,\n markerfacecolor=p1col,ecolor=p1col,markeredgecolor = 'k',zorder=10)\n\n\n # ------------ PARAM2 ------------\n cmap = plt.cm.get_cmap('spring')\n cmin = np.min(param1_2)\n cmax = np.max(param1_2)\n colnorm = matplotlib.colors.Normalize(vmin=cmin,vmax=cmax)\n cmaparr = np.linspace(cmin, cmax, 30) #cmax-cmin)\n mm = plt.cm.ScalarMappable(cmap=cmap)\n mm.set_array(cmaparr)\n cb2 = plt.colorbar(mm)#shrink=0.25\n\n pstr2 = legenddic[freeparam[1]]\n if logp2:\n pstr2 = 'log10('+pstr2+')'\n\n cb2.set_label(pstr2+' (inner circle) - Fixed: gray line')\n\n for p2 in np.unique(param1_2):\n p2col = cmap(colnorm(p2))\n p2ent = np.where(param1_2 == p2)\n\n plt.plot(ratio1_1[p2ent],ratio1_2[p2ent],'-',lw=lthick, color='gray',zorder=2)\n\n plt.errorbar(ratio1_1[p2ent],ratio1_2[p2ent],xerr=None,yerr=None,\n marker='o',lw=0, markersize=marksize*1.5,\n markerfacecolor=p2col,ecolor=p2col,markeredgecolor = 'k',zorder=20)\n\n if modeldata2 is not 'None':\n p2ent = np.where(param2_2 == p2)\n\n plt.plot(ratio2_1[p2ent],ratio2_2[p2ent],'-',lw=lthick, color='gray',zorder=2)\n\n plt.errorbar(ratio2_1[p2ent],ratio2_2[p2ent],xerr=None,yerr=None,\n marker='D',lw=0, markersize=marksize*1.5,\n markerfacecolor=p2col,ecolor=p2col,markeredgecolor = 'k',zorder=20)\n\n\n if showobs != None:\n for ii, objid in enumerate(showobs[:,0]):\n if (showobs[:,7][ii] > xrange[0]) & (showobs[:,7][ii] < xrange[1]) & \\\n (showobs[:,9][ii] > yrange[0]) & (showobs[:,9][ii] < yrange[1]):\n\n if noobserr:\n obsxerr = None\n obsyerr = None\n else:\n obsxerr = 
showobs[:,8][ii]\n obsyerr = showobs[:,10][ii]\n plt.errorbar(showobs[:,7][ii],showobs[:,9][ii],xerr=obsxerr,yerr=obsyerr,\n marker='*',lw=lthick, markersize=marksize*2,\n markerfacecolor='k',ecolor='k',markeredgecolor = 'k',zorder=30)\n\n plt.xlabel(line1+'/'+line2)\n plt.ylabel(line3+'/'+line4)\n\n plt.xlim(xrange)\n plt.ylim(yrange)\n\n if logx:\n plt.xscale('log')\n if logy:\n plt.yscale('log')\n\n #--------- LEGEND ---------\n # plt.errorbar(-1,-1,xerr=None,yerr=None,fmt='o',lw=lthick,ecolor='white', markersize=marksize*2,\n # markerfacecolor='white',markeredgecolor = 'k',label='Ground-based spec')\n #\n # leg = plt.legend(fancybox=True, loc='upper center',prop={'size':Fsize},ncol=1,numpoints=1)\n # #bbox_to_anchor=(1.25, 1.03)) # add the legend\n # leg.get_frame().set_alpha(0.7)\n #--------------------------\n\n if verbose: print(' Saving plot to'+plotname)\n plt.savefig(plotname)\n plt.clf()\n plt.close('all')", "def Screw(No, Pz, sDim=0, manulYN=False):\n\t\tlineNode = \"w_{}*\".format(No)\n\t\tlineN = Helper.Psline(lineNode)\n\t\tPB0 = lineN[0]\n\t\t# Helper.addFid(PB0,lableName=\"PB00\")\n\t\tPa = lineN[1]\n\t\t# Dim = Helper.estimateDim(Pz, Pa)\n\t\tLscrew = lineN[2]\n\t\tif manulYN is False:\n\t\t\tPB = PB0\n\t\t\tPT = Pa\n\t\t\tHelper.delNode(lineNode)\n\t\t\tB_T = np.linalg.norm(PB - PT)\n\t\t\tLength = 5 * (B_T // 5) - B_T\n\t\t\tscrewDim = np.around(lineN[3], 1)\n\t\t\tHelper.p2pexLine(PB, PT, Length, screwDim, \"w_{}_D:{}_L\".format(No, screwDim), \"red\")\n\t\telse:\n\t\t\tPT = Pa\n\t\t\t# Helper.addFid(PB0, lableName=\"PB0\")\n\t\t\tPB = Helper.probeVolume(PT, PB0)\n\t\t\t# Helper.addFid(PB)\n\t\t\t# logging.debug(\"PB:{}\".format(PB))\n\t\t\t# logging.debug(\"PT:{}\".format(PT))\n\t\t\tB_T = np.linalg.norm(PB - PT)\n\t\t\tLength = 5 * (B_T // 5) - B_T\n\t\t\tscrewDim = np.around(lineN[3], 1)\n\t\t\tHelper.delNode(lineNode)\n\t\t\tHelper.p2pexLine(PB, PT, Length, screwDim, \"w_{}_D:{}_L\".format(No, screwDim), \"red\")\n\t\treturn int(Length + B_T), screwDim, PB, PT", "def get_wd_phys(sed_name):\n new_name = sed_name.replace('.','_').split('_')\n teff = float(new_name[-2])\n if new_name[1]!='He':\n logg = 0.1*float(new_name[2])\n else:\n logg = 0.1*float(new_name[3])\n\n return teff, -999.0, logg", "def dicom_cli():", "def show3(dlist,r=2,c=2,greyscale=False,output=False,samerange=True):\n\n#distrib.show3((d63[:128,:128,0]-1,d0[:128,:128,0]-1,N.log(d63[:128,:128,0]),d63ga[:128,:128,0]),greyscale=True)\n\n M.clf()\n\n fig = M.figure(figsize=(6.4, 6.4), dpi=100) \n axesarr=N.array([[0.01,0.51,0.4,0.4],\n [0.51,0.51,0.4,0.4],\n [0.01,0.01,0.4,0.4],\n [0.51,0.01,0.4,0.4]])\n\n print axesarr\n colorbax = 1.*axesarr\n print colorbax\n colorbax[:,2] = 0.*colorbax[:,2] + 0.03\n colorbax[:,0] += 0.4\n\n print colorbax\n\n if greyscale:\n colorscheme='binary'\n else:\n colorscheme='jet'\n\n # d63, d0, log d63, d63g\n titlearr=[r'$\\delta$',r'$\\delta_{\\rm initial}$',r'$\\log(1+\\delta)$',r'$\\delta_{\\rm Gauss}$']\n\n if (dlist[1] != None):\n min23 = min(min(dlist[2].flatten()),min(dlist[3].flatten()))\n max23 = max(max(dlist[2].flatten()),max(dlist[3].flatten()))\n\n max0 = max(dlist[1].flatten())\n min0 = min(dlist[1].flatten())\n\n initfact = min(max23/max0,min23/min0)\n print min23,max23, initfact\n\n sc = 0\n for d in dlist:\n if (d != None):\n M.axes(axesarr[sc])\n M.title(titlearr[sc],fontsize=23)\n if (sc > 1):\n print titlearr[sc]\n if (samerange):\n M.pcolor(d,cmap=M.get_cmap(colorscheme),vmin = min23,vmax=max23)\n else:\n M.pcolor(d,cmap=M.get_cmap(colorscheme))\n elif 
(sc == 1):\n #print min(d.flatten()*initfact),max(d.flatten()*initfact)\n if (samerange):\n M.pcolor(d*initfact,cmap=M.get_cmap(colorscheme),vmin = min23,vmax=max23)\n else:\n M.pcolor(d,cmap=M.get_cmap(colorscheme))\n\n else:\n M.pcolor(d,cmap=M.get_cmap(colorscheme))\n\n# if (sc == 1):\n# M.colorbar(ticks=[-0.1,-0.05,0,0.05,0.1])\n# else:\n\n M.axis('tight')\n M.axis('equal')\n M.axis('tight')\n M.xticks([])\n M.yticks([])\n\n cax = M.axes(colorbax[sc])\n M.colorbar(cax=cax)\n\n sc += 1\n\n #M.savefig('showdens.eps',dpi=8)\n #M.gcf().set_size_inches((6.4,6.4))\n #M.gcf().set_size_inches((15.,12.))\n if (output):\n if greyscale:\n M.savefig('showdens_grey.png',dpi=100)\n M.savefig('showdens_grey.pdf')\n else:\n fig.savefig('showdens.png',dpi=100)\n M.savefig('showdens.pdf')\n\n #M.show()", "def asses_space(meas):\n truth = list(meas.spec.central)\n truth[meas.spec.ipar('syst_s1')] = 1\n data = meas.spec(truth)\n meas.spec.set_data(data)\n\n lls, xs, rels, prob = minutils.find_minima(meas.spec)\n\n print(\"Found %d minima with likelihoods:\" % len(lls))\n print(', '.join([\"%.3f\" % l for l in lls]))\n\n print(\"Global minimum is found %.3f%% of the time\" % (100*prob))\n\n l0 = draw_spectrum(meas, truth, True, label='truth', linestyle='--')\n l1 = draw_spectrum(meas, xs[0], True, label='fit')\n plt.legend(handles=[l0, l1])\n plt.savefig('spectrum-ll_0.pdf', format='pdf')\n plt.clf()\n\n for imin in range(1, len(lls)):\n print(\"Local minimum %.3f\" % lls[imin])\n\n isort = np.argsort(np.fabs(rels[imin]))[::-1]\n par1 = meas.spec.pars[isort[0]]\n par2 = meas.spec.pars[isort[1]]\n print(\"Differs in %s, %s\" % (par1, par2))\n\n print(\"%s_0: %.3f, %s_%d: %.3f\" % (\n par1,\n xs[0][meas.spec.ipar(par1)],\n par1, imin,\n xs[imin][meas.spec.ipar(par1)]))\n\n print(\"%s_0: %.3f, %s_%d: %.3f\" % (\n par2,\n xs[0][meas.spec.ipar(par2)],\n par2, imin,\n xs[imin][meas.spec.ipar(par2)]))\n\n draw_spectrum(meas, truth, True, label='truth', linestyle='--')\n draw_spectrum(meas, xs[imin], True, label='fit')\n plt.legend(handles=[l0, l1])\n plt.savefig('spectrum-ll_%d.pdf'%imin, format='pdf')\n plt.clf()\n\n vals = minutils.slice2d(meas.spec, xs[imin], par1, par2)\n plt.hist2d(\n vals.T[0], \n vals.T[1], \n weights=np.exp(vals.T[2]), \n bins=len(vals)**0.5,\n normed=True)\n \n plt.xlabel(par1)\n plt.ylabel(par2)\n\n cbar = plt.colorbar()\n cbar.set_label('Likelihood density')\n\n plt.savefig('min_%d.pdf' % imin, format='pdf')\n plt.clf()", "def printSpace(x0,y0):\n\t\t\tprint\n\n\t\t\t# keep track of the island ids\n\t\t\tids = set()\n\n\t\t\t# convert id to a letter\n\t\t\tdef idChr(_id):\n\t\t\t\treturn chr(_id+97)\n\n\t\t\t# print the map\n\t\t\ti = 0\n\t\t\tfor y in xrange(h):\n\t\t\t\tfor x in xrange(w):\n\t\t\t\t\tif (x,y) == (x0,y0):\n\t\t\t\t\t\t# show current location\n\t\t\t\t\t\tprint \"O\",\n\t\t\t\t\telse:\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t# show island label if available\n\t\t\t\t\t\t\t_id = findIslands.pixelToIslandId[i]\n\t\t\t\t\t\t\tids.add(_id)\n\t\t\t\t\t\t\tprint idChr(_id),\n\t\t\t\t\t\texcept KeyError:\n\t\t\t\t\t\t\t# just display character if no island info available\n\t\t\t\t\t\t\tprint space[i],\n\t\t\t\t\ti += 1\n\t\t\t\tprint\n\n\t\t\t# print \n\t\t\tprint \"island -> parent -> global owner\"\n\t\t\tfor _id in ids:\n\t\t\t\tprint \"%s -> %s -> %s\" % (\n\t\t\t\t\tidChr(_id),\n\t\t\t\t\tidChr(findIslands.islandIdToParent[_id]._id),\n\t\t\t\t\tidChr(findIslands.getIslandOwner(_id)._id))", "def scancommandline(args):\r\n global gv\r\n def aflag ():\r\n gv[\"label_a_pops\"] = 
True\r\n def bflag (tempval):\r\n gv[\"popboxspaceadj\"] = float(tempval)\r\n\r\n def cflag (tempval):\r\n if check_PIL == False:\r\n print (\"PIL module not available, -c option cannot be used\")\r\n sys.stderr.write('NOJPG\\n')\r\n else:\r\n if tempval.upper() == 'J':\r\n gv[\"imagefileextension\"] = \".jpg\"\r\n elif tempval.upper() == 'P':\r\n gv[\"imagefileextension\"] = \".pdf\"\r\n elif tempval.upper() == 'N':\r\n gv[\"imagefileextension\"] = \".png\"\r\n else:\r\n print (\"-c variable\",tempval, \"not recognized\")\r\n sys.exit(1)\r\n\r\n def dflag ():\r\n gv[\"skipdemographicscaling\"] = True\r\n def eflag():\r\n gv[\"eventimes\"] = True\r\n def iflag (tempname):\r\n gv[\"imfilename\"] = tempname.strip()\r\n def oflag (tempname):\r\n if len(tempname) >= 3 and tempname[-3:].lower() != \"eps\":\r\n tempname += \".eps\"\r\n gv[\"outputfilename\"]= tempname.strip()\r\n def gflag (tempval):\r\n gv[\"globalscale\"] = float(tempval)\r\n def xflag (tempval):\r\n ## edited 9/1/2017, this seemed to work better. use maximumxpoint for making plot wider, and use localxscale for makeing it narrower\r\n f = float(tempval)\r\n if f > 1.0:\r\n gv[\"maximumxpoint\"] = gv[\"maximumxpoint\"] * f\r\n else:\r\n gv[\"localxscale\"] = f\r\n def yflag (tempval):\r\n gv[\"localyscale\"] = float(tempval)\r\n def jflag (tempval):\r\n gv[\"arrowheightadj\"] = float(tempval)\r\n def fflag(tempval):\r\n gv[\"font\"] = tempval\r\n gv[\"bifont\"] = gv[\"font\"] + \"-BoldItalic\"\r\n def kflag ():\r\n gv[\"line0y\"] = 0.88 ## a tradeof, between need to make room and not wanting to squash figure\r\n gv[\"anglenames\"] = True\r\n def mflag(tempval):\r\n if tempval[0].isdigit():\r\n gv[\"moption\"] = float(tempval)\r\n else:\r\n if tempval[0].lower() != 's':\r\n gv[\"moption\"] = tempval[0].lower()\r\n else:\r\n gv[\"moption\"] = tempval\r\n def nflag (tempname):\r\n gv[\"usealtnames\"] = True\r\n gv[\"altnamefilename\"] = tempname.strip()\r\n def qflag ():\r\n gv[\"popboxcintervalboxes\"] = False\r\n def rflag ():\r\n gv[\"popboxcintervalarrows\"] = False\r\n def pflag(tempval):\r\n gv[\"fontsize\"] = float(tempval)\r\n gv[\"fontfixed\"] = True\r\n def tflag(tempval):\r\n gv[\"lastt_lower_y\"] = float(tempval)\r\n gv[\"set_lastt_lower_y\"] = False\r\n def sflag ():\r\n gv[\"dosquare\"] = True\r\n gv[\"maximumxpoint\"] = 576.1\r\n def uflag ():\r\n gv[\"simplecolor\"] = True\r\n def vflag ():\r\n gv[\"rgbcolor\"] = True\r\n def wflag (tempval):\r\n maxscalar = 20\r\n temp = int(round(float(tempval)))\r\n if temp > maxscalar:\r\n print(\" maximum -w value: 20 \")\r\n exit()\r\n gv[\"widthscalar\"] = temp\r\n def zflag ():\r\n gv[\"excludeghost\"] = True\r\n\r\n def removewhitespace(temps):\r\n return \"\".join(temps.split())\r\n\r\n def cleanarglist(arglist,flags_with_values):\r\n \"\"\"\r\n \"\"\"\r\n if arglist[-1] =='':\r\n arglist.pop(-1)\r\n newarg = []\r\n if arglist[0][0] != \"-\": # skip program name at beginning of list\r\n arglist = arglist[1:]\r\n ai = 0\r\n while ai < len(arglist):\r\n if removewhitespace(arglist[ai]) != \"\":\r\n arglist[ai] = removewhitespace(arglist[ai])\r\n else:\r\n print ( \"bad whitespace in command line: \",repr(\" \".join(arglist)))\r\n sys.exit(1)\r\n if arglist[ai][0] == '-':\r\n if arglist[ai][1] in flags_with_values and len(arglist[ai])==2: ## found a space in the command line\r\n arglist[ai] = arglist[ai] + arglist[ai+1]\r\n newarg.append(arglist[ai])\r\n ai += 1\r\n else:\r\n newarg.append(arglist[ai])\r\n else:\r\n print ( \"error on command line, \\\"-\\\" not 
found:\",arglist[ai])\r\n printcommandset()\r\n sys.exit(1)\r\n ai += 1\r\n\r\n return newarg\r\n\r\n def checkallflags(flags_with_values,flags_withoutvalues,cldic):\r\n \"\"\"\r\n checks that flags that must be used are used\r\n checks that flags_with_values,flags_withoutvalues and cldic all make use of the appropriate flags\r\n \"\"\"\r\n if len(set(flags_with_values).intersection(set(flags_without_values))) > 0:\r\n print ( \"error some flags appear in two lists of flags, with and without required values:\",set(flags_with_values).intersection(set(flags_without_values)))\r\n printcommandset()\r\n sys.exit(1)\r\n for flag in set(flags_with_values).union(set(flags_withoutvalues)):\r\n if flag not in cldic:\r\n print ( \"error some flag mismatch between strings of flags and dictionary of flags:\",flag)\r\n printcommandset()\r\n sys.exit(1)\r\n return\r\n def check_flags_used(flagsused, flags_must_use):\r\n for f in flags_must_use:\r\n if f not in flagsused:\r\n print(\"-%c missing from command line. Run without any commands to get the help screen.\"%f)\r\n sys.exit(1)\r\n return\r\n\r\n cldic = {'a':aflag,'b':bflag,'c':cflag,'d':dflag,'e':eflag,'f':fflag,\\\r\n 'g':gflag,'i':iflag,'j':jflag,'k':kflag,'m':mflag,'n':nflag,'o':oflag,\\\r\n 'p':pflag, 'q':qflag,'r':rflag,'s':sflag, 't':tflag,'u':uflag,'v':vflag,'w':wflag,\\\r\n 'x':xflag,'y':yflag,'z':zflag}\r\n flags_must_use = 'i'\r\n flags_with_values = \"cbfgijmoptxynw\"\r\n flags_without_values = \"adesuvkqrz\"\r\n cmdstr = \" \".join(args)\r\n checkallflags(flags_with_values,flags_without_values,cldic)\r\n argv = cleanarglist(args,flags_with_values)\r\n flagsused = ''\r\n for i in range(0,len(argv)):\r\n if argv[i][0] == '-':\r\n flaglet = argv[i][1].lower()\r\n flagsused += flaglet\r\n## print ( i, flaglet)\r\n if len(argv[i]) == 2:\r\n if i == (len(argv)-1):\r\n cldic[flaglet]()\r\n else:\r\n if argv[i+1][0] == '-':\r\n cldic[flaglet]()\r\n else:\r\n cldic[flaglet](argv[i+1])\r\n i += 1\r\n else:\r\n if (len(argv[i]) < 2):\r\n print ( \"problem on command line \")\r\n exit()\r\n cldic[flaglet](argv[i][2:len(argv[i])])\r\n else:\r\n print ( \"error on command line, \\\"-\\\" not found:\",argv[i])\r\n printcommandset()\r\n sys.exit(1)\r\n check_flags_used(flagsused, flags_must_use)\r\n return cmdstr", "def adjust(self):\n self._import_draws()\n self.draws[\"sigma_sub\"] = self._calc_sigma_sub()\n self.draws[self.me_map[\"resid\"]] = self._resid()\n for sub_me in self.me_map[\"sub\"].keys():\n if \"squeeze\" in list(self.me_map[\"sub\"][sub_me].keys()):\n self.draws[self.me_map[\"sub\"][sub_me][\"squeeze\"]] = (\n self._squeeze(sub_me))\n if \"excess\" in list(self.me_map[\"sub\"][sub_me].keys()):\n self.draws[self.me_map[\"sub\"][sub_me][\"excess\"]] = (\n self._excess(sub_me))\n self._export()", "def show_trunk(height=2):\n for k in range(height):\n print(\"|\".center(GROUND_WIDTH))", "def plots_standardcolumn():\n\n data = st.read('standard-column_det0.m', reader='det')\n A = 18/np.cos(np.pi/6) # cm length of face of the hexagon\n Ah = 6. 
* (A * 18./2) # Area of the hexagon\n V = Ah * (160 + 793 + 120)\n plot_detector(data, 'Axial', V)", "def proz2D():\r\n print(\"processing: \",CURDATA()[0]) \r\n XCMD(\"apk2d\",WAIT_TILL_DONE)\r\n ABS2() #Baseline correction \r\n ABS1()", "def diagnosticos(): \r\n global rhoe,Ex,npuntos_malla,itiempo,longitud_malla,rho0,aP,v1,v2,F\r\n global EnergiaK, EnergiaP, EnergiaT, emax\r\n global iout,igrafica,ifase,ivdist, distribucion\r\n global Archivos_Densidades, Archivos_Campo, Archivos_Efase, Archivos_Fdistribucion\r\n \r\n # Se crea el eje para graficar las cantidades fisicas involucradas:\r\n xgrafica = dx * sp.arange(npuntos_malla+1)\r\n \r\n if (itiempo == 0): \r\n plt.figure('Cantidades')\r\n plt.clf()\r\n \r\n if (igrafica > 0):\r\n # Se grafica cada paso dado por el contador igrafica:\r\n if (sp.fmod(itiempo,igrafica) == 0): \r\n # Densidad total\r\n plt.figure(1)\r\n if (itiempo >0 ): plt.cla()\r\n plt.plot(xgrafica, -(rhoe+rho0), 'r', label='Densidad')\r\n plt.xlabel('x')\r\n plt.xlim(0,longitud_malla)\r\n plt.ylim(-1.5,1.5)\r\n plt.legend(loc=1)\r\n # Se imprimen y se guardan las imagenes de acuerdo a iout:\r\n plt.pause(0.0001)\r\n plt.draw()\r\n filename = '%0*d_densidad'%(5, itiempo)\r\n Archivos_Densidades[itiempo] = filename\r\n if (iout > 0):\r\n if (sp.fmod(itiempo,iout) == 0): \r\n plt.savefig(filename+'.png',dpi=720)\r\n \r\n # Campo electrico\r\n plt.figure(2)\r\n if (itiempo >0 ): plt.cla()\r\n plt.plot(xgrafica, Ex, 'b' , label = 'Ex')\r\n plt.xlabel('x', fontsize = 18)\r\n plt.ylabel('Ex', fontsize = 18)\r\n plt.xticks(np.linspace(0,16,4), fontsize = 18)\r\n plt.yticks(np.linspace(-0.0010,0.0010,5), fontsize = 18)\r\n plt.xlim(0,longitud_malla)\r\n plt.ylim(-0.0015,0.0015)\r\n plt.legend(loc = 1)\r\n # Se imprimen y se guardan las imagenes de acuerdo a iout:\r\n plt.pause(0.0001)\r\n plt.draw()\r\n filename = '%0*d_campoelectrico'%(5, itiempo)\r\n Archivos_Campo[itiempo] = filename\r\n if (iout > 0):\r\n if (sp.fmod(itiempo,iout) == 0): \r\n plt.savefig(filename+'.png',dpi=720)\r\n \r\n if (ifase > 0):\r\n if (sp.fmod(itiempo,ifase) == 0): \r\n # Se grafica el espacio de fase en el paso dado por el contador ifase:\r\n plt.figure(3)\r\n if (itiempo >0 ): plt.cla()\r\n v1 = sp.zeros(nparticulas)\r\n v2 = sp.zeros(nparticulas)\r\n x1 = sp.zeros(nparticulas)\r\n x2 = sp.zeros(nparticulas)\r\n for i in range(nparticulas):\r\n if (v[i-1]>v[i]):\r\n v1[i]=v[i]\r\n x1[i]=x[i]\r\n elif(v[i-1]<v[i]):\r\n v2[i]=v[i]\r\n x2[i]=x[i] \r\n if(distribucion == 0):\r\n plt.scatter(x,v,marker='.',s=0.1,color='black') \r\n elif(distribucion == 1 or distribucion == 2):\r\n plt.scatter(x1,v1,marker='.',s=0.1,color='red') \r\n plt.scatter(x2,v2,marker='.',s=0.1,color='blue')\r\n plt.xticks(np.linspace(0,100,6), fontsize = 18)\r\n plt.yticks(np.linspace(-8,8,5), fontsize = 18)\r\n plt.xlabel('x', fontsize = 18)\r\n plt.ylabel('v', fontsize = 18)\r\n plt.xlim(0,longitud_malla)\r\n plt.ylim(-4,8)\r\n\r\n # Se imprimen y se guardan las imagenes de acuerdo a iout:\r\n plt.pause(0.0001)\r\n plt.draw()\r\n filename = '%0*d_espaciofase'%(5, itiempo)\r\n Archivos_Efase[itiempo] = filename\r\n if (iout > 0):\r\n if (sp.fmod(itiempo,iout) == 0): \r\n plt.savefig(filename+'.png',dpi=240)\r\n \r\n if (ivdist > 0):\r\n if (sp.fmod(itiempo,ivdist)==0):\r\n plt.figure(4)\r\n if (itiempo >0 ): plt.cla() \r\n plt.scatter(v,F,marker = '.' 
, s=0.1, color ='green')\r\n plt.xlim(-5*vh,5*vh)\r\n plt.ylim(0,1.0)\r\n plt.xlabel('v')\r\n plt.ylabel('f(v)')\r\n #fn_vdist = 'vdist_%0*d'%(5, itiempo)\r\n # Se imprimen y se guardan las imagenes de acuerdo a iout:\r\n plt.pause(0.0001)\r\n plt.draw()\r\n filename = '%0*d_fdistribucion'%(5, itiempo)\r\n Archivos_Fdistribucion[itiempo] = filename\r\n if (iout > 0):\r\n if (sp.fmod(itiempo,iout) == 0): \r\n plt.savefig(filename+'.png',dpi=720)\r\n #Se escriben los datos de la distribucion en un archivo:\r\n# sp.savetxt(fn_vdist, sp.column_stack((v,F)),fmt=('%1.4e','%1.4e')) \r\n \r\n # Energia cinetica:\r\n v2 = v**2\r\n EnergiaK[itiempo] = 0.5*masa*sum(v2)\r\n \r\n # Energia potencial:\r\n e2 = Ex**2\r\n EnergiaP[itiempo] = 0.5*dx*sum(e2)\r\n emax = max(Ex) # Campo maximo para analisis de inestabilidad\r\n \r\n # Energia total: \r\n EnergiaT[itiempo] = EnergiaP[itiempo] + EnergiaK[itiempo]\r\n \r\n return True", "def renishaw_1d_si():\r\n\r\n import os\r\n from ..raman_hyperspectra_read_files import read_RAMAN_RENISHAW_txt_0D\r\n\r\n fname = os.path.join(os.path.dirname(__file__), \"RENISHAW_1D_Si.txt\")\r\n da_sliced, da_sliced_interp, da, da_interp = read_RAMAN_RENISHAW_txt_0D(fname)\r\n\r\n return da_sliced, da_sliced_interp, da, da_interp", "def testDoubleMIP(self):\n\n self.M.render(self.testoutput[2], wide=True)", "def plot_reconstruction_diagnostics(self, figsize=(20, 10)):\n figs = []\n fig_names = []\n\n # upsampled frequency\n fx_us = tools.get_fft_frqs(2 * self.nx, 0.5 * self.dx)\n fy_us = tools.get_fft_frqs(2 * self.ny, 0.5 * self.dx)\n\n # plot different stages of inversion\n extent = tools.get_extent(self.fy, self.fx)\n extent_upsampled = tools.get_extent(fy_us, fx_us)\n\n for ii in range(self.nangles):\n fig = plt.figure(figsize=figsize)\n grid = plt.GridSpec(3, 4)\n\n for jj in range(self.nphases):\n\n # ####################\n # separated components\n # ####################\n ax = plt.subplot(grid[jj, 0])\n\n to_plot = np.abs(self.separated_components_ft[ii, jj])\n to_plot[to_plot <= 0] = np.nan\n plt.imshow(to_plot, norm=LogNorm(), extent=extent)\n\n circ = matplotlib.patches.Circle((0, 0), radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ)\n\n if jj == 0:\n plt.title('O(f)otf(f)')\n plt.scatter(self.frqs[ii, 0], self.frqs[ii, 1], edgecolor='r', facecolor='none')\n plt.scatter(-self.frqs[ii, 0], -self.frqs[ii, 1], edgecolor='r', facecolor='none')\n elif jj == 1:\n plt.title('m*O(f-fo)otf(f)')\n plt.scatter(self.frqs[ii, 0], self.frqs[ii, 1], edgecolor='r', facecolor='none')\n elif jj == 2:\n plt.title('m*O(f+fo)otf(f)')\n plt.scatter(-self.frqs[ii, 0], -self.frqs[ii, 1], edgecolor='r', facecolor='none')\n\n plt.setp(ax.get_xticklabels(), visible=False)\n plt.setp(ax.get_yticklabels(), visible=False)\n\n plt.xlim([-2 * self.fmax, 2 * self.fmax])\n plt.ylim([2 * self.fmax, -2 * self.fmax])\n\n # ####################\n # deconvolved component\n # ####################\n ax = plt.subplot(grid[jj, 1])\n\n plt.imshow(np.abs(self.components_deconvolved_ft[ii, jj]), norm=LogNorm(), extent=extent)\n\n if jj == 0:\n plt.scatter(self.frqs[ii, 0], self.frqs[ii, 1], edgecolor='r', facecolor='none')\n plt.scatter(-self.frqs[ii, 0], -self.frqs[ii, 1], edgecolor='r', facecolor='none')\n elif jj == 1:\n plt.scatter(self.frqs[ii, 0], self.frqs[ii, 1], edgecolor='r', facecolor='none')\n elif jj == 2:\n plt.scatter(-self.frqs[ii, 0], -self.frqs[ii, 1], edgecolor='r', facecolor='none')\n\n circ = matplotlib.patches.Circle((0, 0), radius=self.fmax, color='k', fill=0, 
ls='--')\n ax.add_artist(circ)\n\n if jj == 0:\n plt.title('deconvolved component')\n plt.setp(ax.get_xticklabels(), visible=False)\n plt.setp(ax.get_yticklabels(), visible=False)\n\n plt.xlim([-2 * self.fmax, 2 * self.fmax])\n plt.ylim([2 * self.fmax, -2 * self.fmax])\n\n # ####################\n # shifted component\n # ####################\n ax = plt.subplot(grid[jj, 2])\n\n # avoid any zeros for LogNorm()\n cs_ft_toplot = np.abs(self.components_shifted_ft[ii, jj])\n cs_ft_toplot[cs_ft_toplot <= 0] = np.nan\n plt.imshow(cs_ft_toplot, norm=LogNorm(), extent=extent_upsampled)\n plt.scatter(0, 0, edgecolor='r', facecolor='none')\n\n circ = matplotlib.patches.Circle((0, 0), radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ)\n\n if jj == 1:\n circ2 = matplotlib.patches.Circle(-self.frqs[ii], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ2)\n elif jj == 2:\n circ2 = matplotlib.patches.Circle(self.frqs[ii], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ2)\n\n if jj == 0:\n plt.title('shifted component')\n plt.setp(ax.get_xticklabels(), visible=False)\n plt.setp(ax.get_yticklabels(), visible=False)\n\n plt.xlim([-2 * self.fmax, 2 * self.fmax])\n plt.ylim([2 * self.fmax, -2 * self.fmax])\n\n # ####################\n # normalized weights\n # ####################\n ax = plt.subplot(grid[jj, 3])\n\n to_plot = self.weights[ii, jj] / self.weight_norm\n to_plot[to_plot <= 0] = np.nan\n im2 = plt.imshow(to_plot, norm=LogNorm(), extent=extent_upsampled)\n im2.set_clim([1e-5, 1])\n fig.colorbar(im2)\n\n circ = matplotlib.patches.Circle((0, 0), radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ)\n\n if jj == 1:\n circ2 = matplotlib.patches.Circle(-self.frqs[ii], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ2)\n elif jj == 2:\n circ2 = matplotlib.patches.Circle(self.frqs[ii], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ2)\n\n if jj == 0:\n plt.title('normalized weight')\n plt.setp(ax.get_xticklabels(), visible=False)\n plt.setp(ax.get_yticklabels(), visible=False)\n\n plt.xlim([-2 * self.fmax, 2 * self.fmax])\n plt.ylim([2 * self.fmax, -2 * self.fmax])\n\n plt.suptitle('period=%0.3fnm at %0.3fdeg=%0.3frad, f=(%0.3f,%0.3f) 1/um\\n'\n 'mod=%0.3f, min mcnr=%0.3f, wiener param=%0.2f\\n'\n 'phases (deg) =%0.2f, %0.2f, %0.2f, phase diffs (deg) =%0.2f, %0.2f, %0.2f' %\n (self.periods[ii] * 1e3, self.angles[ii] * 180 / np.pi, self.angles[ii],\n self.frqs[ii, 0], self.frqs[ii, 1], self.mod_depths[ii, 1], np.min(self.mcnr[ii]), self.wiener_parameter,\n self.phases[ii, 0] * 180/np.pi, self.phases[ii, 1] * 180/np.pi, self.phases[ii, 2] * 180/np.pi,\n 0, np.mod(self.phases[ii, 1] - self.phases[ii, 0], 2*np.pi) * 180/np.pi,\n np.mod(self.phases[ii, 2] - self.phases[ii, 0], 2*np.pi) * 180/np.pi))\n\n figs.append(fig)\n fig_names.append('sim_combining_angle=%d' % (ii + 1))\n\n # #######################\n # net weight\n # #######################\n figh = plt.figure(figsize=figsize)\n grid = plt.GridSpec(1, 2)\n plt.suptitle('Net weight, Wiener param = %0.2f' % self.wiener_parameter)\n\n ax = plt.subplot(grid[0, 0])\n net_weight = np.sum(self.weights, axis=(0, 1)) / self.weight_norm\n im = ax.imshow(net_weight, extent=extent_upsampled, norm=PowerNorm(gamma=0.1))\n\n figh.colorbar(im, ticks=[1, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 1e-2, 1e-3, 1e-4, 1e-5])\n\n ax.set_title(\"non-linear scale\")\n circ = matplotlib.patches.Circle((0, 0), radius=self.fmax, color='k', fill=0, ls='--')\n 
ax.add_artist(circ)\n\n circ2 = matplotlib.patches.Circle((0, 0), radius=2*self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ2)\n\n circ3 = matplotlib.patches.Circle(self.frqs[0], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ3)\n\n circ4 = matplotlib.patches.Circle(-self.frqs[0], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ4)\n\n circ5 = matplotlib.patches.Circle(self.frqs[1], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ5)\n\n circ6 = matplotlib.patches.Circle(-self.frqs[1], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ6)\n\n circ7 = matplotlib.patches.Circle(self.frqs[2], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ7)\n\n circ8 = matplotlib.patches.Circle(-self.frqs[2], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ8)\n\n ax.set_xlim([-2 * self.fmax, 2 * self.fmax])\n ax.set_ylim([2 * self.fmax, -2 * self.fmax])\n\n ax = plt.subplot(grid[0, 1])\n ax.set_title(\"linear scale\")\n im = ax.imshow(net_weight, extent=extent_upsampled)\n\n figh.colorbar(im)\n circ = matplotlib.patches.Circle((0, 0), radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ)\n\n circ2 = matplotlib.patches.Circle((0, 0), radius=2 * self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ2)\n\n circ3 = matplotlib.patches.Circle(self.frqs[0], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ3)\n\n circ4 = matplotlib.patches.Circle(-self.frqs[0], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ4)\n\n circ5 = matplotlib.patches.Circle(self.frqs[1], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ5)\n\n circ6 = matplotlib.patches.Circle(-self.frqs[1], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ6)\n\n circ7 = matplotlib.patches.Circle(self.frqs[2], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ7)\n\n circ8 = matplotlib.patches.Circle(-self.frqs[2], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ8)\n\n ax.set_xlim([-2 * self.fmax, 2 * self.fmax])\n ax.set_ylim([2 * self.fmax, -2 * self.fmax])\n\n figs.append(figh)\n fig_names.append('net_weight')\n\n return figs, fig_names", "def compare_dispersion_to_dos(omegas, kx, ky, mlat, outdir=None):\n # Save DOS from projection\n if outdir is None:\n outdir = dio.prepdir(mlat.lp['meshfn'])\n else:\n outdir = dio.prepdir(outdir)\n name = outdir + 'dispersion_gyro' + mlat.lp['meshfn_exten'] + '_nx' + str(len(kx)) + '_ny' + str(len(ky))\n name += '_maxkx{0:0.3f}'.format(np.max(np.abs(kx))).replace('.', 'p')\n name += '_maxky{0:0.3f}'.format(np.max(np.abs(ky))).replace('.', 'p')\n\n # initialize figure\n fig, ax = leplt.initialize_1panel_centered_fig()\n ax2 = ax.twinx()\n ax.hist(omegas.ravel(), bins=1000)\n\n # Compare the histograms of omegas to the dos and save the figure\n eigval = np.imag(mlat.get_eigval())\n print 'eigval = ', eigval\n ax2.hist(eigval[eigval > 0], bins=50, color=lecmap.green(), alpha=0.2)\n ax.set_title('DOS from dispersion')\n xlims = ax.get_xlim()\n ax.set_xlim(0, xlims[1])\n plt.savefig(name + '_dos.png', dpi=300)", "def integrate(coords,data,fault_pts,dshape_hex8,gll_weights,elmt):\n norm=0.0\n normx=0.0\n normy=0.0\n normz=0.0\n div=0.0 #normalizing factor to divide by\n divx=0.\n divy=0.\n divz=0.\n\n eps=1.0*g.mesh_spacing/(g.ngllx-1.)\n print 'eps=', eps\n f=open('eliminated_coords.vtk','w')\n\n #create integer versions of arrays to use in pulling out gll pts for each element\n 
data_round=np.rint(data)\n dati=data_round.astype(int)\n coord_round=np.rint(coords)\n coordi=coord_round.astype(int)\n\n #remove duplicates from data array\n dat_struc=np.ascontiguousarray(dati).view(np.dtype((np.void,dati.dtype.itemsize *dati.shape[1])))\n _,idx=np.unique(dat_struc,return_index=True)\n datu=dati[idx]\n data_unique=data[idx]\n\n for i_elmt in range(g.nelmt):\n #pull out geometric coordinates for this element\n elmt_coord_id=[j-1 for j in elmt[i_elmt]]\n elmt_coord=coordi[elmt_coord_id]\n\n #find corresponding gll pts for this element\n xmin=min(elmt_coord[:,0]);xmax=max(elmt_coord[:,0])\n ymin=min(elmt_coord[:,1]);ymax=max(elmt_coord[:,1])\n zmin=min(elmt_coord[:,2]);zmax=max(elmt_coord[:,2])\n gll_coord_id=np.nonzero((datu[:,0]>=xmin) & (datu[:,0]<=xmax) & (datu[:,1]>=ymin) & (datu[:,1]<=ymax) & (datu[:,2]>=zmin) & (datu[:,2]<=zmax))\n elmt_data=data_unique[gll_coord_id]\n if len(gll_coord_id[0]) != g.ngll:\n print \"elmt=\", elmt_coord_id\n print xmin,xmax,ymin,ymax,zmin,zmax\n print 'elmt_data=', elmt_data\n print \"gll pts found=\", len(gll_coord_id[0])\n raise ValueError(\"incorrect number of gll points found in element!\")\n exit\n\n #sort the gll coords so they correspond the order of the arrays giving the weights and shape function\n dat_sorted=elmt_data[npi.argsort((elmt_data[:,0], elmt_data[:,1],elmt_data[:,2]))]\n func=dat_sorted[:,3:]\n\n #if any gll pt is too close to fault, remove the element from the integration\n dist=distance.cdist(fault_pts,dat_sorted[:,0:3],'euclidean')\n if (dist<eps).any():\n print \"eliminated element #\", i_elmt\n np.savetxt(f,dat_sorted[:,0:3],fmt='%3.3f')\n continue\n\n for i_gll in range(g.ngll):\n\n #compute jacobian, its derivative and inverse\n jac=np.matmul(dshape_hex8[:,:,i_gll],elmt_coord)\n det_jac=np.linalg.det(jac)\n\n #perform the integration\n norm=norm+det_jac*gll_weights[i_gll]*np.dot((func[i_gll,3:6]-func[i_gll,0:3]),(func[i_gll,3:6]-func[i_gll,0:3]))\n div=div+det_jac*gll_weights[i_gll]*np.dot(func[i_gll,3:6],func[i_gll,3:6])\n normx=normx+det_jac*gll_weights[i_gll]*(func[i_gll,3]-func[i_gll,0])**2\n divx=divx+det_jac*gll_weights[i_gll]*(func[i_gll,3])**2\n normy=normy+det_jac*gll_weights[i_gll]*(func[i_gll,4]-func[i_gll,1])**2\n divy=divy+det_jac*gll_weights[i_gll]*(func[i_gll,4])**2\n normz=normz+det_jac*gll_weights[i_gll]*(func[i_gll,5]-func[i_gll,2])**2\n divz=divz+det_jac*gll_weights[i_gll]*(func[i_gll,5])**2\n\n norm_finalx=sqrt(normx/divx)\n norm_finaly=sqrt(normy/divy)\n norm_finalz=sqrt(normz/divz)\n norm_final=sqrt(norm/div)\n\n f.close()\n\n return norm_finalx, norm_finaly, norm_finalz,norm_final", "def force_show(sub_Idx):\n force_path = './dataset/subj_' + f'{sub_Idx:02d}'+ '/forces/force_' + f'{sub_Idx:02d}' + '.txt'\n image_path = './dataset/subj_' + f'{sub_Idx:02d}'+ '/images/'\n force_num = len(glob.glob(image_path + '*.jpg'))\n force_list = load_force_txt(force_path,force_num)\n print('showing '+f'{force_num:03d}'+ ' raw forces for subject ' + f'{sub_Idx:02d}')\n\n fig = plt.figure(figsize = (10, 7)) \n ax = plt.axes(projection =\"3d\") \n\n for x, y, z in force_list:\n ax.scatter3D(x, y, z, color = \"green\")\n ax.set_xlabel('X-axis', fontweight ='bold') \n ax.set_ylabel('Y-axis', fontweight ='bold') \n ax.set_zlabel('Z-axis', fontweight ='bold')\n plt.title(\"3D force data\") \n plt.show()", "def overviewCommand(self):\n plt.figure(11)\n plt.clf()\n ax = plt.subplot(211)\n plt.plot(self.raw['OPDC'].data.field('TIME'),\n 1e6*self.raw['OPDC'].data.field('FUOFFSET'),\n color='r', 
label='FUOFFSET',\n linewidth=1, alpha=1) \n plt.plot(self.raw['OPDC'].data.field('TIME'),\n 1e6*(self.raw['OPDC'].data.field(self.DLtrack)-\n self.raw['OPDC'].data.field('PSP')),\n color='r', linewidth=3, alpha=0.5,\n label=self.DLtrack+'-PSP')\n plt.legend()\n plt.subplot(212, sharex=ax)\n plt.plot(self.raw['OPDC'].data.field('TIME'),\n 1e6*self.raw['OPDC'].data.field('FUOFFSET')-\n 1e6*(self.raw['OPDC'].data.field(self.DLtrack)-\n self.raw['OPDC'].data.field('PSP')),\n color='k', label='$\\Delta$',\n linewidth=1, alpha=1) \n \n signal = self.raw['OPDC'].data.field('FUOFFSET')\n plt.figure(12)\n plt.clf()\n ax2 = plt.subplot(111)\n Fs = 1e6/np.diff(self.raw['OPDC'].data.field('TIME')).mean()\n print Fs\n ax2.psd(signal[:50000], NFFT=5000, Fs=Fs, label='FUOFFSET',scale_by_freq=0)\n plt.legend()", "def print_xyz(atoms,coordinates,filename):\n coordinates = [[w / angtobh for w in ww] for ww in coordinates] #bh to ang\n xyz = open(filename,\"a\")\n xyz.write(str(len(atoms)))\n xyz.write(\"\\nOptimizer geometry\\n\")\n for i in xrange(len(atoms)):\n\txyz.write(atoms[i] + ' ')\n\txyz.write(\" \".join(str(f) for f in coordinates[i]))\n\txyz.write(\"\\n\")\n coordinates = [[w * angtobh for w in ww] for ww in coordinates] #ang to bh\n xyz.close()", "def showmeasobsfrac(self, fields = [\"skystd\", \"adamom_flux\"]):\n\t\t\n\t\tcat = megalut.tools.io.readpickle(self.groupobspath)\n\t\t#print cat.colnames\n\t\t\t\t\n\t\tfor field in fields:\n\t\t\tnbad = np.sum(cat[field].mask)\n\t\t\tntot = len(cat)\n\t\t\tprint \"%20s: %.3f%% ( %i / %i are masked)\" % (field, 100.0*float(ntot - nbad)/float(ntot), nbad, ntot)\n\t\t\t\n\t\t#import matplotlib.pyplot as plt\n\t\t#plt.plot(cat[\"sewpy_FLUX_AUTO\"], cat[\"adamom_flux\"], \"b.\")\n\t\t#plt.show()", "def drawSystemInfo(self):\n for sim in self.systemSims:\n # draw name\n (x,y) = anwp.sl.engine.worldToScreen(sim.mySystemDict['x'], sim.mySystemDict['y'])\n pyui.desktop.getRenderer().drawText(sim.mySystemDict['name'], \n (x-30,y-70),\n sim.color1, self.game.app.planetFont, \n flipped = 1)\n # draw city number\n pyui.desktop.getRenderer().drawText(str(sim.mySystemDict['cities']), \n (x-10,y-6),\n sim.color2, self.game.app.systemFont, \n flipped = 1)", "def dump_step(self,status):\n super(vanderpol_output,self).dump_step(status)\n\n L = self.level\n\n oldcol = self.sframe\n # self.sframe = self.ax.scatter(L.uend.pos.values[0],L.uend.pos.values[1],L.uend.pos.values[2])\n self.sframe = self.ax.scatter(L.uend.values[0],L.uend.values[1])\n # Remove old line collection before drawing\n # if oldcol is not None:\n # self.ax.collections.remove(oldcol)\n plt.pause(0.00001)\n\n return None", "def line_ratio_map(quant1='L_[NII]122', quant2='L_[NII]205', ContourFunct='ne_mw', res=0.5, plane='xy', units='Jy', **kwargs):\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n GR = glo.global_results()\n location = aux.moment0_map_location(res=res,plane=plane,gal_index=p.gal_index)\n\n try:\n momentmap = np.load(location, allow_pickle=True)\n print('Found stored momentmap data')\n except:\n print('Did not find stored momentmap data - creating')\n aux.convert_cell_data_to_regular_grid(res=res, plane=plane, gal_index=p.gal_index)\n momentmap = np.load(location, allow_pickle=True)\n\n indexes = momentmap[-1]\n index1, index2 = int(indexes[1]), int(indexes[2])\n \n momentmap = momentmap[:-1] \n \n dictionary = p.moment0_dict\n \n num1=dictionary[quant1]\n num2=dictionary[quant2]\n num =dictionary[ContourFunct]\n x = momentmap[:,1]\n y = momentmap[:,2]\n 
lumus = np.array(momentmap[:,3])\n \n line1=[]\n line2=[]\n Contour_Function=[]\n m=[]\n for row in lumus:\n \n line1.append(row[num1])\n line2.append(row[num2])\n \n \n if ContourFunct == 'ne_mw':\n if row[dictionary['m']] == 0:\n \n Contour_Function.append(0)\n else:\n Contour_Function.append(row[num]/row[dictionary['m']])\n else:\n Contour_Function.append(row[num]) \n \n line1 = np.array(line1)\n line2 = np.array(line2)\n Contour_Function = np.array(Contour_Function)\n \n ratio = np.divide(line1, line2, out=np.zeros_like(line1), where=line2!=0)\n \n ratio = ratio.reshape(index1, index2)\n x = x.reshape(index1, index2)\n y = y.reshape(index1, index2)\n line1 = line1.reshape(index1, index2)\n line2 = line2.reshape(index1, index2)\n #pdb.set_trace()\n Contour_Function=Contour_Function.reshape(index1,index2)\n \n ratio[ratio==0] = np.min(ratio[ratio>0])\n Contour_Function[Contour_Function==0] = 1e-30\n\n\n if p.add:\n fig,ax = plt.gcf(),p.ax #plot already available \n else:\n fig, ax = plt.subplots(figsize=(10,8))\n plt.subplots_adjust(left=0.1,bottom=0.2,right=0.8)\n \n if p.log: cs = ax.pcolormesh(x, y, np.log10(ratio), cmap=plt.cm.viridis, vmin=np.log10(ratio).max()-1.5, shading='auto')\n if not p.log: cs = ax.pcolormesh(x, y, ratio, cmap=plt.cm.viridis, vmin=ratio.max()/100, shading='auto')\n\n if not p.add:\n\n ax.set_title('Line Ratio map of ' + quant1.replace('L_','') + \"/\" + quant2.replace('L_',''))\n ax.set_xlabel('x [kpc]')\n ax.set_ylabel('y [kpc]')\n levels = np.arange(np.min(np.log10(Contour_Function[Contour_Function > 1e-30])).round(), np.max(np.log10(Contour_Function)).round(), 1)\n cr=ax.contour(x,y,np.log10(Contour_Function),cmap=plt.cm.plasma, levels=levels)\n\n if p.add:labels:''\n cbaxes=fig.add_axes([.15, 0.09, 0.6, 0.027])\n cbar=fig.colorbar(cr,cax=cbaxes,orientation='horizontal', label= 'log '+ getlabel(ContourFunct))\n cbaxes2 = fig.add_axes([0.82, 0.24, 0.027, 0.6])\n if p.log: fig.colorbar(cs, cax=cbaxes2, label= 'log ' + quant1.replace('L_','') + \" / \" + quant2.replace('L_','') )\n if not p.log: fig.colorbar(cs, cax=cbaxes2, label= quant1.replace('L_','') + \" / \" + quant2.replace('L_','') )\n if p.R_max:\n ax.set_xlim([-p.R_max,p.R_max])\n ax.set_ylim([-p.R_max,p.R_max])\n if p.savefig:\n if not os.path.isdir(p.d_plot + 'lineratios/'): os.mkdir(p.d_plot + 'lineratios/') \n plt.savefig(p.d_plot+'lineratios/map_%s%s_%i_%s_%s' % (p.sim_name,p.sim_run,p.gal_index,quant1.replace('L_',''),quant2.replace('L_',''))+ '_' + plane + '_res' + str(res) +'.png', facecolor='w', dpi=500)", "def shortHelpString(self):\r\n return self.tr(\"This tool takes an address point layer, and buffers it a selectable distance (default is 5km) to simulate a lockdown movement restriction. \\nIt then counts Hospitals and Grocery Stores, as well as significant (>1.5ha) areas of parkland (significance size for parkland also adjustable). \\nA lockdown liveability score is then calculated for each address.\\n After processing, all files will appear in directory of input files. 
final_Address is layer with results, needs to be manually added after processing.\")", "def vis_segmentation(image, seg_map):\n plt.figure(figsize=(20, 20))\n \n seg_image = label_to_color_image(seg_map).astype(np.uint8)\n plt.imshow(seg_image)\n plt.axis('off')\n plt.savefig(str(image_id)+'_seg.jpg',bbox_inches='tight')\n plt.close()", "def visualise_cppn(self, resolution=(64, 64)):\n import matplotlib.pyplot as plt\n from matplotlib.pyplot import imshow\n data = np.empty([resolution[0], resolution[1]])\n x_linspace = np.linspace(-1, 1, resolution[0])\n y_linspace = np.linspace(-1, 1, resolution[1])\n for row, x in enumerate(x_linspace):\n for col, y in enumerate(y_linspace):\n data[row, col] = self.graph(np.array([x, y, 0, 0], dtype=np.float32))[0]\n #plt.axis([-1, 1, -1, 1])\n print(data.min(), \" \", data.max())\n imshow(data, cmap='Greys', vmin=-1, vmax=1)\n plt.show()", "def ggpl_house():\n\n\t# .lines ogni riga ha due coppie di x/y che costituiscono un segmento\n\tverts = []\n\tcells = []\n\ti = 0\n\treader = csv.reader(open(\"lines/muri_esterni.lines\", 'rb'), delimiter=',') \n\tfor row in reader:\n\t\tverts.append([float(row[0]), float(row[1])])\n\t\tverts.append([float(row[2]), float(row[3])])\n\t\ti+=2\n\t\tcells.append([i-1,i])\n\n\texternalWalls = MKPOL([verts,cells,None])\n\tfloor = SOLIDIFY(externalWalls)\n\tfloor = S([1,2,3])([.04,.04,.04])(floor)\n\texternalWalls = S([1,2,3])([.04,.04,.04])(externalWalls)\n\texternalWalls = OFFSET([.2,.2,4])(externalWalls)\n\theightWalls = SIZE([3])(externalWalls)[0]\n\tthicknessWalls = SIZE([2])(externalWalls)[0]\n\n\tverts = []\n\tcells = []\n\ti = 0\n\treader = csv.reader(open(\"lines/muri_interni.lines\", 'rb'), delimiter=',') \n\tfor row in reader:\n\t\tverts.append([float(row[0]), float(row[1])])\n\t\tverts.append([float(row[2]), float(row[3])])\n\t\ti+=2\n\t\tcells.append([i-1,i])\n\n\tinternalWalls = MKPOL([verts,cells,None])\n\tinternalWalls = S([1,2,3])([.04,.04,.04])(internalWalls)\n\tinternalWalls = OFFSET([.2,.2,4])(internalWalls)\n\twalls = STRUCT([externalWalls, internalWalls])\n\n\tverts = []\n\tcells = []\n\ti = 0\n\treader = csv.reader(open(\"lines/porte.lines\", 'rb'), delimiter=',') \n\tfor row in reader:\n\t\tverts.append([float(row[0]), float(row[1])])\n\t\tverts.append([float(row[2]), float(row[3])])\n\t\ti+=2\n\t\tcells.append([i-1,i])\n\n\tdoors = MKPOL([verts,cells,None])\n\tdoors = SOLIDIFY(doors)\n\tdoors = S([1,2,3])([.04,.04,.04])(doors)\n\tdoors = OFFSET([.2,.2,3])(doors)\n\twalls = DIFFERENCE([walls, doors])\n\n\n\tverts = []\n\tcells = []\n\ti = 0\n\treader = csv.reader(open(\"lines/finestre.lines\", 'rb'), delimiter=',') \n\tfor row in reader:\n\t\tverts.append([float(row[0]), float(row[1])])\n\t\tverts.append([float(row[2]), float(row[3])])\n\t\ti+=2\n\t\tcells.append([i-1,i])\n\n\twindows = MKPOL([verts,cells,None])\n\twindows = SOLIDIFY(windows)\n\twindows = S([1,2,3])([.04,.04,.04])(windows)\n\twindows = OFFSET([.2,.2,2])(windows)\n\theightWindows = SIZE([3])(windows)[0]\n\twindows = T(3)((heightWalls-heightWindows)/2.)(windows)\n\twalls = DIFFERENCE([walls, windows])\n\n\tfloor = TEXTURE(\"texture/floor.jpg\")(floor)\n\twalls = TEXTURE(\"texture/wall.jpg\")(walls)\n\thome = STRUCT([floor, walls])\n\treturn home", "def draw_normal_mode(mode=0, coords=None, normal_modes=None):\n fac=0.52917721067121 # bohr to A\n xyz =f\"{len(coords)}\\n\\n\"\n for i in range(len(coords)):\n atom_coords = [float(m) for m in coords[i][8:].split(' ')]\n mode_coords = [float(m) for m in normal_modes[mode][i][8:].split(' 
')]\n xyz+=f\"{coords[i][0:4]} {atom_coords[0]*fac} {atom_coords[1]*fac} {atom_coords[2]*fac} {mode_coords[0]*fac} {mode_coords[1]*fac} {mode_coords[2]*fac} \\n\"\n view = py3Dmol.view(width=400, height=400)\n view.addModel(xyz, \"xyz\", {'vibrate': {'frames':10,'amplitude':1}})\n view.setStyle({'sphere':{'scale':0.30},'stick':{'radius':0.25}})\n view.setBackgroundColor('0xeeeeee')\n view.animate({'loop': 'backAndForth'})\n view.zoomTo()\n return(view.show())", "def main():\n cam = Realsense()\n # cam.access_intr_and_extr()\n profile = cam.pipeline.start(cam.config)\n depth_sensor = profile.get_device().first_depth_sensor()\n depth_scale = depth_sensor.get_depth_scale()\n align_to = rs.stream.color\n align = rs.align(align_to)\n\n objp = np.zeros((3*4,3), np.float32)\n objp[:,:2] = np.mgrid[0:4,0:3].T.reshape(-1,2)\n axis = np.float32([[1,0,0], [0,1,0], [0,0,-1]]).reshape(-1,3)\n # print(objp)\n\n try:\n while (True):\n # detect ArUco markers in RGB images\n frames = cam.pipeline.wait_for_frames()\n aligned_frames = align.process(frames)\n color_frame = aligned_frames.get_color_frame()\n color_image = np.asanyarray(color_frame.get_data()) \n frame = color_image\n font = cv2.FONT_HERSHEY_SIMPLEX\n corners, ids, rvecs, tvecs = cam.detect_markers_realsense(frame)\n \n if np.all(ids != None): # if markers are detected\n for i in range(0, ids.size):\n aruco.drawAxis(frame, cam.newcameramtx, cam.dist, rvecs[i],\n tvecs[i], 0.1) # Draw axis\n aruco.drawDetectedMarkers(frame, corners) # draw square around markers\n\n ###### DRAW ID #####\n strg = ''\n for i in range(0, ids.size):\n strg += str(ids[i][0])+', '\n\n cv2.putText(frame, \"Id: \" + strg, (0,25), font, 1, (0,255,0), 2,\n cv2.LINE_AA)\n\n\t ###### Output marker positions in camera frame ######\n \t # output tvec\n y0 = 60\n dy = 40\n for i in range(0, ids.size):\n y = y0 + i*dy\n cv2.putText(frame, str(tvecs[i][0]), (0, y), font, 1, (0,255,0),\n 2, cv2.LINE_AA)\n\n else:\n ##### DRAW \"NO IDS\" #####\n cv2.putText(frame, \"No Ids\", (0,64), font, 1, (0,255,0), 2,\n cv2.LINE_AA)\n\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n ret, corners = cv2.findChessboardCorners(gray, (4,3), None)\n if ret == True:\n corners2 = cv2.cornerSubPix(gray, corners,(11,11), (-1,-1),\n cam.criteria)\n corners2 = corners2[::-1]\n # print(corners2)\n # print(objp)\n frame = cv2.drawChessboardCorners(frame, (4,3), corners2, ret)\n # Find the rotation and translation vectors.\n _, rvecs, tvecs = cv2.solvePnP(objp, corners2, cam.newcameramtx,\n cam.dist)\n rot, _ = cv2.Rodrigues(rvecs)\n # print(rot)\n # project 3D points to image plane\n imgpts, jac = cv2.projectPoints(axis, rvecs, tvecs,\n cam.newcameramtx, cam.dist)\n frame = draw(frame, corners2, imgpts)\n\n # Display the resulting frame\n cv2.imshow('frame',frame)\n cv2.waitKey(5)\n\n # When everything done, release the capture\n cv2.destroyAllWindows()\n\n finally:\n cam.pipeline.stop()", "def shortHelpString(self):\n\t\ttexto = \"\"\"\n\t\t\t\t\tThis function extracts the drainage basins for the Flow object and returns a Grid object that can be saved into the disk.\n\t\t\t\t\t\n\t\t\t\t\tFlow: Flow direccion raster\n\n\t\t\t\t\tMinimum area: Minimum area for basins to avoid very small basins. 
The area is given as a percentage of the total\tnumber of cells (default 0.5%).\n\t\t\t\t\t\n\t\t\t\t\tBasins: Output basins raster.\n\t\t\t\t\t\"\"\"\n\t\treturn texto", "def draw_schematic(xmin,xbins,ybins,c,R,outfile,vb=False):\r\n\tme = \"LE_2DLBS.draw_schematic: \"\r\n\tt0 = time.time()\r\n\tif os.path.isfile(outfile):\r\n\t\tif vb: print me+\"Schematic exists. Not overwriting.\"\r\n\t\treturn\r\n\t## Get spatial parameters\r\n\txini = xbins[0]\r\n\tX = xbins[len(xbins)/2]\r\n\txmax = 12#xbins[-1]1.0\r\n\tymax = ybins[-1]\r\n\tloff = 1.0\r\n\t## Wall region\r\n\tplt.axvspan(X,xmax, color=\"r\",alpha=0.05,zorder=0)\r\n\t## Wall boundary\r\n\tcircle = plt.Circle(c,R,facecolor='w',lw=3,edgecolor='r',zorder=1)\r\n\tplt.gcf().gca().add_artist(circle)\r\n\t## Remove left arc\r\n\tplt.axvspan(xmin-loff,X, color=\"w\",zorder=2)\r\n\t## Lines\r\n\tplt.hlines([-ymax,ymax],0.0,xmax,\r\n\t\tcolors='k', linestyles='-', linewidth=5.0,zorder=3)\r\n\tplt.vlines([xmin,xini,X],-ymax,ymax,\r\n\t\tcolors='k', linestyles=[\"-\",\"--\",\":\"], linewidth=2.0,zorder=3)\r\n\t## Outside simulation\r\n\tplt.axvspan(xmin,xmin-loff, color=\"k\",alpha=0.1,zorder=2)\r\n\t# plt.vlines(0.0,0.0,xmax,\r\n\t# \tcolors='k', linestyles='-', linewidth=2.0,zorder=3)\n\t# plt.axhspan(-ymax,0.0, color=\"k\",alpha=0.1,zorder=2)\r\n\t## Annotations\r\n\tplt.annotate(\"Not simulated\",xy=(xmin-0.5*loff,0.0),xycoords=\"data\",\r\n\t\t\thorizontalalignment='center', verticalalignment='center')\r\n\tplt.annotate(\"Wall region\",xy=(0.5*(c[0]+R+xmax),0.0),xycoords=\"data\",\r\n\t\t\thorizontalalignment='center', verticalalignment='center')\r\n\t# plt.annotate(\"Simulation boundary\",xy=(xmin,-0.5*ymax),xycoords=\"data\",\r\n\t\t\t# horizontalalignment='center', verticalalignment='center')\r\n\t# plt.annotate(\"Injection line\",xy=(xini,+0.5*ymax),xycoords=\"data\",\r\n\t\t\t# horizontalalignment='center', verticalalignment='center')\r\n\t# plt.annotate(\"Wall boundary\",xy=(X,-0.5*ymax),xycoords=\"data\",\r\n\t\t\t# horizontalalignment='center', verticalalignment='center')\r\n\tplt.text(xmin,-0.25*ymax,\"Simulation boundary\",rotation=270)\r\n\tplt.text(xini,-0.25*ymax,\"Injection line\",rotation=270)\r\n\tplt.text(0.5*(X+c[0]+R),-0.25*ymax,\"Wall boundary\",rotation=270)\r\n\tplt.annotate(\"Periodic boundary\",xy=(0.5*(xmin-loff+xmax),0.95*ymax),xycoords=\"data\",\r\n\t\t\thorizontalalignment='center', verticalalignment='center')\r\n\t## Show bins\r\n\t# plt.hlines(ybins,xini,xmax, colors='k', linestyles=\"-\",linewidth=0.2,zorder=2.1)\r\n\t# plt.vlines(xbins,-ymax,ymax,colors='k', linestyles=\"-\",linewidth=0.2,zorder=2.1)\r\n\t## Clip image, name axes, title\r\n\tplt.xlim([xmin-loff,xmax])\r\n\tplt.ylim([-ymax,ymax])\r\n\tplt.xlabel(\"$x$\",fontsize=fsa)\r\n\tplt.ylabel(\"$y$\",fontsize=fsa)\r\n\tplt.title(\"Schematic of simulation space. 
$R = \"+str(R)+\"$\",fontsize=fst)\r\n\t## Save and close\r\n\tplt.savefig(outfile)\r\n\tif vb:\n\t\tprint me+\"Figure saved to \"+outfile\n\t\tprint me+\"Time\",round(time.time()-t0,1),\"seconds.\"\r\n\tplt.clf()\r\n\treturn", "def draw():", "def Diagnostic_plot1(self, v=False):\n\n # sort modes by frequency (radial order)\n ds.mode_id.sort_values(['f0'], axis=0, ascending=True, inplace=True)\n\n # SNR values after smoothing/interpolating at radial mode freqs\n u = np.full(len(ds.mode_id), -99) # unsmoothed\n s1 = np.full(len(ds.mode_id), -99) # after Gaussian smoothing\n s2 = np.full(len(ds.mode_id), -99) # after uniform smoothing\n s3 = np.full(len(ds.mode_id), -99) # after linear interpolation\n\n for idx, f in ds.mode_id.iterrows():\n width = abs(f['w0']) # width to convolve/interpolate over\n\n # smooth by convolving with Guassian\n smoo = star.Conv(self.snr, width)\n\n # smooth with uniform filter\n smoo2 = ndim.filters.uniform_filter1d(self.snr, size=int(np.around(width)))\n\n # smooth by interpolating\n bins = np.arange(0., self.ds.freq[-1], width) # rebin data to get highest SNR\n smoo3 = np.interp(bins, self.ds.freq, self.snr) # SNR values at these freqs\n\n index = np.abs(self.ds.freq-f['f0']).argmin() # use the frequency closest to mode\n if v:\n print(self.ds.freq[index], self.snr[index])\n print('before smoo', self.snr[index])\n print('smoo1', smoo[index])\n print('smoo2', smoo2[index])\n print('smoo3', smoo3[np.abs(bins-f['f0']).argmin()], '\\n')\n\n u[idx] = self.snr[index]\n s1[idx] = smoo[index]\n s2[idx] = smoo2[index]\n s3[idx] = smoo3[np.abs(bins-f['f0']).argmin()]\n\n fig = plt.figure(figsize=(12, 18))\n plt.rc('font', size=26)\n plt.plot(self.ds.mode_id['f0'], u, label=r'unsmoothed')\n plt.plot(self.ds.mode_id['f0'], s1, label=r'Smoothed with 1D Gaussian')\n plt.plot(self.ds.mode_id['f0'], s2, label=r'Smoothed with uniform filter')\n plt.plot(self.ds.mode_id['f0'], s3, label=r'Smoothed by interpolating')\n plt.xlabel(r'$\\nu / \\mu$Hz')\n plt.ylabel(r'SNR')\n plt.legend(loc='upper right')\n plt.show()\n fig.savefig(os.getcwd() + os.sep + 'DetTest1_plots' + os.sep +'DetTest_Diagnostic_plot1_' + self.ds.epic + '.pdf')\n #sys.exit()", "def main(S, N):\n\n z_binary, z_density = point_count(N, S)\n\n extent = [-2, 2, -2, 2]\n plt.imshow(z_binary, extent=extent, cmap='Greys')\n plt.colorbar()\n plt.show()\n plt.close('all')\n\n log_zd = np.log10(z_density)\n plt.imshow(z_density, extent=extent, cmap='jet')\n plt.colorbar()\n plt.show()\n plt.close('all')\n\n log_zd = np.log10(z_density)\n plt.imshow(log_zd, extent=extent, cmap='jet')\n plt.colorbar()\n plt.show()\n plt.close('all')", "def fix(hobj):\n\n h.execute('create axon[2]', hobj)\n\n for sec in hobj.axon:\n sec.L = 30\n sec.diam = 1\n hobj.axonal.append(sec=sec)\n hobj.all.append(sec=sec) # need to remove this comment\n\n hobj.axon[0].connect(hobj.soma[0], 0.5, 0)\n hobj.axon[1].connect(hobj.axon[0], 1, 0)\n\n h.define_shape()" ]
[ "0.53796166", "0.5276114", "0.5230221", "0.5218842", "0.5199459", "0.5192862", "0.519138", "0.5182484", "0.5131913", "0.5114713", "0.51103306", "0.5051443", "0.50511736", "0.5021749", "0.4996543", "0.49938875", "0.498227", "0.49754798", "0.49719897", "0.49481302", "0.49481302", "0.49306878", "0.49092868", "0.49091834", "0.48976955", "0.4896833", "0.4892219", "0.48880094", "0.4886337", "0.48798558", "0.48753032", "0.4869625", "0.48662367", "0.48638016", "0.48632398", "0.48603", "0.48465392", "0.48364604", "0.4833524", "0.48247984", "0.48168793", "0.48158503", "0.48119104", "0.47993875", "0.47968116", "0.4780762", "0.4773298", "0.47708598", "0.47678468", "0.47671923", "0.47658932", "0.47512788", "0.47470012", "0.47438693", "0.47317156", "0.4729622", "0.47259104", "0.4719057", "0.4708325", "0.47072104", "0.470479", "0.470214", "0.4701865", "0.47016993", "0.46984792", "0.46982434", "0.4696563", "0.46936968", "0.46933487", "0.46915066", "0.46902385", "0.46896097", "0.46873283", "0.46855605", "0.46836555", "0.46815002", "0.4680811", "0.46798608", "0.4679073", "0.4672164", "0.46716815", "0.4667982", "0.46655625", "0.4659155", "0.46585965", "0.46581918", "0.4657373", "0.46553534", "0.4651865", "0.4651691", "0.4650758", "0.46505043", "0.4641676", "0.46407485", "0.46392295", "0.463669", "0.4634858", "0.46337783", "0.46295467", "0.46280324", "0.4627443" ]
0.0
-1
Write LDOS in a file
def write_ldos(x,y,dos,output_file="LDOS.OUT",z=None):
    fd = open(output_file,"w") # open file
    fd.write("# x, y, local density of states\n")
    ii = 0
    for (ix,iy,idos) in zip(x,y,dos): # write everything
        fd.write(str(ix) +" "+ str(iy) + " "+ str(idos))
        if z is not None: fd.write(" "+str(z[ii]))
        fd.write("\n")
        ii += 1
    fd.close() # close file
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_file(l_dta, outputfile):\n l_dta2 = []\n for row in l_dta:\n s = '\\t'.join(row)\n l_dta2.append(s)\n s_dta = \"\\r\\n\".join(l_dta2)\n try:\n with open(outputfile, 'w') as fd:\n fd.write(s_dta)\n except (IOError,) as e:\n tracker()\n return None", "def write_lammps_files(self): \n lammps_file = self.file_name\n with open( lammps_file, 'w' ) as f:\n f.write(self.input_string())", "def write_lammps_potential_file(self):\n raise NotImplementedError", "def file_write(sp_length, sp_period, header, file_name):\n \n #specify filename and inform write\n out_file = open(file_name, \"w\")\n \n #add headers to file from list\n print(\"{0:>15}\".format(header[0]) ,\\\n \"{0:>15}\".format(header[1]) ,\\\n \"{0:>15}\".format(header[2]), file = out_file)\n \n #add data to file form lists \n for i in range(len(sp_length)):\n print(\"{0:>15}\".format(i) ,\\\n \"{0:>15.3f}\".format(sp_length[i]) ,\\\n \"{0:>15.3f}\".format(sp_period[i]), file = out_file)\n \n #close the file\n out_file.close()", "def write_telluric_transmission_to_file(wls,T,outpath):\n import pickle\n print('------Saving teluric transmission to '+outpath)\n with open(outpath, 'wb') as f: pickle.dump((wls,T),f)", "def write_lcat(fname, data):\n from esutil.recfile import Recfile\n\n d=os.path.dirname(fname)\n if not os.path.exists(d):\n print(\"making dir:\",d)\n os.makedirs(d)\n\n if os.path.exists(fname):\n os.remove(fname)\n\n print(\"writing:\",fname)\n with Recfile(fname,'w',delim=' ') as robj:\n robj.write(data)", "def _write(fdesc, data):\n while data:\n count = os.write(fdesc, data)\n data = data[count:]", "def write_file(self, f=None):\n # get model information\n nlay = self.parent.nlay\n dis = self.parent.get_package(\"DIS\")\n if dis is None:\n dis = self.parent.get_package(\"DISU\")\n\n # Open file for writing\n if f is None:\n f_obj = open(self.fn_path, \"w\")\n\n # Item 1: ipakcb, HDRY, IWDFLG, WETFCT, IWETIT, IHDWET, IKVFLAG, IKCFLAG\n f_obj.write(\n f\" {self.ipakcb:9d} {self.hdry:9.3G} {self.iwdflg:9d}\"\n f\" {self.wetfct:9.3G} {self.iwetit:9d} {self.ihdwet:9d}\"\n f\" {self.ikvflag:9d} {self.ikcflag:9d}\\n\"\n )\n\n # LAYCON array\n for layer in range(nlay):\n if self.intercellt[layer] > 0:\n f_obj.write(\n f\"{self.intercellt[layer]:1d} {self.laycon[layer]:1d} \"\n )\n else:\n f_obj.write(f\"0{self.laycon[layer]:1d} \")\n f_obj.write(\"\\n\")\n\n # TRPY, <ANGLEX>\n f_obj.write(self.trpy.get_file_entry())\n transient = not dis.steady.all()\n structured = self.parent.structured\n anis = any(t != 1 for t in self.trpy)\n if (not structured) and anis:\n f_obj.write(self.anglex.get_file_entry())\n\n # <SF1>, <TRAN>, <HY>, <VCONT>, <KV>, <SF2>, <WETDRY>\n for layer in range(nlay):\n if transient:\n f_obj.write(self.sf1[layer].get_file_entry())\n\n if self.ikcflag == 0:\n self._write_hy_tran_vcont_kv(f_obj, layer)\n\n if transient and (self.laycon[layer] in [2, 3, 4]):\n f_obj.write(self.sf2[layer].get_file_entry())\n\n if (self.iwdflg != 0) and (self.laycon[layer] in [1, 3]):\n f_obj.write(self.wetdry[layer].get_file_entry())\n\n # <KSAT> (if ikcflag==1)\n if abs(self.ikcflag == 1):\n f_obj.write(self.ksat.get_file_entry())\n\n f_obj.close()", "def writeWad(path, lumps):\n\n fp = open(path, \"wb\")\n\n # dummy header, will get overwritten later\n fp.write(\"\\x00\" * 12)\n\n # lump data\n offs = []\n for lumpname, lumpdata in lumps:\n offs.append(fp.tell())\n fp.write(lumpdata)\n\n # entry table\n infotableofs = fp.tell()\n for offset, (lumpname, lumpdata) in zip(offs, lumps):\n fp.write(struct.pack(\"<i\", 
offset))\n fp.write(struct.pack(\"<i\", len(lumpdata)))\n fp.write(_wadifyString(lumpname))\n\n # header\n fp.seek(0)\n fp.write(\"PWAD\")\n fp.write(struct.pack(\"<i\", len(lumps)))\n fp.write(struct.pack(\"<i\", infotableofs))\n\n fp.close()", "def save_file_(msl_data_path, filename, content):\n with open(msl_data_path + filename, 'w') as (file_):\n file_.write(content)\n file_.flush()\n file_.close()", "def _write(self):\n f = FortranFile(self.filename,mode='w')\n # Default omnivor binary header\n f.writeInts ( self.data['MK'] , 'i' ) \n f.writeInts ( self.data['itime'] , 'i' ) \n f.writeString ( self.data['version'] ) \n f.writeInts ( self.data['file_id'] , 'i' ) \n f.writeString ( self.data['sversion'] ) \n # Velocity field\n f.writeString ( self.data['stype'] ) \n f.writeInts ( self.data['is_grid'] , 'i' ) \n f.writeInts ( self.data['nCPs'] , 'i' ) \n if self.data['MK'] == 8:\n real_char='d'\n else:\n real_char='f'\n if self.data['is_grid']:\n f.writeInts ( self.data['n1'] , 'i' ) \n f.writeInts ( self.data['n2'] , 'i' ) \n f.writeInts ( self.data['n3'] , 'i' ) \n f.writeInts ( self.data['is_straight'] , 'i' ) \n f.writeReals ( self.data['v1'] , real_char ) \n f.writeReals ( self.data['v2'] , real_char ) \n f.writeReals ( self.data['v3'] , real_char ) \n\n CPs = self.data['CPs'].flatten(order = 'F')\n Utot = self.data['Utot'].flatten(order = 'F')\n f.writeReals(CPs,real_char)\n f.writeReals(Utot,real_char)", "def text_write_data(file, l):\r\n file = open(file, \"a+\")\r\n for name in l:\r\n file.write(str(name) + \"\\n\")\r\n file.close", "def write(filename):\n print(uc.write(filename))", "def write_to_file_z(path):\n path1 = path + \"/z_Macros\"\n if not os.path.exists(path1):\n os.mkdir(path1)\n for e in range(int(e_steps)+1):\n filename = \"x0y0z%ske%s.mac\" %(dz*z + z_min, e*de + e_min)\n path = path1\n fullpath = os.path.join(path, filename)\n f = open(fullpath, \"w\")\n f.write('/rat/physics_list/OmitMuonicProcesses true\\n')\n f.write(\"/rat/physics_list/OmitHadronicProcesses true \\n\")\n f.write(\"\\n\")\n f.write(\"\\n\")\n f.write('/rat/db/set DETECTOR geo_file \"geo/snoplus.geo\"\\n')\n f.write('/rat/db/set GEO[scint] material \"labppo_scintillator\"\\n')\n f.write('/rat/db/set DAQ dqxx_info 0 \\n')\n f.write(\"/run/initialize \\n\")\n f.write(\"\\n\")\n f.write(\"\\n\")\n f.write(\"/rat/proc frontend\\n\")\n f.write(\"/rat/proc trigger\\n\")\n f.write(\"/rat/proc eventbuilder\\n\")\n f.write(\"/rat/proc count\\n\")\n f.write(\"/rat/procset update 100\\n\")\n f.write(\"/rat/proc calibratePMT\\n\")\n f.write(\"/rat/proc scintFitter\\n\")\n f.write(\"/rat/proclast outroot\\n\")\n f.write('/rat/procset file \"x0y0z%ske%s.root\"\\n' %(dz*z + z_min, e*de + e_min))\n f.write(\"\\n\")\n f.write(\"\\n\")\n f.write(\"/generator/add combo gun:point:poisson\\n\")\n f.write(\"# want random, isotropic momentum distribution; energy given in MeV\\n\")\n f.write(\"/generator/vtx/set e- 0 0 0 %s\\n\" %(e*de + e_min))\n f.write(\"# position given in Cartesians, relative to detector center, in mm\\n\")\n f.write(\"/generator/pos/set 0 0 %s\\n\" % (dz*z + z_min))\n f.write(\"/generator/rate/set 1\\n\")\n f.write(\"\\n\")\n f.write(\"\\n\")\n f.write(\"/rat/run/start %s\\n\" %(n))\n f.write(\"exit\")", "def writeDomainFile():\n writeTemplate(localTemplate)", "def write_file_simple(self,filename):\n\n output = open(filename,\"w\")\n # write header\n output.write(\"# %1s %3s %22s %6s %22s\\n\"%(\"l\",\"n\",\"nu_theo (muHz)\",\"unused\",\"Inertia\"))\n for i in range(self.modes.shape[0]):\n 
output.write(\" %1d %3d %22.15e 0.0 %22.15e\\n\"%( \\\n self.modes[\"l\"][i], \\\n self.modes[\"n\"][i], \\\n self.modes[\"freq\"][i]*self.glb[ifreq_ref], \\\n self.modes[\"inertia\"][i]))\n output.close()", "def writeLronacPvlFile(outputPath, isHalfRes):\n\n if os.path.exists(outputPath):\n print outputPath + ' already exists, using existing file.'\n return True\n else: # Need to write the file\n print 'Generating LRONAC compatible .pvl file ' + outputPath\n\n f = open(outputPath, 'w')\n\n f.write('Object = IdealInstrumentsSpecifications\\n');\n f.write(' UserName = auto\\n');\n f.write(' Created = 2013-07-18T13:42:00\\n');\n f.write(' LastModified = 2013-07-18T13:42:00\\n\\n');\n f.write(' Group = \"LUNAR RECONNAISSANCE ORBITER/NACL\"\\n');\n\n if not isHalfRes: # Full resolution camera\n f.write(' TransY = 16.8833\\n')\n f.write(' ItransS = -2411.9\\n')\n f.write(' TransX = 0.6475\\n')\n f.write(' ItransL = -92.5\\n')\n f.write(' DetectorSamples = 10000\\n')\n else: # Half resolution camera\n f.write(' TransY = 16.8833\\n')\n f.write(' ItransS = -4823.8\\n') # Halved\n f.write(' TransX = 0.6475\\n')\n f.write(' ItransL = -185\\n') # Halved\n f.write(' DetectorSamples = 5000\\n') # Halved\n\n f.write(' End_Group\\n\\n')\n f.write('End_Object\\n')\n f.write('End')\n\n f.close()", "def file(self,file):\n self.lib.lammps_file(self.lmp,file.encode('utf-8'))", "def write_to_file(self):\n self.calibration_directory.mkdir(parents=True, exist_ok=True)\n with self.file_path.open(mode=\"w\") as file:\n \"\"\"\n ------------------\n Data virtual object\n ------------------\n \"\"\"\n file.write(\"Data received from the hololens:\\n\")\n file.write(f'{\"\".join(self.hololens_message)}\\n')\n file.write(\"Position and Rotation received from hololens \\n\")\n file.write(\"Pay attention: Left handed KOS and quaternion with scalar last\\n\")\n # for i in self.calibration_position:\n position = \" \".join([str(x) for x in self.calibration_position])\n file.write(position)\n file.write(\"\\n\")\n rotation = \" \".join([str(x) for x in self.calibration_rotation])\n file.write(rotation)\n file.write(\"\\n\")\n \"\"\"\n ------------------\n Holotracker\n ------------------\n \"\"\"\n file.write(f\"Holotracker Pose: Tracker->LH\\n\")\n file.write(\"x y z\\n\")\n position = \" \".join([str(x) for x in self.holo_tracker.position])\n file.write(f\"{position}\\n\")\n file.write(\"w i j k\\n\")\n rotation = \" \".join([str(x) for x in self.holo_tracker.rotation])\n file.write(f\"{rotation}\\n\")\n file.write(\"Homogenous matrix of Holo Tracker\\n\")\n np.savetxt(file, self.holo_tracker.get_pose_as_hom_matrix())\n file.write(\"\\n\")\n \"\"\"\n ------------------\n Calibrationtracker\n ------------------\n \"\"\"\n file.write(f\"Calibrationtracker Pose: Tracker->LH\\n\")\n file.write(\"x y z\\n\")\n position = \" \".join([str(x) for x in self.calibration_tracker.position])\n file.write(f\"{position}\\n\")\n file.write(\"w i j k\\n\")\n rotation = \" \".join([str(x) for x in self.calibration_tracker.rotation])\n file.write(f\"{rotation}\\n\")\n file.write(\"Homogenous matrix of Calibration Tracker\\n\")\n np.savetxt(file, self.calibration_tracker.get_pose_as_hom_matrix())\n file.write(\"\\n\")\n \"\"\"\n ------------------\n Calibration object used\n ------------------\n \"\"\"\n file.write(f\"CalibrationObject used : \\n{self.calibration_object}\")\n file.write(\"\\n\")\n \"\"\"\n ------------------\n Point registration service + reprojection error\n ------------------\n \"\"\"\n file.write(\"\\nMarix 
LH->Virtual\\n\")\n np.savetxt(file, self.hom_LH_to_virtual,)\n file.write(\"\\nReprojection error\\n\")\n file.write(f\"{self.reprojection_error}\")\n file.write(\"\\n\")\n \"\"\"\n ------------------\n Virtual center to Tracker\n ------------------\n \"\"\"\n file.write(\"\\nMatrix Virtual->Tracker\\n\")\n np.savetxt(file, self.hom_tracker_to_virtual)\n file.write(\"\\n\")\n \"\"\"\n ------------------\n Point Data which was used for matching\n ------------------\n \"\"\"\n file.write(\"POINTS THAT WERE MATCHED\\n\\n\")\n file.write(\"Virtual points. Already transformed into Right Hand KOS \\n\")\n np.savetxt(file, self.virtual_points)\n file.write(\"\\n\")\n file.write(\"Real points\\n\")\n np.savetxt(file, self.real_points)", "def create_lso_file(config):\n xst_dir = os.path.join(config[\"build_dir\"], XST_DIR)\n lso_fn = os.path.join(xst_dir, XST_PROJECT_LSO)\n\n xst_abs_dir = create_xst_dir(config)\n fn = os.path.join(xst_abs_dir, XST_PROJECT_LSO)\n #print \"lSO filename: %s\" % fn\n fp = open(fn, \"w\")\n #fp.write(\"DEFAULT_SEARCH_ORDER%s\" % os.linesep)\n fp.write(\"work%s\" % os.linesep)\n fp.close()\n \n return lso_fn\n #return fn", "def writetofile(self,direction,value):\r\n output = str(\"{},{} \\n\".format(direction,value))\r\n self.new_file.write(output)", "def write_potcar(self,suffix = \"\"):\n import tempfile\n potfile = open('POTCAR'+suffix,'w')\n for filename in self.ppp_list:\n if filename.endswith('R'):\n for line in open(filename, 'r'):\n potfile.write(line)\n elif filename.endswith('.Z'):\n file_tmp = tempfile.NamedTemporaryFile()\n os.system('gunzip -c %s > %s' % (filename, file_tmp.name))\n for line in file_tmp.readlines():\n potfile.write(line)\n file_tmp.close()\n potfile.close()", "def write_vasp(mol, filename='XYZ', suffix='.xyz', long_format=True):\n filname_suffix = ''.join([filename, suffix])\n with open(filname_suffix, \"w\") as f:\n f.write(_write_string(cell, long_format))", "def spew(path, data):\n with open(path, 'w+') as f:\n f.write(data)", "def save_file(msl_data_path, filename, content):\n with open(msl_data_path + filename, 'wb') as (file_):\n file_.write(content)\n file_.flush()\n file_.close()", "def _writeline(self, data):\n self._write(data+chr(13)+chr(10))", "def writefits(self,filename, z,lmu):\n t = Table([z,self.ebl_array(z,lmu)], names = ('REDSHIFT', 'EBL_DENS'))\n t2 = Table()\n t2['WAVELENGTH'] = Column(lmu, unit = 'micrometer')\n\n hdulist = fits.HDUList([fits.PrimaryHDU(),fits.table_to_hdu(t),fits.table_to_hdu(t2)])\n\n hdulist[1].name = 'NUINU_VS_Z'\n hdulist[2].name = 'WAVELENGTHS'\n\n hdulist.writeto(filename, overwrite = True)\n return", "def write_po(self, outputfile):\n raise NotImplementedError(\n \"Writing to this file format is not yet implemented\")", "def save_to_file(self, tojuliet):\n if self.lc.time[0] < 1e4:\n self.lc.time += 2457000\n ascii.write([self.lc.time, self.lc.flux, self.lc.flux_err], 'TIC%d.dat' % self.TIC,\n format='fixed_width_no_header', delimiter=' ', overwrite=True)\n if tojuliet:\n ascii.write([self.lc.time, self.lc.flux, self.lc.flux_err,\n ['TESS' for _ in self.lc.time]], 'TIC%d_juliet.dat' % self.TIC,\n format='fixed_width_no_header', delimiter=' ', overwrite=True)", "def write_initdata(xy0, v0, NL, BND, h, beta, outdir):\n dio.ensure_dir(outdir)\n M = np.hstack((xy0, v0))\n np.savetxt(outdir + 'NL.txt', NL, fmt='%i', delimiter=',', header='NL (Neighbor List)')\n np.savetxt(outdir + 'BND.txt', BND, fmt='%i', header='BND (Boundary List)')\n np.savetxt(outdir + 'xyv0.txt', M, delimiter=',', header='xy0 
(initial positions) v0 (initial velocities)')\n with open(outdir + 'h.txt', \"w\") as hfile:\n hfile.write(\"# h (time step) \\n{0:4f}\".format(h))\n if beta != 'none':\n with open(outdir + 'beta.txt', \"w\") as betafile:\n betafile.write(\"# beta (damping coeff) \\n{0:4f}\".format(beta))", "def write_opal(self, file_name):\n \n return 0", "def _amber_write_input_file(self):\n logger.debug(\"Writing {}\".format(self.input))\n with open(os.path.join(self.path, self.input), \"w\") as f:\n f.write(\"{}\\n\".format(self.title))\n f.write(\" &cntrl\\n\")\n self._write_dict_to_mdin(f, self.cntrl)\n\n if self.ewald is not None:\n f.write(\" &ewald\\n\")\n self._write_dict_to_mdin(f, self.ewald)\n\n if self.cntrl[\"nmropt\"] == 1:\n if self.wt is not None:\n for line in self.wt:\n f.write(\" \"+line+\"\\n\")\n f.write(\" &wt type = 'END', /\\n\")\n if self.restraint_file is not None:\n f.write(\"DISANG = {}\\n\".format(self.restraint_file))\n f.write(\"LISTOUT = POUT\\n\\n\")\n if self.group is not None:\n f.write(\"{:s}\".format(self.group))", "def writeLMIn(self, line1, line2, line3):\n\n LMIn = open(self.LMInputFName, 'w')\n\n if self.rawDataOutputFlag:\n line2 += '-R'\n\n LMIn.write(line1 + '\\n' + line2 + '\\n' + line3)\n LMIn.close()", "def append_trl_file(trlfile, drizfile, clean=True):\n if not os.path.exists(drizfile):\n return\n # Open already existing trailer file for appending\n ftrl = open(trlfile, 'a')\n # Open astrodrizzle trailer file\n fdriz = open(drizfile)\n\n # Read in drizzle comments\n _dlines = fdriz.readlines()\n\n # Append them to CALWF3 trailer file\n ftrl.writelines(_dlines)\n\n # Close all files\n ftrl.close()\n fdriz.close()\n\n if clean:\n # Now, clean up astrodrizzle trailer file\n os.remove(drizfile)", "def write_to_file(data, filename):\n fimg = fits.HDUList()\n fimghdu = fits.PrimaryHDU()\n fimghdu.data = data\n fimg.append(fimghdu)\n fimg.writeto(filename, overwrite=True)\n print(' wrote output data to: ', filename)", "def write_ldat_header(self, datapath):\n contents = {}\n contents['ldat_type'] = self.ldat_type\n contents['filenametime'] = self.filenametime\n contents['station_id'] = self.station_id\n contents['rcusetup_cmds'] = self.rcusetup_cmds\n contents['beamctl_cmds'] = self.beamctl_cmds\n contents['rspctl_cmds'] = self.rspctl_cmds\n if self.caltabinfos != []:\n contents['caltabinfos'] = self.caltabinfos\n if self.septonconf:\n contents['septonconf'] = self.septonconf\n\n if not self.isLOFARdatatype(self.ldat_type):\n raise ValueError(\"Unknown LOFAR statistic type {}.\"\\\n .format(self.ldat_type))\n xtra = ''\n if self.ldat_type == 'acc':\n xtra = '_512x192x192'\n ldat_header_filename = (self.filenametime + '_' + self.ldat_type\n + xtra + '.h')\n with open(os.path.join(datapath, ldat_header_filename), 'w') as f:\n f.write('# LCU obs settings, header file\\n')\n f.write('# Header version'+' '+self.headerversion+'\\n')\n yaml.dump(contents, f, default_flow_style=False, width=1000)", "def _add_dimensions_to_file(locus_f):\n ld_lines = []\n i = 0\n with open(locus_f) as ld_file:\n for i, line in enumerate(ld_file):\n ld_lines.append(line)\n no_lines = i + 1\n file_out = locus_f.split('.matrix')[0] + '.LD'\n with open(file_out, 'w' ) as paintor_ld:\n paintor_ld.write(str(no_lines) + ' ' + str(no_lines) + '\\n')\n for line in ld_lines:\n paintor_ld.write(line)", "def write(self, filename):\n pass", "def write(self, filename):\n pass", "def write_tdm_to_file(pkt):\n\n global g_tdm_cnt\n global g_binfile\n\n if UDP in pkt:\n if pkt[UDP].dport == TDM_PORT:\n f = 
open(g_binfile, 'a+b')\n f.write(bytes(pkt[UDP].payload))\n f.close()\n g_tdm_cnt += 1\n print(\"\\rTDM Count: {0}. CTRL-C to quit\".format(g_tdm_cnt), end=\" \")", "def writeto(self, fileout):\n \n dump_pkl(self.data, fileout)", "def writeInput_for_LAMMPS(rd, listAtoms, filename):\n #f=open(\"geo.kirigami_d0.0_\"+str(rd),\"w+\")\n f=open(filename+str(rd),\"w+\")\n f.write(\"\\n\")\n f.write(\"%d atoms\\n\" %len(listAtoms))\n f.write(\"1 atom types\\n\")\n f.write(\"\\n\")\n f.write(\"%f\\t%f xlo xhi\\n\" %(xlo-1, xhi+1))\n f.write(\"%f\\t%f ylo yhi\\n\" %(ylo-1, yhi+1))\n f.write(\"%f\\t%f zlo zhi\\n\" %(zlo-1, zhi+1))\n f.write(\"\\n\")\n f.write(\"Atoms\\n\")\n f.write(\"\\n\")\n for i in range (len(listAtoms)):\n f.write(\"%d\\t1\\t%f\\t%f\\t%f\\n\" %(i+1, listAtoms[i][0], listAtoms[i][1], listAtoms[i][2]))\n f.close()", "def writeAD(self):\n ofname = self.ad_file\n ofh = open(ofname,'w')\n\n for line in self.lines_ad:\n f = line.strip().split()\n if (len(f) > 1 and f[1] == 'WindFile'):\n if (self.wind_file != None):\n f[0] = \"\\\"\"+self.wind_file+\"\\\"\"\n line = unsplit(f)\n ofh.write(line)\n\n ofh.close()\n\n # now also copy relevant airfoil files, if path is relative\n tmp = self.af_dict['polar_files'][0]\n if not os.path.isabs(tmp):\n tmp = tmp.split(\"\\\\\")\n tmp = tmp[0].split(\"/\")[0]\n # tmp is now root of relative path to airfoils\n dst = tmp\n src = os.path.join(self.fst_dir, tmp)\n print \"copying aerodata from \", src, \"TO \", dst\n if (not os.path.isdir(dst)):\n shutil.copytree(src, dst)\n\n # copy of relevant wind file in separate function writeWnd", "def _write_to_file(self):\n with open(self.filename + \".ir\", \"w+\") as file:\n file.writelines(\n [\"\\n\" + l if p != 0 else l for p, l in enumerate(self.lines)]\n )", "def write_file(self, lst_of_palidroms: list, result_file: str):\n with open(result_file, 'w', encoding='utf-8', errors='ignore') as result:\n for word in lst_of_palidroms:\n result.write(word + '\\n')", "def render_ldl(variables, output):\n\n f = open(output, 'w')\n\n # Include header\n f.write(\"#include \\\"ldl.h\\\"\\n\\n\")\n\n # Write ldl_lsolve\n write_ldl_lsolve(f, variables)\n\n # Write ldl_ltsolve\n write_ldl_ltsolve(f, variables)\n\n # Write ldl_dinvsolve\n write_ldl_dinvsolve(f, variables)\n\n # Write ldl_perm\n write_ldl_perm(f, variables)\n\n # Write ldl_permt\n write_ldl_permt(f, variables)\n\n f.close()", "def write(self, fname):\n pass", "def unix2dos(source, target = None):\n\tlines = open(adaptPath(source),\"r\").readlines()\n\t\n\tif target == None:\n\t\ttarget = source\n\n\toutput = open(adaptPath(target),\"wb\")\n\tfor line in lines:\n\t\toutput.write(line[:-1])\n\t\toutput.write(\"\\x0D\")\n\t\toutput.write(\"\\x0A\")", "def writeBlade(self):\n\n ofname = self.blade1_file ### note, assuming they're all the same\n ofh = open(ofname,'w')\n\n for line in self.lines_blade:\n ofh.write(line)\n ofh.close()", "def write_to_file(self, filename):\n self.octree.write(str.encode(filename))\n print(\"Save octomap to \"+filename)", "def write_data(xy, v, KL, iteration, h, outdir, fname):\n if v.size > 0:\n M = np.hstack((xy, v))\n xyvdir = outdir + 'xyv/'\n else:\n M = xy\n xyvdir = outdir + 'xy/'\n\n itstr = '%08d' % iteration\n dio.ensure_dir(xyvdir)\n dio.ensure_dir(outdir + 'KL/')\n if np.shape(M)[1] == 2:\n # data is 2D, just positions\n np.savetxt(xyvdir + fname + '_xy_' + itstr + '.txt', M, fmt='%.18e', delimiter=',',\n header='x,y (t=' + str(h * iteration) + ')')\n elif np.shape(M)[1] == 4:\n # data is 2D with velocities\n 
np.savetxt(xyvdir + fname + '_xyv_' + itstr + '.txt', M, fmt='%.18e', delimiter=',',\n header='x,y,vx,vy (t=' + str(h * iteration) + ')')\n elif np.shape(M)[1] == 6:\n # data is 3D with velocities\n np.savetxt(xyvdir + fname + '_xyv_' + itstr + '.txt', M, fmt='%.18e', delimiter=',',\n header='x,y,z,vx,vy,vz (t=' + str(h * iteration) + ')')\n elif np.shape(M)[1] == 8:\n # data is 2D with moving rest/pivor positions and displacements\n np.savetxt(xyvdir + fname + '_xyv_' + itstr + '.txt', M, fmt='%.18e', delimiter=',',\n header='X,Y,dX,dY,vX,vY,vdX,vdY (t=' + str(h * iteration) + ')')\n elif np.shape(M)[1] == 10:\n # data is 2D plus euler angles\n np.savetxt(xyvdir + fname + '_xyv_' + itstr + '.txt', M, fmt='%.18e', delimiter=',',\n header='x,y,theta,phi,psi,vx,vy,vtheta,vphi,vpsi (t=' + str(h * iteration) + ')')\n\n if KL.size > 0:\n '''KL may be changing for timestep to timestep, so save it for each one'''\n np.savetxt(outdir + 'KL/' + fname + '_KL_' + itstr + '.txt', KL, fmt='%.18e', delimiter=',',\n header='KL (t=' + str(h * iteration) + ')')\n else:\n '''KL is static. Refer to KL.txt saved in the datadir'''\n pass", "def to_file(self, file_path, smirnoff_data):\n pass", "def write(cls, vas):\n with open(Y, 'w') as f_i:\n for items in vas:\n f_i.write('%s ' % items)\n print(\"File written successfully. Check out \\\"output.txt\\\" file\")\n f_i.close()", "def _write_antti_location(lat, lon, rad, label, location_file):\n if location_file.split('.')[-1] == 'gz':\n ff = gzip.open(location_file, 'w')\n else:\n ff = open(location_file, 'w')\n\n ff.write(\"%% Geographic coordinates of the geoelectric field distribution \" +\n \" Data produced on %s\\n\"%(dt.datetime.utcnow()))\n ff.write(\"%% \\n\")\n ff.write(\"%% This data comes together with files DateTime.txt, B?.txt,\" +\n \" and Stations.txt. \\n\")\n ff.write(\"%% \\n\")\n ff.write(\"%% Contact: \\n\")\n ff.write(\"%% \\n\")\n ff.write(\"%% The format of the data is as follows:\\n\")\n ff.write(\"%% \\n\")\n ff.write(\"%% lat1 lon1 rad1 label1 \\n\")\n ff.write(\"%% lat2 lon2 rad2 label2 \\n\")\n ff.write(\"%% . . . \\n\")\n ff.write(\"%% . . . \\n\")\n ff.write(\"%% . . . 
\\n\")\n ff.write(\"%% \\n\")\n ff.write(\"\\n\")\n\n for l in range(len(lat)):\n ff.write(\"%02.2f %02.2f %08e %s\\n\"%(lat[l], lon[l], rad[l], label[l]))\n\n ff.close()", "def list_to_file(l, file_name):\r\n fw = open(file_name, 'w', encoding = 'utf-8')\r\n fw.write('\\n'.join(l))\r\n fw.close()", "def save_lod_files(files, filename, path=None, start_index=0):\n path = path_formatter(path)\n for i, target in enumerate(files):\n with open(\"{}{}_{}.mtxt\".format(path, filename, i + start_index),\n \"w\") as f:\n f.write(str(target))", "def write_ctl_file(self):\n # Make sure all paths are relative to the working directory\n try:\n self._set_rel_paths()\n except (AttributeError, ValueError) as error:\n raise error\n with open(self.ctl_file, 'w') as ctl_handle:\n ctl_handle.write(\"seqfile = {0}\\n\".format(self._rel_alignment))\n ctl_handle.write(\"outfile = {0}\\n\".format(self._rel_out_file))\n for option in self._options.items():\n if option[1] == None:\n # If an option has a value of None, there's no need\n # to write it in the control file; it's normally just\n # commented out.\n continue\n ctl_handle.write(\"{0} = {1}\\n\".format(option[0], \n option[1]))", "def __writeToFile(self, filePath, lst): \n \n if not self.outDir is None: \n filePath = os.path.join(self.outDir, filePath) \n \n open(filePath,'a').writelines(lst)", "def WriteDiary():\r\n from datetime import datetime\r\n\r\n diaryname = _getPOSCAR()\r\n diary = open(diaryname, \"w\")\r\n diary.write('***' + str(datetime.now()) + '***' + '\\n')\r\n diary.write('## ' + diaryname + '\\n')\r\n diary.close()\r\n _CopyWriteDiary('Readme', diaryname)\r\n _CopyWriteDiary('INCAR', diaryname)\r\n _CopyWriteDiary('KPOINTS', diaryname)\r\n _CopyWriteDiary('POSCAR', diaryname)\r\n _CopyWriteDiary('POTCAR', diaryname)\r\n os.rename(diaryname, diaryname + '.md')", "def write( data ):", "def __do_write(filestream, seq, header=None):\n if header is not None:\n filestream.write(header + '\\n') # double check newlines\n try:\n for line in chunks(seq, 70):\n filestream.write(line + '\\n')\n except Exception as e:\n print(e)", "def write_to_file_x(path):\n path1 = path + \"/x_Macros\"\n if not os.path.exists(path1):\n os.mkdir(path1)\n for e in range(int(e_steps)+1):\n filename = \"x%sy0z0ke%s.mac\" %(dx*x + x_min, e*de + e_min)\n path = path1\n fullpath = os.path.join(path, filename)\n f = open(fullpath, \"w\")\n f.write('/rat/physics_list/OmitMuonicProcesses true\\n')\n f.write(\"/rat/physics_list/OmitHadronicProcesses true \\n\")\n f.write(\"\\n\")\n f.write(\"\\n\")\n f.write('/rat/db/set DETECTOR geo_file \"geo/snoplus.geo\"\\n')\n f.write('/rat/db/set GEO[scint] material \"labppo_scintillator\"\\n')\n f.write('/rat/db/set DAQ dqxx_info 0\\n')\n f.write(\"/run/initialize \\n\")\n f.write(\"\\n\")\n f.write(\"\\n\")\n f.write(\"/rat/proc frontend\\n\")\n f.write(\"/rat/proc trigger\\n\")\n f.write(\"/rat/proc eventbuilder\\n\")\n f.write(\"/rat/proc count\\n\")\n f.write(\"/rat/procset update 100\\n\")\n f.write(\"/rat/proc calibratePMT\\n\")\n f.write(\"/rat/proc scintFitter\\n\")\n f.write(\"/rat/proclast outroot\\n\")\n f.write('/rat/procset file \"x%sy0z0ke%s.root\"\\n' %(dx*x + x_min, e*de + e_min))\n f.write(\"\\n\")\n f.write(\"\\n\")\n f.write(\"/generator/add combo gun:point:poisson\\n\")\n f.write(\"# want random, isotropic momentum distribution; energy given in MeV\\n\")\n f.write(\"/generator/vtx/set e- 0 0 0 %s\\n\" %(e*de + e_min))\n f.write(\"# position given in Cartesians, relative to detector center, in mm\\n\")\n 
f.write(\"/generator/pos/set %s 0 0\\n\" % (dx*x + x_min))\n f.write(\"/generator/rate/set 1\\n\")\n f.write(\"\\n\")\n f.write(\"\\n\")\n f.write(\"/rat/run/start %s\\n\" %(n))\n f.write(\"exit\")", "def write_data():", "def save(self,fout):\n\n # only process 0 should save\n if COMM_WORLD.rank == 0:\n\n # The file format is:\n # L,nterms,masks,signs,coefficients\n # where each is just a binary blob, one after the other.\n\n # do this first so that we haven't already created the file if\n # it fails for some reason\n msc = self.get_MSC()\n\n with open(fout,mode='wb') as f:\n\n # write the chain length to the file. This is the only parameter\n # that we save other than the MSC representation.\n L = self.L\n if L is None:\n raise ValueError('L must be set before saving to disk.')\n\n # cast it to the type that C will be looking for\n int_t = msc.dtype[0].type\n L = int_t(L)\n\n f.write(L.tobytes())\n\n # write out the length of the MSC representation\n size = int_t(msc.size)\n f.write(size.tobytes())\n\n f.write(msc['masks'].tobytes())\n f.write(msc['signs'].tobytes())\n f.write(msc['coeffs'].tobytes())\n\n COMM_WORLD.barrier()", "def ledgerWrite(self):\n if(self.back == 'y'):\n open(\"ledger.txt\",'a').write(str(num_files) + ' ' + str(self.file_count) + ' ' + self.file_path + ' ' + \"y\" + '\\n')\n else:\n open(\"ledger.txt\",'a').write(str(num_files) + ' ' + str(self.file_count) + ' ' + self.file_path + ' ' + \"n\" + '\\n')\n dbUp.upload_file(\"ledger.txt\", '/ledger.txt')", "def write_to_file_obj(self, dir, soup_obj):\n\t\tif not os.path.exists(dir):\n\t\t\twith open(dir, 'a') as f:\n\t\t\t\tfor obj in soup_obj:\n\t\t\t\t\t#print(verse.text)\n\t\t\t\t\tf.write(obj.text)\n\t\t\t\tf.write('\\n') # last line missing line break", "def write_file(file_, maxn, maxl):\n width = len(str(maxl))\n with open(file_, 'w') as out:\n for i in range(1, maxl+1):\n out.write(f'[{i:{width}}] ' + write_line(maxn) + '\\n')", "def Dump_File(output):\n now = datetime.now()\n log_date = now.strftime(\"%Y-%m-%d\")\n log_file = str(log_date + \"-cisco_output.txt\")\n try:\n os.mknod(log_file)\n with open(log_file, 'wa') as f:\n f.write(output)\n f.write(\"\\n\")\n f.close()\n except OSError as err:\n with open(log_file, 'wa') as f:\n f.write(output)\n f.write(\"\\n\")\n f.close()", "def write(self, filename): # real signature unknown; restored from __doc__\n pass", "def write_file(poet, info_dict):\r\n\r\n filename = SAVE_PATH + '/' + poet + '/' + str(info_dict['id']) + '_'+ str(info_dict['pagenum']) \\\r\n + '_' + info_dict['id2'] +'_' + info_dict['ord2'] \\\r\n + '_' + info_dict['id3'] + '_' + info_dict['ord3'] \\\r\n + '_' + info_dict['id4'] + '_' + info_dict['ord4'] + '.txt'\r\n\r\n print(filename)\r\n with open(filename, 'w', encoding='utf-16') as f:\r\n txt = ','.join([str(info_dict[k]) for k in KEYS ])\r\n txt = txt + '\\n' + '\\n'.join([x for x in info_dict['beyts']])\r\n f.write(txt)\r\n\r\n\r\n locale.setlocale(locale.LC_ALL, '')\r\n DELIMITER = ';'# if locale.localeconv()['decimal_point'] == ',' else ','\r\n\r\n list_of_lists = [[info_dict[k] for k in KEYS]]\r\n with open('D:/poem/molana.csv', 'a', newline='', encoding='utf-16') as csvfile:\r\n\r\n writer = csv.writer(csvfile, delimiter=DELIMITER)\r\n writer.writerows(list_of_lists)", "def save_lidar(data, data_directory, loc):\n t0 = dt.datetime(1970, 1, 1) + dt.timedelta(seconds=data[0][1])\n t0_day = dt.datetime(t0.year, t0.month, t0.day)\n secs = (t0_day - dt.datetime(1970, 1, 1)).total_seconds()\n\n t = [i[1] - secs for i in data]\n meas = [i[2] 
for i in data]\n try:\n with open(os.path.join(data_directory, loc, 'lidar', t0_day.strftime('%Y-%m-%d.txt')), 'a+') as f:\n for i, j in zip(t, meas):\n f.write(f'{i} {j}\\n')\n except FileNotFoundError:\n print(\"Data directory is bad. Try again. \")\n sys.exit(0)", "def write(data):", "def writeText2File(loginfo, mfile):\n with open(mfile,'a') as f:\n f.writelines('%s\\n' %loginfo)\n f.close()", "def add_data_to_file(data, mode, filename):\n path = \"/etc/atuned/webserver/\" + filename + \".txt\"\n file_handle = open(path, mode)\n file_handle.write(str(data))\n file_handle.write(\"\\n\")\n file_handle.close()", "def exportBulletFile(*argv):", "def makeSpkSetupFile(leapSecondFilePath, outputPath):\n\n # If the file already exists, delete it and rewrite it.\n if os.path.exists(outputPath):\n os.remove(outputPath)\n\n# print 'Generating LRONAC compatible .pvl file ' + halfResFilePath\n f = open(outputPath, 'w')\n f.write(\"\\\\begindata\\n\")\n f.write(\"INPUT_DATA_TYPE = 'STATES'\\n\")\n f.write(\"OUTPUT_SPK_TYPE = 13\\n\")\n f.write(\"OBJECT_ID = -85\\n\") # LRO\n f.write(\"CENTER_ID = 301\\n\") # Moon\n f.write(\"REF_FRAME_NAME = 'J2000'\\n\")\n f.write(\"PRODUCER_ID = 'Lronac Pipeline'\\n\")\n f.write(\"DATA_ORDER = 'epoch x y z vx vy vz'\\n\")\n f.write(\"DATA_DELIMITER = ','\\n\")\n f.write(\"LEAPSECONDS_FILE = '\" + leapSecondFilePath + \"'\\n\")\n f.write(\"LINES_PER_RECORD = 1\\n\")\n f.write(\"TIME_WRAPPER = '# ETSECONDS'\\n\")\n #f.write(\"EPOCH_STR_LENGTH = 16\\n\")\n f.write(\"INPUT_DATA_UNITS = ('ANGLES=DEGREES' 'DISTANCES=km')\\n\")\n f.write(\"POLYNOM_DEGREE = 11\\n\")\n f.write(\"SEGMENT_ID = 'SPK_STATES_13'\\n\")\n# f.write(\"INPUT_DATA_FILE = 'spkDataFile.txt'\")\n# f.write(\"OUTPUT_SPK_FILE = '/home/smcmich1/testSpkFile.bsp'\")\n f.write(\"\\\\begintext\\n\")\n f.close()", "def write_into_file(name, liste):\n file = open(name, \"w\")\n for item in liste:\n file.write(item)\n file.write('\\n')\n file.close()", "def write_conll(conll_file, sents):\n with codecs.open(conll_file, mode = 'w', errors = 'ignore', encoding = 'utf-8') as ofile:\n for sent in sents:\n if sent:\n for element in sent:\n word = element[0]\n tag = element[1]\n ofile.write(str(tag) + '\\t' + str(word) + '\\n')\n ofile.write('\\n')", "def FileWrite(offset, buf):\r\n return _hiew.HiewGate_FileWrite(offset, buf)", "def slabldos(h,energies=np.linspace(-1.0,1.0,40),delta=None,nk=40):\n if h.dimensionality!=2: raise # nope\n ds = ldosmap(h,energies=energies,delta=delta,nk=nk)\n if len(ds[0])!=len(h.geometry.z): \n print(\"Wrong dimensions\",len(ds[0]),len(h.geometry.z))\n raise\n f = open(\"DOSMAP.OUT\",\"w\")\n f.write(\"# energy, index, DOS, zposition\\n\")\n for ie in range(len(energies)):\n for iz in range(len(h.geometry.z)):\n f.write(str(energies[ie])+\" \")\n f.write(str(iz)+\" \")\n f.write(str(ds[ie,iz])+\" \")\n f.write(str(h.geometry.z[iz])+\"\\n\")\n f.close()\n return energies,np.transpose(ds) # retunr LDOS ", "def addTrailer(file):\n program = '\\t\\t</coordinates>\\n'\n program += '\\t</LineString>\\n'\n program += '\\t</Placemark>\\n'\n program += '</Document>\\n'\n program += '</kml>\\n'\n file.write(program)", "def write_bmad_linac_phasing_lines(filePath='linac_settings.bmad', epics=None, verbose=False):\n lines = bmad_linac_phasing_lines(epics)\n with open(filePath, 'w') as f:\n for l in lines:\n f.write(l+'\\n')\n if verbose:\n print('Written:', filePath)", "def write(self, s):\n ind = \" \" * self._ind\n l = None\n for l in s.splitlines(True):\n if not self._atEol:\n self.f.write(ind)\n 
self.f.write(l)\n self._atEol = False\n self._atEol = l and l[-1] != \"\\n\"", "def test_writetofile():\n sat_before_nuc = \\\n t('circumstance', [\n ('S', ['sat first']),\n ('N', ['nuc second'])\n ])\n\n tempfile = NamedTemporaryFile()\n rstc.write_rstlatex(sat_before_nuc, tempfile.name)\n\n with open(tempfile.name, 'r') as rstlatex_file:\n assert rstlatex_file.read() == '\\\\dirrel\\n\\t{circumstance}{\\\\rstsegment{sat first}}\\n\\t{}{\\\\rstsegment{nuc second}}\\n'", "def write_ts(ts,i):\n '''\n Write light curve to disk as space delimited text file\n\t\n\tParameters\n\t----------\n\tts: time series object\n\ti : a counter to be appended to the file name where it is stored \n\tReturns\n\t-------\n\tNone. \n '''\n path = \"ts-{}.txt\".format(i)\n datafile_id = open(path, 'wb')\n data = np.array([ts._times, ts._values])\n data = data.T\n\n np.savetxt(datafile_id, data, fmt=['%.3f','%8f'])\n datafile_id.close()", "def test1_write():\n with open(FILE_DIR + FILE_NAME, mode='w', encoding='utf-8') as f:\n f.write(DATA)", "def write_to_file(start_runtime, contents, write_mode='a'):\n with open(f\"{start_runtime}.txt\", write_mode) as f:\n f.write(\"Filename\\t\\tMaxTrack\\tNumInst\\t\\tTimeSig\\t\\tTPB\\n\")\n f.write(contents)", "def write_one(paths_out, solutii, current_fis, note=\"\"):\n f = open(f\"{paths_out[current_fis]}\", \"a\")\n f.write(note)\n for s in solutii:\n f.write(s)", "def _write_modefile(modefile, parameters, l_min):\n if os.path.exists(modefile):\n os.remove(modefile)\n\n\n m_codes = ['quit', 'radial', 'toroidal', 'spheroidal', 'inner core']\n wave_to_mode = {\n 'Rayleigh': 'spheroidal',\n 'Love': 'toroidal',\n }\n m_code = m_codes.index(wave_to_mode[parameters.Rayleigh_or_Love])\n\n fid = open(modefile, 'w')\n # First line: accuracy of mode calculation - eps eps1 eps2 wgrav\n # where eps - accuracy of integration scheme\n # eps1 - precision when finding roots\n # eps2 - minimum separation of roots\n # wgrav - freqeuncy (rad/s) above which gravitational terms\n # are neglected\n # Second line - m_code gives the index in the list above, m_codes\n fid.write('1.d-12 1.d-12 1.d-12 .126\\n{}\\n'.format(m_code))\n # Third line: lmin and lmax give range of angular order, fmin and fmax\n # give the range of frequency (mHz), final parameter is the number\n # of mode branches for Love and Rayleigh - hardwired to be 1 for\n # fundamental mode (= 2 would include first overtone)\n # Fourth line: not entirely sure what this 0 means\n fid.write('{:.0f} {:.0f} {:.3f} {:.3f} 1\\n0\\n'.format(l_min,\n parameters.l_max, parameters.freq_min, parameters.freq_max))\n\n fid.close()", "def _write_dx(self, FN, data):\n n_points = data['counts'][0] * data['counts'][1] * data['counts'][2]\n if FN.endswith('.dx'):\n F = open(FN, 'w')\n else:\n import gzip\n F = gzip.open(FN, 'w')\n\n F.write(\"\"\"object 1 class gridpositions counts {0[0]} {0[1]} {0[2]}\norigin {1[0]} {1[1]} {1[2]}\ndelta {2[0]} 0.0 0.0\ndelta 0.0 {2[1]} 0.0\ndelta 0.0 0.0 {2[2]}\nobject 2 class gridconnections counts {0[0]} {0[1]} {0[2]}\nobject 3 class array type double rank 0 items {3} data follows\n\"\"\".format(data['counts'], data['origin'], data['spacing'], n_points))\n\n for start_n in range(0, len(data['vals']), 3):\n F.write(' '.join(['%6e' % c\n for c in data['vals'][start_n:start_n + 3]]) + '\\n')\n\n F.write('object 4 class field\\n')\n F.write('component \"positions\" value 1\\n')\n F.write('component \"connections\" value 2\\n')\n F.write('component \"data\" value 3\\n')\n F.close()", "def write(self, filename, 
chars_per_line=70):\n tofile_with_line_sep(self._dna, filename, chars_per_line)", "def writeKitty(b, of):\n with open(of, \"w\") as fh:\n for r in b:\n fh.write(\"%s 0 0 0 %d %d %d %d 0 0 0 0 0 0 0\\n\" % ( r[0], int(r[1]*dw), int(r[2]*dh), int(r[3]*dw), int(r[4]*dh) ) )", "def write(self, f):\n if self.best_mhc_align:\n mhc_align_str = self.best_mhc_align.subject_str()\n mhc_score_str = str(self.best_mhc_align.bit_score)\n else:\n mhc_align_str = \".\"\n mhc_score_str = \"0\"\n\n if self.best_non_mhc_align:\n non_mhc_align_str = self.best_non_mhc_align.subject_str()\n non_mhc_score_str = str(self.best_non_mhc_align.bit_score)\n else:\n non_mhc_align_str = \".\"\n non_mhc_score_str = \"0\"\n \n f.write(\"\\t\".join([self.locus, self.short_samp_id, self.name,\n str(self.length), mhc_align_str, non_mhc_align_str,\n mhc_score_str, non_mhc_score_str,\n str(self.n_mhc_align), str(self.n_non_mhc_align)]) + \"\\n\")", "def write_directive_to_file(filename, arb_id, payload):\n fd = open(filename, \"a\")\n try:\n fd.write(arb_id + \"#\" + payload + \"\\n\")\n finally:\n fd.close()", "def lines_to_file(file_name: str, write_dir: str, lines: Sequence[str]):\n with open(os.path.join(write_dir, file_name), \"w\", encoding=\"utf-8\") as f:\n for l in lines:\n f.write(f\"{l}\\n\")", "def out(filename, s):\n\tf = open(filename, 'w')\n\tf.write(s)\n\tf.close()", "def to_file(self, f: str) -> None:\n with open(f, \"w\") as open_file:\n open_file.write(\"\\n\".join(self.itos) + \"\\n\")", "def writeToFile(fil, aks, tid):\r\n\r\n f = open(\"processed_\"+fil, 'w')\r\n \r\n f.write(\"Aks Tid\")\r\n for i in range(len(aks)):\r\n f.write(f\"\\n{aks[i]} {tid[i]}\")\r\n f.close()", "def write_file(self):\n if self._write_file == None:\n return\n\n try:\n out = file(self._write_file, \"w\")\n except IOError, e:\n print e\n sys.exit(1)\n out.writelines(\"A cases\") \n out.close()", "def writeslxsol(self, name, *values):\n with open(name, \"w\") as slx:\n for i, sol in enumerate(values):\n slx.write(\"NAME solution%d\\n\" % i)\n for name, value in sol:\n slx.write(f\" C {name} {value:.16f}\\n\")\n slx.write(\"ENDATA\\n\")" ]
[ "0.63052565", "0.5946495", "0.5859641", "0.58320624", "0.58302605", "0.57833433", "0.5733278", "0.5729144", "0.5711805", "0.56135124", "0.5603127", "0.55708736", "0.55650675", "0.55446815", "0.5522803", "0.552071", "0.55121875", "0.5507822", "0.5500877", "0.5473531", "0.54703724", "0.5468874", "0.54296786", "0.54293406", "0.5411746", "0.54058194", "0.53710556", "0.5369346", "0.535936", "0.53567374", "0.5337299", "0.5332512", "0.5331997", "0.5317902", "0.5298014", "0.52765083", "0.52743703", "0.5268459", "0.5268459", "0.526731", "0.5246851", "0.52354175", "0.5231427", "0.5229705", "0.5203367", "0.5201876", "0.5196639", "0.51911753", "0.5189473", "0.51886046", "0.5182962", "0.51815", "0.51809734", "0.5177733", "0.51775753", "0.51680785", "0.5166003", "0.5161371", "0.5156088", "0.5153224", "0.515281", "0.51505756", "0.5142322", "0.513775", "0.51371586", "0.513316", "0.51314574", "0.5129059", "0.51139003", "0.5113127", "0.51130813", "0.5112741", "0.51108426", "0.51089215", "0.51063746", "0.51056445", "0.51043427", "0.5096519", "0.5091813", "0.50843894", "0.50830704", "0.50824845", "0.50780797", "0.50751215", "0.5074201", "0.5065422", "0.50652874", "0.50565743", "0.505584", "0.50554425", "0.50537694", "0.5053021", "0.50386363", "0.5036775", "0.503481", "0.50341904", "0.5029226", "0.5023196", "0.50224143", "0.50153035" ]
0.6843431
0
Calculate the density of states for a finite system
def ldos_finite(h,e=0.0,n=10,nwf=4,delta=0.0001):
    if h.dimensionality!=1: raise # if it is not one dimensional
    intra = csc(h.intra) # convert to sparse
    inter = csc(h.inter) # convert to sparse
    interH = inter.H # hermitian
    m = [[None for i in range(n)] for j in range(n)] # full matrix
    for i in range(n): # add intracell
        m[i][i] = intra
    for i in range(n-1): # add intercell
        m[i][i+1] = inter
        m[i+1][i] = interH
    m = bmat(m) # convert to matrix
    (ene,wfs) = slg.eigsh(m,k=nwf,which="LM",sigma=0.0) # diagonalize
    wfs = wfs.transpose() # transpose wavefunctions
    dos = (wfs[0].real)*0.0 # calculate dos
    for (ie,f) in zip(ene,wfs): # loop over waves
        c = 1./(1.+((ie-e)/delta)**2) # calculate coefficient
        dos += np.abs(f)*c # add contribution
    odos = spatial_dos(h,dos) # get the spatial distribution
    go = h.geometry.supercell(n) # get the supercell
    write_ldos(go.x,go.y,odos) # write in a file
    return dos # return the dos
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getDensityOfStates(self, Elist):\n\t\treturn _modes.freerotor_densityofstates(Elist, self.frequencies, 1 if self.linear else 0)", "def getDensityOfStates(self, Elist):\n\t\trho = np.zeros((len(Elist)), np.float64)\n\t\trho0 = _modes.hinderedrotor_densityofstates(Elist, self.frequency, self.barrier)\n\t\tfor i in range(self.degeneracy):\n\t\t\trho = _modes.convolve(rho, rho0, Elist)\n\t\treturn rho", "def density(self):\r\n return self.count_ones() / float(self.xspan * self.yspan)", "def getDensityOfStates(self, Elist, V=1.0):\n\t\treturn _modes.translation_densityofstates(Elist, self.mass, self.dimension, V)", "def getDensityOfStates(self, Elist, linear):\n\n\t\timport states\n\n\t\t# Create energies in cm^-1 at which to evaluate the density of states\n\t\tconv = constants.h * constants.c * 100.0 * constants.Na # [=] J/mol/cm^-1\n\t\tEmin = min(Elist) / conv\n\t\tEmax = max(Elist) / conv\n\t\tdE = (Elist[1] - Elist[0]) / conv\n\t\tElist0 = np.arange(Emin, Emax+dE/2, dE)\n\n\t\t# Prepare inputs for density of states function\n\t\tvib = np.array([mode.frequency for mode in self.modes if isinstance(mode, HarmonicOscillator)])\n\t\trot = np.array([mode.frequencies for mode in self.modes if isinstance(mode, RigidRotor)])\n\t\thind = np.array([[mode.frequency, mode.barrier] for mode in self.modes if isinstance(mode, HinderedRotor)])\n\t\tif len(hind) == 0: hind = np.zeros([0,2],np.float64)\n\t\tlinear = 1 if linear else 0\n\t\tsymm = self.symmetry\n\n\t\t# Calculate the density of states\n\t\tdensStates, msg = states.densityofstates(Elist0, vib, rot, hind, symm, linear)\n\t\tmsg = msg.strip()\n\t\tif msg != '':\n\t\t\traise Exception('Error while calculating the density of states for species %s: %s' % (self, msg))\n\n\t\t# Convert density of states from (cm^-1)^-1 to mol/J\n\t\tdensStates /= conv\n\n\t\t# Return result\n\t\treturn densStates", "def getDensityOfStates(self, Elist):\n\t\tpass", "def density(temp,pres):\n g_p = liq_g(0,1,temp,pres)\n dliq = g_p**(-1)\n return dliq", "def plot_density_of_states(xlim=(-10, 5), ylim=(-1.5, 1.5), fmt='pdf'):\n\n efermi = Vasprun('vasprun.xml').efermi\n dos_lines = open ('DOSCAR').readlines()\n\n x, up, down = [], [], []\n nedos = Incar.from_file('INCAR').as_dict()['NEDOS'] - 1\n\n for line in dos_lines[6:6+nedos]:\n split_line = line.split()\n x.append(float(split_line[0]) - efermi)\n up.append(float(split_line[1]))\n down.append(-float(split_line[2]))\n\n x, up, down = np.array(x), np.array(up), np.array(down)\n sum = up + down\n\n ax = plt.figure().gca()\n ax.set_xlim(xlim[0], xlim[1])\n ax.set_ylim(ylim[0], ylim[1])\n\n ax.set_xlabel(r'$\\mathrm{E\\/(eV)}$')\n ax.set_ylabel(r'$\\mathrm{Density\\/of\\/States$')\n ax.set_xticklabels([r'$\\mathrm{%s}$' % t for t in ax.get_xticklabels()])\n ax.set_yticklabels([r'$\\mathrm{%s}$' % t for t in ax.get_yticklabels()])\n\n ax.plot(x, up, color='red' )\n ax.plot(x, down, color='green')\n ax.plot(x, sum, color='black' )\n if fmt is not None:\n plt.savefig('density_of_states.{}'.format(fmt))\n else:\n return ax\n\n plt.close()", "def density(self):\n return self.nnz/self.dim", "def main():\n N = 201 # Amount of gridpoints, odd number to include 0\n L = 10 # Size of the system\n k = 50 # Amount of energies and states calculated\n x = y = np.linspace(-L/2,L/2,N) # Gridpoints\n h = x[1]-x[0] # Spacing of gridpoints\n\n # Solve the system with and without perturbation\n E,psi,E_p,psi_p = fdm_2d(N,L,x,y,h,k)\n\n # Print the first two energies and the absolute error of the energies\n print('Energies of the two 
lowest states:')\n print('E_00 = %.4f' % E[0])\n print('E_01 = %.4f' % E[1], '\\n')\n print('Absolute error for E_00: %.4e' % np.abs(E[0]-1))\n print('Absolute error for E_01: %.4e' % np.abs(E[1]-2))\n\n print('\\nEnergies of the two lowest states after perturbation:')\n print('E_00 = %.4f' % E_p[0])\n print('E_01 = %.4f' % E_p[1])\n\n # Calculate the normalized densities of the states\n densities_norm = np.zeros((k,N,N))\n densities_norm_p = np.zeros((k,N,N))\n for i in range(k):\n # meshgrid form\n state = np.reshape(psi.T[i],(N,N))\n state_p = np.reshape(psi_p.T[i],(N,N))\n densities_norm[i] = normalized_density(state,x)\n densities_norm_p[i] = normalized_density(state_p,x)\n\n # Analytical solution of the ground state\n X,Y = np.meshgrid(x,y)\n psi00_exact = phi(X,0)*phi(Y,0)\n psi00_exact_density = normalized_density(psi00_exact,x)\n\n print('\\nMaximum absolute error of the normalized ground state densities of the unperturbated system:')\n print('errmax = {:.4e}'.format(np.max(np.abs(densities_norm[0]-psi00_exact_density))))\n\n # Plotting the ground state density of the unperturbated system\n fig1 = plt.figure(figsize=plt.figaspect(0.5))\n ax = fig1.add_subplot(1,2,1,projection='3d')\n surf1 = ax.plot_surface(X, Y, densities_norm[0], cmap=cm.coolwarm,\n linewidth=0, antialiased=False)\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n fig1.suptitle(r'Normalized ground state density $|\\psi|^2$ using FDM')\n ax = fig1.add_subplot(1,2,2)\n ax.imshow(densities_norm[0],extent=[-L/2,L/2,-L/2,L/2])\n plt.savefig(os.path.join(path,'FDM_psi00_unperturbated.png'))\n plt.close()\n\n\n fig2 = plt.figure(figsize=plt.figaspect(0.5))\n ax = fig2.add_subplot(1,2,1,projection='3d')\n surf2 = ax.plot_surface(X, Y, psi00_exact_density, cmap=cm.coolwarm,\n linewidth=0, antialiased=False)\n\n\n # Plotting the ground state density of the perturbated system\n fig1 = plt.figure(figsize=plt.figaspect(0.5))\n ax = fig1.add_subplot(1,2,1,projection='3d')\n surf1 = ax.plot_surface(X, Y, densities_norm_p[0], cmap=cm.coolwarm,\n linewidth=0, antialiased=False)\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n fig1.suptitle(r'Normalized ground state density $|\\psi|^2$ of the perturbated system using FDM')\n ax = fig1.add_subplot(1,2,2)\n ax.imshow(densities_norm_p[0],extent=[-L/2,L/2,-L/2,L/2])\n plt.savefig(os.path.join(path,'FDM_psi00_perturbated.png'))\n plt.close()\n\n fig2 = plt.figure(figsize=plt.figaspect(0.5))\n ax = fig2.add_subplot(1,2,1,projection='3d')\n surf2 = ax.plot_surface(X, Y, psi00_exact_density, cmap=cm.coolwarm,\n linewidth=0, antialiased=False)\n\n # Plotting the analytic ground state density\n fig1 = plt.figure(figsize=plt.figaspect(0.5))\n ax = fig1.add_subplot(1,2,1,projection='3d')\n surf1 = ax.plot_surface(X, Y, psi00_exact_density, cmap=cm.coolwarm,\n linewidth=0, antialiased=False)\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n fig1.suptitle(r'Analytical normalized ground state density $|\\psi|^2$')\n ax = fig1.add_subplot(1,2,2)\n ax.imshow(psi00_exact_density,extent=[-L/2,L/2,-L/2,L/2])\n plt.savefig(os.path.join(path,'exact_psi00.png'))\n plt.close()\n\n # Plot some of the other densities and save them as pdf\n for i in range(1,20):\n density = densities_norm[i]\n fig = plt.figure(figsize=plt.figaspect(0.5))\n plt.imshow(density,extent=[-L/2,L/2,-L/2,L/2])\n plt.title('n={}'.format(i))\n plt.savefig(os.path.join(path,'FDM_unperturbated{}.png'.format(i)))\n plt.close()\n\n density_p = densities_norm_p[i]\n fig = plt.figure(figsize=plt.figaspect(0.5))\n 
plt.imshow(density_p,extent=[-L/2,L/2,-L/2,L/2])\n plt.title('n={}'.format(i))\n plt.savefig(os.path.join(path,'FDM_perturbated{}.png'.format(i)))\n plt.close() \n\n # Plot analytical states until nx,ny = 5\n for nx in range(6):\n for ny in range(6):\n state = phi(X,nx)*phi(Y,ny)\n density = normalized_density(state,x)\n plt.figure()\n plt.imshow(density,extent=[-L/2,L/2,-L/2,L/2])\n plt.title('$n_x={}, n_y={}$'.format(nx,ny))\n plt.savefig(os.path.join(path,'analytical_state_{}_{}.png'.format(nx,ny)))\n plt.close()\n\n # Get analytical energies from nx,ny = 0 to 10\n n = 10\n energies = analytical_energies(n)\n\n # Plot k analytical and the FDM energies\n index = np.arange(k)\n plt.figure()\n plt.plot(index,energies[0:k],label='Analytical energies')\n plt.plot(index,E,label='Unperturbated energies')\n plt.plot(index,E_p,label='Perturbated energies')\n plt.legend()\n plt.xlabel('n')\n plt.ylabel(r'$\\tilde{E} = \\frac{E}{\\hbar\\omega}$')\n plt.title('Energies')\n plt.savefig(os.path.join(path,'energies.png'))\n plt.close()", "def testDensityCalculation(self):\n known_densities = np.array([1.76776695e-01, 1.76776695e-01, 1.76776695e-01,\n 4.59619433e-01, 4.59619433e-01, 1.76776695e-01, 5.00000000e-01, \n 8.84538011e-02, 3.40206909e-02, 2.26040275e-04])\n densities = nb._get_local_densities() \n np.testing.assert_allclose(densities, known_densities)", "def density(self):\n return self.nnz / self.size", "def get_fiber_density():\n return Global_Module.global_fiber_density", "def density_of_state_plot(N=400,a=1.0,eita=0.01):\n foot_step=2*np.pi/N\n k=np.arange(0.0,2*np.pi/a,foot_step)\n Ek=band_energy(k)\n E=np.arange(-3.0,3.0,0.01)\n Ek.shape=(N,1)\n E.shape=(1,600)\n \"\"\"Reshape E and Ek series with broadcasting method.\"\"\"\n dirac_function=np.imag(np.true_divide(1/np.pi,np.subtract(E-Ek,1j*eita)))\n D=np.sum(np.true_divide(dirac_function,N),axis=0)\n \"\"\"Calculate the density of state with lorentzian broadenning method.\"\"\" \n E.shape=(600)\n plt.plot(D,E)", "def calc_D(state):\n\t\tif t < thresh:\n\t\t\tstate.D_g[t] = 0.5\n\t\t\tstate.D_n[t] = 0.5\n\t\telse:\n\t\t\tif mod == \"constant\":\n\t\t\t\tstate.D_g[t] = D\n\t\t\t\tstate.D_n[t] = 1-D\n\t\t\tif mod == \"value\":\n\t\t\t\t# NOTE: if rmag and lmag is 1/0, can just use V\n\t\t\t\t# average of two actions\n\t\t\t\tV = np.mean(1/2*(state.QG[t,:] - state.QN[t,:])) # state average(?) 
\n\t\t\t\tV = 1/(1 + np.exp(-V*k)) # translate between 0 and 1\n\t\t\t\tstate.D_g[t] = V \n\t\t\t\tstate.D_n[t] = 1 - V\n\t\treturn state", "def update_density(\n self,\n states: FlowFieldMap,\n additional_states: FlowFieldMap,\n ) -> FlowFieldVal:\n zz = additional_states.get('zz', [tf.constant(0, dtype=TF_DTYPE)] *\n self._params.nz)\n\n if 'T' in states:\n t = states['T']\n elif 'theta' in states:\n t = self._potential_temperature_to_temperature(states['theta'], zz)\n else:\n raise ValueError(\n 'Either temperature or potential temperature is required for the '\n 'ideal gas law.'\n )\n\n scalars = {\n sc_name: thermodynamics_utils.regularize_scalar_bound(states[sc_name])\n for sc_name in self._molecular_weights.keys()\n if sc_name != INERT_SPECIES\n }\n\n if scalars:\n scalars.update({\n INERT_SPECIES:\n thermodynamics_utils.compute_ambient_air_fraction(scalars)\n })\n sc_reg = thermodynamics_utils.regularize_scalar_sum(scalars)\n else:\n sc_reg = {\n INERT_SPECIES: [\n tf.ones_like(sc_i, dtype=TF_DTYPE)\n for sc_i in list(states.values())[0]\n ]\n }\n\n mixture_molecular_weight = (\n thermodynamics_utils.compute_mixture_molecular_weight(\n self._molecular_weights, sc_reg))\n\n return [\n self.density_by_ideal_gas_law(p_i, R_U / w_mix_i, t_i)\n for p_i, w_mix_i, t_i in zip(\n self.p_ref(zz, additional_states), mixture_molecular_weight, t)\n ]", "def getDensityEstimate(self):\n return self.density", "def _denormalizeState(self, Z : vector) -> vector:\n return Z / self.D", "def density( self ) :\n return self.__density", "def density( self ) :\n return self.__density", "def density( self ) :\n return self.__density", "def population_density(self) -> float:\n return self.population / self.area", "def calculate_KDE(self, states, bw=np.logspace(-1, 1, 20), cv=5):\n params = {'bandwidth': bw}\n grid = GridSearchCV(KernelDensity(), params, cv=cv)\n grid.fit(states)\n kde = grid.best_estimator_\n return kde", "def density(self):\n return self.get_density()", "def density(self) -> float:\n if self.is_directed():\n factor = 1\n else:\n factor = 2\n\n num_e = self._Impl.number_of_edges(directed_edges=True)\n num_v = self._Impl.number_of_vertices()\n\n density = (factor * num_e) / (num_v * (num_v - 1))\n return density", "def find_density(attr, D, h):\n d = D.shape[1]\n n = D.shape[0]\n total = 0\n for xi in D:\n kernel = find_kernel_value(attr, xi, h, d)\n total += kernel\n return total / (n * h ** d)", "def current_density(self, xyz):\n return self.sigma * self.electric_field(xyz)", "def current_density(self, xyz):\n return self.sigma * self.electric_field(xyz)", "def test_density(self):\n earth = CoreMantleCrustModel()\n assert earth.density(0) == 14\n assert earth.density(1e6) == 14\n assert earth.density(3.464e6) == 14\n assert earth.density(3.5e6) == 3.4\n assert earth.density(5e6) == 3.4\n assert earth.density(6.338e6) == 3.4\n assert earth.density(6.378e6) == 2.9", "def density(self):\n return self.num_arcs() / (self.nframes / FRATE)", "def _density(self):\n fraction = np.array([0.]+[m.value for m in self.fraction])\n # TODO: handle invalid fractions using penalty functions\n # S = sum(fraction)\n # scale = S/100 if S > 100 else 1\n # fraction[0] = 100 - S/scale\n # penalty = scale - 1\n fraction[0] = 100 - sum(fraction)\n if (fraction < 0).any():\n return NaN\n volume = self._volume(fraction)\n density = np.array([m.density() for m in [self.base]+self.material])\n return np.sum(volume*density)", "def gen_density_matrix(states=None, dimensions=None):\n if states is None:\n tdim = 
np.prod(dimensions)\n dmtotal0 = np.eye(tdim) / tdim\n\n return dmtotal0\n\n dmtotal0 = np.eye(1, dtype=np.complex128)\n\n for i, s in enumerate(states):\n\n if not hasattr(s, \"__len__\"):\n # assume s is int or float showing the spin projection in the pure state\n d = dimensions[i]\n dm_nucleus = np.zeros((d, d), dtype=np.complex128)\n state_number = int(round((d - 1) / 2 - s))\n dm_nucleus[state_number, state_number] = 1\n\n else:\n if s.shape.__len__() == 1:\n d = dimensions[i]\n dm_nucleus = np.zeros((d, d), dtype=np.complex128)\n np.fill_diagonal(dm_nucleus, s)\n\n else:\n dm_nucleus = s\n\n dmtotal0 = np.kron(dmtotal0, dm_nucleus)\n\n return dmtotal0", "def density(self):\n return (1e-3*self.molar_mass) * self.pressure / (gas_constant * self.temperature) # kg/m^3", "def plot_total_density_of_states(dos: dict, settings: PlotSettings):\n energies = resolve_energy_grid(dos)\n dos_values = dos['total_density_of_states']\n assert len(energies) == len(dos_values)\n\n xmin = min(energies) if settings.xmin is None else settings.xmin\n xmax = max(energies) if settings.xmax is None else settings.xmax\n ymin = 0 if settings.ymin is None else settings.ymin\n ymax = 1.1 * np.max(dos_values) if settings.ymax is None else settings.ymax\n\n fig, ax = plt.subplots()\n ax.set_xlabel(settings.xlabel)\n ax.set_ylabel(settings.ylabel)\n ax.set_xlim(xmin, xmax)\n ax.set_ylim(ymin, ymax)\n\n plt.plot(energies, dos_values)\n plt.axvline(x=dos['fermi_level'], color='black')\n\n return fig, ax", "def density(self):\n raise TypeError(\"The density function is not support on a Multigraph.\")", "def count_energy(self,state):\n\t\tassert len(state) == self._size\n\n\t\ttmp = 0\n\t\tfor i in range(self._size):\n\t\t\tfor j in range(self._size):\n\t\t\t\ttmp += self.myWeights[i][j]* state [i] * state [j]\n\t\treturn tmp - self.myB * sum(state)", "def density(self):\n return self._density", "def edge_density(self) -> float:\n return self.number_of_edges() / (\n self.number_of_nodes() * self.number_of_nodes()\n )", "def __density(self, x):\n\n z = np.power(self.rate, x) / m.factorial(x)\n return z * np.exp(-self.rate)", "def test_density(self):\n sol = Mader(p_cj=3.0e11, d_cj=8.0e5, gamma=3.0, u_piston=0.0)\n r = np.array([0.7, 0.8])\n t = 6.25e-6\n solrt = sol(r, t)\n np.testing.assert_allclose(solrt.density[0], 2.26666666666663)", "def _ion_densities(self):\n nD = self.ne_in*(6-self.zeff_in)/(5.)\n nC = self.ne_in*(self.zeff_in-1)/(30.)\n nC[np.where(nC<0)]=0.\n print(\"nC/nD: \"+str(np.mean(nC/nD)*100.)+\" %\")\n self.ni_in[0,:] = nD\n self.ni_in[1,:] = nC", "def density(ensembles):\n if len(ensembles.shape) < 2:\n return ketbra(ensembles)\n else:\n den_mat = ketbra(ensembles[0])\n for i in range(1, len(ensembles)):\n den_mat += ketbra(ensembles[i])\n den_mat /= len(ensembles)\n return den_mat", "def mask_density(mask):\n return get_number_of_unpruned_weights(mask).float() / get_number_of_weights(mask).float()", "def number_density(self) -> u.m**-3:\n return self._number_density", "def __init__(self, state_observations, continuous=True, pseudocount=1):\n self.continuous = continuous\n state_observations = [numpy.asarray(so) for so in state_observations]\n if continuous:\n self.state_distributions = [kde.gaussian_kde(so) for so in state_observations]\n else:\n max_val = max(so.max() for so in state_observations)\n state_counts = [numpy.bincount(so, minlength=max_val) + pseudocount for so in state_observations]\n self.state_distributions = [sc / sc.sum() for sc in state_counts]", "def densitychange(self,dt=0.1):\n 
#Using conservation of mass and diffusion\n dp_dt = -div(self.u*self.d)\n dp_dt += ndimage.laplace(self.d)\n #This term seems to make the density clump together, producing \n #waves which can make the simulation blow up.\n #dp_dt -= np.add.reduce(self.u*np.array(np.gradient(self.d)))\n #Edge density shouldn't change.\n dp_dt[[0,-1]] = dp_dt[:,[0,-1]] = 0\n self.d += dp_dt*dt\n #Change pressure accordingly to ideal gas law\n #AAAAAAAAAAAAAAAA this fixed most of the poblems from before!!!\n self.P = self.d*8.214*273\n #Conserve mass by spreading out fluctuations \n self.d[1:-1,1:-1] += (self.mass-np.sum(self.d))/self.vol", "def E(self, state):\n \n if state==0: # Invalid state has no energy\n return 0\n return sum([self.calcDistance(state[i+1], state[i]) for i in range(len(state)-1)])", "def density(self, arg):\n out = 0\n for weight, mean, std in zip(self.weights, self.means, self.stds):\n scale = std * self.data['maturity']**.5\n loc = ((mean - self.data['riskfree']) *\n self.data['maturity'] - scale**2)\n out += weight * scs.norm(loc, scale).pdf(arg)\n return out", "def diff_func(sat):\n state = sat.getstate()\n dstate = np.zeros(7)\n dstate[-1] = 1.0\n dstate[0] = state[1]\n dstate[2] = state[3]/(state[0])\n dstate[4] = state[5]/(state[0]*np.sin(state[2]))\n acc = tot_acc(sat)\n dstate[1], dstate[3], dstate[5] = sat.getvdot(acc[0], acc[1], acc[2])\n return dstate", "def get_density(matrix):\n return matrix.getnnz() / (matrix.shape[0] * matrix.shape[1])", "def density(self, x):\n\t\tN = len(self.train_data)\n\t\tpoints = list(self.train_data)\n\t\tdists = [np.linalg.norm(x-point)**2 for point in points]\n\t\texps = [np.exp(-dist / (2 * (self.bandwidth ** 2))) for dist in dists]\n\t\tunnormalized_sum = sum(exps)\n\t\tprobability = (1 / N) * self.normalizing_constant() * unnormalized_sum\n\t\treturn probability", "def count_neighbourhood_states(self,state):\n\t\tassert len(state) > 0\n\t\tenergies = [float(\"inf\") for i in range (self._size)]\n\t\ttaxes = [float(\"inf\") for i in range (self._size)]\n\n\t\tfor i in range (0,self._size):\n\t\t\tnewState = copy(state)\n\t\t\tnewState[i] = 1 - newState[i]\n\t\t\tenergies[i] = self.count_energy(newState)\n\t\t\ttaxes[i] = self.count_tax(newState)\n\n\t\treturn energies,taxes", "def dominance(counts):\n freqs = counts/float(counts.sum())\n return (freqs*freqs).sum()", "def density(self, arg):\n return self.gb2_density(np.exp(arg)) * np.exp(arg)", "def _derivatives(self, state, forces_moments):\n # extract the states\n pn = state[0]\n pe = state[1]\n pd = state[2]\n e0 = state[3]\n e1 = state[4]\n e2 = state[5]\n e3 = state[6]\n u = state[7]\n v = state[8]\n w = state[9]\n # state[6:10] = normalize(state[6:10])\n p = state[10]\n q = state[11]\n r = state[12]\n # extract forces/moments\n fx = forces_moments[0]\n fy = forces_moments[1]\n fz = forces_moments[2]\n l = forces_moments[3]\n m = forces_moments[4]\n n = forces_moments[5]\n\n\n # with warnings.catch_warnings():\n # warnings.filterwarnings('error')\n # try:\n # # position kinematics\n # except Warning as e:\n # pdb.set_trace()\n # print(e)\n\n pn_dot = (e1**2+e0**2-e2**2-e3**2)*u + 2*(e1*e2-e3*e0)*v + 2*(e1*e3+e2*e0)*w\n pe_dot = 2*(e1*e2+e3*e0)*u + (e2**2+e0**2-e1**2-e3**2)*v + 2*(e2*e3-e1*e0)*w\n pd_dot = 2*(e1*e3-e2*e0)*u + 2*(e2*e3+e1*e0)*v + (e3**2+e0**2-e1**2-e2**2)*w\n\n # pn_dot = (e0**2+e1**2-e2**2-e3**2)*u + 2*(e1*e2+e3*e0)*v + 2*(e1*e3-e2*e0)*w\n # pe_dot = 2*(e1*e2-e3*e0)*u + (e0**2-e1**2+e2**2-e3**2)*v + 2*(e2*e3+e1*e0)*w\n # pd_dot = 2*(e1*e3+e2*e0)*u + 2*(e2*e3-e1*e0)*v + 
(e0**2-e1**2-e2**2+e3**2)*w\n\n # pdb.set_trace()\n\n # position dynamics\n mass = self.mass\n u_dot = (r*v-q*w)+fx/mass\n v_dot = (p*w-r*u)+fy/mass\n w_dot = (q*u-p*v)+fz/mass\n\n # rotational kinematics\n e0_dot = 0.5*(-p*e1-q*e2-r*e3)\n e1_dot = 0.5*(p*e0+r*e2-q*e3)\n e2_dot = 0.5*(q*e0-r*e1+p*e3)\n e3_dot = 0.5*(r*e0+q*e1-p*e2)\n\n # rotatonal dynamics\n p_dot = self.gamma1*p*q - self.gamma2*q*r + self.gamma3*l + self.gamma4*n\n q_dot = self.gamma5*p*r - self.gamma6*(p**2-r**2) + m/self.Jy\n r_dot = self.gamma7*p*q - self.gamma1*q*r + self.gamma4*l + self.gamma8*n\n\n # collect the derivative of the states\n x_dot = np.array([pn_dot, pe_dot, pd_dot, e0_dot, e1_dot, e2_dot, e3_dot,\n u_dot, v_dot, w_dot, p_dot, q_dot, r_dot])\n # pdb.set_trace()\n\n\n # print(x_dot)\n return x_dot", "def __call__(self, observations):\n observations = numpy.asarray(observations)\n if self.continuous:\n state_probabilities = [kde(observations) for kde in self.state_distributions]\n else:\n state_probabilities = [hist[observations] for hist in self.state_distributions]\n return numpy.transpose(state_probabilities)", "def probability_density(self, X):\n raise NotImplementedError", "def get_fiber_density_average():\n return Global_Module.global_fiber_density_with_average", "def FD(f, s, p, d=1, z=1, m=1, dx=1e-6, gmix=False, k=['All']):\n if k == ['All']:\n ph = 't'\n cph = 'x'\n else:\n ph = k[0]\n cph = k[0]\n \n if d == 1:\n s.c[z][cph] += 0.5*dx\n X_d = []\n for i in range(1, p.m['n']):\n X_d.append(s.c[i][cph])\n \n s = s.update_state(s, p, X = X_d, Force_Update=True) \n if gmix:\n f1 = f(s, p).m['g_mix'][ph]\n else:\n f1 = f(s, p)\n \n s.c[z][cph] -= 1.0*dx\n X_d = []\n for i in range(1, p.m['n']):\n X_d.append(s.c[i][cph])\n \n s = s.update_state(s, p, X = X_d, Force_Update=True) \n if gmix:\n f2 = f(s, p).m['g_mix'][ph]\n else:\n f2 = f(s, p)\n \n return (f1 - f2)/dx\n \n if d == 2:\n s.c[z][cph] += 1.0*dx\n s.c[m][cph] += 1.0*dx\n X_d = []\n for i in range(1, p.m['n']):\n X_d.append(s.c[i][cph])\n \n s = s.update_state(s, p, X = X_d, Force_Update=True) \n if gmix:\n f1 = f(s, p).m['g_mix'][ph]\n else:\n f1 = f(s, p)\n \n s.c[m][cph] -= 2.0*dx\n X_d = []\n for i in range(1, p.m['n']):\n X_d.append(s.c[i][cph])\n \n s = s.update_state(s, p, X = X_d, Force_Update=True) \n if gmix:\n f2 = f(s, p).m['g_mix'][ph]\n else:\n f2 = f(s, p)\n \n s.c[z][cph] -= 2.0*dx\n s.c[m][cph] += 2.0*dx\n X_d = []\n for i in range(1, p.m['n']):\n X_d.append(s.c[i][cph])\n \n s = s.update_state(s, p, X = X_d, Force_Update=True) \n if gmix:\n f3 = f(s, p).m['g_mix'][ph]\n else:\n f3 = f(s, p)\n \n s.c[m][cph] -= 2.0*dx\n X_d = []\n for i in range(1, p.m['n']):\n X_d.append(s.c[i][cph])\n \n s = s.update_state(s, p, X = X_d, Force_Update=True) \n if gmix:\n f4 = f(s, p).m['g_mix'][ph]\n else:\n f4 = f(s, p)\n \n return (f1 - f2 - f3 + f4)/(4.0*dx*dx)", "def system(self, t, state, strength, density):\n v, m, theta, z, _, r = state\n A = np.pi*r**2 # radius generally varies with time after break-up\n rhoa = self.rhoa(z)\n\n # u = [dv/dt, dm/dt, dtheta/dt, dz/dt, dx/dt, dr/dt]\n u = np.zeros_like(state)\n u[0] = -self.Cd*rhoa*A*v**2 / (2*m) + self.g*np.sin(theta) # dv/dt\n u[1] = -self.Ch*rhoa*A*v**3/(2*self.Q) # dm/dt\n u[2] = self.g*np.cos(theta)/v - self.Cl*rhoa * A*v / \\\n (2*m) - (v*np.cos(theta) / (self.Rp+z)) # dtheta/dt\n u[3] = -v*np.sin(theta) # dz/dt\n u[4] = v*np.cos(theta)/(1+z/self.Rp) # dx/dt\n if rhoa * v**2 < strength:\n u[5] = 0\n else:\n u[5] = (7/2*self.alpha*rhoa/density)**0.5 * v # dr/dt\n\n return u", 
"def density_by_ideal_gas_law(\n p: tf.Tensor,\n r: tf.Tensor,\n t: tf.Tensor,\n ) -> tf.Tensor:\n return p / r / t", "def generate_dm0(dm0, dimensions, states=None):\n\n if states is None:\n dmtotal0 = expand(dm0, len(dimensions) - 1, dimensions) / np.prod(dimensions[:-1])\n elif len(dm0.shape) == 1:\n dmtotal0 = generate_pure_initial_state(dm0, dimensions, states)\n\n else:\n dmtotal0 = gen_density_matrix(states, dimensions[:-1])\n dmtotal0 = np.kron(dmtotal0, dm0)\n\n return dmtotal0", "def current_density(self, xyz):\n\n j = self.electric_field(xyz) / self.rho\n return j", "def derivatives(self, state):\n raise NotImplementedError()", "def stationary_distribution(self):\n P = self.markov_transition()\n N = len(P)\n I = np.identity(N)\n A = P.T - I # get right-kernel\n pi = null_space(A)\n pi = pi / sum(pi)\n pi = [float(item) for item in pi]\n return pi", "def idensity(n):\n I = zeroes(n, n)\n for i in range(n):\n I.g[i][i] = 1.0\n return I", "def f( self , x , u , t ):\n \n dx = np.zeros(self.n) # State derivative vector\n \n r = u # input of closed-loop global sys is ref of the controller\n \n # Compute output signal\n y = self.plant.h( x, self.plant.ubar, t)\n \n # Compute control inputs\n u = self.controller.c( y, r, t)\n \n # Compute state derivatives\n dx = self.plant.f( x, u, t)\n \n return dx", "def fGasDensity(GasGravity, Temperature, Pressure):\n\tGasConstant = 8.314\n\tPress = Pressure / 145.038 # MPa\n\tTemp = Temperature + 273.16 # Deg K\n\tPr = Press / (4.892 - (0.4048 * GasGravity))\n\tTr = Temp / (94.72 + (170.75 * GasGravity))\n\tA = 0.03 + 0.00527 * ((3.5 - Tr)**3)\n\tB = (0.642 * Tr) - (0.007 * (Tr**4)) - 0.52\n\tC = 0.109 * ((3.85 - Tr)**2)\n\tD = exp(-((0.45 + (8 * ((0.56 - (1 / Tr))**2))) * ((Pr**1.2) / Tr)))\n\tZ = (A * Pr) + B + (C * D)\n\treturn (28.8 * GasGravity * Press) / (Z * GasConstant * Temp)", "def find_freq(self):\n state = self.env.reset()\n state_dim = len(state)\n seq = [state]\n\n for _ in range(self.args.exploration_iterations*10):\n action = np.random.randint(self.env.action_space.n)\n next_state, reward, done, info = self.env.step(action)\n seq.append(next_state)\n if done:\n state = self.env.reset()\n else:\n state = next_state\n\n # Create a primitive MDP for every unique state explored\n states = set(seq)\n for state in states:\n primitive_mdp = MDP(level=0, state_var=state)\n primitive_mdp.exits = {x for x in range(self.env.action_space.n)}\n primitive_mdp.mer = frozenset({state})\n primitive_mdp.primitive_states = {state}\n self.mdps[0].add(primitive_mdp)\n\n freq = [{'sv': i, 'last': None, 'changes': 0} for i in range(state_dim)]\n for state in seq:\n for i in range(state_dim):\n if freq[i]['last'] is None:\n freq[i]['last'] = state[i]\n else:\n if state[i] != freq[i]['last']:\n freq[i]['changes'] += 1\n freq[i]['last'] = state[i]\n\n sorted_freq = sorted(freq, key=lambda x: x['changes'], reverse=True)\n return [d['sv'] for d in sorted_freq], state_dim", "def get_flux_density(self):\n if self.no_flux is False:\n return self.snu_at_1GHz\n else:\n return -1", "def electron_density(self):\n return N_avo * self.num_electrons * self.density / self.molar_mass", "def dof_satt(self):\n d1 = self.d1\n d2 = self.d2\n # this follows blindly the SPSS manual\n # except I use ``_var`` which has ddof=0\n sem1 = d1._var / (d1.nobs - 1)\n sem2 = d2._var / (d2.nobs - 1)\n semsum = sem1 + sem2\n z1 = (sem1 / semsum) ** 2 / (d1.nobs - 1)\n z2 = (sem2 / semsum) ** 2 / (d2.nobs - 1)\n dof = 1.0 / (z1 + z2)\n return dof", "def getDensity(optVector):\n newStates 
= optVector[:-self.nODEParams]\n newParams = optVector[-self.nODEParams:]*(10**thetaMagnitudes)\n return -2*newStates.size*self.getDensity(newStates, newParams)", "def density_ch(tensor):\n return 1 - sparsity_ch(tensor)", "def total_energy(state, k=1, m=1):\n return 0.5*k*state[..., 0]*state[..., 0]+0.5*m*state[..., 1]*state[..., 1]", "def _estimate_density(self, x):\n\n self.density_, self.bins_ = np.histogram(x, bins=10, density=True)", "def shannon(state_space):\n if isinstance(state_space, int) or len(state_space) == 1:\n return 0\n ws = sum(state_space.values())\n if ws == 0:\n print(state_space)\n return math.log(ws) - sum(map(lambda x: x * math.log(x), state_space.values())) / ws", "def get_state_vars(state):\n x_n = state[0, :] # x_n\n x_m = np.roll(x_n, -1) # x_{n+1}\n x_m[-1] += Length # the first one goes ahead by Length\n dx_n = state[1, :] # dx_n\n dx_m = np.roll(dx_n, -1) # dx_{n+1}\n dist = x_m - x_n\n if (dist <= 0).any():\n logging.critical(\"CRITICAL: Negative distances\")\n return x_n, x_m, dx_n, dx_m, dist", "def dynamics(state,t):\n global M,m\n f = control_upright(state)\n # f = 0\n dydx = np.zeros_like(state)\n x,x_dot,th,th_dot = state #unpacking the state\n dydx[0] = x_dot\n dydx[2] = th_dot\n\n den1 = M + (m*sin(th)*sin(th))\n dydx[1] = (f + (m*g*sin(th)*cos(th)) + m*L*th_dot*th_dot*sin(th) + (b/L)*(th_dot*cos(th)))/den1\n den2 = L*den1\n dydx[3] = (((M+m)*g*sin(th) + f*cos(th) + m*L*th_dot*th_dot*sin(th)*cos(th))/den2) + (b/(m*L*L))*th_dot\n dydx[3] = -dydx[3]\n\n return dydx", "def density(self, psi):\n return np.square(np.abs(psi))", "def state_norm_opt(state):\n fact_arr = np.array([factorial(x) for x in range(len(state))])\n tf2 = np.tensordot(fact_arr, fact_arr, axes=0)\n tf4 = np.tensordot(tf2, tf2, axes=0)\n st_abs_quad = np.power(np.abs(state), 2)\n mult = np.multiply(st_abs_quad, tf4)\n return sqrt(np.sum(mult))", "def state_norm(state):\n size = len(state)\n norm_ = 0\n for p1 in range(size):\n for p2 in range(size):\n for p3 in range(size):\n for p4 in range(size):\n norm_ = norm_ + abs(state[p1, p2, p3, p4])**2 * factorial(p1)*factorial(p2)*factorial(p3)*factorial(p4)\n return sqrt(norm_)", "def rate_density(x, a):\n return a * x", "def mcintosh_d(counts):\n u = sqrt((counts*counts).sum())\n n = counts.sum()\n return (n-u)/(n-sqrt(n))", "def ddx(n, dx, f):\n fx = np.zeros(n)\n for j in range(n):\n fx[j] = (f[get_index(j+1, n)]-f[get_index(j-1, n)])/(2*dx)\n return fx", "def PowerSpectralDensity(f):\n sky_averaging_constant = (20/3) # Sky Averaged <--- I got this from Jonathan's notes but I need\n # to check where he got it...\n L = 2.5*10**9 # Length of LISA arm\n f0 = 19.09*10**-3 \n\n Poms = ((1.5*10**-11)**2)*(1 + ((2*10**-3)/f)**4) # Optical Metrology Sensor\n Pacc = (3*10**-15)**2*(1 + (4*10**-3/(10*f))**2)*(1 + (f/(8*10**-3))**4) # Acceleration Noise\n Sc = 9*10**(-45)*f**(-7/3)*np.exp(-f**0.171 + 292*f*np.sin(1020*f)) * (1 \\\n + np.tanh(1680*(0.00215 - f))) \n\n PSD = (sky_averaging_constant)* ((10/(3*L**2))*(Poms + (4*Pacc)/((2*np.pi*f))**4)*(1 + 0.6*(f/f0)**2) + Sc) # PSD\n return PSD", "def computeChargeDensity(self):\n \n self.rho = np.zeros((self.ni, self.nj, self.nk))\n \n for species in self.speciesList:\n if species.charge!=0:\n self.rho += species.charge*species.den", "def compute_stationary_distribution(self):\n return self.mc.stationary_distributions", "def calculate(self, density):\n if density not in self.potential_memo:\n\n if density == 0:\n self.potential_memo[density] = 0\n else:\n a = self.a\n x_0 = self.x_0\n b = self.b\n c 
= self.c\n x = self.wigner_seitz_radius(density)**(1/2)\n x_x = x**2 + b * x + c\n x_x_0 = x_0**2 + b * x_0 + c\n q = (4 * c - b**2)**(1/2)\n\n self.potential_memo[density] = a * (log(x**2 / x_x) + (2 * b / q) * atan(q / (2 * x + b))\n - (b * x_0 / x_x_0) * (log((x - x_0)**2 / x_x) + (2 * (b + 2 * x_0) / q) * atan(q / (2 * x + b)))) \\\n - (a / 3) * ((1 + x_0 * x) / (1 + x_0 * x + b * x**2 + c * x**3))\n\n return self.potential_memo[density]", "def det_probability(input_state, detection_event):\n st_aft_det_unappl = detection(input_state, detection_event)\n st_aft_det_conj_app = np.conj(make_state_appliable_4ch(st_aft_det_unappl))\n input_state_appl = make_state_appliable_4ch(input_state)\n st = np.multiply(input_state_appl, st_aft_det_conj_app)\n return np.real(np.sum(st))", "def density(wair,pres,entr=None,temp=None,airf=None,dhum=None,\n chkvals=False,chktol=_CHKTOL,airf0=None,temp0=None,dhum0=None,\n chkbnd=False,mathargs=None):\n airf, temp, dhum = eq_wpte(wair,pres,entr=entr,temp=temp,airf=airf,\n dhum=dhum,chkvals=chkvals,chktol=chktol,airf0=airf0,temp0=temp0,\n dhum0=dhum0,chkbnd=chkbnd,mathargs=mathargs)\n h_p = iceair_h(0,0,1,wair,pres,temp=temp,airf=airf,dhum=dhum)\n dens = h_p**(-1)\n return dens", "def f( self , x , u , t ):\n \n dx = np.zeros(self.n) # State derivative vector\n \n ################################################\n # Place holder: put the equations of motion here\n raise NotImplementedError\n ################################################\n \n return dx", "def Ag_density():\n # initialise no infection default for the number of infections required\n agcurves = [np.zeros(cf.endtime + 1) for inf in cf.tinf]\n # for every infection, calculate its individual effect per timepoint\n for i in range(len(cf.tinf)):\n pag = cf.dose[i] # peak\n tai = 0 # tnow after infection\n while pag > 0.01:\n pag = cf.dose[i] * math.exp(-float(tai) / cf.tdecay)\n agcurves[i][cf.tinf[i] + tai] = pag\n tai += 1\n if cf.tinf[i] + tai >= cf.endtime:\n break\n # sum up all effects\n agcurve_uncapped = np.sum(agcurves, axis=0)\n # set all values above 100% to 100%\n agcurve = [np.min([val, 1]) for val in agcurve_uncapped]\n\n return agcurve", "def linkDensity(self, time=None):\r\n listofDensities = list()\r\n for cell in self.cells:\r\n listofDensities.append(cell.cellDensity())\r\n return listofDensities", "def density(self):\n return _cantera.reactor_density(self.__reactor_id)", "def dset(self):\n\n a = 0.0\n b = 0.0\n sums = np.sum(self.descriptors, axis=0)\n for sum in sums:\n if sum > 0:\n if sum == self.d_length:\n b += 1.\n else:\n a += 1.\n return a / (a+b)", "def _compute_f(self, p, dh, dv):\n return dh / (self.beta * p * dv)", "def numberDensity(frametracks):\n ftr = frametracks\n return ftr.x.count() / ((ftr.x.max() - ftr.x.min()) * (ftr.y.max() - ftr.y.min()))", "def density(self):\n self.convert_window(\"Density\", \"kilograms/liter\", [\"grains/gallon(UK)\", \"grains/gallon(US)\", \"grams/cubic centimeters\", \"grams/liter\", \"grams/millimeters\", \"kilograms/cubic meters\", \"kilograms/liter\", \"megagrams/cubic meter\", \"milligrams/liter\", \"milligrams/millimeters\", \"ounces/cubic inch\", \"ounces/gallon(UK)\", \"ounces/gallon(US)\", \"pounds/cubic foot\", \"pounds/cubic inch\", \"pounds/gallon(UK)\", \"pounds/gallon(US)\", \"slugs/cubic foot\", \"tonnes/cubic meter\", \"tons(UK)/cubic yard\", \"tons(US)/cubic yard\"])", "def test_densities():\n\n actual, r, wt = GridGenerator.make_grid(400)\n grid = 4*pi*r**2*wt\n\n data = AtomData()\n\n print(\"\\nINTEGRATED DENSITY 
TEST\")\n print(\"=======================\")\n for a in list(data.nuclear_charge.keys()):\n atom = Atom(a)\n Nel = data.electron_count[a]\n d0, d1, g0, g1, t0, t1, l0, l1 = atom.get_densities(r)\n\n # Count electrons per spin channel\n s_occ = AtomData.s_occ.get(a, [0, 0])\n p_occ = AtomData.p_occ.get(a, [0, 0])\n d_occ = AtomData.d_occ.get(a, [0, 0])\n f_occ = AtomData.f_occ.get(a, [0, 0])\n nela = np.sum(s_occ[0])+np.sum(p_occ[0])+np.sum(d_occ[0])+np.sum(f_occ[0])\n nelb = np.sum(s_occ[1])+np.sum(p_occ[1])+np.sum(d_occ[1])+np.sum(f_occ[1])\n assert(nela+nelb == Nel)\n\n id0 = np.dot(d0, grid)\n id1 = np.dot(d1, grid)\n\n diff_0 = id0 - nela\n percent_diff_0 = 100*diff_0/nela\n\n # Check to catch for Hydrogen having no beta electrons\n if nelb > 0.0:\n diff_1 = id1 - nelb\n percent_diff_1 = 100*diff_1/nelb\n else:\n diff_1 = 0.0\n percent_diff_1 = 0.0\n\n print(\"{:>3} - N_0 = ({:4.1f}) {:+2.6e}%, N_1 = ({:4.1f}) {:+2.6e}%, {:}\".format(a, id0, percent_diff_0, id1, percent_diff_1, \"PASSED\" if max(abs(diff_0), abs(diff_1)) < 1e-4 else \"FAILED - \"))\n\n print(\"\\nINTEGRATED KINETIC TEST\")\n print(\"=======================\")\n for a in list(data.ke_test.keys()):\n atom = Atom(a)\n t_bm = data.ke_test[a]\n d0, d1, g0, g1, t0, t1, l0, l1 = atom.get_densities(r)\n\n it0 = np.dot(t0, grid)\n it1 = np.dot(t1, grid)\n itot = it0 + it1\n\n diff = itot - t_bm\n print(\"{:>3} - T = {:+.6e}%, {:}\".format(a, 100*diff/t_bm, \"PASSED\" if abs(100*diff/t_bm) < 1e-2 else \"FAILED - \"))\n\n\n # The integral of the Laplacian over all space should be 0. Check that.\n print(\"\\nINTEGRATED LAPLACIAN TEST\")\n print(\"=========================\")\n for a in list(AtomData.ke_test.keys()):\n atom = Atom(a)\n\n d0, d1, g0, g1, t0, t1, l0, l1 = atom.get_densities(r)\n\n il0 = np.dot(grid, l0)\n il1 = np.dot(grid, l1)\n print(\"{:>3} - L_0 = {:+.6e}, L_1 = {:+.6e}, {:}\".format(a, il0, il1, \"PASSED\" if max(abs(il0), abs(il1)) < 1e-6 else \"FAILED - \"))\n\n\n print(\"\\nFINITE DIFFERENCE GRADIENT TEST\")\n print(\"===============================\")\n print(\"Testing gradient evaluation function against finite difference estimate...\")\n ne = Atom(\"Ne\") # Let's use \"the guvnor\"\n # We only need to test a few points around the core\n fdh = 1e-8\n fdr = np.arange(0.9, 0.9+fdh*10, fdh)\n d0, d1, g0, g1, t0, t1, l0, l1 = ne.get_densities(fdr)\n\n # First the first central difference\n fdiff = (d0[2:]-d0[:-2])/(2*fdh) # Construct the central difference\n if np.allclose(fdiff, g0[1:-1], atol=1e-1): # finite difference is not perfect, so lenient tollerance\n print(\"Gradient: PASSED\")\n else:\n print(\"Gradient: FAILED -\")\n\n print(\"\\nELEMENT COLOR FUNCTIONS TEST\")\n print(\"===========================\")\n test_obj = [Atom(\"H\"), Atom(\"C\"), Atom(\"O\")]\n test_str = [\"H\", \"C\", \"O\"]\n ref = np.array([[1., 1., 1.], [0.565, 0.565, 0.565], [1. 
, 0.051, 0.051]])\n\n if np.allclose( np.array(get_colors_for_elements(test_obj)), ref):\n print(\"\\nColor from objects: PASSED\")\n else:\n print(\"\\nColor from objects: FAILED -\")\n\n if np.allclose( np.array(get_colors_for_elements(test_str)), ref):\n print(\"Color from strings: PASSED\")\n else:\n print(\"Color from strings: FAILED -\")\n\n if HAVE_LIBXC:\n test_functional='GGA_X_PBE'\n print(\"\\nATOMIC EXCHANGE ENERGIES WITH {}\".format(test_functional))\n print(\"============================================\")\n for a in list(data.ke_test.keys()):\n atom = Atom(a)\n nE, vrho, vsigma, vtau, vlapl = atom.libxc_eval(r, functional=test_functional, restricted=False)\n Exc = (np.dot(nE, grid)).item()\n print('{:3s} {:.10f}'.format(a, Exc))\n else:\n print(\"\\nNot doing energy calculations due to lack of libxc.\\n\")", "def thermal_state(self, beta: float = .1) -> numpy.ndarray:\n rho = numpy.exp(-beta * self.cost)\n return rho / numpy.sum(rho)" ]
[ "0.6842676", "0.6838163", "0.66934264", "0.66780734", "0.6619658", "0.6589051", "0.6474599", "0.63931894", "0.63873273", "0.63801605", "0.6272464", "0.62401986", "0.6229511", "0.61976075", "0.6176228", "0.6112314", "0.6104218", "0.60965395", "0.60946393", "0.60946393", "0.60946393", "0.6065919", "0.60517204", "0.60499954", "0.604895", "0.60373455", "0.6029974", "0.6029974", "0.60247064", "0.60236526", "0.6022079", "0.59871566", "0.5985088", "0.5963633", "0.59225774", "0.59217376", "0.5912155", "0.5869855", "0.5866905", "0.5856531", "0.583654", "0.5808606", "0.5770125", "0.5766015", "0.57517755", "0.57328755", "0.5724499", "0.5720058", "0.5717292", "0.5714345", "0.5704078", "0.56913495", "0.5685364", "0.5684168", "0.5673596", "0.56344146", "0.5608293", "0.56074405", "0.56073123", "0.5603386", "0.55970716", "0.5595069", "0.55937475", "0.559278", "0.5582785", "0.558076", "0.555097", "0.5545012", "0.55414027", "0.55372477", "0.55324036", "0.5529536", "0.55272377", "0.5525081", "0.5514888", "0.5508626", "0.5505013", "0.5503138", "0.5494338", "0.5489052", "0.54876405", "0.548576", "0.54833853", "0.54730517", "0.5465104", "0.54607487", "0.54607403", "0.5456149", "0.5454478", "0.54507303", "0.54365855", "0.5433864", "0.5429935", "0.5428571", "0.54258615", "0.541954", "0.54163194", "0.541516", "0.54146135", "0.53996336", "0.5396449" ]
0.0
-1
Calculates the LDOS of a cell with a defect, writing the n neighboring cells
def ldos_defect(h,v,e=0.0,delta=0.001,n=1):\n raise # still not finished\n import green\n # number of repetitions\n rep = 2*n +1\n # calculate pristine green function\n g,selfe = green.supercell_selfenergy(h,e=e,delta=delta,nk=100,nsuper=rep)\n # now calculate defected green function \n ez = e + 1j*delta # complex energy\n emat = np.matrix(np.identity(len(g)))*ez # E +i\delta\n import supercell\n pintra = supercell.intra_super2d(h,n=rep) # pristine\n vintra = supercell.intra_super2d(h,n=rep,central=v) # defective\n selfe = emat - pintra - g.I # Dyson equation, get selfenergy\n gv = (emat - vintra -selfe).I # Green function of a vacancy, with self-energy\n return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def slabldos(h,energies=np.linspace(-1.0,1.0,40),delta=None,nk=40):\n if h.dimensionality!=2: raise # nope\n ds = ldosmap(h,energies=energies,delta=delta,nk=nk)\n if len(ds[0])!=len(h.geometry.z): \n print(\"Wrong dimensions\",len(ds[0]),len(h.geometry.z))\n raise\n f = open(\"DOSMAP.OUT\",\"w\")\n f.write(\"# energy, index, DOS, zposition\\n\")\n for ie in range(len(energies)):\n for iz in range(len(h.geometry.z)):\n f.write(str(energies[ie])+\" \")\n f.write(str(iz)+\" \")\n f.write(str(ds[ie,iz])+\" \")\n f.write(str(h.geometry.z[iz])+\"\\n\")\n f.close()\n return energies,np.transpose(ds) # retunr LDOS ", "def ldfe(n=3):\n\n # We will use the following coordinate system.\n #\n # | z, top\n # |\n # |\n # |\n # o------- x, right\n # /\n # /\n # /\n # / y, front\n\n # Cube inside the octant that touches the sphere at\n a = 1 / sqrt(3)\n\n # We have three important faces of the cube.\n # Start with the front face and refine it in N segments.\n x = linspace(0, a, n + 1)\n z = linspace(0, a, n + 1)\n\n # Then delta Omega_ij = [x_i,x_i+1] x [z_j,z_j+1]\n # Now go through every cell.\n points = zeros((1 * 1 * 4 * n * n, 3)) # 1/3 of the octants\n weights = zeros(1 * 1 * 4 * n * n)\n square = zeros(1 * 1 * 4 * n * n)\n counter = 0\n rhos0 = 0.1 * ones(4)\n for i in range(n):\n for j in range(n):\n x0, x1, z0, z1 = x[i], x[i + 1], z[j], z[j + 1]\n\n omegas = computeomegas(x0, x1, z0, z1)\n areas = computeareas(omegas, x0, x1, z0, z1)\n print(\"\\n\\nOptimiztation for:\")\n print(\"Domain:\")\n print([x0, x1, z0, z1])\n\n rhos = optimizeposition_leastsquares(areas, omegas, x0, x1, z0, z1,\n rhos0)\n rhos0 = rhos # take the optimal parameter of this cell as the starting value for the optimizer in the next cell\n dummy = rand()\n for k in range(4):\n points[counter, :] = project(omegas[k](rhos[k]))\n weights[counter] = areas[k]\n square[counter] = dummy\n counter += 1\n scatterplot(points, weights, square)\n return points, weights", "def nodalLaplacian(self):\n if getattr(self, '_nodalLaplacian', None) is None:\n print('Warning: Laplacian has not been tested rigorously.')\n # The number of cell centers in each direction\n n = self.vnC\n # Compute divergence operator on faces\n if(self.dim == 1):\n D1 = sdiag(1./self.hx) * ddx(self.nCx)\n L = - D1.T*D1\n elif(self.dim == 2):\n D1 = sdiag(1./self.hx) * ddx(n[0])\n D2 = sdiag(1./self.hy) * ddx(n[1])\n L1 = sp.kron(speye(n[1]+1), - D1.T * D1)\n L2 = sp.kron(- D2.T * D2, speye(n[0]+1))\n L = L1 + L2\n elif(self.dim == 3):\n D1 = sdiag(1./self.hx) * ddx(n[0])\n D2 = sdiag(1./self.hy) * ddx(n[1])\n D3 = sdiag(1./self.hz) * ddx(n[2])\n L1 = kron3(speye(n[2]+1), speye(n[1]+1), - D1.T * D1)\n L2 = kron3(speye(n[2]+1), - D2.T * D2, speye(n[0]+1))\n L3 = kron3(- D3.T * D3, speye(n[1]+1), speye(n[0]+1))\n L = L1 + L2 + L3\n self._nodalLaplacian = L\n return self._nodalLaplacian", "def find_halos(pos, ngrid, log, level=3000):\n print('Binning particles', file=log)\n cells = get_cells(pos, ngrid, log)\n count = bincount(cells, minlength=ngrid**3)\n count.shape = (ngrid,ngrid,ngrid)\n print('Count in', count.min(), count.max(), file=log)\n idx = flatnonzero(count>level)\n print('Number of cells above', level, 'is', len(idx), file=log)\n \n \n labels, num_features = ndimage.label(count>level)\n print('Number fo features', num_features, file=log)\n print('Labels in', labels.min(), labels.max(), file=log)\n locations = ndimage.find_objects(labels)\n\n dense_regions = []\n\n for i in range(num_features):\n loc = locations[i]\n hw = max(l.stop - l.start for l in 
loc) * 0.5 /ngrid\n hw_padded = hw + 0.0/ngrid\n\n ctr =[(0.5/ngrid)*(l.stop + l.start) for l in loc]\n count_i = count[loc][labels[loc]==(i+1)].sum()\n print('Count', count_i, file=log)\n dense_regions.append((count_i, ctr, hw_padded))\n\n # sort by number of particles in the region\n dense_regions = sorted(dense_regions, key = lambda num_ctr_hw :num_ctr_hw[0], reverse=True)\n\n return dense_regions", "def ggpl_house():\n\n\t# .lines ogni riga ha due coppie di x/y che costituiscono un segmento\n\tverts = []\n\tcells = []\n\ti = 0\n\treader = csv.reader(open(\"lines/muri_esterni.lines\", 'rb'), delimiter=',') \n\tfor row in reader:\n\t\tverts.append([float(row[0]), float(row[1])])\n\t\tverts.append([float(row[2]), float(row[3])])\n\t\ti+=2\n\t\tcells.append([i-1,i])\n\n\texternalWalls = MKPOL([verts,cells,None])\n\tfloor = SOLIDIFY(externalWalls)\n\tfloor = S([1,2,3])([.04,.04,.04])(floor)\n\texternalWalls = S([1,2,3])([.04,.04,.04])(externalWalls)\n\texternalWalls = OFFSET([.2,.2,4])(externalWalls)\n\theightWalls = SIZE([3])(externalWalls)[0]\n\tthicknessWalls = SIZE([2])(externalWalls)[0]\n\n\tverts = []\n\tcells = []\n\ti = 0\n\treader = csv.reader(open(\"lines/muri_interni.lines\", 'rb'), delimiter=',') \n\tfor row in reader:\n\t\tverts.append([float(row[0]), float(row[1])])\n\t\tverts.append([float(row[2]), float(row[3])])\n\t\ti+=2\n\t\tcells.append([i-1,i])\n\n\tinternalWalls = MKPOL([verts,cells,None])\n\tinternalWalls = S([1,2,3])([.04,.04,.04])(internalWalls)\n\tinternalWalls = OFFSET([.2,.2,4])(internalWalls)\n\twalls = STRUCT([externalWalls, internalWalls])\n\n\tverts = []\n\tcells = []\n\ti = 0\n\treader = csv.reader(open(\"lines/porte.lines\", 'rb'), delimiter=',') \n\tfor row in reader:\n\t\tverts.append([float(row[0]), float(row[1])])\n\t\tverts.append([float(row[2]), float(row[3])])\n\t\ti+=2\n\t\tcells.append([i-1,i])\n\n\tdoors = MKPOL([verts,cells,None])\n\tdoors = SOLIDIFY(doors)\n\tdoors = S([1,2,3])([.04,.04,.04])(doors)\n\tdoors = OFFSET([.2,.2,3])(doors)\n\twalls = DIFFERENCE([walls, doors])\n\n\n\tverts = []\n\tcells = []\n\ti = 0\n\treader = csv.reader(open(\"lines/finestre.lines\", 'rb'), delimiter=',') \n\tfor row in reader:\n\t\tverts.append([float(row[0]), float(row[1])])\n\t\tverts.append([float(row[2]), float(row[3])])\n\t\ti+=2\n\t\tcells.append([i-1,i])\n\n\twindows = MKPOL([verts,cells,None])\n\twindows = SOLIDIFY(windows)\n\twindows = S([1,2,3])([.04,.04,.04])(windows)\n\twindows = OFFSET([.2,.2,2])(windows)\n\theightWindows = SIZE([3])(windows)[0]\n\twindows = T(3)((heightWalls-heightWindows)/2.)(windows)\n\twalls = DIFFERENCE([walls, windows])\n\n\tfloor = TEXTURE(\"texture/floor.jpg\")(floor)\n\twalls = TEXTURE(\"texture/wall.jpg\")(walls)\n\thome = STRUCT([floor, walls])\n\treturn home", "def NPL(self):\n self.edge = np.zeros((np.sum(self.Adjmatrix), 3))\n Temp = 0\n for i in range(self.nodenum):\n for j in range(self.nodenum):\n if(self.Adjmatrix[i, j] == 1):\n self.edge[Temp, 0], self.edge[Temp, 1], self.edge[Temp, 2] = i, j, self.Dismatrix[i, j]\n Temp += 1\n \n self.Totallength = ((np.max(self.Geox) - np.min(self.Geox))**2 + (np.max(self.Geoy) - np.min(self.Geoy))**2)**0.5\n self.norm_edge = self.edge[:, 2]/self.Totallength", "def ddxCellGrad(n, bc):\n bc = checkBC(bc)\n\n D = sp.spdiags((np.ones((n+1, 1))*[-1, 1]).T, [-1, 0], n+1, n,\n format=\"csr\")\n # Set the first side\n if(bc[0] == 'dirichlet'):\n D[0, 0] = 2\n elif(bc[0] == 'neumann'):\n D[0, 0] = 0\n # Set the second side\n if(bc[1] == 'dirichlet'):\n D[-1, -1] = -2\n elif(bc[1] == 
'neumann'):\n D[-1, -1] = 0\n return D", "def h(pos,obj):\n return D(pos)*(distancia_nodos(pos,obj))", "def Build_dof_handler(self, cross_section, n_mom):\n\n x = np.zeros((4))\n y = np.zeros((4))\n self.grid = []\n\n for i in range(0, self.ny_cells):\n for j in range(0, self.nx_cells):\n x[0] = self.x[j+i*(self.nx_cells+1)]\n y[0] = self.y[j+i*(self.nx_cells+1)]\n x[1] = self.x[j+1+i*(self.nx_cells+1)]\n y[1] = self.y[j+1+i*(self.nx_cells+1)]\n x[2] = self.x[j+1+(i+1)*(self.nx_cells+1)]\n y[2] = self.y[j+1+(i+1)*(self.nx_cells+1)]\n x[3] = self.x[j+(i+1)*(self.nx_cells+1)]\n y[3] = self.y[j+(i+1)*(self.nx_cells+1)]\n sigma_t = cross_section[i][j][0]\n sigma_s = np.zeros(n_mom)\n sigma_s[0:(cross_section.shape[2]-1)] = cross_section[i][j][1:]\n\n if self.fe_type == 'BLD':\n self.grid.append(FECell(BLD.BLD(x.copy(), y.copy(), len(self.grid)),\n sigma_t, sigma_s, sigma_t-sigma_s[0]))\n elif self.fe_type == 'PWLD':\n self.grid.append(FECell(PWLD.PWLD(x.copy(), y.copy(), len(self.grid)),\n sigma_t, sigm_s, sigma_t-sigma_s[0]))\n else:\n raise NotImplementedError('Unknow discretization.')", "def multi_ldos(h,es=[0.0],delta=0.001,nrep=3,nk=2,numw=3,random=False):\n print(\"Calculating eigenvectors in LDOS\")\n if h.is_sparse: # sparse Hamiltonian\n from bandstructure import smalleig\n print(\"SPARSE Matrix\")\n evals,ws = [],[] # empty list\n ks = klist.kmesh(h.dimensionality,nk=nk) # get grid\n hk = h.get_hk_gen() # get generator\n for k in ks: # loop\n print(\"Diagonalizing in LDOS, SPARSE mode\")\n if random:\n k = np.random.random(3) # random vector\n print(\"RANDOM vector in LDOS\")\n e,w = smalleig(hk(k),numw=numw,evecs=True)\n evals += [ie for ie in e]\n ws += [iw for iw in w]\n# evals = np.concatenate([evals,e]) # store\n# ws = np.concatenate([ws,w]) # store\n# raise\n# (evals,ws) = h.eigenvectors(nk) # get the different eigenvectors\n else:\n print(\"DENSE Matrix\")\n (evals,ws) = h.eigenvectors(nk) # get the different eigenvectors\n ds = [(np.conjugate(v)*v).real for v in ws] # calculate densities\n del ws # remove the wavefunctions\n os.system(\"rm -rf MULTILDOS\") # remove folder\n os.system(\"mkdir MULTILDOS\") # create folder\n go = h.geometry.copy() # copy geometry\n go = go.supercell(nrep) # create supercell\n fo = open(\"MULTILDOS/MULTILDOS.TXT\",\"w\") # files with the names\n for e in es: # loop over energies\n print(\"MULTILDOS for energy\",e)\n out = np.array([0.0 for i in range(h.intra.shape[0])]) # initialize\n for (d,ie) in zip(ds,evals): # loop over wavefunctions\n fac = delta/((e-ie)**2 + delta**2) # factor to create a delta\n out += fac*d # add contribution\n out /= np.pi # normalize\n out = spatial_dos(h,out) # resum if necessary\n name0 = \"LDOS_\"+str(e)+\"_.OUT\" # name of the output\n name = \"MULTILDOS/\" + name0\n write_ldos(go.x,go.y,out.tolist()*(nrep**h.dimensionality),\n output_file=name) # write in file\n fo.write(name0+\"\\n\") # name of the file\n fo.flush() # flush\n fo.close() # close file\n # Now calculate the DOS\n from dos import calculate_dos\n es2 = np.linspace(min(es),max(es),len(es)*10)\n ys = calculate_dos(evals,es2,delta) # use the Fortran routine\n from dos import write_dos\n write_dos(es2,ys,output_file=\"MULTILDOS/DOS.OUT\")", "def house_oriented_TLE(self, grid):\n # write your code here\n # write your code here\n n, m = len(grid), len(grid[0])\n dist = [[0 for _ in range(m)] for _ in range(n)]\n reachable_count = [[0 for _ in range(m)] for _ in range(n)]\n \n house_count = 0\n for i in range(n):\n for j in range(m):\n if grid[i][j] == 
DataType.HOUSE:\n house_count += 1\n self.bfs_house_oriented(grid, i, j, dist, reachable_count)\n \n # print(f'dist: {dist}')\n # print(f'reachable_count: {reachable_count}')\n \n min_dist = sys.maxsize\n for i in range(n):\n for j in range(m):\n if reachable_count[i][j] == house_count and dist[i][j] < min_dist : # and dist[i][j] != 0: # shouldn't be a HOUSE, should be handled in reachable_count\n min_dist = dist[i][j]\n \n if min_dist == sys.maxsize:\n return -1\n return min_dist", "def dolomite():\n\n rho = 2840.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 205.; C[0,1] = 71.; C[0,2] = 57.4; C[0,3] = -19.5; C[0,4] = 13.7; C[0,5] = 0.\n C[1,0] = C[0,1]; C[1,1] = 205.; C[1,2] = 57.4; C[1,3] = 19.5; C[1,4] = -13.7; C[1,5] = 0.\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 113.; C[2,3] = 0.; C[2,4] = 0.; C[2,5] = 0.\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 39.8; C[3,4] = 0.; C[3,5] = -13.7\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 39.8; C[4,5] = -19.5\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 67.\n\n return C, rho", "def neighbour_cells(id, Nx):\n r = cell_coord(id, Nx)\n neighs = []\n tmp = np.arange(3) - 1\n for p in itertools.product(tmp, tmp, tmp):\n neigh = (r + p) % Nx\n neighs.append(neigh)\n return [id_from_coord(neigh, Nx) for neigh in neighs]", "def ldos2d(h,e=0.0,delta=0.001,nrep=3,nk=None,mode=\"green\",\n random=True,num_wf=20):\n if mode==\"green\":\n import green\n if h.dimensionality!=2: raise # only for 1d\n if nk is not None:\n print(\"LDOS using normal integration with nkpoints\",nk)\n gb,gs = green.bloch_selfenergy(h,energy=e,delta=delta,mode=\"full\",nk=nk)\n d = [ -(gb[i,i]).imag for i in range(len(gb))] # get imaginary part\n else:\n print(\"LDOS using renormalization adaptative Green function\")\n gb,gs = green.bloch_selfenergy(h,energy=e,delta=delta,mode=\"adaptive\")\n d = [ -(gb[i,i]).imag for i in range(len(gb))] # get imaginary part\n elif mode==\"arpack\": # arpack diagonalization\n import klist\n if nk is None: nk = 10\n hkgen = h.get_hk_gen() # get generator\n ds = [] # empty list\n for k in klist.kmesh(h.dimensionality,nk=nk): # loop over kpoints\n print(\"Doing\",k)\n if random:\n print(\"Random k-point\")\n k = np.random.random(3) # random k-point\n hk = csc_matrix(hkgen(k)) # get Hamiltonian\n ds += [ldos_arpack(hk,num_wf=num_wf,robust=False,\n tol=0,e=e,delta=delta)]\n d = ds[0]*0.0 # inititlize\n for di in ds: d += di # add\n d /=len(ds) # normalize\n d = spatial_dos(h,d) # convert to spatial resolved DOS\n g = h.geometry # store geometry\n x,y = g.x,g.y # get the coordinates\n go = h.geometry.copy() # copy geometry\n go = go.supercell(nrep) # create supercell\n write_ldos(go.x,go.y,d.tolist()*(nrep**2),z=go.z) # write in file", "def construct_linear_system(self):\n N=self.grid.Ncells()\n Nbc = len(self.dirichlet_bcs)\n self.Ncalc=Ncalc = N - Nbc\n\n # map cells to forced values\n dirichlet = dict( [ (c,v) for c,v,xy in self.dirichlet_bcs])\n\n self.is_calc_c = is_calc_c = np.ones(N,np.bool8)\n for c,v,xy in self.dirichlet_bcs:\n is_calc_c[c] = False\n\n # is_calc_c[self.c_mask] = False\n\n # c_map is indexed by real cell indices, and returns the matrix index\n c_map = self.c_map = np.zeros(N,np.int32)\n self.c_map[is_calc_c] = np.arange(Ncalc)\n\n dzc=self.dzc\n dzf=self.dzf\n area_c=self.area_c\n\n meth='coo' # 'dok'\n if meth == 'dok':\n A=sparse.dok_matrix((Ncalc,Ncalc),np.float64)\n else:\n # construct the matrix from a sequence of 
indices and values\n ij=[]\n values=[] # successive value for the same i.j will be summed\n \n b = np.zeros(Ncalc,np.float64)\n flux_per_gradient_j = -self.K_j * self.l_j * dzf / self.d_j * self.dt\n\n self.grid.edge_to_cells() # makes sure that edges['cells'] exists.\n \n for j in range(self.grid.Nedges()):\n e = self.grid.edges[j]\n ic1,ic2 = e['cells']\n \n if ic1<0 or ic2<0 or e['deleted']:\n continue # boundary edge, or deleted edge\n \n flux_per_gradient=flux_per_gradient_j[j]\n \n # this is the desired operation:\n # Cdiff[ic1] -= flux_per_gradient / (An[ic1]*dzc) * (C[ic2] - C[ic1])\n # Cdiff[ic2] += flux_per_gradient / (An[ic2]*dzc) * (C[ic2] - C[ic1])\n # Where Cdiff is row, C is col\n\n if is_calc_c[ic1] and is_calc_c[ic2]:\n mic2 = c_map[ic2]\n mic1 = c_map[ic1]\n v1=flux_per_gradient / (area_c[ic1]*dzc[ic1])\n v2=flux_per_gradient / (area_c[ic2]*dzc[ic2])\n \n if meth == 'dok':\n A[mic1,mic2] -= v1\n A[mic1,mic1] += v1\n A[mic2,mic2] += v2\n A[mic2,mic1] -= v2\n else:\n ij.append( (mic1,mic2) ) ; values.append(-v1)\n ij.append( (mic1,mic1) ) ; values.append(v1)\n ij.append( (mic2,mic2) ) ; values.append(v1)\n ij.append( (mic2,mic1) ) ; values.append(-v1)\n \n elif not ( is_calc_c[ic1] or is_calc_c[ic2] ):\n # both are dirichlet, so nothing to do\n pass\n elif not is_calc_c[ic2]:\n mic1 = c_map[ic1]\n v=flux_per_gradient / (self.area_c[ic1]*dzc[ic1])\n if meth == 'dok':\n A[mic1,mic1] += v\n else:\n ij.append( (mic1,mic1) )\n values.append(v)\n\n # roughly\n # A[1,1]*x[1] + A[1,2]*x[2] + ... = b[1]\n # but we already know x[2],\n # A[1,1]*x[1] + ... = b[1] - A[1,2]*x[2]\n # so flip the sign, multiply by known dirichlet value, and\n # add to the RHS\n b[mic1] += flux_per_gradient / (area_c[ic1]*dzc[ic1]) * dirichlet[ic2]\n else: # not is_calc_c[c1]\n mic2 = c_map[ic2]\n # A[mic2,mic2] += flux_per_gradient / (area_c[ic2]*dzc[ic2])\n # A[mic2,mic1] -= flux_per_gradient / (area_c[ic2]*dzc[ic2])\n\n # A[mic2,mic2]*x[2] + A[mic2,mic1]*x[1] = b[2]\n # ...\n # A[mic2,mic2]*x[2] - flux_per_gradient / (area_c[ic2]*dzc[ic2])*x[1] = b[2]\n # ...\n # A[mic2,mic2]*x[2] = b[2] + flux_per_gradient / (area_c[ic2]*dzc[ic2])*x[1]\n v=flux_per_gradient / (area_c[ic2]*dzc[ic2])\n if meth == 'dok':\n A[mic2,mic2] += v\n else:\n ij.append( (mic2,mic2) )\n values.append(v)\n b[mic2] += flux_per_gradient / (area_c[ic2]*dzc[ic2]) * dirichlet[ic1]\n\n # Used to test 'is not 0:' but modern python complains\n if isinstance(self.alpha,np.ndarray): \n for c in range(N):\n if self.is_calc_c[c]:\n mic=self.c_map[c]\n v=self.alpha[c]*self.dt\n if meth == 'dok':\n A[mic,mic] -= v\n else:\n ij.append( (mic,mic) )\n values.append(-v)\n\n # Flux boundary conditions:\n for ic,value,xy in self.neumann_bcs:\n mic=c_map[ic]\n # make mass/time into concentration/step\n # arrived at minus sign by trial and error.\n # 2023-08-04: there was a bug here that used ic2 instead of ic.\n b[mic] -= value/(area_c[ic]*dzc[ic]) * self.dt\n\n if meth == 'dok':\n self.A = sparse.coo_matrix(A)\n else:\n ijs=np.array(ij,dtype=np.int32)\n data=np.array(values,dtype=np.float64)\n A=sparse.coo_matrix( (data, (ijs[:,0],ijs[:,1]) ), shape=(Ncalc,Ncalc) )\n self.A=A\n \n # report scale to get a sense of whether dt is too large\n Ascale = A.diagonal().min()\n log.debug(\"Ascale is %s\"%Ascale)\n\n self.b = b", "def calc_Lr(rho,mld,f,g=9.8,po=1027.):\n n2ml=np.ndarray(len(rho[1,:-1]))\n for i in range(len(rho[1,:-1])):\n n2ml[i]=-(g/po)*((rho[15,i]-rho[np.int8(mld[i])+15,i])/mld[i])\n Lr=(np.sqrt(n2ml)*mld[:-1])/f\n\n return Lr", "def 
get_neighbours(lat, long):\n # ns = north east, ew = east west (ratio between 1 feet and degree) \n # its different on diferent places on earth (sphere)!!\n ns = 0.0025\n ew = 0.0025\n walk = []\n for i in range(-2, 3):\n for j in range(-2, 3):\n thiscell = CellId.from_lat_lng(LatLng.from_degrees(lat + ns*i, long + ew*j)).parent(S2_CELL_LEVEL)\n if abs(i * j) < 4:\n walk.append(thiscell.id())\n return sorted(walk)", "def ldos_finite(h,e=0.0,n=10,nwf=4,delta=0.0001):\n if h.dimensionality!=1: raise # if it is not one dimensional\n intra = csc(h.intra) # convert to sparse\n inter = csc(h.inter) # convert to sparse\n interH = inter.H # hermitian\n m = [[None for i in range(n)] for j in range(n)] # full matrix\n for i in range(n): # add intracell\n m[i][i] = intra\n for i in range(n-1): # add intercell\n m[i][i+1] = inter\n m[i+1][i] = interH\n m = bmat(m) # convert to matrix\n (ene,wfs) = slg.eigsh(m,k=nwf,which=\"LM\",sigma=0.0) # diagonalize\n wfs = wfs.transpose() # transpose wavefunctions\n dos = (wfs[0].real)*0.0 # calculate dos\n for (ie,f) in zip(ene,wfs): # loop over waves\n c = 1./(1.+((ie-e)/delta)**2) # calculate coefficient\n dos += np.abs(f)*c # add contribution\n odos = spatial_dos(h,dos) # get the spatial distribution\n go = h.geometry.supercell(n) # get the supercell\n write_ldos(go.x,go.y,odos) # write in a file\n return dos # return the dos", "def cell_cnc_tracker(Out, U, V, W, t, cell0, cellG, cellD, SHP, cryoconite_locations):\n\n Cells = np.random.rand(len(t),SHP[0],SHP[1],SHP[2]) * cell0\n CellD = np.zeros(shape=(SHP[0],SHP[1],SHP[2])) + cellD\n CellG = np.zeros(shape=(SHP[0],SHP[1],SHP[2])) + cellG\n \n \n for i in np.arange(0,SHP[0],1):\n CellG[i,:,:] = np.where(cryoconite_locations==True, CellG[i,:,:]*1000, CellG[i,:,:])\n \n for t in np.arange(0,len(Out.Qz[:,0,0,0]),1):\n \n for layer in np.arange(0,len(Out.Qz[0,:,0,0]),1):\n\n # normalise lateral flow so that -ve= flow out of cells towards edges\n # and positive flow is towards centre line. In Qx -ve = leftwards flow\n # and +ve = rightwards flow. 
This leads to a rightwards drift in cell\n # fluxes if not normalised in this way.\n U[t,layer,:,0:int(U.shape[2]/2)] = 0-U[t,layer,:,0:int(U.shape[2]/2)]\n\n # nabla is the inverted delta operator used to denote the divergence \n # of a vector field, here applied to hydrological flow in m3/d \n # calculated as dx/dt + dy/dt + dz/dt\n\n nabla = (U[t,layer,:,:] + V[t,layer,:,:] + W[t,layer,:,:])\n \n # divergence gives net in/outflow in m3/t\n # cells/m3 = cells/mL *1000\n\n delC = Out.Q[t,layer,:,:] * (1+CellG[layer,:,:] - CellD[layer,:,:])\n\n Cells[t,layer,:,:] = Cells[t,layer,:,:] + (delC * 1000) \n\n Cells[Cells<0] = 0\n \n CellColumnTot = Cells.sum(axis=1)\n \n\n return Cells, CellColumnTot", "def probability(N_dr, L_opmin, L_opmax, L_min, L_max, L_d):\n opening_nomullignas = []\n opening_withmullignas = []\n sum_nomulligans = 0\n sum_withmulligans = 0\n mulligan_coeff = 0\n\n for i in range(L_opmin, min(L_opmax + 1, 8)): # first make a list of tuples of the form:\n # (number_of_lands_in_opening_hand, probability_of_drawing_such_a_hand)\n a = hypergeom(i, 7, 60, L_d)\n opening_nomullignas.append((i, a))\n mulligan_coeff = mulligan_coeff + a # this will be used later for calculating the probability of\n # taking the mulligan and is used as a coefficient before the mulligan sum\n for (x, y) in opening_nomullignas: # use the list of tuples to calculate the first part of equation 5\n partial_nomulligans = 0\n for j in range(L_min - x, L_max - x + 1):\n partial_nomulligans = partial_nomulligans + hypergeom(j, N_dr, 53, L_d - x)\n sum_nomulligans = sum_nomulligans + partial_nomulligans * y\n\n mulligan_coeff = 1 - mulligan_coeff # probability of mulliganing\n for i in range(L_opmin, min(L_opmax + 1, 7)): # doing the same thing as before, but drawing 6 instead of 7 cards\n a = hypergeom(i, 6, 60, L_d)\n opening_withmullignas.append((i, a))\n\n for (x, y) in opening_withmullignas:\n partial_withmulligans = 0\n for j in range(L_min - x, L_max - x + 1):\n partial_withmulligans = partial_withmulligans + hypergeom(j, N_dr, 54, L_d - x)\n sum_withmulligans = sum_withmulligans + partial_withmulligans * y\n total_withmulligans = mulligan_coeff * sum_withmulligans\n\n return total_withmulligans + sum_nomulligans", "def createDistance(x, y, distance, n):\n for i in range(0, n+1):\n for j in range(0, n+1):\n if (math.sqrt((x-i)**2 + (y-j)**2))<= 3.5:\n distance[i][j] = 1\n else: \n distance[i][j] = 1\n \n distance[x][y] = 0\n return distance", "def numIslands(grid):\n # count to store each new island found\n count = 0\n # If the grid is empty, return 0\n if not grid:\n return count\n\n y_max = len(grid)\n x_max = len(grid[0])\n \n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j] == '1':\n dfs(grid, i, j)\n count += 1\n return count", "def lltnum(self,):\n m = self.m\n n = self.n\n diag = self.diag\n perm = self.perm\n AAt = self.AAt\n kAAt = self.kAAt\n iAAt = self.iAAt\n mark = self.mark\n self.denwin\n\n m2 = m+n\n #/*------------------------------------------------------+\n #| initialize constants */\n\n temp = np.zeros(m2)\n first = np.zeros(m2, dtype=np.int)\n link = np.empty(m2, dtype=np.int)\n for i in range(m2):\n link[i] = -1\n\n maxdiag=0.0\n for i in range(m2):\n if abs(diag[i]) > maxdiag:\n maxdiag = abs(diag[i])\n\n self.ndep=0\n\n #/*------------------------------------------------------+\n #| begin main loop - this code is taken from George and |\n #| Liu's book, pg. 155, modified to do LDLt instead |\n #| of LLt factorization. 
*/\n\n for i in range(m2):\n diagi = diag[i]\n sgn_diagi = -1 if perm[i] < n else 1\n j = link[i]\n while j != -1:\n newj = link[j]\n k = first[j]\n lij = AAt[k]\n lij_dj = lij*diag[j]\n diagi -= lij*lij_dj\n k_bgn = k+1\n k_end = kAAt[j+1]\n if k_bgn < k_end:\n first[j] = k_bgn\n row = iAAt[k_bgn]\n link[j] = link[row]\n link[row] = j\n if j < self.denwin:\n for kk in range(k_bgn, k_end):\n temp[iAAt[kk]] += lij_dj*AAt[kk]\n else:\n ptr = row\n for kk in range(k_bgn, k_end):\n temp[ptr] += lij_dj*AAt[kk]\n ptr+=1\n\n j=newj\n\n k_bgn = kAAt[i]\n k_end = kAAt[i+1]\n for kk in range(k_bgn, k_end):\n row = iAAt[kk]\n AAt[kk] -= temp[row]\n\n if abs(diagi) <= self.epsnum*maxdiag or mark[i] == False:\n\n #if (sgn_diagi*diagi <= epsnum*maxdiag || mark[i] == FALSE)\n\n self.ndep+=1\n maxoffdiag = 0.0\n for kk in range(k_bgn, k_end):\n maxoffdiag = max( maxoffdiag, abs( AAt[kk] ) )\n\n if maxoffdiag < 1.0e+6*self._EPS:\n mark[i] = False\n else:\n diagi = sgn_diagi * self._EPS\n\n diag[i] = diagi\n if k_bgn < k_end:\n first[i] = k_bgn\n row = iAAt[k_bgn]\n link[i] = link[row]\n link[row] = i\n for kk in range(k_bgn, k_end):\n row = iAAt[kk]\n if mark[i]:\n AAt[kk] /= diagi\n else:\n AAt[kk] = 0.0\n\n temp[row] = 0.0\n\n del(link)\n del(first)\n del(temp)", "def get_neighb_dist(self, i, ci):\n ri = self.xyz[i]\n j = self.conn[i][ci]\n rj = self.xyz[j].copy()\n if self.periodic:\n if self.use_pconn:\n img = self.pconn[i][ci]\n rj += np.dot(img, self.cell)\n else:\n all_rj = rj + self.images_cellvec\n all_r = all_rj - self.xyz[i]\n all_d = np.sqrt(np.add.reduce(all_r*all_r,1))\n closest = np.argsort(all_d)[0]\n return all_rj[closest]\n dr = ri-rj\n d = np.sqrt(np.sum(dr*dr))\n return d", "def get_peridym_edge_length(cell_cent, struct_grd=False):\n dim = len(cell_cent[0])\n el = np.zeros(dim, dtype = float)\n\n if(struct_grd):\n el_fact = 1.0\n else:\n el_fact = 3.0\n\n for d in range(dim):\n xx = np.unique(cell_cent[:,d])\n el[d] = el_fact*np.max(np.abs(np.diff(xx[0:2])))\n\n return el", "def ohodnotL(row, col, znak, prevx, prevy, pocet_ciest, hlbka, mx): # vlavo\r\n\r\n susedia = getSusedia_ohodnot(row, col, znak)\r\n\r\n if (values[row][col] != \"W\" and col != 0):\r\n hlbka += 1\r\n\r\n if col == 0:\r\n if values[row][col] == \"W\" and hlbka != 0:\r\n hlbka -= 1\r\n dlzkyL.append(hlbka)\r\n\r\n if (col != 0 and hlbka < mx):\r\n for sused in susedia:\r\n if (sused[1] == col - 1 or (sused[1] == col and (sused[0] != prevx or sused[1] != prevy))):\r\n if sused[1] == 0:\r\n pocet_ciest += 1\r\n pocet_ciest += ohodnotL(sused[0], sused[1], znak, row, col, 0, hlbka, mx)\r\n if (values[sused[0]][sused[1]] == \"W\") and col == 1: # nema zmysel sem umiestnovat - radsej inde\r\n pocet_ciest = 0\r\n return pocet_ciest", "def compute_distance_field(self, entity_type):\n GRID_HEIGHT = poc_grid.Grid.get_grid_height(self)\n GRID_WIDTH = poc_grid.Grid.get_grid_width(self)\n \n visited = poc_grid.Grid(self.get_grid_height(), self.get_grid_width())\n for row in range(self.get_grid_height()):\n for col in range(self.get_grid_width()):\n if not self.is_empty(row, col):\n visited.set_full(row, col)\n \n \n distance_field = [[ GRID_HEIGHT * GRID_WIDTH \\\n for dummy_col in range(GRID_WIDTH)] \\\n for dummy_row in range(GRID_HEIGHT)]\n \n boundary = poc_queue.Queue()\n entity_cells = []\n if entity_type == ZOMBIE:\n entity_cells = self._zombie_list\n \n else:\n entity_cells = self._human_list\n \n for entity_cell in entity_cells:\n \n row = entity_cell[0]\n col = entity_cell[1]\n # For cells in the queue, initialize 
visited to be FULL\n visited.set_full(row, col)\n #and distance_field to be zero.\n distance_field[row][col] = 0\n #Create a queue boundary that is a copy of either the zombie list or the human list.\n boundary.enqueue(entity_cell)\n \n # a modified version of the BFS\n while len(boundary) > 0:\n #current_cell ← dequeue boundary\n current_cell = boundary.dequeue()\n \n # 4 neighbors\n neighbors = poc_grid.Grid.four_neighbors(self,current_cell[0], current_cell[1])\n distance = distance_field[current_cell[0]][current_cell[1]] + 1\n \n #for all neighbors neighbor_cell of current_cell:\n for neighbor in neighbors:\n # if neighbor_cell is not in visited:\n if visited.is_empty(neighbor[0], neighbor[1]):\n # add neighbor_cell to visited\n if distance < distance_field[neighbor[0]][neighbor[1]]:\n distance_field[neighbor[0]][neighbor[1]] = distance\n visited.set_full(neighbor[0], neighbor[1])\n # enqueue neighbor_cell onto boundary\n boundary.enqueue(neighbor)\n \n return distance_field", "def get_incorrect_distance_matrix(L):\n n = len(L)\n D = np.zeros((n,n))\n for i in range(n):\n for j in range(n):\n if i != j:\n D[i][j] = get_minor(L, [i], [j]) / get_minor(L, [i], [i])\n return D", "def all_dhkl(self, crystal):\n #d_min = self.wavelength/self.max2theta*pi/2\n d_min = self.wavelength/sin(self.max2theta/2)/2\n \n # This block is to find the shortest d_hkl, \n # for all basic directions (1,0,0), (0,1,0), (1,1,0), (1,-1,0) and so on, 26 in total \n hkl_max = np.array([1,1,1])\n for h1 in [-1, 0, 1]:\n for k1 in [-1, 0, 1]:\n for l1 in [-1, 0, 1]:\n hkl_index = np.array([[h1,k1,l1]])\n d = float(np.linalg.norm( np.dot(hkl_index, crystal.rec_matrix), axis=1))\n if d>0:\n multiple = 1/d/d_min\n hkl_index *= round(multiple)\n for i in range(len(hkl_max)):\n if hkl_max[i] < hkl_index[0,i]:\n hkl_max[i] = hkl_index[0,i]\n #h1 = 2*ceil(np.linalg.norm(crystal.cell_para[0])/d_min)\n #k1 = 2*ceil(np.linalg.norm(crystal.cell_para[1])/d_min)\n #l1 = 2*ceil(np.linalg.norm(crystal.cell_para[2])/d_min)\n h1, k1, l1 = hkl_max\n h = np.arange(-h1,h1)\n k = np.arange(-k1,k1)\n l = np.arange(-l1,l1)\n \n hkl = np.array((np.meshgrid(h,k,l))).transpose()\n hkl_list = np.reshape(hkl, [len(h)*len(k)*len(l),3])\n hkl_list = hkl_list[np.where(hkl_list.any(axis=1))[0]]\n d_hkl = 1/np.linalg.norm( np.dot(hkl_list, crystal.rec_matrix), axis=1)\n #for ix, a in enumerate(hkl_list):\n # if np.array_equal(a, np.array([1,-1,3])) is True:\n # print(a)\n # break\n #\n #print(ix, hkl_list[ix], d_hkl[ix], d_min)\n\n shortlist = d_hkl > (d_min)\n d_hkl = d_hkl[shortlist]\n hkl_list = hkl_list[shortlist]\n sintheta = self.wavelength/2/d_hkl\n\n self.theta = np.arcsin(sintheta)\n self.hkl_list = hkl_list\n self.d_hkl = d_hkl\n \n #return hkl_list, d_hkl, sintheta", "def count_cells(rule, n=500):\n ca = Cell1D(rule, n)\n ca.start_single()\n\n res = []\n for i in range(1, n):\n cells = np.sum(ca.array)\n res.append((i, i**2, cells))\n ca.step()\n\n return res", "def entropy(self):\n\n \"\"\"Gets the first neighbours, which are the first 2*r+1 cells.\"\"\"\n current_neighbours = []\n amount = [0] * self.k ** (2 * self.r + 1)\n for i in range(2 * self.r + 1):\n current_neighbours.append(self.config[self.t, i % self.width])\n\n \"\"\"Calculates the rule and adds one to it's amount. 
It then removes the\n leftmost cell and adds a cell to the right.\"\"\"\n for i in range(len(self.config[self.t]) - 1):\n rule = 0\n for j in range(len(current_neighbours)):\n rule += int(current_neighbours[j] *\n self.k ** ((2 * self.r + 1) - j - 1))\n amount[len(amount) - 1 - rule] += 1\n current_neighbours.pop(0)\n current_neighbours.append(\n self.config[self.t, (2 * self.r + 1 + i) % self.width])\n\n \"\"\"Calculates the rule for the last neighbourhood.\"\"\"\n rule = 0\n for j in range(len(current_neighbours)):\n rule += int(current_neighbours[j] *\n self.k ** ((2 * self.r + 1) - j - 1))\n amount[len(amount)-1 - rule] += 1\n\n \"\"\"Calculates the Shannon entropy and the the average entropy so far.\"\"\"\n shannon = 0\n for i in range(len(amount)):\n if(amount[i] != 0):\n probability = amount[i] / self.width\n shannon -= probability * np.log2(probability)\n self.average_entropy = (self.average_entropy *\n self.t + shannon) / (self.t + 1)", "def assign_cell(districts):\r\n hyp_dists = []\r\n created_district = False\r\n #Find the next unassigned cell:\r\n unassigned = np.where(districts==0)\r\n r = unassigned[0][0]\r\n c = unassigned[1][0]\r\n #Try setting the cell to each possible tag:\r\n # continuity_count(districts,1,r,c)\r\n for tag in range(1,6):\r\n #Make sure we havent skipped a tag:\r\n if (tag not in districts and not created_district) or (tag in districts):\r\n #Make sure we havent exceeded 5 cells with this tag:\r\n if (districts == tag).sum() < 5:\r\n if tag not in districts:\r\n #It doesnt make sense to create a new district with each tag, so make sure we only do it once per cell:\r\n created_district = True\r\n #This tag might work, so create a hypothetical case from it:\r\n hyp = np.copy(districts)\r\n hyp[r,c] = tag\r\n if continuity_test(hyp):\r\n #See if the hypothetical map is complete:\r\n if 0 in hyp:\r\n hyp_dists += assign_cell(hyp)\r\n else:\r\n hyp_dists += [hyp]\r\n if r==2 and c==2:\r\n print(len(hyp_dists))\r\n return hyp_dists", "def fermionic_cells(self):\n cells = self.cells()\n cells_and_circles = self.all_cells()\n circles = [x for x in cells_and_circles if x not in cells]\n coords = [(i, jprime)\n for iprime, jprime in circles\n for i, j in circles\n if iprime > i\n ]\n coords.sort()\n return coords", "def potentialSolver5(self, w, max_it, tol):\n\n dx2 = 1.0/(self.dh[0]*self.dh[0]); # dx^2\n dy2 = 1.0/(self.dh[1]*self.dh[1]); # dy^2\n dz2 = 1.0/(self.dh[2]*self.dh[2]); # dz^2\n \n L2 = 0.0 # norm\n \n converged = False\n \n # Step 1: create *integer* array the same size as u \n x = np.zeros_like(self.phi,dtype=np.int)\n\n # Step 2: populate all non-boundary cells with running numbers from 1 to (n-2)^2\n x[1:-1,1:-1,1:-1] = np.arange(1,(self.ni-2)*(self.nj-2)*(self.nk-2)+1).reshape(self.ni-2,self.nj-2,self.nk-2)\n\n # Step 3: get indices of even (red) and odd (black) points\n ir, jr, kr = np.where((x>0) & (x%2 == 0)) # indices of red pts = indices of even numbers\n ib, jb, kb = np.where((x>0) & (x%2 == 1)) # indices of black pts = indices of odd numbers\n\n\n \n # solve potential\n for it in np.arange(1,max_it+1):\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\" \n \"\"\"\n #standard internal open node\n phi_new = (self.rho[i][j][k]/self.EPS_0 +\\\n dx2*(self.phi[i-1][j][k] + self.phi[i+1][j][k]) +\\\n dy2*(self.phi[i][j-1][k] + self.phi[i][j+1][k]) +\\\n dz2*(self.phi[i][j][k-1] + self.phi[i][j][k+1]))/(2*dx2+2*dy2+2*dz2)\n \n # sucessive over relaxation \n self.phi[i,j,k] = 
self.phi[i,j,k] + 1.4*(phi_new - self.phi[i][j][k])\n \n \"\"\" \n \n \"\"\"\n #standard internal open node\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n (self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1])+\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1])+\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk]))/(2*dx2+2*dy2+2*dz2)\n \n \"\"\"\n \"\"\"\n # sucessive over relaxation \n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] +\\\n 1.8*(self.phi_new[1:self.ni-1,1:self.nj-1,1:self.nk-1] - \\\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1])\n \"\"\"\n \n # Red point update\n self.phi[ir,jr,kr] = (1-w)*self.phi[ir,jr,kr] + (1.0/6.0)*w*(self.phi[ir+1,jr,kr] + self.phi[ir-1,jr,kr] +\\\n self.phi[ir,jr+1,kr] + self.phi[ir,jr-1,kr] +\\\n self.phi[ir,jr,kr+1] + self.phi[ir,jr,kr-1] +\\\n (self.rho[ir,jr,kr]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n # Black point update\n self.phi[ib,jb,kb] = (1-w)*self.phi[ib,jb,kb] + (1.0/6.0)*w*(self.phi[ib+1,jb,kb] + self.phi[ib-1,jb,kb] +\\\n self.phi[ib,jb+1,kb] + self.phi[ib,jb-1,kb] +\\\n self.phi[ib,jb,kb+1] + self.phi[ib,jb,kb-1] +\\\n (self.rho[ib,jb,kb]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n #check for convergence*/\n if it%25==0:\n sum = 0;\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\"\n\n self.R[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n -self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1]*(2*dx2+2*dy2+2*dz2) +\\\n self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1]) +\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1]) +\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk])\n\n sum = np.sum(self.R**2)\n\n L2 = np.sqrt(sum/(self.ni*self.nj*self.nk));\n #print(\"iter: \"+str(it)+\", L2 = \"+str(L2))\n if (L2<tol):\n converged = True\n break\n\n if (converged==False):\n print(\"Gauss-Seidel failed to converge, L2 = \"+str(L2))\n \n return converged", "def occupied_cells(self):\n\n for lm in self.landmarks:\n if self.cell_size < 1:\n # expand the range the landmark exists\n lm_x_range = np.arange(lm[0]-self.R, lm[0]+self.R, self.cell_size)\n lm_y_range = np.arange(lm[1]-self.R, lm[1]+self.R, self.cell_size)\n\n # loop through expanded ranges and compute grid positions\n for lm_x in lm_x_range:\n for lm_y in lm_y_range:\n\n row, col = self.cell_index([lm_x, lm_y])\n\n # apply cost of occupied cell\n try:\n self.world[row][col] = 1000\n except IndexError:\n pass\n\n else:\n # apply cost of occupied cell\n row, col = self.cell_index(lm)\n try:\n self.world[row][col] = 1000\n except IndexError:\n pass", "def dfs(cell):\n r, c = cell\n if (0 <= r < len(grid)) and (0 <= c < len(grid[0])) and (cell not in visited) and (grid[r][c] != 0):\n\n visited.add((r, c)) # save cell\n grid[r][c] = self.num_islands\n # update current island size\n dfs((r, c+1))\n dfs((r+1, c))\n dfs((r-1, c))\n dfs((r, c-1))\n\n else:\n # out of bounds or visited\n return", "def lherzolite():\n\n rho = 3270.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 187.4; C[0,1] = 63.71; C[0,2] = 63.87; C[0,3] = 0.78; C[0,4] = 2.02; C[0,5] = -3.2\n C[1,0] = 
C[0,1]; C[1,1] = 211.25; C[1,2] = 64.5; C[1,3] = -3.07; C[1,4] = 0.87; C[1,5] = -5.78\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 190.; C[2,3] = 0.38; C[2,4] = 2.38; C[2,5] = -0.12\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 67.9; C[3,4] = -2.12; C[3,5] = 1.6\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 63.12; C[4,5] = -0.55\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 66.83\n\n return C, rho", "def refl_analysis(self,dials_model):\n Z = self.refl_table\n indices = Z['miller_index']\n expts = ExperimentListFactory.from_json_file(dials_model,\n check_format=False)\n self.dials_model=expts[0]\n CRYS = self.dials_model.crystal\n UC = CRYS.get_unit_cell()\n strong_resolutions = UC.d(indices)\n order = flex.sort_permutation(strong_resolutions, reverse=True)\n Z[\"spots_order\"] = order\n self.spots_pixels = flex.size_t()\n spots_offset = flex.int(len(order),-1)\n spots_size = flex.int(len(order),-1)\n\n P = panels = Z['panel']\n S = shoeboxes = Z['shoebox']\n N_visited = 0; N_bad = 0\n for oidx in range(len(order)): #loop through the shoeboxes in correct order\n sidx = order[oidx] # index into the Miller indices\n ipanel = P[sidx]\n slow_size = 254\n fast_size = 254\n panel_size=slow_size*fast_size\n bbox = S[sidx].bbox\n first_position = spots_offset[sidx] = self.spots_pixels.size()\n for islow in range(max(0,bbox[2]-3), min(slow_size,bbox[3]+3)):\n for ifast in range(max(0,bbox[0]-3), min(fast_size,bbox[1]+3)):\n value = self.trusted_mask[ipanel][islow*slow_size + ifast]\n N_visited += 1\n if value: self.spots_pixels.append(ipanel*panel_size+islow*slow_size+ifast)\n else: N_bad+=1\n spot_size = spots_size[sidx] = self.spots_pixels.size() - first_position\n Z[\"spots_offset\"] = spots_offset\n Z[\"spots_size\"] = spots_size\n print (N_visited,\"pixels were visited in the %d shoeboxes (with borders)\"%len(order))\n print (N_bad,\"of these were bad pixels, leaving %d in target\"%(len(self.spots_pixels)))", "def neigh_comm(n):\n\n nc = {bl[n]: 0.0}\n for idx in range(0, node_count):\n neigh = idx\n ncomm = bl[neigh]\n nei_weight = network[n][idx]\n if (neigh != n) & (nei_weight > 0.0):\n if ncomm in nc:\n nc[ncomm] += nei_weight\n else:\n nc[ncomm] = nei_weight\n return nc", "def calc_ell_list(chain) :\n ell_list = np.zeros(len(chain.bridges_dict))\n \n for b in chain.bridges_dict.keys() :\n i, j = chain.bridges_dict[b].lumen1, chain.bridges_dict[b].lumen2\n L_i, pos_i = chain.lumens_dict[i].length, chain.lumens_dict[i].pos\n L_j, pos_j = chain.lumens_dict[j].length, chain.lumens_dict[j].pos\n \n chain.bridges_dict[b].length = np.abs(pos_j - pos_i) - (L_i + L_j)", "def calc_number_neighbours(num_electrons: int):\r\n if num_electrons < -4 or num_electrons > 4 : \r\n # if number of missing/extra e- higher than 4, then distort 8-num_electrons\r\n num_neighbours = abs(8 - abs(num_electrons) )\r\n elif -4 < num_electrons < 4:\r\n num_neighbours = abs(num_electrons)\r\n elif abs(num_electrons) == 4:\r\n num_neighbours = abs(num_electrons)\r\n \r\n return abs(num_neighbours)", "def countNeighbors(oldgen, x, y):\n temp = 1\n\n count = 0\n for i in range(-1, 2):\n for j in range(-1, 2):\n\n # TODO: this needs rewritin to be more understandable\n if not (i == 0 and j == 0):\n count += int(oldgen[(x + i + WID) % WID][(y + j + HGT) % HGT])\n\n for i in range(-1, 2):\n for j in range(-1, 2):\n temp += 1\n\n count -= int(oldgen[x][y])\n\n return count", "def get_correct_distance_matrix(L):\n n = len(L)\n D = 
np.zeros((n,n))\n for i in range(n):\n for j in range(n):\n if i != j:\n D[i][j] = get_minor(L, [i, j], [i, j]) / get_minor(L, [i], [i])\n return D", "def potentialSolver4(self, w, max_it, tol):\n\n dx2 = 1.0/(self.dh[0]*self.dh[0]); # dx^2\n dy2 = 1.0/(self.dh[1]*self.dh[1]); # dy^2\n dz2 = 1.0/(self.dh[2]*self.dh[2]); # dz^2\n \n L2 = 0.0 # norm\n \n converged = False\n \n # Step 1: create *integer* array the same size as u \n x = np.zeros_like(self.phi,dtype=np.int)\n\n # Step 2: populate all non-boundary cells with running numbers from 1 to (n-2)^2\n x[1:-1,1:-1,1:-1] = np.arange(1,(self.ni-2)*(self.nj-2)*(self.nk-2)+1).reshape(self.ni-2,self.nj-2,self.nk-2)\n\n # Step 3: get indices of even (red) and odd (black) points\n ir, jr, kr = np.where((x>0) & (x%2 == 0)) # indices of red pts = indices of even numbers\n ib, jb, kb = np.where((x>0) & (x%2 == 1)) # indices of black pts = indices of odd numbers\n\n\n \n # solve potential\n for it in np.arange(1,max_it+1):\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\" \n \"\"\"\n #standard internal open node\n phi_new = (self.rho[i][j][k]/self.EPS_0 +\\\n dx2*(self.phi[i-1][j][k] + self.phi[i+1][j][k]) +\\\n dy2*(self.phi[i][j-1][k] + self.phi[i][j+1][k]) +\\\n dz2*(self.phi[i][j][k-1] + self.phi[i][j][k+1]))/(2*dx2+2*dy2+2*dz2)\n \n # sucessive over relaxation \n self.phi[i,j,k] = self.phi[i,j,k] + 1.4*(phi_new - self.phi[i][j][k])\n \n \"\"\" \n \n \"\"\"\n #standard internal open node\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n (self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1])+\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1])+\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk]))/(2*dx2+2*dy2+2*dz2)\n \n \"\"\"\n \"\"\"\n # sucessive over relaxation \n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] +\\\n 1.8*(self.phi_new[1:self.ni-1,1:self.nj-1,1:self.nk-1] - \\\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1])\n \"\"\"\n \n # Red point update\n self.phi[ir,jr,kr] = (1-w)*self.phi[ir,jr,kr] + (1.0/6.0)*w*(self.phi[ir+1,jr,kr] + self.phi[ir-1,jr,kr] +\\\n self.phi[ir,jr+1,kr] + self.phi[ir,jr-1,kr] +\\\n self.phi[ir,jr,kr+1] + self.phi[ir,jr,kr-1] +\\\n (self.rho[ir,jr,kr]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n # Black point update\n self.phi[ib,jb,kb] = (1-w)*self.phi[ib,jb,kb] + (1.0/6.0)*w*(self.phi[ib+1,jb,kb] + self.phi[ib-1,jb,kb] +\\\n self.phi[ib,jb+1,kb] + self.phi[ib,jb-1,kb] +\\\n self.phi[ib,jb,kb+1] + self.phi[ib,jb,kb-1] +\\\n (self.rho[ib,jb,kb]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n #check for convergence*/\n if it%25==0:\n sum = 0;\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\"\n\n self.R[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n -self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1]*(2*dx2+2*dy2+2*dz2) +\\\n self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1]) +\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1]) +\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk])\n\n sum = np.sum(self.R**2)\n\n L2 = 
np.sqrt(sum/(self.ni*self.nj*self.nk));\n #print(\"iter: \"+str(it)+\", L2 = \"+str(L2))\n if (L2<tol):\n converged = True\n break\n\n if (converged==False):\n print(\"Gauss-Seidel failed to converge, L2 = \"+str(L2))\n \n return converged", "def ldosmap(h,energies=np.linspace(-1.0,1.0,40),delta=None,nk=40):\n if delta is None:\n delta = (np.max(energies)-np.min(energies))/len(energies) # delta\n hkgen = h.get_hk_gen() # get generator\n dstot = np.zeros((len(energies),h.intra.shape[0])) # initialize\n for ik in range(nk): \n print(\"Random k-point\",ik,nk,end=\"\\r\")\n k = np.random.random(3) # random k-point\n hk = hkgen(k) # ge Hamiltonian\n ds = ldos_waves(hk,es=energies,delta=delta) # LDOS for this kpoint\n dstot += ds # add\n print(\"LDOS finished\")\n dstot /=nk # normalize\n dstot = [spatial_dos(h,d) for d in dstot] # convert to spatial resolved DOS\n return np.array(dstot)", "def get_leaves_dist(llcnt, lcnt, nodes, links, mode, ED):\n # TODO: this function is quite similar as \"get_leaves_dist\". Because I rewrite mapping_leaves without move labeled leaves to the front first. So UK and l just record the position of labeled leaves and unlabeled leaves without change their order.\n \n G=nx.Graph()\n G.add_nodes_from([0, len(nodes)])\n for i in range(0, len(links)):\n E_dist = np.linalg.norm(nodes[links[i][0]][2]-nodes[links[i][1]][2])\n G.add_edge(links[i][0],links[i][1],weight=E_dist)\n dist = np.zeros((lcnt, llcnt))\n for i in range(0, lcnt):\n for j in range (0, llcnt):\n if mode == \"td-mapping\":\n dist[i,j] = nx.shortest_path_length(G, source=i, target=j, weight='weight')\n if mode == \"ed-mapping\":\n dist[i,j] = np.linalg.norm(nodes[i][[0,2]]-nodes[j][[0,2]])\n if mode == \"et-mapping\":\n dist[i,j] = (1-ED)*nx.shortest_path_length(G, source=i, target=j, weight='weight')+ED*np.linalg.norm(nodes[i][[0,2]]-nodes[j][[0,2]])\n return dist", "def matrix_neumann2D(Omega,Nx,Ny):\r\n \r\n hx = (Omega[1]-Omega[0])/Nx\r\n hy = (Omega[3]-Omega[2])/Ny\r\n hx2 = hx*hx\r\n hy2 = hy*hy\r\n\r\n # Les inconnues sont numérotés de 0 à Nx suivant x et 0 à Ny\r\n # suivant y. La taille du problème est donc (Nx+1)*(Ny+1).\r\n\r\n # Pour -Laplacien(u), la matrice est constituée de (Ny+1)x(Ny+1)\r\n # blocs de taille (Nx+1)x(Nx+1), de la forme\r\n #\r\n # A = [ A0 B ]\r\n # [ B A1 B ]\r\n # [ B A1 B ]\r\n # [ . . . ]\r\n # [ B A1 B ]\r\n # [ B A0 ]\r\n #\r\n # Au final, on peut commencer à remplir avec des diagonales\r\n N = (1+Nx)*(1+Ny)\r\n diags = np.zeros((5,N))\r\n # La diagonale est constante\r\n diags[2,:] = 2./hx2+2./hy2\r\n # Diagonale -1\r\n diags[1,:] = -1./hx2 # en général\r\n diags[1,np.arange(Nx,N,Nx+1)] = 0. # bord gauche\r\n diags[1,np.arange(Nx-1,N,Nx+1)] = -2./hx2 # bord droit\r\n # Diagonale +1\r\n diags[3,:] = -1./hx2 # en général\r\n diags[3,np.arange(0,N,Nx+1)] = 0. 
# bord droit\r\n diags[3,np.arange(1,N,Nx+1)] = -2./hx2 # bord gauche\r\n # Diagonale -(Nx+1)\r\n diags[0,:] = -1./hy2 # en général\r\n diags[0,(Nx+1)*(Ny-1):(Nx+1)*Ny] = -2./hy2 # bord bas\r\n # Diagonale +(Nx+1)\r\n diags[4,:] = -1./hy2 # en général\r\n diags[4,Nx+1:2*(Nx+1)] = -2./hy2 # bord haut\r\n\r\n # Construction de la matrice creuse de u --> -Laplacien(u)\r\n A = sp.spdiags(diags,[-(Nx+1),-1,0,1,(Nx+1)], (Nx+1)*(Ny+1),\r\n (Nx+1)*(Ny+1), format=\"csc\")\r\n\r\n return A", "def __dNdlog2dN(self,Dp,dNdlogDp):\n \n x = np.log10(Dp)\n y = (x[1:]+x[:-1])/2.\n y = np.pad(y,1,'constant',constant_values=(x[0]-(y[0]-x[0]),x[-1]+(x[-1]-y[-1])))\n dlogDp = np.diff(y)\n return dNdlogDp*dlogDp # cm-3", "def ldos1d(h,e=0.0,delta=0.001,nrep=3):\n import green\n if h.dimensionality!=1: raise # only for 1d\n gb,gs = green.green_renormalization(h.intra,h.inter,energy=e,delta=delta)\n d = [ -(gb[i,i]).imag for i in range(len(gb))] # get imaginary part\n d = spatial_dos(h,d) # convert to spatial resolved DOS\n g = h.geometry # store geometry\n x,y = g.x,g.y # get the coordinates\n go = h.geometry.copy() # copy geometry\n go = go.supercell(nrep) # create supercell\n write_ldos(go.x,go.y,d.tolist()*nrep) # write in file\n return d", "def numIslands3(self, grid: List[List[str]]) -> int:\n m = len(grid)\n if m > 0:\n n = len(grid[0])\n else:\n return 0\n\n def dfs(grid, i, j):\n if grid[i][j] != '0':\n grid[i][j] = '0'\n\n for direction in self.directions(grid, i, j):\n dfs(grid, direction[0], direction[1])\n\n island = 0\n for i in range(m):\n for j in range(n):\n if grid[i][j] == '1':\n island += 1 # count the number of CCs\n dfs(grid, i, j)\n return island", "def test_d1_get_neighborhood(self):\n config.NR_COLS = 10\n config.NR_ROWS = 10\n gamefield = [\n [1, 0, 0, 0, 0, 0, 0, 0, 1, 0],\n [1, 0, 0, 0, 0, 0, 0, 0, 1, 1],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [1, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n [1, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n ]\n # top left\n nh = logic.get_neighborhood(gamefield, 0, 0)\n self.assertEqual(nh, 4)\n # top right\n nh = logic.get_neighborhood(gamefield, 0, 8)\n self.assertEqual(nh, 2)\n # bottom left\n nh = logic.get_neighborhood(gamefield, 9, 1)\n self.assertEqual(nh, 4)\n # bottom right\n nh = logic.get_neighborhood(gamefield, 9, 9)\n self.assertEqual(nh, 4)\n # center\n nh = logic.get_neighborhood(gamefield, 4, 5)\n self.assertEqual(nh, 3)", "def island_perimeter(grid):\n c = 0\n length = len(grid) - 1\n width = len(grid[0]) - 1\n\n for i, r in enumerate(grid):\n for j, n in enumerate(r):\n if n == 1:\n if i == 0 or grid[i - 1][j] != 1:\n c += 1\n if j == 0 or grid[i][j - 1] != 1:\n c += 1\n if j == width or grid[i][j + 1] != 1:\n c += 1\n if i == length or grid[i + 1][j] != 1:\n c += 1\n return c", "def neighbors(districts, r, c):\r\n n_list = []\r\n if r>0:\r\n n_list += [districts[r-1,c]]\r\n if r<4:\r\n n_list += [districts[r+1,c]]\r\n if c>0:\r\n n_list += [districts[r,c-1]]\r\n if c<4:\r\n n_list += [districts[r,c+1]]\r\n return n_list", "def opls_lj(self):\n\n # Get the system information from the openmm system\n forces = {self.system.getForce(index).__class__.__name__: self.system.getForce(index) for index in\n range(self.system.getNumForces())}\n # Use the nondonded_force to get the same rules\n nonbonded_force = forces['NonbondedForce']\n lorentz = mm.CustomNonbondedForce(\n 'epsilon*((sigma/r)^12-(sigma/r)^6); 
sigma=sqrt(sigma1*sigma2); epsilon=sqrt(epsilon1*epsilon2)*4.0')\n lorentz.setNonbondedMethod(nonbonded_force.getNonbondedMethod())\n lorentz.addPerParticleParameter('sigma')\n lorentz.addPerParticleParameter('epsilon')\n lorentz.setCutoffDistance(nonbonded_force.getCutoffDistance())\n self.system.addForce(lorentz)\n\n l_j_set = {}\n # For each particle, calculate the combination list again\n for index in range(nonbonded_force.getNumParticles()):\n charge, sigma, epsilon = nonbonded_force.getParticleParameters(index)\n l_j_set[index] = (sigma, epsilon, charge)\n lorentz.addParticle([sigma, epsilon])\n nonbonded_force.setParticleParameters(index, charge, 0, 0)\n\n for i in range(nonbonded_force.getNumExceptions()):\n (p1, p2, q, sig, eps) = nonbonded_force.getExceptionParameters(i)\n # ALL THE 12,13 and 14 interactions are EXCLUDED FROM CUSTOM NONBONDED FORCE\n lorentz.addExclusion(p1, p2)\n if eps._value != 0.0:\n charge = 0.5 * (l_j_set[p1][2] * l_j_set[p2][2])\n sig14 = np.sqrt(l_j_set[p1][0] * l_j_set[p2][0])\n nonbonded_force.setExceptionParameters(i, p1, p2, charge, sig14, eps)\n # If there is a virtual site in the molecule we have to change the exceptions and pairs lists\n # Old method which needs updating\n # if excep_pairs:\n # for x in range(len(excep_pairs)): # scale 14 interactions\n # if p1 == excep_pairs[x, 0] and p2 == excep_pairs[x, 1] or p2 == excep_pairs[x, 0] and p1 == \\\n # excep_pairs[x, 1]:\n # charge1, sigma1, epsilon1 = nonbonded_force.getParticleParameters(p1)\n # charge2, sigma2, epsilon2 = nonbonded_force.getParticleParameters(p2)\n # q = charge1 * charge2 * 0.5\n # sig14 = sqrt(sigma1 * sigma2) * 0.5\n # eps = sqrt(epsilon1 * epsilon2) * 0.5\n # nonbonded_force.setExceptionParameters(i, p1, p2, q, sig14, eps)\n #\n # if normal_pairs:\n # for x in range(len(normal_pairs)):\n # if p1 == normal_pairs[x, 0] and p2 == normal_pairs[x, 1] or p2 == normal_pairs[x, 0] and p1 == \\\n # normal_pairs[x, 1]:\n # charge1, sigma1, epsilon1 = nonbonded_force.getParticleParameters(p1)\n # charge2, sigma2, epsilon2 = nonbonded_force.getParticleParameters(p2)\n # q = charge1 * charge2\n # sig14 = sqrt(sigma1 * sigma2)\n # eps = sqrt(epsilon1 * epsilon2)\n # nonbonded_force.setExceptionParameters(i, p1, p2, q, sig14, eps)", "def NN_z(x, y, con_ver, nbr_ver, cellsize):\n gx, gy, elevNNGrid = interpolate_to_grid(con_ver[:, 0], con_ver[:,1], con_ver[:,2], \n interp_type = \"natural_neighbor\", \n hres = cellsize[0])\n elev_NN = elevNNGrid[0, 0]\n if not(np.isnan(elev_NN)):\n elev_i = elev_NN\n else:\n print(\"elev_NN is nan: evaluating else loop\")\n d_nbr = np.zeros(3)\n for n in range(0, 3):\n d_nbr[n] = ((x - nbr_ver[n][0])**2 + (y - nbr_ver[n][1])**2)**0.5\n nearest_ver = nbr_ver[d_nbr.argmax(0)]\n elev_i = nearest_ver[2]\n return elev_i", "def entropycell(self):\n cells = [0] * self.k\n for i in range(self.width):\n cells[int(self.config[self.t, i])] += 1\n\n \"\"\"Calculates the Shannon entropy and the the average entropy so far.\"\"\"\n shannon = 0\n for i in range(self.k):\n if(cells[i] != 0):\n probability = cells[i] / self.width\n shannon -= probability * np.log2(probability)\n self.average_cell = (self.average_cell * self.t + shannon) / (self.t + 1)", "def force ( box, strain, r ):\n\n import numpy as np\n from itertools import product\n import math\n \n # It is assumed that positions are in units where box = 1\n # Forces are calculated in units where sigma = 1 and epsilon = 1\n # Lees-Edwards boundaries, in sliding brick arrangement\n # Flow/gradient/vorticity directions 
are x/y/z == 0/1/2\n # Uses neighbour lists\n\n n = r.shape[0]\n\n # Set up vectors to half the cells in neighbourhood of 3x3x3 cells in cubic lattice\n # The cells are chosen so that if (d0,d1,d2) appears, then (-d0,-d1,-d2) does not.\n # The last three cells are extra ones, to cope with the sheared system\n d = np.array ( [ [ 0, 0, 0], [ 1, 0, 0], [ 1, 0, 1], [-1, 0, 1], [ 0, 0, 1], # 5 cells with d1=0\n [ 1, 1, -1], [ 1, 1, 0], [ 1, 1, 1], # 3 cells with d0= 1, d1=1\n [ 0, 1, -1], [ 0, 1, 0], [ 0, 1, 1], # 3 cells with d0= 0, d1=1\n [-1, 1, -1], [-1, 1, 0], [-1, 1, 1], # 3 cells with d0=-1, d1=1\n [-2, 1, -1], [-2, 1, 0], [-2, 1, 1] ] ) # 3 cells with d0=-2, d1=1\n\n r[:,0] = r[:,0] - np.rint(r[:,1])*strain # Extra correction in box=1 units\n r = r - np.rint(r) # Ensure all atoms in periodic box\n \n sr2_ovr = 1.77 # Overlap threshold (pot > 100)\n r_cut_box = r_cut / box\n r_cut_box_sq = r_cut_box ** 2\n box_sq = box ** 2\n\n # Initialize\n f = np.zeros_like(r)\n total = PotentialType ( pot=0.0, vir=0.0, pyx=0.0, lap=0.0, ovr=False )\n\n # Calculate cell index triplets\n sc = math.floor(box/r_cut) # Number of cells along box edge\n assert sc >= 3, 'System is too small for cells' # Guard against box being too small\n c = np.floor((r+0.5)*sc).astype(np.int_) # N*3 array of cell indices for all atoms\n assert np.all(c>=0) and np.all(c<sc), 'Index error' # Simplistic \"guard\" against roundoff\n\n shift = math.floor(strain*sc) # Strain measured in cell lengths\n\n if fast:\n \n # Build list of arrays, each array holding positions of atoms in a cell\n # At the same time, define a matching set of force arrays in each cell\n # i and j number the atoms in each cell; we do not refer explicitly to indices in r\n rc, fc = [], [] # Initially empty lists of positions and forces\n for ci in product(range(sc),repeat=3): # Triple loop over cells\n mask = np.all(c==ci,axis=1) # Mask identifies atoms in this cell\n rc.append(r[mask,:]) # Copy atom coordinates into array, add to list\n fc.append(np.zeros_like(rc[-1])) # Zero corresponding forces, add to list\n\n for ci1, rci in enumerate(rc): # Loop over i-cells, getting all atoms in each i-cell as an array\n ci = np.unravel_index(ci1,(sc,sc,sc)) # Get i-cell triple-indices\n if rci.size==0: # Handle empty cell\n continue\n\n # Set up correct neighbour cell indices\n if ci[1]==sc-1: # i-cell is in the top layer\n dd = d.copy() # Standard list copied, including extra 3 cells\n dd[5:,0] = d[5:,0] - shift # All those looking up need adjustment in the x direction\n else: # i-cell is not in top layer\n dd = d[:-3,:].copy() # Last three extra cells are not needed; shift is not needed\n \n for dj in dd: # Loop over neighbouring j-cells\n cj = ci + dj # Compute neighbour j-cell triple-indices\n cj1 = np.ravel_multi_index(cj,(sc,sc,sc),mode='wrap') # Convert j-cell to single-index\n rcj = rc[cj1] # Get atoms in j-cell as an array\n if rcj.size==0: # Handle empty cell\n continue\n\n rij = rci[:,np.newaxis,:]-rcj[np.newaxis,:,:] # Separation vectors for all i and j\n rij[:,:,0] = rij[:,:,0] - np.rint(rij[:,:,1])*strain # Extra correction in box=1 units\n rij = rij - np.rint(rij) # PBCs in box=1 units\n rij_sq = np.sum(rij**2,axis=2) # Squared separations\n in_range = rij_sq < r_cut_box_sq # Set flags for within cutoff\n\n if ci1==cj1:\n np.fill_diagonal(in_range,False) # Eliminate i==j when i-cell==j-cell\n np.fill_diagonal(rij_sq,1.0) # Avoid divide-by-zero below\n\n rij_sq = rij_sq * box_sq # Now in sigma=1 units\n rij = rij * box # Now in sigma=1 units\n sr2 = 
np.where ( in_range, 1.0/rij_sq, 0.0 ) # (sigma/rij)**2, only if in range\n ovr = sr2 > sr2_ovr # Overlap if too close\n sr6 = sr2 ** 3\n sr12 = sr6 ** 2\n pot = sr12 - sr6 # LJ potential (cut but not shifted)\n vir = pot + sr12 # LJ virial\n pot = np.where ( in_range, pot+0.25, 0.0 ) # WCA LJ pair potential (cut-and-shifted)\n lap = ( 22.0*sr12 - 5.0*sr6 ) * sr2 # LJ Laplacian\n fij = vir * sr2 # LJ scalar part of forces\n fij = rij * fij[:,:,np.newaxis] # LJ pair forces\n pyx = rij[:,:,1]*fij[:,:,0] # Off-diagonal element of pressure tensor\n\n if ci1==cj1: # Correct for double-counting ij and ji when i-cell==j-cell\n fij = fij / 2\n total = total + PotentialType ( pot=np.sum(pot)/2, vir=np.sum(vir)/2, \n pyx=np.sum(pyx)/2, lap=np.sum(lap)/2, ovr=np.any(ovr) )\n else:\n total = total + PotentialType ( pot=np.sum(pot), vir=np.sum(vir), \n pyx=np.sum(pyx), lap=np.sum(lap), ovr=np.any(ovr) )\n\n fc[ci1][:,:] = fc[ci1][:,:] + np.sum(fij,axis=1) # Aggregate force on atoms in i-cell\n fc[cj1][:,:] = fc[cj1][:,:] - np.sum(fij,axis=0) # Aggregate force on atoms in j-cell\n\n # Copy forces from list of cell arrays to main force array\n for ci in product(range(sc),repeat=3): # Triple loop over cells\n mask = np.all(c==ci,axis=1) # Mask identifies atoms in this cell\n ci1 = np.ravel_multi_index(ci,(sc,sc,sc),mode='wrap') # Single-index\n f[mask,:] = fc[ci1] # Copy atom forces from correct cell\n\n else:\n \n # Build list of arrays, each array holding indices of atoms in a cell\n # ki and kj are atom indices in the r array; i and j number the atoms in each cell\n k_array = np.arange(n) # Atom indices 0..N-1\n kc = [] # Initially empty list of indices\n for ci in product(range(sc),repeat=3): # Triple loop over cells\n mask = np.all(c==ci,axis=1) # Mask identifies atoms in this cell\n kc.append(k_array[mask]) # Copy atom indices into array, add to list\n\n for ci1, kci in enumerate(kc): # Loop over i-cells, getting atom indices as an array\n ci = np.unravel_index(ci1,(sc,sc,sc)) # Get i-cell triple-indices\n\n # Set up correct neighbour cell indices\n if ci[1]==sc-1: # i-cell is in the top layer\n dd = d # Standard list copied, including extra 3 cells\n dd[5:,0] = dd[5:,0] - shift # All those looking up need adjustment in the x direction\n else:\n dd = d[:-3,:] # Last three extra cells are not needed; shift is not needed\n\n for dj in dd: # Loop over neighbouring j-cells\n cj = ci + dj # Compute neighbour j-cell triple-indices\n cj1 = np.ravel_multi_index(cj,(sc,sc,sc),mode='wrap') # Convert to single-index\n kcj = kc[cj1] # Get indices of atoms in j-cell as an array\n\n for i, ki in enumerate(kci): # Loop over individual atoms in i-cell\n j0 = i+1 if cj1==ci1 else 0 # Only look upwards if i-cell==j-cell\n if j0 >= kcj.size: # Handles (redundantly) empty j-cell and the case \n continue # where j-cell==i-cell and i is last atom\n\n for kj in kcj[j0:]: # Loop over individual atoms in j-cell\n rij = r[ki,:]-r[kj,:] # Separation vector\n rij[0] = rij[0] - np.rint(rij[1])*strain # Extra correction in box=1 units\n rij = rij - np.rint(rij) # Periodic boundary conditions in box=1 units\n rij_sq = np.sum(rij**2) # Squared separation\n\n if rij_sq < r_cut_box_sq: # Check within cutoff\n rij_sq = rij_sq * box_sq # Now in sigma=1 units\n rij = rij * box # Now in sigma=1 units\n sr2 = 1.0 / rij_sq # (sigma/rij)**2\n ovr = sr2 > sr2_ovr # Overlap if too close\n sr6 = sr2 ** 3\n sr12 = sr6 ** 2\n pot = sr12 - sr6 # LJ potential (cut but not shifted)\n vir = pot + sr12 # LJ virial\n pot = pot + 0.25 # WCA LJ 
potential (cut-and-shifted)\n lap = ( 22.0*sr12 - 5.0*sr6 ) * sr2 # LJ Laplacian\n fij = rij * vir * sr2 # LJ forces\n pyx = rij[1]*fij[0] # Off-diagonal element of pressure tensor\n total = total + PotentialType ( pot=pot, vir=vir, pyx=pyx, lap=lap, ovr=ovr )\n f[ki,:] = f[ki,:] + fij\n f[kj,:] = f[kj,:] - fij\n\n # Multiply results by numerical factors\n f = f * 24.0 # 24*epsilon\n total.pot = total.pot * 4.0 # 4*epsilon\n total.vir = total.vir * 24.0 / 3.0 # 24*epsilon and divide virial by 3\n total.pyx = total.pyx * 24.0 # 24*epsilon\n total.lap = total.lap * 24.0 * 2.0 # 24*epsilon and factor 2 for ij and ji\n \n return total, f", "def get_distances(self):\n N = len(self.cells) # Number of cells\n distances = np.zeros([N, N]) # distances between cells\n positions = self.position_matrix() # positions of cells \n \n # get distances between cells (exploit symmetry between upper and lower triangular form)\n for i, position in enumerate(positions[:-1, :]): # Iterate matrix except the last one\n directions = positions[i+1:, :] - position # direction from i to j > i\n distances[i, i+1:] = np.linalg.norm(directions, axis=1) # length of directions\n \n return distances + distances.T # Add lower triangle of matrix to upper ", "def potentialSolver3(self, w, max_it, tol):\n\n dx2 = 1.0/(self.dh[0]*self.dh[0]); # dx^2\n dy2 = 1.0/(self.dh[1]*self.dh[1]); # dy^2\n dz2 = 1.0/(self.dh[2]*self.dh[2]); # dz^2\n \n L2 = 0.0 # norm\n \n converged = False\n \n # Step 1: create *integer* array the same size as u \n x = np.zeros_like(self.phi,dtype=np.int)\n\n # Step 2: populate all non-boundary cells with running numbers from 1 to (n-2)^2\n x[1:-1,1:-1,1:-1] = np.arange(1,(self.ni-2)*(self.nj-2)*(self.nk-2)+1).reshape(self.ni-2,self.nj-2,self.nk-2)\n\n # Step 3: get indices of even (red) and odd (black) points\n ir, jr, kr = np.where((x>0) & (x%2 == 0)) # indices of red pts = indices of even numbers\n ib, jb, kb = np.where((x>0) & (x%2 == 1)) # indices of black pts = indices of odd numbers\n\n\n \n # solve potential\n for it in np.arange(1,max_it+1):\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\" \n \"\"\"\n #standard internal open node\n phi_new = (self.rho[i][j][k]/self.EPS_0 +\\\n dx2*(self.phi[i-1][j][k] + self.phi[i+1][j][k]) +\\\n dy2*(self.phi[i][j-1][k] + self.phi[i][j+1][k]) +\\\n dz2*(self.phi[i][j][k-1] + self.phi[i][j][k+1]))/(2*dx2+2*dy2+2*dz2)\n \n # sucessive over relaxation \n self.phi[i,j,k] = self.phi[i,j,k] + 1.4*(phi_new - self.phi[i][j][k])\n \n \"\"\" \n \n \"\"\"\n #standard internal open node\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n (self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1])+\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1])+\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk]))/(2*dx2+2*dy2+2*dz2)\n \n \"\"\"\n \"\"\"\n # sucessive over relaxation \n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] +\\\n 1.8*(self.phi_new[1:self.ni-1,1:self.nj-1,1:self.nk-1] - \\\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1])\n \"\"\"\n \n # Red point update\n self.phi[ir,jr,kr] = (1-w)*self.phi[ir,jr,kr] + (1.0/6.0)*w*(self.phi[ir+1,jr,kr] + self.phi[ir-1,jr,kr] +\\\n self.phi[ir,jr+1,kr] + self.phi[ir,jr-1,kr] +\\\n self.phi[ir,jr,kr+1] + 
self.phi[ir,jr,kr-1] +\\\n (self.rho[ir,jr,kr]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n # Black point update\n self.phi[ib,jb,kb] = (1-w)*self.phi[ib,jb,kb] + (1.0/6.0)*w*(self.phi[ib+1,jb,kb] + self.phi[ib-1,jb,kb] +\\\n self.phi[ib,jb+1,kb] + self.phi[ib,jb-1,kb] +\\\n self.phi[ib,jb,kb+1] + self.phi[ib,jb,kb-1] +\\\n (self.rho[ib,jb,kb]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n #check for convergence*/\n if it%25==0:\n sum = 0;\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\"\n\n self.R[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n -self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1]*(2*dx2+2*dy2+2*dz2) +\\\n self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1]) +\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1]) +\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk])\n\n sum = np.sum(self.R**2)\n\n L2 = np.sqrt(sum/(self.ni*self.nj*self.nk));\n #print(\"iter: \"+str(it)+\", L2 = \"+str(L2))\n if (L2<tol):\n converged = True\n break\n\n if (converged==False):\n print(\"Gauss-Seidel failed to converge, L2 = \"+str(L2))\n \n return converged", "def potentialSolver3(self, w, max_it, tol):\n\n dx2 = 1.0/(self.dh[0]*self.dh[0]); # dx^2\n dy2 = 1.0/(self.dh[1]*self.dh[1]); # dy^2\n dz2 = 1.0/(self.dh[2]*self.dh[2]); # dz^2\n \n L2 = 0.0 # norm\n \n converged = False\n \n # Step 1: create *integer* array the same size as u \n x = np.zeros_like(self.phi,dtype=np.int)\n\n # Step 2: populate all non-boundary cells with running numbers from 1 to (n-2)^2\n x[1:-1,1:-1,1:-1] = np.arange(1,(self.ni-2)*(self.nj-2)*(self.nk-2)+1).reshape(self.ni-2,self.nj-2,self.nk-2)\n\n # Step 3: get indices of even (red) and odd (black) points\n ir, jr, kr = np.where((x>0) & (x%2 == 0)) # indices of red pts = indices of even numbers\n ib, jb, kb = np.where((x>0) & (x%2 == 1)) # indices of black pts = indices of odd numbers\n\n\n \n # solve potential\n for it in np.arange(1,max_it+1):\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\" \n \"\"\"\n #standard internal open node\n phi_new = (self.rho[i][j][k]/self.EPS_0 +\\\n dx2*(self.phi[i-1][j][k] + self.phi[i+1][j][k]) +\\\n dy2*(self.phi[i][j-1][k] + self.phi[i][j+1][k]) +\\\n dz2*(self.phi[i][j][k-1] + self.phi[i][j][k+1]))/(2*dx2+2*dy2+2*dz2)\n \n # sucessive over relaxation \n self.phi[i,j,k] = self.phi[i,j,k] + 1.4*(phi_new - self.phi[i][j][k])\n \n \"\"\" \n \n \"\"\"\n #standard internal open node\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n (self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1])+\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1])+\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk]))/(2*dx2+2*dy2+2*dz2)\n \n \"\"\"\n \"\"\"\n # sucessive over relaxation \n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] +\\\n 1.8*(self.phi_new[1:self.ni-1,1:self.nj-1,1:self.nk-1] - \\\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1])\n \"\"\"\n \n # Red point update\n self.phi[ir,jr,kr] = (1-w)*self.phi[ir,jr,kr] + (1.0/6.0)*w*(self.phi[ir+1,jr,kr] + 
self.phi[ir-1,jr,kr] +\\\n self.phi[ir,jr+1,kr] + self.phi[ir,jr-1,kr] +\\\n self.phi[ir,jr,kr+1] + self.phi[ir,jr,kr-1] +\\\n (self.rho[ir,jr,kr]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n # Black point update\n self.phi[ib,jb,kb] = (1-w)*self.phi[ib,jb,kb] + (1.0/6.0)*w*(self.phi[ib+1,jb,kb] + self.phi[ib-1,jb,kb] +\\\n self.phi[ib,jb+1,kb] + self.phi[ib,jb-1,kb] +\\\n self.phi[ib,jb,kb+1] + self.phi[ib,jb,kb-1] +\\\n (self.rho[ib,jb,kb]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n #check for convergence*/\n if it%25==0:\n sum = 0;\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\"\n\n self.R[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n -self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1]*(2*dx2+2*dy2+2*dz2) +\\\n self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1]) +\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1]) +\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk])\n\n sum = np.sum(self.R**2)\n\n L2 = np.sqrt(sum/(self.ni*self.nj*self.nk));\n #print(\"iter: \"+str(it)+\", L2 = \"+str(L2))\n if (L2<tol):\n converged = True\n break\n\n if (converged==False):\n print(\"Gauss-Seidel failed to converge, L2 = \"+str(L2))\n \n return converged", "def get_l_interface_boundary(n_v,n_c, neighbours, vs, CV_matrix):\n h_j = np.empty((n_v, 3, 2))\n for i in range(3):\n h_j[:, i] = vs\n h_jp1 = np.dstack((roll_reverse(neighbours[:,:,0]),roll_reverse(neighbours[:,:,1])))\n l = h_j - h_jp1\n l = np.sqrt(l[:,:,0]**2 + l[:,:,1]**2)\n LI = np.zeros((n_c,n_c),dtype=np.float32)\n for i in range(3):\n LI+= np.asfortranarray(l[:,i]*CV_matrix[:,:,i])@np.asfortranarray(CV_matrix[:,:,np.mod(i+2,3)].T)\n LI = np.dstack((LI,LI.T)).max(axis=2)\n return LI", "def analyse_doms(doms, counts, log):\n dom_masks = 1<<(doms.astype(np.uint64))\n\n # initialise space for masks\n ngb_masks = np.zeros_like(dom_masks)\n\n # Fill in the masks of all the neighbouring cells\n inc = [1,0,-1] # roll increments for left, middle, right\n for i in inc:\n ri = np.roll(dom_masks, i, axis=0)\n for j in inc:\n rj = np.roll(ri, j, axis=1)\n for k in inc:\n ngb_masks |= np.roll(rj, k, axis=2)\n\n\n\n count_ds, count_alls, pcts = [], [], []\n \n for d in range(doms.max()+1):\n idx = np.flatnonzero(doms==d)\n idx_all = np.flatnonzero(ngb_masks&(1<<d))\n \n count_d = counts.ravel()[idx].sum()\n count_all = counts.ravel()[idx_all].sum()\n \n pct_ghosts = ((count_all - count_d)*100)//count_all\n pcts.append(pct_ghosts)\n print('Domain %2d'%d, 'has {:,} real points, {:,} total of which'.format(count_d, count_all), \n '%d%% are ghosts'%pct_ghosts, file=log)\n\n count_ds.append(count_d)\n count_alls.append(count_all)\n\n\n\n print('Total particles {:,}, total evaluated {:,} (average ghosts {:,}%)'.format(sum(count_ds), sum(count_alls), ((sum(count_alls)-sum(count_ds))*100)//sum(count_alls)), file=log)\n print('maximum {:,} on a single proc, worst ghost percentage {:,}%'.format(max(count_alls), max(pcts)), file=log)", "def gridgen4(num_points, diameter, min_dist, n_miss_max=10000):\n\n # Grid size and scaling onto the grid\n grid_size = min(100, int(floor(float(diameter) / min_dist)))\n grid_cell = float(diameter) / grid_size # Grid sector cell size\n scale = 1.0 / grid_cell # Scaling onto the sector grid.\n print('- Grid size: %i' % grid_size)\n print('- Grid cell: %f' % 
grid_cell)\n\n r = diameter / 2.0 # Radius\n r_sq = r**2 # Radius, squared\n min_dist_sq = min_dist**2 # minimum distance, squared\n\n # Pre-allocate coordinate arrays\n x = numpy.zeros(num_points)\n y = numpy.zeros(num_points)\n\n # Grid meta-data\n next = numpy.zeros(num_points, dtype='i8') # Next coordinate index.\n h1 = -numpy.ones((grid_size, grid_size), dtype='i8') # First index in the grid\n h2 = -numpy.ones((grid_size, grid_size), dtype='i8') # Last index in the grid\n grid_count = numpy.zeros((grid_size, grid_size), dtype='i8') # Points in grid cell.\n\n n = num_points\n n_req = num_points\n num_miss = 0\n for j in range(n_req):\n\n # First time no need to check the minimum distance req, just needs\n # to be inside the diameter.\n if j == 0:\n done = False\n while not done:\n x[j], y[j] = get_trail_position(r)\n done = (x[j]**2 + y[j]**2) <= r_sq\n jx, jy = grid_position(x[j], y[j], scale, r)\n grid_count[jx, jy] += 1\n h1[jx, jy] = 0\n h2[jx, jy] = 0\n\n # All other points have to be inside the diameter and match the\n # minimum separation requirements.\n else:\n done = False\n while not done:\n xt, yt = get_trail_position(r)\n\n # Check if the point is inside the diameter\n if (xt**2 + yt**2) > r_sq:\n num_miss += 1\n else:\n # Scale onto grid.\n jx, jy = grid_position(xt, yt, scale, r)\n # Find minimum distance to other points\n y0 = max(0, jy - 1)\n y1 = min(grid_size - 1, jy + 1)\n x0 = max(0, jx - 1)\n x1 = min(grid_size - 1, jx + 1)\n dmin_sq = diameter\n for ky in range(y0, y1 + 1):\n for kx in range(x0, x1 + 1):\n if grid_count[kx, ky] > 0:\n kh1 = h1[kx, ky]\n for kh in range(grid_count[kx, ky]):\n dx = xt - x[kh1]\n dy = yt - y[kh1]\n dist_sq = dx**2 + dy**2\n dmin_sq = min(dist_sq, dmin_sq)\n kh1 = next[kh1]\n\n # Check if the minimum distance requirement is met.\n if dmin_sq >= min_dist_sq:\n x[j] = xt\n y[j] = yt\n if h1[jx, jy] == -1:\n h1[jx, jy] = j\n else:\n next[h2[jx, jy]] = j\n h2[jx, jy] = j\n grid_count[jx, jy] += 1\n num_miss = 0\n done = True\n else:\n num_miss += 1\n\n if num_miss >= n_miss_max:\n n = j - 1\n done = True\n\n if num_miss >= n_miss_max:\n break\n\n if n < n_req:\n x = x[0:n]\n y = y[0:n]\n\n return x, y", "def Log_Sn(xref,x):\n\n nX = np.shape(x)\n\n m = nX[0]\n t = nX[1]\n\n G = np.zeros((t,))\n Gv = np.zeros((m,t))\n\n for r in range(t):\n\n # Correct for permuations\n\n Xout = dp(x[:,r])\n\n a = np.sum(Xout*xref)/np.sqrt(np.sum(xref**2)*np.sum(Xout**2)) # Should have unit L2 norm\n\n if a > 1:\n a = 1\n if a < -1:\n a = -1\n\n G[r] = np.arccos(a) # Computing the angles\n\n v = Xout - a*xref\n Gv[:,r] = v / (1e-24 + np.linalg.norm(v)) # Unit vector in the tangent subspace\n\n return G,Gv", "def get_34cohom_dim(v,l, even_e):\n op1 = OrdinaryGraphComplex.ContractEdgesGO.generate_operator(v,l, even_e)\n op2 = OrdinaryGraphComplex.ContractEdgesGO.generate_operator(v+1,l, even_e)\n fullvs = op1.domain\n fullvs2 = op2.domain\n \n vs34 = Valence34Mask(fullvs)\n vs342 = Valence34Mask(fullvs2)\n\n D34rank = 0\n if op1.is_valid():\n D = op1.get_matrix()\n # P34 = vs34.get_P34()\n # D34 = D * P34 \n i34 = vs34.get_34index_list()\n D34 = D[:, i34]\n D34rank = D34.rank()\n\n DD34rank = 0\n DD5rank = 0\n if op2.is_valid():\n DD = op2.get_matrix()\n # PP34 = vs342.get_P34()\n ii34 = vs342.get_34index_list()\n # DD34 = DD * PP34 \n DD34 = DD[:,ii34]\n DD34rank = DD34.rank()\n\n # P5 = vs34.get_P5()\n # DD5 = P5 * DD * PP34\n i5 = vs34.get_5index_list()\n DD5 = DD[i5, ii34]\n DD5rank = DD5.rank()\n\n\n return vs34.get_34dimension() - D34rank -DD34rank + 
DD5rank", "def wallsAndGates(self, rooms: List[List[int]]) -> None:\n if rooms == []:\n return\n \n row = len(rooms)\n column = len(rooms[0])\n visited = [[False for i in range(column)] for j in range(row)]\n def valid(row_index, column_index):\n if row_index < row and row_index >= 0 and column_index< column and column_index >= 0:\n return True\n return False\n \n \n def bfs_traverse(row_index, column_index, distance):\n if valid(row_index, column_index) == False or rooms[row_index][column_index] < distance:\n return\n else:\n # if rooms[row_index] [column_index] != -1 and rooms[row_index] [column_index] != 0:\n if distance < rooms[row_index][column_index]:\n\n rooms[row_index][column_index] = distance\n if rooms[row_index] [column_index] != -1:\n if valid(row_index+1, column_index):\n bfs_traverse(row_index+1, column_index, distance+1)\n if valid(row_index, column_index+1):\n bfs_traverse(row_index, column_index +1 , distance+1)\n if valid(row_index-1, column_index):\n bfs_traverse(row_index-1, column_index, distance+1)\n if valid(row_index, column_index-1):\n bfs_traverse(row_index, column_index-1, distance+1)\n \n \n for row_index in range(row):\n for column_index in range(column):\n if rooms[row_index][column_index] == 0:\n bfs_traverse(row_index, column_index, 0)", "def cellsize_2d(self):\t\r\n return self.dx * self.dy", "def NDon(row):\n m = Chem.MolFromSmiles(row.SMILES)\n donors = Descriptors.NumHDonors(m)\n return donors", "def NDon(row):\n m = Chem.MolFromSmiles(row.SMILES)\n donors = Descriptors.NumHDonors(m)\n return donors", "def populate_link_cells(lc, xyz, Lx, Nx):\n N = len(xyz)\n for i in range(N):\n num = xyz[i] // Lx % Nx\n lc[id_from_coord(num, Nx)].append(i)", "def _compute_ddistortion_dgnomic(self, gnomic: ARRAY_LIKE,\n radius2: float, radius4: float, radius6: float) -> np.ndarray:\n\n row = gnomic[1]\n col = gnomic[0]\n\n vector_part = ((self.k1 * radius2 + self.k2 * radius4 + self.k3 * radius6) * np.eye(2) +\n np.array([[2 * self.p1 * row + 4 * self.p2 * col, 2 * self.p1 * col],\n [2 * self.p2 * row, 4 * self.p1 * row + 2 * self.p2 * col]]))\n\n scalar_part = ((2 * self.k1 + 4 * self.k2 * radius2 + 6 * self.k3 * radius4) * np.outer(gnomic, gnomic) +\n 2 * np.outer([self.p2, self.p1], gnomic))\n\n return vector_part + scalar_part", "def write_ldos(x,y,dos,output_file=\"LDOS.OUT\",z=None):\n fd = open(output_file,\"w\") # open file\n fd.write(\"# x, y, local density of states\\n\")\n ii = 0\n for (ix,iy,idos) in zip(x,y,dos): # write everything\n fd.write(str(ix) +\" \"+ str(iy) + \" \"+ str(idos))\n if z is not None: fd.write(\" \"+str(z[ii]))\n fd.write(\"\\n\")\n ii += 1\n fd.close() # close file", "def calc_dist_to_poi(self,agent):\n mini_dist = 100000 \n for poi in self.poi_pos_list:\n mini_dist = np.linalg.norm(agent.get_pos() - poi)\n\n return mini_dist", "def _set_branch_nseg(geo, sec_idx, seg_L):\n\n # iterate over trees in section list\n for tree_key, tree in sec_idx.iteritems():\n\n for sec_i, sec in enumerate(tree):\n\n section = geo[tree_key][sec]\n\n # get section length\n sec_L = section.L\n print 'section length', section.L\n\n # determine number of segments\n n_seg = int(np.ceil(sec_L/seg_L))\n\n # # check that number of segments is odd\n if n_seg % 2 != 0:\n n_seg+=1\n\n # # set number of segments\n section.nseg = n_seg\n print 'nseg', section.nseg\n return geo", "def neighbours(number: int, number_sectors: int) -> [int, int, int, int]:\n col = number % number_sectors\n row = number // number_sectors\n\n nieg = [number - number_sectors, number + 
number_sectors, number - 1, number + 1]\n\n if row == 0:\n nieg[0] = -1\n if row == number_sectors - 1:\n nieg[1] = -1\n if col == 0:\n nieg[2] = -1\n if col == number_sectors - 1:\n nieg[3] = -1\n return nieg", "def ddxCellGradBC(n, bc):\n bc = checkBC(bc)\n\n ij = (np.array([0, n]), np.array([0, 1]))\n vals = np.zeros(2)\n\n # Set the first side\n if(bc[0] == 'dirichlet'):\n vals[0] = -2\n elif(bc[0] == 'neumann'):\n vals[0] = 0\n # Set the second side\n if(bc[1] == 'dirichlet'):\n vals[1] = 2\n elif(bc[1] == 'neumann'):\n vals[1] = 0\n D = sp.csr_matrix((vals, ij), shape=(n+1, 2))\n return D", "def get_cells(self):\r\n return \\\r\n (self.nx-1 if self.nx>1 else 1)* \\\r\n (self.ny-1 if self.ny>1 else 1)* \\\r\n (self.nz-1 if self.nz>1 else 1)", "def wallsAndGates(self, rooms: List[List[int]]) -> None:\n if rooms==[]: return\n xcord=len(rooms)\n ycord=len(rooms[0])\n indexstack=[(i,j) for i in range(len(rooms)) for j in range(len(rooms[0])) if rooms[i][j] == 0]\n direction=[(0,1),(1,0),(0,-1),(-1,0)]\n gatenum=1\n while indexstack != []:\n newindex=[]\n for item in indexstack:\n for mapdir in direction:\n xpoint=item[0]+mapdir[0]\n ypoint=item[1]+mapdir[1]\n if 0<=xpoint <len(rooms) and 0<=ypoint<len(rooms[0]):\n if rooms[xpoint][ypoint]==pow(2,31)-1:\n rooms[xpoint][ypoint]=gatenum\n newindex.append((xpoint,ypoint))\n indexstack=newindex\n gatenum+=1\n ''''\n for item in index_0:\n for mapdir in direction:\n xpoint=item[0]+mapdir[0]\n ypoint=item[1]+mapdir[1]\n if xpoint <len(rooms) and ypoint<len(rooms[0]):\n if rooms[xpoint][ypoint]==pow(2,31)-1:\n rooms[xpoint][ypoint]=1\n index_1.append((xpoint,ypoint))\n for item in index_1:\n for mapdir in direction:\n xpoint=item[0]+mapdir[0]\n ypoint=item[1]+mapdir[1]\n if xpoint <len(rooms) and ypoint<len(rooms[0]):\n if rooms[xpoint][ypoint]==pow(2,31)-1:\n rooms[xpoint][ypoint]=2\n index_2.append((xpoint,ypoint))\n for item in index_2:\n for mapdir in direction:\n xpoint=item[0]+mapdir[0]\n ypoint=item[1]+mapdir[1]\n if xpoint <len(rooms) and ypoint<len(rooms[0]):\n if rooms[xpoint][ypoint]==pow(2,31)-1:\n rooms[xpoint][ypoint]=3\n index_3.append((xpoint,ypoint))\n for item in index_3:\n for mapdir in direction:\n xpoint=item[0]+mapdir[0]\n ypoint=item[1]+mapdir[1]\n if xpoint <=len(rooms) and ypoint<=len(rooms[0]):\n if rooms[xpoint][ypoint]==pow(2,31)-1:\n rooms[xpoint][ypoint]=4\n #index_3.append((xpoint,ypoint))'''", "def get_deep_distortions(defect_charges: dict, \r\n bdm_type: str='BDM',\r\n stol = 0.2,\r\n ):\r\n fancy_defects = {} #dict of defects undergoing deep distortions\r\n sm = StructureMatcher(ltol=0.2, stol=stol)\r\n for defect in defect_charges.keys():\r\n print(\"\\n\",defect)\r\n for charge in defect_charges[defect]:\r\n defect_name = \"{}_{}\".format(defect, str(charge)) #defect + \"_\" + str(charge)\r\n file_energies = \"{}{}/{}/{}.txt\".format(base_path, defect_name, bdm_type ,defect_name ) \r\n dict_energies, energy_diff, gs_distortion = sort_data(file_energies)\r\n \r\n if float(energy_diff) < -0.1 : #if a significant E drop occured , then store this fancy defect\r\n print(\"Deep distortion found for \", defect_name) \r\n if gs_distortion != \"rattle\":\r\n bdm_distortion = str(round(gs_distortion * 100, 1)) #change distortion format to the one used in file name (eg from 0.1 to 10.0)\r\n if bdm_distortion == \"0.0\":\r\n bdm_distortion = \"-0.0\"\r\n file_path=\"{}{}/{}/{}_{}%_BDM_Distortion/vasp_gam/CONTCAR\".format(base_path, defect_name, bdm_type ,defect_name, bdm_distortion) \r\n else:\r\n bdm_distortion = 
\"only_rattled\" # file naming format used for rattle\r\n file_path=\"{}{}/{}/{}_{}/vasp_gam/CONTCAR\".format(base_path, defect_name, bdm_type ,defect_name, bdm_distortion) \r\n try:\r\n gs_struct = grab_contcar(file_path) # get the final structure of the E lowering distortion\r\n if gs_struct == \"Not converged\":\r\n print(f\"Problem grabbing gs structure for {bdm_distortion} of {defect_name}\")\r\n except FileNotFoundError:\r\n print(\"NO CONTCAR for ground-state distortion\")\r\n break\r\n if defect in fancy_defects.keys(): #check if defect already in dict (other charge state lead to a lower E structure)\r\n \r\n gs_struct_in_dict = fancy_defects[defect][\"structure\"] \r\n \r\n if energy_diff < fancy_defects[defect][\"energy_diff\"]: #if E drop is greater (more negative), then update the dict with the lowest E distortion\r\n print(\"Charge {} lead to greatest E lowering distortion\".format(charge))\r\n fancy_defects[defect].update(\r\n {\"structure\": gs_struct, \"BDM_distortion\": gs_distortion,\"energy_diff\": energy_diff, \"charges\":[charge]}\r\n ) \r\n \r\n elif defect not in fancy_defects.keys(): # if defect not in dict, add it\r\n print(\"New defect! Adding {} with charge {} to dict\".format(defect, charge))\r\n fancy_defects[defect] = {\"charges\" : [charge], \"structure\": gs_struct, \"energy_diff\": energy_diff, \"BDM_distortion\": gs_distortion}\r\n \r\n #let's check that the gs structure wasn`t found already by BDM for the other charge states \r\n if defect in fancy_defects.keys(): # if the defect lead to an E lowering distortion\r\n for charge in defect_charges[defect]: # for all charge states of the defect\r\n if charge not in fancy_defects[defect][\"charges\"]: #if gs struct wasn't found already for that charge state\r\n defect_name = \"{}_{}\".format(defect, str(charge)) #defect + \"_\" + str(charge)\r\n gs_struct_in_dict = fancy_defects[defect][\"structure\"] \r\n if compare_gs_struct_to_BDM_structs( gs_struct_in_dict, defect_name, base_path, stol = stol ) : \r\n # structure found in BDM calcs for this charge state. 
Add it to the list to avoid redundant work\r\n fancy_defects[defect][\"charges\"].append(charge)\r\n #print(\"Ground-state structure found for {}_{} has been also found for the charge states: {}\".format(defect, fancy_defects[defect][\"charges\"][0], fancy_defects[defect][\"charges\"] ))\r\n return fancy_defects", "def calculate_distance_interaction(self):\n\n self.tau = -np.log(np.random.rand(1)[0])\n return self.tau / self.cell_chi", "def wetting(lgca):\n if hasattr(lgca, 'spheroid'):\n birth = npr.random(lgca.nodes[lgca.spheroid].shape) < lgca.r_b\n ds = (1 - lgca.nodes[lgca.spheroid]) * birth\n lgca.nodes[lgca.spheroid, :] = np.add(lgca.nodes[lgca.spheroid, :], ds, casting='unsafe')\n lgca.update_dynamic_fields()\n newnodes = lgca.nodes.copy()\n relevant = (lgca.cell_density[lgca.nonborder] > 0)\n coords = [a[relevant] for a in lgca.nonborder]\n nbs = lgca.nb_sum(lgca.cell_density) # + lgca.cell_density\n nbs *= np.clip(1 - nbs / lgca.n_crit, a_min=0, a_max=None) / lgca.n_crit * 2\n g_adh = lgca.gradient(nbs)\n pressure = np.clip(lgca.cell_density - lgca.rho_0, a_min=0., a_max=None) / (lgca.K - lgca.rho_0)\n g_pressure = -lgca.gradient(pressure)\n\n resting = lgca.nodes[..., lgca.velocitychannels:].sum(-1)\n resting = lgca.nb_sum(resting) / lgca.velocitychannels / lgca.rho_0\n g = lgca.calc_flux(lgca.nodes)\n g = lgca.nb_sum(g)\n\n for coord in zip(*coords):\n n = lgca.cell_density[coord]\n permutations = lgca.permutations[n]\n restc = permutations[:, lgca.velocitychannels:].sum(-1)\n j = lgca.j[n]\n j_nb = g[coord]\n weights = np.exp(\n lgca.beta * (j_nb[0] * j[0] + j_nb[1] * j[1]) / lgca.velocitychannels / 2\n + lgca.beta * resting[coord] * restc #* np.clip(1 - restc / lgca.rho_0 / 2, a_min=0, a_max=None) * 2\n + lgca.beta * np.einsum('i,ij', g_adh[coord], j)\n # + lgca.alpha * np.einsum('i,ij', g_subs[coord], j)\n + restc * lgca.ecm[coord]\n + lgca.gamma * np.einsum('i,ij', g_pressure[coord], j)\n ).cumsum()\n ind = bisect_left(weights, random() * weights[-1])\n newnodes[coord] = permutations[ind]\n\n lgca.nodes = newnodes\n lgca.ecm -= lgca.alpha * lgca.ecm * lgca.cell_density / lgca.K", "def orthogonality_of_K(fname,outname,p_save=None):\r\n if p_save is None:\r\n p_save = os.path.join(os.environ['BOX_PATH'],r'__VG3D\\_deflection_trials\\_NEO\\results')\r\n\r\n df = pd.read_csv(fname,index_col=0)\r\n is_stim = pd.read_csv(os.path.join(os.environ['BOX_PATH'],r'__VG3D\\_deflection_trials\\_NEO\\results\\cell_id_stim_responsive.csv'))\r\n df = df.merge(is_stim,on='id')\r\n df = df[df.stim_responsive]\r\n DF_OUT = pd.DataFrame()\r\n ORTHO_MAT = np.empty([3,3,len(df.id.unique())])\r\n\r\n for cell_id,cell in enumerate(df.id.unique()):\r\n df_out = pd.DataFrame()\r\n sub_df = df[df.id==cell]\r\n X = sub_df[['Filter_0','Filter_1','Filter_2']].as_matrix()\r\n norms = np.linalg.norm(X,2,axis=0)\r\n X = X/norms\r\n ortho_mat=np.empty((X.shape[1],X.shape[1]))\r\n for ii,x in enumerate(X.T):\r\n for jj,y in enumerate(X.T):\r\n ortho_mat[ii,jj]=np.abs(np.dot(x,y))\r\n ORTHO_MAT[:,:,cell_id] = ortho_mat\r\n df_out['norm0'] = [norms[0]]\r\n df_out['norm1'] = [norms[1]]\r\n df_out['norm2'] = [norms[2]]\r\n df_out['id'] = cell\r\n DF_OUT = DF_OUT.append(df_out)\r\n DF_OUT.to_csv(os.path.join(p_save,'{}.csv'.format(outname)),index=False)\r\n np.save(os.path.join(p_save,'{}.npy'.format(outname)),ORTHO_MAT)", "def velcut(cellz, cellN, cellb, cellID, linesfile, redshift, testing=0):\n # Define constants\n c = 3.0e5 # Speed of light in km/s\n\n # Get the redshift of the absorption\n # This is the first 
line of the the lines file\n\n # Open the sysabs file\n sysabsfile = linesfile.replace('lines', 'sysabs')\n f = open(sysabsfile)\n f.readline()\n line = f.readline()\n neg_vel_limit = float(line.split()[1])\n pos_vel_limit = float(line.split()[2])\n f.close()\n\n if testing==1:\n print '\\t\\tFrom sysabs:'\n print '\\t\\t\\tNeg Vel Limt: {0:f}'.format(neg_vel_limit)\n print '\\t\\t\\tPos_vel_limi: {0:f}'.format(pos_vel_limit)\n print '\\t\\t\\tEW: {0:f}'.format(EW_sysabs)\n\n\n velz, velN, velb, velID = [], [], [], []\n\n velcutCount = 0\n for i in range(0,len(cellz)):\n\n # Calcuate the peculiar velocity of the cell\n vpec = c*( (cellz[i]-redshift) / (1+redshift) )\n # If the cell is inside the velocity range, write to file\n if vpec>neg_vel_limit and vpec<pos_vel_limit:\n velz.append(cellz[i])\n velN.append(cellN[i])\n velb.append(cellb[i])\n velID.append(cellID[i])\n velcutCount += 1\n \n if testing==1:\n print '\\t\\tAfter velcut, number of cells: ', velcutCount\n\n return velz, velN, velb, velID", "def _count_living_neighbors(self, cell: Cell) -> int:\n count = 0\n # borders of the area in which we are trying to find neighbors\n # Let's assume y axis directs downside and x axis directs to the left\n \n for x in range(cell.x - 1, cell.x + 2):\n for y in range(cell.y - 1, cell.y + 2):\n if cell.x == x and cell.y == y:\n continue\n if (x, y) in self.living_cells.keys():\n count += 1\n \n return count", "def nematic(lgca):\n newnodes = lgca.nodes.copy()\n relevant = (lgca.cell_density[lgca.nonborder] > 0) & \\\n (lgca.cell_density[lgca.nonborder] < lgca.K)\n coords = [a[relevant] for a in lgca.nonborder]\n\n s = np.einsum('ijk,klm', lgca.nodes[..., :lgca.velocitychannels], lgca.cij)\n sn = lgca.nb_sum(s)\n\n for coord in zip(*coords):\n n = lgca.cell_density[coord]\n sni = sn[coord]\n permutations = lgca.permutations[n]\n si = lgca.si[n]\n weights = np.exp(lgca.beta * np.einsum('ijk,jk', si, sni)).cumsum()\n ind = bisect_left(weights, random() * weights[-1])\n newnodes[coord] = permutations[ind]\n\n lgca.nodes = newnodes", "def setNeighbors(self):\n for cellIndex in range(len(self.cells)):\n cell = self.cells[cellIndex]\n\n #Checks the 8 cells around the living one. \n for neighborsX in range(cell.x - 1, cell.x + 2):\n for neighborsY in range(cell.y - 1, cell.y + 2):\n\n #If the position is outside the world, loop around.\n neighborsX = neighborsX % self.screen.worldSize\n neighborsY = neighborsY % self.screen.worldSize\n\n #Skipping itself. Becouse we do not want to calculate itself as a neighbor\n if(neighborsX == cell.x and neighborsY == cell.y):\n continue\n else:\n #Checks if a cell exist at neighborsX, neighborsY\n cellToCheck = self.getCellFromPosition(neighborsX, neighborsY)\n if(cellToCheck != False):\n #Add one to the neighbor var if there already exist and cell for the given position.\n cellToCheck.numOfNeighbor += 1\n else:\n #Creates a new cell if it do not exist any.\n newCell = Cell(self.screen, neighborsX, neighborsY, True)\n newCell.numOfNeighbor += 1\n self.cells.append(newCell)", "def traffic_concentration(rFile, sheets, x, y, pollutant):\r\n p = Point(x, y)\r\n road, dis = nearest_road(p, rFile)\r\n \r\n # step 1. check whether this location is within the calculation range. If so, go on. 
Otherwise exit.\r\n if dis > 60: # I think we dont have to consider points that are too far from the streets.\r\n return 'e1' # error 1\r\n \r\n if dis < 3.5:\r\n dis = 3.5 # In the NSL calculation tool, calculation distance smaller than 3.5 meters are limited to 3.5 meters.\r\n \r\n # step 2. determine all the parameters required.\r\n \r\n #calibration factor\r\n Fk = 0.62\r\n \r\n # Emission number. for SO2, NO2, NOx, PM10, PM2.5, lead, and CO\r\n N = int(road['properties']['intensity']) #the traffic intensity, being the number of vehicles per day\r\n Fs = float(road['properties']['f_cong']) #fraction of stagnant traffic, a number between 0 and 1\r\n Fm = float(road['properties']['f_medium']) #fraction of medium-weight motor vehicles\r\n Fz = float(road['properties']['f_heavy']) #fraction of heavy motor vehicles\r\n Fb = float(road['properties']['f_bus']) #fraction of buses\r\n st = str(road['properties']['speed_type']) #intotal 5 types: a:100, b:44, c:19, d:13, e:26 (km/h)\r\n El = emission_factor(sheets, 'p', st, pollutant) #emission factor of light motor vehicles\r\n Em = emission_factor(sheets, 'm', st, pollutant) #emission factor of medium-weight motor vehicles\r\n Ez = emission_factor(sheets, 'v', st, pollutant) #emission factor of heavy motor vehicles\r\n Eb = emission_factor(sheets, 'b', st, pollutant) #emission factor of buses\r\n Eld = emission_factor(sheets, 'p', 'd', pollutant) #emission factor of light motor vehicles (speedType: d)\r\n Emd = emission_factor(sheets, 'm', 'd', pollutant) #emission factor of medium-weight motor vehicles (speedType: d)\r\n Ezd = emission_factor(sheets, 'v', 'd', pollutant) #emission factor of heavy motor vehicles (speedType: d)\r\n Ebd = emission_factor(sheets, 'b', 'd', pollutant) #emission factor of buses (speedType: d)\r\n \r\n E_regular = N * (1 - Fs) * ((1 - Fm - Fz - Fb) * El + Fm * Em + Fz * Ez + Fb * Eb) * 1000 / 24 / 3600\r\n E_cong = N * Fs * ((1 - Fm - Fz - Fb) * Eld + Fm * Emd + Fz * Ezd + Fb * Ebd) * 1000 / 24 / 3600\r\n E = E_regular + E_cong\r\n# print(\"{}: {}, {}\".format(pollutant, E_regular, E_cong))\r\n #dilution factor\r\n roadType = str(road['properties']['class'])\r\n if roadType == '1': # Broad street canyon\r\n a = 0.000325\r\n b = -0.0205\r\n c = 0.39\r\n alpha = 0.856\r\n elif roadType == '2': # Small street canyon\r\n a = 0.000488\r\n b = -0.0308\r\n c = 0.59\r\n alpha = None\r\n elif roadType == '3': # One-sided buildings\r\n a = 0.0005\r\n b = -0.0316\r\n c = 0.57\r\n alpha = None\r\n elif roadType == '4': # General urban\r\n a = 0.000310\r\n b = -0.0182\r\n c = 0.33\r\n alpha = 0.799\r\n \r\n if dis > 30 and (roadType == 1 or roadType == 4):\r\n theta = alpha * pow(dis, -0.747)\r\n else:\r\n theta = a * dis**2 + b * dis + c\r\n \r\n #tree factor\r\n Fb = road['properties']['t_factor']\r\n \r\n #wind speed\r\n ws = wind_speed(sheets, x, y) # average speed from CAR VL3.0\r\n \r\n #regional factor related to meteorology\r\n Fregio = 5 / ws\r\n \r\n # step 3. 
calculate the traffic concentration based on the parameters above.\r\n C_traffic = Fk * E * theta * Fb * Fregio\r\n \r\n # If it is NO2, then NOx has to be considered due to its chemical reaction with O3.\r\n if pollutant == 'NO2':\r\n B = 0.6 # fixed number?\r\n K = 100 # parameter for the conversion from NO to NO2\r\n C_background_O3 = background_concentration(sheets, x, y, 'O3')\r\n C_traffic_NOx = traffic_concentration(rFile, sheets, x, y, 'NOx')\r\n C_traffic = C_traffic + B * C_background_O3 * (C_traffic_NOx - C_traffic) / (C_traffic_NOx - C_traffic + K)\r\n \r\n return C_traffic", "def get_further_neighbours(self, cell):\n\t\tneighs = self.get_neighbours(cell)\n\t\ti, j = cell.find_id()\n\t\tneighbours = []\n\t\tfor neigh in neighs:\n\t\t\tx, y = neigh.find_id()\n\t\t\tif abs(x-i)+abs(y-j) > 1 or abs(x-i)+abs(y-j) == 0: \n\t\t\t\tneighbours.append(self.space[y,x])\n\t\treturn neighbours", "def num_solution(self):\r\n\r\n m = float(self.mass) / self.nu_m\r\n c = self.light_vel\r\n p1 = 0.0\r\n x1 = 0.0\r\n self.xn_track.append(x1)\r\n self.vn.append(0.0)\r\n e = m * c * c\r\n self.en.append(e)\r\n for i in range(1, len(self.obs.obt_g)):\r\n dt = self.t[i] - self.t[i - 1]\r\n ddt = float(self.obs.obt[i] - self.obs.obt[i - 1])\r\n qe = 1 / self.nu_m * (self.nu_t * self.nu_t / self.nu_x) \\\r\n * 1.0 / float(self.size_tick * self.size_tick)\r\n\r\n # print \"qE=\", qe\r\n\r\n p2 = p1 + qe * dt\r\n self.vn.append(p2 / math.sqrt(m ** 2 + (p2 / c) ** 2))\r\n e = e + qe * (self.x[i] - self.x[i - 1])\r\n self.en.append(e)\r\n v = p2 / math.sqrt(m ** 2 + (p2 / c) ** 2)\r\n x2 = x1 + v * dt\r\n self.xn_track.append(x2)\r\n p1 = p2\r\n x1 = x2\r\n print 'Numerical solution of the differential equation of motion'", "def obtain_rectangular_segmentation(celled_hist, cells):\n celled_hist = np.array(celled_hist).reshape(cells[0]*cells[1], -1)\n scores = l1_dist(celled_hist, celled_hist)\n\n layers = cells[0]//2\n \n left, right, top, bottom = compute_positions(scores, layers, cells, \"lr\"), compute_positions(scores, layers, cells, \"rl\"), \\\n compute_positions(scores, layers, cells, \"tb\"), compute_positions(scores, layers, cells, \"bt\")\n\n return left, right, top, bottom", "def wallsAndGates(self, rooms: List[List[int]]) -> None:\n if rooms == []:\n return\n \n row = len(rooms)\n column = len(rooms[0])\n visited = [[False for i in range(column)] for j in range(row)]\n def valid(row_index, column_index):\n if row_index < row and row_index >= 0 and column_index< column and column_index >= 0:\n return True\n return False\n \n \n def bfs_traverse(row_index, column_index, distance):\n if valid(row_index, column_index) == False or rooms[row_index][column_index] < distance:\n return\n else:\n # if rooms[row_index] [column_index] != -1 and rooms[row_index] [column_index] != 0:\n\n rooms[row_index][column_index] = distance\n \n bfs_traverse(row_index+1, column_index, distance+1)\n\n bfs_traverse(row_index, column_index +1 , distance+1)\n\n bfs_traverse(row_index-1, column_index, distance+1)\n\n bfs_traverse(row_index, column_index-1, distance+1)\n \n \n for row_index in range(row):\n for column_index in range(column):\n if rooms[row_index][column_index] == 0:\n bfs_traverse(row_index, column_index, 0)", "def customNcuts(self):\n # computing neighboors graph\n A = kneighbors_graph(self.values, self.k, mode='distance', include_self=False).toarray()\n\n for i in range(self.values.shape[0]):\n for j in range(self.values.shape[0]):\n if A[i][j] > 0:\n\n v1 = (self.values[i][3], self.values[i][4], 
self.values[i][5])\n v2 = (self.values[j][3], self.values[j][4], self.values[j][5])\n\n magnitude1 = np.sqrt(v1[0] * v1[0] + v1[1] * v1[1] + v1[2] * v1[2])\n magnitude2 = np.sqrt(v2[0] * v2[0] + v2[1] * v2[1] + v2[2] * v2[2])\n ang = np.arccos(np.dot(v1, v2) / (magnitude1 * magnitude2))\n\n A[i][j] = max(self.values[i][7], self.values[j][7]) * A[i][j]\n\n # init SpectralClustering\n sc = SpectralClustering(4, affinity='precomputed', n_init=10, assign_labels = 'discretize')\n\n # cluster\n labels = sc.fit_predict(A)\n\n return labels", "def get_l_interface(n_v,n_c, neighbours, vs, CV_matrix,L):\n h_j = np.empty((n_v, 3, 2))\n for i in range(3):\n h_j[:, i] = vs\n h_jp1 = np.dstack((roll_reverse(neighbours[:,:,0]),roll_reverse(neighbours[:,:,1])))\n l = np.mod(h_j - h_jp1 + L/2,L) - L/2\n l = np.sqrt(l[:,:,0]**2 + l[:,:,1]**2)\n LI = np.zeros((n_c,n_c),dtype=np.float32)\n for i in range(3):\n LI+= np.asfortranarray(l[:,i]*CV_matrix[:,:,i])@np.asfortranarray(CV_matrix[:,:,np.mod(i+2,3)].T)\n return LI", "def island_perimeter(grid):\n sum = 0\n\n for line in range(len(grid)):\n for column in range(len(grid[line])):\n value = grid[line][column]\n water_borders = 4\n if value == 1:\n if line != len(grid) - 1 and grid[line + 1][column] == 1:\n water_borders -= 1\n if line != 0 and grid[line - 1][column] == 1:\n water_borders -= 1\n if column != len(grid[0]) - 1 and grid[line][column + 1] == 1:\n water_borders -= 1\n if column != 0 and grid[line][column - 1] == 1:\n water_borders -= 1\n sum += water_borders\n return sum", "def _compute_logarithmic_distance_term(index, M, Rjb):\r\n return ((a4[index] + a5 * (M - c1)) * np.log(np.sqrt(Rjb**2. + a6**2.)))", "def build_linear_diags(self):\n N = self.N\n dx = self.dx\n j = self._j # Index of the mid-point\n\n diags = np.zeros((2*self._j+1, self.N))\n\n # Advection term\n cff1 = -1/(2*dx)\n\n # Need to stagger these diagonals so lower and upper bands are symmetric\n diags[j-1, :-2] += -1*cff1*self.c[2:]\n diags[j+1, :] += 1*cff1*self.c[:]\n\n # Sponge term\n x = np.arange(0,N*dx,dx)\n rdist = x[-1] - x # Distance from right boundary\n spongefac = -np.exp(-6*rdist/self.spongedist)/self.spongetime\n diags[j,:] += spongefac \n\n return diags", "def get_nb_vals(i, pnts, dem, top_left_cor, cellsize, rows, cols):\n nb_x = np.zeros((5,5)) # this 5 by 5 max would contain the x coordinate of 16 neighbor pixels of a sample point\n nb_y = np.zeros((5,5)) # this 5 by 5 matrix would contain the y coordinate of 16 neighbor pixels of a sample point\n nb_z = np.zeros((5,5))\n # get index and value of cell in DEM containing current point\n (cell_X, cell_Y, cell_Z) = misc.getCellValue(pnts[i], \n dem, \n top_left_cor, \n cellsize)\n #Deal with sample points near boundary of the DEM\n point_within_dem = (cell_X-2) >=0 and (cell_Y-2>=0) and (cell_X+3)<=cols and (cell_Y+3)<=rows\n if point_within_dem:\n nb_z[0:5,0:5] = misc.RasterSubset(dem,(cell_Y-2),(cell_Y+3),(cell_X-2),(cell_X+3))\n else:\n #Get the part of moving window within the DEM domain\n in_data= misc.RasterSubset(dem,max((cell_Y-2),0),min((cell_Y+3),rows),max((cell_X-2),0),min((cell_X+3),cols))\n #in_data=dem[\"array\"][max((cell_Y-2),0):min((cell_Y+3),rows),max((cell_X-2),0):min((cell_X+3),cols)]\n nb_z[max((2-cell_Y),0):min((5-(cell_Y+3-rows)),5),max((2-cell_X),0):min((5-(cell_X+3-cols)),5)]=in_data[0:in_data.shape[0],0:in_data.shape[1]]\n in_data_avg=np.mean(in_data[in_data>-3.4e+10])\n nb_z[nb_z==0]=in_data_avg\n nb_z[nb_z<-3.4e+10]=in_data_avg\n\n\n \n # If there is missing data in the neighborhood of the 
sample point \n # use neighborhood average to replace the missing value \n has_missing_data = (nb_z>8848).sum()>0 or (nb_z<-413).sum()>0\n if has_missing_data:\n avgValue=np.mean(nb_z[np.where(np.logical_and(nb_z<8848, nb_z>-413))])\n nb_z[nb_z>8848]=avgValue\n nb_z[nb_z<-413]=avgValue\n \n # Obtain the coordinate of cell centroid of a 5*5 neighborhood around the sample point\n for ii in [0,1,2,3,4]:\n cor_y=ii-2\n dy = (cell_Y+cor_y+0.5) * cellsize[1]\n nb_y[ii,:] = top_left_cor[1] + dy\n for jj in [0,1,2,3,4]:\n cor_x=jj-2\n dx = (cell_X+cor_x+0.5) * cellsize[0]\n nb_x [:,jj] = top_left_cor[0] + dx\n return nb_x, nb_y, nb_z", "def test_nodal_efficiency_after_element_perturbation_initially_closed(self):\n F = FaultDiagnosis(\"tests/TOY_graph_initiallyopen.csv\")\n F.simulate_element_perturbation([\"1\"])\n\n nod_eff_2closed = {\n '1': 0.19596961680295014,\n '2': 0.19689554272887605,\n '3': 0.15185185185185185,\n '4': 0.20222663139329808,\n '5': 0.14814814814814814,\n '6': 0.22583774250440916,\n '7': 0.1744488536155203,\n '8': 0.24920634920634926,\n '9': 0.16124338624338622,\n '10': 0.14814814814814814,\n '11': 0.14814814814814817,\n '12': 0.1574074074074074,\n '13': 0.16666666666666666,\n '14': 0.19444444444444445,\n '15': 0.16587301587301584,\n '16': 0.15648148148148147,\n '17': 0.20740740740740743,\n '18': 0.0,\n '19': 0.16666666666666666\n }\n\n nod_eff_3closed = {\n '1': 0.15648148148148147,\n '2': 0.19689554272887605,\n '3': 0.15185185185185185,\n '4': 0.20222663139329808,\n '5': 0.14814814814814814,\n '6': 0.22583774250440916,\n '7': 0.1744488536155203,\n '8': 0.24920634920634926,\n '9': 0.16124338624338622,\n '10': 0.14814814814814814,\n '11': 0.14814814814814817,\n '12': 0.1574074074074074,\n '13': 0.16666666666666666,\n '14': 0.19444444444444445,\n '15': 0.16587301587301584,\n '16': 0.15648148148148147,\n '17': 0.20740740740740743,\n '18': 0.0,\n '19': 0.16666666666666666\n }\n\n if F.G.final_status == {'2': 1, '3': 0}:\n np.testing.assert_array_almost_equal(\n np.asarray(sorted(nod_eff_2closed.values())),\n np.asarray(sorted(F.G.nodal_efficiency.values())),\n err_msg=\"FINAL NODAL EFFICIENCY failure: perturbation of element 1\")\n else:\n np.testing.assert_array_almost_equal(\n np.asarray(sorted(nod_eff_3closed.values())),\n np.asarray(sorted(F.G.nodal_efficiency.values())),\n err_msg=\"FINAL NODAL EFFICIENCY failure: perturbation of element 1\")", "def Ncells(self):\n return len(self.cells)", "def get_density_from_neighbours(x: float, y: float, tree: KDTree, n: int = 10):\n\n dist, _ = tree.query([[x, y]], k=n)\n\n hsml = dist.max() / 2 # By definition!!!\n\n density = np.sum(kernel(dist, hsml))\n\n return density" ]
[ "0.6147432", "0.5673565", "0.5628075", "0.5538174", "0.5529582", "0.54913837", "0.54828596", "0.5471573", "0.54613155", "0.5456147", "0.5440528", "0.5422581", "0.54167837", "0.5389832", "0.5379038", "0.53487825", "0.53432304", "0.53047895", "0.53021795", "0.52806985", "0.52501726", "0.5233247", "0.52289796", "0.52258766", "0.5211618", "0.51888496", "0.51842767", "0.5180055", "0.5165327", "0.51652473", "0.5160581", "0.51561743", "0.51561457", "0.5145728", "0.51456493", "0.51360196", "0.51242524", "0.5123686", "0.5121884", "0.511815", "0.5113287", "0.5112236", "0.51045316", "0.51004535", "0.5084544", "0.5083967", "0.5083221", "0.50749063", "0.5068789", "0.5061982", "0.50617045", "0.50561565", "0.5052627", "0.5047171", "0.5046899", "0.5040699", "0.50258136", "0.50256544", "0.50252813", "0.50252813", "0.5020536", "0.5020076", "0.49994856", "0.49962315", "0.49911258", "0.4988896", "0.49873233", "0.4983324", "0.4983324", "0.49774674", "0.49752146", "0.49723607", "0.4970308", "0.49685845", "0.4966518", "0.49543193", "0.495078", "0.4949724", "0.49427387", "0.49387786", "0.4937706", "0.49368194", "0.4932755", "0.49281096", "0.4920181", "0.49169415", "0.49162632", "0.4916139", "0.49121448", "0.49083424", "0.49034116", "0.48958564", "0.4894751", "0.48946387", "0.4894572", "0.48925817", "0.48916915", "0.48881567", "0.48874852", "0.48874235" ]
0.572953
1
Calculate next best sample location
def acquisition(self):
    fs, _ = self.gp.predict(self.gp.X)
    next_fs, vars = self.gp.predict(self.X_s)
    opt = np.min(fs)
    improves = opt - next_fs - self.xsi
    Z = improves / vars
    eis = improves * norm.cdf(Z) + vars * norm.pdf(Z)
    return self.X_s[np.argmax(eis)], eis
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_next_sample(self):", "def decide_next_query(self):\n for gp in self.gps:\n build_gp_posterior(gp)\n # Find the best mean values for each gp.\n best_f, best_pt, best_gain = None, None, float('-inf')\n queries = self._get_queried_pts()\n for f_idx, f_name in enumerate(self.f_names):\n gp = self.gps[f_idx]\n f_qs = queries[f_name]\n # Assemble points to draw sample from.\n low, high = zip(*self.domains[f_idx])\n rand_pts = np.random.uniform(low, high,\n (self.options.max_opt_evals, len(low)))\n samp_pts = np.vstack([f_qs, rand_pts])\n samp_vals = gp.draw_sample(samp_pts=samp_pts).ravel()\n max_prev = np.max(samp_vals[:len(f_qs)])\n best_new_idx = np.argmax(samp_vals[len(f_qs):]) + len(f_qs)\n gain = samp_vals[best_new_idx] - max_prev\n if gain > best_gain:\n best_f = f_idx\n best_pt = samp_pts[best_new_idx]\n best_gain = gain\n return best_f, best_pt", "def best_step(self):\r\n return self._best_value_step", "def pick_next_station(self, station):\n self.best_score = 0\n\n stations = self.grid.stations\n # all connections of the last added added station \n lookahead_1 = self.grid.get_station(self.best_connection[1]).connections\n\n for la1 in lookahead_1.values():\n next_station = la1[0].name\n # if adding the connection exceeds the tracks max time length \n if self.track.add_station(self.grid, next_station) is False:\n break\n\n lookahead_2 = self.grid.get_station(la1[0].name).connections\n\n # keeps adding stations untill the time limit is reached\n for la2 in lookahead_2:\n la2 = stations.get(la2)\n if self.track.add_station(self.grid, la2.name) is False:\n break\n \n quality = self.grid.get_quality()\n \n self.track.remove_last_station()\n\n # if quality improves, add first station to the track\n if quality > self.best_score:\n self.best_score = quality \n self.best_connection = [la2.name, la1[0].name]\n \n self.track.remove_last_station()", "def get_last_sample(self) -> InternalSample:", "def next_step(self, location, prev_step):\n vox_data = self._interpolator[location]\n\n sampling_points = self._model.sampling_points\n sampling_edges = self._model.sampling_edges\n samples = self._model.evaluate(vox_data)\n\n peak_values, peak_inds = peak_finding_onedge(samples, sampling_edges)\n peak_points = sampling_points[peak_inds]\n peak_points = _robust_peaks(peak_points, peak_values,\n self.min_relative_peak, self.peak_spacing)\n step = _closest_peak(peak_points, prev_step, self.dot_limit)\n return step", "def calc_nearest_ind(self, robot_pose):\n pass", "def choose_bestnext(self, round):\n board_percentage = []\n \n for i in self.possible_coords:\n iSq = round.getSq(i[0], i[1])\n \n if round.pr_hook(iSq) == ' X ':\n sq_percentage = []\n surroundings = iSq.point_neighbors()\n \n for j in surroundings:\n jSq = round.getSq(j[0], j[1])\n\n if round.as_int(jSq) != None:\n count_X = 0\n count_F = 0\n check = jSq.point_neighbors()\n\n for k in check:\n kSq = round.getSq(k[0], k[1])\n if round.pr_hook(kSq) == ' X ':\n count_X += 1\n elif round.pr_hook(kSq) == ' f ':\n count_F += 1 \n if count_X != 0:\n sq_percentage.append((jSq.mine_neighbors() - count_F)/ count_X)\n\n avg_percent = 0\n if len(sq_percentage) == 0:\n avg_percent = 0.8\n elif sq_percentage.count(1) != 0:\n avg_percent = 1\n round.flagSq(i[0], i[1])\n else:\n sum_so_far = 0\n for p in sq_percentage:\n sum_so_far += p\n avg_percent = sum_so_far / len(sq_percentage)\n \n board_percentage.append(avg_percent)\n\n else:\n board_percentage.append(100)\n\n sorted_percentages = board_percentage.copy()\n sorted_percentages.sort()\n\n 
best_choice = board_percentage.index(sorted_percentages[0])\n\n return self.possible_coords[best_choice]", "def _get_nearest_neighbor(self, sample):\n d_min=float('inf') #minimum distance\n node_neighbor=self.start\n\n for iter in self.start:\n d=0 #distance between sample and each node in the node tree\n for j in range(sample.size):\n d+=(iter.state[j]-sample[j])**2\n if(d<d_min):\n d_min=d\n node_neighbor=iter\n\n return node_neighbor", "def get_next_position(self):", "def FindClosestInsertedPoint(self, ):\n ...", "def _get_best_level_for_downsample(downsample, arr):\n L = np.array(arr)\n diff = np.absolute(L-downsample)\n index = np.argmin(diff)\n return index", "def optimize(self):\n \n if self.verbose:\n print('Starting grid search with bounds: [' + \\\n ';'.join(['%5g to %5g']*len(self.steps))%tuple([(self.steps[i][0], self.steps[i][-1]) for i in range(len(self.steps))]) +']')\n\n for params in self._get_next_point():\n self.transform.set_params(params)\n\n v, _ = self.measure.value_and_derivatives(self.transform)\n\n if v < self.best_value:\n self.best_value = v\n self.best_params = params\n# print('New best value %2.4f at ('%v, ', '.join(['%8.3f']*len(params))%tuple(params), ')')\n\n self.value_history.append(v)\n self.last_value = v\n self.iteration += 1\n\n if self.report_freq > 0 and (self.iteration % self.report_freq == 0) and self.report_func is not None:\n self.report_func(self)\n\n # Set the best transform\n self.transform.set_params(self.best_params)\n self.last_value = self.best_value\n return self.best_value", "def FindClosestPoint(self, ):\n ...", "def min(self):\r\n\t\treturn min(self.sample)", "def test_nearest_location():\n locations = [(10, 20), (30, 40), (50, 60)]\n\n assert nearest_location(locations, 8) == 0\n assert nearest_location(locations, 15) == 0\n assert nearest_location(locations, 22) == 0\n\n assert nearest_location(locations, 28) == 1\n assert nearest_location(locations, 35) == 1\n assert nearest_location(locations, 42) == 1\n\n assert nearest_location(locations, 48) == 2\n assert nearest_location(locations, 55) == 2\n assert nearest_location(locations, 62) == 2", "def sample(self):\n\n\t\t# get newest sample\n\t\ts = self.eyetribe.sample()\n\t\t\n\t\t# invalid data\n\t\tif s == (None,None):\n\t\t\treturn (-1,-1)\n\t\t\n\t\t# check if the new sample is the same as the previous\n\t\tif s != self.prevsample:\n\t\t\t# update the current sample\n\t\t\tself.prevsample = copy.copy(s)\n\t\t\n\t\treturn self.prevsample", "def optimal(count):\n\n return _optimal(start, count)[0]", "def find_best_point(self, start_i, end_i, ranges):\n max_val = 0\n target = start_i\n for i in range(start_i, end_i):\n if ranges[i] > max_val:\n target = i\n max_val = ranges[i]\n \n angle = -(540-target)*3\n return float(angle)/1080, target", "def GetPts(self):\n return self.best", "def prior_sample(self):\n pass", "def _sampler(self, best_th_, th_, samples):\n # th_ descending.\n # th_range ascending.\n desired = np.linspace(max(best_th_ / 100, np.min(th_)),\n min(best_th_ * 2, 1), samples)\n # Find index of nearest from th_reverse (a[i-1] < v <= a[i]).\n index_rev = np.searchsorted(th_[::-1], desired, side='left') # Asc.\n # If not enough data, th could duplicate (remove).\n index_rev = np.unique(index_rev)\n index = len(th_) - index_rev - 1\n th_range = np.clip(th_[index], a_min=None, a_max=1)\n return th_range, index", "def find_best_point(self, start_i, end_i, ranges):\n # do a sliding window average over the data in the max gap, this will\n # help the car to avoid hitting 
corners\n averaged_max_gap = np.convolve(ranges[start_i:end_i], np.ones(self.BEST_POINT_CONV_SIZE), 'same') / self.BEST_POINT_CONV_SIZE\n return averaged_max_gap.argmax() + start_i", "def _get_lip_best(self) -> float:\n pass", "def _get_next_point(self):\n #Get the index of the current step in each dimension\n nparams = len(self.transform.get_params())\n indices = [0]*nparams\n #Get the number of steps in each dimension\n lengths = [len(self.steps[i]) for i in range(nparams)]\n\n end = False\n while not end:\n yield [self.steps[i][indices[i]] for i in range(nparams)]\n\n #Increment the index of the last paramenter and then check whether it goes over the end\n indices[-1] += 1\n for p in reversed(range(nparams)):\n if indices[p] == lengths[p]:\n indices[p] = 0\n if p > 0:\n indices[p-1] += 1\n else:\n end = True", "def get_best(self):\n scores, ids = self.sort_best()\n return scores[1], ids[1]", "def getNextOptimal(self):\n\t\tnodes=self.optNodes\n\t\texceeds=self.m.exceedsAngleLim\n\t\tif self.optNode is len(nodes)-1: #last node\n\t\t\tself.noMoreSpots=True\n\t\t\treturn self.pos\n\t\telif len(nodes) is 0 or (self.otherDevice is not None and exceeds(self,nodes[self.optNode+1],self.otherDevice)):\n\t\t\treturn self.pos #could not go to next ideal, other arm is blocking.\n\t\telse:\n\t\t\t#get the next optimal in list and iterate until it is \"forward\" angularly.\n\t\t\tself.optNode+=1\n\t\t\tif '2a' in self.m.type:\n\t\t\t\twhile self.m.getCylindrical(nodes[self.optNode])[1] > self.posCyl[1] and self.optNode<len(nodes)-1 and not exceeds(self,nodes[self.optNode+1],self.otherDevice): \n\t\t\t\t\tself.optNode+=1\n\t\treturn nodes[self.optNode]", "def getlocalbestcoordinate(self):\n return self.localbest.coordinate", "def get_best_position(self):\n # Todo: implement\n best_value_global = -inf\n position = None\n for particle in self.particles:\n if particle.best_value >= best_value_global:\n position = particle.best_position\n best_value_global = particle.best_value\n return position", "def FindBest(ax, x, npoints):\r\n\r\n # get size of data array (goes 0 to ndata-1)\r\n ndata = len(ax)\r\n\r\n # Find index of point closest to x\r\n iclosest = BSearch(ax,x)\r\n if (verbose): \r\n print 'looking for closest to x=',x\r\n print 'ax',ax\r\n print 'closest point at ',ax[iclosest],'iclosest=',iclosest\r\n \r\n # Get npoints points in each direction, and find distance \r\n # from x for each. 
\r\n # This will handle cases where point is at start or end of \r\n # data set, or where all closest points lie in one direction.\r\n imin = iclosest-npoints\r\n imax = iclosest+npoints\r\n # make sure imin and imax are in array range\r\n if (imin < 0): imin = 0\r\n if (imax >= ndata): imax = ndata-1\r\n ncandidates = imax-imin+1\r\n if (verbose):\r\n print 'imin,imax,ncandidates',imin,imax,ncandidates\r\n print 'candidate points:'\r\n print ' j,i,xdata[i],xdelta[j]:'\r\n xdelta = zeros(ncandidates,'d') # initialize array\r\n for i in range(imin,imax+1): # do i=imin,imax\r\n j = i-imin\r\n xdelta[j] = abs(ax[i]-x) # distance from x\r\n if (verbose): print ' ',j,i,ax[i],xdelta[j]\r\n \r\n # Sort points by xdelta, in ascending order\r\n ibest = IndexSort(xdelta)\r\n \r\n # Exclude closest point if it's actually the point we're searching for\r\n # (dr mayes requirement)\r\n npoints2 = npoints\r\n if (xdelta[ibest[0]] == 0.0):\r\n if (verbose): print 'excluding point with xdelta=0'\r\n # reduce number of available candidates by one\r\n ncandidates -=1\r\n # make sure we don't have more points than candidates\r\n npoints2 = ncandidates \r\n # shift candidates down by one\r\n for i in range(ncandidates): # do i=0,ncandidates-1\r\n ibest[i]=ibest[i+1]\r\n \r\n # trim the array down to the number of requested or available points\r\n# ibest.resize(npoints) # having trouble with this sometimes\r\n ibest = ibest[:npoints2]\r\n\r\n # adjust ibest array to correct range\r\n # note: at this point the first <npoints> is all we need\r\n# for i in range(npoints): # do i=0,npoints-1\r\n for i in range(npoints2): # do i=0,npoints-1\r\n ibest[i]=ibest[i]+imin\r\n \r\n if (verbose):\r\n print 'best points (sorted by xdelta):'\r\n print ' i,ibest,xdata,xdelta'\r\n# for i in range(npoints): # do i=0,npoints-1\r\n for i in range(npoints2): # do i=0,npoints-1\r\n print ' ',i,ibest[i],ax[ibest[i]],abs(x-ax[ibest[i]])\r\n \r\n return ibest", "def _determine_next_eval_point(self):\n anc_data = self._get_ancillary_data_for_acquisition()\n acq_to_use = getattr(acquisitions.asy, self.options.acq.lower())\n next_eval_point = acq_to_use(self.gp, self.acq_optimise, anc_data)\n return next_eval_point", "def get_next_map_point(self) -> int:\n if self.current_map_point > self.map_length - c.SAFE_EXCESS:\n self.generate(self.map_points[self.current_map_point])\n self.current_map_point = 0\n self.current_map_point += 1\n return self.map_points[self.current_map_point]", "def step(self):\n if self.Y.shape[0]<self.initial_design_numdata:\n self.suggested_sample = initial_design('random', self.space, 1)\n else:\n self.suggested_sample = self._compute_next_evaluations()\n\n self.X = np.vstack((self.X,self.suggested_sample))\n\n # --- Update current evaluation time and function evaluations\n self.num_acquisitions += 1\n\n if self.verbosity:\n print(\"num acquisition: {}\".format(self.num_acquisitions))\n\n return np.array(self.suggested_sample[0,:])", "def naive_next_pos(measurement, OTHER = None):\n if not OTHER: # this is the first measurement\n OTHER = measurement\n xy_estimate = OTHER \n return xy_estimate, OTHER", "def choose_next(self, round):\n return random.choice(self.possible_coords)", "def next_sample(self, z):\n xp = self.A @ self.x\n Pp = self.A @ self.P @ self.A.T + self.Q\n\n self.K = Pp @ self.H.T * inv(self.H @ Pp @ self.H.T + self.R)\n\n self.x = xp + self.K @ (np.array([[z]] - self.H @ xp))\n self.P = Pp - self.K @ self.H @ Pp\n\n self.pos = self.x[0]\n self.vel = self.x[1]\n return self.pos, self.vel", "def 
next_sample(self, z):\n xp = self.A @ self.x\n Pp = self.A @ self.P @ self.A.T + self.Q\n\n self.K = Pp @ self.H.T * inv(self.H @ Pp @ self.H.T + self.R)\n\n self.x = xp + self.K @ (np.array([[z]] - self.H @ xp))\n self.P = Pp - self.K @ self.H @ Pp\n\n self.pos = self.x[0]\n self.vel = self.x[1]\n return self.pos, self.vel", "def find_max_score_location(grid, shape):", "def _get_optimum_location(dataset: Dataset) -> np.ndarray:\n\n # Retrieve the observations\n X, Y = dataset.inputs_array, dataset.output_array\n\n # Return the location of the maximum\n best_index = int(np.argmax(Y))\n\n return X[best_index, :]", "def find_bestParameter(self,currentEnergy):\n if currentEnergy==5.89:\n currentEnergy=6.4\n print(\"WARNING !!!!!!!! E=5.89 KeV ==>> uso best value trovato a 6.4 KeV !!!!!\")\n \n \n index_summary=1e6\n try:\n index_summary=np.where( np.logical_and ( self.energy<(float(currentEnergy)+0.05), self.energy >(float(currentEnergy)-0.05) ) )[0][0]\n print (\"readSummaryData: energia trovata! index = \",index_summary)\n except:\n print (\"readSummaryData: energia *NON* trovata nello scan ploarizzato\")\n\n \n bestPar=1e6 \n if ( index_summary<1000):\n bestPar=self.best_val[index_summary]\n \n return bestPar", "def find_bestParameter(self,currentEnergy):\n if currentEnergy==5.89:\n currentEnergy=6.4\n print(\"WARNING !!!!!!!! E=5.89 KeV ==>> uso best value trovato a 6.4 KeV !!!!!\")\n \n \n index_summary=1e6\n try:\n index_summary=np.where( np.logical_and ( self.energy<(float(currentEnergy)+0.05), self.energy >(float(currentEnergy)-0.05) ) )[0][0]\n print (\"readSummaryData: energia trovata! index = \",index_summary)\n except:\n print (\"readSummaryData: energia *NON* trovata nello scan ploarizzato\")\n\n \n bestPar=1e6 \n if ( index_summary<1000):\n bestPar=self.best_val[index_summary]\n \n return bestPar", "def get_best_value(self):\n # Todo: implement\n best_value_global = -inf\n for particle in self.particles:\n if particle.best_value >= best_value_global:\n best_value_global = particle.best_value\n return best_value_global # Remove this line", "def _distance_next(self):\n\n self.distance = 10\n\n # Here a set index to 0 if the car is finishing a lap\n # Also reset the farthest\n if self.index > (len(self.x_trajectory) - 6) and self.closed:\n self.index = 0\n self.farthest = -1\n self.laps += 1\n\n for w in range(self.index, self.index + 20):\n\n self.dist_point = math.sqrt((self.x_trajectory[w] - self.x)**2\n + (self.y_trajectory[w] - self.y)**2)\n\n if self.dist_point < self.distance:\n self.distance = self.dist_point\n self.index = w\n\n if w >= (len(self.x_trajectory) - 1):\n break\n\n self._calc_side()\n\n self.distance = self.distance * self.sign\n\n return self.distance", "def biased_sample(self):\n return self.STARS", "def calculate_target_location(self, alphas, epsilons, data_collected):\n if len(alphas) == 1:\n \tfor i in range(0, self.number_sampling_points-1):\n \t\talphas.append(alphas[0])\n \t\tepsilons.append(epsilons[0])\n\n # if self.target_location == None:\n # # dBm_list = []\n # # for sample in data_collected[0][3]:\n # # dBm_list.append(sample)\n\n # # average_dBm = sum(dBm_list) / float(len(dBm_list))\n # # radius_target_position = 10 ** ((average_dBm - self.epsilon) / self.alpha)\n # # ###TODO: fix radius_target_position\n # # if radius_target_position > self.altitude:\n # # horizontal_distance = sqrt((radius_target_position**2) - (self.altitude**2))\n # # else:\n # # horizontal_distance = 0\n\n # local_position = self.dc.read_gps()\n # local_coord = 
Coordinate(local_position.lat, local_position.lon)\n\n # first_emulated_target = local_coord.offset_toward_target(self.region.center(), DISTANCE_TO_TARGET)\n\n # self.log.debug('=========================================================================')\n # self.log.debug('Calculated emulated target at location: {}'.format(first_emulated_target))\n # self.log.debug('=========================================================================')\n\n # return first_emulated_target\n\n # else:\n prediction = predict(dronenum=self.number_sampling_points,\n maxRun=1,\n numIterations=GDParameters.NUM_ITERATIONS,\n numEpoch=GDParameters.NUM_EPOCH,\n threshold=GDParameters.THRESHOLD,\n learning_rate=GDParameters.LEARNING_RATE,\n numberBatch=1,\n data_length=NUMBER_SAMPLES*self.number_sampling_points)\n\n try:\n target = prediction.swarm(drone_data=data_collected,\n alphas=alphas,\n epsilons=epsilons)\n except IndexError:\n self.log.warn('Target localization failed. Data not good enough.')\n return False\n\n computed_target_position = Coordinate(target[0], target[1])\n\n self.log.debug('=========================================================================')\n self.log.debug('Calculated new target at location: {}'.format(computed_target_position))\n\n if IS_SIMULATION:\n error = computed_target_position.distance_to(self.current_simulated_target)\n self.log.debug('Simulated error: {err}, Simulated target has moved {dist} meters to: {loc}'.format(\n err=error,\n dist=self.target_meters_moved,\n loc=self.current_simulated_target\n ))\n self.log.debug('=========================================================================')\n\n if not self.region.contains(computed_target_position) and not IS_SIMULATION:\n self.log.debug('New target is out of region')\n self.log.debug('Setting new target location as the latest one calculated')\n return self.target_location\n\n return computed_target_position", "def get_object_location(sensor_loc, single_distance, num_iters=20, num_repeats=10):\n obj_locs = np.zeros((num_repeats, 1, 2))\n distances = np.zeros(num_repeats)\n for i in range(num_repeats):\n obj_loc = np.random.randn(1, 2) * 100\n for t in range(num_iters):\n obj_loc += compute_update(obj_loc, sensor_loc, single_distance)\n\n distances[i] = np.sum((single_distance - np.linalg.norm(obj_loc - sensor_loc, axis=1))**2)\n obj_locs[i] = obj_loc\n\n obj_loc = obj_locs[np.argmin(distances)]\n\n return obj_loc[0]", "def get_object_location(sensor_loc, single_distance, num_iters=20, num_repeats=10):\n obj_locs = np.zeros((num_repeats, 1, 2))\n distances = np.zeros(num_repeats)\n for i in range(num_repeats):\n obj_loc = np.random.randn(1, 2) * 100\n for t in range(num_iters):\n obj_loc += compute_update(obj_loc, sensor_loc, single_distance)\n\n distances[i] = np.sum((single_distance - np.linalg.norm(obj_loc - sensor_loc, axis=1))**2)\n obj_locs[i] = obj_loc\n\n obj_loc = obj_locs[np.argmin(distances)]\n\n return obj_loc[0]", "def __find_another_nearest_medoid(self, point_index, current_medoid_index):\r\n other_medoid_index = -1\r\n other_distance_nearest = float('inf')\r\n for index_medoid in self.__current:\r\n if (index_medoid != current_medoid_index):\r\n other_distance_candidate = euclidean_distance_square(self.__pointer_data[point_index], self.__pointer_data[current_medoid_index])\r\n\r\n if other_distance_candidate < other_distance_nearest:\r\n other_distance_nearest = other_distance_candidate\r\n other_medoid_index = index_medoid\r\n\r\n return other_medoid_index", "def closest_waypoint(self, location: 
pylot.utils.Location):\n min_dist = np.infty\n min_index = 0\n for index, waypoint in enumerate(self.waypoints):\n dist = waypoint.location.distance(location)\n if dist < min_dist:\n min_dist = dist\n min_index = index\n return min_index", "def next(self):\r\n rnd = rand() * self.totals[(-1)]\r\n return bisect.bisect_right(self.totals, rnd)", "def _get_initial_location(self):\n for i in self.modelunctab.tableview.selectedIndexes():\n self.locid = i.row()\n break\n else:\n self.locid = 0\n\n self.locnaam = self.result_locations[self.locid]", "def guessLabel(self, sample):\n a = sample\n for layer in self.layers[1:]:\n a = layer.fastForward(a)\n return np.argmax(a)", "def optimize(start, stop, step):\n #Go through every angle\n rot_angle_list = np.arange(start, stop, step)\n fitness_list = []\n best_angles_list = []\n for (i, rot_angle) in enumerate(rot_angle_list):\n (fitness, best_angles) = self._angle_fitness(rot_angle, initial_R, ending_vec, starting_vec)\n fitness_list.append(fitness)\n best_angles_list.append(best_angles)\n #Find the best result\n best_index = np.argmin(fitness_list)\n best_rot_angle = rot_angle_list[best_index]\n best_angles = best_angles_list[best_index]\n return (best_rot_angle, best_angles)", "def best_bat(self):\n\n i = 0\n j = 0\n for i in range(self.NP):\n if self.Fitness[i] < self.Fitness[j]:\n j = i\n for i in range(self.D):\n self.best[i] = self.Sol[j][i]\n self.f_min = self.Fitness[j]", "def _target(self, data):\n relative_values = abs(data - data.mean())\n index = relative_values.idxmax()\n value = relative_values[index]\n return index, value", "def targetpoint(self, initpoint):\n while True:\n col = int(random.uniform(0, COLS))\n row = int(random.uniform(0, ROWS))\n if (row, col) != initpoint:\n break\n return (row, col)", "def get_closest_waypoint(self, pose):\n #TODO implement - Done\n # Iterate the base_waypoints' x value with current position's x value and find the closest\n # match, and pick that waypoint location index. 
\n min_idx = 0\n min_dist = None\n cur_x = pose.position.x\n cur_y = pose.position.y\n if self.waypoints is not None:\n for i, wp in enumerate(self.waypoints):\n wp_x = wp.pose.pose.position.x\n wp_y = wp.pose.pose.position.y\n dist = np.sqrt((cur_x - wp_x)**2 + (cur_y - wp_y)**2)\n if min_dist is None or min_dist >= dist:\n min_dist = dist\n min_idx = i\n \n # check whether the identified index is behind the current position, if so, move it by 1 index\n # https://gamedev.stackexchange.com/questions/75072/how-can-i-compare-two-quaternions-for-logical-equality\n # rospy.logwarn('min_idx before = %d', min_idx)\n eps = 1e-12\n if self.waypoints is not None:\n q1 = self.waypoints[min_idx].pose.pose.orientation\n q2 = pose.orientation\n q1_a = np.array([q1.x, q1.y, q1.z, q1.w])\n q2_a = np.array([q2.x, q2.y, q2.z, q2.w])\n direction = abs(np.dot(q1_a, q2_a))\n #rospy.logwarn('calculated direction %f', direction)\n wp_x = self.waypoints[min_idx].pose.pose.position.x\n if direction > 1-eps:\n if wp_x < cur_x:\n min_idx += 1\n else:\n min_idx -= 1\n else:\n if wp_x < cur_x:\n min_idx -= 1\n else:\n min_idx += 1\n\n # rospy.logwarn('min_idx after = %d', min_idx)\n return min_idx", "def next_positions(self):\n self.scores = np.array(self.scores)\n improved = self.scores < self._pso_data.best_scores\n\n self._pso_data.best_scores[improved] = self.scores[improved]\n self._pso_data.best_positions[improved] = self.positions[improved]\n\n self._pso_data.velocities = self._new_velocities()\n new_positions = self.positions + self._pso_data.velocities\n return new_positions", "def get_latest_locant(self):\n return self._next_locant - 1", "def youngest(self):\n # Your implementation here", "def test_nearest_location_even():\n assert nearest_location([(3, 6), (8, 13)], 6, 0) == 0\n assert nearest_location([(3, 6), (8, 13)], 6, 1) == 0\n assert nearest_location([(3, 6), (8, 13)], 7, 0) == 1\n assert nearest_location([(3, 6), (8, 13)], 7, 1) == 1", "def __find_another_nearest_medoid(self, point_index, current_medoid_index):\n other_medoid_index = -1\n other_distance_nearest = float(\"inf\")\n for index_medoid in self.__current:\n if index_medoid != current_medoid_index:\n other_distance_candidate = euclidean_distance_square(\n self.__pointer_data[point_index],\n self.__pointer_data[current_medoid_index],\n )\n\n if other_distance_candidate < other_distance_nearest:\n other_distance_nearest = other_distance_candidate\n other_medoid_index = index_medoid\n\n return other_medoid_index", "def __get_best_pos_to_shoot(self):\n #Gets the state of the markov model at time t.\n transition_probabilities = self.__get_net_probs()\n emission_probabilities = self.__get_net_probs()\n hmm = HMM(transition_probabilities, emission_probabilities)\n emissions = [2, 1, 0]\n initial = self.__get_net_probs()\n return(self.net[self.viterbi(hmm, initial, emissions)[0]].id)", "def get_best_idx(patient, plan_type, stop=False):\n if plan_type in ['clinical', 'default']:\n return 0\n plan = get_plan(patient, plan_type)\n util_vec = plan.opt_result.func_vals\n if stop:\n stop_idx = get_stop_idx(util_vec)\n util_vec = util_vec[:stop_idx + 1]\n return np.argmin(util_vec)", "def getNearestSampleIndex(test, trainX):\n dist_matrix = test - trainX\n dist_square = dist_matrix ** 2\n dist_sums = dist_square.sum(axis=1)\n distance_vector = np.sqrt(dist_sums)\n return (distance_vector).argmin()", "def best_allowed(self, base):\n x = base.copy()\n var_opt = None\n dim = x.shape[0]\n for i in range(dim):\n # Plus increment\n x[i] += self.step\n curr_obj 
= self.obj_wrap(x)\n # Check update feasible, obj improved\n # new point in STM, before accepting\n if (curr_obj and \n not np.isclose(x.T, self.STM).all(axis=1).any()):\n if var_opt is None:\n var_opt = (i, self.step, curr_obj)\n elif var_opt[2] > curr_obj:\n var_opt = (i, self.step, curr_obj)\n\n \n # Minus increment\n x[i] -= 2 * self.step\n curr_obj = self.obj_wrap(x)\n # Check update feasible, obj improved\n # new point in STM, before accepting\n if (curr_obj and \n not np.isclose(x.T, self.STM).all(axis=1).any()):\n if var_opt is None:\n var_opt = (i, -self.step, curr_obj)\n elif var_opt[2] > curr_obj:\n var_opt = (i, -self.step, curr_obj)\n \n # Restore to original value\n x[i] += self.step\n \n if var_opt:\n x[var_opt[0]] += var_opt[1]\n return x, var_opt[2]\n else:\n return None", "def best_value(self):\r\n return self._best_value", "def next_move(self):\n\n # Calculate all paths to destination from current location and time.\n solution = self.calculate_best_solution((None, None), self.currentTurn, [self.character.path[-1]],\n self.character.spent)\n\n # Add travel weight to spent.\n if solution[1] is not None and solution[1][0] != solution[1][1]:\n self.character.spent += self.pekingMap.get_vertex(solution[1][0]).weight(solution[1][1])\n\n # Return next point in shortest path to location.\n if solution[1] is not None:\n return solution[1][1]\n\n return None", "def find_best(self):\n best_st = 0\n best_bt = 0\n best_perf = -1.1\n for bt in self.btl:\n for st in self.stl:\n if self.total[bt, st, \"perf\"] > best_perf:\n best_perf = self.total[bt, st, \"perf\"]\n best_st = st\n best_bt = bt\n return (best_perf, self.total[best_bt, best_st, \"count\"], best_bt, best_st)", "def sample(self, global_step, logging):\n if self.record_size < self.learn_start:\n sys.stderr.write('Record size less than learn start! 
Sample failed\\n')\n return False, False\n\n dist_index = int(math.floor(float(self.record_size) / float(self.size) * float(self.partition_num))) \n partition_size = int(math.floor(self.size / self.partition_num))\n partition_max = dist_index * partition_size\n distribution = self.distributions[dist_index]\n rank_list = []\n # sample from k segments\n for n in range(1, self.batch_size + 1):\n if(distribution['strata_ends'][n] + 1 < distribution['strata_ends'][n + 1]):\n index = np.random.randint(distribution['strata_ends'][n] + 1,\n distribution['strata_ends'][n + 1])\n else:\n index = distribution['strata_ends'][n + 1]\n \n rank_list.append(index)\n\n \n # beta, increase by global_step, max 1\n #beta = min(self.beta_zero + (global_step - self.learn_start - 1) * self.beta_grad, 1)\n beta = self.beta_zero + (1.0 - self.beta_zero) / 2 + (1.0 - self.beta_zero) / 2 * np.tanh((global_step - self.total_steps/2) / (self.total_steps/6.0))\n #beta = (1.0 - self.beta_zero) * np.exp(float(global_step) / float(self.total_steps)) / (np.exp(1) - 1) + (self.beta_zero * np.exp(1) - 1) / (np.exp(1) - 1)\n # find all alpha pow, notice that pdf is a list, start from 0\n alpha_pow = [distribution['pdf'][v - 1] for v in rank_list]\n # w = (N * P(i)) ^ (-beta) / max w\n w = np.power(np.array(alpha_pow) * partition_max, -beta)\n w_max = max(w)\n w = np.divide(w, w_max)\n \n logging.info(\"current beta is: {0}\".format(beta))\n\n # get experience id according rank_list\n experience, priority = self.retrieve(rank_list)\n return experience, rank_list, w, priority", "def get_best_individual(self):\n return self._best_indv", "def _find_largest_candidate(self, reduced):\n nbr_counts = np.count_nonzero(reduced == 0, axis=0) # = [1, 1, 4, 2,...] where each value is the number of neighbours for the variant at that index.\n count_max = nbr_counts.max()\n if count_max == 0: # Indicates there are no available variants close enough\n return None, [] # to the remaining unassigned. Usually raises an error.\n max_inds = np.nonzero(nbr_counts == count_max)[0] # Array containing the indices of all variants with the max number of neighbours.\n if len(max_inds) == 1: # A single largest cluster\n best_center = max_inds[0]\n best_clstr = np.nonzero(reduced[:,best_center] == 0)[0]\n else: # A tie for largest cluster. Broken by smallest sum of full scores\n # This was tested with the below more accurate and true scoring function. 
Unfortunately it became hideously slow (clustered_inds and centre_inds were given as args):\n # clstr_inds = np.nonzero(reduced[:,max_ind] == 0)[0]\n # covered_inds = list(clustered_inds | set(clstr_inds))\n # centre_inds.append(max_ind)\n # score = np.sum(np.min(self.orig_dists[np.ix_(covered_inds,centre_inds)], axis=1))\n # centre_inds.pop()\n best_center, best_clstr, best_score = None, [], np.inf\n for max_ind in max_inds:\n clstr_inds = np.nonzero(reduced[:,max_ind] == 0)[0]\n score = np.sum(self.orig_dists[clstr_inds,max_ind])\n if score < best_score:\n best_center, best_clstr, best_score = max_ind, clstr_inds, score\n return best_center, best_clstr", "def __return_best_hotspot(self, dup_dict):\n hotspots = dup_dict[dup_dict.keys()[0]]\n fewest_alt_alleles = 10\n\n hotspot_with_fewest_alleles = []\n for hotspot in hotspots:\n if len(hotspot['ALT']) < fewest_alt_alleles:\n fewest_alt_alleles = len(hotspot['ALT'])\n\n del hotspot_with_fewest_alleles[:]\n hotspot_with_fewest_alleles = []\n hotspot_with_fewest_alleles.append(hotspot)\n elif len(hotspot['ALT']) == fewest_alt_alleles:\n hotspot_with_fewest_alleles.append(hotspot)\n\n if len(hotspot_with_fewest_alleles) == 1:\n best_hotspot = hotspot_with_fewest_alleles[0]\n return best_hotspot['CHROM'], best_hotspot['POS'], best_hotspot['REF'], \",\".join(best_hotspot['ALT'])\n\n # Now checking for the highest number of variants that pass the qc parameters.\n highest_hotspot_count = 0\n most_frequent_hotspot = []\n for hotspot in hotspots:\n if hotspot['orig_stats']['qc']['final_qc_count'] > highest_hotspot_count:\n highest_hotspot_count = len(hotspot['ALT'])\n\n del most_frequent_hotspot[:]\n most_frequent_hotspot = []\n most_frequent_hotspot.append(hotspot)\n elif hotspot['orig_stats']['qc']['final_qc_count'] == highest_hotspot_count:\n most_frequent_hotspot.append(hotspot)\n\n best_hotspot = most_frequent_hotspot[0]\n return best_hotspot['CHROM'], best_hotspot['POS'], best_hotspot['REF'], \",\".join(best_hotspot['ALT'])", "def _getBestIndex(self, validQvals):\r\n maxVal = max(validQvals) # FIXME\r\n bestMoves = [index for index, move in enumerate(validQvals) if move == maxVal]\r\n\r\n # heuristic: choose last bucket\r\n return int(bestMoves[-1])", "def get_best_candidate(self):\n if not self.scores:\n return None\n return self.te_list[self.scores.index(max(self.scores))]", "def get_starting_point(self, Otrain, Ftrain, y):\n return self.get_curve_fmin(Otrain, Ftrain, [y])\n # xx = np.linspace(np.min(Otrain), np.max(Otrain), 50)\n # scores, xx = self.compute_scores(Otrain, Ftrain, y, xx)\n # bestScore = np.max(scores)\n # Ibest = np.where(scores == bestScore)[0]\n # x = xx[Ibest[0]]\n return x", "def next_generation(self):\n new_population = self.population.copy()\n new_length = self.tour_length.copy()\n for i in range(self.loops):\n order_a = self.pick_one()\n order_b = self.pick_one()\n order = self.crossover(order_a, order_b)\n order_length = self.distance(order)\n new_population[i], new_length[i] = self.mutate(order_length, order)\n if new_length[i] < self.worst:\n self.tour_length[self.worst_pos] = new_length[i]\n self.population[self.worst_pos] = new_population[i]\n self.fitness[self.worst_pos] = 1/new_length[i]\n self.normalise()\n self.worst = 0\n for j in range(self.loops):\n if self.worst < self.tour_length[j]:\n self.worst = self.tour_length[j]\n self.worst_pos = j\n return new_population, new_length", "def next_sample(self, z):\n H = self.Hjacob(self.x)\n xp = self.A @ self.x\n Pp = self.A @ self.P @ self.A.T + self.Q\n\n 
self.K = Pp @ H.T * inv(H @ Pp @ H.T + self.R)\n\n self.x = xp + self.K @ (np.array([z - self.hx(xp)]))\n self.P = Pp - self.K @ H @ Pp\n\n self.pos = self.x[0]\n self.vel = self.x[1]\n self.alt = self.x[2]\n return self.pos, self.vel, self.alt", "def _get_sample(self):\n prev = self.prev_img\n curr = self.curr_img\n prevbb = self._last_bbox\n prev_sample, opts_prev = crop_sample({'image': prev, 'bb': prevbb})\n curr_sample, opts_curr = crop_sample({'image': curr, 'bb': prevbb})\n prev_img = bgr2rgb(self.scale(prev_sample, opts_prev)['image'])\n curr_img = bgr2rgb(self.scale(curr_sample, opts_curr)['image'])\n sample = {'previmg': prev_img, 'currimg': curr_img}\n self.curr_img = curr\n self.opts = opts_curr\n return sample", "def next_sample(self, z):\n xp = self.A @ self.x\n Pp = self.A @ self.P @ self.A.T + self.Q\n\n self.K = Pp @ self.H.T * inv(self.H @ Pp @ self.H.T + self.R)\n\n self.x = xp + self.K @ (z - self.H @ xp)\n self.P = Pp - self.K @ self.H @ Pp\n return self.x", "def best(self):\n self.population.ascendent_sort()\n self.best_genome = self.population.collection[0]\n return self.best_genome", "def _update_next_waypoint(self):\n if not self.base_waypoints:\n #rospy.logwarn(\"Waypoints not updated: base_waypoints not available yet.\")\n return False\n\n if not self.current_pose:\n #rospy.logwarn(\"Waypoints not updated: current_pose not available yet.\")\n return False\n\n # Get ego car variables\n ego_x = self.current_pose.position.x\n ego_y = self.current_pose.position.y\n ego_theta = math.atan2(self.current_pose.orientation.y, self.current_pose.orientation.x)\n\n # If I do have a next_waypoint, I will start looking from it, and stop looking\n # as soon as get a local minimum. Otherwise I will do a full search across the whole track\n t = time.time()\n wp = None\n yaw = 0\n dist = 1000000 # Long number\n if self.next_waypoint:\n idx_offset = self.next_waypoint\n full_search = False\n else:\n idx_offset = 0\n full_search = True\n num_base_wp = len(self.base_waypoints)\n\n for i in range(num_base_wp):\n idx = (i + idx_offset)%(num_base_wp)\n wp_x = self.base_waypoints[idx].pose.pose.position.x\n wp_y = self.base_waypoints[idx].pose.pose.position.y\n wp_d = math.sqrt((ego_x - wp_x)**2 + (ego_y - wp_y)**2)\n\n if wp_d < dist:\n dist = wp_d\n wp = idx\n if debugging:\n # Angle betwee car heading and waypoint heading\n yaw = math.atan2(wp_y - ego_y, wp_x - ego_x) - ego_theta\n elif not full_search:\n # Local minimum. If the waypoint makes sense, just use it and break\n if dist < max_local_distance:\n break; # Found a point\n else:\n # Seem to have lost track. Do search again\n rospy.logwarn(\"Waypoint updater lost track (local min at %.1f m after %d waypoints). 
Going back to full search.\", dist, i+1)\n full_search = True\n\n if debugging:\n rospy.loginfo(\"New next wp [%d] -> (%.1f,%.1f) after searching %d points in %fs\", wp, dist * math.cos(yaw), dist * math.sin(yaw), i, time.time()-t)\n\n if wp is None:\n rospy.logwarn(\"Waypoint updater did not find a valid waypoint\")\n return False\n\n self.next_waypoint = wp\n return True", "def heuristic_cost_estimate(self, current):\n relevants = 0\n accurate_relevants = 0\n for i in range(len(self.sample)):\n if is_relevant(self.sample.iloc[i], current.anchor):\n relevants += 1\n if self.pred_sample.iloc[i] == self.pred_example:\n accurate_relevants += 1\n accuracy = accurate_relevants/relevants\n if self.threshold-accuracy <= 0:\n x = 5\n return max(0, self.threshold - accuracy)", "def better_sample(self, sample):\n new_logprob_model, new_loglik_data = self._logprob(sample)\n # if there's no best sample recorded yet\n if self.best_sample[0] is None:\n self.best_sample = (sample, new_logprob_model, new_loglik_data)\n self.logprob_model, self.loglik_data = new_logprob_model, new_loglik_data\n logging.info('Initial sample generated, logprob of model: {0}, loglik: {1}'.format(new_logprob_model, new_loglik_data))\n return\n\n # if there's a best sample\n if self.search_data_fit_only:\n better = new_loglik_data - self.best_sample[2]\n else:\n better = new_logprob_model + new_loglik_data - (self.best_sample[1] + self.best_sample[2])\n if better > 0:\n self.no_improv = 0\n self.best_diff.append(better)\n self.logprob_model, self.loglik_data = new_logprob_model, new_loglik_data\n self.best_sample = (copy.deepcopy(sample), new_logprob_model, new_loglik_data)\n logging.info('New best sample found, logprob of model: {0} loglik: {1}'.format(new_logprob_model, new_loglik_data))\n return True\n else:\n self.no_improv += 1\n return False", "def nearest_sparse(self, query):\n self.best_dist = float(\"inf\")\n self.best_element = None\n self._register_best_element = self._register_best_element_single \n self._nearest_sparse_recursive(self._sparse2seq(query), self.root, 0.0)\n return self.best_element,self.best_dist", "def _compute_results(self):\n self.Y_best = best_value(self.Y)\n self.x_opt = self.X[np.argmin(self.Y),:]\n self.fx_opt = np.min(self.Y)\n self.distance = self._compute_distance_betw_consecutive_x()", "def _select_destination(self):\n # Ideally this should do something clever based on the start location\n # ie known trips. 
But for now, it will pick randomly!\n station_dict = self.network.station_dict\n\n stations = list(station_dict.keys())\n #stations = [x for x in stations if isinstance(x, int) or x.startswith(\"801\")]\n #stations = [x for x in stations if isinstance(x, int) or x.startswith(\"80139\")]\n weights = [station_dict[x].in_popularity for x in stations]\n\n # pick using the given weight distributions\n self.dest = random.choices(stations, weights=weights)[0]\n\n return", "def pick_initial_sample(self):\n x = np.atleast_1d(self.init_sample_func())\n return 0, x", "def _determine_next_batch_of_eval_points(self):\n anc_data = self._get_ancillary_data_for_acquisition()\n acq_to_use = getattr(acquisitions.syn, self.options.acq.lower())\n next_batch_of_eval_points = acq_to_use(self.num_workers, self.gp, self.acq_optimise,\n anc_data)\n return next_batch_of_eval_points", "def find_loop_point(self, start_offset=200, test_len=500):\n\n # Using heuristics for the test length and \"loop to\" point.\n # NOTE: this algorithm is arbitrary and could certainly be improved,\n # especially for cases where the loop point is not totally clear\n\n max_corr = 0\n best_start = None\n best_end = None\n\n for start in range(200,\n len(self.max_freq) - test_len,\n int(len(self.max_freq) / 10)):\n for end in range(start + 500, len(self.max_freq) - test_len):\n sc = self.sig_corr(start, end, test_len)\n if sc > max_corr:\n best_start = start\n best_end = end\n max_corr = sc\n\n return (best_start, best_end, max_corr)", "def get_best_payload_coordinate(self):\n\n result = self.visible_payloads[0]\n best_distance = self._distance_to(result)\n\n for payload in self.visible_payloads[1:]:\n distance = self._distance_to(payload)\n if distance <= best_distance:\n result = payload\n best_distance = distance\n\n return result", "def mts_ls1(current_x, current_fitness, best_x, best_fitness, improve, search_range, task, rng, bonus1=10, bonus2=1,\n sr_fix=0.4, **_kwargs):\n if not improve:\n search_range /= 2\n i_fix = np.argwhere(search_range < 1e-15)\n search_range[i_fix] = task.range[i_fix] * sr_fix\n improve = False\n grade = 0.0\n for i in range(len(current_x)):\n x_old = current_x[i]\n current_x[i] = x_old - search_range[i]\n current_x = task.repair(current_x, rng)\n new_fitness = task.eval(current_x)\n if new_fitness < best_fitness:\n grade = grade + bonus1\n best_x = current_x.copy()\n best_fitness = new_fitness\n if new_fitness == current_fitness:\n current_x[i] = x_old\n elif new_fitness > current_fitness:\n current_x[i] = x_old + 0.5 * search_range[i]\n current_x = task.repair(current_x, rng)\n new_fitness = task.eval(current_x)\n if new_fitness < best_fitness:\n grade = grade + bonus1\n best_x = current_x.copy()\n best_fitness = new_fitness\n if new_fitness >= current_fitness:\n current_x[i] = x_old\n else:\n grade = grade + bonus2\n improve = True\n current_fitness = new_fitness\n else:\n grade = grade + bonus2\n improve = True\n current_fitness = new_fitness\n return current_x, current_fitness, best_x, best_fitness, improve, grade, search_range", "def next_sample(self):\n if self.seq is not None:\n if self.cur >= len(self.seq):\n raise StopIteration\n idx = self.seq[self.cur]\n self.cur += 1\n if self.imgrec is not None:\n s = self.imgrec.read_idx(idx)\n header, img = recordio.unpack(s)\n if self.imglist is None:\n return header.label, img\n else:\n return self.imglist[idx][0], img\n else:\n label, fname = self.imglist[idx]\n if self.imgrec is None:\n with open(os.path.join(self.path_root, fname), 'rb') as fin:\n img = 
fin.read()\n return label, img\n else:\n s = self.imgrec.read()\n if s is None:\n raise StopIteration\n header, img = recordio.unpack(s)\n return header.label, img", "def matching_function_startpoint(self, idx):\n real_idx = idx + 1\n path = dtw.best_path(self.paths, col=real_idx)\n start_idx = path[0][1]\n return start_idx", "def mts_ls1v1(current_x, current_fitness, best_x, best_fitness, improve, search_range, task, rng, bonus1=10, bonus2=1,\n sr_fix=0.4, **_kwargs):\n if not improve:\n search_range /= 2\n i_fix = np.argwhere(search_range < 1e-15)\n search_range[i_fix] = task.range[i_fix] * sr_fix\n improve, d, grade = False, rng.uniform(-1, 1, task.dimension), 0.0\n for i in range(len(current_x)):\n x_old = current_x[i]\n current_x[i] = x_old - search_range[i] * d[i]\n current_x = task.repair(current_x, rng)\n new_fitness = task.eval(current_x)\n if new_fitness < best_fitness:\n grade, best_x, best_fitness = grade + bonus1, current_x.copy(), new_fitness\n elif new_fitness == current_fitness:\n current_x[i] = x_old\n elif new_fitness > current_fitness:\n current_x[i] = x_old + 0.5 * search_range[i]\n current_x = task.repair(current_x, rng)\n new_fitness = task.eval(current_x)\n if new_fitness < best_fitness:\n grade, best_x, best_fitness = grade + bonus1, current_x.copy(), new_fitness\n elif new_fitness >= current_fitness:\n current_x[i] = x_old\n else:\n grade, improve, current_fitness = grade + bonus2, True, new_fitness\n else:\n grade, improve, current_fitness = grade + bonus2, True, new_fitness\n return current_x, current_fitness, best_x, best_fitness, improve, grade, search_range", "def mostLikelyPath(self, start, length=100):\n path = [start]\n for step in range(length):\n bestValue = -100000000\n bestAction = None\n for action in range(self.A):\n pathValue = self.valueLookAhead(path[-1][0], [1], action)\n # print((self.stateToCoord[nextState[0]], pathValue))\n if pathValue > bestValue:\n bestValue = pathValue\n bestAction = action\n possibleNextStates = self.trans(path[-1][0], path[-1][1])[bestAction]\n path.append(randomSample(possibleNextStates))\n return path", "def test_nearest_location_odd():\n assert nearest_location([(3, 6), (9, 13)], 7) == 0\n assert nearest_location([(3, 6), (9, 13)], 7, 1) == 1", "def new_and_near(self):\n if self.prob and random.random() < self.prob:\n x_rand = self.goal_config\n else:\n x_rand = self.planning_env.sample_free()\n x_nearest_id, x_nearest = self.tree.GetNearestVertex(x_rand)\n x_new = self.steer(x_nearest, x_rand)\n # check if new point is in X_free and not already in V\n # if x_new in self.tree.vertices or not self.planning_env.state_validity_checker(x_new):\n if x_new in self.tree.vertices or not self.planning_env.collision_free(x_new, x_nearest):\n return None, None\n\n self.tree.samples_taken += 1\n return x_new, x_nearest", "def _next_index(self):\n # Cache a string of random numbers to speed things up\n if not self.rnd_pool_:\n self.rnd_pool_ = self.rnd.randint(0, self.input_size - 1, self.batch_size * 10).tolist()\n\n return self.rnd_pool_.pop()", "def _select_heuristic(self):\n\n # take a sample of rewards from the current prior of heuristics\n sample_rewards = np.random.normal(self.prior_mus, self.prior_sigmas)\n\n # select the heuristic that has the highest reward sample value\n self.best_heuristic_idx = np.argmax(sample_rewards)\n self.best_heuristic = self.heuristics[self.best_heuristic_idx]\n self.heuristic_selection.append(self.best_heuristic_idx)", "def _sample_pos(self, assign_result, num_expected, **kwargs):\n pos_inds = 
torch.nonzero(assign_result.gt_inds > 0)\n if pos_inds.numel() != 0:\n pos_inds = pos_inds.squeeze(1)\n if pos_inds.numel() <= num_expected:\n return pos_inds\n else:\n return self.random_choice(pos_inds, num_expected)" ]
[ "0.71639574", "0.6434643", "0.6302643", "0.6135785", "0.6110737", "0.60888207", "0.601396", "0.60047734", "0.5961023", "0.59551257", "0.5950009", "0.59325665", "0.59085697", "0.58706766", "0.58677566", "0.58429563", "0.58406365", "0.58211887", "0.58168864", "0.5816688", "0.57982624", "0.57896256", "0.5760274", "0.5753177", "0.5708207", "0.5694553", "0.5693657", "0.56836945", "0.56719726", "0.56647366", "0.5655309", "0.56539804", "0.5651338", "0.563978", "0.5635203", "0.5624217", "0.5624217", "0.56192195", "0.56088364", "0.5599107", "0.5599107", "0.5592619", "0.5586314", "0.5576513", "0.5570292", "0.5565863", "0.5565863", "0.55645704", "0.55607295", "0.55597645", "0.5554461", "0.5553942", "0.5548806", "0.55425936", "0.55370057", "0.55347633", "0.5529462", "0.5527519", "0.5525788", "0.55143505", "0.5512225", "0.5510891", "0.55102944", "0.55062765", "0.5501222", "0.5498856", "0.54978883", "0.54927", "0.5489304", "0.5483574", "0.547723", "0.5465827", "0.5463998", "0.54612", "0.5459466", "0.54581386", "0.5447326", "0.5444883", "0.5444223", "0.5441508", "0.54376924", "0.54315317", "0.5423122", "0.54187506", "0.541398", "0.5402465", "0.5397795", "0.53938884", "0.53903264", "0.5384002", "0.5382953", "0.5374416", "0.53732693", "0.5371301", "0.53672194", "0.53595996", "0.5356734", "0.5354979", "0.5353946", "0.53501976", "0.5345411" ]
0.0
-1
Optimize for black box function
def optimize(self, iterations=1000): prev = None finalx = None finaly = None while iterations: maxei, eis = self.acquisition() new_y = self.f(maxei) if maxei == prev: break self.gp.update(maxei, new_y) pycodehack = finaly is None or self.minimize and finaly > new_y if ((pycodehack or not self.minimize and finaly < new_y)): finaly = new_y finalx = maxei prev = maxei iterations -= 1 return finalx, finaly
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def blackwin(x):\n print('blackwin is untested')\n if isinstance(x, (list, tuple, np.ndarray)):\n n = x.shape[1]\n f = blackwin(n)\n\n if len(x.shape) == 3:\n f, _, _ = np.meshgrid(f[0, :], np.arange(\n x.shape[0]), np.arange(x.shape[2]))\n else:\n f, _ = np.meshgrid(f[0, :], np.arange(x.shape[0]))\n else:\n n = x\n f = np.reshape((0.42 - 0.5 * np.cos(2 * np.pi * (np.arange(n) + .5)) /\n (n) + .08 * np.cos(4 * np.pi * (np.arange(n) + .5)) /\n (n)) * np.sqrt(5000 / 1523), (1, -1))\n f = f / la.norm(f) * np.sqrt(n)\n return f", "def fn(mask, j):\n ans = 0 \n for i in range(m): \n if not mask & (1<<i): \n ans = max(ans, fn(mask^(1<<i), j-1) + score[i][j])\n return ans", "def black(self, x):\n n = x.shape[0]\n count_arr = np.array([[0] * 28] * 28) # array to keep the count for all [i,j] coordinates\n # Base: i = j = 0, count_arr[i, j] = 0\n for i in range(1, n):\n for j in range(1, n):\n if x[i, j] > 0:\n count_arr[i, j] = count_arr[i, j - 1] + count_arr[i - 1, j] - count_arr[i - 1, j - 1] + 1\n else:\n count_arr[i, j] = count_arr[i, j - 1] + count_arr[i - 1, j] - count_arr[i - 1, j - 1]\n self.count_black = count_arr", "def __init__(self, n, m, k=2):\n # expecting to hold n elements\n self.n = n\n if m%4: m += (4-m%4)\n self.m = m*8\n print \"bit map size set to %d (%d bytes)after round up to 32bits\"%(self.m, self.m/8)\n self.bm = BitVector(size=self.m, intVal=0)\n if k in BloomFilter.KRange:\n self.k = k\n else:\n self.k = BloomFilter.KRange[-1]\n # round k to closest allowed value\n for i in range(len(BloomFilter.KRange)-1):\n if k < BloomFilter.KRange[i]:\n self.k = BloomFilter.KRange[i]\n break\n elif k < BloomFilter.KRange[1+i]:\n if (BloomFilter.KRange[+i]-k) >= k-BloomFilter.KRange[1+i]:\n self.k = BloomFilter.KRange[i]\n else:\n self.k = BloomFilter.KRange[i+1]\n break\n print \"k set to %d after validation\"%(self.k)\n p=BloomFilter.calPFP(self.n, self.m, self.k)\n print \"false positive probability will be %f when filtering %d elements\"%(p, self.n)\n #slice bitmap into k slices\n self.ms = self.m/self.k\n self.hashf = MurmurHash3_x86_32", "def anoise(this, *args, **kargs):\n\t\t\n\t\t# Arguments\n\t\tif not args: args = [50]\n\t\t\n\t\t# Kernel's retrieval\n\t\tanoisek = this._ANOISEK\n\t\tif anoisek is None: return None\n\t\t\n\t\t# More magic\n\t\tbin = this._BINARY\n\t\tfor thresh in args:\n\t\t\tbin[:,:] = (cv2.filter2D(bin, -1, anoisek) / 2.55 > thresh) * 255\n\t\treturn True", "def py_cpu_nms(boxes, scores, thresh=0.55):\n # x1、y1、x2、y2、以及score赋值\n boxes = boxes.detach().numpy()\n x1 = boxes[:, 0]\n y1 = boxes[:, 1]\n x2 = boxes[:, 2]\n y2 = boxes[:, 3]\n scores = scores\n\n # 每一个检测框的面积\n areas = (x2 - x1 + 1) * (y2 - y1 + 1)\n # 按照score置信度降序排序\n # order = scores.argsort()[::-1]\n all_scores, order = scores.sort(descending=True)\n\n keep = [] # 保留的结果框集合\n # print(order)\n while int(len(order.detach().numpy())) > 0:\n i = order[0]\n keep.append(i.numpy()) # 保留该类剩余box中得分最高的一个\n # 得到相交区域,左上及右下\n xx1 = np.maximum(x1[i], x1[order[1:]])\n yy1 = np.maximum(y1[i], y1[order[1:]])\n xx2 = np.minimum(x2[i], x2[order[1:]])\n yy2 = np.minimum(y2[i], y2[order[1:]])\n\n # 计算相交的面积,不重叠时面积为0\n w = np.maximum(0.0, xx2 - xx1 + 1)\n h = np.maximum(0.0, yy2 - yy1 + 1)\n inter = w * h\n # 计算IoU:重叠面积 /(面积1+面积2-重叠面积)\n ovr = inter / (areas[i] + areas[order[1:]] - inter)\n # 保留IoU小于阈值的box\n inds = np.where(ovr <= thresh)[0]\n order = order[inds + 1] # 因为ovr数组的长度比order数组少一个,所以这里要将所有下标后移一位\n\n return keep", "def blackout_pixel_weights_by_box_regions(height, width, boxes, blackout):\n (y_grid, 
x_grid) = image_shape_to_grids(height, width)\n y_grid = tf.expand_dims(y_grid, axis=0)\n x_grid = tf.expand_dims(x_grid, axis=0)\n y_min = tf.expand_dims(boxes[:, 0:1], axis=-1)\n x_min = tf.expand_dims(boxes[:, 1:2], axis=-1)\n y_max = tf.expand_dims(boxes[:, 2:3], axis=-1)\n x_max = tf.expand_dims(boxes[:, 3:], axis=-1)\n\n # Make the mask with all 1.0 in the box regions.\n # Shape: [num_instances, height, width]\n in_boxes = tf.cast(\n tf.logical_and(\n tf.logical_and(y_grid >= y_min, y_grid <= y_max),\n tf.logical_and(x_grid >= x_min, x_grid <= x_max)),\n dtype=tf.float32)\n\n # Shape: [num_instances, height, width]\n blackout = tf.tile(\n tf.expand_dims(tf.expand_dims(blackout, axis=-1), axis=-1),\n [1, height, width])\n\n # Select only the boxes specified by blackout.\n selected_in_boxes = tf.where(blackout, in_boxes, tf.zeros_like(in_boxes))\n out_boxes = tf.reduce_max(selected_in_boxes, axis=0)\n out_boxes = tf.ones_like(out_boxes) - out_boxes\n return out_boxes", "def blackcover(model, X, y, width, height, xskip, yskip):\n\t#wideth:44 , height:22, xship:22. yship:22\n max_loss = torch.zeros(y.shape[0]).to(y.device)\n max_delta = torch.ones_like(X).to(y.device)\n xtimes = 224//xskip\n ytimes = 224//yskip\n\n for i in range(xtimes):\n for j in range(ytimes):\n\n blackcover = np.ones([224,224,3]).astype(np.float32)*255\n blackcover[yskip*j:(yskip*j+height),xskip*i:(xskip*i+width),:] = 0 \n blackcover = transforms.ToTensor()(blackcover).to(y.device)\n\n #print(blackcover[:,1,1])\n # out = torchvision.utils.make_grid(blackcover)\n # imshow(out)\n \n\n all_loss = nn.CrossEntropyLoss(reduction='none')(model( X*blackcover), y )\n if(all_loss>=max_loss):\n max_delta = blackcover.detach()\n max_loss = torch.max(max_loss, all_loss)\n \n return max_delta", "def fn(x, mask):\n ans = size = 0 \n for xx in range(1, 10): \n if not mask & (1 << xx): \n if (x, xx) not in mp or mask & 1 << mp[x, xx]: \n ans += fn(xx, mask^(1<<xx))\n size += 1\n size = 9 - size\n if m <= size <= n: ans += 1\n return ans", "def fn(i, j, mask):\n if j == n: return 1 \n if i == m: return fn(0, j+1, mask)\n ans = 0 \n for x in 1<<2*i, 1<<2*i+1, 0b11<<2*i: \n mask0 = mask ^ x\n if mask0 & 0b11<<2*i and (i == 0 or (mask0 >> 2*i) & 0b11 != (mask0 >> 2*i-2) & 0b11): \n ans += fn(i+1, j, mask0)\n return ans % 1_000_000_007", "def is_perfect_square():", "def chk_hamming(data):\n pass", "def _cce(func, s, sf, bl, bu, mask, icall, maxn, alpha, beta, maxit, printit):\n\n \"\"\"\n List of local variables\n sb(.) = the best point of the simplex\n sw(.) = the worst point of the simplex\n w2(.) = the second worst point of the simplex\n fw = function value of the worst point\n ce(.) = the centroid of the simplex excluding wo\n snew(.) 
= new point generated from the simplex\n iviol = flag indicating if constraints are violated\n = 1 , yes\n = 0 , no\n \"\"\"\n # Assign the best and worst points:\n sb = s[0,:]\n fb = sf[0]\n sw = s[-1,:]\n fw = sf[-1]\n\n # Compute the centroid of the simplex excluding the worst point:\n ce = np.mean(s[:-1,:],axis=0)\n\n # Attempt a reflection point\n snew = ce + alpha*(ce-sw)\n snew = np.where(mask, snew, sb) # sb should have initial params at mask==False\n\n # Check if is outside the bounds:\n ibound = 0\n # s1 = snew-bl\n # idx = (s1<0).nonzero()\n # if idx[0].size != 0: ibound = 1\n if np.ma.any(np.ma.array(snew-bl, mask=~mask) < 0.): ibound = 1\n\n # s1 = bu-snew\n # idx = (s1<0).nonzero()\n # if idx[0].size != 0: ibound = 2\n if np.ma.any(np.ma.array(bu-snew, mask=~mask) < 0.): ibound = 2\n\n if ibound >= 1:\n snew = _SampleInputMatrix(1,bl,bu,distname='randomUniform')[0]\n snew = np.where(mask, snew, sb)\n\n fuc = func(snew)\n fnew = -fuc if maxit else fuc\n icall += 1\n if printit==1: print(' i, f, X: ', icall, fnew, snew)\n\n # Reflection failed; now attempt a contraction point:\n if fnew > fw:\n snew = sw + beta*(ce-sw)\n snew = np.where(mask, snew, sb)\n fuc = func(snew)\n fnew = -fuc if maxit else fuc\n icall += 1\n if printit==1: print(' i, f, X: ', icall, fnew, snew)\n\n # Both reflection and contraction have failed, attempt a random point;\n if fnew > fw:\n snew = _SampleInputMatrix(1,bl,bu,distname='randomUniform')[0]\n snew = np.where(mask, snew, sb)\n fuc = func(snew)\n fnew = -fuc if maxit else fuc\n icall += 1\n if printit==1: print(' i, f, X: ', icall, fnew, snew)\n\n # end of _cce\n return snew, fnew, icall", "def bboxes_nms_fast(classes, scores, bboxes, threshold=0.45):\n pass", "def phantom_squares(n_points,S):\n \n #Rescaling according to image size \n S[:,0] = S[:,0]*n_points/2\n S[:,1] = S[:,1]*n_points/2\n S[:,2] = S[:,2]*n_points/2\n S[:,3] = S[:,3]*math.pi/180\n \n x,y = np.meshgrid(np.arange(0,n_points)-n_points//2 ,np.arange(0,n_points)-n_points//2 ) \n nrow,ncol = S.shape\n phantom1 = np.zeros((y.shape[0], y.shape[1], nrow)) \n\n for k in range(nrow): #itero sui quadrati\n x_new = x - S[k,0]\n y_new = y - S[k,1]\n\n u = abs(x_new*math.cos(S[k,3])+y_new*math.sin(S[k,3]))\n v = abs(-x_new*math.sin(S[k,3])+y_new*math.cos(S[k,3]))\n\n cond = np.maximum(u,v)\n\n for i in range(x.shape[0]):\n for j in range(x.shape[1]):\n if (cond[i,j] < S[k,2]/2):\n phantom1[i,j,k] = S[k,4]; # gray scale\n else:\n phantom1[i,j,k] = 0.0;\n #endif\n #endfor\n #endfor\n #endfor\n\n phantom1 = phantom1.sum(axis=2)\n phantom = np.flipud(phantom1)\n return phantom", "def filter_fusion(luma_bin, sat_bin, grad_bin, mentor_bin):\n binary = np.zeros_like(luma_bin)\n binary[ (((grad_bin==1) | (sat_bin==1)) & (luma_bin==1)) | (mentor_bin==1) ] = 1\n\n # Erosion and dilation - Seems doesn't work. 
Mask-off\n #kernel = np.ones((5,5))\n #binary_dilation = cv2.dilate(binary, kernel, iterations=1)\n #binary_erosion = cv2.erode(binary_dilation, kernel, iterations=1)\n #binary = binary_erosion\n\n return binary", "def rb_erosion(image,size,origin=-1):\n return cv2.erode(image.astype(uint8), brick(size))\n # output = zeros(image.shape,'f')\n # filters.uniform_filter(image,size,output=output,origin=0, mode='constant', cval=1)\n # return array(output==1,'i')", "def approching_blackhole():\n blackhole = BlackHole()\n Rs = 8.0\n D_list = np.round(10**np.linspace(np.log10(50), np.log10(100000), 30))\n blackhole.open(blackhole.img_name, size=2000)\n\n for D in D_list:\n blackhole.compute(Rs, D)\n blackhole.img_save()", "def styblinskitankfcn(x: np.ndarray) -> np.ndarray:\n n = x.shape[1]\n scores = np.zeros((x.shape[0], 1))\n for i in range(n):\n scores += x[:, i] ** 4 - 16 * x[:, i] ** 2 + 5 * x[:, i]\n scores *= 0.5\n return scores", "def non_max_suppression(self, filtered_boxes, box_classes, box_scores):\n box_predictions = []\n predicted_box_classes = []\n predicted_box_scores = []\n for label in range(len(self.class_names)):\n # for each class\n boxes = []\n class_tmp = []\n score_tmp = []\n for i in range(len(box_classes)):\n if box_classes[i] == label:\n boxes.append(filtered_boxes[i])\n class_tmp.append(box_classes[i])\n score_tmp.append(box_scores[i])\n\n class_tmp = np.array(class_tmp)\n while len(class_tmp) > 0 and np.amax(class_tmp) > -1:\n index = np.argmax(score_tmp)\n box_predictions.append(boxes[index])\n predicted_box_classes.append(class_tmp[index])\n predicted_box_scores.append(score_tmp[index])\n score_tmp[index] = -1\n class_tmp[index] = -1\n px1, py1, px2, py2 = boxes[index]\n p_area = (px2 - px1) * (py2 - py1)\n\n for box in range(len(boxes)):\n if class_tmp[box] != -1:\n bx1, by1, bx2, by2 = boxes[box]\n b_area = (bx2 - bx1) * (by2 - by1)\n ox1 = px1 if px1 > bx1 else bx1\n oy1 = py1 if py1 > by1 else by1\n ox2 = px2 if px2 < bx2 else bx2\n oy2 = py2 if py2 < by2 else by2\n if ox2 - ox1 <= 0 or oy2 - oy1 <= 0:\n continue\n # Calculate overlap area and IoU\n o_area = (ox2 - ox1) * (oy2 - oy1)\n u_area = p_area + b_area - o_area\n iou = o_area / u_area\n\n if iou > self.nms_t:\n class_tmp[box] = -1\n score_tmp[box] = -1\n\n box_predictions = np.array(box_predictions)\n predicted_box_classes = np.array(predicted_box_classes)\n predicted_box_scores = np.array(predicted_box_scores)\n return (box_predictions, predicted_box_classes, predicted_box_scores)", "def brownfcn(x: np.ndarray) -> np.ndarray:\n\n n = x.shape[1]\n scores = np.zeros((x.shape[0], 1))\n x = x**2\n\n for i in range(n - 1):\n scores += x[:, i] ** (x[:, i + 1] + 1) + x[:, i + 1] ** (x[:, i] + 1)\n\n return scores", "def iou_suppression(cnt_box, yolo_box, max_threshold, min_threshold):\n all_boxes = []\n pre_bboxes = yolo_box\n bboxes = cnt_box\n for i in range(len(pre_bboxes)):\n max_flag = 0\n min_flag = 0\n for j in range(len(bboxes)):\n\n (pre_x1, pre_y1) = (pre_bboxes[i][0], pre_bboxes[i][1])\n (pre_x2, pre_y2) = (pre_bboxes[i][2], pre_bboxes[i][3])\n (cur_x1, cur_y1) = (bboxes[j][0], bboxes[j][1])\n (cur_x2, cur_y2) = (bboxes[j][2], bboxes[j][3])\n origin_w = pre_x2 - pre_x1\n origin_h = pre_y2 - pre_y1\n current_w = cur_x2 - cur_x1\n current_h = cur_y2 - cur_y1\n prime_area = origin_h * origin_w\n current_area = current_h*current_w\n\n if pre_x1 > cur_x1:\n if pre_y1 > cur_y1:\n if cur_x2 - pre_x1 <= 0 or cur_y2 - pre_y1 <= 0:\n lap_area = 0\n else:\n width = cur_x2 - pre_x1\n height = cur_y2 - pre_y1\n if 
width > origin_w:\n width = origin_w\n if height > origin_h:\n height = origin_h\n\n lap_area = width*height\n\n else:\n if cur_x2 - pre_x1 <= 0 or pre_y2 - cur_y1 <= 0:\n lap_area = 0\n else:\n width = cur_x2 - pre_x1\n height = pre_y2 - cur_y1\n if width > origin_w:\n width = origin_w\n if height > current_h:\n height = current_h\n\n lap_area = width*height\n else:\n if pre_y1 > cur_y1:\n if pre_x2 - cur_x1 <= 0 or cur_y2 - pre_y1 <= 0:\n lap_area = 0\n else:\n width = pre_x2 - cur_x1\n height = cur_y2 - pre_y1\n if width > current_w:\n width = current_w\n if height > origin_h:\n height = origin_h\n\n lap_area = width*height\n else:\n if pre_x2 - cur_x1 <= 0 or pre_y2 - cur_y1 <= 0:\n lap_area = 0\n else:\n width = pre_x2 - cur_x1\n height = pre_y2 - cur_y1\n if width > current_w:\n width = current_w\n if height > current_h:\n height = current_h\n\n lap_area = width*height\n\n if lap_area != 0:\n sum_area = (prime_area + current_area - lap_area)\n iou_score = lap_area/sum_area\n if iou_score > max_threshold: # set the threshold of the iou scores, in line with the sort\n max_flag = 1\n elif iou_score > min_threshold:\n min_flag = 1\n\n if max_flag == 1 or min_flag == 0:\n all_boxes.append(pre_bboxes[i])\n\n if cnt_box != []:\n for index_box in range(cnt_box.shape[0]):\n all_boxes.append(cnt_box[index_box])\n\n return np.asarray(all_boxes)", "def clConvolution(self, size, mask):", "def hard_nms(box_scores, iou_threshold, top_k=-1, candidate_size=10):\n # TOP_K was originally -1, to keep all faces, but trying to filter\n # CANDIDATE_SIZE was originally 200, trying to limit # of faces\n scores = box_scores[:, -1]\n boxes = box_scores[:, :-1]\n picked = []\n indexes = np.argsort(scores)\n indexes = indexes[-candidate_size:]\n while len(indexes) > 0:\n current = indexes[-1]\n picked.append(current)\n if 0 < top_k == len(picked) or len(indexes) == 1:\n break\n current_box = boxes[current, :]\n\n indexes = indexes[:-1]\n rest_boxes = boxes[indexes, :]\n iou = iou_of(\n rest_boxes,\n np.expand_dims(current_box, axis=0),\n )\n indexes = indexes[iou <= iou_threshold]\n \n # additional method of discrimination, only the boxes\n # with the largest areas are selected\n new_boxes = box_scores[picked, :]\n areas = []\n for box in new_boxes:\n left_top = np.asarray([box[0], box[1]])\n right_bottom = np.asarray([box[2], box[3]])\n area = area_of(left_top, right_bottom)\n areas.append(area)\n areas = np.asarray(areas)\n biggest = np.argsort(areas)\n last_index = len(biggest) - 1\n middle = max(len(biggest)// 2, 1)\n size = min(middle, candidate_size / 2)\n \n final_boxes = []\n for i in range(size):\n final_boxes.append(new_boxes[biggest[last_index-i]])\n final_boxes = np.asarray(final_boxes)\n \n return final_boxes\n #return box_scores[picked, :]", "def non_max_suppress_orig(boxes_in_batch, score_arr, config):\n# FRAME_STEP_SEC = 0.1 # sec\n# FRAME_SIZE_SEC = 2.0 # each window is 2 sec long\n# \n# OVERLAP_RATIO = 1.0 # non-max suppression\n \n \n overlap_size = int(config.FRAME_SIZE_SEC/config.FRAME_STEP_SEC\n *config.OVERLAP_RATIO)\n # boxes sorted by scores\n box_sorted_by_score = boxes_in_batch[np.argsort(score_arr[boxes_in_batch])[::-1]] \n # [::-1] reverse the sort order from ascending to descending\n # get the ordered values: score_arr[boxes_in_batch][box_sorted_by_score]\n\n # original approach\n# time_start = time.time()\n# boxes_separated = separate_boxes(box_sorted_by_score, overlap_size)\n# print('Method 1: run time is: '+str(time.time() - time_start))\n# \n# time_start2 = time.time()\n# 
boxes_separated2 = separate_boxes_faster(box_sorted_by_score, overlap_size)\n# print('Method 2: run time is: '+str(time.time() - time_start2))\n\n #time_start3 = time.time()\n boxes_separated = separate_boxes_fasterer(box_sorted_by_score, overlap_size)\n #print('Method 3: run time is: '+str(time.time() - time_start3))\n\n \n # alternative approach\n #boxes_separated = separate_boxes_time(box_sorted_by_score.sort(), overlap_size)\n \n # computer vision approach: Malisiewicz et al.\n #boxes_separated = non_max_suppression_fast(boxes_in_batch, overlapThresh):\n #print(boxes_separated)\n #print(boxes_separated2)\n #print(boxes_separated3)\n \n return boxes_separated", "def fn(mask, k):\n if not mask: return 0 \n ans = inf \n for i in range(n): \n if mask & (1<<i): \n ans = min(ans, (nums1[i]^nums2[k]) + fn(mask^(1<<i), k+1))\n return ans", "def compute(self, node, input_vals):\r\n #assert len(input_vals) == 2\r\n #start = time.time()\r\n strides = node.const_attr\r\n ish = list(input_vals[0].shape)\r\n fsh = list(input_vals[1].shape)\r\n filter = input_vals[1].astype(float32)\r\n input = np.zeros((ish[0],ish[1]+fsh[0]-1,ish[2]+fsh[1]-1,ish[3])).astype(float32)\r\n input[:,fsh[0]//2:fsh[0]//2+ish[1]:1,fsh[1]//2:fsh[1]//2+ish[2]:1,:]+=input_vals[0].astype(float32)\r\n ish = list(input.shape)\r\n output = np.zeros([ish[0],(ish[1]-fsh[0])//strides[1]+1,(ish[2]-fsh[1])//strides[2]+1,fsh[3]]).astype(float32)\r\n osh = output.shape\r\n\r\n assert c_kernel.conv2d_c(get_pointer(input), ish[0],ish[1],ish[2],ish[3],get_pointer(filter),fsh[0],fsh[1],fsh[2],fsh[3],strides[0],strides[1],strides[2],strides[3],get_pointer(output), osh[0],osh[1],osh[2],osh[3])==0\r\n #print(\"conv2d\") \r\n #end = time.time()\r\n\r\n #print(end - start) \r\n return output\r\n \r\n '''\r\n rm = range(osh[0])\r\n ri = range(osh[1])\r\n rj = range(osh[2])\r\n rdi = range(fsh[0])\r\n rdj = range(fsh[1])\r\n for m in rm:\r\n for i in ri:\r\n for j in rj:\r\n for di in rdi:\r\n for dj in rdj:\r\n print(input[m,strides[1]*i+di,strides[2]*j+dj,:])\r\n print(filter[di,dj,:,:])\r\n t = np.dot(\r\n input[m,strides[1]*i+di,strides[2]*j+dj,:],\r\n filter[di,dj,:,:]\r\n )\r\n output[m,i,j] = np.sum(\r\n [\r\n t,\r\n output[m,i,j]\r\n ],\r\n axis=0\r\n )\r\n #print(\"type(output)\")\r\n #print(type(output))\r\n return output\r\n '''", "def fn(k, mask):\n can = [i for i in range(n) if mask & (1 << i)]\n cand = [] # eliminated player\n for i in range(len(can)//2): \n p1, p2 = can[i], can[~i]\n if p1 == firstPlayer and p2 == secondPlayer or p1 == secondPlayer and p2 == firstPlayer: return [k, k] # game of interest \n if p1 in (firstPlayer, secondPlayer): cand.append([p2]) # p2 eliminated \n elif p2 in (firstPlayer, secondPlayer): cand.append([p1]) # p1 eliminated \n else: cand.append([p1, p2]) # both could be elimited \n \n minn, maxx = inf, -inf\n for x in product(*cand): \n mask0 = mask\n for i in x: mask0 ^= 1 << i\n mn, mx = fn(k+1, mask0)\n minn, maxx = min(minn, mn), max(maxx, mx)\n return minn, maxx", "def check_correctness_bc01(f):\n\n rng = np.random.RandomState([2012, 7, 19])\n batch_size = 5\n rows = 32\n cols = 30\n channels = 3\n pool_rows = 2\n pool_cols = 3\n zv = rng.randn(batch_size, rows, cols,\n channels).astype(config.floatX) * 1. 
- 1.5\n top_down_v = rng.randn(batch_size, rows / pool_rows, cols / pool_cols,\n channels).astype(config.floatX)\n\n p_np, h_np = max_pool_python(zv, (pool_rows, pool_cols), top_down_v)\n\n z_th = T.TensorType(broadcastable=(False, False, False, False),\n dtype = config.floatX)()\n z_th.name = 'z_th'\n zr = z_th.dimshuffle(0, 3, 1, 2)\n\n top_down_th = T.TensorType(broadcastable=(False, False, False, False),\n dtype = config.floatX)()\n top_down_th.name = 'top_down_th'\n top_down_r = top_down_th.dimshuffle(0, 3, 1, 2)\n\n p_th, h_th = f(zr, (pool_rows, pool_cols), top_down_r)\n\n func = function([z_th, top_down_th], [p_th.dimshuffle(0, 2, 3, 1),\n h_th.dimshuffle(0, 2, 3, 1)])\n\n pv, hv = func(zv, top_down_v)\n\n assert p_np.shape == pv.shape\n assert h_np.shape == hv.shape\n if not np.allclose(h_np, hv):\n print((h_np.min(), h_np.max()))\n print((hv.min(), hv.max()))\n assert False\n if not np.allclose(p_np, pv):\n diff = abs(p_np - pv)\n print('max diff ', diff.max())\n print('min diff ', diff.min())\n print('ave diff ', diff.mean())\n assert False", "def filter(x,y):\n if tf.reduce_sum(y) > pixels:\n return True\n else:\n return False", "def fn(mask, k):\n if mask == 0: return 0 # no more numbers \n ans = 0\n for i in range(n): \n if mask & 1 << i:\n for j in range(i+1, n): \n if mask & 1 << j: \n mask0 = mask & ~(1<<i) & ~(1<<j) # unset ith & jth bit\n ans = max(ans, k*gcd(nums[i], nums[j]) + fn(mask0, k+1))\n return ans", "def aksprob(alam):\r\n if type(alam) == N.ndarray:\r\n frozen = -1 *N.ones(alam.shape,N.float64)\r\n alam = alam.astype(N.float64)\r\n arrayflag = 1\r\n else:\r\n frozen = N.array(-1.)\r\n alam = N.array(alam,N.float64)\r\n arrayflag = 1\r\n mask = N.zeros(alam.shape)\r\n fac = 2.0 *N.ones(alam.shape,N.float_)\r\n sum = N.zeros(alam.shape,N.float_)\r\n termbf = N.zeros(alam.shape,N.float_)\r\n a2 = N.array(-2.0*alam*alam,N.float64)\r\n totalelements = N.multiply.reduce(N.array(mask.shape))\r\n for j in range(1,201):\r\n if asum(mask) == totalelements:\r\n break\r\n exponents = (a2*j*j)\r\n overflowmask = N.less(exponents,-746)\r\n frozen = N.where(overflowmask,0,frozen)\r\n mask = mask+overflowmask\r\n term = fac*N.exp(exponents)\r\n sum = sum + term\r\n newmask = N.where(N.less_equal(abs(term),(0.001*termbf)) +\r\n N.less(abs(term),1.0e-8*sum), 1, 0)\r\n frozen = N.where(newmask*N.equal(mask,0), sum, frozen)\r\n mask = N.clip(mask+newmask,0,1)\r\n fac = -fac\r\n termbf = abs(term)\r\n if arrayflag:\r\n return N.where(N.equal(frozen,-1), 1.0, frozen) # 1.0 if doesn't converge\r\n else:\r\n return N.where(N.equal(frozen,-1), 1.0, frozen)[0] # 1.0 if doesn't converge\r", "def filter_squares(sequences):\r\n for i in range(1, len(sequences) - 1):\r\n for j in range(1, len(sequences[0])):\r\n if all([sequences[i-1][j-1] == 0,\r\n sequences[i-1][j] == 1,\r\n sequences[i][j-1] == 1,\r\n sequences[i][j] == 0]):\r\n if r.random() > 0.5:\r\n sequences[i][j] = 1\r\n else:\r\n sequences[i-1][j] = 0\r\n return sequences", "def smoothing(image, boxsize = 3):\n boxkernal = np.ones((boxsize, boxsize))/(boxsize**2)\n result = convolve2d(image, boxkernal, mode = 'same')\n return result.astype('uint8')", "def check_correctness_channelwise(f):\n\n rng = np.random.RandomState([2012, 7, 19])\n batch_size = 5\n pool_size = 4\n n = 3 * pool_size\n zv = rng.randn(batch_size, n).astype(config.floatX) * 1. 
- 1.5\n top_down_v = rng.randn(batch_size, n / pool_size).astype(config.floatX)\n\n p_np, h_np = max_pool_channels_python(zv, pool_size, top_down_v)\n\n z_th = T.matrix()\n z_th.name = 'z_th'\n\n top_down_th = T.matrix()\n top_down_th.name = 'top_down_th'\n\n p_th, h_th = f(z_th, pool_size, top_down_th)\n\n func = function([z_th, top_down_th], [p_th, h_th])\n\n pv, hv = func(zv, top_down_v)\n\n assert p_np.shape == pv.shape\n assert h_np.shape == hv.shape\n if not np.allclose(h_np, hv):\n print((h_np.min(), h_np.max()))\n print((hv.min(), hv.max()))\n assert False\n if not np.allclose(p_np, pv):\n diff = abs(p_np - pv)\n print('max diff ', diff.max())\n print('min diff ', diff.min())\n print('ave diff ', diff.mean())\n assert False", "def fn(mask):\n if mask == (1 << n) - 1: return 0 # all courses taken \n can = [] # available courses \n for i in range(n): \n if not mask & 1 << i and mask & pre[i] == pre[i]: \n can.append(i)\n \n ans = inf\n for courses in combinations(can, min(k, len(can))): \n temp = mask \n for c in courses: \n temp |= 1 << c\n ans = min(ans, 1 + fn(temp))\n return ans", "def blackbox_network():\n num_nodes = 6\n num_states = 2 ** num_nodes\n tpm = np.zeros((num_states, num_nodes))\n\n for index, previous_state in enumerate(all_states(num_nodes)):\n current_state = [0 for i in range(num_nodes)]\n if previous_state[5] == 1:\n current_state[0] = 1\n current_state[1] = 1\n if previous_state[0] == 1 and previous_state[1]:\n current_state[2] = 1\n if previous_state[2] == 1:\n current_state[3] = 1\n current_state[4] = 1\n if previous_state[3] == 1 and previous_state[4] == 1:\n current_state[5] = 1\n tpm[index, :] = current_state\n\n # fmt: off\n cm = np.array([\n [0, 0, 1, 0, 0, 0],\n [0, 0, 1, 0, 0, 0],\n [0, 0, 0, 1, 1, 0],\n [0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 1],\n [1, 1, 0, 0, 0, 0],\n ])\n # fmt: on\n\n return Network(tpm, cm, node_labels=LABELS[:tpm.shape[1]])", "def blackAndWhite(image):\n blackPixel = (0, 0, 0)\n whitePixel = (255, 255, 255)\n for y in range(image.getHeight()):\n for x in range(image.getWidth()):\n (r, g, b) = image.getPixel(x, y)\n average = (r + g + b) // 3\n if average < 128:\n image.setPixel(x, y, blackPixel)\n else:\n image.setPixel(x, y, whitePixel)", "def nms(bboxes, iou_threshold, sigma = 0.3, method = 'nms'):\n \"\"\" takes bboxes with the shape of (num_of_box, 6), where 6 => (xmin, ymin, xmax, ymax, score, class) \"\"\"\n \n # remove duplicates in classes\n classes_in_img = list(set(bboxes[:, 5]))\n \n # initialise list to store best bboxes\n best_bboxes = []\n \n # iterate over each class\n for cls in classes_in_img:\n \n # get mask for bboxes with the same class and apply on bboxes to obtain array of bboxes with same class\n cls_mask = (bboxes[:, 5] == cls)\n cls_bboxes = bboxes[cls_mask]\n \n # iterate while there are still bboxes in cls_bboxes\n while len(cls_bboxes) > 0:\n \n # select index of the bbox with the highest score \n max_ind = np.argmax(cls_bboxes[:, 4])\n \n # select bbox with highest score \n best_bbox = cls_bboxes[max_ind]\n \n # append to best _bbox list \n best_bboxes.append(best_bbox)\n \n # obtain cls_bboxes without best bbox\n cls_bboxes = np.concatenate([cls_bboxes[: max_ind], cls_bboxes[max_ind + 1:]])\n \n # calculate iou of remaining bboxes with best bbox \n iou = bbox_iou(best_bbox[np.newaxis, :4], cls_bboxes[:, :4])\n \n weight = np.ones((len(iou), ), dtype = np.float32)\n \n # assert method to be either 'nms' or 'soft_nms'\n assert method in ['nms', 'soft_nms']\n \n if method == 'nms':\n \n # obtain nms iou 
mask based on threshold\n iou_mask = iou > iou_threshold\n \n # apply mask on weights\n weight[iou_mask.numpy()] = 0.0\n \n if method == 'soft_nms':\n \n # obtain soft_nms weights\n weight = np.exp(-(1.0 * iou ** 2 / sigma))\n \n # apply weights on cls_bboxes\n cls_bboxes[:, 4] = cls_bboxes[:, 4] * weight\n \n # obtain score mask of scores greater than zero\n score_mask = cls_bboxes[:, 4] > 0.\n \n # apply mask on cls_bboxes \n cls_bboxes = cls_bboxes[score_mask]\n\n return best_bboxes", "def mandel_numba(x, y, max_iters):\n i = 0\n c = complex(x, y)\n z = 0.0j\n for i in range(max_iters):\n z = z * z + c\n if (z.real * z.real + z.imag * z.imag) >= 4:\n return i\n\n return 255", "def obtain_testing_y(tu, omega, alg):\n \n \n # Create a binary image\n tu_b = np.zeros((len(tu[:, 0, 0]), len(tu[0, :, 0]))) # initialize the binary image to have the same dimensions\n \n if alg == 'LR':\n for i in range(len(tu[:, 0, 0])): # how many rows\n for j in range(len(tu[0, :, 0])): # how many columns\n if np.dot(tu[i, j, :], omega) >= 0.5: # y(wanted) = 1, y(else) = 0\n tu_b[i, j] = 1 # wanted = white on gray scale\n else:\n tu_b[i, j] = 0 # unwanted = black on gray scale\n \n elif alg == 'GNB': # Gaussian Naive Bayes \n for i in range(len(tu[:, 0, 0])): # how many rows\n for j in range(len(tu[0, :, 0])): # how many columns\n he0 = 0\n he1 = 0\n for k in range(3):\n he0 += np.log(omega[8 + k]) + (tu[i, j, k] - omega[2 + k]) ** 2 / omega[8 + k]\n he1 += np.log(omega[11 + k]) + (tu[i, j, k] - omega[5 + k]) ** 2 / omega[11 + k]\n if np.log(1 / omega[0] ** 2) + he0 > np.log(1 / omega[1] ** 2) + he1:\n tu_b[i, j] = 1 # wanted = white on gray scale\n else:\n tu_b[i, j] = 0 # unwanted = black on gray scale\n \n\n return tu_b", "def contains_black(image):\n extrema = ImageStat.Stat(image).extrema\n r = extrema[0][0]\n g = extrema[1][0]\n b = extrema[2][0]\n\n if r == 0 and g == 0 and b == 0:\n return True\n\n return False", "def test_kernel(binary_matrix, result):\n\n # get the kernel from the gaussian elimination.\n pivots = (binary_matrix.T != 0).argmax(axis=0)\n nonpivots = np.setdiff1d(range(len(binary_matrix[0])), pivots)\n\n kernel = []\n for col in nonpivots:\n col_vector = binary_matrix[:, col]\n null_vector = np.zeros((binary_matrix.shape[1]), dtype=int)\n null_vector[col] = 1\n for i in pivots:\n first_entry = np.where(binary_matrix[:, i] == 1)[0][0]\n if col_vector[first_entry] == 1:\n null_vector[i] = 1\n kernel.append(null_vector.tolist())\n\n # get the nullspace from the _kernel function.\n nullspace = _kernel(binary_matrix)\n\n for nullvec in kernel:\n assert nullvec in nullspace.tolist()\n\n assert (nullspace == result).all()", "def py_cpu_nms(dets, scores, thresh): \n # inpurt 8x3 \n x1 = dets[:, 0, 0] \n y1 = dets[:, 0, 1] \n # z1 = dets[:, 0, 2]\n x2 = dets[:, 2, 0] \n y2 = dets[:, 2, 1] \n print('7777777777777',scores.shape)\n # z2 = dets[:, 2, 2] \n # height = dets[:, 4, 2] - dets[:, 0, 2]\n \n areas = (x2 - x1 + 1) * (y2 - y1 + 1) \n # sort scores from high to low and take the indices \n order = scores.argsort()[::-1] \n # keep holds the boxes retained at the end \n keep = [] \n while order.size > 0: \n # order[0] is the window with the current highest score, always kept \n i = order[0] \n keep.append(i) \n # compute the overlap area between window i and all other windows\n xx1 = np.maximum(x1[i], x1[order[1:]]) \n yy1 = np.maximum(y1[i], y1[order[1:]]) \n xx2 = np.minimum(x2[i], x2[order[1:]]) \n yy2 = np.minimum(y2[i], y2[order[1:]]) \n \n w = np.maximum(0.0, xx2 - xx1 + 1) \n h = np.maximum(0.0, yy2 - yy1 + 1) \n inter = w * h \n # intersection / union gives the iou value \n ovr = inter / (areas[i] + areas[order[1:]] - inter) \n 
# inds are the indices of all windows whose iou with window i is below the threshold; the remaining windows are absorbed by window i in this round \n inds = np.where(ovr <= thresh)[0] \n # order keeps only the windows whose overlap with window i is below the threshold; since ovr is 1 shorter than order (it excludes i), inds+1 maps back to the kept windows\n order = order[inds + 1] \n \n return keep", "def gcheckerboard(kernelen=64, nsig=32):\n c = np.array([[-1, 1], [1, -1]])\n intsize = int(np.ceil(kernelen/2))\n return np.kron(c, np.ones([intsize, intsize])) * gkern(kernelen, nsig)", "def blackhat(img, kernel = (5,5)):\n\ttmp = grayscale(img)\n\tk = np.ones(kernel, np.uint8)\n\treturn cv2.morphologyEx(tmp, cv2.MORPH_BLACKHAT, k)", "def _perturbInPlaceSoft(self):\n kevRandom = KevRandom()\n if random.random() < 0.5:\n newThreshold = -1\n while newThreshold < 0 or newThreshold > 1:\n newThreshold = self.threshold + kevRandom.laplacian() #* 0.1\n self.threshold = newThreshold\n else:\n self.beta += kevRandom.laplacian() #* 0.1", "def check_sample_correctishness_bc01(f):\n\n batch_size = 5\n rows = 32\n cols = 30\n channels = 3\n pool_rows = 2\n pool_cols = 3\n\n rng = np.random.RandomState([2012, 9, 26])\n zv = rng.randn(batch_size, channels, rows,\n cols).astype(config.floatX) * 2. - 3.\n top_down_v = rng.randn(batch_size, channels, rows / pool_rows,\n cols / pool_cols).astype(config.floatX)\n\n z_th = T.TensorType(broadcastable=(False, False, False, False),\n dtype = config.floatX)()\n z_th.tag.test_value = zv\n z_th.name = 'z_th'\n\n top_down_th = T.TensorType(broadcastable=(False, False, False, False),\n dtype = config.floatX)()\n top_down_th.tag.test_value = top_down_v\n top_down_th.name = 'top_down_th'\n\n theano_rng = MRG_RandomStreams(rng.randint(2147462579))\n p_th, h_th, p_sth, h_sth = f(z_th, (pool_rows, pool_cols), top_down_th,\n theano_rng)\n\n prob_func = function([z_th, top_down_th], [p_th, h_th])\n pv, hv = prob_func(zv, top_down_v)\n\n sample_func = function([z_th, top_down_th], [p_sth, h_sth])\n\n acc_p = 0. * pv\n acc_h = 0. * hv\n\n # make sure the test gets good coverage, ie, that it includes many\n # different activation probs for both detector and pooling layer\n buckets = 10\n bucket_width = 1. 
/ float(buckets)\n for i in xrange(buckets):\n lower_lim = i * bucket_width\n upper_lim = (i+1) * bucket_width\n\n assert np.any((pv >= lower_lim) * (pv < upper_lim))\n assert np.any((hv >= lower_lim) * (hv < upper_lim))\n\n assert upper_lim == 1.\n\n for i in xrange(10000):\n ps, hs = sample_func(zv, top_down_v)\n\n assert ps.shape == pv.shape\n assert hs.shape == hv.shape\n\n acc_p += ps\n acc_h += hs\n\n est_p = acc_p / float(i+1)\n est_h = acc_h / float(i+1)\n\n pd = np.abs(est_p-pv)\n hd = np.abs(est_h-hv)\n\n \"\"\"\n # plot maps of the estimation error, this is to see if it has some\n # spatial pattern this is useful for detecting bugs like not handling\n # the border correctly, etc.\n from pylearn2.gui.patch_viewer import PatchViewer\n\n pv = PatchViewer((pd.shape[0],pd.shape[3]),(pd.shape[1],pd.shape[2]),\n is_color = False)\n for i in xrange(pd.shape[0]):\n for j in xrange(pd.shape[3]):\n pv.add_patch( (pd[i,:,:,j] / pd.max() )* 2.0 - 1.0, rescale = False)\n pv.show()\n\n pv = PatchViewer((hd.shape[0],hd.shape[3]), (hd.shape[1],hd.shape[2]),\n is_color = False)\n for i in xrange(hd.shape[0]):\n for j in xrange(hd.shape[3]):\n pv.add_patch( (hd[i,:,:,j] / hd.max() )* 2.0 - 1.0, rescale = False)\n pv.show()\n \"\"\"\n\n \"\"\"\n plot expectation to estimate versus error in estimation\n expect bigger errors for values closer to 0.5\n\n from matplotlib import pyplot as plt\n\n #nelem = reduce( lambda x, y : x*y, pd.shape)\n #plt.scatter( pv.reshape(nelem), pd.reshape(nelem))\n #plt.show()\n\n nelem = reduce( lambda x, y : x*y, hd.shape)\n plt.scatter( hv.reshape(nelem), hd.reshape(nelem))\n plt.show()\n \"\"\"\n\n # don't really know how tight this should be\n # but you can try to pose an equivalent problem\n # and implement it in another way\n # using a numpy implementation in softmax_acc.py\n # I got a max error of .17\n assert max(pd.max(), hd.max()) < .17\n\n # Do exhaustive checks on just the last sample\n assert np.all((ps == 0) + (ps == 1))\n assert np.all((hs == 0) + (hs == 1))\n\n for k in xrange(batch_size):\n for i in xrange(ps.shape[2]):\n for j in xrange(ps.shape[3]):\n for l in xrange(channels):\n p = ps[k, l, i, j]\n h = hs[k, l, i*pool_rows:(i+1)*pool_rows,\n j*pool_cols:(j+1)*pool_cols]\n assert h.shape == (pool_rows, pool_cols)\n assert p == h.max()\n assert h.sum() <= 1\n\n \"\"\" If you made it to here, it's correctish\n (cant tell if samples are perfectly \"correct\") \"\"\"", "def blackbody(lbda, T):\n fac = 2*c.h.value*(c.c.value**2)/(np.power(lbda*1e-6,5))\n div = (1/(np.exp((c.h.value*c.c.value)/((lbda*1e-6)*c.k_B.value*T))-1))\n # convert from W m-3 Sr-1 to W m-2 mu-1 Sr-1 \n conv = 1e-6\n return fac*div*conv", "def filterBNW(bmp, threshold, savefile = '' ):\n for h in range(bmp.height):\n for w in range(bmp.width):\n if( intensity(bmp.pixels[h][w]) > threshold ):\n bmp.pixels[h][w] = WHITE\n else:\n bmp.pixels[h][w] = BLACK\n\n if( savefile != '' ):\n bmp.save(savefile)\n return bmp", "def cube_fix_badpix_isolated(array, bpm_mask=None, sigma_clip=3, num_neig=5, \n size=5, frame_by_frame=False, protect_mask=0, \n cxy=None, mad=False, ignore_nan=True, verbose=True, \n full_output=False):\n if array.ndim != 3:\n raise TypeError('Array is not a 3d array or cube')\n if size % 2 == 0:\n raise TypeError('Size of the median blur kernel must be an odd integer')\n \n if bpm_mask is not None:\n bpm_mask = bpm_mask.astype('bool')\n \n if verbose: start = time_ini()\n \n if num_neig > 0:\n neigh = True\n else:\n neigh = False\n \n nz = array.shape[0]\n \n if cxy is 
None:\n cy, cx = frame_center(array[0])\n elif isinstance(cxy, tuple):\n cx, cy = cxy\n elif isinstance(cxy, np.ndarray):\n if cxy.shape[0] != nz or cxy.shape[1] != 2 or cxy.ndim != 2:\n raise ValueError(\"cxy does not have right shape\")\n elif not frame_by_frame:\n msg = \"cxy must be a tuple or None if not in frame_by_frame mode\"\n raise ValueError(msg)\n else:\n cx = cxy[:,0]\n cy = cxy[:,1]\n\n \n array_out = array.copy()\n final_bpm = np.zeros_like(array_out, dtype=bool)\n n_frames = array.shape[0]\n count_bp = 0\n if frame_by_frame:\n if np.isscalar(cx):\n cx = [cx]*nz\n cy = [cy]*nz\n if bpm_mask is not None:\n if bpm_mask.ndim == 2:\n bpm_mask = [bpm_mask]*n_frames\n bpm_mask = np.array(bpm_mask)\n for i in Progressbar(range(n_frames), desc=\"processing frames\"):\n if bpm_mask is not None:\n bpm_mask_tmp = bpm_mask[i]\n else:\n bpm_mask_tmp = None\n res = frame_fix_badpix_isolated(array[i], bpm_mask=bpm_mask_tmp, \n sigma_clip=sigma_clip,\n num_neig=num_neig, size=size, \n protect_mask=protect_mask, \n verbose=False, cxy=(cx[i],cy[i]), \n ignore_nan=ignore_nan,\n full_output=True)\n array_out[i] = res[0]\n final_bpm[i] = res[1] \n count_bp = np.sum(final_bpm) \n else: \n if bpm_mask is None:\n ori_nan_mask = np.where(np.isnan(np.nanmean(array, axis=0)))\n ind = clip_array(np.nanmean(array, axis=0), sigma_clip, sigma_clip,\n neighbor=neigh, num_neighbor=num_neig, mad=mad)\n final_bpm = np.zeros_like(array[0], dtype=bool)\n final_bpm[ind] = 1\n if ignore_nan:\n final_bpm[ori_nan_mask] = 0\n if protect_mask:\n cir = disk((cy, cx), protect_mask, shape=final_bpm.shape)\n final_bpm[cir] = 0\n final_bpm = final_bpm.astype('bool')\n else:\n if bpm_mask.ndim == 3:\n final_bpm = np.median(bpm_mask, axis=0)\n else:\n final_bpm = bpm_mask.copy()\n \n for i in Progressbar(range(n_frames), desc=\"processing frames\"):\n frame = array_out[i]\n smoothed = median_filter(frame, size, mode='mirror')\n frame[np.where(final_bpm)] = smoothed[np.where(final_bpm)]\n if verbose: \n count_bp += np.sum(final_bpm)\n \n if verbose: \n msg = \"Done replacing {:.0f} bad pixels using the median of neighbors\"\n print(msg.format(count_bp))\n if not frame_by_frame:\n msg = \"(i.e. 
{:.0f} static bad pixels per channel))\"\n print(msg.format(count_bp/n_frames)) \n timing(start)\n \n if full_output:\n return array_out, final_bpm\n else:\n return array_out", "def create(matrix):\n limit_y = len(matrix)\n limit_x = len(matrix[0])\n\n for y in range(1, limit_y):\n bit.create(matrix[y])\n\n for x in range(1, limit_x):\n for y in range(1, limit_y):\n k = y + (y & -y)\n if k < limit_y:\n matrix[k][x] += matrix[y][x]", "def sqr_inplace(a):", "def scoreCirc_PassiveFilter(circuit, gen, indi, makeRedundancyInMatrix):#TODO\n #Calculate density and uniquiness (as in makeNetlist)\n if makeRedundancyInMatrix == True:\n FullBigCircuitMatrix = deepcopy(fullRedundancyBigCircuitMatrix(circuit.BigCircuitMatrix))\n else:\n FullBigCircuitMatrix = deepcopy(circuit.BigCircuitMatrix)\n\n rowsR,columnsR,columnsC,rowsC = sortedNonZeroIndices(FullBigCircuitMatrix)\n\n matrixDensity = float(len(rowsR))/float((BigMatrixSize*BigMatrixSize/2))\t#(ones/(all/2))\n matrixQuaziID = sum(rowsR)+sum(columnsR)-BigMatrixSize*(BigMatrixSize-1)\n OcSc, IcNc, SelfConnElm = checkConnsConnected(FullBigCircuitMatrix) #Outer connections Short cut, Inner connections Not connected\n #print \"Kratkih stikov zunanjih povezav:\", OcSc\n \n results = None\n if OcSc > 1:\n score = 1e4*np.exp(OcSc)\n else:\n makeNetlist(circuit, gen, indi, FullBigCircuitMatrix)\n results = runme2.evaluatePassiveFilter_SUHAD(gen, indi)#TODO\n \n disfCount = 0\n\n gain = np.array(results['gain']['nominal'], dtype=float)\n if np.isnan(gain):\n disfCount = disfCount + 1\n g = 0\n else:\n g = abs(gain - 0) if gain < 0 else 0\n\n ripple = np.array(results['ripple']['nominal'], dtype=float)\n if np.isnan(ripple):\n disfCount = disfCount + 1\n r = 0 \n else:\n r = abs(ripple - 0.5)# if ripple > 0.5 else 0\n \n damping = np.array(results['damping']['nominal'], dtype=float)\n if np.isnan(damping):\n disfCount = disfCount + 1\n d = 0\n else:\n d = abs(40 - damping)# if damping < 60 else 0\n \n #THD = np.array(results['THD']['nominal'], dtype=float)\n #if np.isnan(THD):\n # disfCount = disfCount + 1\n # thd = 0\n #else:\n # thd = THD-1 if THD > 1 else 0\n \n score = 10*r + g + 10*d\n\n if disfCount > 0:\n score = np.exp(disfCount) * 1e3\n\n score = score + (IcNc*IcNc+1)# + abs(BW-bw)*1e2 + abs(CUTOFF-cutoff)*1e2 #add small punishment if not all nodes connected and bw and cutoff are off\n\n #print \"\\t\\t\\t\\t\\tG_\" + str(gen) + \"_I_\" + str(indi) + \" SCORE:\", score\n #cleanup current subcircuit\n filename = \"g_\" + str(gen) + \"_i_\" + str(indi) + \"_subckt.cir\"\n os.remove(filename)\n return score, matrixDensity, matrixQuaziID, results", "def mask(n):\n if n >= 0:\n return 2**n - 1\n else:\n return 0", "def mask(n):\n if n >= 0:\n return 2**n - 1\n else:\n return 0", "def reduce_possibilities_by_box(self):\n x = self.targetCell.x\n y = self.targetCell.y\n if x < 3 and y < 3: #top left\n self.check_box1()\n if x > 2 and x < 6 and y < 3: #middle left\n self.check_box2()\n if x > 5 and y < 3: #bottom left\n self.check_box3()\n if x < 3 and y > 2 and y < 6: #top middle\n self.check_box4()\n if x > 2 and x < 6 and y > 2 and y < 6: #center\n self.check_box5()\n if x > 5 and y > 2 and y < 6: #bottom middle\n self.check_box6()\n if x < 3 and y > 5: #top right\n self.check_box7()\n if x > 2 and x < 6 and y > 5: #middle right\n self.check_box8()\n if x > 5 and y > 5: #bottom right\n self.check_box9()\n self.targetCell.box_neighbour_possibilities = flatten_list(self.targetCell.box_neighbour_possibilities)", "def get_biomass(binary_mask):\n\n 
white_pixels = cv2.countNonZero(binary_mask)\n return white_pixels", "def evaluate_f(self, x_query, black_box_function=None):\n # BoTorch assumes a maximization problem\n if black_box_function is not None:\n return -black_box_function(x_query)", "def blackbody(self, nu, T):\n x = self.h*nu/(self.kB*T)\n result = 2.*self.h*nu**3 /self.c**2\n result /= np.exp(x) - 1.\n return result", "def _optimise(self):\n pass", "def _blr_mbgs(obj):\n rnb, cnb = obj.nb\n min_nb = min(obj.nb)\n A = obj.copy()\n Q = core.BlockLowRank(numpy.full((rnb, min_nb), None))\n R = core.BlockLowRank(numpy.full((min_nb, cnb), None))\n\n for i, j in numpy.ndindex(R.nb):\n rows = A[i, i].shape[1]\n cols = A[i, j].shape[1]\n R[i, j] = core.Zero((rows, cols))\n\n for j in range(min_nb):\n Q[:, j], R[j, j] = _blr_tsqr(A[:, j])\n\n for k in range(j + 1, cnb):\n R[j, k] = (Q[:, j].T * A[:, k])[0, 0]\n A[:, k] = A[:, k] - Q[:, j] * core.BlockLowRank([[R[j, k]]])\n\n return Q, R", "def mask(self):", "def gaborFilter(img, ksize=31):\n filters = []\n #ksize = 31\n for theta in np.arange(0, np.pi, np.pi / 16):\n kern = cv2.getGaborKernel((ksize, ksize), 4.0, theta, 10.0, 0.5, 0, ktype=cv2.CV_32F)\n kern /= 1.5*kern.sum()\n filters.append(kern)\n accum = np.zeros_like(img)\n for ker in filters:\n fimg = cv2.filter2D(img, cv2.CV_8UC3, ker)\n np.maximum(accum, fimg, accum)\n return accum", "def check_correctness_sigmoid_channelwise(f):\n\n rng = np.random.RandomState([2012, 7, 19])\n batch_size = 5\n pool_size = 1\n n = 3 * pool_size\n zv = rng.randn(batch_size, n).astype(config.floatX) * 1. - 1.5\n top_down_v = rng.randn(batch_size, n / pool_size).astype(config.floatX)\n\n z_th = T.matrix()\n z_th.name = 'z_th'\n\n top_down_th = T.matrix()\n top_down_th.name = 'top_down_th'\n\n p_th, h_th = f(z_th, pool_size, top_down_th)\n h_s = T.nnet.sigmoid(z_th + top_down_th)\n\n func = function([z_th, top_down_th], [p_th, h_th, h_s])\n\n pv, hv, h_s = func(zv, top_down_v)\n p_s = h_s\n\n assert p_s.shape == pv.shape\n assert h_s.shape == hv.shape\n if not np.allclose(h_s, hv):\n print((h_s.min(), h_s.max()))\n print((hv.min(), hv.max()))\n assert False\n if not np.allclose(p_s, pv):\n diff = abs(p_s - pv)\n print('max diff ', diff.max())\n print('min diff ', diff.min())\n print('ave diff ', diff.mean())\n assert False", "def non_max_suppression(bboxes, iou_threshold, threshold, box_format=\"corners\"):\n\n # 49 x 6 \n assert type(bboxes) == list\n # print(bboxes)\n bboxes = [box for box in bboxes if box[1] > threshold]\n bboxes = sorted(bboxes, key=lambda x: x[1], reverse=True)\n bboxes_after_nms = []\n # print(bboxes)\n while bboxes:\n chosen_box = bboxes.pop(0)\n bbox_temp = bboxes.copy()\n bboxes = []\n for box in bbox_temp: # not the same class or not overlap a lot \n if box[0] != chosen_box[0] or intersection_over_union(torch.tensor(chosen_box[2:]),torch.tensor(box[2:]), box_format=box_format,) < iou_threshold:\n bboxes.append(box)\n\n bboxes_after_nms.append(chosen_box)\n # print(\"NMS: \" + str(len(bboxes_after_nms)))\n return bboxes_after_nms", "def compute(self, node, input_vals):\r\n #start = time.time()\r\n ans = np.zeros(input_vals[1].shape).astype(float32)\r\n #assert len(input_vals) == 3\r\n strides = node.const_attr\r\n ish = list(input_vals[0].shape)\r\n fsh = list(input_vals[1].shape)\r\n filter = input_vals[1].astype(float32)\r\n input = np.zeros((ish[0],ish[1]+fsh[0]-1,ish[2]+fsh[1]-1,ish[3])).astype(float32)\r\n input[:,fsh[0]//2:fsh[0]//2+ish[1]:1,fsh[1]//2:fsh[1]//2+ish[2]:1,:]+=input_vals[0].astype(float32)\r\n ish = 
list(input.shape)\r\n output_grad = input_vals[2].astype(float32)\r\n osh = output_grad.shape\r\n assert c_kernel.conv2d_c_grad2(get_pointer(input), ish[0],ish[1],ish[2],ish[3],get_pointer(ans),fsh[0],fsh[1],fsh[2],fsh[3],strides[0],strides[1],strides[2],strides[3],get_pointer(output_grad), osh[0],osh[1],osh[2],osh[3])==0\r\n #print(\"conv2d_grad2\") \r\n #end = time.time()\r\n \r\n #print(end - start) \r\n return ans\r\n \r\n '''rm = range(osh[0])\r\n ri = range(osh[1])\r\n rj = range(osh[2])\r\n rdi = range(fsh[0])\r\n rdj = range(fsh[1])\r\n for m in rm:\r\n for i in ri:\r\n for j in rj:\r\n for di in rdi:\r\n for dj in rdj:\r\n \"\"\"t = np.dot(\r\n input[m,strides[1]*i+di,strides[2]*j+dj,:],\r\n filter[di,dj,:,:]\r\n )\"\"\"\r\n #print(input[m,strides[1]*i+di,strides[2]*j+dj,:].shape)\r\n #print(output_grad[m,i,j].shape)\r\n ans[di,dj,:,:] += np.dot(input[m,strides[1]*i+di,strides[2]*j+dj,:].reshape((-1,1)), output_grad[m,i,j].reshape((1,-1)))\r\n \"\"\"output[m,i,j] = np.sum(\r\n [\r\n t,\r\n output[m,i,j]\r\n ],\r\n axis=0\r\n )\"\"\"\r\n return ans'''", "def outlierdetection(data,method):\n import numpy as np\n ##########\n # 0. Input\n data = np.array(data)\n methodname = method['name']\n rule = method['rule']\n try:\n mask = rule['initmask'].copy()\n if not mask:\n mask = np.full_like(data,True,dtype=bool)\n rule['initmask'] = mask.copy()\n except:\n mask = np.full_like(data,True,dtype=bool)\n rule['initmask'] = mask.copy()\n ##########\n # 1. Compute\n if methodname in {'median','sigma'}:\n minp,maxp = rule['minp'],rule['maxp']\n niter = rule['niter']\n for i in range(niter):\n gooddata = data[mask] # good data\n ### median or sigma\n if methodname=='median':\n median = np.median(gooddata)\n minbound = minp*median\n maxbound = maxp*median\n elif methodname=='sigma':\n std = np.std(gooddata)\n median = np.median(gooddata)\n minbound = median - minp*std\n maxbound = median + maxp*std\n ### update mask\n m = np.argwhere((data >= minbound) & (data <= maxbound)).flatten() # good data\n mask = np.full_like(data,False,dtype=bool)\n mask[m] = True\n print('{0} iter {1}'.format(methodname,i))\n elif methodname == 'sn':\n minp = rule['minp']\n noise = rule['noise']\n keepneg = rule['keepneg']\n sn = data / noise\n if keepneg:\n sn = np.abs(sn)\n m = np.argwhere(sn >= minp).flatten()\n mask = np.full_like(data,False,dtype=bool)\n mask[m] = True\n print('{0} complete'.format(methodname))\n elif methodname == 'sigmalocal':\n sigma = rule['sigma']\n noise = rule['noise']\n keepneg = rule['keepneg']\n niter = rule['niter']\n params = rule['params']\n for i in range(niter):\n tmpdata = data[mask]\n tmpmedian = savgol_filter(tmpdata,**params)\n tmpnoise = noise[mask]\n ratio = (tmpdata - tmpmedian)/tmpnoise\n if keepneg:\n ratio = np.abs(ratio)\n m = np.argwhere(ratio > sigma).flatten()\n mask[m] = False\n print('{0} iter {1}'.format(methodname,i))\n else:\n raise ValueError('method {0} does not support'.format(method))\n ##########\n # 2. 
Update with the initial mask and return\n return mask & rule['initmask']", "def fn(mask):\n for i in range(len(piles)): \n val = (mask >> 3*i) & 7\n for k in range(1, val+1): \n mask0 = mask - (k << 3*i)\n if not fn(mask0): return True \n return False", "def _remove_outliers(self, boxes):\n\n filtered_boxes = []\n for bc in boxes:\n w = bc[2] - bc[0]\n h = bc[3] - bc[1]\n if bc[1] < 450 and w > 32 and h > 32:\n filtered_boxes.append(bc)\n elif bc[1] > 450 and w > 64 and h > 64:\n filtered_boxes.append(bc)\n\n return np.array(filtered_boxes)", "def check_sample_correctishness_channelwise(f):\n\n batch_size = 27\n pool_size = 4\n n = pool_size * 21\n\n rng = np.random.RandomState([2012, 9, 26])\n zv = rng.randn(batch_size, n).astype(config.floatX) * 3.5 - 5.\n top_down_v = rng.randn(batch_size, n / pool_size).astype(config.floatX)\n\n z_th = T.matrix()\n z_th.tag.test_value = zv\n z_th.name = 'z_th'\n\n top_down_th = T.matrix()\n top_down_th.tag.test_value = top_down_v\n top_down_th.name = 'top_down_th'\n\n theano_rng = MRG_RandomStreams(rng.randint(2147462579))\n p_th, h_th, p_sth, h_sth = f(z_th, pool_size, top_down_th, theano_rng)\n\n prob_func = function([z_th, top_down_th], [p_th, h_th])\n pv, hv = prob_func(zv, top_down_v)\n\n sample_func = function([z_th, top_down_th], [p_sth, h_sth])\n\n acc_p = 0. * pv\n acc_h = 0. * hv\n\n # make sure the test gets good coverage, ie, that it includes\n # many different activation probs for both detector and pooling layer\n buckets = 10\n bucket_width = 1. / float(buckets)\n print(pv.min(), pv.max())\n print(hv.min(), hv.max())\n for i in xrange(buckets):\n lower_lim = i * bucket_width\n upper_lim = (i+1) * bucket_width\n print(lower_lim, upper_lim)\n\n assert np.any((pv >= lower_lim) * (pv < upper_lim))\n assert np.any((hv >= lower_lim) * (hv < upper_lim))\n\n assert upper_lim == 1.\n\n for i in xrange(10000):\n ps, hs = sample_func(zv, top_down_v)\n\n assert ps.shape == pv.shape\n assert hs.shape == hv.shape\n\n acc_p += ps\n acc_h += hs\n\n est_p = acc_p / float(i+1)\n est_h = acc_h / float(i+1)\n\n pd = np.abs(est_p-pv)\n hd = np.abs(est_h-hv)\n\n \"\"\"\n # plot maps of the estimation error, this is to see if it has some\n # spatial pattern this is useful for detecting bugs like not handling\n # the border correctly, etc.\n # from pylearn2.gui.patch_viewer import PatchViewer\n\n pv = PatchViewer((pd.shape[0],pd.shape[3]),(pd.shape[1],pd.shape[2]),\n is_color = False)\n for i in xrange(pd.shape[0]):\n for j in xrange(pd.shape[3]):\n pv.add_patch( (pd[i,:,:,j] / pd.max() )* 2.0 - 1.0, rescale = False)\n pv.show()\n\n pv = PatchViewer((hd.shape[0],hd.shape[3]),(hd.shape[1],hd.shape[2]),\n is_color = False)\n for i in xrange(hd.shape[0]):\n for j in xrange(hd.shape[3]):\n pv.add_patch( (hd[i,:,:,j] / hd.max() )* 2.0 - 1.0, rescale = False)\n pv.show()\n \"\"\"\n\n \"\"\"\n plot expectation to estimate versus error in estimation\n expect bigger errors for values closer to 0.5\n\n from matplotlib import pyplot as plt\n\n #nelem = reduce( lambda x, y : x*y, pd.shape)\n #plt.scatter( pv.reshape(nelem), pd.reshape(nelem))\n #plt.show()\n\n nelem = reduce( lambda x, y : x*y, hd.shape)\n plt.scatter( hv.reshape(nelem), hd.reshape(nelem))\n plt.show()\n \"\"\"\n\n # don't really know how tight this should be\n # but you can try to pose an equivalent problem\n # and implement it in another way\n # using a numpy implementation in softmax_acc.py\n # I got a max error of .17\n assert max(pd.max(), hd.max()) < .17\n\n # Do exhaustive checks on just the last 
sample\n assert np.all((ps == 0) + (ps == 1))\n assert np.all((hs == 0) + (hs == 1))\n\n for k in xrange(batch_size):\n for i in xrange(ps.shape[1]):\n p = ps[k, i]\n h = hs[k, i*pool_size:(i+1)*pool_size]\n assert h.shape == (pool_size,)\n assert p == h.max()\n assert h.sum() <= 1\n\n \"\"\" If you made it to here, it's correctish\n (cant tell if samples are perfectly \"correct\") \"\"\"", "def test_noise_equiv_bandwidth_boxcar():\n win = windows.boxcar(2000)\n assert 1 == 1.0 / utils.noise_equivalent_bandwidth(win)", "def ct_bis(env, a, median, mad):\n\n\tb1 = (a.T < median - env.threshold * mad).T\n\tb2 = a[:, :a.shape[1] - 1] < a[:, 1:]\n\tb3 = a[:, 1:] < a[:, :a.shape[1] - 1]\n\tb1[:, :a.shape[1] - 1] = b1[:, :a.shape[1] - 1] & b2\n\tb1[:, 1:] = b1[:, 1:] & b3\n\treturn (b1)", "def train(trial_num, image_num, filter_num, filter_size, input_size, channel_num, pooling_rate, left_upper_padding, right_lower_padding):\n\n input_batch_num = 1\n batch_num = 2\n\n init_filters = np.array(np.random.normal(size=filter_num * channel_num *\n filter_size*filter_size), dtype=\"float32\")\n #init_filters = np.array([1.0] * filter_num * channel_num * filter_size * filter_size, dtype=\"float32\")\n init_filters = 0.01 * init_filters.reshape(filter_num, channel_num*filter_size*filter_size)\n\n init_hbias = np.array([-0.1] * filter_num, dtype=\"float32\").reshape(filter_num, 1)\n\n init_vbias = np.array([0.0] * channel_num, dtype=\"float32\").reshape(channel_num, 1)\n\n libnvcrbm = __import__(\"nvcrbm\")\n cur_filters = libnvcrbm.init(filter_num, filter_size, \n input_batch_num, input_size, channel_num,\n pooling_rate, left_upper_padding, right_lower_padding,\n init_filters, init_hbias, init_vbias)\n\n imgs = cPickle.load(open(\"../data/kyoto_large_train.pkl\", \"r\"))\n img_size = imgs[0].shape[0]\n\n for trial_idx in xrange(trial_num):\n for img_idx in xrange(image_num):\n for batch_idx in xrange(batch_num):\n row_idx = np.arange(0, input_size) + np.random.random_integers(img_size - 2 * filter_size - input_size) + filter_size - 1\n col_idx = np.arange(0, input_size) + np.random.random_integers(img_size - 2 * filter_size - input_size) + filter_size - 1\n #row_idx = np.arange(0, input_size) + 200\n #col_idx = np.arange(0, input_size) + 200\n\n batch_data = imgs[img_idx][row_idx][:,col_idx]\n batch_data = batch_data - batch_data.mean()\n batch_data = np.asarray(batch_data.reshape(1, input_size * input_size), dtype=\"float32\")\n \n libnvcrbm.run_batch(trial_idx, img_idx, batch_idx, batch_data)\n\n libnvcrbm.print_result()\n cur_filters = libnvcrbm.get_gpu_filters()\n dump_filter_image(cur_filters, \"../data/kyoto/filters/trial_%d.png\" % trial_idx)\n\n first_layer = {}\n first_layer[\"filters\"] = cur_filters\n first_layer[\"bias\"] = libnvcrbm.get_gpu_hbias()\n cPickle.dump(first_layer, open(\"../data/first_layer.dat\", \"w+\"))", "def nms(bobj, cf_thresh, nms_thresh):\n bboxs = bobj[\"boxs\"]\n scores = bobj[\"scores\"]\n cfvalid_ids = np.where(scores >= cf_thresh)[0]\n if len(cfvalid_ids) == 0:\n return None, None\n bboxs = bobj[\"boxs\"][cfvalid_ids]\n scores = scores[cfvalid_ids]\n ids = bobj[\"ids\"][cfvalid_ids]\n masks = bobj[\"masks\"][cfvalid_ids]\n x1 = bboxs[:, 0]\n y1 = bboxs[:, 1]\n x2 = bboxs[:, 2]\n y2 = bboxs[:, 3]\n areas = (x2 - x1 + 1) * (y2 - y1 + 1)\n # cfvalid_ids = np.where(scores >= cf_thresh)[0]\n # scores = scores[cfvalid_ids]\n\n # order = scores.argsort()[::-1]\n mask_sizes = np.sum(masks, axis=(1, 2))\n order = mask_sizes.argsort()[::-1]\n keep = []\n suppress = []\n while 
order.size > 0:\n i = order[0]\n keep.append(i)\n xx1 = np.maximum(x1[i], x1[order[1:]])\n yy1 = np.maximum(y1[i], y1[order[1:]])\n xx2 = np.minimum(x2[i], x2[order[1:]])\n yy2 = np.minimum(y2[i], y2[order[1:]])\n w = np.maximum(0.0, xx2 - xx1 + 1)\n h = np.maximum(0.0, yy2 - yy1 + 1)\n inter = w * h\n iou = inter / (areas[i] + areas[order[1:]] - inter)\n # Because of we split the object cross the boundary in the cropped instance,\n # concatenating it to the original instance, thus we need also mask iou condition for nms\n mask_other = masks[order[1:], :, :]\n mask_cur = masks[i, :, :]\n mask_inter = np.sum(mask_cur & mask_other, axis=(1, 2))\n mask_union = np.sum(mask_cur | mask_other, axis=(1, 2))\n mask_iou = mask_inter / mask_union\n\n suppress_inds = np.where((iou > nms_thresh) | (mask_iou > nms_thresh))[0]\n sup_i = order[1:][suppress_inds] if suppress_inds.size != 0 else np.array([])\n suppress.append(sup_i)\n\n inds = np.where((iou <= nms_thresh) & (mask_iou <= nms_thresh))[0]\n order = order[inds + 1]\n\n for i, sup in enumerate(suppress):\n if sup.any():\n for sup_id in sup:\n # sup_id = s + 1\n keep_id = keep[i]\n # union the keep mask and the suppress mask\n masks[keep_id, :, :] = masks[keep_id, :, :] | masks[sup_id, :, :]\n if keep:\n return ids[keep], masks[keep]\n else:\n return [], []", "def scoreCirc_PassiveFilter_2(circuit, gen, indi, makeRedundancyInMatrix):\n #Calculate density and uniquiness (as in makeNetlist)\n if makeRedundancyInMatrix == True:\n FullBigCircuitMatrix = deepcopy(fullRedundancyBigCircuitMatrix(circuit.BigCircuitMatrix))\n else:\n FullBigCircuitMatrix = deepcopy(circuit.BigCircuitMatrix)\n\n rowsR,columnsR,columnsC,rowsC = sortedNonZeroIndices(FullBigCircuitMatrix)\n\n matrixDensity = float(len(rowsR))/float((BigMatrixSize*BigMatrixSize/2))\t#(ones/(all/2))\n matrixQuaziID = sum(rowsR)+sum(columnsR)-BigMatrixSize*(BigMatrixSize-1)\n OcSc, IcNc, SelfConnElm = checkConnsConnected(FullBigCircuitMatrix) #Outer connections Short cut, Inner connections Not connected\n #print \"Kratkih stikov zunanjih povezav:\", OcSc\n \n results = None\n if OcSc > 1:\n score = 1e4*np.exp(OcSc)\n else:\n makeNetlist(circuit, gen, indi, FullBigCircuitMatrix)\n results = runme2.evaluatePassiveFilter_2(gen, indi)\n \n disfCount = 0\n\n gain = np.array(results['gain']['nominal'], dtype=float)\n if np.isnan(gain):\n disfCount = disfCount + 1\n g = 0\n else:\n g = abs(gain - 0) if gain < 0 else 0\n\n ripple = np.array(results['ripple']['nominal'], dtype=float)\n if np.isnan(ripple):\n disfCount = disfCount + 1\n r = 0 \n else:\n r = abs(ripple - 0.5) if ripple > 0.5 else 0\n \n damping = np.array(results['damping']['nominal'], dtype=float)\n if np.isnan(damping):\n disfCount = disfCount + 1\n d = 0\n else:\n d = abs(40 - damping) if damping < 40 else 0\n\n slope = np.array(results['dumpingSlope']['nominal'], dtype=float)\n if np.isnan(slope):\n disfCount = disfCount + 1\n slo = 0\n else:\n slo = 0 if slope>60 else 60-slope\n \n bandwidth = np.array(results['bw']['nominal'], dtype=float)\n if np.isnan(bandwidth):\n disfCount = disfCount + 1\n bw = 0\n else:\n bw = abs(bandwidth-1000)/100\n \n #THD = np.array(results['THD']['nominal'], dtype=float)\n #if np.isnan(THD):\n # disfCount = disfCount + 1\n # thd = 0\n #else:\n # thd = THD-1 if THD > 1 else 0\n #print 10*r, g, d, slo, bw\n score = 10*r + g + d + slo + bw\n\n if disfCount > 0:\n score += np.exp(disfCount) * 1e3\n\n #score = score + (IcNc*IcNc+1)# + abs(BW-bw)*1e2 + abs(CUTOFF-cutoff)*1e2 #add small punishment if not all 
nodes connected and bw and cutoff are off\n\n #print \"\\t\\t\\t\\t\\tG_\" + str(gen) + \"_I_\" + str(indi) + \" SCORE:\", score\n #cleanup current subcircuit\n filename = \"g_\" + str(gen) + \"_i_\" + str(indi) + \"_subckt.cir\"\n os.remove(filename)\n return score, matrixDensity, matrixQuaziID, results", "def membrane_detect(img_grayscale):\n \"\"\"input: grayscale image\"\"\"\n \"\"\"output: binary image\"\"\"\n\n #sharpened image:\n im_sharp= unsharp_mask(img_grayscale, radius=2, amount=2)\n\n # Equalization threshold:\n p2, p98 = np.percentile(im_sharp, (2, 98))\n im_eq = exposure.rescale_intensity(img_grayscale, in_range=(p2, p98))\n\n #Gaussian:\n im_gaus=gaussian_filter(im_eq, sigma=2.7)\n\n #Edge detection: \n im_edge=feature.canny(im_gaus, sigma=1)\n\n #remove small objects:\n im_clean1 = morphology.remove_small_objects(im_edge, 200, in_place=True, connectivity=50)\n\n #close:\n phantom = im_clean1\n phantom[10:30, 200:210] = 0\n selem_c = disk(10)\n im_closed = closing(phantom, selem_c)\n\n #dialated:\n selem_d = disk(4)\n im_dialated=dilation(im_closed, selem_d)\n\n\n #remove small objects:\n im_final = morphology.remove_small_objects(im_dialated, 1700, in_place=True, connectivity=200)\n\n return im_final", "def find_img2d_candidates(image, **kwargs):\n\n # filter_kernel = np.array([[-1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, 8/225, 8/225, 8/225, 8/225, 8/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, 8/225, 8/225, 8/225, 8/225, 8/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, 8/225, 8/225, 8/225, 8/225, 8/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, 8/225, 8/225, 8/225, 8/225, 8/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, 8/225, 8/225, 8/225, 8/225, 8/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225]])\n\n filter_kernel = np.array([[-2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2/324, -2/324, -2/324],\n [-2 / 324, -2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1/324, -2/324, 
-2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, -2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, -2 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, -2 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, -2 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, -2 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, -2 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, -2 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, -2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, -2 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1/324, -2/324, -2/324],\n [-2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2/324, -2/324, -2/324]])\n\n res = sg.convolve2d(image, filter_kernel, mode='same', boundary='fill', fillvalue=0)\n coord_x, coord_y = find_max_coords(np.absolute(res))\n\n return coord_x, coord_y", "def bottleneck_block(outer_filters, bottleneck_filters):\n return compose(DarknetConv2D_BN_Leaky(outer_filters, (3, 3)),\n DarknetConv2D_BN_Leaky(bottleneck_filters, (1, 1)),\n DarknetConv2D_BN_Leaky(outer_filters, (3, 3)))", "def boxfilter(y, N):\n n = np.clip(np.arange(len(y)), 0, N-1) + 1\n return np.array([ y[i+1-ni:i+1].sum() / ni for i,ni in zip(range(len(y)), n) ])", "def check_if_white_back_black_edge(pred):\n values = np.unique(pred)\n # print(values)\n\n # check if binary\n if len(values) > 2:\n 
print(\"Your prediction result has not been binarized, please prompt them to choose the appropriate threshold for binarization.\")\n raise ValueError\n\n white_pos = np.where(pred == 255)\n # print(len(white_pos[0]))\n white_count = len(white_pos[0])\n black_pos = np.where(pred == 0)\n # print(len(black_pos[0]))\n black_count = len(black_pos[0])\n # print(black_count / white_count)\n rate = black_count / white_count\n if rate < 5:\n print(\"The results must be submitted with white background and black edge. Please submit after correction.\")\n raise ValueError", "def bench(count=10):\n from sys import platform\n from time import time, asctime\n from numpy import average, zeros\n\n filename = 'csample.jpg'\n f = readgray(filename)\n fbin=threshad(f,150)\n se = img2se(binary([[0,1,0],[1,1,1],[0,1,0]]),'NON-FLAT',to_int32([[0,1,0],[1,2,1],[0,1,0]]))\n m=thin(fbin)\n tasks=[\n [' 1. Union bin ','union(fbin,fbin)'],\n [' 2. Union gray-scale ','union(f,f)'],\n [' 3. Dilation bin, secross ','dilate(fbin)'],\n [' 4. Dilation gray, secross ','dilate(f)'],\n [' 5. Dilation gray, non-flat 3x3 SE ','dilate(f,se)'],\n [' 6. Open bin, secross ','open(fbin)'],\n [' 7. Open gray-scale, secross ','open(f)'],\n [' 8. Open gray, non-flat 3x3 SE ','open(f,se)'],\n [' 9. Distance secross ','dist(fbin)'],\n ['10. Distance Euclidean ','dist(fbin,sebox(),\"euclidean\")'],\n ['11. Geodesic distance secross ','gdist(fbin,m)'],\n ['12. Geodesic distance Euclidean ','gdist(fbin,m,sebox(),\"euclidean\")'],\n ['13. Area open bin ','areaopen(fbin,100)'],\n ['14. Area open gray-scale ','areaopen(f,100)'],\n ['15. Label secross ','label(fbin)'],\n ['16. Regional maximum, secross ','regmax(f)'],\n ['17. Open by rec, gray, secross ','openrec(f)'],\n ['18. ASF by rec, oc, secross, 1 ','asfrec(f)'],\n ['19. Gradient, gray-scale, secross ','gradm(f)'],\n ['20. Thinning ','thin(fbin)'],\n ['21. Watershed ','cwatershed(f,fbin)']]\n result = zeros((21),'d')\n for t in xrange(len(tasks)):\n print tasks[t][0],tasks[t][1]\n t1=time()\n for k in xrange(count):\n a=eval(tasks[t][1])\n t2=time()\n result[t]= (t2-t1)/(count+0.0)\n print version() +' Benchmark'\n print 'Made on ',asctime(),' computer=',platform\n print 'image filename=',filename,' width=', f.shape[1],', height=',f.shape[0]\n print ' Function time (sec.)'\n for j in xrange(21):\n print tasks[j][0], result[j]\n print ' Average ', average(result)\n out=[]", "def blurry_degree(lambdas):\n return lambdas[0] / (numpy.sum(lambdas) + 0.001)", "def remove_negative_pixels(spectra, verbose = True):\n cuenta=0\n\n output=copy.deepcopy(spectra) \n for fibre in range(len(spectra)):\n vector_ = spectra[fibre] \n stats_=basic_statistics(vector_, return_data=True, verbose=False)\n #rss.low_cut.append(stats_[1])\n if stats_[1] < 0.:\n cuenta = cuenta + 1\n vector_ = vector_ - stats_[1]\n output[fibre] = [0. if x < 0. 
else x for x in vector_] \n \n if verbose: print(\"\\n> Found {} spectra for which the median value is negative, they have been corrected\".format(cuenta))\n return output", "def clip_boxes(self, box):\n return tf.clip_by_value(box, 0, self._output_size - 1)", "def mandel(x, y, max_iters):\n i = 0\n c = complex(x,y)\n z = 0.0j\n for i in range(max_iters):\n z = z*z + c\n if (z.real*z.real + z.imag*z.imag) >= 4:\n return i\n\n return 255", "def filter_boxes(self, boxes, box_confidence, box_class_probs):\n f_boxes = []\n b_classes = []\n b_scores = []\n for i in range(len(boxes)):\n boxscore = box_confidence[i] * box_class_probs[i]\n maxes = np.amax(boxscore, axis=3)\n keep = np.argwhere(maxes[:, :, :] >= self.class_t)\n\n for kept in keep:\n f_boxes.append(boxes[i][kept[0], kept[1], kept[2]])\n b_classes.append(np.argmax(boxscore[kept[0],\n kept[1], kept[2]]))\n b_scores.append(maxes[kept[0], kept[1], kept[2]])\n \"\"\" muchj easier in tf 2.x\n\n box_class = tf.argmax(boxscore, axis=-1)\n box_score = tf.math.reduce_max(boxscore, axis=-1)\n mask = boxscore >= self.class_t\n\n boxes = tf.compat.v1.boolean_mask(boxes, mask)\n scores = tf.compaat.v1.boolean_mask(boxscore, mask)\n classes = tf.compat.v1.boolean_mask(box_class, mask)\n\n f_boxes.append(boxes)\n b_classes.append(classes)\n b_scores.append(scores)\n \"\"\"\n filtered_boxes = np.array(f_boxes)\n box_classes = np.array(b_classes)\n box_scores = np.array(b_scores)\n return (filtered_boxes, box_classes, box_scores)", "def csrbf(r):\n return num.power((num.maximum(0, 1-r)), 3)*(3*r+1)", "def yolo_filter_boxes(box_confidence, boxes, box_class_probs, threshold=.6):\r\n\r\n # Step 1: Compute box scores\r\n ### START CODE HERE ### (≈ 1 line)\r\n box_scores = box_confidence * box_class_probs\r\n ### END CODE HERE ###\r\n\r\n # Step 2: Find the box_classes thanks to the max box_scores, keep track of the corresponding score\r\n ### START CODE HERE ### (≈ 2 lines)\r\n box_classes = tf.argmax(box_scores, axis=-1)\r\n box_class_scores = tf.reduce_max(box_scores, axis=-1)\r\n ### END CODE HERE ###\r\n\r\n # Step 3: Create a filtering mask based on \"box_class_scores\" by using \"threshold\". The mask should have the\r\n # same dimension as box_class_scores, and be True for the boxes you want to keep (with probability >= threshold)\r\n ### START CODE HERE ### (≈ 1 line)\r\n filtering_mask = box_class_scores >= threshold\r\n ### END CODE HERE ###\r\n\r\n # Step 4: Apply the mask to scores, boxes and classes\r\n ### START CODE HERE ### (≈ 3 lines)\r\n scores = tf.boolean_mask(box_class_scores, filtering_mask)\r\n boxes = tf.boolean_mask(boxes, filtering_mask)\r\n classes = tf.boolean_mask(box_classes, filtering_mask)\r\n ### END CODE HERE ###\r\n\r\n return scores, boxes, classes", "def valid(black, white, x, y):\n return (not black & gobit[(x, y)]) and (not white & gobit[(x, y)])", "def check_correctness_c01b(f):\n\n rng = np.random.RandomState([2013, 5, 6])\n batch_size = 5\n rows = 32\n cols = 30\n channels = 3\n pool_rows = 2\n pool_cols = 3\n\n # Do the python ground truth in b01c format\n zv = rng.randn(batch_size, rows, cols,\n channels).astype(config.floatX) * 1. 
- 1.5\n top_down_v = rng.randn(batch_size, rows / pool_rows, cols / pool_cols,\n channels).astype(config.floatX)\n\n p_np, h_np = max_pool_python(zv, (pool_rows, pool_cols), top_down_v)\n\n # Dimshuffle the inputs into c01b for the theano implementation\n z_th = T.TensorType(broadcastable=(False, False, False, False),\n dtype = config.floatX)()\n z_th.tag.test_value = zv\n z_th.name = 'z_th'\n zr = z_th.dimshuffle(3, 1, 2, 0)\n\n top_down_th = T.TensorType(broadcastable=(False, False, False, False),\n dtype = config.floatX)()\n top_down_th.name = 'top_down_th'\n top_down_th.tag.test_value = top_down_v\n top_down_r = top_down_th.dimshuffle(3, 1, 2, 0)\n\n p_th, h_th = f(zr, (pool_rows, pool_cols), top_down_r)\n\n func = function([z_th, top_down_th], [p_th.dimshuffle(3, 1, 2, 0),\n h_th.dimshuffle(3, 1, 2, 0)])\n\n pv, hv = func(zv, top_down_v)\n\n if not p_np.shape == pv.shape:\n raise AssertionError(str((p_np.shape, pv.shape)))\n assert h_np.shape == hv.shape\n if not np.allclose(h_np, hv):\n print((h_np.min(), h_np.max()))\n print((hv.min(), hv.max()))\n assert False\n if not np.allclose(p_np, pv):\n diff = abs(p_np - pv)\n print('max diff ', diff.max())\n print('min diff ', diff.min())\n print('ave diff ', diff.mean())\n assert False\n warnings.warn(\"TODO: make sampling tests run on c01b format of pooling.\")", "def obtain_filters_mask(model, threshold, cba_index, prune_index):\n\n num_pruned_bn = 0\n num_total_bn = 0\n num_remain_filters = []\n mask_remain_filters = []\n\n # The number of filters reserved must be a multiple of 8\n int_multiple = 8\n filter_switch = list(range(0, 1024, int_multiple))\n\n # cba_index stores all convolution layers with BN layer (the previous layer of YOLO layer is without BN layer)\n for index in cba_index:\n bn_module = model.module_list[index][1]\n if index in prune_index:\n mask = obtain_bn_mask(bn_module, threshold).cpu().numpy()\n num_layer_remain_bn = int(mask.sum())\n if num_layer_remain_bn < 8:\n layer_sort_bn = bn_module.weight.data.abs().clone()\n value_sort_bn = torch.sort(layer_sort_bn)[0]\n layer_threshold = value_sort_bn[-8]\n mask = obtain_bn_mask(bn_module, layer_threshold).cpu().numpy()\n else:\n for i, _ in enumerate(filter_switch):\n if num_layer_remain_bn < filter_switch[i]:\n num_layer_remain_bn = filter_switch[i - 1]\n break\n layer_sort_bn = bn_module.weight.data.abs().clone()\n value_sort_bn = torch.sort(layer_sort_bn)[0]\n layer_threshold = value_sort_bn[-num_layer_remain_bn]\n mask = obtain_bn_mask(bn_module, layer_threshold).cpu().numpy()\n\n num_remain_bn = int(mask.sum())\n num_pruned_bn = num_pruned_bn + mask.shape[0] - num_remain_bn\n\n if num_remain_bn == 0:\n print(\"Channels would be all pruned!\")\n raise Exception\n\n logger.info('layer index: %d \\t total channel: %d \\t remaining channel: %d',\n index, mask.shape[0], num_remain_bn)\n else:\n mask = np.ones(bn_module.weight.data.shape)\n num_remain_bn = mask.shape[0]\n num_total_bn += mask.shape[0]\n num_remain_filters.append(num_remain_bn)\n mask_remain_filters.append(mask.copy())\n\n prune_ratio = num_pruned_bn / num_total_bn\n logger.info('Prune channels: %d \\t Prune ratio: %.3f', num_pruned_bn, prune_ratio)\n\n return num_remain_filters, mask_remain_filters", "def remove_small_cc(binary, thres=10):\n cc, n_cc = measure.label(binary)\n binary2 = np.copy(binary)\n for n in range(1, n_cc + 1):\n area = np.sum(cc == n)\n if area < thres:\n binary2[cc == n] = 0\n return binary2", "def greedyNonMaximumSupression(boxlist,clipthresh=0.05,IOUthresh=0.5):\r\n 
NMSed_list=[]\r\n if len(boxlist)==0 or clipthresh>1:\r\n return NMSed_list\r\n \r\n # keep every box with largest score while doesn't overlap with all the other\r\n # boxes\r\n NMSed_list.append(boxlist[0])\r\n for i in range(1,len(boxlist)):\r\n keepflag=True\r\n \r\n if boxlist[i][4]<clipthresh:\r\n break # break when score of current box is lower than thresh\r\n else:\r\n #print('----NMS--{}----'.format(i))\r\n for j in range(len(NMSed_list)):\r\n iou=getIoU(boxlist[i],NMSed_list[j])\r\n #print(iou)\r\n if iou>IOUthresh:\r\n keepflag=False\r\n break\r\n if keepflag:\r\n NMSed_list.append(boxlist[i])\r\n \r\n return NMSed_list", "def classifyLinear(x,w,b=0):\n \n # print(x.shape)\n # print(np.transpose(w).shape)\n preds = np.matmul(x,np.transpose(w))\n preds = preds + b\n np.place(preds,preds==0,-1)\n preds = np.sign(preds)\n \n return preds", "def compute(self, node, input_vals):\r\n #start = time.time()\r\n #ans = np.zeros(input_vals[0].shape)\r\n #assert len(input_vals) == 3\r\n strides = node.const_attr\r\n ish = list(input_vals[0].shape)\r\n fsh = list(input_vals[1].shape)\r\n filter = input_vals[1].astype(float32)\r\n input = np.zeros((ish[0],ish[1]+fsh[0]-1,ish[2]+fsh[1]-1,ish[3])).astype(float32)\r\n input[:,fsh[0]//2:fsh[0]//2+ish[1]:1,fsh[1]//2:fsh[1]//2+ish[2]:1,:]+=input_vals[0].astype(float32)\r\n ish = list(input.shape)\r\n ans = np.zeros(tuple(ish)).astype(float32)\r\n #output = np.zeros([ish[0],(ish[1]-fsh[0])//strides[1]+1,(ish[2]-fsh[1])//strides[2]+1,fsh[3]])\r\n output_grad = input_vals[2].astype(float32)\r\n osh = output_grad.shape\r\n #print(fsh)\r\n #print(ish)\r\n assert c_kernel.conv2d_c_grad1(get_pointer(ans), ish[0],ish[1],ish[2],ish[3],get_pointer(filter),fsh[0],fsh[1],fsh[2],fsh[3],strides[0],strides[1],strides[2],strides[3],get_pointer(output_grad), osh[0],osh[1],osh[2],osh[3])==0\r\n ish = list(input_vals[0].shape)\r\n #end = time.time()\r\n\r\n #print(\"conv2d_grad1\") \r\n #print(end - start) \r\n return ans[:,fsh[0]//2:fsh[0]//2+ish[1]:1,fsh[1]//2:fsh[1]//2+ish[2]:1,:] \r\n '''\r\n rm = range(osh[0])\r\n ri = range(osh[1])\r\n rj = range(osh[2])\r\n rdi = range(fsh[0])\r\n rdj = range(fsh[1])\r\n for m in rm:\r\n for i in ri:\r\n for j in rj:\r\n for di in rdi:\r\n for dj in rdj:\r\n #print(input[m,strides[1]*i+di,strides[2]*j+dj,:].shape)\r\n #print(filter[di,dj,:,:])\r\n \"\"\"t = np.dot(\r\n input[m,strides[1]*i+di,strides[2]*j+dj,:],\r\n filter[di,dj,:,:]\r\n )\"\"\"\r\n #print(matB)\r\n #print(np.dot(matA , matB))\r\n print(np.array(output_grad[m,i,j]))\r\n print(np.array(np.array(filter[di,dj,:,:].T)))\r\n ans[m,strides[1]*i+di,strides[2]*j+dj,:]+= np.dot(np.array(output_grad[m,i,j].reshape((1,-1))),np.array(filter[di,dj,:,:].T)).reshape((-1,));\r\n \"\"\"output[m,i,j] = np.sum(\r\n [\r\n t,\r\n output[m,i,j]\r\n ],\r\n axis=0\r\n )\r\n \"\"\"\r\n #output += t\r\n ish = list(input_vals[0].shape)\r\n \r\n return ans[:,fsh[0]//2:fsh[0]//2+ish[1]:1,fsh[1]//2:fsh[1]//2+ish[2]:1,:]'''", "def fn(mask, k):\n if mask == 0: return 0 \n if k == 0: return inf\n ans = inf \n orig = mask \n while mask: \n mask = orig & (mask - 1)\n amt = sum(cookies[i] for i in range(n) if (orig ^ mask) & 1<<i)\n ans = min(ans, max(amt, fn(mask, k-1)))\n return ans", "def white(input_dim,variance=1.):\r\n part = parts.white.White(input_dim,variance)\r\n return kern(input_dim, [part])", "def test_BCGPLVM_rbf_bias_white_kern_2D(self):\n N, input_dim, D = 50, 1, 2\n X = np.random.rand(N, input_dim)\n k = GPy.kern.RBF(input_dim, 0.5, 0.9 * np.ones((1,))) + GPy.kern.Bias(input_dim, 
0.1) + GPy.kern.White(input_dim, 0.05)\n K = k.K(X)\n Y = np.random.multivariate_normal(np.zeros(N), K, input_dim).T\n m = GPy.models.BCGPLVM(Y, input_dim, kernel=k)\n self.assertTrue(m.checkgrad())", "def count_nonblack_np(img):\n return img.any(axis=-1).sum()", "def MovigAverageBinaryThresholding(image, kernel_sigma, n, b): \n\n if kernel_sigma >= 1:\n image = Denoising(image, kernel_sigma);\n \n nr, nc = image.shape;\n mask = np.zeros(image.shape);\n \n for ii in range(0, nr):\n for jj in range(0, nc):\n if jj < n:\n nb_mean = image[ii, 0:jj + 1].sum() / n;\n else:\n nb = image[ii, jj - n + 1:jj + 1];\n nb_mean = np.mean(nb);\n if image[ii, jj] > b * nb_mean:\n mask[ii, jj] = 1; \n \n return mask;" ]
[ "0.60105205", "0.5797661", "0.57564193", "0.57291317", "0.56770235", "0.56659347", "0.5652914", "0.5614355", "0.56009203", "0.5584697", "0.5571387", "0.5517217", "0.5487182", "0.5478953", "0.54679054", "0.5436867", "0.54337466", "0.53844154", "0.53843486", "0.5379643", "0.53631043", "0.5352488", "0.5346639", "0.5308947", "0.5307354", "0.5280224", "0.52772456", "0.5276802", "0.5267626", "0.5266875", "0.52612144", "0.5257082", "0.5255529", "0.52533334", "0.52509326", "0.5250048", "0.52414745", "0.5230127", "0.522199", "0.52211523", "0.52126026", "0.5210228", "0.5209406", "0.5203404", "0.5200779", "0.5199998", "0.5184408", "0.51780206", "0.51746786", "0.5174444", "0.5168013", "0.5157669", "0.51550555", "0.5154442", "0.5148755", "0.5148755", "0.51376945", "0.5136423", "0.51334697", "0.5122541", "0.51121575", "0.51062983", "0.51061267", "0.51034045", "0.50962365", "0.5093598", "0.50921214", "0.5089923", "0.5086704", "0.50814134", "0.5079709", "0.5069318", "0.50607353", "0.5058502", "0.50584215", "0.50574774", "0.5050395", "0.50494516", "0.50467706", "0.50448734", "0.5041271", "0.50388145", "0.5038083", "0.5036678", "0.50356066", "0.5016465", "0.501603", "0.50127214", "0.5012658", "0.5012568", "0.50086385", "0.5007748", "0.5002724", "0.49967593", "0.49938747", "0.49905464", "0.49831754", "0.4981893", "0.49815887", "0.49749014", "0.4974857" ]
0.0
-1
Delete the created network when site creation failed
def _delete_vpn(self, request, vpn): try: #api.quantum.network_delete(request, network.id) msg = _('Delete the created VPN "%s" ' 'due to site addition failure.') % vpn_name LOG.debug(msg) redirect = self.get_failure_url() messages.info(request, msg) raise exceptions.Http302(redirect) #return exceptions.RecoverableError except: msg = _('Failed to delete VPN %s') % vpn_id LOG.info(msg) redirect = self.get_failure_url() exceptions.handle(request, msg, redirect=redirect)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_delete_network(self):\n pass", "def delete_networks(self):\n logging.debug(\"cleanup called\")\n # for network in self.networks.key():\n # self.networks[network].delete()\n for network in self.networks.values():\n logging.warn(\"Deleting network '%s'\" % network)\n print \"Deleting network '%s'\" % network\n # print self.networks[network]\n network.delete()\n self.networks = {}", "def test_networking_project_network_delete(self):\n pass", "def network_delete_event(self, network_info):\n\n net_id = network_info['network_id']\n if net_id not in self.network:\n LOG.error(_LE('network_delete_event: net_id %s does not exist.'),\n net_id)\n return\n\n segid = self.network[net_id].get('segmentation_id')\n tenant_id = self.network[net_id].get('tenant_id')\n tenant_name = self.get_project_name(tenant_id)\n net = utils.Dict2Obj(self.network[net_id])\n if not tenant_name:\n LOG.error(_LE('Project %(tenant_id)s does not exist.'),\n {'tenant_id': tenant_id})\n self.update_network_db(net.id, constants.DELETE_FAIL)\n return\n\n try:\n self.dcnm_client.delete_network(tenant_name, net)\n # Put back the segmentation id into the pool.\n self.seg_drvr.release_segmentation_id(segid)\n\n # Remove entry from database and cache.\n self.delete_network_db(net_id)\n del self.network[net_id]\n snets = [k for k in self.subnet if (\n self.subnet[k].get('network_id') == net_id)]\n [self.subnet.pop(s) for s in snets]\n except dexc.DfaClientRequestFailed:\n LOG.error(_LE('Failed to create network %(net)s.'),\n {'net': net.name})\n self.update_network_db(net_id, constants.DELETE_FAIL)\n # deleting all related VMs\n instances = self.get_vms()\n instances_related = [k for k in instances if k.network_id == net_id]\n for vm in instances_related:\n LOG.debug(\"deleting vm %s because network is deleted\", vm.name)\n self.delete_vm_function(vm.port_id, vm)\n self.network_del_notif(tenant_id, tenant_name, net_id)", "def delete_network(self, network):\r\n return self.delete(self.network_path % (network))", "def network_cleanup(self, args):\n pass", "def delete_network_postcommit(self, context):\n if self.rpc_handler is None:\n return\n network = self._get_network_info(context._network)\n for _, _network in network.items():\n network_type = _network.get('network_type', '')\n if network_type not in CentecConstant.SUPPORTED_NETWORK_TYPES and len(CentecConstant.SUPPORTED_NETWORK_TYPES) > 0:\n return\n if network is not None:\n try:\n self.rpc_handler.delete_network(network)\n except:\n pass", "def run(self, network_create_args=None):\n network = self.neutron.create_network(**(network_create_args or {}))\n self.neutron.delete_network(network[\"id\"])", "def network_delete_end(self, payload):\n self.disable_dhcp_helper(payload['network_id'])", "def test_delete_cluster_network(self):\n pass", "def cleanup_networks(self):\n for network in self.networks:\n try:\n network.remove()\n network.client.api.close()\n network.client.close()\n self.log_message(\n f'{dateutils.get_current_time()} '\n f'destroying docker network {network}'\n )\n except Exception:\n self.log_message(\n f'{dateutils.get_current_time()} ERROR: Could not remove docker '\n f'network {network}'\n )\n self.networks.clear()", "def Delete(self):\n\n if self.network_id:\n self.cs.delete_network(self.network_id)\n\n if self.is_vpc and self.vpc_id:\n self.cs.delete_vpc(self.vpc_id)", "def delete_site(self, site):\n raise NotImplementedError('delete_site')", "def delete(self):\n \n logging.info(\"Deleting network %s\" % self.cloudnet)\n # res = 
cn.delete(self.cloudnet)\n res = self.cloudnet.delete()\n return res", "def cleanup(self):\n logging.debug(\"cleanup called\")\n self.delete_networks()\n self.delete_machines()", "def delete_network(options, vsm_obj):\n print(\"Disconnecting edge interface attached to this network\")\n edge_id = get_edge(vsm_obj)\n edge = Edge(vsm_obj, '4.0')\n edge.id = edge_id\n vnics = Vnics(edge)\n vnics_schema = vnics.query()\n network = get_network_id(options, get_network_name_on_vc(options))\n for vnic in vnics_schema.vnics:\n if network and vnic.portgroupId == network:\n print(\"Found a matching vnic %s %s\" % (options.name, vnic.index))\n vnic.isConnected = \"False\"\n vnic.portgroupId = None\n vnic.name = \"vnic%s\" % vnic.index\n vnics_schema = VnicsSchema()\n vnics_schema.vnics = [vnic]\n result = vnics.create(vnics_schema)\n if (result[0].response.status != 204):\n print \"update vnic error: %s %s\" \\\n % (result[0].response.status, result[0].response.reason)\n return False\n else:\n break\n else:\n print (\"No matching vnic found\")\n\n vdn_scope = get_transport_zone(options)\n virtual_wire = VirtualWire(vdn_scope)\n vwire = virtual_wire.read_by_name(get_network_name(options))\n name = get_network_name(options)\n if vwire != \"FAILURE\":\n print(\"Found a matching network %s\" % (options.name))\n virtual_wire.id = vwire.objectId\n result = virtual_wire.delete()\n if (result.response.status != 200):\n print (\"Delete vwire error: %s\" % result.response.reason)\n return False\n else:\n print (\"No matching network found\")\n print(\"Network %s deleted\" % (options.name))\n\n return True", "def test_networking_project_network_tag_delete(self):\n pass", "def post_delete_subnet(self, sender, instance, **kwargs):\n RecurseNetworks.delete_entries(subnet=str(instance.ip_network), net_name=instance.name)", "def dcnm_network_delete_event(self, network_info):\n seg_id = network_info.get('segmentation_id')\n if not seg_id:\n LOG.error(_LE('Failed to delete network. Invalid network '\n 'info %s.'), network_info)\n query_net = self.get_network_by_segid(seg_id)\n if not query_net:\n LOG.info(_LI('dcnm_network_delete_event: network %(segid)s '\n 'does not exist.'), {'segid': seg_id})\n return\n if self.fw_api.is_network_source_fw(query_net, query_net.name):\n LOG.info(_LI(\"Service network %s, returning\"), query_net.name)\n return\n # Send network delete request to neutron\n try:\n del_net = self.network.pop(query_net.network_id)\n self.neutronclient.delete_network(query_net.network_id)\n self.delete_network_db(query_net.network_id)\n except Exception as exc:\n # Failed to delete network.\n # Put back the entry to the local cache???\n self.network[query_net.network_id] = del_net\n LOG.exception(_LE('dcnm_network_delete_event: Failed to delete '\n '%(network)s. 
Reason %(err)s.'),\n {'network': query_net.name, 'err': str(exc)})", "def test_delete_collection_cluster_network(self):\n pass", "def delete_network_postcommit(self, context):\n for _switch in self.switches:\n self._remove_from_switch(_switch, context)", "def network_create_end(self, payload):\n network_id = payload['network']['id']\n self.enable_dhcp_helper(network_id)", "def delete_network(name, host, network_type):\n logging.info(\"Deleting %s '%s' from host '%s'\", network_type, name, host.name)\n\n try:\n if network_type.lower() == \"vswitch\":\n host.configManager.networkSystem.RemoveVirtualSwitch(name)\n elif network_type.lower() == \"portgroup\":\n host.configManager.networkSystem.RemovePortGroup(name)\n except vim.fault.NotFound:\n logging.error(\"Tried to remove %s '%s' that does not exist from host '%s'\",\n network_type, name, host.name)\n except vim.fault.ResourceInUse:\n logging.error(\"%s '%s' can't be removed because there are vNICs associated with it\",\n network_type, name)", "def pre_virtual_network_delete(self, resource_id):\n pass", "def delete_network(session, name):\n # type: (Session, Text) -> None\n url_tail = f\"/{CoordConstsV2.RSC_NETWORKS}/{name}\"\n return _delete(session, url_tail)", "def _retry_delete_network(self, context, network_id):\n first_try = True\n while True:\n try:\n with db_api.CONTEXT_WRITER.using(context):\n self._process_l3_delete(context, network_id)\n return super(NsxPluginV3Base, self).delete_network(\n context, network_id)\n except n_exc.NetworkInUse:\n # There is a race condition in delete_network() that we need\n # to work around here. delete_network() issues a query to\n # automatically delete DHCP ports and then checks to see if any\n # ports exist on the network. If a network is created and\n # deleted quickly, such as when running tempest, the DHCP agent\n # may be creating its port for the network around the same time\n # that the network is deleted. This can result in the DHCP\n # port getting created in between these two queries in\n # delete_network(). 
To work around that, we'll call\n # delete_network() a second time if we get a NetworkInUse\n # exception but the only port(s) that exist are ones that\n # delete_network() is supposed to automatically delete.\n if not first_try:\n # We tried once to work around the known race condition,\n # but we still got the exception, so something else is\n # wrong that we can't recover from.\n raise\n first_try = False\n if self._has_active_port(context, network_id):\n # There is a port on the network that is not going to be\n # automatically deleted (such as a tenant created port), so\n # we have nothing else to do but raise the exception.\n raise", "def delete_net(self, net_id):\n LOG_OBJ.debug(\"Deleting network %s\" % net_id)\n _url = \"http://\" + self.host_ip + \":9696/v2.0/networks/\" + \\\n net_id + \".json\"\n _headers = {'Content-type': 'application/json',\n 'x-auth-token': self.project_info[\"token_project\"]}\n _body = None\n\n response = self.request(\"DELETE\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\"No response from Server while deleting net:%s\" %\n net_id)\n return response\n\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Deletion of Network Failed with status %s \" %\n response.status)\n return response.status\n\n LOG_OBJ.info(\"Deleted the network : %s \" % net_id)\n return True", "def delete_network(self, network_o):\n tenant_mo = self.moDir.lookupByDn(network_o.group)\n\n # Filters the tenant children in memory looking for the ones that belongs to the Ap class with an specific name\n ap_list = filter(lambda x: type(x).__name__ == 'Ap' and x.name == AP_NAME,\n self.query_child_objects(str(tenant_mo.dn)))\n if len(ap_list) > 0:\n network_ap = ap_list[0]\n # Filters the tenant children in memory looking for the ones that belongs to the AEPg\n # class with an specific name\n network_epgs = filter(lambda x: type(x).__name__ == 'AEPg' and x.name == network_o.name + VLAN_SUFIX +\n str(network_o.encapsulation),\n self.query_child_objects(str(network_ap.dn)))\n # Removes EPG\n if len(network_epgs) > 0:\n network_epgs[0].delete()\n self.commit(network_epgs[0])\n\n # Filters the tenant children in memory looking for the ones that belongs to the BD class and with an specific\n # name\n bd_list = filter(lambda x: type(x).__name__ == 'BD' and x.name == VLAN + str(network_o.encapsulation),\n self.query_child_objects(str(tenant_mo.dn)))\n if len(bd_list) > 0:\n # Removes bridge domain\n bd_list[0].delete()\n self.commit(bd_list[0])", "def post_virtual_network_delete(self, resource_id, resource_dict):\n pass", "def filter_create_network_attributes(network, context):\n try_del(network, ['status', 'subnets'])", "def delete(self, oid):\n path = '%s/networks/%s' % (self.ver, oid)\n res = self.client.call(path, 'DELETE', data='', \n token=self.manager.identity.token)\n self.logger.debug('Delete openstack network: %s' % truncate(res))\n return res[0]", "def test_destroy_nas_share(self):\n pass", "def _delete_network_vm(args):\n libvirtConn = libvirt.openReadOnly(None)\n if libvirtConn is None:\n print('Cannot contact hypervisor', file=sys.stderr)\n return 1\n net = None\n try:\n net = libvirtConn.networkLookupByName(args.network_name)\n except libvirt.libvirtError:\n print('Cannot find network named [%s]' % args.network_name, file=sys.stderr)\n return 1\n print('Network found:\\n')\n print(xml.dom.minidom.parseString(net.XMLDesc()).toprettyxml(indent=\" \", newl=''))\n print('')\n\n if not args.yes:\n if not input('Really destroy this network 
?').strip().lower() in ('y', 'yes'):\n return 1\n return oci_utils.kvm.virt.delete_virtual_network(network_name=args.network_name)", "def delete_networks(self, skip_list=None):\n skip_list = skip_list or []\n for account in self.accounts:\n rc, rsp = self.cal.get_virtual_link_list(account)\n\n for vlink in rsp.virtual_link_info_list:\n if vlink.name in skip_list:\n continue\n if self.user not in vlink.name:\n continue\n logger.info(\"Deleting Network: {}\".format(vlink.name))\n if self.dry_run:\n continue\n self.cal.delete_virtual_link(\n account,\n vlink.virtual_link_id)", "def testDeleteNetworkAuth(self):\n response = self._delete('inventory/networks/1/')\n self.assertEquals(response.status_code, 401)\n\n response = self._delete('inventory/networks/1/',\n username=\"testuser\", password=\"password\")\n self.assertEquals(response.status_code, 403)", "def site_delete(self, name):\n self.core.api.os.shell.cmd('{0} delete site \"{1}\"'.format(self.APP_CMD, name))", "def test_delete_net_namespace(self):\n pass", "def delete_network_postcommit(self, mech_context):\n\n LOG.debug(\"delete_network_postcommit: called\")\n network = mech_context.current\n network_id = network['id']\n vlan_id = network['provider:segmentation_id']\n tenant_id = network['tenant_id']\n\n for switch_ip in self._switch:\n try:\n system = self.client[switch_ip].system.list()\n system[0].remove_segment(vlan_id)\n except seamicro_client_exception.ClientException as ex:\n LOG.exception(_LE(\"SeaMicr driver: failed to delete network\"\n \" with the following error: %(error)s\"),\n {'error': ex.message})\n raise Exception(\n _(\"Seamicro switch exception, delete_network_postcommit\"\n \" failed\"))\n\n LOG.info(_LI(\"delete network (postcommit): %(network_id)s\"\n \" with vlan = %(vlan_id)s\"\n \" for tenant %(tenant_id)s on switch %(switch_ip)s\"),\n {'network_id': network_id,\n 'vlan_id': vlan_id,\n 'tenant_id': tenant_id,\n 'switch_ip': switch_ip})", "def deleteNodeNetworkConfig(self,node):\n data = self.connect('delete',\"nodes/%s/network\" % (node),None)\n return data", "def delete_host(self, conf, tenant_id, network_id, host_id):\n\t\tpass", "def rm_network(c):\n print('Stopping local test network and removing containers')\n with c.cd('images'):\n c.run('sudo docker-compose down -v', hide='stderr')\n\n c.run('sudo rm -rf volumes/stellar-core/opt/stellar-core/buckets')\n c.run('sudo rm -f volumes/stellar-core/opt/stellar-core/*.log')\n c.run('sudo rm -rf volumes/stellar-core/tmp')", "def delete_overlay_network(self, name=NETWORK_NAME):\n try:\n # An overlay network is usually created in host belonging to a swarm\n self.leave_swarm()\n network = self.docker_client.networks.get(name)\n network.remove()\n except docker.errors.NotFound as nf:\n print(\"Network \"+name+\" not found\")\n except docker.errors.APIError as de:\n print(\"Error deleting overlay network\")\n print de\n exit(1)\n return", "def reset_network(self, instance):\n LOG.debug(\"reset_network\")\n return", "def delete_network_profile(arn=None):\n pass", "def delete_network_profile(self, profile):\r\n return self.delete(self.network_profile_path % profile)", "def network_create_event(self, network_info):\n net = network_info['network']\n net_id = net['id']\n net_name = net.get('name')\n network_db_elem = self.get_network(net_id)\n # Check if the source of network creation is FW and if yes, skip\n # this event.\n # Check if there's a way to read the DB from service class\n # TODO(padkrish)\n if self.fw_api.is_network_source_fw(network_db_elem, net_name):\n 
LOG.info(_LI(\"Service network %s, returning\"), net_name)\n return\n self.network[net_id] = {}\n self.network[net_id].update(net)\n\n net_name = net.get('name')\n tenant_id = net.get('tenant_id')\n\n # Extract segmentation_id from the network name\n net_ext_name = self.cfg.dcnm.dcnm_net_ext\n nobj = re.search(net_ext_name, net_name)\n try:\n seg_id = int((net_name[nobj.start(0) + len(net_ext_name) - 1:]\n if nobj else None))\n except (IndexError, TypeError, ValueError):\n seg_id = None\n\n # Check if network is already created.\n query_net = self.get_network_by_segid(seg_id) if seg_id else None\n if query_net:\n # The network is already created no need to process the event.\n if query_net.source.lower() == 'dcnm':\n # DCNM created the network. Only update network id in database.\n prev_id = query_net.network_id\n params = dict(columns=dict(network_id=net_id))\n self.update_network(prev_id, **params)\n\n # Update the network cache.\n prev_info = self.network.pop(prev_id)\n prev_info['id'] = net_id\n self.network[net_id] = prev_info\n\n # Update the network name. After extracting the segmentation_id\n # no need to keep it in the name. Removing it and update\n # the network.\n updated_net_name = (\n net_name[:nobj.start(0) + len(net_ext_name) - 1])\n try:\n body = {'network': {'name': updated_net_name, }}\n dcnm_net = self.neutronclient.update_network(\n net_id, body=body).get('network')\n LOG.debug('Updated network %(network)s', dcnm_net)\n except Exception as exc:\n LOG.exception(_LE('Failed to update network '\n '%(network)s. Reason %(err)s.'),\n {'network': updated_net_name,\n 'err': str(exc)})\n return\n\n LOG.info(_LI('network_create_event: network %(name)s was created '\n 'by %(source)s. Ignoring processing the event.'),\n {'name': net_name, 'source': 'dcnm'})\n return\n\n # Check if project (i.e. tenant) exist.\n tenant_name = self.get_project_name(tenant_id)\n if not tenant_name:\n LOG.error(_LE('Failed to create network %(name)s. 
Project '\n '%(tenant_id)s does not exist.'),\n {'name': net_name, 'tenant_id': tenant_id})\n return\n\n pseg_id = self.network[net_id].get('provider:segmentation_id')\n seg_id = self._get_segmentation_id(net_id, pseg_id, 'openstack')\n self.network[net_id]['segmentation_id'] = seg_id\n try:\n cfgp, fwd_mod = self.dcnm_client.get_config_profile_for_network(\n net.get('name'))\n self.network[net_id]['config_profile'] = cfgp\n self.network[net_id]['fwd_mod'] = fwd_mod\n self.add_network_db(net_id, self.network[net_id],\n 'openstack',\n constants.RESULT_SUCCESS)\n LOG.debug('network_create_event: network=%s', self.network)\n except dexc.DfaClientRequestFailed:\n # Fail to get config profile from DCNM.\n # Save the network info with failure result and send the request\n # to DCNM later.\n self.add_network_db(net_id, self.network[net_id], 'openstack',\n constants.CREATE_FAIL)\n LOG.error(_LE('Failed to create network=%s.'), self.network)", "def cleanup(self, context, instance, network_info, block_device_info=None,\n destroy_disks=True):\n pass", "def test_delete_hyperflex_cluster_network_policy(self):\n pass", "def stop_network(self):\n self.net.stop()\n cleanup()", "def delete_network(self, tenant_id, network_id, network_segments):\n self.delete_network_segments(tenant_id, network_segments)\n self.delete_network_bulk(tenant_id, [network_id])", "def delete(self): \n params = {'command':'deleteNetwork',\n 'id':self.id}\n \n self.logger.debug('Remove network %s' % self.name)\n \n try:\n response = self.send_request(params)\n res = json.loads(response)\n clsk_job_id = res['deletenetworkresponse']['jobid']\n self.logger.debug('Start job over %s.%s - %s: %s' % (\n self._obj_type, self.name, \n 'deleteNetwork', res))\n return clsk_job_id\n except KeyError as ex :\n self.logger.error('Error parsing json data: %s' % ex)\n raise ClskError('Error parsing json data: %s' % ex)\n except ApiError as ex:\n self.logger.error(ex)\n raise ClskError(ex)", "def test_destroy_nas_share_by_nas(self):\n pass", "def resource_cleanup(self):\n for lb in self.loadbalancers:\n self.octavia_client.load_balancer_delete(lb['id'], cascade=True)\n try:\n self.wait_for_lb_resource(\n self.octavia_client.load_balancer_show, lb['id'],\n provisioning_status='DELETED')\n except osc_lib.exceptions.NotFound:\n pass\n for fip in self.fips:\n self.neutron_client.delete_floatingip(fip)\n # we run the parent resource_cleanup last as it will remove instances\n # referenced as members in the above cleaned up load balancers\n super(LBAASv2Test, self).resource_cleanup()", "def test_destroy_nas_share_by_pool(self):\n pass", "def postShutdown(self):\r\n self._network.cleanUp()\r\n self._balancer.cleanUp()\r\n self._distributor.cleanUp()", "def _cleanup_deleted_nics(self):\n try:\n nics = self.network.network_interfaces.list(\n CONF.azure.resource_group)\n except Exception as e:\n msg = six.text_type(e)\n LOG.exception(msg)\n return\n residual_ids = [i.name for i in nics if not i.virtual_machine]\n to_delete_ids = set(self.residual_nics) & set(residual_ids)\n self.residual_nics = list(set(self.residual_nics) | set(residual_ids))\n if not to_delete_ids:\n LOG.info(_LI('No residual nic in Azure'))\n return\n for i in to_delete_ids:\n try:\n self.network.network_interfaces.delete(\n CONF.azure.resource_group, i\n )\n except Exception as e:\n LOG.warning(_LW(\"Unable to delete network_interfaces \"\n \"%(nic)s in Azure because %(reason)s\"),\n dict(nic=i,\n reason=six.text_type(e)))\n else:\n self.residual_nics.remove(i)\n LOG.info(_LI('Delete 
residual Nic: %s in Azure'), i)\n else:\n LOG.info(_LI('Delete all residual Nics in Azure'))", "def pre_network_ipam_delete(self, resource_id):\n pass", "def remove_machine(self, url):\n\n model = TestMachine.objects.filter(url=url).first()\n if model:\n self.deactivate_model(model)\n print \"Removed test machine: %s\" % url", "def test_delete_collection_host_subnet(self):\n pass", "def test_delete_namespaced_egress_network_policy(self):\n pass", "def test_delete_host_subnet(self):\n pass", "def test_03_network_create(self):\n # Validate the following\n # 1. Create a project.\n # 2. Add virtual/direct network resource to the project. User shared\n # network resource for the project\n # 3. Verify any number of Project level Virtual/Direct networks can be\n # created and used for vm deployment within the project.\n # 4. Verify shared networks (zone and domain wide) from outside the\n # project can also be used in a project.\n\n # Create project as a domain admin\n project = Project.create(\n self.apiclient,\n self.services[\"project\"],\n account=self.account.name,\n domainid=self.account.domainid\n )\n # Cleanup created project at end of test\n self.cleanup.append(project)\n self.debug(\"Created project with domain admin with ID: %s\" %\n project.id)\n\n network_offerings = list_network_offerings(\n self.apiclient,\n projectid=project.id,\n supportedServices='SourceNat',\n type='isolated',\n state='Enabled'\n )\n self.assertEqual(\n isinstance(network_offerings, list),\n True,\n \"Check for the valid network offerings\"\n )\n network_offering = network_offerings[0]\n\n self.debug(\"creating a network with network offering ID: %s\" %\n network_offering.id)\n self.services[\"network\"][\"zoneid\"] = self.zone.id\n network = Network.create(\n self.apiclient,\n self.services[\"network\"],\n networkofferingid=network_offering.id,\n projectid=project.id\n )\n self.debug(\"Created network with ID: %s\" % network.id)\n networks = Network.list(\n self.apiclient,\n projectid=project.id,\n listall=True\n )\n self.assertEqual(\n isinstance(networks, list),\n True,\n \"Check for the valid network list response\"\n )\n\n self.debug(\"Deploying VM with network: %s\" % network.id)\n\n virtual_machine = VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n templateid=self.template.id,\n networkids=[str(network.id)],\n serviceofferingid=self.service_offering.id,\n projectid=project.id\n )\n self.debug(\"Deployed VM with ID: %s\" % virtual_machine.id)\n # Verify VM state\n self.assertEqual(\n virtual_machine.state,\n 'Running',\n \"Check VM state is Running or not\"\n )\n\n network_offerings = list_network_offerings(\n self.apiclient,\n state='Enabled',\n guestiptype='Shared',\n name='DefaultSharedNetworkOffering',\n displaytext='Offering for Shared networks'\n )\n self.assertEqual(\n isinstance(network_offerings, list),\n True,\n \"Check for the valid network offerings\"\n )\n network_offering = network_offerings[0]\n\n self.debug(\"creating a shared network in domain: %s\" %\n self.domain.id)\n\n # Getting physical network and free vlan in it\n physical_network, vlan = get_free_vlan(self.apiclient, self.zone.id)\n\n self.services[\"domain_network\"][\"vlan\"] = vlan\n self.services[\"domain_network\"][\"physicalnetworkid\"] = physical_network.id\n\n # Generating random subnet number for shared network creation\n shared_network_subnet_number = random.randrange(1,254)\n\n self.services[\"domain_network\"][\"gateway\"] = \"172.16.\"+str(shared_network_subnet_number)+\".1\"\n 
self.services[\"domain_network\"][\"startip\"] = \"172.16.\"+str(shared_network_subnet_number)+\".2\"\n self.services[\"domain_network\"][\"endip\"] = \"172.16.\"+str(shared_network_subnet_number)+\".20\"\n\n domain_network = Network.create(\n self.apiclient,\n self.services[\"domain_network\"],\n domainid=self.domain.id,\n networkofferingid=network_offering.id,\n zoneid=self.zone.id\n )\n self.cleanup.append(domain_network)\n self.debug(\"Created network with ID: %s\" % domain_network.id)\n\n virtual_machine = VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n templateid=self.template.id,\n networkids=[str(domain_network.id)],\n serviceofferingid=self.service_offering.id,\n projectid=project.id\n )\n self.debug(\"Deployed VM with ID: %s\" % virtual_machine.id)\n # Verify VM state\n self.assertEqual(\n virtual_machine.state,\n 'Running',\n \"Check VM state is Running or not\"\n )\n\n # Delete VM before network gets deleted in cleanup\n virtual_machine.delete(self.apiclient, expunge=True)\n return", "def test_delete_network_from_dhcp_agent(self):\n network_id = self._create_and_prepare_network_for_agent(\n self.agent['id'])\n self.agents_client.add_dhcp_agent_to_network(\n self.agent['id'], network_id=network_id)\n # Clean up is not necessary and might result in 409 being raised.\n\n with self.override_role():\n self.agents_client.delete_network_from_dhcp_agent(\n self.agent['id'], network_id=network_id)", "def post_network_ipam_delete(self, resource_id, resource_dict):\n pass", "def test_delete_collection_namespaced_egress_network_policy(self):\n pass", "def pre_delete_network(sender, instance, **kwargs):\n # Disable the pre_delete signal for centroids (the signal is useless\n # because the links are already deleted but it slows down the deleting\n # process).\n pre_delete.disconnect(sender=Centroid, dispatch_uid=\"centroid\")\n instance.centroid_set.all().delete()\n # Enable the pre_delete signal again.\n pre_delete.connect(pre_delete_centroid, sender=Centroid)\n pre_delete.disconnect(sender=Crossing, dispatch_uid=\"crossing\")\n instance.crossing_set.all().delete()\n pre_delete.connect(pre_delete_crossing, sender=Crossing)", "def test_networking_project_network_create(self):\n pass", "def deleteNetwork(self, session: Session, id_: str):\n try:\n return NetworkManager().deleteNetwork(session, id_)\n except TortugaException as ex:\n raise\n except Exception as ex:\n self._logger.exception(str(ex))\n raise TortugaException(exception=ex)", "def create_network_postcommit(self, context):\n if self.rpc_handler is None:\n return\n network = self._get_network_info(context._network)\n for _, _network in network.items():\n network_type = _network.get('network_type', '')\n if network_type not in CentecConstant.SUPPORTED_NETWORK_TYPES and len(CentecConstant.SUPPORTED_NETWORK_TYPES) > 0:\n return\n if network is not None:\n try:\n self.rpc_handler.create_network(network)\n except:\n pass", "def destroy(self):\n if hasattr(self, 'vistrailsStartup'):\n self.vistrailsStartup.destroy()", "def test_delete__network(self):\n arglist = [\n '--network',\n self.projects[0].id,\n ]\n verifylist = [\n ('service', 'network'),\n ('project', self.projects[0].id),\n ]\n\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n\n result = self.cmd.take_action(parsed_args)\n\n self.assertIsNone(result)\n self.projects_mock.get.assert_called_once_with(self.projects[0].id)\n self.compute_quotas_mock.delete.assert_not_called()\n self.volume_quotas_mock.delete.assert_not_called()\n 
self.network_mock.delete_quota.assert_called_once_with(\n self.projects[0].id,\n )", "def remove_network(self, name_of_vm):\n try:\n # vmachine = self.get_vm_by_name(name_of_vm)\n vmachine = self.get_dc_object([vim.VirtualMachine], name_of_vm)\n network = None\n devices = vmachine.config.hardware.device\n networks = []\n for device in devices:\n if isinstance(device, vim.vm.device.VirtualEthernetCard):\n networks.append(device)\n status = 'error'\n if not networks:\n log.info(\"INFO: No network adapters connected to the VM to remove\")\n status = 'success'\n else:\n for network in networks:\n name = network.deviceInfo.label\n nic_spec = vim.vm.device.VirtualDeviceSpec()\n nic_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove\n nic_spec.device = network\n remove_nic = vim.vm.ConfigSpec()\n remove_nic.deviceChange = [nic_spec]\n task = WaitForTask(vmachine.ReconfigVM_Task(spec=remove_nic))\n\n if task == 'success':\n log.info(\"removed '{}' network adapter : {}\".format(name, name_of_vm))\n else:\n log.info(\"Could not '{}' Remove Network adapter: {}\".format(name, name_of_vm))\n status = 'success'\n return status\n except Exception as error:\n log.info(\"Error in 'remove_nic' keyword... {} \\n {}\".format(error, error.message))", "def delete_machines(self):\n logging.debug(\"delete_machines called\")\n \n for machine in self.machines:\n logging.warn(\"Deleting %s\" % machine)\n print \"Deleting %s\" % machine\n cs.servers.delete(self.machines[machine])", "def testPutNetworkNotFound(self):\n try:\n response = self._put('inventory/networks/1zcvxzvzgvsdzfewrew4t4tga34/',\n data=testsxml.network_put_xml,\n username=\"testuser\", password=\"password\")\n self.assertEquals(response.status_code, 404)\n except TemplateDoesNotExist, e:\n # might not have template, so check for 404 in error\n self.assertTrue(\"404\" in str(e))", "def tearDown(self):\n self.labGroup.delete()", "def delete_network_bulk(self, tenant_id, network_id_list, sync=False):", "def delete_cluster(self):", "def delete(self):\n self._lbcall('delete_pool', [self._name])", "def testInvalidNetworkInfo(self):\n ### create test resources\n instance_name = \"end-to-end-test-instance-1\"\n instance_selfLink = \\\n self.test_resource_creator.create_instance_using_template(\n instance_name,\n self.test_resource_creator.legacy_instance_template_selfLink)[\n 'targetLink']\n original_config = self.google_api_interface.get_instance_configs(\n instance_name)\n ### start migration\n selfLink_executor = SelfLinkExecutor(self.compute, instance_selfLink,\n 'an-invalid-network-for-testing',\n self.test_resource_creator.subnetwork_name,\n True)\n with self.assertRaises(Exception):\n migration_handler = selfLink_executor.build_migration_handler()\n migration_handler.network_migration()\n\n ### check result\n # Terminate before migration starts\n new_config = self.google_api_interface.get_instance_configs(\n instance_name)\n self.assertEqual(original_config, new_config)\n print('Pass the current test')", "def stop(self):\n logging.debug(\"Network.stop entered:\" + str(self.id))\n # print self.cloudnet\n # res = cn.delete(self.cloudnet)\n notify(\"Stopping network %s\" % self.name)\n # if not self.cloudnet:\n # \n # self.cloudnet = cn.find(id=\"52a24319-f58d-4795-a3bd-c22d87bb65ae\")\n if self.cloudnet:\n res = self.cloudnet.delete()\n else:\n res = True\n return res", "def teardown_local_site(self):\n site1 = Session(SITE1_URL, SITE1_LOGIN, SITE1_PASSWORD)\n resp = site1.login()\n if not resp.ok:\n print resp, resp.text\n 
self.assertTrue(resp.ok)\n\n tenant = Tenant('intersite-testsuite')\n tenant.mark_as_deleted()\n\n resp = tenant.push_to_apic(site1)\n self.assertTrue(resp.ok)", "def _validate_duplicate_network(old_networks, new_networks):\n for network in new_networks:\n if network in old_networks:\n raise Exception('Network name {0} already exists. Cannot add '\n 'new networks. Choose uniqe network names and '\n 'run the command again'.format(network))", "def teardown_remote_site(self):\n site2 = Session(SITE2_URL, SITE2_LOGIN, SITE2_PASSWORD)\n resp = site2.login()\n self.assertTrue(resp.ok)\n\n tenant = Tenant('intersite-testsuite')\n tenant.mark_as_deleted()\n\n resp = tenant.push_to_apic(site2)\n self.assertTrue(resp.ok)\n time.sleep(2)", "def post_network_policy_delete(self, resource_id, resource_dict):\n pass", "def delete_ipsec_site_connection(self, ipsecsite_conn):\r\n return self.delete(self.ipsec_site_connection_path % (ipsecsite_conn))", "def test_delete_hyperflex_server_model(self):\n pass", "def teardown_local_site(self):\n site1 = Session(SITE1_URL, SITE1_LOGIN, SITE1_PASSWORD)\n resp = site1.login()\n self.assertTrue(resp.ok)\n\n tenant = Tenant('intersite-testsuite-local')\n tenant.mark_as_deleted()\n\n resp = tenant.push_to_apic(site1)\n self.assertTrue(resp.ok)", "def teardown_local_site(self):\n site1 = Session(SITE1_URL, SITE1_LOGIN, SITE1_PASSWORD)\n resp = site1.login()\n self.assertTrue(resp.ok)\n\n tenant = Tenant('intersite-testsuite-local')\n tenant.mark_as_deleted()\n\n resp = tenant.push_to_apic(site1)\n self.assertTrue(resp.ok)", "def test_get_unregistered_networks(self):\n pass", "def dhcp_agent_network_remove(self, dhcp_net_info):\n self.turn_on_dhcp_check()", "def _die(self):\n\t\tself.site.agents_in_site.remove(self)\n\t\tself.site = None\n\t\tif self.debt_link != None:\n\t\t\tself.debt_link.lender.loans.remove(self.debt_link)\n\t\t\tself.debt_link = None\n\t\tfor l, loan in enumerate(self.loans):\n\t\t\tloan.borrower.debt_link = None\n\t\t\tdel self.loans[l]\n\t\tif self.gift_link != None:\n\t\t\tself.gift_link.giver.gifts.remove(self.gift_link)\n\t\t\tself.gift_link = None\n\t\tfor g, gift in enumerate(self.gifts):\n\t\t\tgift.taker.gift_link = None\n\t\t\tdel self.gifts[g]\n\t\tself.agents_list.remove(self)", "def post_virtual_network_create(self, resource_dict):\n pass", "def deleteSocialAuthentication(self, network):\n\t\turl = \"https://habitica.com/api/v3/user/auth/social/\" + network\n\t\treturn(deleteUrl(url, self.credentials))", "def run(self):\n tenant_id = self.context[\"tenant\"][\"id\"]\n users = self.context[\"tenants\"][tenant_id][\"users\"]\n number = users.index(self.context[\"user\"])\n for network in self.context[\"tenants\"][tenant_id][\"networks\"]:\n # delete one of subnets based on the user sequential number\n subnet_id = network[\"subnets\"][number]\n self.neutron.delete_subnet(subnet_id)", "def _destroy(self):\r\n if self._client:\r\n self._client.returnNr(self._nr)\r\n self._client.unregisterContainer(self)\r\n self._client = None\r\n\r\n if self._confDir:\r\n shutil.rmtree(self._confDir, True)\r\n self._confDir = None\r\n\r\n if self._dataDir:\r\n shutil.rmtree(self._dataDir, True)\r\n self._dataDir = None", "def test_delete_non_existing(created_test_helper, request):\n # delete all files from listed files\n response = created_test_helper.delete_single(-1)\n\n # Validate returned json contains right error\n created_test_helper.validate_response_json(request.node.name, response)", "def teardown_remote_site(self):\n site2 = Session(SITE2_URL, 
SITE2_LOGIN, SITE2_PASSWORD)\n resp = site2.login()\n self.assertTrue(resp.ok)\n\n tenant = Tenant('intersite-testsuite-remote')\n tenant.mark_as_deleted()\n\n resp = tenant.push_to_apic(site2)\n self.assertTrue(resp.ok)", "def teardown_remote_site(self):\n site2 = Session(SITE2_URL, SITE2_LOGIN, SITE2_PASSWORD)\n resp = site2.login()\n self.assertTrue(resp.ok)\n\n tenant = Tenant('intersite-testsuite-remote')\n tenant.mark_as_deleted()\n\n resp = tenant.push_to_apic(site2)\n self.assertTrue(resp.ok)", "def pre_network_policy_delete(self, resource_id):\n pass", "def __delete_existing_connection(self, ssid):\n conns = []\n try:\n conns = self.settings.ListConnections()\n except dbus.exceptions.DBusException:\n print(Messages.dbus_error)\n exit(3)\n for each in conns:\n con_proxy = self.bus.get_object(self.system_service_name, each)\n connection = dbus.Interface(\n con_proxy,\n \"org.freedesktop.NetworkManager.Settings.Connection\")\n try:\n connection_settings = connection.GetSettings()\n if connection_settings['connection']['type'] == '802-11-' \\\n 'wireless':\n conn_ssid = byte_to_string(\n connection_settings['802-11-wireless']['ssid'])\n if conn_ssid == ssid:\n debug(\"deleting connection: \" + conn_ssid)\n connection.Delete()\n except dbus.exceptions.DBusException:\n pass" ]
[ "0.67859447", "0.6550001", "0.65075684", "0.64984155", "0.646287", "0.6276366", "0.61979026", "0.6197258", "0.61300117", "0.6093675", "0.6090884", "0.5948053", "0.5943114", "0.5920346", "0.5890928", "0.58596253", "0.5844363", "0.5840034", "0.58133364", "0.5790481", "0.5786879", "0.574909", "0.57300395", "0.5709587", "0.57027507", "0.5680949", "0.5650212", "0.5606252", "0.55930626", "0.55887276", "0.55805266", "0.5580343", "0.5573062", "0.5525367", "0.5519206", "0.5517354", "0.5511533", "0.5505184", "0.5491184", "0.5490948", "0.5489121", "0.54800165", "0.54698825", "0.545843", "0.5451029", "0.5435958", "0.54208606", "0.54205656", "0.5415276", "0.54011923", "0.53929144", "0.53565544", "0.5350938", "0.5348107", "0.5344351", "0.5340536", "0.5334139", "0.53316087", "0.5331414", "0.53278106", "0.5301954", "0.52965057", "0.52864504", "0.52861756", "0.5276719", "0.5275222", "0.5265328", "0.5261918", "0.5258438", "0.524244", "0.5239948", "0.5229732", "0.51994914", "0.5197408", "0.5195177", "0.51909816", "0.518797", "0.51873875", "0.5163783", "0.5163019", "0.516183", "0.5158239", "0.51539636", "0.5143504", "0.513302", "0.51328534", "0.5124924", "0.5124924", "0.5122272", "0.51161015", "0.5112773", "0.51101255", "0.5100066", "0.5098211", "0.5087211", "0.5085345", "0.5079187", "0.5079187", "0.50746953", "0.507054" ]
0.53581345
51
Take JSON as string returned from a Playfield API request and parse data section into list of dicts {field_name=data}
def parse_json(self, json_to_parse): json_obj = json.loads(json_to_parse) return_data = [] for row in json_obj['data']: row_dict = {} for key, value in row.items(): row_dict[key] = value return_data.append(row_dict) return return_data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dig_fields(json_data):\n data = json.loads(json_data)\n fields = [f for f in data]\n return fields", "def parse(data, datetime_field=None):\n\n parsed_data = json.loads(data)\n if datetime_field:\n if isinstance(parsed_data, list):\n for item in parsed_data:\n date = item.get(datetime_field)\n if date:\n item[datetime_field] = dateparse.parse_datetime(date)\n elif isinstance(parsed_data, dict):\n date = parsed_data.get(datetime_field)\n if date:\n parsed_data[datetime_field] = dateparse.parse_datetime(date)\n\n return parsed_data", "def json2dict(data):\n sub_all = data.get(\"values\")\n keys = data.get(\"fields\")\n dic_all = []\n for sub in sub_all:\n x = dict(zip(keys, sub))\n if not x['datastring'] is None:\n x['datastring'] = json.loads(x.get('datastring'))\n dic_all.append(x)\n return dic_all", "def parse_json(data):\n return json.loads(data)", "def load_json_body(data):\n # type: (str) -> Union[Dict, List]\n try:\n return json.loads(data)\n except Exception:\n raise HttpQueryError(400, \"POST body sent invalid JSON.\")", "def load_record_from_json(data):\n if isinstance(data, str):\n data = json.loads(data)\n\n value = data[\"data\"]\n if isinstance(value, str):\n with contextlib.suppress(json.JSONDecodeError):\n value = json.loads(value)\n return data[\"identifier\"], value, data[\"completed\"]", "def smart_parse(body):\n try:\n data_dict = json.loads(body)\n except ValueError:\n return form_urlencoded_parse(body)\n return data_dict", "def parse_source_json(data):\n data_dict=[]\n for item in data:\n tmp_dict ={}\n tmp_dict[\"_id\"]=item[\"POIID\"]\n tmp_dict[\"POIName\"]=item[\"POIName\"]\n tmp_dict[\"Address\"]=item[\"Address\"]\n try:\n if item[\"isDining\"]=='Y':\n tmp_dict[\"isDining\"] = True\n else:\n tmp_dict[\"isDining\"] = False\n except:\n tmp_dict[\"isDining\"] = None\n tmp_dict[\"location\"] = {\"type\": \"Point\", \"coordinates\": [item[\"X\"],item[\"Y\"]]}\n \n data_dict.append(tmp_dict)\n return data_dict", "def extract_and_parse_json(response):\n return json.loads(response.text, object_hook=OrderedDict)", "def ingest_json_body(request):\n # log.debug(request.body)\n try:\n data = json.loads(str(request.body, encoding='utf-8'))\n except Exception as e:\n log.error(log.exc(e))\n return None\n return data", "def field_to_json(field_list):\n def str_to_json(json_str):\n try:\n return json.loads(json_str)\n except:\n return None\n new_list = []\n for elem in field_list:\n json_obj = str_to_json(elem)\n if json_obj:\n new_list.append(json_obj)\n return new_list", "def dict_to_fm_field_list(\n self, data: Dict[str, Any], language_code: str, line: int = 0\n ) -> nodes.field_list:\n field_list = nodes.field_list()\n\n bibliofields = get_language(language_code).bibliographic_fields\n state_machine = MockStateMachine(self, line)\n state = MockState(self, state_machine, line)\n\n for key, value in data.items():\n if not isinstance(value, (str, int, float, date, datetime)):\n value = json.dumps(value)\n value = str(value)\n if key in bibliofields:\n para_nodes, _ = state.inline_text(value, line)\n body_children = [nodes.paragraph(\"\", \"\", *para_nodes)]\n else:\n body_children = [nodes.Text(value, value)]\n\n field_node = nodes.field()\n field_node.source = value\n field_node += nodes.field_name(key, \"\", nodes.Text(key, key))\n field_node += nodes.field_body(value, *body_children)\n field_list += field_node\n\n return field_list", "def pre_process_raw(raw: dict) -> dict:\n api_data = raw.get(\"data\", {}).get(\"apiList\", [])\n return {api[\"id\"]: api for api in 
api_data}", "def parse_data(data):\n if isinstance(data, basestring):\n data = StringIO.StringIO(data)\n try:\n jsondata = json.load(data)\n except Exception as exc:\n jsondata = []\n msg = 'Unable to apply json.load to \"%s\"' % data\n print(msg)\n if isinstance(jsondata, dict):\n yield jsondata\n elif isinstance(jsondata, list):\n for row in jsondata:\n yield row", "def json_of_response(response):\n return json.loads(response.text)", "def _separate_raw_data(self, raw_data):\n for key, value in raw_data.items():\n if type(value) == dict:\n self.data_dict[key] = value\n elif type(value) == list:\n self.data_list[key] = value", "def _parse_ddwrt_response(data_str):\n return {key: val for key, val in _DDWRT_DATA_REGEX.findall(data_str)}", "def get_data(body):\n data = parse_qs(body)\n for key, value in data.items():\n data[key] = value[0]\n return data", "def parse_response(response):\n return json.loads(response.text)", "def parse_json(response):\r\n return json.loads(response.content)", "def _build_data_from_text(self, text):\n try:\n record = json.loads(text)\n except Exception as e:\n logging.error(f\"Exception: {e}\")\n logging.error(f\"datapoint: {text}\")\n raise e\n return record", "def extract_data(line):\n lines = line.split(' - ')\n return json.loads(lines[1])", "def parse_data(self):\n data = {}\n content = self.headers.get('content-type', None)\n if content:\n ctype, pdict = parse_header(content)\n if ctype == 'application/json':\n length = int(self.headers['content-length'])\n data = json.loads(self.bytes_to_str(self.rfile.read(length)))\n return data", "def _unpack(self, json_value):\r\n json_d = json.loads(json_value)\r\n if type(json_d) != dict:\r\n json_d = {}\r\n\r\n comment_value = json_d.get('comment', '')\r\n if not isinstance(json_d, basestring):\r\n comment_value = ''\r\n\r\n options_value = json_d.get('options', [])\r\n if not isinstance(options_value, list):\r\n options_value = []\r\n\r\n return {\r\n 'options_value': options_value,\r\n 'comment_value': comment_value\r\n }", "def data_with(self, fields):\n with_fields = json.loads(self.data())\n for field, value in fields.items():\n with_fields[field] = value\n return json.dumps(with_fields)", "def json_of_response(response):\n return json.loads(response.data.decode('utf8'))", "def json_of_response(response):\n return json.loads(response.data.decode('utf8'))", "def json_loads(s):\n return json.loads(s, cls=DataDecoder)", "def parse_response(self, r):\n data = (\"\".join(r.split(\"\\n\")[1:])).replace(\",]\",\"]\")\n obj = simplejson.loads(data)\n return obj", "def get_product_values(response):\n json_response = json.loads(response)\n product_fields = OrderedDict([(field, json_response.get(field)) for field in needed_fields])\n\n return product_fields", "def parse_data(data):\n result = {}\n if data:\n params = data.split('&')\n for item in params:\n key, value = item.split('=')\n result[key] = value\n return decode_data(result)", "def _data_list(json: 'a json'):\n data = json['Time Series (Daily)']\n return list(data.items())", "def data(self):\n return json.loads(self.data_json)", "def parse_response(self, response):\n\n return json.loads(response.text)", "def recieve_information_from_client():\r\n client_data = request.forms.get('json')\r\n client_data_dict = json.loads(client_data)\r\n return client_data_dict", "def data_from_response(response: dict) -> dict:\n if response[\"status\"] != 200:\n raise ValueError\n return {\"data\": response[\"payload\"]}", "def data_from_string(text):\n return 
json_load(text.replace(']],\\n', ']], '))", "def get_json_data(request):\n\n # First we need to write request logs\n record_logs(request)\n if request.method == \"GET\":\n return json.loads(request.GET[\"data\"])\n else:\n return json.loads(request.POST[\"data\"])", "def preprocess(self, request):\n # Take the input data and pre-process it make it inference ready\n\n json_list = []\n # for each request\n for idx, data in enumerate(request):\n # Read the bytearray of the jsonline from the input\n jsonline_arr = data.get('body') \n # Input json is in bytearray, convert it to string\n jsonline_str = jsonline_arr.decode(\"utf-8\")\n # split the json lines\n json_list_request = []\n # for each time series\n for line in io.StringIO(jsonline_str):\n json_record = json.loads(line)\n json_list_request.append(json_record)\n json_list.append(json_list_request)\n return json_list", "def parse(content):\n return json.loads(content)", "def massage_api_response(api_data):\n return_dict = defaultdict(list)\n legs = api_data['legs'][0]\n\n return_dict['start_address'].append(legs['start_address'])\n return_dict['end_address'].append(legs['end_address'])\n return_dict['distance'].append(legs['distance']['text'])\n return_dict['duration'].append(legs['duration']['text'])\n if 'duration_in_traffic' in legs:\n (return_dict['duration_in_traffic']\n .append(legs['duration_in_traffic']['text']))\n return_dict['travel_mode'].append(legs['steps'][0]['travel_mode'])\n\n for instruction in legs['steps']:\n (return_dict['instructions']\n .append(BeautifulSoup(instruction['html_instructions'],\n 'html.parser').get_text()))\n return_dict['step_distance'].append(instruction['distance'])\n return return_dict", "def post_data_parser(post_data):\n post_data_json = {}\n for parameter in post_data.rsplit(\"&\"):\n post_data_json[parameter.rsplit(\"=\")[0]] = parameter.rsplit(\"=\")[1]\n return post_data_json", "def parse(line):\n data = json.loads(line)\n return data['SUPPLIERS_ID'], data['CATEGORY_ID'], data['NAME'], data['COUNTRY'], data['CITY'], data[\n 'LAST_UPDATE_DATE']", "def get_data(self):\n data = self.request.body\n\n try:\n data = json.loads(data)\n except ValueError:\n data = None\n return data", "def extract_fields(self, json_dict):\n raise NotImplementedError()", "def json_post_to_dict(form):\n message = str(form.json_message.data)\n try:\n dict_post = json.loads(message)\n except json.decoder.JSONDecodeError as e:\n print(\"json_post_to_dict: json decoder failed to parse message\")\n print(e)\n return None\n return dict_post", "def get_data(self,field):\n\n\t\t#see redcap api documentation -- https://redcap.wustl.edu/redcap/srvrs/prod_v3_1_0_001/redcap/api/help/\n\t\tbuf = io.BytesIO()\n\t\tdata = {\n\t\t 'token': 'D9FFA77DB83AE7D9E3E92BB0B0CBBFDB',\n\t\t 'content': 'record',\n\t\t 'format': 'json',\n\t\t 'type': 'flat',\n\t\t 'fields[0]': 'article_doi',\n\t\t 'fields[1]': 'record_id',\n\t\t 'fields[2]': field,\n\t\t 'rawOrLabel': 'raw',\n\t\t 'rawOrLabelHeaders': 'raw',\n\t\t 'exportCheckboxLabel': 'false',\n\t\t 'exportSurveyFields': 'false',\n\t\t 'exportDataAccessGroups': 'false',\n\t\t 'returnFormat': 'json'\n\t\t}\n\t\tch = pycurl.Curl()\n\t\tch.setopt(ch.URL, 'https://redcap.wustl.edu/redcap/srvrs/prod_v3_1_0_001/redcap/api/')\n\t\tch.setopt(ch.HTTPPOST, list(data.items()))\n\t\tch.setopt(ch.WRITEFUNCTION, buf.write)\n\t\tch.perform()\n\t\tch.close()\n\t\trecords = json.loads(buf.getvalue().decode())\n\t\tbuf.close()\n\t\treturn records", "def process_data(data):\n info = {\n 'cities': [],\n 
'temperatures': [],\n 'humidities': [],\n }\n cities = data['list']\n for city in cities:\n main_data = city['main']\n info['cities'].append(city['name'])\n info['temperatures'].append(main_data['temp'])\n info['humidities'].append(main_data['humidity'])\n\n return info", "def _unpack(self, json_value):\r\n d = json.loads(json_value)\r\n if type(d) != dict:\r\n d = {}\r\n\r\n comment_value = d.get('comment', '')\r\n if not isinstance(comment_value, basestring):\r\n comment_value = ''\r\n\r\n options_value = d.get('options', [])\r\n if not isinstance(options_value, list):\r\n options_value = []\r\n\r\n return {\r\n 'options_value': options_value,\r\n 'has_options_value': len(options_value) > 0, # for convenience\r\n 'comment_value': comment_value,\r\n }", "def parse_data(\n raw_data: Any,\n variables_mapping: VariablesMapping = None,\n functions_mapping: FunctionsMapping = None,\n) -> Any:\n if isinstance(raw_data, str):\n # content in string format may contains variables and functions\n variables_mapping = variables_mapping or {}\n functions_mapping = functions_mapping or {}\n # only strip whitespaces and tabs, \\n\\r is left because they maybe used in changeset\n raw_data = raw_data.strip(\" \\t\")\n return parse_string(raw_data, variables_mapping, functions_mapping)\n\n elif isinstance(raw_data, (list, set, tuple)):\n return [\n parse_data(item, variables_mapping, functions_mapping) for item in raw_data\n ]\n\n elif isinstance(raw_data, dict):\n parsed_data = {}\n for key, value in raw_data.items():\n parsed_key = parse_data(key, variables_mapping, functions_mapping)\n parsed_value = parse_data(value, variables_mapping, functions_mapping)\n parsed_data[parsed_key] = parsed_value\n\n return parsed_data\n\n else:\n # other types, e.g. None, int, float, bool\n return raw_data", "def __convert_data_to_list_of_dict__(self, data):\n jsons = list()\n for row in data:\n json_for_row = dict(zip(self.__fieldnames__, row))\n jsons += [json_for_row]\n return jsons", "def request_data(call_url):\n r = requests.get(call_url)\n response_dict = r.json()\n json_list = response_dict['data'] # list of dicts containing data\n return json_list", "def parse_fields(self, response, fields_dict, net_start=None,\n net_end=None, dt_format=None, field_list=None):\n\n ret = {}\n\n if not field_list:\n\n field_list = ['name', 'handle', 'description', 'country', 'state',\n 'city', 'address', 'postal_code', 'emails',\n 'created', 'updated']\n\n generate = ((field, pattern) for (field, pattern) in\n fields_dict.items() if field in field_list)\n\n for field, pattern in generate:\n\n pattern = re.compile(\n str(pattern),\n re.DOTALL\n )\n\n if net_start is not None:\n\n match = pattern.finditer(response, net_end, net_start)\n\n elif net_end is not None:\n\n match = pattern.finditer(response, net_end)\n\n else:\n\n match = pattern.finditer(response)\n\n values = []\n sub_section_end = None\n for m in match:\n\n if sub_section_end:\n\n if field not in (\n 'emails'\n ) and (sub_section_end != (m.start() - 1)):\n\n break\n\n try:\n\n values.append(m.group('val').strip())\n\n except IndexError:\n\n pass\n\n sub_section_end = m.end()\n\n if len(values) > 0:\n\n value = None\n try:\n\n if field == 'country':\n\n value = values[0].upper()\n\n elif field in ['created', 'updated'] and dt_format:\n\n value = datetime.strptime(\n values[0],\n str(dt_format)).isoformat('T')\n\n elif field in ['emails']:\n\n value = list(unique_everseen(values))\n\n else:\n\n values = unique_everseen(values)\n value = '\\n'.join(values).strip()\n\n 
except ValueError as e:\n\n log.debug('Whois field parsing failed for {0}: {1}'.format(\n field, e))\n pass\n\n ret[field] = value\n\n return ret", "def get_data(self, body):\n params = json.loads(body)\n logger.debug('New Data Format')\n return self._get_data(body)", "def parse_response(response):\n result = []\n if response and \"data\" in response:\n for data in response[\"data\"]:\n user = next(filter(lambda usr: usr[\"id\"] == data[\"author_id\"],\n response[\"includes\"][\"users\"]), None) if \"users\" in response[\"includes\"] else None\n account = dict()\n if user:\n account[\"fullname\"] = user[\"name\"]\n account[\"href\"] = f\"/{user['username']}\"\n account[\"id\"] = user[\"id\"]\n item = dict()\n item[\"account\"] = account\n parse_date = datetime.strptime(data['created_at'], \"%Y-%m-%dT%H:%M:%S.%f%z\")\n item[\"date\"] = parse_date.strftime(\"%H:%M %p - %d %b %Y\")\n item[\"hashtags\"] = list(map(lambda ht: f\"#{ht['tag']}\",\n data[\"entities\"][\"hashtags\"])) if \"hashtags\" in data[\"entities\"] else []\n item[\"likes\"] = data[\"public_metrics\"][\"like_count\"]\n item[\"replies\"] = data[\"public_metrics\"][\"reply_count\"]\n item[\"retweets\"] = data[\"public_metrics\"][\"retweet_count\"]\n item[\"text\"] = data[\"text\"]\n result.append(item)\n return result", "def processData(self, json):\r\n if json[\"data\"] is not None:\r\n return json[\"data\"]\r\n else:\r\n pass\r\n # Raise Exception", "def parse_requests_json(stream):\n import json\n \n requests = json.load(stream)\n \n return requests", "def get_json(response):\n return json.loads(response.data.decode('utf8'))", "def parse_user_fields(json_data):\n # Populate the fields\n user_info = {}\n for db_field, json_field in Users.UserJSON.fields.items():\n try:\n user_info[db_field] = get_json_field(json_data, json_field)\n if db_field == 'user_address_street':\n user_info[db_field] = user_info.get(db_field).replace('\\n', '')\n elif (db_field == 'user_first_login') or (db_field == 'user_last_login'):\n raw_timestamp = user_info.get(db_field)[:19]\n user_info[db_field] = core_utils.validate_timestamp(raw_timestamp)\n elif db_field == 'user_tags':\n user_info[db_field] = ', '.join(user_info.get(db_field))\n elif db_field == 'user_profile':\n profile = user_info[db_field]\n for idx in range(len(profile)):\n if profile[idx]['jive_label'] in Users.UserJSON.profile_fields:\n profile_field_name = Users.UserJSON.profile_fields.get(profile[idx]['jive_label'])\n user_info[profile_field_name] = profile[idx]['value']\n del user_info['user_profile']\n except (KeyError, IndexError, AttributeError):\n # Continue on to the next field\n continue\n # Return the user information\n return user_info", "def get_jsonparsed_data(url):\n response = urlopen(url)\n data = response.read().decode(\"utf-8\")\n return json.loads(data)", "def get_jsonparsed_data(url):\n response = urlopen(url)\n data = response.read().decode(\"utf-8\")\n return json.loads(data)", "def get_jsonparsed_data(url):\n response = urlopen(url)\n data = response.read().decode(\"utf-8\")\n return json.loads(data)", "def get_jsonparsed_data(url):\n response = urlopen(url)\n data = response.read().decode(\"utf-8\")\n return json.loads(data)", "def to_dict(self, data):\n return json.loads(json.dumps(data))", "def reach_process_json():\n response = request.body.read().decode('utf-8')\n body = json.loads(response)\n json_str = body.get('json')\n rp = reach.process_json_str(json_str)\n if rp and rp.statements:\n stmts = stmts_to_json(rp.statements)\n res = {'statements': stmts}\n 
return res\n else:\n res = {'statements': []}\n return res", "def test_meta_data_passes_fields(self):\n self.expect_json_http({\"some\": \"value\"},\n uri=re.compile(\".*/articles/1234-56\"))\n\n federalregister.meta_data(\"1234-56\", ['field1', 'field2', 'field3'])\n params = self.last_http_params()\n self.assertEqual(params['fields[]'], ['field1', 'field2', 'field3'])", "def extract_from_json_ld(self, data, url):\n\n scripts = data.xpath(\"//script[@type='application/ld+json']\")\n records = [ ]\n\n for scr in scripts:\n\n try:\n data = json.loads(scr.text)\n except:\n continue\n\n if not isinstance(data, dict):\n continue\n\n record = dict([ (k, v) for k, v in data.items() if k in self.store_fields ])\n if \"recipeIngredient\" not in record and \"ingredients\" in data:\n record[\"recipeIngredient\"] = data[\"ingredients\"]\n\n record[\"url\"] = url\n record[\"collect_time\"] = datetime.utcnow()\n\n if self.validate(record):\n records.append(record)\n\n return records", "def consumeData(self, data):\n ret = []\n\n soup = BeautifulSoup(StringIO(data))\n ingredientses = soup.find_all(None, itemprop='ingredients')\n for ing in ingredientses:\n separateByClass(soup, ing, \"ingredient\")\n separateByTag(soup, ing, ['br', 'tr', 'li'])\n instructionses = soup.find_all(None, itemprop=\"recipeInstructions\")\n for ins in instructionses:\n separateByClass(soup, ins, \"instruction\")\n separateByTag(soup, ins, ['br', 'tr', 'li'])\n workingDocument = StringIO(soup.encode('utf-8'))\n\n items = microdata.get_items(workingDocument)\n for i in items:\n for typ in i.itemtype:\n if typ.string == MICROFORMAT_RECIPE:\n ret.append(i.json())\n break\n return map(json.loads, ret)", "def request_data_to_dict(data):\r\n if not isinstance(data, ImmutableMultiDict):\r\n raise ValueError('Input must be ImmutableMultiDict type.')\r\n\r\n res = {}\r\n for (key, value) in data.to_dict().items():\r\n matches = re.match('(.*)\\[(.*)\\]', key)\r\n if matches:\r\n (key_lv_1, key_lv_2) = matches.groups()\r\n if key_lv_1 not in res:\r\n res[key_lv_1] = {}\r\n res[key_lv_1][key_lv_2] = value\r\n else:\r\n res[key] = value\r\n return res", "def decode_record(record):\n return json.loads(record, object_hook=decode_dict)", "def apicall():\r\n# try:\r\n print request.get_json()\r\n test_json = request.get_json()\r\n logger.info(\"input json object loaded\")\r\n logger.info(test_json)\r\n k=MetaData(test_json)\r\n int_res=k.getData()\r\n print '------------------------------'\r\n print int_res\r\n return jsonify(int_res)", "def _wrap_rest_data(self, data):\n if isinstance(data, dict):\n return self._wrap_rest_data_one(data)\n\n if not isinstance(data, list):\n raise RuntimeError(\"Result data must be a dict or a list: '%s' was returned\" % type(data))\n\n objs = []\n for item in data:\n objs.append(self._wrap_rest_data_one(item))\n return objs", "def _parsejson(x):\n return json.loads(x.read().decode('utf-8'))", "def _parse_api_base_data (self, netflix_page_data):\n api_data = {};\n important_fields = [\n 'API_BASE_URL',\n 'API_ROOT',\n 'BUILD_IDENTIFIER',\n 'ICHNAEA_ROOT'\n ]\n\n # values are accessible via dict (sloppy parsing successfull)\n if type(netflix_page_data) == dict:\n for important_field in important_fields:\n api_data.update({important_field: netflix_page_data.get(important_field, '')})\n return api_data\n\n for item in netflix_page_data:\n if 'serverDefs' in dict(item).keys():\n for important_field in important_fields:\n api_data.update({important_field: item['serverDefs']['data'][important_field]})\n return 
api_data", "def from_json(self, content):\r\n return simplejson.loads(content)", "def prepare_data(self, data):\n for i, v in data.items():\n field_type = self.get_field_type(i)\n #log.info('i = %s, type = %s', i, field_type)\n if field_type == 'datetime' and isinstance(v, (str, unicode)):\n data[i] = datetime_from_string(v)\n return data", "def parse_response(response):\n return json.loads(response.read()[MAGIC_PREFIX_OFFSET:])", "def _perform_request(self, req):\n \n res = req.content\n x = json.loads(res, object_hook=lambda d: namedtuple('X', d.keys())(*d.values()))\n return x", "def load_json(response):\n return json.loads(response.data.decode('utf8'))", "def get_fields():\n if not request.is_xhr:\n abort(403)\n fields = Field.query.all()\n result = {field.id:field.name for field in fields}\n return jsonify(result)", "def parse_data(data, pub_ip):\n # Convert str to dict\n data = json.loads(data)\n\n result = []\n if pub_ip:\n for item in data['lstatuses']:\n if item['status_11']['whiteip'] == pub_ip:\n temp_user = {\n 'Private IP': item['ipv4'],\n 'TCP Utilization': item['status_11']['sess_tcp'],\n 'UDP Utilization': item['status_11']['sess_udp'],\n 'Public IP': item['status_11']['whiteip']\n }\n result.append(temp_user)\n else:\n for item in data['lstatuses']:\n if item['status_11']['whiteip'] != '0.0.0.0':\n temp_user = {\n 'Private IP': item['ipv4'],\n 'TCP Utilization': item['status_11']['sess_tcp'],\n 'UDP Utilization': item['status_11']['sess_udp'],\n 'Public IP': item['status_11']['whiteip']\n }\n result.append(temp_user)\n return result", "def convert_for_form(data):\n if \"name\" in data:\n data[\"full_name\"] = data[\"name\"].get(\"value\")\n try:\n data[\"given_names\"] = data[\"name\"].get(\n \"value\").split(\",\")[1].strip()\n except IndexError:\n data[\"given_names\"] = \"\"\n data[\"family_name\"] = data[\"name\"].get(\"value\").split(\",\")[0].strip()\n data[\"display_name\"] = data[\"name\"].get(\"preferred_name\")\n data[\"status\"] = data[\"name\"].get(\"status\", \"\").lower()\n if \"urls\" in data:\n data[\"websites\"] = []\n for url in data[\"urls\"]:\n if \"description\" not in url:\n data[\"websites\"].append({\"webpage\": url[\"value\"]})\n else:\n if url[\"description\"].lower() == \"twitter\":\n data[\"twitter_url\"] = url[\"value\"]\n elif url[\"description\"].lower() == \"blog\":\n data[\"blog_url\"] = url[\"value\"]\n elif url[\"description\"].lower() == \"linkedin\":\n data[\"linkedin_url\"] = url[\"value\"]\n del data[\"urls\"]\n if \"field_categories\" in data:\n data[\"research_field\"] = data['field_categories']\n if \"positions\" in data:\n data[\"institution_history\"] = []\n for position in data[\"positions\"]:\n if not any(\n [\n key in position for key in ('name', 'rank',\n 'start_year', 'end_year')\n ]\n ):\n if 'email' in position:\n # Only email available, take as public_email\n data[\"public_email\"] = position.get(\"email\")\n continue\n pos = {}\n pos[\"name\"] = position.get(\"institution\", {}).get(\"name\")\n pos[\"rank\"] = position.get(\"rank\", \"\")\n pos[\"start_year\"] = position.get(\"start_date\", \"\")\n pos[\"end_year\"] = position.get(\"end_date\", \"\")\n pos[\"current\"] = True if position.get(\"status\") else False\n pos[\"old_email\"] = position.get(\"old_email\", \"\")\n if position.get(\"email\"):\n pos[\"email\"] = position.get(\"email\", \"\")\n if not data.get(\"public_email\"):\n data[\"public_email\"] = position.get(\"email\")\n data[\"institution_history\"].append(pos)\n data[\"institution_history\"].reverse()\n 
if 'advisors' in data:\n advisors = data['advisors']\n data['advisors'] = []\n for advisor in advisors:\n adv = {}\n adv[\"name\"] = advisor.get(\"name\", \"\")\n adv[\"degree_type\"] = advisor.get(\"degree_type\", \"\")\n data[\"advisors\"].append(adv)\n if \"ids\" in data:\n for id in data[\"ids\"]:\n try:\n if id[\"type\"] == \"ORCID\":\n data[\"orcid\"] = id[\"value\"]\n elif id[\"type\"] == \"BAI\":\n data[\"bai\"] = id[\"value\"]\n elif id[\"type\"] == \"INSPIRE\":\n data[\"inspireid\"] = id[\"value\"]\n except KeyError:\n # Protect against cases when there is no value in metadata\n pass", "def json(self):\n return json.loads(self.text)", "def _parse_json(req, resp):\n try:\n body = req.stream.read()\n return json.loads(body)\n except ValueError as e:\n err_msg = str(e) + ': ' + body\n resp.status = falcon.HTTP_400\n resp.body = make_error_body(err_msg)\n return", "def process_json(self, data):\r\n rsp = json.loads(data)\r\n\r\n if rsp['stat'] == 'fail':\r\n raise APIError, rsp\r\n\r\n return rsp", "def json_loads(value):\n return json.loads(value)", "def json(self):\n return self._parsejson(self.raw)", "def from_json_string(json_string):\n if not json_string:\n return []\n return json.loads(json_string)", "def upload_initial_data():\n data = {\"updated\":1512866292573,\"data\":[[[\"DAY %\",\"PPS (CAD)\",\"TKR\",\"NAME\",\"SECTOR\",\"EX.\",\"ALT. TKR\",\"VOL\",\"VOL @ CRNT $\",\"MC ($MM)\"],[\"5.84%\",\"1.45\",\"ABCN\",\"ABcann Medicinals\",\"BioTech\",\"CVE\",\"ABCCF\",\"901,940\",\"1,307,813\",\"78.49\"],[\"6.14%\",\"7.09\",\"ACB\",\"Aurora Cannabis\",\"Cultivation & Retail\",\"TSE\",\"ACBFF\",\"13,927,205\",\"98,743,883.45\",\"2,861.53\"],[\"9.09%\",\"0.24\",\"ACG\",\"Alliance Growers \",\"AgTech\",\"CNSX\",\"--\",\"1,189,385\",\"285,452.4\",\"\"],[\"0.00%\",\"0.10\",\"AFI\",\"Affinor Growers\",\"AgTech\",\"CNSX\",\"RSSFF\",\"210,310\",\"21,031\",\"\"],[\"3.22%\",\"13.47\",\"APH\",\"Aphria\",\"Cultivation & Retail\",\"TSE\",\"APHQF\",\"2,663,133\",\"35,872,401.51\",\"2,042.08\"],[\"13.95%\",\"0.25\",\"ATT\",\"Abattis Bio\",\"BioTech\",\"CNSX\",\"ATTBF\",\"2,706,357\",\"676,589.25\",\"39.86\"],[\"0.00%\",\"2.03\",\"BE\",\"Beleave\",\"Cultivation & Retail\",\"CNSX\",\"BLEVF\",\"597,909\",\"1,213,755.27\",\"\"],[\"1.28%\",\"1.58\",\"BLO\",\"Cannabix Tech\",\"LawTech\",\"CNSX\",\"BLOZF\",\"465,869\",\"736,073.02\",\"136.61\"],[\"-2.20%\",\"0.89\",\"CBW\",\"Cannabis Wheaton \",\"Investing & Finance\",\"CVE\",\"KWFLF\",\"815,477\",\"725,774.53\",\"234.57\"],[\"-0.65%\",\"19.93\",\"CMED\",\"Cannimed\",\"Cultivation & Retail\",\"TSE\",\"CMMDF\",\"130,722\",\"2,605,289.46\",\"457.69\"],[\"12.73%\",\"0.62\",\"CMM\",\"Canabo Medical\",\"MedCare\",\"CVE\",\"CAMDF\",\"330,404\",\"204,850.48\",\"23.54\"],[\"-2.87%\",\"2.71\",\"CRZ\",\"Cannaroyalty\",\"Investing & Finance\",\"CNSX\",\"CNNRF\",\"961,449\",\"2,605,526.79\",\"115.09\"],[\"-6.67%\",\"0.28\",\"CYX\",\"Calyx Bio\",\"AgTech\",\"CVE\",\"CLYXF\",\"2,120,562\",\"593,757.36\",\"24.23\"],[\"0.00%\",\"1.23\",\"DOJA\",\"DOJA Cannabis\",\"Cultivation & Retail\",\"CNSX\",\"DJACF\",\"206,635\",\"254,161.05\",\"72.27\"],[\"-4.40%\",\"0.44\",\"DVA\",\"Delivra\",\"BioTech\",\"CVE\",\"--\",\"89,485\",\"39,373.4\",\"19.55\"],[\"6.52%\",\"0.25\",\"EAT\",\"Nutritional High\",\"Marijuana Edibles & Extracts\",\"CNSX\",\"SPLIF\",\"3,067,636\",\"766,909\",\"61.54\"],[\"-1.20%\",\"1.64\",\"EMC\",\"Emblem\",\"Cultivation & Retail\",\"CVE\",\"EMMBF\",\"411,764\",\"675,292.96\",\"130.60\"],[\"2.05%\",\"3.98\",\"EMH\",\"Emerald\",\"Cultivation & 
Retail\",\"CVE\",\"TBQBF\",\"1,430,067\",\"5,691,666.66\",\"374.34\"],[\"-5.88%\",\"0.48\",\"FFT\",\"Future Farm Tech\",\"AgTech\",\"CNSX\",\"AGSTF\",\"1,291,240\",\"619,795.2\",\"0.61\"],[\"1.06%\",\"1.90\",\"FIRE\",\"Supreme Pharma\",\"Cultivation & Retail\",\"CVE\",\"SPRWF\",\"1,275,906\",\"2,424,221.4\",\"391.96\"],[\"5.26%\",\"0.10\",\"GHG\",\"Global Hemp\",\"Cultivation & Retail\",\"CNSX\",\"GBHPF\",\"764,350\",\"76,435\",\"\"],[\"3.28%\",\"0.31\",\"GLH\",\"Golden Leaf\",\"Marijuana Products\",\"CNSX\",\"GLDFF\",\"4,298,567\",\"1,332,555.77\",\"116.96\"],[\"-1.96%\",\"0.50\",\"HC\",\"High Hampton Holdings\",\"Investing & Finance\",\"CNSX\",\"--\",\"727,116\",\"363,558\",\"\"],[\"1.89%\",\"0.54\",\"HIP\",\"Newstirke Resources \",\"Cultivation & Retail\",\"CVE\",\"NWKRF\",\"431,875\",\"233,212.5\",\"210.35\"],[\"8.91%\",\"1.10\",\"HVST\",\"Harvest One Cannabis\",\"Cultivation & Retail\",\"CVE\",\"HRVOF\",\"2,192,877\",\"2,412,164.7\",\"98.10\"],[\"8.89%\",\"0.98\",\"ICC\",\"International Cannabis\",\"Cultivation & Retail\",\"CVE\",\"ICCLF\",\"123,538\",\"121,067.24\",\"110.84\"],[\"0.00%\",\"1.62\",\"IMH\",\"Invictus MD\",\"Investing & Finance\",\"CVE\",\"IVITF\",\"781,924\",\"1,266,716.88\",\"129.87\"],[\"12.50%\",\"0.90\",\"IN\",\"Inmed Pharma\",\"BioTech\",\"CNSX\",\"IMLFF\",\"3,846,586\",\"3,461,927.4\",\"\"],[\"2.27%\",\"1.80\",\"ISOL\",\"Isodiol International \",\"Hemp Products\",\"CNSX\",\"LAGBF\",\"8,514,952\",\"15,326,913.6\",\"\"],[\"7.84%\",\"0.28\",\"KALY\",\"Kalytera Therapeutics\",\"BioTech\",\"CVE\",\"QUEZD\",\"5,634,186\",\"1,577,572.08\",\"34.74\"],[\"-1.72%\",\"0.57\",\"LDS\",\"Lifestyle Delivery Systems\",\"BioTech\",\"CNSX\",\"LDSYF\",\"685,628\",\"390,807.96\",\"51.44\"],[\"0.19%\",\"15.50\",\"LEAF\",\"MedReleaf Corp\",\"Cultivation & Retail\",\"TSE\",\"MEDFF\",\"229,190\",\"3,552,445\",\"1,459.18\"],[\"2.33%\",\"0.44\",\"LIB\",\"Liberty Leaf Holdings\",\"Investing & Finance\",\"CNSX\",\"LIBFF\",\"4,555,082\",\"2,004,236.08\",\"\"],[\"10.42%\",\"1.59\",\"LXX\",\"Lexaria Bio\",\"Hemp Products\",\"CNSX\",\"LXRP\",\"1,523,338\",\"2,422,107.42\",\"\"],[\"-1.38%\",\"2.14\",\"MARI\",\"Maricann Group\",\"Cultivation & Retail\",\"CNSX\",\"MRRCF\",\"678,106\",\"1,451,146.84\",\"157.10\"],[\"3.26%\",\"0.95\",\"MDM\",\"Marapharm\",\"Cultivation & Retail\",\"CNSX\",\"MRPHF\",\"209,019\",\"198,568.05\",\"\"],[\"0.00%\",\"0.57\",\"MGW\",\"Maple Leaf Green World\",\"Cultivation & Retail\",\"CVE\",\"MGWFF\",\"367,479\",\"209,463.03\",\"83.83\"],[\"7.37%\",\"1.02\",\"MJ\",\"True Leaf\",\"Hemp Pet Chews\",\"CNSX\",\"TLFMF\",\"164,101\",\"167,383.02\",\"\"],[\"2.27%\",\"4.50\",\"MJN\",\"Pharmacan /Cronos\",\"Investing & Finance\",\"CVE\",\"PRMCF\",\"419,922\",\"1,889,649\",\"675.43\"],[\"4.23%\",\"2.71\",\"MYM\",\"My Marijuana\",\"Cultivation & Retail\",\"CNSX\",\"--\",\"1,066,122\",\"2,889,190.62\",\"\"],[\"4.40%\",\"0.95\",\"N\",\"Namaste Tech\",\"Consumption Devices\",\"CNSX\",\"NXTTF\",\"5,714,764\",\"5,429,025.8\",\"192.50\"],[\"0.00%\",\"0.10\",\"NF\",\"New Age Farm\",\"Hemp Products\",\"CNSX\",\"NWGFF\",\"3,938,476\",\"393,847.6\",\"\"],[\"-7.27%\",\"0.25\",\"NSP\",\"Naturally Splendid\",\"Hemp Products\",\"CVE\",\"NSPDF\",\"484,812\",\"121,203\",\"24.42\"],[\"4.99%\",\"3.79\",\"OGI\",\"Organigram\",\"Cultivation & Retail\",\"CVE\",\"OGRMF\",\"3,654,843\",\"13,851,854.97\",\"375.89\"],[\"1.15%\",\"0.88\",\"PUF\",\"PUF Ventures\",\"Consumption Devices\",\"CNSX\",\"PUFXF\",\"719,534\",\"633,189.92\",\"45.85\"],[\"10.68%\",\"1.14\",\"RHT\",\"Reliq Health Tech\",\"Mobile 
Software\",\"CVE\",\"RQHTF\",\"1,564,567\",\"1,783,606.38\",\"98.74\"],[\"4.05%\",\"1.80\",\"RTI\",\"Radient Technologies\",\"Extraction\",\"CVE\",\"RDDTF\",\"2,181,473\",\"3,926,651.4\",\"345.53\"],[\"3.64%\",\"0.28\",\"RVV\",\"Revive Therapeutics\",\"Medication\",\"CVE\",\"RVVTF\",\"399,705\",\"111,917.4\",\"15.50\"],[\"-2.90%\",\"0.67\",\"SUN\",\"Wildflower\",\"Hemp Products\",\"CNSX\",\"WLDFF\",\"87,197\",\"58,421.99\",\"29.48\"],[\"-0.67%\",\"4.45\",\"SXP\",\"Supremex\",\"Packaging\",\"TSE\",\"SUMXF\",\"27,015\",\"120,216.75\",\"126.40\"],[\"0.00%\",\"0.76\",\"TBP\",\"Tetra Bio-Pharma\",\"BioTech\",\"CVE\",\"GRPOF\",\"497,745\",\"378,286.2\",\"88.67\"],[\"2.44%\",\"2.10\",\"TER\",\"TerrAscend Corp\",\"Cultivation & Retail\",\"CNSX\",\"--\",\"270,176\",\"567,369.6\",\"\"],[\"4.29%\",\"0.73\",\"THC\",\"THC Biomed\",\"BioTech\",\"CNSX\",\"THCBF\",\"818,162\",\"597,258.26\",\"81.29\"],[\"3.55%\",\"3.21\",\"THCX\",\"Hydropothecary Corp\",\"Cultivation & Retail\",\"CVE\",\"HYYDF\",\"1,581,640\",\"5,077,064.4\",\"282.37\"],[\"8.22%\",\"0.79\",\"TNY\",\"Tinley Beverage Co\",\"Beverage\",\"CNSX\",\"QRSRF\",\"945,154\",\"746,671.66\",\"57.81\"],[\"3.49%\",\"7.70\",\"TRST\",\"CannTrust\",\"Cultivation & Biotech\",\"CNSX\",\"CNTTF\",\"368,892\",\"2,840,468.4\",\"699.98\"],[\"-8.04%\",\"1.03\",\"VGW\",\"Valens Groworks\",\"BioTech\",\"CNSX\",\"MYMSF\",\"23,285\",\"23,983.55\",\"62.77\"],[\"0.00%\",\"0.52\",\"VIN\",\"Vinergy Resources\",\"Investing & Finance\",\"CNSX\",\"VNNYF\",\"0\",\"\",\"\"],[\"-2.50%\",\"0.39\",\"VP\",\"Vodis Pharma\",\"Cultivation & Retail\",\"CNSX\",\"VDQSF\",\"52,661\",\"20,537.79\",\"\"],[\"6.67%\",\"0.80\",\"VRT\",\"Veritas Pharma\",\"BioTech\",\"CNSX\",\"VRTHF\",\"377,901\",\"302,320.8\",\"\"],[\"6.41%\",\"19.42\",\"WEED\",\"Canopy Growth\",\"Cultivation & Retail\",\"TSE\",\"TWMJF\",\"4,940,034\",\"95,935,460.28\",\"3,706.63\"],[\"6.25%\",\"2.38\",\"WMD\",\"WeedMD\",\"Cultivation & Retail\",\"CVE\",\"WDDMF\",\"1,174,148\",\"2,794,472.24\",\"124.71\"],[\"3.36%\",\"14.75\",\"HMMJ\",\"Horizons Marijuana Life Sciences\",\"Canadian Marijuana ETF\",\"TSE\",\"HMLSF\",\"336,579\",\"4,964,540.25\",\"197.64\"]]],\"sheetnames\":[\"ALLSHOW\"]}\n \n exchange_suffixes = {'TSE': 'TO', 'CVE': 'V'}\n\n # create sector\n sector, _ = Sector.objects.get_or_create(name=\"Cannabis\", slug='cannabis')\n\n # create currency\n currency, _ = Currency.objects.get_or_create(symbol='CAD', defaults={'character':'$', 'name':'Canadian Dollar'})\n us_currency, _ = Currency.objects.get_or_create(symbol='USD', defaults={'character':'$', 'name':'US Dollar'})\n\n # OTC exchange\n otc, _ = Exchange.objects.get_or_create(symbol='OTC', defaults={'name':'OTC', 'currency': us_currency})\n\n # iterate over each item in our table, make the items\n for row in data[\"data\"][0][1:]:\n # percent = float(row[0].replace(\"%\",\"\"))\n suffix = exchange_suffixes[row[5]] if row[5] in exchange_suffixes else ''\n exchange, _ = Exchange.objects.get_or_create(symbol=row[5], defaults={'name':row[5], 'currency':currency, 'ticker_suffix': suffix})\n company, _ = Company.objects.get_or_create(name=row[3], defaults={'sector':sector})\n stock, _ = Stock.objects.get_or_create(ticker=row[2], defaults={\n 'company': company,\n 'exchange': exchange,\n 'market_cap': float(row[9].replace(\",\",\"\")) * 1000000 if row[9] else 0.0,\n # 'previous_close': float(row[1]) - float(row[1]) * percent / 100,\n # 'open': float(row[1]),\n # 'current': float(row[1]),\n # 'volume': float(row[8].replace(\",\",\"\")) if row[8] else 0.0,\n })\n 
stock.save()\n\n if row[4]:\n tag, _ = Tag.objects.get_or_create(name=row[4])\n company.tags.add(tag)\n\n if row[6] and not row[6] == \"--\":\n stock, _ = Stock.objects.get_or_create(ticker=row[6], defaults={'company':company, 'exchange':otc})\n\n\n print data", "def _decode(self, data: bytes):\n\n return json.loads(data.decode('utf-8'))", "def parse(data, **kwargs):\n if not data or not isinstance(data, basestring):\n return None\n\n data = data.split()\n\n try:\n url, _ = data[8].split('?')\n except (ValueError, IndexError):\n url = ''\n\n data = {\n # 'datetime': int(time.mktime(parser.parse(data[0]).timetuple())),\n 'datetime': data[0],\n 'remote_addr': data[1],\n 'request_length': int(data[2]),\n 'request_time': float(data[3]),\n 'bytes_sent': int(data[4]),\n 'body_bytes_sent': int(data[5]),\n 'response_status': int(data[6]),\n 'request_method': data[7].lower(),\n 'request_url': data[8],\n 'http_version': data[9],\n 'base_url': url,\n }\n\n return data", "def json_to_payload(cls, json_data, prefix=\"\") -> Dict[str, PayloadInterface]:\n\n res = {}\n for key, val in json_data.items():\n if isinstance(val, str):\n res[prefix + key] = PayloadInterfaceStrictAnyOf(value=val, type=\"keyword\")\n continue\n\n if isinstance(val, int):\n res[prefix + key] = PayloadInterfaceStrictAnyOf1(value=val, type=\"integer\")\n continue\n\n if isinstance(val, float):\n res[prefix + key] = PayloadInterfaceStrictAnyOf2(value=val, type=\"float\")\n continue\n\n if isinstance(val, dict):\n if 'lon' in val and 'lat' in val:\n res[prefix + key] = PayloadInterfaceStrictAnyOf3(\n value=GeoPoint(lat=val['lat'], lon=val['lon']),\n type=\"geo\"\n )\n else:\n res = {\n **res,\n **cls.json_to_payload(val, prefix=f\"{key}__\")\n }\n continue\n\n if isinstance(val, list):\n if all(isinstance(v, str) for v in val):\n res[prefix + key] = PayloadInterfaceStrictAnyOf(value=val, type=\"keyword\")\n continue\n\n if all(isinstance(v, int) for v in val):\n res[prefix + key] = PayloadInterfaceStrictAnyOf1(value=val, type=\"integer\")\n continue\n\n if all(isinstance(v, float) for v in val):\n res[prefix + key] = PayloadInterfaceStrictAnyOf2(value=val, type=\"float\")\n continue\n\n if all(isinstance(v, dict) and 'lon' in v and 'lat' in v for v in val):\n res[prefix + key] = PayloadInterfaceStrictAnyOf3(\n value=[GeoPoint(lat=v['lat'], lon=v['lon']) for v in val],\n type=\"geo\"\n )\n continue\n\n raise RuntimeError(f\"Payload {key} have unsupported type {type(val)}\")\n\n return res", "def load_field_data_dict(data_dict, verbose=True):\n \n # The output dict\n data = {}\n \n # Load attributes\n attrs, other = load_field_attrs(data_dict['attrs'], verbose=verbose)\n attrs.update(other)\n data['attrs'] = attrs \n \n # Go through components. 
Allow aliases\n comp = data['components'] = {}\n for k, v in data_dict['components'].items():\n if k in component_alias:\n comp[k] = v\n elif k in component_from_alias:\n k = component_from_alias[k]\n assert k not in data\n comp[k] = v\n else:\n raise ValueError(f'Unallowed component: {k}')\n \n \n return data", "def decode_params(text):\n try:\n response_dict = parse_json(text)\n except (IndexError, ValueError):\n qs = urlparse.parse_qs(text)\n response_dict = JsonDict(((k, v[0]) for k, v in qs.iteritems()))\n\n if not response_dict:\n raise APIError(\"can't decode text\", \"%s is not jsonp, json, or qs\" % text)\n\n return response_dict", "def json_decode(data):\n if isinstance(data, bytes):\n data = data.decode(\"utf-8\")\n\n return json.loads(data)", "def fields(request):\n fields = request.GET.getlist('field')\n response = {}\n if 'reviewers' in fields:\n response['reviewers'] = request.issue.reviewers or []\n if 'description' in fields:\n response['description'] = request.issue.description\n if 'subject' in fields:\n response['subject'] = request.issue.subject\n return response", "def field(self, field):\r\n url = '{0}/{1}'.format(self.get_url(), field)\r\n request = http.Request('GET', url)\r\n return request, parsers.parse_json", "def process_response(responseObject):\n\n payload = json.loads(\n str(\n responseObject\n )\n )\n\n return payload", "def process_response(responseObject):\n\n payload = json.loads(\n str(\n responseObject\n )\n )\n\n return payload", "def read_data(raw_data: list):\r\n cleaned_data = {}\r\n for data_item in raw_data:\r\n clean_data_item = pre_process_string_data(data_item)\r\n if clean_data_item is not None:\r\n cleaned_data[clean_data_item['_id']] = clean_data_item\r\n return cleaned_data" ]
[ "0.68718296", "0.66720295", "0.6531521", "0.6174297", "0.6158747", "0.6148615", "0.60345334", "0.60218924", "0.5973685", "0.5921073", "0.5912757", "0.5910022", "0.58769304", "0.58715916", "0.5844883", "0.58414644", "0.5825254", "0.5811341", "0.57988477", "0.57882863", "0.5775228", "0.5758465", "0.57438356", "0.5739316", "0.5725662", "0.57163143", "0.57163143", "0.57141477", "0.5705819", "0.57024133", "0.5686657", "0.56659496", "0.5660459", "0.5652841", "0.56505287", "0.5648375", "0.56462306", "0.56352746", "0.56306463", "0.5616586", "0.560526", "0.559411", "0.5581856", "0.5540143", "0.5536718", "0.5535806", "0.5535446", "0.55164844", "0.5500073", "0.54882556", "0.54671836", "0.5458564", "0.5431072", "0.5430184", "0.5428392", "0.5418657", "0.5406099", "0.54035765", "0.5400896", "0.5399338", "0.5399338", "0.5399338", "0.5399338", "0.539628", "0.53958243", "0.5394538", "0.5391805", "0.5381563", "0.5375386", "0.53729457", "0.5362629", "0.5361483", "0.5350959", "0.5350517", "0.5342737", "0.53391814", "0.53386045", "0.53237045", "0.53154874", "0.5315253", "0.5307389", "0.5305605", "0.5304441", "0.52895355", "0.5272028", "0.52676547", "0.52490306", "0.524612", "0.52429557", "0.524107", "0.5231315", "0.5214509", "0.5212326", "0.5202332", "0.51993865", "0.51974154", "0.5187668", "0.51845217", "0.51845217", "0.5183416" ]
0.60061646
8
Constructor parent reference to the parent widget QWidget
def __init__(self, parent=None):
    super(MainWindow, self).__init__(parent)
    self.setupUi(self)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create(self, parent):\n self.widget = QtCore.QObject(parent)", "def __init__(self, parent=None):\n super(Dialog, self).__init__(parent)\n self.setupUi(self)", "def __init__(self, parent=None):\n super(Form, self).__init__(parent)\n self.setupUi(self)", "def __init__(self, parent=None):\n super(Form, self).__init__(parent)\n self.setupUi(self)", "def __init__(self, parent=None):\n super(QCTP, self).__init__(parent)\n self.setupUi(self)", "def __init__(self, parent=None):\n super(Representative, self).__init__(parent)\n self.setupUi(self)", "def create(self, parent):\n self.widget = _QMainWindow(parent)", "def __init__(self, parent):\n self.parent = parent\n self.dialog = None", "def __init__(self, parent):\n QtGui.QMenu.__init__(self, parent)\n self.parent = parent", "def __init__(self, printer, parent=None):\n QtGui.QWidget.__init__(self, printer, parent)", "def __init__(self, parent=None):\n self._window = None\n\n self.setup_ui()", "def __init__(self, parent=None):\n super(union_Dialog, self).__init__(parent)\n self.setupUi(self)", "def __init__(self, parent=None):\n super(Inj, self).__init__(parent)\n self.setupUi(self)", "def create(self, parent):\n self.widget = QFrame(parent)", "def __init__(self, parent=None):\n super(ProgressDlg, self).__init__(parent)\n self.setupUi(self)", "def createWidget(self, parent):\n raise NotImplementedError()", "def __init__(self, parent):\r\n\r\n BasicDialog.__init__(self, parent, title=None)", "def __init__(self, parent):\r\n Frame.__init__(self, parent) \r\n \r\n self.parent = parent\r\n self.initUI()", "def __init__(self, parent=None):\n super(StyledInputDialog, self).__init__(parent)\n self.setupUi(self)\n self.input = None", "def __init__(self, parent):\n pass", "def __init__(self, parent):\n pass", "def __init__(self, parent):\n pass", "def __init__(self, parent):\n self._parent = parent", "def __init__(self, parent=None):\n super(RobotSelection, self).__init__(parent)\n self.parent = parent\n self.initUI()", "def __init__(self, parent: View):\n self.parent = parent\n self.root = self.parent.root\n # Content frame\n self.frame = tk.Frame(self.parent.frame)\n # Reference\n self.visible = False", "def __init__(self, parent):", "def __init__(self, parent=None):\n super(QAccountWidget, self).__init__(parent)\n self.setupUi(self)", "def create(self, parent):\n self.widget = QtGui.QScrollArea(parent)", "def __init__(self, parent=None):\n super().__init__(parent)\n\n topLayout = QtGui.QVBoxLayout(self)\n self.setLayout(topLayout)\n self.setTitle(_('Choose export format subtype'))\n self.subtypeButtons = QtGui.QButtonGroup(self)\n self.subtypeButtons.buttonClicked[int].connect(self.setCurrentSubtype)", "def __init__(self):\n self.stack = QWidget()", "def __init__(self, parent=None):\n # Inherited from QMainWindow\n if sys.platform == 'darwin':\n # Workaround for Qt issue on OS X that causes QMainWindow to\n # hide when adding QToolBar, see\n # https://bugreports.qt-project.org/browse/QTBUG-4300\n super(BpMainWindow, self).__init__(parent, Qt.MacWindowToolBarButtonHint)\n else:\n super(BpMainWindow, self).__init__(parent)\n\n # temporary variable\n self._temp_dir = None\n self.is_save_configure = False\n\n # pre-define a model variable\n self.model = None", "def __init__(self, parent: QWidget):\n super().__init__(parent)\n DiagramFieldView.__diagram_field = self\n\n self.__list: List[DiagramView] = []\n self.__dialog: Dialog = None\n self.__diagram_layout: QVBoxLayout = QVBoxLayout()\n self.__button_layout: QHBoxLayout = QHBoxLayout()\n 
self.__start_button: StartButtonView = StartButtonView()\n self.__maximize_button: QPushButton = QPushButton()\n\n self.__diagram_group: QtWidgets.QGroupBox = QtWidgets.QGroupBox(self)\n self.__group_layout: QtWidgets.QVBoxLayout = QtWidgets.QVBoxLayout(self.__diagram_group)\n self.__stretch_widget: QtWidgets.QWidget = QtWidgets.QWidget(self)\n self.__diagram_count: int = 0\n\n self.__start_button.start_signal.connect(self.__clear_diagrams)\n self.__maximize_button.clicked.connect(self.__maximize_on_click)\n ManagerModel.set_diagram_notifier(self)\n self.__init_ui()", "def __init__(self, parent=None):\n super(SelfCarryAddressSeek, self).__init__(parent)\n self.setupUi(self)", "def create(self, parent):\n self.widget = QImageView(parent)", "def __init__(self, window: QWidget, parent=None):\n QWidget.__init__(self, parent)\n\n self._window = window\n self._mousePressed = False", "def __init__(self, parent):\n QtGui.QWidget.__init__(self, parent)\n self.ui = Ui_FindReplaceForm()\n self.ui.setupUi(self)\n\n self.ui.errorLabel.setText(\"\")\n\n self.ui.textToFind.textChanged.connect(self.text_to_find_changed)\n self.ui.textToFind.textChanged.connect(self.validate_regexp)\n\n self.ui.regexCheckBox.toggled.connect(self.regexp_selected)\n\n self.ui.findButton.clicked.connect(self.find)\n self.ui.closeButton.clicked.connect(parent.close)\n\n self.ui.replaceButton.clicked.connect(self.replace)\n self.ui.replaceAllButton.clicked.connect(self.replace_all)\n\n self.textedit = None\n self.regexp = QtCore.QRegExp()\n self.textcursor = None", "def __init__(self, parent, frame):\n\t\tself.frame = frame\n\n\t\t# Populate line edit with shot name\n\t\tself.frame.shot_lineEdit.setText(parent.self_name)", "def __init__(self, parent, **kwargs):\n PyGlassWidget.__init__(self, parent, **kwargs)\n\n self.deployBtn.clicked.connect(self._handleDeployClick)\n self.cancelBtn.clicked.connect(self._handleCancelClick)\n\n self._canceled = True\n self._includeEmails = False\n self._buildMessage = u''", "def __init__(self, parent):\n super(MasterDialog, self).__init__(parent)\n self.setupUi(self)\n self.setAttribute(Qt.WA_DeleteOnClose)\n self.ms = parent\n self.devenvironment = self.ms.devenvironment\n if not self.devenvironment:\n self.setCursor(Qt.BlankCursor)\n self.fill_combobox()\n self.employee_id = 0\n # connect the buttons\n self.PB_change.clicked.connect(self.change_clicked)\n self.PB_back.clicked.connect(self.back_clicked)\n self.CB_employee.activated.connect(self.combobox_change)\n self.LE_first_name.clicked.connect(lambda: self.ms.lineedit_clicked(self.LE_first_name))\n self.LE_last_name.clicked.connect(lambda: self.ms.lineedit_clicked(self.LE_last_name))", "def __init__(self, parent=None):\n super(CommentDialog, self).__init__(parent)\n self.createDialog()\n self.createConnections()", "def __init__(self, *args, **kwargs):\n\n\t\t\tLOGGER.debug(\"> Initializing '{0}()' class.\".format(self.__class__.__name__))\n\n\t\t\tsuper(QWidget, self).__init__(*args, **kwargs)\n\n\t\t\tself.__uiFile = file\n\n\t\t\tself.__geometry = None\n\n\t\t\tself.setupUi(self)", "def __init__(self, parent=None):\n super(MainWindow, self).__init__(parent)\n self.setupUi(self)\n self.path=''\n self.bool=0\n self.child=0\n self.click=\" \"", "def __init__(self, parent):\r\n\r\n pre = wx.PrePyWindow()\r\n \r\n self._tabs = None\r\n self._rect = wx.Rect(0, 0, 200, 200)\r\n self._tab_ctrl_height = 20\r\n self._tab_rect = wx.Rect() \r\n self._parent = parent\r\n \r\n self.PostCreate(pre)", "def __init__(self, parent=None):\n 
super(MainWindow, self).__init__(parent)\n self.setupUi(self)\n \n self.setWindowTitle(\"RPI HMI - pH Debug\") # Title creation", "def __init__(self, parent=None):\n QtGui.QWidget.__init__(self, parent)\n self.setText(\"The document has been modified.\")\n self.setInformativeText(\"What do you want to do?\")\n self.setStandardButtons(QtGui.QMessageBox.Save | QtGui.QMessageBox.Discard |\n QtGui.QMessageBox.Cancel)\n self.setDefaultButton(QtGui.QMessageBox.Save)\n self.setWindowTitle(\"Changes have been made\")\n self.setWindowIcon(QtGui.QIcon(\"data/images/question.png\"))", "def __init__(self, parent=None):\n super().__init__()\n\n self.parent = parent\n\n # plot object, can be 2D or 3D\n self.plt = None", "def __init__(self, *args, **kwargs):\n\n super(FlirCameraWidget, self).__init__(*args, **kwargs)\n #self.loadForm()\n self.window = Ui_MainWindow()\n self.window.setupUi(self)\n\n self.initUI()\n Styles(self)", "def __init__(self, parent):\n # parent is the main frame of PyCorrFit\n self.parent = parent\n ## MYID\n # This ID is given by the parent for an instance of this class\n self.MyID = None\n ## Wrapping\n curvedict, labels = self.GetCurvedict()\n self.labels = labels\n self.Selector = UserSelectCurves(parent, curvedict,\n wrapper=self, labels=labels)\n # This is necessary for parent to deselect and select the tool\n # in the tools menu.\n self.Bind = self.Selector.Bind\n if self.parent.notebook.GetPageCount() == 0:\n self.Selector.sp.Disable()", "def __init__(self, parent=None):\n super().__init__(parent)\n # print(self.__dict__.keys())\n\n # print(self.__dict__.keys(), '\\n\\n')\n #\n # print(self.__dict__['menu'].__dict__.keys())\n # print(self.__dict__['menu']['leftMenu'])\n # self._viewbox.fftCheck.setObjectName(\"fftCheck\")\n\n # self.viewAll = QtGui.QRadioButton(\"Vue d\\'ensemble\")\n # self.viewAll.triggered.connect(self.autoRange)\n # self.menu.addAction(self.viewAll)\n # print(self.menu.__dict__['leftMenu'].__dict__)", "def __init__(self, parent=None):\n QLabel.__init__(self, parent)\n self.start_animation(self.SLOW_DURATION)", "def __init__(self, parent):\r\n\r\n AuiDockingGuide.__init__(self, parent, style=wx.FRAME_TOOL_WINDOW | wx.STAY_ON_TOP |\r\n wx.FRAME_NO_TASKBAR | wx.NO_BORDER | wx.FRAME_SHAPED,\r\n name=\"auiCenterDockTarget\")\r\n\r\n self.Hide()\r\n\r\n self.CreateShapesWithStyle()\r\n self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM)\r\n \r\n if wx.Platform == \"__WXGTK__\":\r\n self.Bind(wx.EVT_WINDOW_CREATE, self.SetGuideShape)\r\n else:\r\n self.SetGuideShape()\r\n \r\n self.SetSize(self.region.GetBox().GetSize())\r\n\r\n self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnEraseBackground)\r\n self.Bind(wx.EVT_PAINT, self.OnPaint)", "def __init__(self, parent = None):\n QObject.__init__(self, parent)\n self.overlay = None# assume overlay does not exist\n self.box_coordinates = [0,0,0,0]", "def __init__(self, parent):\n self.parent = parent\n myStyle = wx.DEFAULT_DIALOG_STYLE|wx.RESIZE_BORDER|wx.TAB_TRAVERSAL\n wx.Dialog.__init__(self, None, wx.ID_ANY, '%s - Choose your colors' % globs.myName, style=myStyle)\n\n self._initialize()\n\n self.panel1.SetSizerAndFit(self.topBoxSizer)\n self.SetClientSize(self.topBoxSizer.GetSize())\n self.Centre()", "def init_widget(self):", "def create(self, parent):\n self.widget = QtGui.QTreeView(parent)", "def __init__(self, parent=None):\n super(addOne, self).__init__(parent)\n self.setupUi(self)\n \n # 内部自定义初始化操作 ---------------------------------------------------------------------begin\n self.zShow = Fdebug(self);\n 
self.zShow.show();\n self.verticalLayout.insertWidget(0, self.zShow)", "def __init__(self, parent, message):\n\n\t\tself.parent = parent\t\t\t\t# Main window\n\t\tself.message = message \t\t\t\t# Error message\n\t\t# Creat GUI\n\t\tself.initGUI()", "def __init__(self, scene, parent=None, flags=Qt.WindowFlags()):\n super(CustomQFrame, self).__init__(parent=parent, flags=flags)\n self.scene = scene\n self.parent = parent", "def __init__(self):\n super().__init__() # Call the superclass constructor\n self.setupUi(self) # Run the code that creates the UI layout\n self.saveButton.clicked.connect(self.save_change)\n self.pushButton.clicked.connect(self.go_back)", "def _setparent(self, parent):\n\t\tself.parent = parent\n\t\tif self.parent is not None:\n\t\t\tself.parent.components.add(self)", "def __init__(self, parent):\n self.name = \"Base.View\"\n self.parent = parent\n self.Main = parent.Main", "def __init__(self, parent=None):\n super().__init__(parent)\n\n topLayout = QtGui.QVBoxLayout(self)\n self.setLayout(topLayout)\n self.setTitle(_('Choose export format type'))\n\n typeButtons = QtGui.QButtonGroup(self)\n for id, exportType in enumerate(ExportDialog.exportTypes):\n button = QtGui.QRadioButton(ExportDialog.\n exportTypeDescript[exportType])\n typeButtons.addButton(button, id)\n topLayout.addWidget(button)\n if exportType == ExportDialog.currentType:\n button.setChecked(True)\n typeButtons.buttonClicked[int].connect(self.setCurrentType)", "def createWidget(self, QWidget): # real signature unknown; restored from __doc__\n pass", "def __init__(self):\n Form, Window = uic.loadUiType(\"Visuals/QtFiles/ConfirmationMenu.ui\")\n self.window = Window()\n self.form = Form()\n self.form.setupUi(self.window)\n self.centre = self.window.findChild(QWidget, \"centralwidget\")", "def __init__(self, parent: View):\n super().__init__(parent)", "def __init__(self, parent=None):\n super().__init__(parent);\n tabBar=EditableTabBar(parent);\n self.setTabBar(tabBar);", "def __init__(self, parent):\n super(StageInterface, self).__init__(parent)\n\n # Make the sizer\n self.sizer = wx.BoxSizer(wx.HORIZONTAL)\n\n # Make the toolbar, then add it to the sizer\n self.tb = wx.ToolBar(self, style=wx.TB_VERTICAL)\n self.sizer.Add(self.tb, 0, wx.EXPAND)\n\n # Make the visualizer\n self.set_visualization(StageVisualizer(self))\n\n # We want to see what's in the sizer\n self.SetSizer(self.sizer)", "def __init__(self):\n self.view = GuiView(self)\n return", "def __init__(self, parent):\n super().__init__(parent=parent)\n self.setupUi(self)\n self.tableView.setContextMenuPolicy(Qt.CustomContextMenu)\n self.tableView.customContextMenuRequested.connect(self._show_context_menu)\n self.tableView.setSelectionBehavior(QAbstractItemView.SelectItems)", "def __init__(self, parent, **kwargs):\n super(bubblePyHomeWidget, self).__init__(parent, **kwargs)\n self._firstView = True\n\n self.OneBubbleBtn.clicked.connect(self._handleOneBubble)\n self.ManyBubblesBtn.clicked.connect(self._handleManyBubbles)\n\n self._statusBox, statusLayout = self._createElementWidget(self, QtGui.QVBoxLayout, True)\n statusLayout.addStretch()\n\n self._nimbleStatus = NimbleStatusElement(\n self._statusBox,\n disabled=self.mainWindow.appConfig.get(UserConfigEnum.NIMBLE_TEST_STATUS, True) )\n statusLayout.addWidget(self._nimbleStatus)", "def __init__(self, parent):\n FindReplaceDialog.__init__(self, parent)\n self.ui.findReplaceForm.hide_replace_widgets()\n self.setWindowTitle(self.tr(\"Find\"))", "def __init__(self, parent: View):\n super().__init__(parent)\n 
# Crossword title\n self.title = tk.StringVar(self.root)\n self.title_label = tk.Label(self.frame, textvariable=self.title)\n # Crossword author\n self.author = tk.StringVar(self.root)\n self.author_label = tk.Label(self.frame, textvariable=self.author)\n # Dividing line separating the header and other groups\n self.separator = tk.Frame(self.frame)\n # Load\n self.load()", "def __init__(self, parent):\n QtGui.QDialog.__init__(self, parent)\n self.parent = parent\n self.ui = Ui_FileSelectDialog()\n self.ui.setupUi(self)\n mneRoot = os.environ.get('MNE_ROOT', '')\n if mneRoot == \"\":\n mneRoot = self.settings.value(\"MNE_ROOT\", \"\").toString()\n self.ui.lineEditMneRoot.setText(mneRoot)\n self.show()", "def widget(self, p_int): # real signature unknown; restored from __doc__\n return QWidget", "def __init__(self, parent, title=\"Plot\"):\n super(Dialog3DPlot, self).__init__(parent)\n self.setWindowTitle(title)\n self.setWindowFlags(self.windowFlags() & ~QtCore.Qt.WindowContextHelpButtonHint)\n self.layout = QtGui.QHBoxLayout(self)\n self.mayavi = MayaviViewer(self)\n self.layout.addWidget(self.mayavi)\n self.messenger = Messenger()", "def __init__(self,\n\t label:str=None,\n\t variable_name:str=None,\n\t value:typing.Any=None,\n\t parent:QtWidgets.QWidget=None,\n\t on_change:typing.Callable=None):\n\t\tQtWidgets.QWidget.__init__(self, parent=parent)\n\n\t\tif label is None:\n\t\t\tif variable_name is None:\n\t\t\t\tlabel = \"\"\n\t\t\telse:\n\t\t\t\tlabel = app.translator(variable_name)\n\n\t\tself._make_label_widget(label)\n\t\tself.layout = self._formset()\n\t\tself.setLayout(self.layout)\n\t\tself.label = label\n\n\t\tValueMixin.__init__(self, variable_name=variable_name, on_change=on_change, value=value)", "def __init__(self, parent):\n super(sppasFeedbackDialog, self).__init__(\n parent=parent,\n title='{:s} Feedback'.format(sg.__name__),\n style=wx.DEFAULT_FRAME_STYLE)\n\n self.CreateHeader(MSG_HEADER_FEEDBACK, icon_name=\"mail-at\")\n self._create_content()\n self._create_buttons()\n self.Bind(wx.EVT_BUTTON, self._process_event)\n\n self.SetMinSize(wx.Size(480, 320))\n self.LayoutComponents()\n self.CenterOnParent()\n self.FadeIn(deltaN=-8)", "def __init__(self, parent=None):\n super(GUIForm, self).__init__()\n self.ui = Ui_MainWindow()\n self.Summarizer = TextRankSummarizer(\"english\")\n self.ui.setupUi(self)\n self.ui.plainTextEdit.textChanged.connect(self.edit_text)\n self.ui.actionOpen_text_file.triggered.connect(self.open_file_dialog)\n self.ui.actionOpen_text_from_url.triggered.connect(self.open_url)\n self.ui.pushButton.clicked.connect(self.summarize)\n self.ui.plainTextEdit.textChanged.connect(self.edit_text)\n self.ui.spinBox.setMinimum(0)\n self.ui.spinBox.setMaximum(0)\n self.ui.actionFile_menu_help.triggered.connect(\n self.show_file_menu_help)\n self.ui.actionGeneral_help_2.triggered.connect(self.show_general_help)\n self.ui.actionSummarization_help.triggered.connect(\n self.show_summary_help)", "def create_widget(self):\n pass", "def __init__(self):\n super().__init__()\n self.setWindowTitle(\"Ciné Club\")\n self.setup_ui() # Ajout des Widgets.\n self.populate_movies()\n self.setup_connexions() # Création des connexion entre widgets.", "def initialize(self):\n super(QtBaseWidgetComponent, self).initialize()", "def __init__(self, parent=None):\n super(E5PathPicker, self).__init__(parent, useLineEdit=True)", "def _set_parent(self, parent):\n self.__parent = parent", "def __init__(self,*args, **kwargs):\n # super(FCmbMainWindow, self).__init__(*args, **kwargs)\n # 
self.setParent(mayaMainWindow) # ���´�������Ϊmaya���Ӽ�\n # self.setWindowFlags(Qt.Window)\n # self.setupUi(self) # ���и����ʼ������\n # self.connectSignals()\n\n\n super(FCmbMainWindow, self).__init__(*args, **kwargs)\n self.setParent(mayaMainWindow)\n self.setWindowFlags(Qt.Window)\n self.setupUi(self)", "def __init__(self, parent):\n\n super().__init__()\n\n self.color_depth = parent.color_depth\n self.original_hist = parent.calc_histogram()['b']\n self.img_data = parent.data.copy()\n self.current_img_data = None\n\n self.init_ui(self, [self.img_data.min(), self.img_data.max()])\n self.label_txt.setText(\"Choose the range for normalization:\")\n self.setWindowTitle(\"Normalize\")\n\n self.range_slider.left_value_changed.connect(self.update_left_value)\n self.range_slider.right_value_changed.connect(self.update_right_value)\n self.range_slider.range_chagned.connect(self.update_plot_preview)\n\n self.update_left_value()\n self.update_right_value()\n self.update_plot_preview()", "def __init__(self, ui, parent=None):\n super(LogViewer, self).__init__(parent)\n \n self.setWindowIcon(UI.PixmapCache.getIcon(\"eric.png\"))\n \n self.__ui = ui\n \n self.__logViewer = LogViewerEdit(self)\n from .SearchWidget import SearchWidget\n self.__searchWidget = SearchWidget(self.__logViewer, self)\n self.__searchWidget.setSizePolicy(\n QSizePolicy.Fixed, QSizePolicy.Preferred)\n self.__searchWidget.hide()\n \n self.__layout = QHBoxLayout(self)\n self.__layout.setContentsMargins(1, 1, 1, 1)\n self.__layout.addWidget(self.__logViewer)\n self.__layout.addWidget(self.__searchWidget)\n \n self.__searchWidget.searchNext.connect(self.__logViewer.searchNext)\n self.__searchWidget.searchPrevious.connect(self.__logViewer.searchPrev)\n self.__logViewer.searchStringFound.connect(\n self.__searchWidget.searchStringFound)", "def __init__(self, treeView, isChildView=True, parent=None):\n super().__init__(parent)\n self.treeView = treeView\n self.isChildView = isChildView\n self.hideChildView = not globalref.genOptions['InitShowChildPane']\n self.setAcceptRichText(False)\n self.setLineWrapMode(QTextEdit.NoWrap)\n self.setTabChangesFocus(True)\n self.setUndoRedoEnabled(False)\n self.treeSelectAction = QAction(_('Select in Tree'), self)\n self.treeSelectAction.triggered.connect(self.selectLineInTree)\n self.textChanged.connect(self.readChange)", "def parent(self, parent):\n\n self._parent = parent", "def parent(self, parent):\n\n self._parent = parent", "def parent(self, parent):\n\n self._parent = parent", "def parent(self, parent):\n\n self._parent = parent", "def __init__(self, form: dict, help: str, parent=None, is_multi_cif=False):\n super().__init__(parent)\n self.is_multi_cif = is_multi_cif\n self.setParent(parent)\n self.form = form\n # self.setMinimumWidth(400)\n self.mainVLayout = QVBoxLayout(self)\n self.setLayout(self.mainVLayout)\n # self.setStyleSheet('QWidget { border: 2px solid black }')\n self.mainVLayout.setContentsMargins(0, 0, 0, 0)\n self.mainVLayout.setSpacing(0)\n self.mainVLayout.addWidget(QHLine())\n # The button to get help for the respective alert:\n self.helpbutton = QPushButton('Help')\n self.helpbutton.clicked.connect(self.show_help)\n self.response_text_edit = QTextEdit()\n self.alert_label_box()\n self.problem_label_box()\n self.response_label_box()\n self.setAutoFillBackground(False)\n self.help = help\n #\n self.show()", "def __init__(self, parent=None):\n super(E5ComboPathPicker, self).__init__(parent, useLineEdit=False)", "def __init__(self, parent=None):\n super(yQGeoMagFieldSettingDialog, 
self).__init__(parent)\n self.setupUi(self)\n self.setWindowFlags(Qt.WindowStaysOnTopHint)\n self.accept = False", "def __init__(self, parent):\n super(DummyStageInterface, self).__init__(parent)\n\n open_bmp = wx.ArtProvider.GetBitmap(wx.ART_FILE_OPEN, wx.ART_TOOLBAR,\n self.tb_size)\n self.tb.AddLabelTool(wx.ID_OPEN, \"Open\", open_bmp)\n\n self.tb.Realize()", "def create(self, parent):\n self.widget = wxBitmapWidget(parent)" ]
[ "0.80446535", "0.790637", "0.7829028", "0.7829028", "0.7818371", "0.76782995", "0.76558757", "0.76455885", "0.7594716", "0.7536031", "0.75227404", "0.751839", "0.7513868", "0.7468696", "0.74049985", "0.7376986", "0.736306", "0.73554814", "0.7326882", "0.7282546", "0.7282546", "0.7282546", "0.72487694", "0.7183207", "0.7129338", "0.7121242", "0.70437986", "0.699516", "0.6994315", "0.69738275", "0.6963427", "0.6958084", "0.6949107", "0.6894904", "0.68611526", "0.68461186", "0.68147963", "0.6814142", "0.67745167", "0.6774384", "0.6772228", "0.6710709", "0.6692361", "0.66772085", "0.6663515", "0.6657969", "0.66513664", "0.6648562", "0.6641199", "0.6637269", "0.6637215", "0.6633686", "0.6633576", "0.6615191", "0.6575982", "0.6569227", "0.6546616", "0.6538472", "0.6532263", "0.65206826", "0.6509241", "0.65015864", "0.65014327", "0.6490079", "0.64487976", "0.6446116", "0.6440754", "0.6434976", "0.6432431", "0.64320654", "0.64289016", "0.6419205", "0.63946325", "0.63858664", "0.638552", "0.6365156", "0.6360151", "0.63587075", "0.6354245", "0.63523525", "0.6345173", "0.6340043", "0.63308144", "0.6317093", "0.6311447", "0.6310911", "0.6308296", "0.6307222", "0.6307222", "0.6307222", "0.6307222", "0.6295391", "0.6292048", "0.6284787", "0.628297", "0.6273498" ]
0.770407
8
Slot documentation goes here.
def on_pushButton_clicked(self):
    # TODO: not implemented yet
    print("加载数据")
    boston = datasets.load_boston()
    train = boston.data
    target = boston.target
    self.X_train,self.x_test,self.y_train,self.y_true = train_test_split(train,target,test_size=0.2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def slot(self, name):\n raise ClixxException(\"Not implemented.\")", "def play_slot__(self):\n print(\"play_slot__\")\n self.valkkafs_manager.play()", "def add_slot(self, slot):\n slot.set_location(len(self.slots)+1)\n self.slots.append(slot)", "def visit_slot(self, slot_name: str, slot: SlotDefinition) -> None:\n sn = underscore(slot_name)\n self.emit('slot', sn)\n if slot.domain:\n self.emit('domain', sn, underscore(slot.domain))\n if slot.range:\n self.emit('range', sn, underscore(slot.range))\n for p in slot.mixins:\n self.emit('mixin', sn, underscore(p))\n if slot.is_a:\n is_a = underscore(slot.is_a)\n\n #uri = self.owlgen._prop_uri(slot.name)\n uri = f'http://w3id.org/biolink/vocab/{sn}'\n self.emit('has_uri', sn, uri)\n if slot.multivalued:\n self.emit('multivalued', sn)\n if slot.required:\n self.emit('required', sn)", "def time_slot(self):\n pass", "def _slot(self, event) -> None:\n self.events.gui_event(original_event=event)", "def isSlot(self, p_int): # real signature unknown; restored from __doc__\n return False", "def get_slot(self, idx):\n assert (idx >= 0) and (idx < self.size()), \"Index is out of range\"\n return self.slots[idx]", "def slot(self,num):\n if num in ApexAP1000.SLOTS:\n self.__slot=num\n else:\n raise ValueError('Bad slot number !')", "def get_slots(self) -> int:", "def slot_receive(self):\n\n self.start_time = str(time.strftime(\"%Y%m%d%H%M%S\", time.localtime()))\n\n self.setter.start()\n self.getter.start()\n self.timer.start(1)\n # self.plotter.start()", "def perceive(self, slot):\n # right now, let's just observe the first element in the world\n if isinstance(slot, Slot):\n self.percept = slot.get_state()\n else:\n raise RuntimeError(\"Cannot observe other objects right now!\")", "def reserve(self):\n assert self.is_available() is True, \"this slot is not available\"", "def var(self, _type):\n return Slot()", "def slot(self):\n if self.__slot in ApexAP1000.SLOTS:\n return self.__slot\n else:\n raise ValueError('Bad slot number !')", "def slot_changed(self, dummy_sender, dummy_data):\r\n self.do_paint()", "def slot_tick(self, gox, (bid, ask)):\r\n pass", "def slot_wallet_changed(self, gox, _dummy):\r\n pass", "def required_slots(self,tracker) -> List[Text]:", "def slot_owns_changed(self, orderbook, _dummy):\r\n pass", "def on_select_clip_slot(self, clip_slot):\n pass", "def connect(self, slot):\n #if inspect.getargspec(slot).keywords is None:\n # raise exceptions.SlotMustAcceptKeywords(self, slot)\n\n if not self.is_connected(slot):\n self.slots.append(slot)", "def stop_slot__(self):\n print(\"stop_slot__\")\n self.valkkafs_manager.stop()", "def InSlotSet(self,slot,RTSignalIndex):\n ## Make Header\n hex_rep = self.NanonisTCP.make_header('Signals.InSlotSet', body_size=8)\n \n ## Arguments\n hex_rep += self.NanonisTCP.to_hex(slot,4)\n hex_rep += self.NanonisTCP.to_hex(RTSignalIndex,4)\n \n self.NanonisTCP.send_command(hex_rep)\n \n self.NanonisTCP.receive_response(0)", "def remove_slot(self, slot):\n if slot in self.slots:\n idx = self.slots.index(slot)\n # update location of rest slot\n for s in self.slots[idx:]:\n s.set_location(s.get_location()-1)\n self.slots.remove(slot)", "def onConnect(self, *args):\n UAV_Logger.log(\"connect signal recieved\")\n self.socketIO.emit('CORE_SL_SLOTS_SET', Request(self.getModuleName()).newData({\"slots\":self.slots}).toDictionary())", "def _GetSlots(mcs, attrs):\n raise NotImplementedError", "def slots(self):\n return self.__slots.values()", "def time_slot_p(self, c, p):\n pass", "def ParseSlot(self, G, node):\n slot = 
BLNlpClipsSlotMap()\n slot.Name = node\n return slot", "def add_item(self, item):\n\n if item.descriptor in self.__slots:\n slot = self.__slots[item.descriptor]\n slot.quantity += 1\n else:\n self.__slots[item.descriptor] = Slot(item)", "def on_triggered(self, slot):\n self.triggered.connect(slot)", "def update(self):\n if self.size() < 2: return\n idx = random.randint(0, 100) % 3\n if idx < 2:\n slot = self.slots[idx]\n if slot.get_state() == Slot.CLEAN:\n slot.set_state(Slot.DIRTY)\n # self.slots[idx] = slot", "def run(self):\r\n for slot in self.slots:\r\n slot.work()\r\n self.increment()", "def source_slot(self):\n return self._container_slot(True)", "def handle_slot_select(data: bytes) -> Tuple[bytes, str]:\n new_slot = struct.unpack('B', data[:1])[0]\n return data[1:], f'New slot: {new_slot}'", "def get_frame_slot_value(self, slot):\n # FrameObject is a dictionary of slot names and values.\n [slotName, value] = self.pgdb.sendPgdbFnCall('get-frame-slot-value', self.frameid, Symbol(slot))\n if not slotName:\n raise PythonCycError(\"Slot \"+slot+\" does not exist for frame \"+self.frameid+\" from organism (orgid) \"+self.pgdb._orgid)\n # Modify slot name to allow Python's syntax (e.g., '_' instead of '-').\n self.__dict__[convertLispIdtoPythonId(slotName)] = value\n return self", "def __validate_slots(self, slots: int):\n\n if (not isinstance(slots, int)):\n raise ValueError('slots', slots, 'The value must be an integer')\n if (slots <= 0):\n raise ValueError('slots', slots, 'The value must be greater than zero')", "def decide(self, slot_index):\n self._slot_decided[slot_index] = True", "def slot_trade(self, gox, (date, price, volume, typ, own)):\r\n pass", "def setInSlot(self, slot, subindex, roi, value):\n # Determine which subslot this is and prepend it to the totalIndex\n totalIndex = (self._subSlots.index(slot),) + subindex\n # Forward the call to our operator\n self.operator.setInSlot(self, totalIndex, roi, value)", "def size(self):\n return len(self.slots)", "def occupy_slot(self, slot, vehicle):\n self.__occupied_slots__[slot.slot_number] = vehicle.registration_number, vehicle.color\n self.__vehicle_slot_mapping__[vehicle.registration_number] = slot.slot_number\n self.__available_slots__.remove(slot)", "def target_slot(self):\n return self._container_slot(False)", "def intuit(self):\n raise NotImplemented()", "def slot_history_changed(self, history, _dummy):\r\n pass", "def slot_info(self) -> Dict[int, Tuple[int, str, int]]:\n return self._slot_info", "def increment(self):\r\n self.add_output()\r\n for i in range(self.length-1, 0, -1):\r\n self.slots[i].item = self.slots[i-1].item\r\n self.slots[0].item = Item.random()", "def get_num_slots(self):\n # Your code here\n return self.capacity", "def action(self, player, game, slot):\n super().action(player, game, slot)\n card = game.get_sleeping_coder(slot)\n game.set_sleeping_coder(slot,None)\n player.get_coders().add_card(card)\n game.set_action(\"NO_ACTION\")\n game.next_player()", "def get_slot(self, c):\n if 'slot_number' in c.keys():\n slot_number = c['slot_number']\n return slot_number\n else:\n raise ValueError(self.no_selection_msg())\n \n # returnValue(voltage * units.V)", "async def slot(self, ctx: commands.Context, bid: int):\r\n author = ctx.author\r\n guild = ctx.guild\r\n channel = ctx.channel\r\n if await bank.is_global():\r\n valid_bid = await self.config.SLOT_MIN() <= bid <= await self.config.SLOT_MAX()\r\n slot_time = await self.config.SLOT_TIME()\r\n last_slot = await self.config.user(author).last_slot()\r\n 
else:\r\n valid_bid = (\r\n await self.config.guild(guild).SLOT_MIN()\r\n <= bid\r\n <= await self.config.guild(guild).SLOT_MAX()\r\n )\r\n slot_time = await self.config.guild(guild).SLOT_TIME()\r\n last_slot = await self.config.member(author).last_slot()\r\n now = calendar.timegm(ctx.message.created_at.utctimetuple())\r\n\r\n if (now - last_slot) < slot_time:\r\n await ctx.send(_(\"You're on cooldown, try again in a bit.\"))\r\n return\r\n if not valid_bid:\r\n await ctx.send(_(\"That's an invalid bid amount, sorry :/\"))\r\n return\r\n if not await bank.can_spend(author, bid):\r\n await ctx.send(_(\"You ain't got enough money, friend.\"))\r\n return\r\n if await bank.is_global():\r\n await self.config.user(author).last_slot.set(now)\r\n else:\r\n await self.config.member(author).last_slot.set(now)\r\n await self.slot_machine(author, channel, bid)", "def get_slot(self, *args, **kwargs):\n return self._optimizer.get_slot(*args, **kwargs)", "def get_num_slots(self):\n # Your code here\n return len(self.data)", "def slot_before_unload(self, _sender, _data):\r\n pass", "def slotAccept(self, id):\n self.category = self.buttonGroup.find(id).text().ascii()\n self.accept()", "def render_ifslot(self, slotName):\n\t\tdef render(ctx, data):\n\t\t\ttry:\n\t\t\t\tif ctx.locateSlotData(slotName):\n\t\t\t\t\treturn ctx.tag\n\t\t\t\telse:\n\t\t\t\t\treturn \"\"\n\t\t\texcept KeyError:\n\t\t\t\treturn \"\"\n\t\treturn render", "def update_Q(self):", "def on_VI_518p_set_clicked(self):\n # TODO: not implemented yet\n disp_518P()\n self.up_slot = qmdz_const.up_slot\n self.down_slot = qmdz_const.down_slot", "def set_signal(self):\n self.pbSave.clicked.connect(self.save_match_result)\n self.pbCancel.clicked.connect(self.close_match_result)\n self.lwScoreList.doubleClicked.connect(self.display_player_name)\n self.lwAssistList.doubleClicked.connect(self.display_player_name)", "def timeslot(self, timeslot: List[TimeslotTimeslot]):\n\n self._timeslot = timeslot", "def get_slot(item_id):\n if item_id in all_items:\n return all_items[item_id]['slot']\n return None", "def connect_slots(self):\n\n self.set_elf_sig.connect(self.set_elf)\n self.run_target_sig.connect(self.run_target)\n self.continue_target_sig.connect(self.continue_target)\n self.set_breakpoint_sig.connect(self.set_breakpoint)\n self.set_app_mode_sig.connect(self.set_app_mode)\n self.write_hex_value_sig.connect(self.write_word_to_address)", "def connect(self, slot):\r\n if inspect.ismethod(slot):\r\n instance = slot.__self__\r\n function = slot.__func__\r\n if instance not in self._methods:\r\n self._methods[instance] = set()\r\n if function not in self._methods[instance]:\r\n self._methods[instance].add(function)\r\n else:\r\n if slot not in self._functions:\r\n self._functions.add(slot)", "def __init__(self, name=\"\", operator=None, stype=ArrayLike,\n rtype=rtype.SubRegion, value=None, optional=False,\n level=0, nonlane=False):\n # This assertion is here for a reason: default values do NOT work on OutputSlots.\n # (We should probably change that at some point...)\n assert value is None or isinstance(self, InputSlot), \"Only InputSlots can have default values. 
OutputSlots cannot.\"\n \n if not hasattr(self, \"_type\"):\n self._type = None\n if type(stype) == str:\n stype = ArrayLike\n self.partners = []\n self.name = name\n self._optional = optional\n self.operator = operator\n self._real_operator = None # Memoized in getRealOperator()\n\n # in the case of an InputSlot this is the slot to which it is\n # connected\n self.partner = None\n self.level = level\n\n # in the case of an InputSlot one can directly assign a value\n # to a slot instead of connecting it to a partner, this\n # attribute holds the value\n self._value = None\n\n self._defaultValue = value\n\n # Causes calls to setValue to be propagated backwards to the\n # partner slot. Used by the OperatorWrapper.\n self._backpropagate_values = False\n\n self.rtype = rtype\n\n # the MetaDict that holds the slots meta information\n self.meta = MetaDict()\n\n # if level > 0, this holds the sub-Input/Output slots\n self._subSlots = []\n self._stypeType = stype\n\n # the slot type instance\n self.stype = stype(self)\n self.nonlane = nonlane\n\n self._sig_changed = OrderedSignal()\n self._sig_value_changed = OrderedSignal()\n self._sig_ready = OrderedSignal()\n self._sig_unready = OrderedSignal()\n self._sig_dirty = OrderedSignal()\n self._sig_connect = OrderedSignal()\n self._sig_disconnect = OrderedSignal()\n self._sig_resize = OrderedSignal()\n self._sig_resized = OrderedSignal()\n self._sig_remove = OrderedSignal()\n self._sig_removed = OrderedSignal()\n self._sig_preinsertion = OrderedSignal()\n self._sig_inserted = OrderedSignal()\n\n self._resizing = False\n\n self._executionCount = 0\n self._settingUp = False\n self._condition = threading.Condition()\n\n # Allow slots to be sorted by their order of creation for\n # debug output and diagramming purposes.\n self._global_slot_id = Slot._global_counter.next()", "def action(self, player, game, slot): \n super().action(player, game, slot) \n card = player.get_coders().get_card(slot)\n player.get_coders().remove_card(slot)\n game.current_player().get_coders().add_card(card)\n game.set_action(\"NO_ACTION\")\n game.next_player()", "def __init__(self, driver_age=None, vehicle_registration_number=None):\n self.slot_status = 0 # 0 Means the slot is free\n self.vehicle_driver_age = driver_age\n self.vehicle_registration_number = vehicle_registration_number", "def _comboSlot(self, select):\n select = self.sender().itemText(select)\n if qt4:\n qs = str(self.sender().property(\"dom address\").toPyObject())\n else:\n qs = str(self.sender().property(\"dom address\"))\n item = QtXml.QDomElement()\n\n ind = qs.rfind('/')\n ids = qs[ind:]\n\n item = self.qhash[qs].elem.firstChildElement(\"Item\")\n while(item.isNull() is False):\n itemName = item.firstChildElement(\"Name\")\n if(str(itemName.text()).strip() != select):\n activ = item.firstChildElement(\"Activate\")\n while(activ.isNull() is False):\n s = str(activ.text()).strip() + ids\n h = self.qhash[s]\n widget_enabled = h.elem.attribute(\"Enabled\", \"True\")\n widget_visible = h.elem.attribute(\"Visible\", \"Unknown\")\n h.widget.setEnabled(False)\n if(widget_visible != \"Unknown\"):\n h.label.hide()\n h.widget.hide()\n activ = activ.nextSiblingElement(\"Activate\")\n item = item.nextSiblingElement(\"Item\")\n\n item = self.qhash[qs].elem.firstChildElement(\"Item\")\n while(item.isNull() is False):\n itemName = item.firstChildElement(\"Name\")\n if(str(itemName.text()).strip() == select):\n activ = item.firstChildElement(\"Activate\")\n while(activ.isNull() is False):\n s = str(activ.text()).strip() + 
ids\n h = self.qhash[s]\n h.widget.setEnabled(True)\n h.label.show()\n h.widget.show()\n activ = activ.nextSiblingElement(\"Activate\")\n item = item.nextSiblingElement(\"Item\")", "def showSlot(self, number):\n if number <= 0:\n self.log.info('Showing slot \"Negative\" (%d) slot ignored' % number)\n else:\n slot = self.slots[self.number - number]\n if slot.isOccupied():\n slot.removeItem()\n if number == self.target:\n image = self.target_image if self.value < self.target else self.target_image_filled\n self.log.debug('Showing target row: %s, %s, %s' % (self.value, self.target, image))\n elif number <= self.value:\n image = self.fill_image\n elif number <= self.target:\n image = self.blank_image\n else:\n image = self.grey_image\n self.log.debug('Showing %s cell %d as %s (value=%s, target=%s)' % (\n self.name, number, image, self.value, self.target))\n\n slot.addItem(sprite.Sprite(image))", "def disconnect(self, slot):\n if self.is_connected(slot):\n self.slots.pop(self.slots.index(slot))", "def fillSlots(self, name, stan):\n if self._slotData is None:\n self._slotData = {}\n self._slotData[name] = stan", "def slot_ticker(self, dummy_sender, data):\r\n (bid, ask) = data\r\n self.bid = bid\r\n self.ask = ask\r\n self.last_change_type = None\r\n self.last_change_price = 0\r\n self.last_change_volume = 0\r\n self._repair_crossed_asks(ask)\r\n self._repair_crossed_bids(bid)\r\n self.signal_changed(self, None)", "def put_slot_value(self, slot_name_or_uri, value):\n self.memory[slot_name_or_uri] = value\n # print(\"MemoryManager: Implement Me\")\n return self.memory[slot_name_or_uri]", "def slotSelectRow(self, selectDict):\n if self.dataType != selectDict['dataType']:\n return\n\n #print('slotSelectRow() selectDict:', selectDict)\n \n ind = selectDict['index']\n '''\n plotDf = selectDict['plotDf']\n index = plotDf.at[ind, 'index']\n index = int(index)\n '''\n index = ind\n #index -= 1 # !!! MY VISUAL INDEX IN TABLE IS ONE BASED !!!\n column = 0\n modelIndex = self.model().index(index, column)\n self.setCurrentIndex(modelIndex)", "def getSlotforObject(cls, obj):\n if obj.__class__ in restslotattributedict.keys():\n attr = cls.getSlotAttrib(obj)\n if attr is not None:\n peek = getattr(obj, \"peek_\" + attr)\n slot = str(peek()).split('/')[0]\n else:\n return False, \"0\"\n return True, slot", "def __init__(self, slots):\n self.slots = slots\n self.table = []\n for i in range(slots):\n self.table.append([])", "def __init__(self, slots):\n self.slots = slots\n self.table = []\n for i in range(slots):\n self.table.append([])", "def num_slots(self, num_slots):\n\n self._num_slots = num_slots", "def render_ifnoslot(self, slotName):\n\t\t# just repeat the code from ifslot -- this is called frequently,\n\t\t# and additional logic just is not worth it.\n\t\tdef render(ctx, data):\n\t\t\ttry:\n\t\t\t\tif not ctx.locateSlotData(slotName):\n\t\t\t\t\treturn ctx.tag\n\t\t\t\telse:\n\t\t\t\t\treturn \"\"\n\t\t\texcept KeyError:\n\t\t\t\treturn \"\"\n\t\treturn render", "def __len__(self):\n return len(self._subSlots)", "def slot_keypress(self, gox, (key)):\r\n pass", "def Item(self) -> object:", "def Item(self) -> object:", "def extend_slots(self, prediction, item):\n spec = prediction.phrasal_pattern[0]\n slots = prediction.slots\n if is_role_specifier(spec):\n new_slots = copy(slots)\n new_slot = self.role_specifier(spec)\n if new_slot in new_slots:\n raise DuplicateSlotError('Slot %s already exists in %s.' 
% (\n new_slot, prediction))\n new_slots[new_slot] = item\n return new_slots\n else:\n return slots", "def SlotStatus(rangeInfo):\n if rangeInfo >= 3 and rangeInfo <= 10:\n \"\"\" Full Slot \"\"\"\n status = 1\n return status\n else:\n \"\"\" Empty Slot \"\"\"\n status = 0\n return status", "def InSlotsGet(self):\n ## Make Header\n hex_rep = self.NanonisTCP.make_header('Signals.InSlotsGet', body_size=0)\n \n self.NanonisTCP.send_command(hex_rep)\n \n response = self.NanonisTCP.receive_response()\n \n # signals_names_size = self.NanonisTCP.hex_to_int32(response[0:4])\n signals_names_num = self.NanonisTCP.hex_to_int32(response[4:8])\n \n idx = 8\n signal_names = []\n for n in range(signals_names_num):\n size = self.NanonisTCP.hex_to_int32(response[idx:idx+4])\n idx += 4\n signal_name = response[idx:idx+size].decode()\n idx += size\n signal_names.append(signal_name)\n \n signal_indexes = []\n signal_indexes_size = self.NanonisTCP.hex_to_int32(response[idx:idx+4])\n for n in range(signal_indexes_size):\n idx += 4\n signal_index = self.NanonisTCP.hex_to_int32(response[idx:idx+4])\n signal_indexes.append(signal_index)\n \n return [signal_names,signal_indexes]", "def __init__(self):\n self.map = {} #key: val, val, idx\n self.slot = []", "def _lSlot(self, state):\n self._param = QtXml.QDomElement()\n if qt4:\n qs = str(self.sender().property(\"dom address\").toPyObject())\n else:\n qs = str(self.sender().property(\"dom address\"))\n\n ind = qs.rfind('/')\n ids = qs[ind:]\n\n self._param = self.qhash[qs].elem.firstChildElement(\"Activate\")\n while(self._param.isNull() is False):\n qs = str(self._param.text()).strip() + ids\n self.qhash[qs].widget.setEnabled(state)\n widget_visible = self.qhash[qs].elem.attribute(\"Visible\", \"Unknown\")\n if(state is False & widget_visible != \"Unknown\"):\n self.qhash[qs].label.hide()\n self.qhash[qs].widget.hide()\n else:\n self.qhash[qs].label.show()\n self.qhash[qs].widget.show()\n self._param = self._param.nextSiblingElement(\"Activate\")\n\n self._param = self.qhash[qs].elem.firstChildElement(\"Deactivate\")\n while(self._param.isNull() is False):\n qs = str(self._param.text()).strip() + ids\n self.qhash[qs].widget.setEnabled(-state)\n widget_visible = self.qhash[qs].elem.attribute(\"Visible\", \"Unknown\")\n if(state is True & widget_visible != \"Unknown\"):\n self.qhash[qs].label.hide()\n self.qhash[qs].widget.hide()\n else:\n self.qhash[qs].label.show()\n self.qhash[qs].widget.show()\n self._param = self._param.nextSiblingElement(\"Deactivate\")", "def partVirtualHelixRenumberedSlot(self, sender, coord):\n vh = self._virtualHelixHash[coord]\n # check for new number\n # notify VirtualHelixHandleItem to update its label\n # notify VirtualHelix to update its xovers\n # if the VirtualHelix is active, refresh prexovers\n pass", "def extra_slots(self, extra_slots):\n\n self._extra_slots = extra_slots", "def activated(self):", "def __getitem__(self, key):\n if self.level > 0:\n if isinstance(key, tuple):\n assert len(key) > 0\n assert len(key) <= self.level\n if len(key) == 1:\n return self._subSlots[key[0]]\n else:\n return self._subSlots[key[0]][key[1:]]\n return self._subSlots[key]\n else:\n if self.meta.shape is None:\n # Something is wrong. Are we cancelled?\n Request.raise_if_cancelled()\n if not self.ready():\n msg = \"This slot ({}.{}) isn't ready yet, which means \" \\\n \"you can't ask for its data. 
Is it connected?\".format(self.getRealOperator().name, self.name)\n self.logger.error(msg)\n slotInfoMsg = \"Can't get data from slot {}.{} yet.\"\\\n \" It isn't ready.\"\\\n \"First upstream problem slot is: {}\"\\\n \"\".format( self.getRealOperator().__class__, self.name, Slot._findUpstreamProblemSlot(self) )\n self.logger.error(slotInfoMsg)\n raise Slot.SlotNotReadyError(\"Slot isn't ready. See error log.\")\n assert self.meta.shape is not None, \\\n (\"Can't ask for slices of this slot yet:\"\n \" self.meta.shape is None!\"\n \" (operator {} [self={}] slot: {}, key={}\".format(\n self.operator.name, self.operator, self.name, key))\n return self(pslice=key)", "def on_comboBox_qihuozhanghao_currentIndexChanged(self, p0):\n # TODO: not implemented yet\n raise NotImplementedError", "def vacate_slot(self, slot, reg_num):\n self.__occupied_slots__.pop(slot.slot_number)\n self.__vehicle_slot_mapping__.pop(reg_num)\n self.__available_slots__.add(slot)", "def _reserve_bsa_slot(self):\n self._run_cmd(CAPUT + \" IOC:IN20:EV01:EDEFNAME \" + '\"' + self._edef_name + '\"')", "def emit(self, **kwargs):\n for slot in self.slots:\n result = slot(**kwargs)\n\n if result is not None:\n return result", "def slotshow_m(obj, content):\n global _slotshow_m_tbl\n\n return _slotshow(obj, content, _slotshow_m_tbl)", "def slotVolume(self, a0):\n self.sampleGroup.action('volume', value=a0)", "def execute(self, slot, subindex, roi, result):\n totalIndex = (self._subSlots.index(slot),) + subindex\n return self.operator.execute(self, totalIndex, roi, result)", "def frame(self):", "def action(self, player, game, slot):\n card = player.get_coders().get_card(slot)\n player.get_coders().remove_card(slot)\n index = game.get_sleeping_coders().index(None)\n game.set_sleeping_coder(index,card)\n game.set_action(\"NO_ACTION\")\n game.next_player()" ]
[ "0.7317067", "0.6809478", "0.6723371", "0.6583449", "0.63984954", "0.63675296", "0.63494873", "0.6319528", "0.63195074", "0.631188", "0.6152515", "0.6127954", "0.61003345", "0.6093386", "0.60838413", "0.6083497", "0.6056153", "0.60259384", "0.6022296", "0.6002864", "0.59812987", "0.5944142", "0.5926356", "0.5860501", "0.5821294", "0.58022904", "0.579247", "0.57412773", "0.57305163", "0.57208335", "0.56980944", "0.567121", "0.56663936", "0.5663972", "0.566115", "0.5615209", "0.5612441", "0.5593378", "0.5579261", "0.5575075", "0.55743563", "0.5564597", "0.55627596", "0.55434495", "0.5541362", "0.55390745", "0.55162525", "0.5515694", "0.5501102", "0.54724556", "0.54612845", "0.5448487", "0.54467326", "0.5443292", "0.53959346", "0.53911555", "0.53839684", "0.53802204", "0.53784084", "0.5376231", "0.5373751", "0.5369214", "0.53615224", "0.53489757", "0.534799", "0.5344866", "0.53426665", "0.5342361", "0.53222084", "0.5320364", "0.5314095", "0.5309191", "0.5283628", "0.52824056", "0.5276979", "0.5275021", "0.5275021", "0.5260909", "0.5252085", "0.5249819", "0.52494216", "0.5246176", "0.5246176", "0.5220556", "0.5220291", "0.5219783", "0.52178997", "0.52175796", "0.5215104", "0.5212783", "0.51965827", "0.51849735", "0.5182628", "0.5180071", "0.51781875", "0.5176648", "0.5168609", "0.51619506", "0.5156556", "0.51523393", "0.5133018" ]
0.0
-1
Slot documentation goes here.
def on_pushButton_2_clicked(self):
    # TODO: not implemented yet
    print("模型预测")
    # 模型加载
    lr_m = joblib.load("model/LR_model.m")
    rr_m = joblib.load("model/RR_model.m")
    llr_m = joblib.load("model/LLR_model.m")
    knnr_m = joblib.load("model/KNNR_model.m")
    dr_m = joblib.load("model/DR_model.m")
    svmr_m = joblib.load("model/SVMR_model.m")
    try:
        y_LR = lr_m.predict(self.x_test)
        y_RR = rr_m.predict(self.x_test)
        y_LLR = llr_m.predict(self.x_test)
        y_KNNR = knnr_m.predict(self.x_test)
        y_DR = dr_m.predict(self.x_test)
        y_SVMR = svmr_m.predict(self.x_test)
        model_pre = pd.DataFrame({'LinearRegression()':list(y_LR),'Ridge()':list(y_RR),'Lasso()':list(y_LLR), \
            'KNeighborsRegressor()':list(y_KNNR),'DecisionTreeRegressor()':list(y_DR),'SVR()':list(y_SVMR)})
        self.model_plot(self.y_true, model_pre)
        self.graphicsView.setStyleSheet("border-image: url(model_plot.png);")
    except:
        my_button_w3 = QMessageBox.warning(self, "严重警告", '请务必先加载数据然后再点击模型预测!!!', QMessageBox.Ok|QMessageBox.Cancel, QMessageBox.Ok)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def slot(self, name):\n raise ClixxException(\"Not implemented.\")", "def play_slot__(self):\n print(\"play_slot__\")\n self.valkkafs_manager.play()", "def add_slot(self, slot):\n slot.set_location(len(self.slots)+1)\n self.slots.append(slot)", "def visit_slot(self, slot_name: str, slot: SlotDefinition) -> None:\n sn = underscore(slot_name)\n self.emit('slot', sn)\n if slot.domain:\n self.emit('domain', sn, underscore(slot.domain))\n if slot.range:\n self.emit('range', sn, underscore(slot.range))\n for p in slot.mixins:\n self.emit('mixin', sn, underscore(p))\n if slot.is_a:\n is_a = underscore(slot.is_a)\n\n #uri = self.owlgen._prop_uri(slot.name)\n uri = f'http://w3id.org/biolink/vocab/{sn}'\n self.emit('has_uri', sn, uri)\n if slot.multivalued:\n self.emit('multivalued', sn)\n if slot.required:\n self.emit('required', sn)", "def time_slot(self):\n pass", "def _slot(self, event) -> None:\n self.events.gui_event(original_event=event)", "def isSlot(self, p_int): # real signature unknown; restored from __doc__\n return False", "def slot(self,num):\n if num in ApexAP1000.SLOTS:\n self.__slot=num\n else:\n raise ValueError('Bad slot number !')", "def get_slot(self, idx):\n assert (idx >= 0) and (idx < self.size()), \"Index is out of range\"\n return self.slots[idx]", "def get_slots(self) -> int:", "def slot_receive(self):\n\n self.start_time = str(time.strftime(\"%Y%m%d%H%M%S\", time.localtime()))\n\n self.setter.start()\n self.getter.start()\n self.timer.start(1)\n # self.plotter.start()", "def perceive(self, slot):\n # right now, let's just observe the first element in the world\n if isinstance(slot, Slot):\n self.percept = slot.get_state()\n else:\n raise RuntimeError(\"Cannot observe other objects right now!\")", "def reserve(self):\n assert self.is_available() is True, \"this slot is not available\"", "def var(self, _type):\n return Slot()", "def slot(self):\n if self.__slot in ApexAP1000.SLOTS:\n return self.__slot\n else:\n raise ValueError('Bad slot number !')", "def slot_changed(self, dummy_sender, dummy_data):\r\n self.do_paint()", "def slot_tick(self, gox, (bid, ask)):\r\n pass", "def slot_wallet_changed(self, gox, _dummy):\r\n pass", "def required_slots(self,tracker) -> List[Text]:", "def slot_owns_changed(self, orderbook, _dummy):\r\n pass", "def on_select_clip_slot(self, clip_slot):\n pass", "def connect(self, slot):\n #if inspect.getargspec(slot).keywords is None:\n # raise exceptions.SlotMustAcceptKeywords(self, slot)\n\n if not self.is_connected(slot):\n self.slots.append(slot)", "def stop_slot__(self):\n print(\"stop_slot__\")\n self.valkkafs_manager.stop()", "def InSlotSet(self,slot,RTSignalIndex):\n ## Make Header\n hex_rep = self.NanonisTCP.make_header('Signals.InSlotSet', body_size=8)\n \n ## Arguments\n hex_rep += self.NanonisTCP.to_hex(slot,4)\n hex_rep += self.NanonisTCP.to_hex(RTSignalIndex,4)\n \n self.NanonisTCP.send_command(hex_rep)\n \n self.NanonisTCP.receive_response(0)", "def remove_slot(self, slot):\n if slot in self.slots:\n idx = self.slots.index(slot)\n # update location of rest slot\n for s in self.slots[idx:]:\n s.set_location(s.get_location()-1)\n self.slots.remove(slot)", "def onConnect(self, *args):\n UAV_Logger.log(\"connect signal recieved\")\n self.socketIO.emit('CORE_SL_SLOTS_SET', Request(self.getModuleName()).newData({\"slots\":self.slots}).toDictionary())", "def _GetSlots(mcs, attrs):\n raise NotImplementedError", "def slots(self):\n return self.__slots.values()", "def time_slot_p(self, c, p):\n pass", "def ParseSlot(self, G, node):\n slot = 
BLNlpClipsSlotMap()\n slot.Name = node\n return slot", "def add_item(self, item):\n\n if item.descriptor in self.__slots:\n slot = self.__slots[item.descriptor]\n slot.quantity += 1\n else:\n self.__slots[item.descriptor] = Slot(item)", "def on_triggered(self, slot):\n self.triggered.connect(slot)", "def update(self):\n if self.size() < 2: return\n idx = random.randint(0, 100) % 3\n if idx < 2:\n slot = self.slots[idx]\n if slot.get_state() == Slot.CLEAN:\n slot.set_state(Slot.DIRTY)\n # self.slots[idx] = slot", "def run(self):\r\n for slot in self.slots:\r\n slot.work()\r\n self.increment()", "def source_slot(self):\n return self._container_slot(True)", "def handle_slot_select(data: bytes) -> Tuple[bytes, str]:\n new_slot = struct.unpack('B', data[:1])[0]\n return data[1:], f'New slot: {new_slot}'", "def get_frame_slot_value(self, slot):\n # FrameObject is a dictionary of slot names and values.\n [slotName, value] = self.pgdb.sendPgdbFnCall('get-frame-slot-value', self.frameid, Symbol(slot))\n if not slotName:\n raise PythonCycError(\"Slot \"+slot+\" does not exist for frame \"+self.frameid+\" from organism (orgid) \"+self.pgdb._orgid)\n # Modify slot name to allow Python's syntax (e.g., '_' instead of '-').\n self.__dict__[convertLispIdtoPythonId(slotName)] = value\n return self", "def __validate_slots(self, slots: int):\n\n if (not isinstance(slots, int)):\n raise ValueError('slots', slots, 'The value must be an integer')\n if (slots <= 0):\n raise ValueError('slots', slots, 'The value must be greater than zero')", "def decide(self, slot_index):\n self._slot_decided[slot_index] = True", "def slot_trade(self, gox, (date, price, volume, typ, own)):\r\n pass", "def setInSlot(self, slot, subindex, roi, value):\n # Determine which subslot this is and prepend it to the totalIndex\n totalIndex = (self._subSlots.index(slot),) + subindex\n # Forward the call to our operator\n self.operator.setInSlot(self, totalIndex, roi, value)", "def size(self):\n return len(self.slots)", "def occupy_slot(self, slot, vehicle):\n self.__occupied_slots__[slot.slot_number] = vehicle.registration_number, vehicle.color\n self.__vehicle_slot_mapping__[vehicle.registration_number] = slot.slot_number\n self.__available_slots__.remove(slot)", "def target_slot(self):\n return self._container_slot(False)", "def slot_history_changed(self, history, _dummy):\r\n pass", "def intuit(self):\n raise NotImplemented()", "def slot_info(self) -> Dict[int, Tuple[int, str, int]]:\n return self._slot_info", "def increment(self):\r\n self.add_output()\r\n for i in range(self.length-1, 0, -1):\r\n self.slots[i].item = self.slots[i-1].item\r\n self.slots[0].item = Item.random()", "def get_num_slots(self):\n # Your code here\n return self.capacity", "def action(self, player, game, slot):\n super().action(player, game, slot)\n card = game.get_sleeping_coder(slot)\n game.set_sleeping_coder(slot,None)\n player.get_coders().add_card(card)\n game.set_action(\"NO_ACTION\")\n game.next_player()", "def get_slot(self, c):\n if 'slot_number' in c.keys():\n slot_number = c['slot_number']\n return slot_number\n else:\n raise ValueError(self.no_selection_msg())\n \n # returnValue(voltage * units.V)", "async def slot(self, ctx: commands.Context, bid: int):\r\n author = ctx.author\r\n guild = ctx.guild\r\n channel = ctx.channel\r\n if await bank.is_global():\r\n valid_bid = await self.config.SLOT_MIN() <= bid <= await self.config.SLOT_MAX()\r\n slot_time = await self.config.SLOT_TIME()\r\n last_slot = await self.config.user(author).last_slot()\r\n 
else:\r\n valid_bid = (\r\n await self.config.guild(guild).SLOT_MIN()\r\n <= bid\r\n <= await self.config.guild(guild).SLOT_MAX()\r\n )\r\n slot_time = await self.config.guild(guild).SLOT_TIME()\r\n last_slot = await self.config.member(author).last_slot()\r\n now = calendar.timegm(ctx.message.created_at.utctimetuple())\r\n\r\n if (now - last_slot) < slot_time:\r\n await ctx.send(_(\"You're on cooldown, try again in a bit.\"))\r\n return\r\n if not valid_bid:\r\n await ctx.send(_(\"That's an invalid bid amount, sorry :/\"))\r\n return\r\n if not await bank.can_spend(author, bid):\r\n await ctx.send(_(\"You ain't got enough money, friend.\"))\r\n return\r\n if await bank.is_global():\r\n await self.config.user(author).last_slot.set(now)\r\n else:\r\n await self.config.member(author).last_slot.set(now)\r\n await self.slot_machine(author, channel, bid)", "def get_slot(self, *args, **kwargs):\n return self._optimizer.get_slot(*args, **kwargs)", "def get_num_slots(self):\n # Your code here\n return len(self.data)", "def slot_before_unload(self, _sender, _data):\r\n pass", "def slotAccept(self, id):\n self.category = self.buttonGroup.find(id).text().ascii()\n self.accept()", "def render_ifslot(self, slotName):\n\t\tdef render(ctx, data):\n\t\t\ttry:\n\t\t\t\tif ctx.locateSlotData(slotName):\n\t\t\t\t\treturn ctx.tag\n\t\t\t\telse:\n\t\t\t\t\treturn \"\"\n\t\t\texcept KeyError:\n\t\t\t\treturn \"\"\n\t\treturn render", "def on_VI_518p_set_clicked(self):\n # TODO: not implemented yet\n disp_518P()\n self.up_slot = qmdz_const.up_slot\n self.down_slot = qmdz_const.down_slot", "def update_Q(self):", "def set_signal(self):\n self.pbSave.clicked.connect(self.save_match_result)\n self.pbCancel.clicked.connect(self.close_match_result)\n self.lwScoreList.doubleClicked.connect(self.display_player_name)\n self.lwAssistList.doubleClicked.connect(self.display_player_name)", "def timeslot(self, timeslot: List[TimeslotTimeslot]):\n\n self._timeslot = timeslot", "def get_slot(item_id):\n if item_id in all_items:\n return all_items[item_id]['slot']\n return None", "def connect_slots(self):\n\n self.set_elf_sig.connect(self.set_elf)\n self.run_target_sig.connect(self.run_target)\n self.continue_target_sig.connect(self.continue_target)\n self.set_breakpoint_sig.connect(self.set_breakpoint)\n self.set_app_mode_sig.connect(self.set_app_mode)\n self.write_hex_value_sig.connect(self.write_word_to_address)", "def connect(self, slot):\r\n if inspect.ismethod(slot):\r\n instance = slot.__self__\r\n function = slot.__func__\r\n if instance not in self._methods:\r\n self._methods[instance] = set()\r\n if function not in self._methods[instance]:\r\n self._methods[instance].add(function)\r\n else:\r\n if slot not in self._functions:\r\n self._functions.add(slot)", "def __init__(self, name=\"\", operator=None, stype=ArrayLike,\n rtype=rtype.SubRegion, value=None, optional=False,\n level=0, nonlane=False):\n # This assertion is here for a reason: default values do NOT work on OutputSlots.\n # (We should probably change that at some point...)\n assert value is None or isinstance(self, InputSlot), \"Only InputSlots can have default values. 
OutputSlots cannot.\"\n \n if not hasattr(self, \"_type\"):\n self._type = None\n if type(stype) == str:\n stype = ArrayLike\n self.partners = []\n self.name = name\n self._optional = optional\n self.operator = operator\n self._real_operator = None # Memoized in getRealOperator()\n\n # in the case of an InputSlot this is the slot to which it is\n # connected\n self.partner = None\n self.level = level\n\n # in the case of an InputSlot one can directly assign a value\n # to a slot instead of connecting it to a partner, this\n # attribute holds the value\n self._value = None\n\n self._defaultValue = value\n\n # Causes calls to setValue to be propagated backwards to the\n # partner slot. Used by the OperatorWrapper.\n self._backpropagate_values = False\n\n self.rtype = rtype\n\n # the MetaDict that holds the slots meta information\n self.meta = MetaDict()\n\n # if level > 0, this holds the sub-Input/Output slots\n self._subSlots = []\n self._stypeType = stype\n\n # the slot type instance\n self.stype = stype(self)\n self.nonlane = nonlane\n\n self._sig_changed = OrderedSignal()\n self._sig_value_changed = OrderedSignal()\n self._sig_ready = OrderedSignal()\n self._sig_unready = OrderedSignal()\n self._sig_dirty = OrderedSignal()\n self._sig_connect = OrderedSignal()\n self._sig_disconnect = OrderedSignal()\n self._sig_resize = OrderedSignal()\n self._sig_resized = OrderedSignal()\n self._sig_remove = OrderedSignal()\n self._sig_removed = OrderedSignal()\n self._sig_preinsertion = OrderedSignal()\n self._sig_inserted = OrderedSignal()\n\n self._resizing = False\n\n self._executionCount = 0\n self._settingUp = False\n self._condition = threading.Condition()\n\n # Allow slots to be sorted by their order of creation for\n # debug output and diagramming purposes.\n self._global_slot_id = Slot._global_counter.next()", "def action(self, player, game, slot): \n super().action(player, game, slot) \n card = player.get_coders().get_card(slot)\n player.get_coders().remove_card(slot)\n game.current_player().get_coders().add_card(card)\n game.set_action(\"NO_ACTION\")\n game.next_player()", "def __init__(self, driver_age=None, vehicle_registration_number=None):\n self.slot_status = 0 # 0 Means the slot is free\n self.vehicle_driver_age = driver_age\n self.vehicle_registration_number = vehicle_registration_number", "def _comboSlot(self, select):\n select = self.sender().itemText(select)\n if qt4:\n qs = str(self.sender().property(\"dom address\").toPyObject())\n else:\n qs = str(self.sender().property(\"dom address\"))\n item = QtXml.QDomElement()\n\n ind = qs.rfind('/')\n ids = qs[ind:]\n\n item = self.qhash[qs].elem.firstChildElement(\"Item\")\n while(item.isNull() is False):\n itemName = item.firstChildElement(\"Name\")\n if(str(itemName.text()).strip() != select):\n activ = item.firstChildElement(\"Activate\")\n while(activ.isNull() is False):\n s = str(activ.text()).strip() + ids\n h = self.qhash[s]\n widget_enabled = h.elem.attribute(\"Enabled\", \"True\")\n widget_visible = h.elem.attribute(\"Visible\", \"Unknown\")\n h.widget.setEnabled(False)\n if(widget_visible != \"Unknown\"):\n h.label.hide()\n h.widget.hide()\n activ = activ.nextSiblingElement(\"Activate\")\n item = item.nextSiblingElement(\"Item\")\n\n item = self.qhash[qs].elem.firstChildElement(\"Item\")\n while(item.isNull() is False):\n itemName = item.firstChildElement(\"Name\")\n if(str(itemName.text()).strip() == select):\n activ = item.firstChildElement(\"Activate\")\n while(activ.isNull() is False):\n s = str(activ.text()).strip() + 
ids\n h = self.qhash[s]\n h.widget.setEnabled(True)\n h.label.show()\n h.widget.show()\n activ = activ.nextSiblingElement(\"Activate\")\n item = item.nextSiblingElement(\"Item\")", "def showSlot(self, number):\n if number <= 0:\n self.log.info('Showing slot \"Negative\" (%d) slot ignored' % number)\n else:\n slot = self.slots[self.number - number]\n if slot.isOccupied():\n slot.removeItem()\n if number == self.target:\n image = self.target_image if self.value < self.target else self.target_image_filled\n self.log.debug('Showing target row: %s, %s, %s' % (self.value, self.target, image))\n elif number <= self.value:\n image = self.fill_image\n elif number <= self.target:\n image = self.blank_image\n else:\n image = self.grey_image\n self.log.debug('Showing %s cell %d as %s (value=%s, target=%s)' % (\n self.name, number, image, self.value, self.target))\n\n slot.addItem(sprite.Sprite(image))", "def disconnect(self, slot):\n if self.is_connected(slot):\n self.slots.pop(self.slots.index(slot))", "def fillSlots(self, name, stan):\n if self._slotData is None:\n self._slotData = {}\n self._slotData[name] = stan", "def slot_ticker(self, dummy_sender, data):\r\n (bid, ask) = data\r\n self.bid = bid\r\n self.ask = ask\r\n self.last_change_type = None\r\n self.last_change_price = 0\r\n self.last_change_volume = 0\r\n self._repair_crossed_asks(ask)\r\n self._repair_crossed_bids(bid)\r\n self.signal_changed(self, None)", "def put_slot_value(self, slot_name_or_uri, value):\n self.memory[slot_name_or_uri] = value\n # print(\"MemoryManager: Implement Me\")\n return self.memory[slot_name_or_uri]", "def slotSelectRow(self, selectDict):\n if self.dataType != selectDict['dataType']:\n return\n\n #print('slotSelectRow() selectDict:', selectDict)\n \n ind = selectDict['index']\n '''\n plotDf = selectDict['plotDf']\n index = plotDf.at[ind, 'index']\n index = int(index)\n '''\n index = ind\n #index -= 1 # !!! MY VISUAL INDEX IN TABLE IS ONE BASED !!!\n column = 0\n modelIndex = self.model().index(index, column)\n self.setCurrentIndex(modelIndex)", "def getSlotforObject(cls, obj):\n if obj.__class__ in restslotattributedict.keys():\n attr = cls.getSlotAttrib(obj)\n if attr is not None:\n peek = getattr(obj, \"peek_\" + attr)\n slot = str(peek()).split('/')[0]\n else:\n return False, \"0\"\n return True, slot", "def __init__(self, slots):\n self.slots = slots\n self.table = []\n for i in range(slots):\n self.table.append([])", "def __init__(self, slots):\n self.slots = slots\n self.table = []\n for i in range(slots):\n self.table.append([])", "def num_slots(self, num_slots):\n\n self._num_slots = num_slots", "def render_ifnoslot(self, slotName):\n\t\t# just repeat the code from ifslot -- this is called frequently,\n\t\t# and additional logic just is not worth it.\n\t\tdef render(ctx, data):\n\t\t\ttry:\n\t\t\t\tif not ctx.locateSlotData(slotName):\n\t\t\t\t\treturn ctx.tag\n\t\t\t\telse:\n\t\t\t\t\treturn \"\"\n\t\t\texcept KeyError:\n\t\t\t\treturn \"\"\n\t\treturn render", "def __len__(self):\n return len(self._subSlots)", "def slot_keypress(self, gox, (key)):\r\n pass", "def Item(self) -> object:", "def Item(self) -> object:", "def extend_slots(self, prediction, item):\n spec = prediction.phrasal_pattern[0]\n slots = prediction.slots\n if is_role_specifier(spec):\n new_slots = copy(slots)\n new_slot = self.role_specifier(spec)\n if new_slot in new_slots:\n raise DuplicateSlotError('Slot %s already exists in %s.' 
% (\n new_slot, prediction))\n new_slots[new_slot] = item\n return new_slots\n else:\n return slots", "def SlotStatus(rangeInfo):\n if rangeInfo >= 3 and rangeInfo <= 10:\n \"\"\" Full Slot \"\"\"\n status = 1\n return status\n else:\n \"\"\" Empty Slot \"\"\"\n status = 0\n return status", "def InSlotsGet(self):\n ## Make Header\n hex_rep = self.NanonisTCP.make_header('Signals.InSlotsGet', body_size=0)\n \n self.NanonisTCP.send_command(hex_rep)\n \n response = self.NanonisTCP.receive_response()\n \n # signals_names_size = self.NanonisTCP.hex_to_int32(response[0:4])\n signals_names_num = self.NanonisTCP.hex_to_int32(response[4:8])\n \n idx = 8\n signal_names = []\n for n in range(signals_names_num):\n size = self.NanonisTCP.hex_to_int32(response[idx:idx+4])\n idx += 4\n signal_name = response[idx:idx+size].decode()\n idx += size\n signal_names.append(signal_name)\n \n signal_indexes = []\n signal_indexes_size = self.NanonisTCP.hex_to_int32(response[idx:idx+4])\n for n in range(signal_indexes_size):\n idx += 4\n signal_index = self.NanonisTCP.hex_to_int32(response[idx:idx+4])\n signal_indexes.append(signal_index)\n \n return [signal_names,signal_indexes]", "def _lSlot(self, state):\n self._param = QtXml.QDomElement()\n if qt4:\n qs = str(self.sender().property(\"dom address\").toPyObject())\n else:\n qs = str(self.sender().property(\"dom address\"))\n\n ind = qs.rfind('/')\n ids = qs[ind:]\n\n self._param = self.qhash[qs].elem.firstChildElement(\"Activate\")\n while(self._param.isNull() is False):\n qs = str(self._param.text()).strip() + ids\n self.qhash[qs].widget.setEnabled(state)\n widget_visible = self.qhash[qs].elem.attribute(\"Visible\", \"Unknown\")\n if(state is False & widget_visible != \"Unknown\"):\n self.qhash[qs].label.hide()\n self.qhash[qs].widget.hide()\n else:\n self.qhash[qs].label.show()\n self.qhash[qs].widget.show()\n self._param = self._param.nextSiblingElement(\"Activate\")\n\n self._param = self.qhash[qs].elem.firstChildElement(\"Deactivate\")\n while(self._param.isNull() is False):\n qs = str(self._param.text()).strip() + ids\n self.qhash[qs].widget.setEnabled(-state)\n widget_visible = self.qhash[qs].elem.attribute(\"Visible\", \"Unknown\")\n if(state is True & widget_visible != \"Unknown\"):\n self.qhash[qs].label.hide()\n self.qhash[qs].widget.hide()\n else:\n self.qhash[qs].label.show()\n self.qhash[qs].widget.show()\n self._param = self._param.nextSiblingElement(\"Deactivate\")", "def __init__(self):\n self.map = {} #key: val, val, idx\n self.slot = []", "def partVirtualHelixRenumberedSlot(self, sender, coord):\n vh = self._virtualHelixHash[coord]\n # check for new number\n # notify VirtualHelixHandleItem to update its label\n # notify VirtualHelix to update its xovers\n # if the VirtualHelix is active, refresh prexovers\n pass", "def extra_slots(self, extra_slots):\n\n self._extra_slots = extra_slots", "def activated(self):", "def __getitem__(self, key):\n if self.level > 0:\n if isinstance(key, tuple):\n assert len(key) > 0\n assert len(key) <= self.level\n if len(key) == 1:\n return self._subSlots[key[0]]\n else:\n return self._subSlots[key[0]][key[1:]]\n return self._subSlots[key]\n else:\n if self.meta.shape is None:\n # Something is wrong. Are we cancelled?\n Request.raise_if_cancelled()\n if not self.ready():\n msg = \"This slot ({}.{}) isn't ready yet, which means \" \\\n \"you can't ask for its data. 
Is it connected?\".format(self.getRealOperator().name, self.name)\n self.logger.error(msg)\n slotInfoMsg = \"Can't get data from slot {}.{} yet.\"\\\n \" It isn't ready.\"\\\n \"First upstream problem slot is: {}\"\\\n \"\".format( self.getRealOperator().__class__, self.name, Slot._findUpstreamProblemSlot(self) )\n self.logger.error(slotInfoMsg)\n raise Slot.SlotNotReadyError(\"Slot isn't ready. See error log.\")\n assert self.meta.shape is not None, \\\n (\"Can't ask for slices of this slot yet:\"\n \" self.meta.shape is None!\"\n \" (operator {} [self={}] slot: {}, key={}\".format(\n self.operator.name, self.operator, self.name, key))\n return self(pslice=key)", "def on_comboBox_qihuozhanghao_currentIndexChanged(self, p0):\n # TODO: not implemented yet\n raise NotImplementedError", "def vacate_slot(self, slot, reg_num):\n self.__occupied_slots__.pop(slot.slot_number)\n self.__vehicle_slot_mapping__.pop(reg_num)\n self.__available_slots__.add(slot)", "def _reserve_bsa_slot(self):\n self._run_cmd(CAPUT + \" IOC:IN20:EV01:EDEFNAME \" + '\"' + self._edef_name + '\"')", "def emit(self, **kwargs):\n for slot in self.slots:\n result = slot(**kwargs)\n\n if result is not None:\n return result", "def slotshow_m(obj, content):\n global _slotshow_m_tbl\n\n return _slotshow(obj, content, _slotshow_m_tbl)", "def slotVolume(self, a0):\n self.sampleGroup.action('volume', value=a0)", "def execute(self, slot, subindex, roi, result):\n totalIndex = (self._subSlots.index(slot),) + subindex\n return self.operator.execute(self, totalIndex, roi, result)", "def frame(self):", "def action(self, player, game, slot):\n card = player.get_coders().get_card(slot)\n player.get_coders().remove_card(slot)\n index = game.get_sleeping_coders().index(None)\n game.set_sleeping_coder(index,card)\n game.set_action(\"NO_ACTION\")\n game.next_player()" ]
[ "0.73193145", "0.6811926", "0.6725808", "0.65843606", "0.6397118", "0.6368233", "0.63534164", "0.63223046", "0.6321543", "0.6312873", "0.6151129", "0.6127017", "0.6102243", "0.60954756", "0.60865855", "0.6085341", "0.60555625", "0.6027709", "0.6023868", "0.60039294", "0.5981614", "0.5944901", "0.5928264", "0.5860642", "0.58225965", "0.5803388", "0.57931894", "0.5743234", "0.57300717", "0.57230735", "0.5700275", "0.56719446", "0.56676686", "0.56638163", "0.5662287", "0.56184554", "0.5609259", "0.55965775", "0.5580088", "0.5575003", "0.5574187", "0.5566322", "0.55628324", "0.554456", "0.55376935", "0.55371886", "0.55152655", "0.55149907", "0.55011916", "0.5471193", "0.54608166", "0.54495156", "0.54477423", "0.5443887", "0.5396864", "0.5391362", "0.53841025", "0.5380693", "0.5379568", "0.5375968", "0.5374344", "0.53722537", "0.53629327", "0.5348422", "0.5348001", "0.53435796", "0.53432775", "0.5343046", "0.5324689", "0.5320415", "0.53172785", "0.5309329", "0.5282788", "0.528094", "0.5278367", "0.52753556", "0.52753556", "0.5262484", "0.5252292", "0.5249807", "0.52497274", "0.52403206", "0.52403206", "0.5223128", "0.5221164", "0.52208847", "0.5218931", "0.5216089", "0.52156943", "0.5213589", "0.5192793", "0.51838696", "0.5183027", "0.51815283", "0.51802087", "0.51758593", "0.5171057", "0.5164619", "0.5154298", "0.51483667", "0.5131844" ]
0.0
-1
Slot documentation goes here.
def on_action_triggered(self):
    # TODO: not implemented yet
    print('打开')
    my_button_open = QMessageBox.about(self, '打开', '点击我打开某些文件')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def slot(self, name):\n raise ClixxException(\"Not implemented.\")", "def play_slot__(self):\n print(\"play_slot__\")\n self.valkkafs_manager.play()", "def add_slot(self, slot):\n slot.set_location(len(self.slots)+1)\n self.slots.append(slot)", "def visit_slot(self, slot_name: str, slot: SlotDefinition) -> None:\n sn = underscore(slot_name)\n self.emit('slot', sn)\n if slot.domain:\n self.emit('domain', sn, underscore(slot.domain))\n if slot.range:\n self.emit('range', sn, underscore(slot.range))\n for p in slot.mixins:\n self.emit('mixin', sn, underscore(p))\n if slot.is_a:\n is_a = underscore(slot.is_a)\n\n #uri = self.owlgen._prop_uri(slot.name)\n uri = f'http://w3id.org/biolink/vocab/{sn}'\n self.emit('has_uri', sn, uri)\n if slot.multivalued:\n self.emit('multivalued', sn)\n if slot.required:\n self.emit('required', sn)", "def time_slot(self):\n pass", "def _slot(self, event) -> None:\n self.events.gui_event(original_event=event)", "def isSlot(self, p_int): # real signature unknown; restored from __doc__\n return False", "def slot(self,num):\n if num in ApexAP1000.SLOTS:\n self.__slot=num\n else:\n raise ValueError('Bad slot number !')", "def get_slot(self, idx):\n assert (idx >= 0) and (idx < self.size()), \"Index is out of range\"\n return self.slots[idx]", "def get_slots(self) -> int:", "def slot_receive(self):\n\n self.start_time = str(time.strftime(\"%Y%m%d%H%M%S\", time.localtime()))\n\n self.setter.start()\n self.getter.start()\n self.timer.start(1)\n # self.plotter.start()", "def perceive(self, slot):\n # right now, let's just observe the first element in the world\n if isinstance(slot, Slot):\n self.percept = slot.get_state()\n else:\n raise RuntimeError(\"Cannot observe other objects right now!\")", "def reserve(self):\n assert self.is_available() is True, \"this slot is not available\"", "def var(self, _type):\n return Slot()", "def slot(self):\n if self.__slot in ApexAP1000.SLOTS:\n return self.__slot\n else:\n raise ValueError('Bad slot number !')", "def slot_changed(self, dummy_sender, dummy_data):\r\n self.do_paint()", "def slot_tick(self, gox, (bid, ask)):\r\n pass", "def slot_wallet_changed(self, gox, _dummy):\r\n pass", "def required_slots(self,tracker) -> List[Text]:", "def slot_owns_changed(self, orderbook, _dummy):\r\n pass", "def on_select_clip_slot(self, clip_slot):\n pass", "def connect(self, slot):\n #if inspect.getargspec(slot).keywords is None:\n # raise exceptions.SlotMustAcceptKeywords(self, slot)\n\n if not self.is_connected(slot):\n self.slots.append(slot)", "def stop_slot__(self):\n print(\"stop_slot__\")\n self.valkkafs_manager.stop()", "def InSlotSet(self,slot,RTSignalIndex):\n ## Make Header\n hex_rep = self.NanonisTCP.make_header('Signals.InSlotSet', body_size=8)\n \n ## Arguments\n hex_rep += self.NanonisTCP.to_hex(slot,4)\n hex_rep += self.NanonisTCP.to_hex(RTSignalIndex,4)\n \n self.NanonisTCP.send_command(hex_rep)\n \n self.NanonisTCP.receive_response(0)", "def remove_slot(self, slot):\n if slot in self.slots:\n idx = self.slots.index(slot)\n # update location of rest slot\n for s in self.slots[idx:]:\n s.set_location(s.get_location()-1)\n self.slots.remove(slot)", "def onConnect(self, *args):\n UAV_Logger.log(\"connect signal recieved\")\n self.socketIO.emit('CORE_SL_SLOTS_SET', Request(self.getModuleName()).newData({\"slots\":self.slots}).toDictionary())", "def _GetSlots(mcs, attrs):\n raise NotImplementedError", "def slots(self):\n return self.__slots.values()", "def time_slot_p(self, c, p):\n pass", "def ParseSlot(self, G, node):\n slot = 
BLNlpClipsSlotMap()\n slot.Name = node\n return slot", "def add_item(self, item):\n\n if item.descriptor in self.__slots:\n slot = self.__slots[item.descriptor]\n slot.quantity += 1\n else:\n self.__slots[item.descriptor] = Slot(item)", "def on_triggered(self, slot):\n self.triggered.connect(slot)", "def update(self):\n if self.size() < 2: return\n idx = random.randint(0, 100) % 3\n if idx < 2:\n slot = self.slots[idx]\n if slot.get_state() == Slot.CLEAN:\n slot.set_state(Slot.DIRTY)\n # self.slots[idx] = slot", "def run(self):\r\n for slot in self.slots:\r\n slot.work()\r\n self.increment()", "def source_slot(self):\n return self._container_slot(True)", "def handle_slot_select(data: bytes) -> Tuple[bytes, str]:\n new_slot = struct.unpack('B', data[:1])[0]\n return data[1:], f'New slot: {new_slot}'", "def get_frame_slot_value(self, slot):\n # FrameObject is a dictionary of slot names and values.\n [slotName, value] = self.pgdb.sendPgdbFnCall('get-frame-slot-value', self.frameid, Symbol(slot))\n if not slotName:\n raise PythonCycError(\"Slot \"+slot+\" does not exist for frame \"+self.frameid+\" from organism (orgid) \"+self.pgdb._orgid)\n # Modify slot name to allow Python's syntax (e.g., '_' instead of '-').\n self.__dict__[convertLispIdtoPythonId(slotName)] = value\n return self", "def __validate_slots(self, slots: int):\n\n if (not isinstance(slots, int)):\n raise ValueError('slots', slots, 'The value must be an integer')\n if (slots <= 0):\n raise ValueError('slots', slots, 'The value must be greater than zero')", "def decide(self, slot_index):\n self._slot_decided[slot_index] = True", "def slot_trade(self, gox, (date, price, volume, typ, own)):\r\n pass", "def setInSlot(self, slot, subindex, roi, value):\n # Determine which subslot this is and prepend it to the totalIndex\n totalIndex = (self._subSlots.index(slot),) + subindex\n # Forward the call to our operator\n self.operator.setInSlot(self, totalIndex, roi, value)", "def size(self):\n return len(self.slots)", "def occupy_slot(self, slot, vehicle):\n self.__occupied_slots__[slot.slot_number] = vehicle.registration_number, vehicle.color\n self.__vehicle_slot_mapping__[vehicle.registration_number] = slot.slot_number\n self.__available_slots__.remove(slot)", "def target_slot(self):\n return self._container_slot(False)", "def slot_history_changed(self, history, _dummy):\r\n pass", "def intuit(self):\n raise NotImplemented()", "def slot_info(self) -> Dict[int, Tuple[int, str, int]]:\n return self._slot_info", "def increment(self):\r\n self.add_output()\r\n for i in range(self.length-1, 0, -1):\r\n self.slots[i].item = self.slots[i-1].item\r\n self.slots[0].item = Item.random()", "def get_num_slots(self):\n # Your code here\n return self.capacity", "def action(self, player, game, slot):\n super().action(player, game, slot)\n card = game.get_sleeping_coder(slot)\n game.set_sleeping_coder(slot,None)\n player.get_coders().add_card(card)\n game.set_action(\"NO_ACTION\")\n game.next_player()", "def get_slot(self, c):\n if 'slot_number' in c.keys():\n slot_number = c['slot_number']\n return slot_number\n else:\n raise ValueError(self.no_selection_msg())\n \n # returnValue(voltage * units.V)", "async def slot(self, ctx: commands.Context, bid: int):\r\n author = ctx.author\r\n guild = ctx.guild\r\n channel = ctx.channel\r\n if await bank.is_global():\r\n valid_bid = await self.config.SLOT_MIN() <= bid <= await self.config.SLOT_MAX()\r\n slot_time = await self.config.SLOT_TIME()\r\n last_slot = await self.config.user(author).last_slot()\r\n 
else:\r\n valid_bid = (\r\n await self.config.guild(guild).SLOT_MIN()\r\n <= bid\r\n <= await self.config.guild(guild).SLOT_MAX()\r\n )\r\n slot_time = await self.config.guild(guild).SLOT_TIME()\r\n last_slot = await self.config.member(author).last_slot()\r\n now = calendar.timegm(ctx.message.created_at.utctimetuple())\r\n\r\n if (now - last_slot) < slot_time:\r\n await ctx.send(_(\"You're on cooldown, try again in a bit.\"))\r\n return\r\n if not valid_bid:\r\n await ctx.send(_(\"That's an invalid bid amount, sorry :/\"))\r\n return\r\n if not await bank.can_spend(author, bid):\r\n await ctx.send(_(\"You ain't got enough money, friend.\"))\r\n return\r\n if await bank.is_global():\r\n await self.config.user(author).last_slot.set(now)\r\n else:\r\n await self.config.member(author).last_slot.set(now)\r\n await self.slot_machine(author, channel, bid)", "def get_slot(self, *args, **kwargs):\n return self._optimizer.get_slot(*args, **kwargs)", "def get_num_slots(self):\n # Your code here\n return len(self.data)", "def slot_before_unload(self, _sender, _data):\r\n pass", "def slotAccept(self, id):\n self.category = self.buttonGroup.find(id).text().ascii()\n self.accept()", "def render_ifslot(self, slotName):\n\t\tdef render(ctx, data):\n\t\t\ttry:\n\t\t\t\tif ctx.locateSlotData(slotName):\n\t\t\t\t\treturn ctx.tag\n\t\t\t\telse:\n\t\t\t\t\treturn \"\"\n\t\t\texcept KeyError:\n\t\t\t\treturn \"\"\n\t\treturn render", "def on_VI_518p_set_clicked(self):\n # TODO: not implemented yet\n disp_518P()\n self.up_slot = qmdz_const.up_slot\n self.down_slot = qmdz_const.down_slot", "def update_Q(self):", "def set_signal(self):\n self.pbSave.clicked.connect(self.save_match_result)\n self.pbCancel.clicked.connect(self.close_match_result)\n self.lwScoreList.doubleClicked.connect(self.display_player_name)\n self.lwAssistList.doubleClicked.connect(self.display_player_name)", "def timeslot(self, timeslot: List[TimeslotTimeslot]):\n\n self._timeslot = timeslot", "def get_slot(item_id):\n if item_id in all_items:\n return all_items[item_id]['slot']\n return None", "def connect_slots(self):\n\n self.set_elf_sig.connect(self.set_elf)\n self.run_target_sig.connect(self.run_target)\n self.continue_target_sig.connect(self.continue_target)\n self.set_breakpoint_sig.connect(self.set_breakpoint)\n self.set_app_mode_sig.connect(self.set_app_mode)\n self.write_hex_value_sig.connect(self.write_word_to_address)", "def connect(self, slot):\r\n if inspect.ismethod(slot):\r\n instance = slot.__self__\r\n function = slot.__func__\r\n if instance not in self._methods:\r\n self._methods[instance] = set()\r\n if function not in self._methods[instance]:\r\n self._methods[instance].add(function)\r\n else:\r\n if slot not in self._functions:\r\n self._functions.add(slot)", "def __init__(self, name=\"\", operator=None, stype=ArrayLike,\n rtype=rtype.SubRegion, value=None, optional=False,\n level=0, nonlane=False):\n # This assertion is here for a reason: default values do NOT work on OutputSlots.\n # (We should probably change that at some point...)\n assert value is None or isinstance(self, InputSlot), \"Only InputSlots can have default values. 
OutputSlots cannot.\"\n \n if not hasattr(self, \"_type\"):\n self._type = None\n if type(stype) == str:\n stype = ArrayLike\n self.partners = []\n self.name = name\n self._optional = optional\n self.operator = operator\n self._real_operator = None # Memoized in getRealOperator()\n\n # in the case of an InputSlot this is the slot to which it is\n # connected\n self.partner = None\n self.level = level\n\n # in the case of an InputSlot one can directly assign a value\n # to a slot instead of connecting it to a partner, this\n # attribute holds the value\n self._value = None\n\n self._defaultValue = value\n\n # Causes calls to setValue to be propagated backwards to the\n # partner slot. Used by the OperatorWrapper.\n self._backpropagate_values = False\n\n self.rtype = rtype\n\n # the MetaDict that holds the slots meta information\n self.meta = MetaDict()\n\n # if level > 0, this holds the sub-Input/Output slots\n self._subSlots = []\n self._stypeType = stype\n\n # the slot type instance\n self.stype = stype(self)\n self.nonlane = nonlane\n\n self._sig_changed = OrderedSignal()\n self._sig_value_changed = OrderedSignal()\n self._sig_ready = OrderedSignal()\n self._sig_unready = OrderedSignal()\n self._sig_dirty = OrderedSignal()\n self._sig_connect = OrderedSignal()\n self._sig_disconnect = OrderedSignal()\n self._sig_resize = OrderedSignal()\n self._sig_resized = OrderedSignal()\n self._sig_remove = OrderedSignal()\n self._sig_removed = OrderedSignal()\n self._sig_preinsertion = OrderedSignal()\n self._sig_inserted = OrderedSignal()\n\n self._resizing = False\n\n self._executionCount = 0\n self._settingUp = False\n self._condition = threading.Condition()\n\n # Allow slots to be sorted by their order of creation for\n # debug output and diagramming purposes.\n self._global_slot_id = Slot._global_counter.next()", "def action(self, player, game, slot): \n super().action(player, game, slot) \n card = player.get_coders().get_card(slot)\n player.get_coders().remove_card(slot)\n game.current_player().get_coders().add_card(card)\n game.set_action(\"NO_ACTION\")\n game.next_player()", "def __init__(self, driver_age=None, vehicle_registration_number=None):\n self.slot_status = 0 # 0 Means the slot is free\n self.vehicle_driver_age = driver_age\n self.vehicle_registration_number = vehicle_registration_number", "def _comboSlot(self, select):\n select = self.sender().itemText(select)\n if qt4:\n qs = str(self.sender().property(\"dom address\").toPyObject())\n else:\n qs = str(self.sender().property(\"dom address\"))\n item = QtXml.QDomElement()\n\n ind = qs.rfind('/')\n ids = qs[ind:]\n\n item = self.qhash[qs].elem.firstChildElement(\"Item\")\n while(item.isNull() is False):\n itemName = item.firstChildElement(\"Name\")\n if(str(itemName.text()).strip() != select):\n activ = item.firstChildElement(\"Activate\")\n while(activ.isNull() is False):\n s = str(activ.text()).strip() + ids\n h = self.qhash[s]\n widget_enabled = h.elem.attribute(\"Enabled\", \"True\")\n widget_visible = h.elem.attribute(\"Visible\", \"Unknown\")\n h.widget.setEnabled(False)\n if(widget_visible != \"Unknown\"):\n h.label.hide()\n h.widget.hide()\n activ = activ.nextSiblingElement(\"Activate\")\n item = item.nextSiblingElement(\"Item\")\n\n item = self.qhash[qs].elem.firstChildElement(\"Item\")\n while(item.isNull() is False):\n itemName = item.firstChildElement(\"Name\")\n if(str(itemName.text()).strip() == select):\n activ = item.firstChildElement(\"Activate\")\n while(activ.isNull() is False):\n s = str(activ.text()).strip() + 
ids\n h = self.qhash[s]\n h.widget.setEnabled(True)\n h.label.show()\n h.widget.show()\n activ = activ.nextSiblingElement(\"Activate\")\n item = item.nextSiblingElement(\"Item\")", "def showSlot(self, number):\n if number <= 0:\n self.log.info('Showing slot \"Negative\" (%d) slot ignored' % number)\n else:\n slot = self.slots[self.number - number]\n if slot.isOccupied():\n slot.removeItem()\n if number == self.target:\n image = self.target_image if self.value < self.target else self.target_image_filled\n self.log.debug('Showing target row: %s, %s, %s' % (self.value, self.target, image))\n elif number <= self.value:\n image = self.fill_image\n elif number <= self.target:\n image = self.blank_image\n else:\n image = self.grey_image\n self.log.debug('Showing %s cell %d as %s (value=%s, target=%s)' % (\n self.name, number, image, self.value, self.target))\n\n slot.addItem(sprite.Sprite(image))", "def disconnect(self, slot):\n if self.is_connected(slot):\n self.slots.pop(self.slots.index(slot))", "def fillSlots(self, name, stan):\n if self._slotData is None:\n self._slotData = {}\n self._slotData[name] = stan", "def slot_ticker(self, dummy_sender, data):\r\n (bid, ask) = data\r\n self.bid = bid\r\n self.ask = ask\r\n self.last_change_type = None\r\n self.last_change_price = 0\r\n self.last_change_volume = 0\r\n self._repair_crossed_asks(ask)\r\n self._repair_crossed_bids(bid)\r\n self.signal_changed(self, None)", "def put_slot_value(self, slot_name_or_uri, value):\n self.memory[slot_name_or_uri] = value\n # print(\"MemoryManager: Implement Me\")\n return self.memory[slot_name_or_uri]", "def slotSelectRow(self, selectDict):\n if self.dataType != selectDict['dataType']:\n return\n\n #print('slotSelectRow() selectDict:', selectDict)\n \n ind = selectDict['index']\n '''\n plotDf = selectDict['plotDf']\n index = plotDf.at[ind, 'index']\n index = int(index)\n '''\n index = ind\n #index -= 1 # !!! MY VISUAL INDEX IN TABLE IS ONE BASED !!!\n column = 0\n modelIndex = self.model().index(index, column)\n self.setCurrentIndex(modelIndex)", "def getSlotforObject(cls, obj):\n if obj.__class__ in restslotattributedict.keys():\n attr = cls.getSlotAttrib(obj)\n if attr is not None:\n peek = getattr(obj, \"peek_\" + attr)\n slot = str(peek()).split('/')[0]\n else:\n return False, \"0\"\n return True, slot", "def __init__(self, slots):\n self.slots = slots\n self.table = []\n for i in range(slots):\n self.table.append([])", "def __init__(self, slots):\n self.slots = slots\n self.table = []\n for i in range(slots):\n self.table.append([])", "def num_slots(self, num_slots):\n\n self._num_slots = num_slots", "def render_ifnoslot(self, slotName):\n\t\t# just repeat the code from ifslot -- this is called frequently,\n\t\t# and additional logic just is not worth it.\n\t\tdef render(ctx, data):\n\t\t\ttry:\n\t\t\t\tif not ctx.locateSlotData(slotName):\n\t\t\t\t\treturn ctx.tag\n\t\t\t\telse:\n\t\t\t\t\treturn \"\"\n\t\t\texcept KeyError:\n\t\t\t\treturn \"\"\n\t\treturn render", "def __len__(self):\n return len(self._subSlots)", "def slot_keypress(self, gox, (key)):\r\n pass", "def Item(self) -> object:", "def Item(self) -> object:", "def extend_slots(self, prediction, item):\n spec = prediction.phrasal_pattern[0]\n slots = prediction.slots\n if is_role_specifier(spec):\n new_slots = copy(slots)\n new_slot = self.role_specifier(spec)\n if new_slot in new_slots:\n raise DuplicateSlotError('Slot %s already exists in %s.' 
% (\n new_slot, prediction))\n new_slots[new_slot] = item\n return new_slots\n else:\n return slots", "def SlotStatus(rangeInfo):\n if rangeInfo >= 3 and rangeInfo <= 10:\n \"\"\" Full Slot \"\"\"\n status = 1\n return status\n else:\n \"\"\" Empty Slot \"\"\"\n status = 0\n return status", "def InSlotsGet(self):\n ## Make Header\n hex_rep = self.NanonisTCP.make_header('Signals.InSlotsGet', body_size=0)\n \n self.NanonisTCP.send_command(hex_rep)\n \n response = self.NanonisTCP.receive_response()\n \n # signals_names_size = self.NanonisTCP.hex_to_int32(response[0:4])\n signals_names_num = self.NanonisTCP.hex_to_int32(response[4:8])\n \n idx = 8\n signal_names = []\n for n in range(signals_names_num):\n size = self.NanonisTCP.hex_to_int32(response[idx:idx+4])\n idx += 4\n signal_name = response[idx:idx+size].decode()\n idx += size\n signal_names.append(signal_name)\n \n signal_indexes = []\n signal_indexes_size = self.NanonisTCP.hex_to_int32(response[idx:idx+4])\n for n in range(signal_indexes_size):\n idx += 4\n signal_index = self.NanonisTCP.hex_to_int32(response[idx:idx+4])\n signal_indexes.append(signal_index)\n \n return [signal_names,signal_indexes]", "def _lSlot(self, state):\n self._param = QtXml.QDomElement()\n if qt4:\n qs = str(self.sender().property(\"dom address\").toPyObject())\n else:\n qs = str(self.sender().property(\"dom address\"))\n\n ind = qs.rfind('/')\n ids = qs[ind:]\n\n self._param = self.qhash[qs].elem.firstChildElement(\"Activate\")\n while(self._param.isNull() is False):\n qs = str(self._param.text()).strip() + ids\n self.qhash[qs].widget.setEnabled(state)\n widget_visible = self.qhash[qs].elem.attribute(\"Visible\", \"Unknown\")\n if(state is False & widget_visible != \"Unknown\"):\n self.qhash[qs].label.hide()\n self.qhash[qs].widget.hide()\n else:\n self.qhash[qs].label.show()\n self.qhash[qs].widget.show()\n self._param = self._param.nextSiblingElement(\"Activate\")\n\n self._param = self.qhash[qs].elem.firstChildElement(\"Deactivate\")\n while(self._param.isNull() is False):\n qs = str(self._param.text()).strip() + ids\n self.qhash[qs].widget.setEnabled(-state)\n widget_visible = self.qhash[qs].elem.attribute(\"Visible\", \"Unknown\")\n if(state is True & widget_visible != \"Unknown\"):\n self.qhash[qs].label.hide()\n self.qhash[qs].widget.hide()\n else:\n self.qhash[qs].label.show()\n self.qhash[qs].widget.show()\n self._param = self._param.nextSiblingElement(\"Deactivate\")", "def __init__(self):\n self.map = {} #key: val, val, idx\n self.slot = []", "def partVirtualHelixRenumberedSlot(self, sender, coord):\n vh = self._virtualHelixHash[coord]\n # check for new number\n # notify VirtualHelixHandleItem to update its label\n # notify VirtualHelix to update its xovers\n # if the VirtualHelix is active, refresh prexovers\n pass", "def extra_slots(self, extra_slots):\n\n self._extra_slots = extra_slots", "def activated(self):", "def __getitem__(self, key):\n if self.level > 0:\n if isinstance(key, tuple):\n assert len(key) > 0\n assert len(key) <= self.level\n if len(key) == 1:\n return self._subSlots[key[0]]\n else:\n return self._subSlots[key[0]][key[1:]]\n return self._subSlots[key]\n else:\n if self.meta.shape is None:\n # Something is wrong. Are we cancelled?\n Request.raise_if_cancelled()\n if not self.ready():\n msg = \"This slot ({}.{}) isn't ready yet, which means \" \\\n \"you can't ask for its data. 
Is it connected?\".format(self.getRealOperator().name, self.name)\n self.logger.error(msg)\n slotInfoMsg = \"Can't get data from slot {}.{} yet.\"\\\n \" It isn't ready.\"\\\n \"First upstream problem slot is: {}\"\\\n \"\".format( self.getRealOperator().__class__, self.name, Slot._findUpstreamProblemSlot(self) )\n self.logger.error(slotInfoMsg)\n raise Slot.SlotNotReadyError(\"Slot isn't ready. See error log.\")\n assert self.meta.shape is not None, \\\n (\"Can't ask for slices of this slot yet:\"\n \" self.meta.shape is None!\"\n \" (operator {} [self={}] slot: {}, key={}\".format(\n self.operator.name, self.operator, self.name, key))\n return self(pslice=key)", "def on_comboBox_qihuozhanghao_currentIndexChanged(self, p0):\n # TODO: not implemented yet\n raise NotImplementedError", "def vacate_slot(self, slot, reg_num):\n self.__occupied_slots__.pop(slot.slot_number)\n self.__vehicle_slot_mapping__.pop(reg_num)\n self.__available_slots__.add(slot)", "def _reserve_bsa_slot(self):\n self._run_cmd(CAPUT + \" IOC:IN20:EV01:EDEFNAME \" + '\"' + self._edef_name + '\"')", "def emit(self, **kwargs):\n for slot in self.slots:\n result = slot(**kwargs)\n\n if result is not None:\n return result", "def slotshow_m(obj, content):\n global _slotshow_m_tbl\n\n return _slotshow(obj, content, _slotshow_m_tbl)", "def slotVolume(self, a0):\n self.sampleGroup.action('volume', value=a0)", "def execute(self, slot, subindex, roi, result):\n totalIndex = (self._subSlots.index(slot),) + subindex\n return self.operator.execute(self, totalIndex, roi, result)", "def frame(self):", "def action(self, player, game, slot):\n card = player.get_coders().get_card(slot)\n player.get_coders().remove_card(slot)\n index = game.get_sleeping_coders().index(None)\n game.set_sleeping_coder(index,card)\n game.set_action(\"NO_ACTION\")\n game.next_player()" ]
[ "0.73193145", "0.6811926", "0.6725808", "0.65843606", "0.6397118", "0.6368233", "0.63534164", "0.63223046", "0.6321543", "0.6312873", "0.6151129", "0.6127017", "0.6102243", "0.60954756", "0.60865855", "0.6085341", "0.60555625", "0.6027709", "0.6023868", "0.60039294", "0.5981614", "0.5944901", "0.5928264", "0.5860642", "0.58225965", "0.5803388", "0.57931894", "0.5743234", "0.57300717", "0.57230735", "0.5700275", "0.56719446", "0.56676686", "0.56638163", "0.5662287", "0.56184554", "0.5609259", "0.55965775", "0.5580088", "0.5575003", "0.5574187", "0.5566322", "0.55628324", "0.554456", "0.55376935", "0.55371886", "0.55152655", "0.55149907", "0.55011916", "0.5471193", "0.54608166", "0.54495156", "0.54477423", "0.5443887", "0.5396864", "0.5391362", "0.53841025", "0.5380693", "0.5379568", "0.5375968", "0.5374344", "0.53722537", "0.53629327", "0.5348422", "0.5348001", "0.53435796", "0.53432775", "0.5343046", "0.5324689", "0.5320415", "0.53172785", "0.5309329", "0.5282788", "0.528094", "0.5278367", "0.52753556", "0.52753556", "0.5262484", "0.5252292", "0.5249807", "0.52497274", "0.52403206", "0.52403206", "0.5223128", "0.5221164", "0.52208847", "0.5218931", "0.5216089", "0.52156943", "0.5213589", "0.5192793", "0.51838696", "0.5183027", "0.51815283", "0.51802087", "0.51758593", "0.5171057", "0.5164619", "0.5154298", "0.51483667", "0.5131844" ]
0.0
-1
Slot documentation goes here.
def on_action_2_triggered(self): # TODO: not implemented yet print('关闭') sys.exit(0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def slot(self, name):\n raise ClixxException(\"Not implemented.\")", "def play_slot__(self):\n print(\"play_slot__\")\n self.valkkafs_manager.play()", "def add_slot(self, slot):\n slot.set_location(len(self.slots)+1)\n self.slots.append(slot)", "def visit_slot(self, slot_name: str, slot: SlotDefinition) -> None:\n sn = underscore(slot_name)\n self.emit('slot', sn)\n if slot.domain:\n self.emit('domain', sn, underscore(slot.domain))\n if slot.range:\n self.emit('range', sn, underscore(slot.range))\n for p in slot.mixins:\n self.emit('mixin', sn, underscore(p))\n if slot.is_a:\n is_a = underscore(slot.is_a)\n\n #uri = self.owlgen._prop_uri(slot.name)\n uri = f'http://w3id.org/biolink/vocab/{sn}'\n self.emit('has_uri', sn, uri)\n if slot.multivalued:\n self.emit('multivalued', sn)\n if slot.required:\n self.emit('required', sn)", "def time_slot(self):\n pass", "def _slot(self, event) -> None:\n self.events.gui_event(original_event=event)", "def isSlot(self, p_int): # real signature unknown; restored from __doc__\n return False", "def slot(self,num):\n if num in ApexAP1000.SLOTS:\n self.__slot=num\n else:\n raise ValueError('Bad slot number !')", "def get_slot(self, idx):\n assert (idx >= 0) and (idx < self.size()), \"Index is out of range\"\n return self.slots[idx]", "def get_slots(self) -> int:", "def slot_receive(self):\n\n self.start_time = str(time.strftime(\"%Y%m%d%H%M%S\", time.localtime()))\n\n self.setter.start()\n self.getter.start()\n self.timer.start(1)\n # self.plotter.start()", "def perceive(self, slot):\n # right now, let's just observe the first element in the world\n if isinstance(slot, Slot):\n self.percept = slot.get_state()\n else:\n raise RuntimeError(\"Cannot observe other objects right now!\")", "def reserve(self):\n assert self.is_available() is True, \"this slot is not available\"", "def var(self, _type):\n return Slot()", "def slot(self):\n if self.__slot in ApexAP1000.SLOTS:\n return self.__slot\n else:\n raise ValueError('Bad slot number !')", "def slot_changed(self, dummy_sender, dummy_data):\r\n self.do_paint()", "def slot_tick(self, gox, (bid, ask)):\r\n pass", "def slot_wallet_changed(self, gox, _dummy):\r\n pass", "def required_slots(self,tracker) -> List[Text]:", "def slot_owns_changed(self, orderbook, _dummy):\r\n pass", "def on_select_clip_slot(self, clip_slot):\n pass", "def connect(self, slot):\n #if inspect.getargspec(slot).keywords is None:\n # raise exceptions.SlotMustAcceptKeywords(self, slot)\n\n if not self.is_connected(slot):\n self.slots.append(slot)", "def stop_slot__(self):\n print(\"stop_slot__\")\n self.valkkafs_manager.stop()", "def InSlotSet(self,slot,RTSignalIndex):\n ## Make Header\n hex_rep = self.NanonisTCP.make_header('Signals.InSlotSet', body_size=8)\n \n ## Arguments\n hex_rep += self.NanonisTCP.to_hex(slot,4)\n hex_rep += self.NanonisTCP.to_hex(RTSignalIndex,4)\n \n self.NanonisTCP.send_command(hex_rep)\n \n self.NanonisTCP.receive_response(0)", "def remove_slot(self, slot):\n if slot in self.slots:\n idx = self.slots.index(slot)\n # update location of rest slot\n for s in self.slots[idx:]:\n s.set_location(s.get_location()-1)\n self.slots.remove(slot)", "def onConnect(self, *args):\n UAV_Logger.log(\"connect signal recieved\")\n self.socketIO.emit('CORE_SL_SLOTS_SET', Request(self.getModuleName()).newData({\"slots\":self.slots}).toDictionary())", "def _GetSlots(mcs, attrs):\n raise NotImplementedError", "def slots(self):\n return self.__slots.values()", "def time_slot_p(self, c, p):\n pass", "def ParseSlot(self, G, node):\n slot = 
BLNlpClipsSlotMap()\n slot.Name = node\n return slot", "def add_item(self, item):\n\n if item.descriptor in self.__slots:\n slot = self.__slots[item.descriptor]\n slot.quantity += 1\n else:\n self.__slots[item.descriptor] = Slot(item)", "def on_triggered(self, slot):\n self.triggered.connect(slot)", "def update(self):\n if self.size() < 2: return\n idx = random.randint(0, 100) % 3\n if idx < 2:\n slot = self.slots[idx]\n if slot.get_state() == Slot.CLEAN:\n slot.set_state(Slot.DIRTY)\n # self.slots[idx] = slot", "def run(self):\r\n for slot in self.slots:\r\n slot.work()\r\n self.increment()", "def source_slot(self):\n return self._container_slot(True)", "def handle_slot_select(data: bytes) -> Tuple[bytes, str]:\n new_slot = struct.unpack('B', data[:1])[0]\n return data[1:], f'New slot: {new_slot}'", "def get_frame_slot_value(self, slot):\n # FrameObject is a dictionary of slot names and values.\n [slotName, value] = self.pgdb.sendPgdbFnCall('get-frame-slot-value', self.frameid, Symbol(slot))\n if not slotName:\n raise PythonCycError(\"Slot \"+slot+\" does not exist for frame \"+self.frameid+\" from organism (orgid) \"+self.pgdb._orgid)\n # Modify slot name to allow Python's syntax (e.g., '_' instead of '-').\n self.__dict__[convertLispIdtoPythonId(slotName)] = value\n return self", "def __validate_slots(self, slots: int):\n\n if (not isinstance(slots, int)):\n raise ValueError('slots', slots, 'The value must be an integer')\n if (slots <= 0):\n raise ValueError('slots', slots, 'The value must be greater than zero')", "def decide(self, slot_index):\n self._slot_decided[slot_index] = True", "def slot_trade(self, gox, (date, price, volume, typ, own)):\r\n pass", "def setInSlot(self, slot, subindex, roi, value):\n # Determine which subslot this is and prepend it to the totalIndex\n totalIndex = (self._subSlots.index(slot),) + subindex\n # Forward the call to our operator\n self.operator.setInSlot(self, totalIndex, roi, value)", "def size(self):\n return len(self.slots)", "def occupy_slot(self, slot, vehicle):\n self.__occupied_slots__[slot.slot_number] = vehicle.registration_number, vehicle.color\n self.__vehicle_slot_mapping__[vehicle.registration_number] = slot.slot_number\n self.__available_slots__.remove(slot)", "def target_slot(self):\n return self._container_slot(False)", "def slot_history_changed(self, history, _dummy):\r\n pass", "def intuit(self):\n raise NotImplemented()", "def slot_info(self) -> Dict[int, Tuple[int, str, int]]:\n return self._slot_info", "def increment(self):\r\n self.add_output()\r\n for i in range(self.length-1, 0, -1):\r\n self.slots[i].item = self.slots[i-1].item\r\n self.slots[0].item = Item.random()", "def get_num_slots(self):\n # Your code here\n return self.capacity", "def action(self, player, game, slot):\n super().action(player, game, slot)\n card = game.get_sleeping_coder(slot)\n game.set_sleeping_coder(slot,None)\n player.get_coders().add_card(card)\n game.set_action(\"NO_ACTION\")\n game.next_player()", "def get_slot(self, c):\n if 'slot_number' in c.keys():\n slot_number = c['slot_number']\n return slot_number\n else:\n raise ValueError(self.no_selection_msg())\n \n # returnValue(voltage * units.V)", "async def slot(self, ctx: commands.Context, bid: int):\r\n author = ctx.author\r\n guild = ctx.guild\r\n channel = ctx.channel\r\n if await bank.is_global():\r\n valid_bid = await self.config.SLOT_MIN() <= bid <= await self.config.SLOT_MAX()\r\n slot_time = await self.config.SLOT_TIME()\r\n last_slot = await self.config.user(author).last_slot()\r\n 
else:\r\n valid_bid = (\r\n await self.config.guild(guild).SLOT_MIN()\r\n <= bid\r\n <= await self.config.guild(guild).SLOT_MAX()\r\n )\r\n slot_time = await self.config.guild(guild).SLOT_TIME()\r\n last_slot = await self.config.member(author).last_slot()\r\n now = calendar.timegm(ctx.message.created_at.utctimetuple())\r\n\r\n if (now - last_slot) < slot_time:\r\n await ctx.send(_(\"You're on cooldown, try again in a bit.\"))\r\n return\r\n if not valid_bid:\r\n await ctx.send(_(\"That's an invalid bid amount, sorry :/\"))\r\n return\r\n if not await bank.can_spend(author, bid):\r\n await ctx.send(_(\"You ain't got enough money, friend.\"))\r\n return\r\n if await bank.is_global():\r\n await self.config.user(author).last_slot.set(now)\r\n else:\r\n await self.config.member(author).last_slot.set(now)\r\n await self.slot_machine(author, channel, bid)", "def get_slot(self, *args, **kwargs):\n return self._optimizer.get_slot(*args, **kwargs)", "def get_num_slots(self):\n # Your code here\n return len(self.data)", "def slot_before_unload(self, _sender, _data):\r\n pass", "def slotAccept(self, id):\n self.category = self.buttonGroup.find(id).text().ascii()\n self.accept()", "def render_ifslot(self, slotName):\n\t\tdef render(ctx, data):\n\t\t\ttry:\n\t\t\t\tif ctx.locateSlotData(slotName):\n\t\t\t\t\treturn ctx.tag\n\t\t\t\telse:\n\t\t\t\t\treturn \"\"\n\t\t\texcept KeyError:\n\t\t\t\treturn \"\"\n\t\treturn render", "def on_VI_518p_set_clicked(self):\n # TODO: not implemented yet\n disp_518P()\n self.up_slot = qmdz_const.up_slot\n self.down_slot = qmdz_const.down_slot", "def update_Q(self):", "def set_signal(self):\n self.pbSave.clicked.connect(self.save_match_result)\n self.pbCancel.clicked.connect(self.close_match_result)\n self.lwScoreList.doubleClicked.connect(self.display_player_name)\n self.lwAssistList.doubleClicked.connect(self.display_player_name)", "def timeslot(self, timeslot: List[TimeslotTimeslot]):\n\n self._timeslot = timeslot", "def get_slot(item_id):\n if item_id in all_items:\n return all_items[item_id]['slot']\n return None", "def connect_slots(self):\n\n self.set_elf_sig.connect(self.set_elf)\n self.run_target_sig.connect(self.run_target)\n self.continue_target_sig.connect(self.continue_target)\n self.set_breakpoint_sig.connect(self.set_breakpoint)\n self.set_app_mode_sig.connect(self.set_app_mode)\n self.write_hex_value_sig.connect(self.write_word_to_address)", "def connect(self, slot):\r\n if inspect.ismethod(slot):\r\n instance = slot.__self__\r\n function = slot.__func__\r\n if instance not in self._methods:\r\n self._methods[instance] = set()\r\n if function not in self._methods[instance]:\r\n self._methods[instance].add(function)\r\n else:\r\n if slot not in self._functions:\r\n self._functions.add(slot)", "def __init__(self, name=\"\", operator=None, stype=ArrayLike,\n rtype=rtype.SubRegion, value=None, optional=False,\n level=0, nonlane=False):\n # This assertion is here for a reason: default values do NOT work on OutputSlots.\n # (We should probably change that at some point...)\n assert value is None or isinstance(self, InputSlot), \"Only InputSlots can have default values. 
OutputSlots cannot.\"\n \n if not hasattr(self, \"_type\"):\n self._type = None\n if type(stype) == str:\n stype = ArrayLike\n self.partners = []\n self.name = name\n self._optional = optional\n self.operator = operator\n self._real_operator = None # Memoized in getRealOperator()\n\n # in the case of an InputSlot this is the slot to which it is\n # connected\n self.partner = None\n self.level = level\n\n # in the case of an InputSlot one can directly assign a value\n # to a slot instead of connecting it to a partner, this\n # attribute holds the value\n self._value = None\n\n self._defaultValue = value\n\n # Causes calls to setValue to be propagated backwards to the\n # partner slot. Used by the OperatorWrapper.\n self._backpropagate_values = False\n\n self.rtype = rtype\n\n # the MetaDict that holds the slots meta information\n self.meta = MetaDict()\n\n # if level > 0, this holds the sub-Input/Output slots\n self._subSlots = []\n self._stypeType = stype\n\n # the slot type instance\n self.stype = stype(self)\n self.nonlane = nonlane\n\n self._sig_changed = OrderedSignal()\n self._sig_value_changed = OrderedSignal()\n self._sig_ready = OrderedSignal()\n self._sig_unready = OrderedSignal()\n self._sig_dirty = OrderedSignal()\n self._sig_connect = OrderedSignal()\n self._sig_disconnect = OrderedSignal()\n self._sig_resize = OrderedSignal()\n self._sig_resized = OrderedSignal()\n self._sig_remove = OrderedSignal()\n self._sig_removed = OrderedSignal()\n self._sig_preinsertion = OrderedSignal()\n self._sig_inserted = OrderedSignal()\n\n self._resizing = False\n\n self._executionCount = 0\n self._settingUp = False\n self._condition = threading.Condition()\n\n # Allow slots to be sorted by their order of creation for\n # debug output and diagramming purposes.\n self._global_slot_id = Slot._global_counter.next()", "def action(self, player, game, slot): \n super().action(player, game, slot) \n card = player.get_coders().get_card(slot)\n player.get_coders().remove_card(slot)\n game.current_player().get_coders().add_card(card)\n game.set_action(\"NO_ACTION\")\n game.next_player()", "def __init__(self, driver_age=None, vehicle_registration_number=None):\n self.slot_status = 0 # 0 Means the slot is free\n self.vehicle_driver_age = driver_age\n self.vehicle_registration_number = vehicle_registration_number", "def _comboSlot(self, select):\n select = self.sender().itemText(select)\n if qt4:\n qs = str(self.sender().property(\"dom address\").toPyObject())\n else:\n qs = str(self.sender().property(\"dom address\"))\n item = QtXml.QDomElement()\n\n ind = qs.rfind('/')\n ids = qs[ind:]\n\n item = self.qhash[qs].elem.firstChildElement(\"Item\")\n while(item.isNull() is False):\n itemName = item.firstChildElement(\"Name\")\n if(str(itemName.text()).strip() != select):\n activ = item.firstChildElement(\"Activate\")\n while(activ.isNull() is False):\n s = str(activ.text()).strip() + ids\n h = self.qhash[s]\n widget_enabled = h.elem.attribute(\"Enabled\", \"True\")\n widget_visible = h.elem.attribute(\"Visible\", \"Unknown\")\n h.widget.setEnabled(False)\n if(widget_visible != \"Unknown\"):\n h.label.hide()\n h.widget.hide()\n activ = activ.nextSiblingElement(\"Activate\")\n item = item.nextSiblingElement(\"Item\")\n\n item = self.qhash[qs].elem.firstChildElement(\"Item\")\n while(item.isNull() is False):\n itemName = item.firstChildElement(\"Name\")\n if(str(itemName.text()).strip() == select):\n activ = item.firstChildElement(\"Activate\")\n while(activ.isNull() is False):\n s = str(activ.text()).strip() + 
ids\n h = self.qhash[s]\n h.widget.setEnabled(True)\n h.label.show()\n h.widget.show()\n activ = activ.nextSiblingElement(\"Activate\")\n item = item.nextSiblingElement(\"Item\")", "def showSlot(self, number):\n if number <= 0:\n self.log.info('Showing slot \"Negative\" (%d) slot ignored' % number)\n else:\n slot = self.slots[self.number - number]\n if slot.isOccupied():\n slot.removeItem()\n if number == self.target:\n image = self.target_image if self.value < self.target else self.target_image_filled\n self.log.debug('Showing target row: %s, %s, %s' % (self.value, self.target, image))\n elif number <= self.value:\n image = self.fill_image\n elif number <= self.target:\n image = self.blank_image\n else:\n image = self.grey_image\n self.log.debug('Showing %s cell %d as %s (value=%s, target=%s)' % (\n self.name, number, image, self.value, self.target))\n\n slot.addItem(sprite.Sprite(image))", "def disconnect(self, slot):\n if self.is_connected(slot):\n self.slots.pop(self.slots.index(slot))", "def fillSlots(self, name, stan):\n if self._slotData is None:\n self._slotData = {}\n self._slotData[name] = stan", "def slot_ticker(self, dummy_sender, data):\r\n (bid, ask) = data\r\n self.bid = bid\r\n self.ask = ask\r\n self.last_change_type = None\r\n self.last_change_price = 0\r\n self.last_change_volume = 0\r\n self._repair_crossed_asks(ask)\r\n self._repair_crossed_bids(bid)\r\n self.signal_changed(self, None)", "def put_slot_value(self, slot_name_or_uri, value):\n self.memory[slot_name_or_uri] = value\n # print(\"MemoryManager: Implement Me\")\n return self.memory[slot_name_or_uri]", "def slotSelectRow(self, selectDict):\n if self.dataType != selectDict['dataType']:\n return\n\n #print('slotSelectRow() selectDict:', selectDict)\n \n ind = selectDict['index']\n '''\n plotDf = selectDict['plotDf']\n index = plotDf.at[ind, 'index']\n index = int(index)\n '''\n index = ind\n #index -= 1 # !!! MY VISUAL INDEX IN TABLE IS ONE BASED !!!\n column = 0\n modelIndex = self.model().index(index, column)\n self.setCurrentIndex(modelIndex)", "def getSlotforObject(cls, obj):\n if obj.__class__ in restslotattributedict.keys():\n attr = cls.getSlotAttrib(obj)\n if attr is not None:\n peek = getattr(obj, \"peek_\" + attr)\n slot = str(peek()).split('/')[0]\n else:\n return False, \"0\"\n return True, slot", "def __init__(self, slots):\n self.slots = slots\n self.table = []\n for i in range(slots):\n self.table.append([])", "def __init__(self, slots):\n self.slots = slots\n self.table = []\n for i in range(slots):\n self.table.append([])", "def num_slots(self, num_slots):\n\n self._num_slots = num_slots", "def render_ifnoslot(self, slotName):\n\t\t# just repeat the code from ifslot -- this is called frequently,\n\t\t# and additional logic just is not worth it.\n\t\tdef render(ctx, data):\n\t\t\ttry:\n\t\t\t\tif not ctx.locateSlotData(slotName):\n\t\t\t\t\treturn ctx.tag\n\t\t\t\telse:\n\t\t\t\t\treturn \"\"\n\t\t\texcept KeyError:\n\t\t\t\treturn \"\"\n\t\treturn render", "def __len__(self):\n return len(self._subSlots)", "def slot_keypress(self, gox, (key)):\r\n pass", "def Item(self) -> object:", "def Item(self) -> object:", "def extend_slots(self, prediction, item):\n spec = prediction.phrasal_pattern[0]\n slots = prediction.slots\n if is_role_specifier(spec):\n new_slots = copy(slots)\n new_slot = self.role_specifier(spec)\n if new_slot in new_slots:\n raise DuplicateSlotError('Slot %s already exists in %s.' 
% (\n new_slot, prediction))\n new_slots[new_slot] = item\n return new_slots\n else:\n return slots", "def SlotStatus(rangeInfo):\n if rangeInfo >= 3 and rangeInfo <= 10:\n \"\"\" Full Slot \"\"\"\n status = 1\n return status\n else:\n \"\"\" Empty Slot \"\"\"\n status = 0\n return status", "def InSlotsGet(self):\n ## Make Header\n hex_rep = self.NanonisTCP.make_header('Signals.InSlotsGet', body_size=0)\n \n self.NanonisTCP.send_command(hex_rep)\n \n response = self.NanonisTCP.receive_response()\n \n # signals_names_size = self.NanonisTCP.hex_to_int32(response[0:4])\n signals_names_num = self.NanonisTCP.hex_to_int32(response[4:8])\n \n idx = 8\n signal_names = []\n for n in range(signals_names_num):\n size = self.NanonisTCP.hex_to_int32(response[idx:idx+4])\n idx += 4\n signal_name = response[idx:idx+size].decode()\n idx += size\n signal_names.append(signal_name)\n \n signal_indexes = []\n signal_indexes_size = self.NanonisTCP.hex_to_int32(response[idx:idx+4])\n for n in range(signal_indexes_size):\n idx += 4\n signal_index = self.NanonisTCP.hex_to_int32(response[idx:idx+4])\n signal_indexes.append(signal_index)\n \n return [signal_names,signal_indexes]", "def _lSlot(self, state):\n self._param = QtXml.QDomElement()\n if qt4:\n qs = str(self.sender().property(\"dom address\").toPyObject())\n else:\n qs = str(self.sender().property(\"dom address\"))\n\n ind = qs.rfind('/')\n ids = qs[ind:]\n\n self._param = self.qhash[qs].elem.firstChildElement(\"Activate\")\n while(self._param.isNull() is False):\n qs = str(self._param.text()).strip() + ids\n self.qhash[qs].widget.setEnabled(state)\n widget_visible = self.qhash[qs].elem.attribute(\"Visible\", \"Unknown\")\n if(state is False & widget_visible != \"Unknown\"):\n self.qhash[qs].label.hide()\n self.qhash[qs].widget.hide()\n else:\n self.qhash[qs].label.show()\n self.qhash[qs].widget.show()\n self._param = self._param.nextSiblingElement(\"Activate\")\n\n self._param = self.qhash[qs].elem.firstChildElement(\"Deactivate\")\n while(self._param.isNull() is False):\n qs = str(self._param.text()).strip() + ids\n self.qhash[qs].widget.setEnabled(-state)\n widget_visible = self.qhash[qs].elem.attribute(\"Visible\", \"Unknown\")\n if(state is True & widget_visible != \"Unknown\"):\n self.qhash[qs].label.hide()\n self.qhash[qs].widget.hide()\n else:\n self.qhash[qs].label.show()\n self.qhash[qs].widget.show()\n self._param = self._param.nextSiblingElement(\"Deactivate\")", "def __init__(self):\n self.map = {} #key: val, val, idx\n self.slot = []", "def partVirtualHelixRenumberedSlot(self, sender, coord):\n vh = self._virtualHelixHash[coord]\n # check for new number\n # notify VirtualHelixHandleItem to update its label\n # notify VirtualHelix to update its xovers\n # if the VirtualHelix is active, refresh prexovers\n pass", "def extra_slots(self, extra_slots):\n\n self._extra_slots = extra_slots", "def activated(self):", "def __getitem__(self, key):\n if self.level > 0:\n if isinstance(key, tuple):\n assert len(key) > 0\n assert len(key) <= self.level\n if len(key) == 1:\n return self._subSlots[key[0]]\n else:\n return self._subSlots[key[0]][key[1:]]\n return self._subSlots[key]\n else:\n if self.meta.shape is None:\n # Something is wrong. Are we cancelled?\n Request.raise_if_cancelled()\n if not self.ready():\n msg = \"This slot ({}.{}) isn't ready yet, which means \" \\\n \"you can't ask for its data. 
Is it connected?\".format(self.getRealOperator().name, self.name)\n self.logger.error(msg)\n slotInfoMsg = \"Can't get data from slot {}.{} yet.\"\\\n \" It isn't ready.\"\\\n \"First upstream problem slot is: {}\"\\\n \"\".format( self.getRealOperator().__class__, self.name, Slot._findUpstreamProblemSlot(self) )\n self.logger.error(slotInfoMsg)\n raise Slot.SlotNotReadyError(\"Slot isn't ready. See error log.\")\n assert self.meta.shape is not None, \\\n (\"Can't ask for slices of this slot yet:\"\n \" self.meta.shape is None!\"\n \" (operator {} [self={}] slot: {}, key={}\".format(\n self.operator.name, self.operator, self.name, key))\n return self(pslice=key)", "def on_comboBox_qihuozhanghao_currentIndexChanged(self, p0):\n # TODO: not implemented yet\n raise NotImplementedError", "def vacate_slot(self, slot, reg_num):\n self.__occupied_slots__.pop(slot.slot_number)\n self.__vehicle_slot_mapping__.pop(reg_num)\n self.__available_slots__.add(slot)", "def _reserve_bsa_slot(self):\n self._run_cmd(CAPUT + \" IOC:IN20:EV01:EDEFNAME \" + '\"' + self._edef_name + '\"')", "def emit(self, **kwargs):\n for slot in self.slots:\n result = slot(**kwargs)\n\n if result is not None:\n return result", "def slotshow_m(obj, content):\n global _slotshow_m_tbl\n\n return _slotshow(obj, content, _slotshow_m_tbl)", "def slotVolume(self, a0):\n self.sampleGroup.action('volume', value=a0)", "def execute(self, slot, subindex, roi, result):\n totalIndex = (self._subSlots.index(slot),) + subindex\n return self.operator.execute(self, totalIndex, roi, result)", "def frame(self):", "def action(self, player, game, slot):\n card = player.get_coders().get_card(slot)\n player.get_coders().remove_card(slot)\n index = game.get_sleeping_coders().index(None)\n game.set_sleeping_coder(index,card)\n game.set_action(\"NO_ACTION\")\n game.next_player()" ]
[ "0.73193145", "0.6811926", "0.6725808", "0.65843606", "0.6397118", "0.6368233", "0.63534164", "0.63223046", "0.6321543", "0.6312873", "0.6151129", "0.6127017", "0.6102243", "0.60954756", "0.60865855", "0.6085341", "0.60555625", "0.6027709", "0.6023868", "0.60039294", "0.5981614", "0.5944901", "0.5928264", "0.5860642", "0.58225965", "0.5803388", "0.57931894", "0.5743234", "0.57300717", "0.57230735", "0.5700275", "0.56719446", "0.56676686", "0.56638163", "0.5662287", "0.56184554", "0.5609259", "0.55965775", "0.5580088", "0.5575003", "0.5574187", "0.5566322", "0.55628324", "0.554456", "0.55376935", "0.55371886", "0.55152655", "0.55149907", "0.55011916", "0.5471193", "0.54608166", "0.54495156", "0.54477423", "0.5443887", "0.5396864", "0.5391362", "0.53841025", "0.5380693", "0.5379568", "0.5375968", "0.5374344", "0.53722537", "0.53629327", "0.5348422", "0.5348001", "0.53435796", "0.53432775", "0.5343046", "0.5324689", "0.5320415", "0.53172785", "0.5309329", "0.5282788", "0.528094", "0.5278367", "0.52753556", "0.52753556", "0.5262484", "0.5252292", "0.5249807", "0.52497274", "0.52403206", "0.52403206", "0.5223128", "0.5221164", "0.52208847", "0.5218931", "0.5216089", "0.52156943", "0.5213589", "0.5192793", "0.51838696", "0.5183027", "0.51815283", "0.51802087", "0.51758593", "0.5171057", "0.5164619", "0.5154298", "0.51483667", "0.5131844" ]
0.0
-1
Slot documentation goes here.
def on_action_3_triggered(self): # TODO: not implemented yet print('联系我们') my_button_con_me = QMessageBox.about(self, '联系我们', '这个位置放的是联系我们的介绍')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def slot(self, name):\n raise ClixxException(\"Not implemented.\")", "def play_slot__(self):\n print(\"play_slot__\")\n self.valkkafs_manager.play()", "def add_slot(self, slot):\n slot.set_location(len(self.slots)+1)\n self.slots.append(slot)", "def visit_slot(self, slot_name: str, slot: SlotDefinition) -> None:\n sn = underscore(slot_name)\n self.emit('slot', sn)\n if slot.domain:\n self.emit('domain', sn, underscore(slot.domain))\n if slot.range:\n self.emit('range', sn, underscore(slot.range))\n for p in slot.mixins:\n self.emit('mixin', sn, underscore(p))\n if slot.is_a:\n is_a = underscore(slot.is_a)\n\n #uri = self.owlgen._prop_uri(slot.name)\n uri = f'http://w3id.org/biolink/vocab/{sn}'\n self.emit('has_uri', sn, uri)\n if slot.multivalued:\n self.emit('multivalued', sn)\n if slot.required:\n self.emit('required', sn)", "def time_slot(self):\n pass", "def _slot(self, event) -> None:\n self.events.gui_event(original_event=event)", "def isSlot(self, p_int): # real signature unknown; restored from __doc__\n return False", "def get_slot(self, idx):\n assert (idx >= 0) and (idx < self.size()), \"Index is out of range\"\n return self.slots[idx]", "def slot(self,num):\n if num in ApexAP1000.SLOTS:\n self.__slot=num\n else:\n raise ValueError('Bad slot number !')", "def get_slots(self) -> int:", "def slot_receive(self):\n\n self.start_time = str(time.strftime(\"%Y%m%d%H%M%S\", time.localtime()))\n\n self.setter.start()\n self.getter.start()\n self.timer.start(1)\n # self.plotter.start()", "def perceive(self, slot):\n # right now, let's just observe the first element in the world\n if isinstance(slot, Slot):\n self.percept = slot.get_state()\n else:\n raise RuntimeError(\"Cannot observe other objects right now!\")", "def reserve(self):\n assert self.is_available() is True, \"this slot is not available\"", "def var(self, _type):\n return Slot()", "def slot(self):\n if self.__slot in ApexAP1000.SLOTS:\n return self.__slot\n else:\n raise ValueError('Bad slot number !')", "def slot_changed(self, dummy_sender, dummy_data):\r\n self.do_paint()", "def slot_tick(self, gox, (bid, ask)):\r\n pass", "def slot_wallet_changed(self, gox, _dummy):\r\n pass", "def required_slots(self,tracker) -> List[Text]:", "def slot_owns_changed(self, orderbook, _dummy):\r\n pass", "def on_select_clip_slot(self, clip_slot):\n pass", "def connect(self, slot):\n #if inspect.getargspec(slot).keywords is None:\n # raise exceptions.SlotMustAcceptKeywords(self, slot)\n\n if not self.is_connected(slot):\n self.slots.append(slot)", "def stop_slot__(self):\n print(\"stop_slot__\")\n self.valkkafs_manager.stop()", "def InSlotSet(self,slot,RTSignalIndex):\n ## Make Header\n hex_rep = self.NanonisTCP.make_header('Signals.InSlotSet', body_size=8)\n \n ## Arguments\n hex_rep += self.NanonisTCP.to_hex(slot,4)\n hex_rep += self.NanonisTCP.to_hex(RTSignalIndex,4)\n \n self.NanonisTCP.send_command(hex_rep)\n \n self.NanonisTCP.receive_response(0)", "def remove_slot(self, slot):\n if slot in self.slots:\n idx = self.slots.index(slot)\n # update location of rest slot\n for s in self.slots[idx:]:\n s.set_location(s.get_location()-1)\n self.slots.remove(slot)", "def onConnect(self, *args):\n UAV_Logger.log(\"connect signal recieved\")\n self.socketIO.emit('CORE_SL_SLOTS_SET', Request(self.getModuleName()).newData({\"slots\":self.slots}).toDictionary())", "def _GetSlots(mcs, attrs):\n raise NotImplementedError", "def slots(self):\n return self.__slots.values()", "def time_slot_p(self, c, p):\n pass", "def ParseSlot(self, G, node):\n slot = 
BLNlpClipsSlotMap()\n slot.Name = node\n return slot", "def add_item(self, item):\n\n if item.descriptor in self.__slots:\n slot = self.__slots[item.descriptor]\n slot.quantity += 1\n else:\n self.__slots[item.descriptor] = Slot(item)", "def on_triggered(self, slot):\n self.triggered.connect(slot)", "def update(self):\n if self.size() < 2: return\n idx = random.randint(0, 100) % 3\n if idx < 2:\n slot = self.slots[idx]\n if slot.get_state() == Slot.CLEAN:\n slot.set_state(Slot.DIRTY)\n # self.slots[idx] = slot", "def run(self):\r\n for slot in self.slots:\r\n slot.work()\r\n self.increment()", "def source_slot(self):\n return self._container_slot(True)", "def handle_slot_select(data: bytes) -> Tuple[bytes, str]:\n new_slot = struct.unpack('B', data[:1])[0]\n return data[1:], f'New slot: {new_slot}'", "def get_frame_slot_value(self, slot):\n # FrameObject is a dictionary of slot names and values.\n [slotName, value] = self.pgdb.sendPgdbFnCall('get-frame-slot-value', self.frameid, Symbol(slot))\n if not slotName:\n raise PythonCycError(\"Slot \"+slot+\" does not exist for frame \"+self.frameid+\" from organism (orgid) \"+self.pgdb._orgid)\n # Modify slot name to allow Python's syntax (e.g., '_' instead of '-').\n self.__dict__[convertLispIdtoPythonId(slotName)] = value\n return self", "def __validate_slots(self, slots: int):\n\n if (not isinstance(slots, int)):\n raise ValueError('slots', slots, 'The value must be an integer')\n if (slots <= 0):\n raise ValueError('slots', slots, 'The value must be greater than zero')", "def decide(self, slot_index):\n self._slot_decided[slot_index] = True", "def slot_trade(self, gox, (date, price, volume, typ, own)):\r\n pass", "def setInSlot(self, slot, subindex, roi, value):\n # Determine which subslot this is and prepend it to the totalIndex\n totalIndex = (self._subSlots.index(slot),) + subindex\n # Forward the call to our operator\n self.operator.setInSlot(self, totalIndex, roi, value)", "def size(self):\n return len(self.slots)", "def occupy_slot(self, slot, vehicle):\n self.__occupied_slots__[slot.slot_number] = vehicle.registration_number, vehicle.color\n self.__vehicle_slot_mapping__[vehicle.registration_number] = slot.slot_number\n self.__available_slots__.remove(slot)", "def target_slot(self):\n return self._container_slot(False)", "def intuit(self):\n raise NotImplemented()", "def slot_history_changed(self, history, _dummy):\r\n pass", "def slot_info(self) -> Dict[int, Tuple[int, str, int]]:\n return self._slot_info", "def increment(self):\r\n self.add_output()\r\n for i in range(self.length-1, 0, -1):\r\n self.slots[i].item = self.slots[i-1].item\r\n self.slots[0].item = Item.random()", "def get_num_slots(self):\n # Your code here\n return self.capacity", "def action(self, player, game, slot):\n super().action(player, game, slot)\n card = game.get_sleeping_coder(slot)\n game.set_sleeping_coder(slot,None)\n player.get_coders().add_card(card)\n game.set_action(\"NO_ACTION\")\n game.next_player()", "def get_slot(self, c):\n if 'slot_number' in c.keys():\n slot_number = c['slot_number']\n return slot_number\n else:\n raise ValueError(self.no_selection_msg())\n \n # returnValue(voltage * units.V)", "async def slot(self, ctx: commands.Context, bid: int):\r\n author = ctx.author\r\n guild = ctx.guild\r\n channel = ctx.channel\r\n if await bank.is_global():\r\n valid_bid = await self.config.SLOT_MIN() <= bid <= await self.config.SLOT_MAX()\r\n slot_time = await self.config.SLOT_TIME()\r\n last_slot = await self.config.user(author).last_slot()\r\n 
else:\r\n valid_bid = (\r\n await self.config.guild(guild).SLOT_MIN()\r\n <= bid\r\n <= await self.config.guild(guild).SLOT_MAX()\r\n )\r\n slot_time = await self.config.guild(guild).SLOT_TIME()\r\n last_slot = await self.config.member(author).last_slot()\r\n now = calendar.timegm(ctx.message.created_at.utctimetuple())\r\n\r\n if (now - last_slot) < slot_time:\r\n await ctx.send(_(\"You're on cooldown, try again in a bit.\"))\r\n return\r\n if not valid_bid:\r\n await ctx.send(_(\"That's an invalid bid amount, sorry :/\"))\r\n return\r\n if not await bank.can_spend(author, bid):\r\n await ctx.send(_(\"You ain't got enough money, friend.\"))\r\n return\r\n if await bank.is_global():\r\n await self.config.user(author).last_slot.set(now)\r\n else:\r\n await self.config.member(author).last_slot.set(now)\r\n await self.slot_machine(author, channel, bid)", "def get_slot(self, *args, **kwargs):\n return self._optimizer.get_slot(*args, **kwargs)", "def get_num_slots(self):\n # Your code here\n return len(self.data)", "def slot_before_unload(self, _sender, _data):\r\n pass", "def slotAccept(self, id):\n self.category = self.buttonGroup.find(id).text().ascii()\n self.accept()", "def render_ifslot(self, slotName):\n\t\tdef render(ctx, data):\n\t\t\ttry:\n\t\t\t\tif ctx.locateSlotData(slotName):\n\t\t\t\t\treturn ctx.tag\n\t\t\t\telse:\n\t\t\t\t\treturn \"\"\n\t\t\texcept KeyError:\n\t\t\t\treturn \"\"\n\t\treturn render", "def update_Q(self):", "def on_VI_518p_set_clicked(self):\n # TODO: not implemented yet\n disp_518P()\n self.up_slot = qmdz_const.up_slot\n self.down_slot = qmdz_const.down_slot", "def set_signal(self):\n self.pbSave.clicked.connect(self.save_match_result)\n self.pbCancel.clicked.connect(self.close_match_result)\n self.lwScoreList.doubleClicked.connect(self.display_player_name)\n self.lwAssistList.doubleClicked.connect(self.display_player_name)", "def timeslot(self, timeslot: List[TimeslotTimeslot]):\n\n self._timeslot = timeslot", "def get_slot(item_id):\n if item_id in all_items:\n return all_items[item_id]['slot']\n return None", "def connect_slots(self):\n\n self.set_elf_sig.connect(self.set_elf)\n self.run_target_sig.connect(self.run_target)\n self.continue_target_sig.connect(self.continue_target)\n self.set_breakpoint_sig.connect(self.set_breakpoint)\n self.set_app_mode_sig.connect(self.set_app_mode)\n self.write_hex_value_sig.connect(self.write_word_to_address)", "def connect(self, slot):\r\n if inspect.ismethod(slot):\r\n instance = slot.__self__\r\n function = slot.__func__\r\n if instance not in self._methods:\r\n self._methods[instance] = set()\r\n if function not in self._methods[instance]:\r\n self._methods[instance].add(function)\r\n else:\r\n if slot not in self._functions:\r\n self._functions.add(slot)", "def __init__(self, name=\"\", operator=None, stype=ArrayLike,\n rtype=rtype.SubRegion, value=None, optional=False,\n level=0, nonlane=False):\n # This assertion is here for a reason: default values do NOT work on OutputSlots.\n # (We should probably change that at some point...)\n assert value is None or isinstance(self, InputSlot), \"Only InputSlots can have default values. 
OutputSlots cannot.\"\n \n if not hasattr(self, \"_type\"):\n self._type = None\n if type(stype) == str:\n stype = ArrayLike\n self.partners = []\n self.name = name\n self._optional = optional\n self.operator = operator\n self._real_operator = None # Memoized in getRealOperator()\n\n # in the case of an InputSlot this is the slot to which it is\n # connected\n self.partner = None\n self.level = level\n\n # in the case of an InputSlot one can directly assign a value\n # to a slot instead of connecting it to a partner, this\n # attribute holds the value\n self._value = None\n\n self._defaultValue = value\n\n # Causes calls to setValue to be propagated backwards to the\n # partner slot. Used by the OperatorWrapper.\n self._backpropagate_values = False\n\n self.rtype = rtype\n\n # the MetaDict that holds the slots meta information\n self.meta = MetaDict()\n\n # if level > 0, this holds the sub-Input/Output slots\n self._subSlots = []\n self._stypeType = stype\n\n # the slot type instance\n self.stype = stype(self)\n self.nonlane = nonlane\n\n self._sig_changed = OrderedSignal()\n self._sig_value_changed = OrderedSignal()\n self._sig_ready = OrderedSignal()\n self._sig_unready = OrderedSignal()\n self._sig_dirty = OrderedSignal()\n self._sig_connect = OrderedSignal()\n self._sig_disconnect = OrderedSignal()\n self._sig_resize = OrderedSignal()\n self._sig_resized = OrderedSignal()\n self._sig_remove = OrderedSignal()\n self._sig_removed = OrderedSignal()\n self._sig_preinsertion = OrderedSignal()\n self._sig_inserted = OrderedSignal()\n\n self._resizing = False\n\n self._executionCount = 0\n self._settingUp = False\n self._condition = threading.Condition()\n\n # Allow slots to be sorted by their order of creation for\n # debug output and diagramming purposes.\n self._global_slot_id = Slot._global_counter.next()", "def action(self, player, game, slot): \n super().action(player, game, slot) \n card = player.get_coders().get_card(slot)\n player.get_coders().remove_card(slot)\n game.current_player().get_coders().add_card(card)\n game.set_action(\"NO_ACTION\")\n game.next_player()", "def __init__(self, driver_age=None, vehicle_registration_number=None):\n self.slot_status = 0 # 0 Means the slot is free\n self.vehicle_driver_age = driver_age\n self.vehicle_registration_number = vehicle_registration_number", "def _comboSlot(self, select):\n select = self.sender().itemText(select)\n if qt4:\n qs = str(self.sender().property(\"dom address\").toPyObject())\n else:\n qs = str(self.sender().property(\"dom address\"))\n item = QtXml.QDomElement()\n\n ind = qs.rfind('/')\n ids = qs[ind:]\n\n item = self.qhash[qs].elem.firstChildElement(\"Item\")\n while(item.isNull() is False):\n itemName = item.firstChildElement(\"Name\")\n if(str(itemName.text()).strip() != select):\n activ = item.firstChildElement(\"Activate\")\n while(activ.isNull() is False):\n s = str(activ.text()).strip() + ids\n h = self.qhash[s]\n widget_enabled = h.elem.attribute(\"Enabled\", \"True\")\n widget_visible = h.elem.attribute(\"Visible\", \"Unknown\")\n h.widget.setEnabled(False)\n if(widget_visible != \"Unknown\"):\n h.label.hide()\n h.widget.hide()\n activ = activ.nextSiblingElement(\"Activate\")\n item = item.nextSiblingElement(\"Item\")\n\n item = self.qhash[qs].elem.firstChildElement(\"Item\")\n while(item.isNull() is False):\n itemName = item.firstChildElement(\"Name\")\n if(str(itemName.text()).strip() == select):\n activ = item.firstChildElement(\"Activate\")\n while(activ.isNull() is False):\n s = str(activ.text()).strip() + 
ids\n h = self.qhash[s]\n h.widget.setEnabled(True)\n h.label.show()\n h.widget.show()\n activ = activ.nextSiblingElement(\"Activate\")\n item = item.nextSiblingElement(\"Item\")", "def showSlot(self, number):\n if number <= 0:\n self.log.info('Showing slot \"Negative\" (%d) slot ignored' % number)\n else:\n slot = self.slots[self.number - number]\n if slot.isOccupied():\n slot.removeItem()\n if number == self.target:\n image = self.target_image if self.value < self.target else self.target_image_filled\n self.log.debug('Showing target row: %s, %s, %s' % (self.value, self.target, image))\n elif number <= self.value:\n image = self.fill_image\n elif number <= self.target:\n image = self.blank_image\n else:\n image = self.grey_image\n self.log.debug('Showing %s cell %d as %s (value=%s, target=%s)' % (\n self.name, number, image, self.value, self.target))\n\n slot.addItem(sprite.Sprite(image))", "def disconnect(self, slot):\n if self.is_connected(slot):\n self.slots.pop(self.slots.index(slot))", "def fillSlots(self, name, stan):\n if self._slotData is None:\n self._slotData = {}\n self._slotData[name] = stan", "def slot_ticker(self, dummy_sender, data):\r\n (bid, ask) = data\r\n self.bid = bid\r\n self.ask = ask\r\n self.last_change_type = None\r\n self.last_change_price = 0\r\n self.last_change_volume = 0\r\n self._repair_crossed_asks(ask)\r\n self._repair_crossed_bids(bid)\r\n self.signal_changed(self, None)", "def put_slot_value(self, slot_name_or_uri, value):\n self.memory[slot_name_or_uri] = value\n # print(\"MemoryManager: Implement Me\")\n return self.memory[slot_name_or_uri]", "def slotSelectRow(self, selectDict):\n if self.dataType != selectDict['dataType']:\n return\n\n #print('slotSelectRow() selectDict:', selectDict)\n \n ind = selectDict['index']\n '''\n plotDf = selectDict['plotDf']\n index = plotDf.at[ind, 'index']\n index = int(index)\n '''\n index = ind\n #index -= 1 # !!! MY VISUAL INDEX IN TABLE IS ONE BASED !!!\n column = 0\n modelIndex = self.model().index(index, column)\n self.setCurrentIndex(modelIndex)", "def getSlotforObject(cls, obj):\n if obj.__class__ in restslotattributedict.keys():\n attr = cls.getSlotAttrib(obj)\n if attr is not None:\n peek = getattr(obj, \"peek_\" + attr)\n slot = str(peek()).split('/')[0]\n else:\n return False, \"0\"\n return True, slot", "def __init__(self, slots):\n self.slots = slots\n self.table = []\n for i in range(slots):\n self.table.append([])", "def __init__(self, slots):\n self.slots = slots\n self.table = []\n for i in range(slots):\n self.table.append([])", "def num_slots(self, num_slots):\n\n self._num_slots = num_slots", "def render_ifnoslot(self, slotName):\n\t\t# just repeat the code from ifslot -- this is called frequently,\n\t\t# and additional logic just is not worth it.\n\t\tdef render(ctx, data):\n\t\t\ttry:\n\t\t\t\tif not ctx.locateSlotData(slotName):\n\t\t\t\t\treturn ctx.tag\n\t\t\t\telse:\n\t\t\t\t\treturn \"\"\n\t\t\texcept KeyError:\n\t\t\t\treturn \"\"\n\t\treturn render", "def __len__(self):\n return len(self._subSlots)", "def slot_keypress(self, gox, (key)):\r\n pass", "def Item(self) -> object:", "def Item(self) -> object:", "def extend_slots(self, prediction, item):\n spec = prediction.phrasal_pattern[0]\n slots = prediction.slots\n if is_role_specifier(spec):\n new_slots = copy(slots)\n new_slot = self.role_specifier(spec)\n if new_slot in new_slots:\n raise DuplicateSlotError('Slot %s already exists in %s.' 
% (\n new_slot, prediction))\n new_slots[new_slot] = item\n return new_slots\n else:\n return slots", "def SlotStatus(rangeInfo):\n if rangeInfo >= 3 and rangeInfo <= 10:\n \"\"\" Full Slot \"\"\"\n status = 1\n return status\n else:\n \"\"\" Empty Slot \"\"\"\n status = 0\n return status", "def InSlotsGet(self):\n ## Make Header\n hex_rep = self.NanonisTCP.make_header('Signals.InSlotsGet', body_size=0)\n \n self.NanonisTCP.send_command(hex_rep)\n \n response = self.NanonisTCP.receive_response()\n \n # signals_names_size = self.NanonisTCP.hex_to_int32(response[0:4])\n signals_names_num = self.NanonisTCP.hex_to_int32(response[4:8])\n \n idx = 8\n signal_names = []\n for n in range(signals_names_num):\n size = self.NanonisTCP.hex_to_int32(response[idx:idx+4])\n idx += 4\n signal_name = response[idx:idx+size].decode()\n idx += size\n signal_names.append(signal_name)\n \n signal_indexes = []\n signal_indexes_size = self.NanonisTCP.hex_to_int32(response[idx:idx+4])\n for n in range(signal_indexes_size):\n idx += 4\n signal_index = self.NanonisTCP.hex_to_int32(response[idx:idx+4])\n signal_indexes.append(signal_index)\n \n return [signal_names,signal_indexes]", "def __init__(self):\n self.map = {} #key: val, val, idx\n self.slot = []", "def _lSlot(self, state):\n self._param = QtXml.QDomElement()\n if qt4:\n qs = str(self.sender().property(\"dom address\").toPyObject())\n else:\n qs = str(self.sender().property(\"dom address\"))\n\n ind = qs.rfind('/')\n ids = qs[ind:]\n\n self._param = self.qhash[qs].elem.firstChildElement(\"Activate\")\n while(self._param.isNull() is False):\n qs = str(self._param.text()).strip() + ids\n self.qhash[qs].widget.setEnabled(state)\n widget_visible = self.qhash[qs].elem.attribute(\"Visible\", \"Unknown\")\n if(state is False & widget_visible != \"Unknown\"):\n self.qhash[qs].label.hide()\n self.qhash[qs].widget.hide()\n else:\n self.qhash[qs].label.show()\n self.qhash[qs].widget.show()\n self._param = self._param.nextSiblingElement(\"Activate\")\n\n self._param = self.qhash[qs].elem.firstChildElement(\"Deactivate\")\n while(self._param.isNull() is False):\n qs = str(self._param.text()).strip() + ids\n self.qhash[qs].widget.setEnabled(-state)\n widget_visible = self.qhash[qs].elem.attribute(\"Visible\", \"Unknown\")\n if(state is True & widget_visible != \"Unknown\"):\n self.qhash[qs].label.hide()\n self.qhash[qs].widget.hide()\n else:\n self.qhash[qs].label.show()\n self.qhash[qs].widget.show()\n self._param = self._param.nextSiblingElement(\"Deactivate\")", "def partVirtualHelixRenumberedSlot(self, sender, coord):\n vh = self._virtualHelixHash[coord]\n # check for new number\n # notify VirtualHelixHandleItem to update its label\n # notify VirtualHelix to update its xovers\n # if the VirtualHelix is active, refresh prexovers\n pass", "def extra_slots(self, extra_slots):\n\n self._extra_slots = extra_slots", "def activated(self):", "def __getitem__(self, key):\n if self.level > 0:\n if isinstance(key, tuple):\n assert len(key) > 0\n assert len(key) <= self.level\n if len(key) == 1:\n return self._subSlots[key[0]]\n else:\n return self._subSlots[key[0]][key[1:]]\n return self._subSlots[key]\n else:\n if self.meta.shape is None:\n # Something is wrong. Are we cancelled?\n Request.raise_if_cancelled()\n if not self.ready():\n msg = \"This slot ({}.{}) isn't ready yet, which means \" \\\n \"you can't ask for its data. 
Is it connected?\".format(self.getRealOperator().name, self.name)\n self.logger.error(msg)\n slotInfoMsg = \"Can't get data from slot {}.{} yet.\"\\\n \" It isn't ready.\"\\\n \"First upstream problem slot is: {}\"\\\n \"\".format( self.getRealOperator().__class__, self.name, Slot._findUpstreamProblemSlot(self) )\n self.logger.error(slotInfoMsg)\n raise Slot.SlotNotReadyError(\"Slot isn't ready. See error log.\")\n assert self.meta.shape is not None, \\\n (\"Can't ask for slices of this slot yet:\"\n \" self.meta.shape is None!\"\n \" (operator {} [self={}] slot: {}, key={}\".format(\n self.operator.name, self.operator, self.name, key))\n return self(pslice=key)", "def on_comboBox_qihuozhanghao_currentIndexChanged(self, p0):\n # TODO: not implemented yet\n raise NotImplementedError", "def vacate_slot(self, slot, reg_num):\n self.__occupied_slots__.pop(slot.slot_number)\n self.__vehicle_slot_mapping__.pop(reg_num)\n self.__available_slots__.add(slot)", "def _reserve_bsa_slot(self):\n self._run_cmd(CAPUT + \" IOC:IN20:EV01:EDEFNAME \" + '\"' + self._edef_name + '\"')", "def emit(self, **kwargs):\n for slot in self.slots:\n result = slot(**kwargs)\n\n if result is not None:\n return result", "def slotshow_m(obj, content):\n global _slotshow_m_tbl\n\n return _slotshow(obj, content, _slotshow_m_tbl)", "def slotVolume(self, a0):\n self.sampleGroup.action('volume', value=a0)", "def execute(self, slot, subindex, roi, result):\n totalIndex = (self._subSlots.index(slot),) + subindex\n return self.operator.execute(self, totalIndex, roi, result)", "def frame(self):", "def action(self, player, game, slot):\n card = player.get_coders().get_card(slot)\n player.get_coders().remove_card(slot)\n index = game.get_sleeping_coders().index(None)\n game.set_sleeping_coder(index,card)\n game.set_action(\"NO_ACTION\")\n game.next_player()" ]
[ "0.7317067", "0.6809478", "0.6723371", "0.6583449", "0.63984954", "0.63675296", "0.63494873", "0.6319528", "0.63195074", "0.631188", "0.6152515", "0.6127954", "0.61003345", "0.6093386", "0.60838413", "0.6083497", "0.6056153", "0.60259384", "0.6022296", "0.6002864", "0.59812987", "0.5944142", "0.5926356", "0.5860501", "0.5821294", "0.58022904", "0.579247", "0.57412773", "0.57305163", "0.57208335", "0.56980944", "0.567121", "0.56663936", "0.5663972", "0.566115", "0.5615209", "0.5612441", "0.5593378", "0.5579261", "0.5575075", "0.55743563", "0.5564597", "0.55627596", "0.55434495", "0.5541362", "0.55390745", "0.55162525", "0.5515694", "0.5501102", "0.54724556", "0.54612845", "0.5448487", "0.54467326", "0.5443292", "0.53959346", "0.53911555", "0.53839684", "0.53802204", "0.53784084", "0.5376231", "0.5373751", "0.5369214", "0.53615224", "0.53489757", "0.534799", "0.5344866", "0.53426665", "0.5342361", "0.53222084", "0.5320364", "0.5314095", "0.5309191", "0.5283628", "0.52824056", "0.5276979", "0.5275021", "0.5275021", "0.5260909", "0.5252085", "0.5249819", "0.52494216", "0.5246176", "0.5246176", "0.5220556", "0.5220291", "0.5219783", "0.52178997", "0.52175796", "0.5215104", "0.5212783", "0.51965827", "0.51849735", "0.5182628", "0.5180071", "0.51781875", "0.5176648", "0.5168609", "0.51619506", "0.5156556", "0.51523393", "0.5133018" ]
0.0
-1
Slot documentation goes here.
def on_action_4_triggered(self): # TODO: not implemented yet print('关于我们') my_button_about_me = QMessageBox.about(self, '关于我们', '这个位置放的是关于我们的介绍')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def slot(self, name):\n raise ClixxException(\"Not implemented.\")", "def play_slot__(self):\n print(\"play_slot__\")\n self.valkkafs_manager.play()", "def add_slot(self, slot):\n slot.set_location(len(self.slots)+1)\n self.slots.append(slot)", "def visit_slot(self, slot_name: str, slot: SlotDefinition) -> None:\n sn = underscore(slot_name)\n self.emit('slot', sn)\n if slot.domain:\n self.emit('domain', sn, underscore(slot.domain))\n if slot.range:\n self.emit('range', sn, underscore(slot.range))\n for p in slot.mixins:\n self.emit('mixin', sn, underscore(p))\n if slot.is_a:\n is_a = underscore(slot.is_a)\n\n #uri = self.owlgen._prop_uri(slot.name)\n uri = f'http://w3id.org/biolink/vocab/{sn}'\n self.emit('has_uri', sn, uri)\n if slot.multivalued:\n self.emit('multivalued', sn)\n if slot.required:\n self.emit('required', sn)", "def time_slot(self):\n pass", "def _slot(self, event) -> None:\n self.events.gui_event(original_event=event)", "def isSlot(self, p_int): # real signature unknown; restored from __doc__\n return False", "def slot(self,num):\n if num in ApexAP1000.SLOTS:\n self.__slot=num\n else:\n raise ValueError('Bad slot number !')", "def get_slot(self, idx):\n assert (idx >= 0) and (idx < self.size()), \"Index is out of range\"\n return self.slots[idx]", "def get_slots(self) -> int:", "def slot_receive(self):\n\n self.start_time = str(time.strftime(\"%Y%m%d%H%M%S\", time.localtime()))\n\n self.setter.start()\n self.getter.start()\n self.timer.start(1)\n # self.plotter.start()", "def perceive(self, slot):\n # right now, let's just observe the first element in the world\n if isinstance(slot, Slot):\n self.percept = slot.get_state()\n else:\n raise RuntimeError(\"Cannot observe other objects right now!\")", "def reserve(self):\n assert self.is_available() is True, \"this slot is not available\"", "def var(self, _type):\n return Slot()", "def slot(self):\n if self.__slot in ApexAP1000.SLOTS:\n return self.__slot\n else:\n raise ValueError('Bad slot number !')", "def slot_changed(self, dummy_sender, dummy_data):\r\n self.do_paint()", "def slot_tick(self, gox, (bid, ask)):\r\n pass", "def slot_wallet_changed(self, gox, _dummy):\r\n pass", "def required_slots(self,tracker) -> List[Text]:", "def slot_owns_changed(self, orderbook, _dummy):\r\n pass", "def on_select_clip_slot(self, clip_slot):\n pass", "def connect(self, slot):\n #if inspect.getargspec(slot).keywords is None:\n # raise exceptions.SlotMustAcceptKeywords(self, slot)\n\n if not self.is_connected(slot):\n self.slots.append(slot)", "def stop_slot__(self):\n print(\"stop_slot__\")\n self.valkkafs_manager.stop()", "def InSlotSet(self,slot,RTSignalIndex):\n ## Make Header\n hex_rep = self.NanonisTCP.make_header('Signals.InSlotSet', body_size=8)\n \n ## Arguments\n hex_rep += self.NanonisTCP.to_hex(slot,4)\n hex_rep += self.NanonisTCP.to_hex(RTSignalIndex,4)\n \n self.NanonisTCP.send_command(hex_rep)\n \n self.NanonisTCP.receive_response(0)", "def remove_slot(self, slot):\n if slot in self.slots:\n idx = self.slots.index(slot)\n # update location of rest slot\n for s in self.slots[idx:]:\n s.set_location(s.get_location()-1)\n self.slots.remove(slot)", "def onConnect(self, *args):\n UAV_Logger.log(\"connect signal recieved\")\n self.socketIO.emit('CORE_SL_SLOTS_SET', Request(self.getModuleName()).newData({\"slots\":self.slots}).toDictionary())", "def _GetSlots(mcs, attrs):\n raise NotImplementedError", "def slots(self):\n return self.__slots.values()", "def time_slot_p(self, c, p):\n pass", "def ParseSlot(self, G, node):\n slot = 
BLNlpClipsSlotMap()\n slot.Name = node\n return slot", "def add_item(self, item):\n\n if item.descriptor in self.__slots:\n slot = self.__slots[item.descriptor]\n slot.quantity += 1\n else:\n self.__slots[item.descriptor] = Slot(item)", "def on_triggered(self, slot):\n self.triggered.connect(slot)", "def update(self):\n if self.size() < 2: return\n idx = random.randint(0, 100) % 3\n if idx < 2:\n slot = self.slots[idx]\n if slot.get_state() == Slot.CLEAN:\n slot.set_state(Slot.DIRTY)\n # self.slots[idx] = slot", "def run(self):\r\n for slot in self.slots:\r\n slot.work()\r\n self.increment()", "def source_slot(self):\n return self._container_slot(True)", "def handle_slot_select(data: bytes) -> Tuple[bytes, str]:\n new_slot = struct.unpack('B', data[:1])[0]\n return data[1:], f'New slot: {new_slot}'", "def get_frame_slot_value(self, slot):\n # FrameObject is a dictionary of slot names and values.\n [slotName, value] = self.pgdb.sendPgdbFnCall('get-frame-slot-value', self.frameid, Symbol(slot))\n if not slotName:\n raise PythonCycError(\"Slot \"+slot+\" does not exist for frame \"+self.frameid+\" from organism (orgid) \"+self.pgdb._orgid)\n # Modify slot name to allow Python's syntax (e.g., '_' instead of '-').\n self.__dict__[convertLispIdtoPythonId(slotName)] = value\n return self", "def __validate_slots(self, slots: int):\n\n if (not isinstance(slots, int)):\n raise ValueError('slots', slots, 'The value must be an integer')\n if (slots <= 0):\n raise ValueError('slots', slots, 'The value must be greater than zero')", "def decide(self, slot_index):\n self._slot_decided[slot_index] = True", "def slot_trade(self, gox, (date, price, volume, typ, own)):\r\n pass", "def setInSlot(self, slot, subindex, roi, value):\n # Determine which subslot this is and prepend it to the totalIndex\n totalIndex = (self._subSlots.index(slot),) + subindex\n # Forward the call to our operator\n self.operator.setInSlot(self, totalIndex, roi, value)", "def size(self):\n return len(self.slots)", "def occupy_slot(self, slot, vehicle):\n self.__occupied_slots__[slot.slot_number] = vehicle.registration_number, vehicle.color\n self.__vehicle_slot_mapping__[vehicle.registration_number] = slot.slot_number\n self.__available_slots__.remove(slot)", "def target_slot(self):\n return self._container_slot(False)", "def slot_history_changed(self, history, _dummy):\r\n pass", "def intuit(self):\n raise NotImplemented()", "def slot_info(self) -> Dict[int, Tuple[int, str, int]]:\n return self._slot_info", "def increment(self):\r\n self.add_output()\r\n for i in range(self.length-1, 0, -1):\r\n self.slots[i].item = self.slots[i-1].item\r\n self.slots[0].item = Item.random()", "def get_num_slots(self):\n # Your code here\n return self.capacity", "def action(self, player, game, slot):\n super().action(player, game, slot)\n card = game.get_sleeping_coder(slot)\n game.set_sleeping_coder(slot,None)\n player.get_coders().add_card(card)\n game.set_action(\"NO_ACTION\")\n game.next_player()", "def get_slot(self, c):\n if 'slot_number' in c.keys():\n slot_number = c['slot_number']\n return slot_number\n else:\n raise ValueError(self.no_selection_msg())\n \n # returnValue(voltage * units.V)", "async def slot(self, ctx: commands.Context, bid: int):\r\n author = ctx.author\r\n guild = ctx.guild\r\n channel = ctx.channel\r\n if await bank.is_global():\r\n valid_bid = await self.config.SLOT_MIN() <= bid <= await self.config.SLOT_MAX()\r\n slot_time = await self.config.SLOT_TIME()\r\n last_slot = await self.config.user(author).last_slot()\r\n 
else:\r\n valid_bid = (\r\n await self.config.guild(guild).SLOT_MIN()\r\n <= bid\r\n <= await self.config.guild(guild).SLOT_MAX()\r\n )\r\n slot_time = await self.config.guild(guild).SLOT_TIME()\r\n last_slot = await self.config.member(author).last_slot()\r\n now = calendar.timegm(ctx.message.created_at.utctimetuple())\r\n\r\n if (now - last_slot) < slot_time:\r\n await ctx.send(_(\"You're on cooldown, try again in a bit.\"))\r\n return\r\n if not valid_bid:\r\n await ctx.send(_(\"That's an invalid bid amount, sorry :/\"))\r\n return\r\n if not await bank.can_spend(author, bid):\r\n await ctx.send(_(\"You ain't got enough money, friend.\"))\r\n return\r\n if await bank.is_global():\r\n await self.config.user(author).last_slot.set(now)\r\n else:\r\n await self.config.member(author).last_slot.set(now)\r\n await self.slot_machine(author, channel, bid)", "def get_slot(self, *args, **kwargs):\n return self._optimizer.get_slot(*args, **kwargs)", "def get_num_slots(self):\n # Your code here\n return len(self.data)", "def slot_before_unload(self, _sender, _data):\r\n pass", "def slotAccept(self, id):\n self.category = self.buttonGroup.find(id).text().ascii()\n self.accept()", "def render_ifslot(self, slotName):\n\t\tdef render(ctx, data):\n\t\t\ttry:\n\t\t\t\tif ctx.locateSlotData(slotName):\n\t\t\t\t\treturn ctx.tag\n\t\t\t\telse:\n\t\t\t\t\treturn \"\"\n\t\t\texcept KeyError:\n\t\t\t\treturn \"\"\n\t\treturn render", "def on_VI_518p_set_clicked(self):\n # TODO: not implemented yet\n disp_518P()\n self.up_slot = qmdz_const.up_slot\n self.down_slot = qmdz_const.down_slot", "def update_Q(self):", "def set_signal(self):\n self.pbSave.clicked.connect(self.save_match_result)\n self.pbCancel.clicked.connect(self.close_match_result)\n self.lwScoreList.doubleClicked.connect(self.display_player_name)\n self.lwAssistList.doubleClicked.connect(self.display_player_name)", "def timeslot(self, timeslot: List[TimeslotTimeslot]):\n\n self._timeslot = timeslot", "def get_slot(item_id):\n if item_id in all_items:\n return all_items[item_id]['slot']\n return None", "def connect_slots(self):\n\n self.set_elf_sig.connect(self.set_elf)\n self.run_target_sig.connect(self.run_target)\n self.continue_target_sig.connect(self.continue_target)\n self.set_breakpoint_sig.connect(self.set_breakpoint)\n self.set_app_mode_sig.connect(self.set_app_mode)\n self.write_hex_value_sig.connect(self.write_word_to_address)", "def connect(self, slot):\r\n if inspect.ismethod(slot):\r\n instance = slot.__self__\r\n function = slot.__func__\r\n if instance not in self._methods:\r\n self._methods[instance] = set()\r\n if function not in self._methods[instance]:\r\n self._methods[instance].add(function)\r\n else:\r\n if slot not in self._functions:\r\n self._functions.add(slot)", "def __init__(self, name=\"\", operator=None, stype=ArrayLike,\n rtype=rtype.SubRegion, value=None, optional=False,\n level=0, nonlane=False):\n # This assertion is here for a reason: default values do NOT work on OutputSlots.\n # (We should probably change that at some point...)\n assert value is None or isinstance(self, InputSlot), \"Only InputSlots can have default values. 
OutputSlots cannot.\"\n \n if not hasattr(self, \"_type\"):\n self._type = None\n if type(stype) == str:\n stype = ArrayLike\n self.partners = []\n self.name = name\n self._optional = optional\n self.operator = operator\n self._real_operator = None # Memoized in getRealOperator()\n\n # in the case of an InputSlot this is the slot to which it is\n # connected\n self.partner = None\n self.level = level\n\n # in the case of an InputSlot one can directly assign a value\n # to a slot instead of connecting it to a partner, this\n # attribute holds the value\n self._value = None\n\n self._defaultValue = value\n\n # Causes calls to setValue to be propagated backwards to the\n # partner slot. Used by the OperatorWrapper.\n self._backpropagate_values = False\n\n self.rtype = rtype\n\n # the MetaDict that holds the slots meta information\n self.meta = MetaDict()\n\n # if level > 0, this holds the sub-Input/Output slots\n self._subSlots = []\n self._stypeType = stype\n\n # the slot type instance\n self.stype = stype(self)\n self.nonlane = nonlane\n\n self._sig_changed = OrderedSignal()\n self._sig_value_changed = OrderedSignal()\n self._sig_ready = OrderedSignal()\n self._sig_unready = OrderedSignal()\n self._sig_dirty = OrderedSignal()\n self._sig_connect = OrderedSignal()\n self._sig_disconnect = OrderedSignal()\n self._sig_resize = OrderedSignal()\n self._sig_resized = OrderedSignal()\n self._sig_remove = OrderedSignal()\n self._sig_removed = OrderedSignal()\n self._sig_preinsertion = OrderedSignal()\n self._sig_inserted = OrderedSignal()\n\n self._resizing = False\n\n self._executionCount = 0\n self._settingUp = False\n self._condition = threading.Condition()\n\n # Allow slots to be sorted by their order of creation for\n # debug output and diagramming purposes.\n self._global_slot_id = Slot._global_counter.next()", "def action(self, player, game, slot): \n super().action(player, game, slot) \n card = player.get_coders().get_card(slot)\n player.get_coders().remove_card(slot)\n game.current_player().get_coders().add_card(card)\n game.set_action(\"NO_ACTION\")\n game.next_player()", "def __init__(self, driver_age=None, vehicle_registration_number=None):\n self.slot_status = 0 # 0 Means the slot is free\n self.vehicle_driver_age = driver_age\n self.vehicle_registration_number = vehicle_registration_number", "def _comboSlot(self, select):\n select = self.sender().itemText(select)\n if qt4:\n qs = str(self.sender().property(\"dom address\").toPyObject())\n else:\n qs = str(self.sender().property(\"dom address\"))\n item = QtXml.QDomElement()\n\n ind = qs.rfind('/')\n ids = qs[ind:]\n\n item = self.qhash[qs].elem.firstChildElement(\"Item\")\n while(item.isNull() is False):\n itemName = item.firstChildElement(\"Name\")\n if(str(itemName.text()).strip() != select):\n activ = item.firstChildElement(\"Activate\")\n while(activ.isNull() is False):\n s = str(activ.text()).strip() + ids\n h = self.qhash[s]\n widget_enabled = h.elem.attribute(\"Enabled\", \"True\")\n widget_visible = h.elem.attribute(\"Visible\", \"Unknown\")\n h.widget.setEnabled(False)\n if(widget_visible != \"Unknown\"):\n h.label.hide()\n h.widget.hide()\n activ = activ.nextSiblingElement(\"Activate\")\n item = item.nextSiblingElement(\"Item\")\n\n item = self.qhash[qs].elem.firstChildElement(\"Item\")\n while(item.isNull() is False):\n itemName = item.firstChildElement(\"Name\")\n if(str(itemName.text()).strip() == select):\n activ = item.firstChildElement(\"Activate\")\n while(activ.isNull() is False):\n s = str(activ.text()).strip() + 
ids\n h = self.qhash[s]\n h.widget.setEnabled(True)\n h.label.show()\n h.widget.show()\n activ = activ.nextSiblingElement(\"Activate\")\n item = item.nextSiblingElement(\"Item\")", "def showSlot(self, number):\n if number <= 0:\n self.log.info('Showing slot \"Negative\" (%d) slot ignored' % number)\n else:\n slot = self.slots[self.number - number]\n if slot.isOccupied():\n slot.removeItem()\n if number == self.target:\n image = self.target_image if self.value < self.target else self.target_image_filled\n self.log.debug('Showing target row: %s, %s, %s' % (self.value, self.target, image))\n elif number <= self.value:\n image = self.fill_image\n elif number <= self.target:\n image = self.blank_image\n else:\n image = self.grey_image\n self.log.debug('Showing %s cell %d as %s (value=%s, target=%s)' % (\n self.name, number, image, self.value, self.target))\n\n slot.addItem(sprite.Sprite(image))", "def disconnect(self, slot):\n if self.is_connected(slot):\n self.slots.pop(self.slots.index(slot))", "def fillSlots(self, name, stan):\n if self._slotData is None:\n self._slotData = {}\n self._slotData[name] = stan", "def slot_ticker(self, dummy_sender, data):\r\n (bid, ask) = data\r\n self.bid = bid\r\n self.ask = ask\r\n self.last_change_type = None\r\n self.last_change_price = 0\r\n self.last_change_volume = 0\r\n self._repair_crossed_asks(ask)\r\n self._repair_crossed_bids(bid)\r\n self.signal_changed(self, None)", "def put_slot_value(self, slot_name_or_uri, value):\n self.memory[slot_name_or_uri] = value\n # print(\"MemoryManager: Implement Me\")\n return self.memory[slot_name_or_uri]", "def slotSelectRow(self, selectDict):\n if self.dataType != selectDict['dataType']:\n return\n\n #print('slotSelectRow() selectDict:', selectDict)\n \n ind = selectDict['index']\n '''\n plotDf = selectDict['plotDf']\n index = plotDf.at[ind, 'index']\n index = int(index)\n '''\n index = ind\n #index -= 1 # !!! MY VISUAL INDEX IN TABLE IS ONE BASED !!!\n column = 0\n modelIndex = self.model().index(index, column)\n self.setCurrentIndex(modelIndex)", "def getSlotforObject(cls, obj):\n if obj.__class__ in restslotattributedict.keys():\n attr = cls.getSlotAttrib(obj)\n if attr is not None:\n peek = getattr(obj, \"peek_\" + attr)\n slot = str(peek()).split('/')[0]\n else:\n return False, \"0\"\n return True, slot", "def __init__(self, slots):\n self.slots = slots\n self.table = []\n for i in range(slots):\n self.table.append([])", "def __init__(self, slots):\n self.slots = slots\n self.table = []\n for i in range(slots):\n self.table.append([])", "def num_slots(self, num_slots):\n\n self._num_slots = num_slots", "def render_ifnoslot(self, slotName):\n\t\t# just repeat the code from ifslot -- this is called frequently,\n\t\t# and additional logic just is not worth it.\n\t\tdef render(ctx, data):\n\t\t\ttry:\n\t\t\t\tif not ctx.locateSlotData(slotName):\n\t\t\t\t\treturn ctx.tag\n\t\t\t\telse:\n\t\t\t\t\treturn \"\"\n\t\t\texcept KeyError:\n\t\t\t\treturn \"\"\n\t\treturn render", "def __len__(self):\n return len(self._subSlots)", "def slot_keypress(self, gox, (key)):\r\n pass", "def Item(self) -> object:", "def Item(self) -> object:", "def extend_slots(self, prediction, item):\n spec = prediction.phrasal_pattern[0]\n slots = prediction.slots\n if is_role_specifier(spec):\n new_slots = copy(slots)\n new_slot = self.role_specifier(spec)\n if new_slot in new_slots:\n raise DuplicateSlotError('Slot %s already exists in %s.' 
% (\n new_slot, prediction))\n new_slots[new_slot] = item\n return new_slots\n else:\n return slots", "def SlotStatus(rangeInfo):\n if rangeInfo >= 3 and rangeInfo <= 10:\n \"\"\" Full Slot \"\"\"\n status = 1\n return status\n else:\n \"\"\" Empty Slot \"\"\"\n status = 0\n return status", "def InSlotsGet(self):\n ## Make Header\n hex_rep = self.NanonisTCP.make_header('Signals.InSlotsGet', body_size=0)\n \n self.NanonisTCP.send_command(hex_rep)\n \n response = self.NanonisTCP.receive_response()\n \n # signals_names_size = self.NanonisTCP.hex_to_int32(response[0:4])\n signals_names_num = self.NanonisTCP.hex_to_int32(response[4:8])\n \n idx = 8\n signal_names = []\n for n in range(signals_names_num):\n size = self.NanonisTCP.hex_to_int32(response[idx:idx+4])\n idx += 4\n signal_name = response[idx:idx+size].decode()\n idx += size\n signal_names.append(signal_name)\n \n signal_indexes = []\n signal_indexes_size = self.NanonisTCP.hex_to_int32(response[idx:idx+4])\n for n in range(signal_indexes_size):\n idx += 4\n signal_index = self.NanonisTCP.hex_to_int32(response[idx:idx+4])\n signal_indexes.append(signal_index)\n \n return [signal_names,signal_indexes]", "def _lSlot(self, state):\n self._param = QtXml.QDomElement()\n if qt4:\n qs = str(self.sender().property(\"dom address\").toPyObject())\n else:\n qs = str(self.sender().property(\"dom address\"))\n\n ind = qs.rfind('/')\n ids = qs[ind:]\n\n self._param = self.qhash[qs].elem.firstChildElement(\"Activate\")\n while(self._param.isNull() is False):\n qs = str(self._param.text()).strip() + ids\n self.qhash[qs].widget.setEnabled(state)\n widget_visible = self.qhash[qs].elem.attribute(\"Visible\", \"Unknown\")\n if(state is False & widget_visible != \"Unknown\"):\n self.qhash[qs].label.hide()\n self.qhash[qs].widget.hide()\n else:\n self.qhash[qs].label.show()\n self.qhash[qs].widget.show()\n self._param = self._param.nextSiblingElement(\"Activate\")\n\n self._param = self.qhash[qs].elem.firstChildElement(\"Deactivate\")\n while(self._param.isNull() is False):\n qs = str(self._param.text()).strip() + ids\n self.qhash[qs].widget.setEnabled(-state)\n widget_visible = self.qhash[qs].elem.attribute(\"Visible\", \"Unknown\")\n if(state is True & widget_visible != \"Unknown\"):\n self.qhash[qs].label.hide()\n self.qhash[qs].widget.hide()\n else:\n self.qhash[qs].label.show()\n self.qhash[qs].widget.show()\n self._param = self._param.nextSiblingElement(\"Deactivate\")", "def __init__(self):\n self.map = {} #key: val, val, idx\n self.slot = []", "def partVirtualHelixRenumberedSlot(self, sender, coord):\n vh = self._virtualHelixHash[coord]\n # check for new number\n # notify VirtualHelixHandleItem to update its label\n # notify VirtualHelix to update its xovers\n # if the VirtualHelix is active, refresh prexovers\n pass", "def extra_slots(self, extra_slots):\n\n self._extra_slots = extra_slots", "def activated(self):", "def __getitem__(self, key):\n if self.level > 0:\n if isinstance(key, tuple):\n assert len(key) > 0\n assert len(key) <= self.level\n if len(key) == 1:\n return self._subSlots[key[0]]\n else:\n return self._subSlots[key[0]][key[1:]]\n return self._subSlots[key]\n else:\n if self.meta.shape is None:\n # Something is wrong. Are we cancelled?\n Request.raise_if_cancelled()\n if not self.ready():\n msg = \"This slot ({}.{}) isn't ready yet, which means \" \\\n \"you can't ask for its data. 
Is it connected?\".format(self.getRealOperator().name, self.name)\n self.logger.error(msg)\n slotInfoMsg = \"Can't get data from slot {}.{} yet.\"\\\n \" It isn't ready.\"\\\n \"First upstream problem slot is: {}\"\\\n \"\".format( self.getRealOperator().__class__, self.name, Slot._findUpstreamProblemSlot(self) )\n self.logger.error(slotInfoMsg)\n raise Slot.SlotNotReadyError(\"Slot isn't ready. See error log.\")\n assert self.meta.shape is not None, \\\n (\"Can't ask for slices of this slot yet:\"\n \" self.meta.shape is None!\"\n \" (operator {} [self={}] slot: {}, key={}\".format(\n self.operator.name, self.operator, self.name, key))\n return self(pslice=key)", "def on_comboBox_qihuozhanghao_currentIndexChanged(self, p0):\n # TODO: not implemented yet\n raise NotImplementedError", "def vacate_slot(self, slot, reg_num):\n self.__occupied_slots__.pop(slot.slot_number)\n self.__vehicle_slot_mapping__.pop(reg_num)\n self.__available_slots__.add(slot)", "def _reserve_bsa_slot(self):\n self._run_cmd(CAPUT + \" IOC:IN20:EV01:EDEFNAME \" + '\"' + self._edef_name + '\"')", "def emit(self, **kwargs):\n for slot in self.slots:\n result = slot(**kwargs)\n\n if result is not None:\n return result", "def slotshow_m(obj, content):\n global _slotshow_m_tbl\n\n return _slotshow(obj, content, _slotshow_m_tbl)", "def slotVolume(self, a0):\n self.sampleGroup.action('volume', value=a0)", "def execute(self, slot, subindex, roi, result):\n totalIndex = (self._subSlots.index(slot),) + subindex\n return self.operator.execute(self, totalIndex, roi, result)", "def frame(self):", "def action(self, player, game, slot):\n card = player.get_coders().get_card(slot)\n player.get_coders().remove_card(slot)\n index = game.get_sleeping_coders().index(None)\n game.set_sleeping_coder(index,card)\n game.set_action(\"NO_ACTION\")\n game.next_player()" ]
[ "0.73193145", "0.6811926", "0.6725808", "0.65843606", "0.6397118", "0.6368233", "0.63534164", "0.63223046", "0.6321543", "0.6312873", "0.6151129", "0.6127017", "0.6102243", "0.60954756", "0.60865855", "0.6085341", "0.60555625", "0.6027709", "0.6023868", "0.60039294", "0.5981614", "0.5944901", "0.5928264", "0.5860642", "0.58225965", "0.5803388", "0.57931894", "0.5743234", "0.57300717", "0.57230735", "0.5700275", "0.56719446", "0.56676686", "0.56638163", "0.5662287", "0.56184554", "0.5609259", "0.55965775", "0.5580088", "0.5575003", "0.5574187", "0.5566322", "0.55628324", "0.554456", "0.55376935", "0.55371886", "0.55152655", "0.55149907", "0.55011916", "0.5471193", "0.54608166", "0.54495156", "0.54477423", "0.5443887", "0.5396864", "0.5391362", "0.53841025", "0.5380693", "0.5379568", "0.5375968", "0.5374344", "0.53722537", "0.53629327", "0.5348422", "0.5348001", "0.53435796", "0.53432775", "0.5343046", "0.5324689", "0.5320415", "0.53172785", "0.5309329", "0.5282788", "0.528094", "0.5278367", "0.52753556", "0.52753556", "0.5262484", "0.5252292", "0.5249807", "0.52497274", "0.52403206", "0.52403206", "0.5223128", "0.5221164", "0.52208847", "0.5218931", "0.5216089", "0.52156943", "0.5213589", "0.5192793", "0.51838696", "0.5183027", "0.51815283", "0.51802087", "0.51758593", "0.5171057", "0.5164619", "0.5154298", "0.51483667", "0.5131844" ]
0.0
-1
Slot documentation goes here.
def on_action_QT_triggered(self): # TODO: not implemented yet print('关于qt') my_button_about_QT = QMessageBox.aboutQt(self, '关于QT')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def slot(self, name):\n raise ClixxException(\"Not implemented.\")", "def play_slot__(self):\n print(\"play_slot__\")\n self.valkkafs_manager.play()", "def add_slot(self, slot):\n slot.set_location(len(self.slots)+1)\n self.slots.append(slot)", "def visit_slot(self, slot_name: str, slot: SlotDefinition) -> None:\n sn = underscore(slot_name)\n self.emit('slot', sn)\n if slot.domain:\n self.emit('domain', sn, underscore(slot.domain))\n if slot.range:\n self.emit('range', sn, underscore(slot.range))\n for p in slot.mixins:\n self.emit('mixin', sn, underscore(p))\n if slot.is_a:\n is_a = underscore(slot.is_a)\n\n #uri = self.owlgen._prop_uri(slot.name)\n uri = f'http://w3id.org/biolink/vocab/{sn}'\n self.emit('has_uri', sn, uri)\n if slot.multivalued:\n self.emit('multivalued', sn)\n if slot.required:\n self.emit('required', sn)", "def time_slot(self):\n pass", "def _slot(self, event) -> None:\n self.events.gui_event(original_event=event)", "def isSlot(self, p_int): # real signature unknown; restored from __doc__\n return False", "def slot(self,num):\n if num in ApexAP1000.SLOTS:\n self.__slot=num\n else:\n raise ValueError('Bad slot number !')", "def get_slot(self, idx):\n assert (idx >= 0) and (idx < self.size()), \"Index is out of range\"\n return self.slots[idx]", "def get_slots(self) -> int:", "def slot_receive(self):\n\n self.start_time = str(time.strftime(\"%Y%m%d%H%M%S\", time.localtime()))\n\n self.setter.start()\n self.getter.start()\n self.timer.start(1)\n # self.plotter.start()", "def perceive(self, slot):\n # right now, let's just observe the first element in the world\n if isinstance(slot, Slot):\n self.percept = slot.get_state()\n else:\n raise RuntimeError(\"Cannot observe other objects right now!\")", "def reserve(self):\n assert self.is_available() is True, \"this slot is not available\"", "def var(self, _type):\n return Slot()", "def slot(self):\n if self.__slot in ApexAP1000.SLOTS:\n return self.__slot\n else:\n raise ValueError('Bad slot number !')", "def slot_changed(self, dummy_sender, dummy_data):\r\n self.do_paint()", "def slot_tick(self, gox, (bid, ask)):\r\n pass", "def slot_wallet_changed(self, gox, _dummy):\r\n pass", "def required_slots(self,tracker) -> List[Text]:", "def slot_owns_changed(self, orderbook, _dummy):\r\n pass", "def on_select_clip_slot(self, clip_slot):\n pass", "def connect(self, slot):\n #if inspect.getargspec(slot).keywords is None:\n # raise exceptions.SlotMustAcceptKeywords(self, slot)\n\n if not self.is_connected(slot):\n self.slots.append(slot)", "def stop_slot__(self):\n print(\"stop_slot__\")\n self.valkkafs_manager.stop()", "def InSlotSet(self,slot,RTSignalIndex):\n ## Make Header\n hex_rep = self.NanonisTCP.make_header('Signals.InSlotSet', body_size=8)\n \n ## Arguments\n hex_rep += self.NanonisTCP.to_hex(slot,4)\n hex_rep += self.NanonisTCP.to_hex(RTSignalIndex,4)\n \n self.NanonisTCP.send_command(hex_rep)\n \n self.NanonisTCP.receive_response(0)", "def remove_slot(self, slot):\n if slot in self.slots:\n idx = self.slots.index(slot)\n # update location of rest slot\n for s in self.slots[idx:]:\n s.set_location(s.get_location()-1)\n self.slots.remove(slot)", "def onConnect(self, *args):\n UAV_Logger.log(\"connect signal recieved\")\n self.socketIO.emit('CORE_SL_SLOTS_SET', Request(self.getModuleName()).newData({\"slots\":self.slots}).toDictionary())", "def _GetSlots(mcs, attrs):\n raise NotImplementedError", "def slots(self):\n return self.__slots.values()", "def time_slot_p(self, c, p):\n pass", "def ParseSlot(self, G, node):\n slot = 
BLNlpClipsSlotMap()\n slot.Name = node\n return slot", "def add_item(self, item):\n\n if item.descriptor in self.__slots:\n slot = self.__slots[item.descriptor]\n slot.quantity += 1\n else:\n self.__slots[item.descriptor] = Slot(item)", "def on_triggered(self, slot):\n self.triggered.connect(slot)", "def update(self):\n if self.size() < 2: return\n idx = random.randint(0, 100) % 3\n if idx < 2:\n slot = self.slots[idx]\n if slot.get_state() == Slot.CLEAN:\n slot.set_state(Slot.DIRTY)\n # self.slots[idx] = slot", "def run(self):\r\n for slot in self.slots:\r\n slot.work()\r\n self.increment()", "def source_slot(self):\n return self._container_slot(True)", "def handle_slot_select(data: bytes) -> Tuple[bytes, str]:\n new_slot = struct.unpack('B', data[:1])[0]\n return data[1:], f'New slot: {new_slot}'", "def get_frame_slot_value(self, slot):\n # FrameObject is a dictionary of slot names and values.\n [slotName, value] = self.pgdb.sendPgdbFnCall('get-frame-slot-value', self.frameid, Symbol(slot))\n if not slotName:\n raise PythonCycError(\"Slot \"+slot+\" does not exist for frame \"+self.frameid+\" from organism (orgid) \"+self.pgdb._orgid)\n # Modify slot name to allow Python's syntax (e.g., '_' instead of '-').\n self.__dict__[convertLispIdtoPythonId(slotName)] = value\n return self", "def __validate_slots(self, slots: int):\n\n if (not isinstance(slots, int)):\n raise ValueError('slots', slots, 'The value must be an integer')\n if (slots <= 0):\n raise ValueError('slots', slots, 'The value must be greater than zero')", "def decide(self, slot_index):\n self._slot_decided[slot_index] = True", "def slot_trade(self, gox, (date, price, volume, typ, own)):\r\n pass", "def setInSlot(self, slot, subindex, roi, value):\n # Determine which subslot this is and prepend it to the totalIndex\n totalIndex = (self._subSlots.index(slot),) + subindex\n # Forward the call to our operator\n self.operator.setInSlot(self, totalIndex, roi, value)", "def size(self):\n return len(self.slots)", "def occupy_slot(self, slot, vehicle):\n self.__occupied_slots__[slot.slot_number] = vehicle.registration_number, vehicle.color\n self.__vehicle_slot_mapping__[vehicle.registration_number] = slot.slot_number\n self.__available_slots__.remove(slot)", "def target_slot(self):\n return self._container_slot(False)", "def slot_history_changed(self, history, _dummy):\r\n pass", "def intuit(self):\n raise NotImplemented()", "def slot_info(self) -> Dict[int, Tuple[int, str, int]]:\n return self._slot_info", "def increment(self):\r\n self.add_output()\r\n for i in range(self.length-1, 0, -1):\r\n self.slots[i].item = self.slots[i-1].item\r\n self.slots[0].item = Item.random()", "def get_num_slots(self):\n # Your code here\n return self.capacity", "def action(self, player, game, slot):\n super().action(player, game, slot)\n card = game.get_sleeping_coder(slot)\n game.set_sleeping_coder(slot,None)\n player.get_coders().add_card(card)\n game.set_action(\"NO_ACTION\")\n game.next_player()", "def get_slot(self, c):\n if 'slot_number' in c.keys():\n slot_number = c['slot_number']\n return slot_number\n else:\n raise ValueError(self.no_selection_msg())\n \n # returnValue(voltage * units.V)", "async def slot(self, ctx: commands.Context, bid: int):\r\n author = ctx.author\r\n guild = ctx.guild\r\n channel = ctx.channel\r\n if await bank.is_global():\r\n valid_bid = await self.config.SLOT_MIN() <= bid <= await self.config.SLOT_MAX()\r\n slot_time = await self.config.SLOT_TIME()\r\n last_slot = await self.config.user(author).last_slot()\r\n 
else:\r\n valid_bid = (\r\n await self.config.guild(guild).SLOT_MIN()\r\n <= bid\r\n <= await self.config.guild(guild).SLOT_MAX()\r\n )\r\n slot_time = await self.config.guild(guild).SLOT_TIME()\r\n last_slot = await self.config.member(author).last_slot()\r\n now = calendar.timegm(ctx.message.created_at.utctimetuple())\r\n\r\n if (now - last_slot) < slot_time:\r\n await ctx.send(_(\"You're on cooldown, try again in a bit.\"))\r\n return\r\n if not valid_bid:\r\n await ctx.send(_(\"That's an invalid bid amount, sorry :/\"))\r\n return\r\n if not await bank.can_spend(author, bid):\r\n await ctx.send(_(\"You ain't got enough money, friend.\"))\r\n return\r\n if await bank.is_global():\r\n await self.config.user(author).last_slot.set(now)\r\n else:\r\n await self.config.member(author).last_slot.set(now)\r\n await self.slot_machine(author, channel, bid)", "def get_slot(self, *args, **kwargs):\n return self._optimizer.get_slot(*args, **kwargs)", "def get_num_slots(self):\n # Your code here\n return len(self.data)", "def slot_before_unload(self, _sender, _data):\r\n pass", "def slotAccept(self, id):\n self.category = self.buttonGroup.find(id).text().ascii()\n self.accept()", "def render_ifslot(self, slotName):\n\t\tdef render(ctx, data):\n\t\t\ttry:\n\t\t\t\tif ctx.locateSlotData(slotName):\n\t\t\t\t\treturn ctx.tag\n\t\t\t\telse:\n\t\t\t\t\treturn \"\"\n\t\t\texcept KeyError:\n\t\t\t\treturn \"\"\n\t\treturn render", "def on_VI_518p_set_clicked(self):\n # TODO: not implemented yet\n disp_518P()\n self.up_slot = qmdz_const.up_slot\n self.down_slot = qmdz_const.down_slot", "def update_Q(self):", "def set_signal(self):\n self.pbSave.clicked.connect(self.save_match_result)\n self.pbCancel.clicked.connect(self.close_match_result)\n self.lwScoreList.doubleClicked.connect(self.display_player_name)\n self.lwAssistList.doubleClicked.connect(self.display_player_name)", "def timeslot(self, timeslot: List[TimeslotTimeslot]):\n\n self._timeslot = timeslot", "def get_slot(item_id):\n if item_id in all_items:\n return all_items[item_id]['slot']\n return None", "def connect_slots(self):\n\n self.set_elf_sig.connect(self.set_elf)\n self.run_target_sig.connect(self.run_target)\n self.continue_target_sig.connect(self.continue_target)\n self.set_breakpoint_sig.connect(self.set_breakpoint)\n self.set_app_mode_sig.connect(self.set_app_mode)\n self.write_hex_value_sig.connect(self.write_word_to_address)", "def connect(self, slot):\r\n if inspect.ismethod(slot):\r\n instance = slot.__self__\r\n function = slot.__func__\r\n if instance not in self._methods:\r\n self._methods[instance] = set()\r\n if function not in self._methods[instance]:\r\n self._methods[instance].add(function)\r\n else:\r\n if slot not in self._functions:\r\n self._functions.add(slot)", "def __init__(self, name=\"\", operator=None, stype=ArrayLike,\n rtype=rtype.SubRegion, value=None, optional=False,\n level=0, nonlane=False):\n # This assertion is here for a reason: default values do NOT work on OutputSlots.\n # (We should probably change that at some point...)\n assert value is None or isinstance(self, InputSlot), \"Only InputSlots can have default values. 
OutputSlots cannot.\"\n \n if not hasattr(self, \"_type\"):\n self._type = None\n if type(stype) == str:\n stype = ArrayLike\n self.partners = []\n self.name = name\n self._optional = optional\n self.operator = operator\n self._real_operator = None # Memoized in getRealOperator()\n\n # in the case of an InputSlot this is the slot to which it is\n # connected\n self.partner = None\n self.level = level\n\n # in the case of an InputSlot one can directly assign a value\n # to a slot instead of connecting it to a partner, this\n # attribute holds the value\n self._value = None\n\n self._defaultValue = value\n\n # Causes calls to setValue to be propagated backwards to the\n # partner slot. Used by the OperatorWrapper.\n self._backpropagate_values = False\n\n self.rtype = rtype\n\n # the MetaDict that holds the slots meta information\n self.meta = MetaDict()\n\n # if level > 0, this holds the sub-Input/Output slots\n self._subSlots = []\n self._stypeType = stype\n\n # the slot type instance\n self.stype = stype(self)\n self.nonlane = nonlane\n\n self._sig_changed = OrderedSignal()\n self._sig_value_changed = OrderedSignal()\n self._sig_ready = OrderedSignal()\n self._sig_unready = OrderedSignal()\n self._sig_dirty = OrderedSignal()\n self._sig_connect = OrderedSignal()\n self._sig_disconnect = OrderedSignal()\n self._sig_resize = OrderedSignal()\n self._sig_resized = OrderedSignal()\n self._sig_remove = OrderedSignal()\n self._sig_removed = OrderedSignal()\n self._sig_preinsertion = OrderedSignal()\n self._sig_inserted = OrderedSignal()\n\n self._resizing = False\n\n self._executionCount = 0\n self._settingUp = False\n self._condition = threading.Condition()\n\n # Allow slots to be sorted by their order of creation for\n # debug output and diagramming purposes.\n self._global_slot_id = Slot._global_counter.next()", "def action(self, player, game, slot): \n super().action(player, game, slot) \n card = player.get_coders().get_card(slot)\n player.get_coders().remove_card(slot)\n game.current_player().get_coders().add_card(card)\n game.set_action(\"NO_ACTION\")\n game.next_player()", "def __init__(self, driver_age=None, vehicle_registration_number=None):\n self.slot_status = 0 # 0 Means the slot is free\n self.vehicle_driver_age = driver_age\n self.vehicle_registration_number = vehicle_registration_number", "def _comboSlot(self, select):\n select = self.sender().itemText(select)\n if qt4:\n qs = str(self.sender().property(\"dom address\").toPyObject())\n else:\n qs = str(self.sender().property(\"dom address\"))\n item = QtXml.QDomElement()\n\n ind = qs.rfind('/')\n ids = qs[ind:]\n\n item = self.qhash[qs].elem.firstChildElement(\"Item\")\n while(item.isNull() is False):\n itemName = item.firstChildElement(\"Name\")\n if(str(itemName.text()).strip() != select):\n activ = item.firstChildElement(\"Activate\")\n while(activ.isNull() is False):\n s = str(activ.text()).strip() + ids\n h = self.qhash[s]\n widget_enabled = h.elem.attribute(\"Enabled\", \"True\")\n widget_visible = h.elem.attribute(\"Visible\", \"Unknown\")\n h.widget.setEnabled(False)\n if(widget_visible != \"Unknown\"):\n h.label.hide()\n h.widget.hide()\n activ = activ.nextSiblingElement(\"Activate\")\n item = item.nextSiblingElement(\"Item\")\n\n item = self.qhash[qs].elem.firstChildElement(\"Item\")\n while(item.isNull() is False):\n itemName = item.firstChildElement(\"Name\")\n if(str(itemName.text()).strip() == select):\n activ = item.firstChildElement(\"Activate\")\n while(activ.isNull() is False):\n s = str(activ.text()).strip() + 
ids\n h = self.qhash[s]\n h.widget.setEnabled(True)\n h.label.show()\n h.widget.show()\n activ = activ.nextSiblingElement(\"Activate\")\n item = item.nextSiblingElement(\"Item\")", "def showSlot(self, number):\n if number <= 0:\n self.log.info('Showing slot \"Negative\" (%d) slot ignored' % number)\n else:\n slot = self.slots[self.number - number]\n if slot.isOccupied():\n slot.removeItem()\n if number == self.target:\n image = self.target_image if self.value < self.target else self.target_image_filled\n self.log.debug('Showing target row: %s, %s, %s' % (self.value, self.target, image))\n elif number <= self.value:\n image = self.fill_image\n elif number <= self.target:\n image = self.blank_image\n else:\n image = self.grey_image\n self.log.debug('Showing %s cell %d as %s (value=%s, target=%s)' % (\n self.name, number, image, self.value, self.target))\n\n slot.addItem(sprite.Sprite(image))", "def disconnect(self, slot):\n if self.is_connected(slot):\n self.slots.pop(self.slots.index(slot))", "def fillSlots(self, name, stan):\n if self._slotData is None:\n self._slotData = {}\n self._slotData[name] = stan", "def slot_ticker(self, dummy_sender, data):\r\n (bid, ask) = data\r\n self.bid = bid\r\n self.ask = ask\r\n self.last_change_type = None\r\n self.last_change_price = 0\r\n self.last_change_volume = 0\r\n self._repair_crossed_asks(ask)\r\n self._repair_crossed_bids(bid)\r\n self.signal_changed(self, None)", "def put_slot_value(self, slot_name_or_uri, value):\n self.memory[slot_name_or_uri] = value\n # print(\"MemoryManager: Implement Me\")\n return self.memory[slot_name_or_uri]", "def slotSelectRow(self, selectDict):\n if self.dataType != selectDict['dataType']:\n return\n\n #print('slotSelectRow() selectDict:', selectDict)\n \n ind = selectDict['index']\n '''\n plotDf = selectDict['plotDf']\n index = plotDf.at[ind, 'index']\n index = int(index)\n '''\n index = ind\n #index -= 1 # !!! MY VISUAL INDEX IN TABLE IS ONE BASED !!!\n column = 0\n modelIndex = self.model().index(index, column)\n self.setCurrentIndex(modelIndex)", "def getSlotforObject(cls, obj):\n if obj.__class__ in restslotattributedict.keys():\n attr = cls.getSlotAttrib(obj)\n if attr is not None:\n peek = getattr(obj, \"peek_\" + attr)\n slot = str(peek()).split('/')[0]\n else:\n return False, \"0\"\n return True, slot", "def __init__(self, slots):\n self.slots = slots\n self.table = []\n for i in range(slots):\n self.table.append([])", "def __init__(self, slots):\n self.slots = slots\n self.table = []\n for i in range(slots):\n self.table.append([])", "def num_slots(self, num_slots):\n\n self._num_slots = num_slots", "def render_ifnoslot(self, slotName):\n\t\t# just repeat the code from ifslot -- this is called frequently,\n\t\t# and additional logic just is not worth it.\n\t\tdef render(ctx, data):\n\t\t\ttry:\n\t\t\t\tif not ctx.locateSlotData(slotName):\n\t\t\t\t\treturn ctx.tag\n\t\t\t\telse:\n\t\t\t\t\treturn \"\"\n\t\t\texcept KeyError:\n\t\t\t\treturn \"\"\n\t\treturn render", "def __len__(self):\n return len(self._subSlots)", "def slot_keypress(self, gox, (key)):\r\n pass", "def Item(self) -> object:", "def Item(self) -> object:", "def extend_slots(self, prediction, item):\n spec = prediction.phrasal_pattern[0]\n slots = prediction.slots\n if is_role_specifier(spec):\n new_slots = copy(slots)\n new_slot = self.role_specifier(spec)\n if new_slot in new_slots:\n raise DuplicateSlotError('Slot %s already exists in %s.' 
% (\n new_slot, prediction))\n new_slots[new_slot] = item\n return new_slots\n else:\n return slots", "def SlotStatus(rangeInfo):\n if rangeInfo >= 3 and rangeInfo <= 10:\n \"\"\" Full Slot \"\"\"\n status = 1\n return status\n else:\n \"\"\" Empty Slot \"\"\"\n status = 0\n return status", "def InSlotsGet(self):\n ## Make Header\n hex_rep = self.NanonisTCP.make_header('Signals.InSlotsGet', body_size=0)\n \n self.NanonisTCP.send_command(hex_rep)\n \n response = self.NanonisTCP.receive_response()\n \n # signals_names_size = self.NanonisTCP.hex_to_int32(response[0:4])\n signals_names_num = self.NanonisTCP.hex_to_int32(response[4:8])\n \n idx = 8\n signal_names = []\n for n in range(signals_names_num):\n size = self.NanonisTCP.hex_to_int32(response[idx:idx+4])\n idx += 4\n signal_name = response[idx:idx+size].decode()\n idx += size\n signal_names.append(signal_name)\n \n signal_indexes = []\n signal_indexes_size = self.NanonisTCP.hex_to_int32(response[idx:idx+4])\n for n in range(signal_indexes_size):\n idx += 4\n signal_index = self.NanonisTCP.hex_to_int32(response[idx:idx+4])\n signal_indexes.append(signal_index)\n \n return [signal_names,signal_indexes]", "def _lSlot(self, state):\n self._param = QtXml.QDomElement()\n if qt4:\n qs = str(self.sender().property(\"dom address\").toPyObject())\n else:\n qs = str(self.sender().property(\"dom address\"))\n\n ind = qs.rfind('/')\n ids = qs[ind:]\n\n self._param = self.qhash[qs].elem.firstChildElement(\"Activate\")\n while(self._param.isNull() is False):\n qs = str(self._param.text()).strip() + ids\n self.qhash[qs].widget.setEnabled(state)\n widget_visible = self.qhash[qs].elem.attribute(\"Visible\", \"Unknown\")\n if(state is False & widget_visible != \"Unknown\"):\n self.qhash[qs].label.hide()\n self.qhash[qs].widget.hide()\n else:\n self.qhash[qs].label.show()\n self.qhash[qs].widget.show()\n self._param = self._param.nextSiblingElement(\"Activate\")\n\n self._param = self.qhash[qs].elem.firstChildElement(\"Deactivate\")\n while(self._param.isNull() is False):\n qs = str(self._param.text()).strip() + ids\n self.qhash[qs].widget.setEnabled(-state)\n widget_visible = self.qhash[qs].elem.attribute(\"Visible\", \"Unknown\")\n if(state is True & widget_visible != \"Unknown\"):\n self.qhash[qs].label.hide()\n self.qhash[qs].widget.hide()\n else:\n self.qhash[qs].label.show()\n self.qhash[qs].widget.show()\n self._param = self._param.nextSiblingElement(\"Deactivate\")", "def __init__(self):\n self.map = {} #key: val, val, idx\n self.slot = []", "def partVirtualHelixRenumberedSlot(self, sender, coord):\n vh = self._virtualHelixHash[coord]\n # check for new number\n # notify VirtualHelixHandleItem to update its label\n # notify VirtualHelix to update its xovers\n # if the VirtualHelix is active, refresh prexovers\n pass", "def extra_slots(self, extra_slots):\n\n self._extra_slots = extra_slots", "def activated(self):", "def __getitem__(self, key):\n if self.level > 0:\n if isinstance(key, tuple):\n assert len(key) > 0\n assert len(key) <= self.level\n if len(key) == 1:\n return self._subSlots[key[0]]\n else:\n return self._subSlots[key[0]][key[1:]]\n return self._subSlots[key]\n else:\n if self.meta.shape is None:\n # Something is wrong. Are we cancelled?\n Request.raise_if_cancelled()\n if not self.ready():\n msg = \"This slot ({}.{}) isn't ready yet, which means \" \\\n \"you can't ask for its data. 
Is it connected?\".format(self.getRealOperator().name, self.name)\n self.logger.error(msg)\n slotInfoMsg = \"Can't get data from slot {}.{} yet.\"\\\n \" It isn't ready.\"\\\n \"First upstream problem slot is: {}\"\\\n \"\".format( self.getRealOperator().__class__, self.name, Slot._findUpstreamProblemSlot(self) )\n self.logger.error(slotInfoMsg)\n raise Slot.SlotNotReadyError(\"Slot isn't ready. See error log.\")\n assert self.meta.shape is not None, \\\n (\"Can't ask for slices of this slot yet:\"\n \" self.meta.shape is None!\"\n \" (operator {} [self={}] slot: {}, key={}\".format(\n self.operator.name, self.operator, self.name, key))\n return self(pslice=key)", "def on_comboBox_qihuozhanghao_currentIndexChanged(self, p0):\n # TODO: not implemented yet\n raise NotImplementedError", "def vacate_slot(self, slot, reg_num):\n self.__occupied_slots__.pop(slot.slot_number)\n self.__vehicle_slot_mapping__.pop(reg_num)\n self.__available_slots__.add(slot)", "def _reserve_bsa_slot(self):\n self._run_cmd(CAPUT + \" IOC:IN20:EV01:EDEFNAME \" + '\"' + self._edef_name + '\"')", "def emit(self, **kwargs):\n for slot in self.slots:\n result = slot(**kwargs)\n\n if result is not None:\n return result", "def slotshow_m(obj, content):\n global _slotshow_m_tbl\n\n return _slotshow(obj, content, _slotshow_m_tbl)", "def slotVolume(self, a0):\n self.sampleGroup.action('volume', value=a0)", "def execute(self, slot, subindex, roi, result):\n totalIndex = (self._subSlots.index(slot),) + subindex\n return self.operator.execute(self, totalIndex, roi, result)", "def frame(self):", "def action(self, player, game, slot):\n card = player.get_coders().get_card(slot)\n player.get_coders().remove_card(slot)\n index = game.get_sleeping_coders().index(None)\n game.set_sleeping_coder(index,card)\n game.set_action(\"NO_ACTION\")\n game.next_player()" ]
[ "0.73193145", "0.6811926", "0.6725808", "0.65843606", "0.6397118", "0.6368233", "0.63534164", "0.63223046", "0.6321543", "0.6312873", "0.6151129", "0.6127017", "0.6102243", "0.60954756", "0.60865855", "0.6085341", "0.60555625", "0.6027709", "0.6023868", "0.60039294", "0.5981614", "0.5944901", "0.5928264", "0.5860642", "0.58225965", "0.5803388", "0.57931894", "0.5743234", "0.57300717", "0.57230735", "0.5700275", "0.56719446", "0.56676686", "0.56638163", "0.5662287", "0.56184554", "0.5609259", "0.55965775", "0.5580088", "0.5575003", "0.5574187", "0.5566322", "0.55628324", "0.554456", "0.55376935", "0.55371886", "0.55152655", "0.55149907", "0.55011916", "0.5471193", "0.54608166", "0.54495156", "0.54477423", "0.5443887", "0.5396864", "0.5391362", "0.53841025", "0.5380693", "0.5379568", "0.5375968", "0.5374344", "0.53722537", "0.53629327", "0.5348422", "0.5348001", "0.53435796", "0.53432775", "0.5343046", "0.5324689", "0.5320415", "0.53172785", "0.5309329", "0.5282788", "0.528094", "0.5278367", "0.52753556", "0.52753556", "0.5262484", "0.5252292", "0.5249807", "0.52497274", "0.52403206", "0.52403206", "0.5223128", "0.5221164", "0.52208847", "0.5218931", "0.5216089", "0.52156943", "0.5213589", "0.5192793", "0.51838696", "0.5183027", "0.51815283", "0.51802087", "0.51758593", "0.5171057", "0.5164619", "0.5154298", "0.51483667", "0.5131844" ]
0.0
-1
Create a new Band. Created band is a python representation of the Band.Create case class in the scala datamodel
def __init__(self, name, number, wavelength): self.name = name self.number = number self.wavelength = wavelength
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_bands(band_number):\n try:\n band_dict = band_lookup[band_number]\n except KeyError:\n raise KeyError('Band %s does not exist', band_number)\n\n return [Band(**band_dict)]", "def __init__(self, lbda=None, bandname=None, zp=None, \n mjd=None, empty=False,**kwargs):\n self.__build__()\n if empty:\n return\n prop = kwargs_update(dict(lbda=lbda, bandname=bandname,mjd=mjd, zp=zp),\n **kwargs)\n self.create(**prop)", "def __init__(self, bandit):\n self.bandit = bandit", "def do_bay_create(cs, args):\n baymodel = cs.baymodels.get(args.baymodel)\n\n opts = {}\n opts['name'] = args.name\n opts['baymodel_id'] = baymodel.uuid\n opts['node_count'] = args.node_count\n opts['master_count'] = args.master_count\n opts['discovery_url'] = args.discovery_url\n opts['bay_create_timeout'] = args.timeout\n try:\n bay = cs.bays.create(**opts)\n # support for non-async in 1.1\n if args.magnum_api_version and args.magnum_api_version == '1.1':\n _show_bay(bay)\n else:\n uuid = str(bay._info['uuid'])\n print(\"Request to create bay %s has been accepted.\" % uuid)\n except Exception as e:\n print(\"Create for bay %s failed: %s\" %\n (opts['name'], e))", "def __init__(self, bandit):\n super().__init__(bandit)", "def __init__(self, bandit):\n super().__init__(bandit)", "def band(self, name, bands, new_name=None, label=None, text_key=None):\n if not self._is_numeric(name):\n msg = \"Can only band numeric typed data! {} is {}.\"\n msg = msg.format(name, self._get_type(name))\n raise TypeError(msg)\n if not text_key: text_key = self.text_key\n if not new_name: new_name = '{}_banded'.format(name)\n if not label: label = self.text(name, False, text_key)\n franges = []\n for idx, band in enumerate(bands, start=1):\n lab = None\n if isinstance(band, dict):\n lab = list(band.keys())[0]\n band = list(band.values())[0]\n if isinstance(band, tuple):\n if band[0] < 0:\n raise ValueError('Cannot band with lower bound < 0.')\n elif band[1] < 0:\n raise ValueError('Cannot band with upper bound < 0.')\n r = '{}-{}'.format(band[0], band[1])\n franges.append([idx, lab or r, {name: frange(r)}])\n else:\n r = str(band)\n franges.append([idx, lab or r, {name: [band]}])\n\n self.derive(new_name, 'single', label, franges,\n text_key=text_key)\n\n return None", "def create_bw_graph(self, bw_dict):\n try:\n graph = self.awareness.graph\n link_to_port = self.awareness.link_to_port\n for link in link_to_port:\n (src_dpid, dst_dpid) = link\n (src_port, dst_port) = link_to_port[link]\n if src_dpid in bw_dict and dst_dpid in bw_dict:\n bw_src = bw_dict[src_dpid][src_port]\n bw_dst = bw_dict[dst_dpid][dst_port]\n bandwidth = min(bw_src, bw_dst)\n # add key:value of bandwidth into graph.\n graph[src_dpid][dst_dpid]['bandwidth'] = bandwidth\n else:\n graph[src_dpid][dst_dpid]['bandwidth'] = 0\n return graph\n except:\n self.logger.info(\"Create bw graph exception\")\n if self.awareness is None:\n self.awareness = lookup_service_brick('awareness')\n return self.awareness.graph", "def create(request: BedRequestCreate) -> Bed:\n if request.garden_id:\n bed = Bed(request.name, request.yard_id, request.garden_id)\n else:\n bed = Bed(request.name, request.yard_id)\n return bed", "def __init__(self, args):\n (vertices, edges) = args\n graph = Graph()\n graph.add_vertices(vertices)\n graph.add_edges(edges)\n self._graph = graph\n from sage_semigroups.categories.finite_left_regular_bands import FiniteLeftRegularBands\n Parent.__init__(self, category=FiniteLeftRegularBands().FinitelyGenerated())", "def create_b_obj(ob_name, b_obj_data):\n b_obj = 
bpy.data.objects.new(ob_name, b_obj_data)\n bpy.context.scene.objects.link(b_obj)\n bpy.context.scene.objects.active = b_obj\n b_obj.select = True\n return b_obj", "def band(self, val):\n if len(val) is 2:\n self.band = tuple(val)\n \n else:\n # Should raise an error here\n pass", "def __init__(self, fromPort = 0, toPort = 0, Type = 'PowerBond'):\n BGcomponent.__init__(self)\n Graph.__init__(self)\n self.id = next(self.id_generator)\n self.__id = BGbond.__ID\n BGbond.__ID += 1\n self.__id += 1\n self.__fromPort = fromPort\n self.__toPort = toPort\n self.__causalityStroke = 1\n self.__directionArrow = 1\n self.__type = Type", "def create_bigfirm(bf_data):\n return get_or_create_object(bf_data, BigFirm)", "def create():", "def create():", "def BoostDesc_create(desc=None, use_scale_orientation=None, scale_factor=None): # real signature unknown; restored from __doc__\n pass", "def create(self):", "def create_barn_door(self):\n light_shape = self.light.getShape()\n inputs = light_shape.inputs(type='aiBarndoor')\n if inputs:\n self.barn_door = inputs[0]\n else:\n self.barn_door = pm.createNode('aiBarndoor')\n self.barn_door.attr('message') >> \\\n light_shape.attr('aiFilters').next_available", "def __init__(self, NumofBandits=10):\n self.NumofBandits = NumofBandits\n\n # define success probabilities of each arm\n self.prob = np.random.random(self.NumofBandits)", "def create_new_banks():\n\n\tcity = request.form.get('bankCity', '')\n\tname = request.form.get('bankName', '')\n\taddress = request.form.get('bankAddress', '')\n\tinfo = dict(city=city, name=name, address=address)\n\t# print(info)\n\tbank = Bank(city, name, address)\n\tres = bank.save()\n\t# print('res=%d' % res)\n\treturn send_result(info, res, status=\"True\")", "def create(self):\n\n raise NotImplementedError", "def make_bb_object(name, data):\n global BLACKBOARD, TRACE_LEVEL\n bb_obj = BB_object(name, data)\n if TRACE_LEVEL > 2:\n print \"\\tCreating {0} object: {1}\".format( type(data), bb_obj )\n BLACKBOARD[name] = bb_obj\n signal_creation_event(bb_obj)\n return bb_obj", "def DrawBands(self, count):\n value = self.little[0]\n mobile_average = float(sum([float(self.little[i])\n for i in range(len(self.little))])) / float(self.period)\n standard_derivation = sqrt(sum([pow(self.little[i] - mobile_average, 2)\n for i in range(len(self.little))]) / self.period)\n upper_band = mobile_average + (standard_derivation * self.sd_coef)\n lower_band = mobile_average - (standard_derivation * self.sd_coef)\n self.upper.insert(0, upper_band)\n self.lower.insert(0, lower_band)\n if len(self.upper) >= self.period:\n self.upper.pop()\n if len(self.lower) >= self.period:\n self.lower.pop()\n if count >= self.period:\n for i in range(len(self.little) - 1):\n self.canvas.create_line((i * self.incr / 1.725) + self.incr * 4,\n self.height - self.incr * 4 + (self.little[i] - 1) * 5000 - 200,\n (i * self.incr / 1.725) + self.incr * 4 + self.incr / 1.725,\n self.height - self.incr * 4 + (self.little[i + 1] - 1) * 5000 - 200,\n fill = \"#FFFF00\", width = 2)\n for i in range(len(self.upper) - 1):\n self.canvas.create_line((i * self.incr / 1.635) + self.incr * 4,\n self.height - self.incr * 4 + (self.upper[i] - 1) * 5000 - 200,\n (i * self.incr / 1.635) + self.incr * 4 + self.incr / 1.635,\n self.height - self.incr * 4 + (self.upper[i + 1] - 1) * 5000 - 200,\n fill = \"#FF6600\", width = 3)\n self.canvas.create_line((i * self.incr / 1.635) + self.incr * 4,\n self.height - self.incr * 4 + (self.lower[i] - 1) * 5000 - 200,\n (i * self.incr / 1.635) + self.incr 
* 4 + self.incr / 1.635,\n self.height - self.incr * 4 + (self.lower[i + 1] - 1) * 5000 - 200,\n fill = \"#FF0000\", width = 3)", "def create(self):\n ...", "def __init__(self, chromosome, starts, ends, labels, gstrands):\n \n if 1 <= chromosome <= 24:\n self._chromosome = chromosome\n else:\n raise ValueError('wrong chromosome number %d' % chromosome)\n\n # Sort bands by starting base\n sorted_bands = sorted(zip(labels, starts, ends, gstrands),\n key=op.itemgetter(1))\n\n self._band_keys = dict((k[0], i) for i, k in enumerate(sorted_bands))\n self._bands = tuple(ChromosomeBand(self._chromosome, *band)\n for band in sorted_bands)", "def create(self, lbda=None, source=None, instrument_name=None,\n mjd=None, zp=None, bandname=None, zpsys=\"ab\",\n force_it=False, **meta):\n self._properties[\"lbda\"] = np.float(lbda) if lbda is not None else None\n self._side_properties[\"source\"] = source\n self._side_properties[\"instrument_name\"] = instrument_name\n self._side_properties[\"meta\"] = meta\n # -- Interactive ones\n self._side_properties[\"zpsys\"] = zpsys\n self.mjd = mjd\n self.zp = zp\n self.set_bandname(bandname)\n self._update_()", "def __CreateNewBlipData(self, wave_id, wavelet_id):\n blip_data = model.BlipData()\n blip_data.wave_id = wave_id\n blip_data.wavelet_id = wavelet_id\n blip_data.blip_id = 'TBD_' + str(random.random()).split('.')[1]\n return blip_data", "def add_BBANDS(self, timeperiod=20, nbdevup=2, nbdevdn=2, matype=0,\n types=['line_dashed_thin', 'line_dashed_thin'],\n colors=['tertiary', 'grey_strong'], **kwargs):\n if not self.has_close:\n raise Exception()\n\n utils.kwargs_check(kwargs, VALID_TA_KWARGS)\n if 'kind' in kwargs:\n kwargs['type'] = kwargs['kind']\n if 'kinds' in kwargs:\n types = kwargs['type']\n\n if 'type' in kwargs:\n types = [kwargs['type']] * 2\n if 'color' in kwargs:\n colors = [kwargs['color']] * 2\n\n name = 'BBANDS({},{},{})'.format(str(timeperiod),\n str(nbdevup),\n str(nbdevdn))\n ubb = name + '[Upper]'\n bb = name\n lbb = name + '[Lower]'\n self.pri[ubb] = dict(type='line_' + types[0][5:],\n color=colors[0])\n self.pri[bb] = dict(type='area_' + types[1][5:],\n color=colors[1], fillcolor='fill')\n self.pri[lbb] = dict(type='area_' + types[0][5:],\n color=colors[0], fillcolor='fill')\n (self.ind[ubb],\n self.ind[bb],\n self.ind[lbb]) = talib.BBANDS(self.df[self.cl].values,\n timeperiod, nbdevup, nbdevdn, matype)", "def __init__(self, band_collections, sample_size, input_channels, output_labels,\n transform=None, seed=42):\n self.band_collections = band_collections\n self.sample_size = sample_size\n self.input_channels = input_channels\n self.output_labels = output_labels\n self.transform = transform\n self.seed = seed #TODO: make dataset with seed", "def create(self):\n pass", "def create(self):\n pass", "def create(self):\n pass", "def create(self):\n\n pass", "def create_circuit(self, circuit, database, str_obj, current_date):\r\n if self._circuit_type in (HC, DHW):\r\n return Circuit(self._connector, circuit[ID],\r\n database, str_obj, self._circuit_type,\r\n self._bus_type, current_date)\r\n elif self._circuit_type == SC:\r\n return BasicCircuit(self._connector, circuit[ID],\r\n database, str_obj, self._circuit_type,\r\n self._bus_type)\r\n return None", "def create(self, *args, **kwargs):\n pass", "def create_band_maps(self):\n band_maps = []\n source_band_index = 1\n target_band_index = self.starting_target_band\n for band in self.image['bands']:\n band_maps.append({\n 'source': source_band_index,\n 'target': target_band_index\n })\n 
source_band_index += 1\n target_band_index += 1\n return band_maps", "def bandwidth_limit_rule_create(request, policy_id, **kwargs):\n body = {'bandwidth_limit_rule': kwargs}\n if 'tenant_id' not in kwargs:\n kwargs['tenant_id'] = request.user.project_id\n body = {'bandwidth_limit_rule': kwargs}\n rule = 'bandwidth_limit_rule'\n bandwidth_limit_rule = neutronclient(request)\\\n .create_bandwidth_limit_rule(policy_id, body).get(rule)\n return BandwidthLimitRule(bandwidth_limit_rule)", "def __new__(cls, *args, **kwargs):\n obj = super(RegistersBank, cls).__new__(cls, *args, **kwargs)\n obj.__dict__ = cls.data\n\n return obj", "def get_bandpass(self, fc=1.0, bw_ratio=0.2, first_element='shunt', normalized=False): \r\n if not normalized:\r\n cmps = transform_prototype(self.gs, \r\n fc,\r\n bw_ratio=bw_ratio, \r\n R0=self.R0, \r\n first_element=first_element, \r\n flt_type='bandpass')\r\n else:\r\n cmps = transform_prototype_normalized(self.gs,\r\n bw_ratio=bw_ratio, \r\n first_element=first_element, \r\n flt_type='bandpass')\r\n cmp_dict = connect_bandpass(cmps, first_element=first_element)\r\n net = spice_net.SpiceNetwork(cmp_dict)\r\n return net", "def create_db(name, path):\r\n signal_definitions = [\r\n SignalDefinition(name=\"a\", startbit=0, length=2),\r\n SignalDefinition(name=\"b\", startbit=2, length=3),\r\n SignalDefinition(name=\"c\", startbit=5, length=7),\r\n SignalDefinition(name=\"d\", startbit=12, length=6),\r\n SignalDefinition(name=\"e\", startbit=18, length=6),\r\n ]\r\n db = kvadblib.Dbc(name=name)\r\n\r\n # The signals dictionary will be filled with functions that, given a\r\n # bitlist view of the data, will return the raw signal value. Since we have\r\n # set factor=1 and offset=0, the physical value will be indentical to the\r\n # raw value.\r\n signals = {}\r\n\r\n message = db.new_message(name=\"TestMessage01\", id=42, dlc=3)\r\n for definition in signal_definitions:\r\n signal = message.new_signal(\r\n name=definition.name,\r\n size=kvadblib.ValueSize(startbit=definition.startbit, length=definition.length),\r\n )\r\n\r\n # Using `partial`, create a new function, based on\r\n # `bitlist_slice_to_int`, that has the keyword arguments `offset` and\r\n # `length` set according to the current signal definition.\r\n signals[signal.name] = partial(\r\n bitlist_slice_to_int, offset=signal.size.startbit, length=signal.size.length\r\n )\r\n\r\n filename = path / f\"{name}.dbc\"\r\n filename_str = str(filename)\r\n print(f\"Writing {filename_str}\")\r\n db.write_file(filename_str) # qqqmac Dbc.write_file() and Dbc() should take pathlib objects\r\n db.close\r\n return signals, filename_str", "def __init__(self, wavename):\n super(IDWT_2D, self).__init__()\n wavelet = pywt.Wavelet(wavename)\n self.band_low = wavelet.dec_lo\n self.band_low.reverse()\n self.band_high = wavelet.dec_hi\n self.band_high.reverse()\n assert len(self.band_low) == len(self.band_high)\n self.band_length = len(self.band_low)\n assert self.band_length % 2 == 0\n self.band_length_half = math.floor(self.band_length / 2)", "def newChemBond(self, **attrlinks):\n return ChemBond(self, **attrlinks)", "def create(self, data):\n raise NotImplementedError", "def save(self):\n raise NotImplementedError(\"Saving and updating DerivedBands is not permitted\")", "def createObject(self, *args):\n return _libsbml.FbcModelPlugin_createObject(self, *args)", "def create_cab(self):\n cab = Cab()\n cab.type = self.info['type']\n cab.driver_name = self.info.get('driver_name')\n cab.rc_number = self.info['rc_number']\n cab.city_id 
= self.info['city_id']\n cab.company_name = self.info['company_name']\n cab.model_name = self.info['model_name']\n cab.update_time = datetime.utcnow().replace(microsecond=0)\n self.cab_id = self.save(cab)\n\n # we can do asynchronously\n self.create_cab_state()\n return self.cab_id", "def Create(self):\n raise NotImplementedError()", "def _create_berth(self, row):\n\n berth = self.world.create_entity()\n\n position = np.array([\n float(row[\"lon\"]),\n float(row[\"lat\"])\n ])\n\n berth_info = BerthInfo(\n row[\"id\"],\n row[\"name\"],\n row['max_quay_length'],\n float(row['max_depth']),\n self.vessel_content_types(int(row[\"ship_types\"])),\n allowed_vessel_classes=\n self.berth_service_distribution_factory.get_allowed_vessel_classes_for_terminal(row[\"terminal\"]),\n section=row[\"section\"])\n\n sampler = self.berth_service_distribution_factory.service_time_sampler(row[\"terminal\"])\n\n self.world.add_component(berth, Position(lonlat=np.array(position)))\n self.world.add_component(berth, berth_info)\n self.world.add_component(berth, BerthStateMachine(sampler, self.berth_randomized_check_prob))\n \n return berth", "def format_data(self, bands, branches):\n self.new_dict = self.__gen_dict\n order = [] # list to store high-symmetry point\n band_index = {} # dict to store band index info corresponding to its high-symmetry point e.g. \"X\": 18\n formatted_bands = []\n zero_matrix = np.zeros(np.shape(bands))\n \"\"\"\n zero_matrix is for: if one configuration does not have some high-symmetry points listed in __generic_dict\n then fill zeros in those columns \n \"\"\"\n\n for i in range(len(branches)):\n order.append(branches[i][\"name\"])\n spilt = re.split('-', order[i])\n\n band_index[spilt[0]] = branches[i]['start_index']\n band_index[spilt[1]] = branches[i]['end_index']\n\n # print('>>>>>>>>>>>>>>>>>>', band_index)\n # iterate all keys in band_index, and if exists, give value to new_dict, if not, pass\n for hs_point in band_index:\n if hs_point in self.new_dict:\n self.new_dict[hs_point] = band_index[hs_point]\n # print('>>>>>>>>>>>>>>>>>', BandsData.__gen_dict)\n\n # iterate all keys in new_dict, export bands (not arranged in bands dimension)\n for hs_point in self.new_dict:\n hs_value = self.new_dict[hs_point]\n if self.new_dict[hs_point] is None:\n # fill zeros in bands\n formatted_bands.append(zero_matrix[:, 0])\n else:\n formatted_bands.append(bands[:, hs_value])\n\n # transpose of formatted_bands\n formatted_bands = np.transpose(formatted_bands)\n\n return formatted_bands, self.new_dict", "def __init__(self, links=None, bands=None, bbox=None, coordinate_system=None, datatype=None, first_acquired=None, grid=None, id=None, interval=None, item_types=None, last_acquired=None, level=None, name=None, product_type=None, quad_download=None): # noqa: E501 # noqa: E501\n\n self._links = None\n self._bands = None\n self._bbox = None\n self._coordinate_system = None\n self._datatype = None\n self._first_acquired = None\n self._grid = None\n self._id = None\n self._interval = None\n self._item_types = None\n self._last_acquired = None\n self._level = None\n self._name = None\n self._product_type = None\n self._quad_download = None\n self.discriminator = None\n\n if links is not None:\n self.links = links\n if bands is not None:\n self.bands = bands\n self.bbox = bbox\n self.coordinate_system = coordinate_system\n if datatype is not None:\n self.datatype = datatype\n self.first_acquired = first_acquired\n if grid is not None:\n self.grid = grid\n if id is not None:\n self.id = id\n if interval 
is not None:\n self.interval = interval\n if item_types is not None:\n self.item_types = item_types\n self.last_acquired = last_acquired\n if level is not None:\n self.level = level\n self.name = name\n self.product_type = product_type\n if quad_download is not None:\n self.quad_download = quad_download", "def createAnd(self):\n return _libsbml.FbcAnd_createAnd(self)", "def create(cls, **kwargs):\r\n return cls().fill(**kwargs).save()", "def createExtractedBandList(self):\n\n L8_bands = None\n\n # Working directory for extracted L8 bands\n working_dir = self.config['working_d']\n outpath_bands = self.scene.extractBands(working_dir)\n\n if outpath_bands is not None:\n # Create a list all filenames extracted from the downloaded tar file\n f_Bands = os.listdir(outpath_bands)\n\n # Search only for filename ending in '_Bx.TIF' and add them to the list of L8 band files\n L8_bands = [x for x in f_Bands if re.search(r'_B(\\d+)\\.TIF$', x, flags=RegexFlag.IGNORECASE)]\n L8_bands.sort(key=natural_keys)\n\n if len(L8_bands) != self.scene.getNumberOfBands():\n self.logger.critical('Skipping scene: Path/Row= [%s/%s] date= [%s]', self.scene.path, self.scene.row, self.scene.acqdate)\n raise workflowException('Missing band files detected: {0} found instead of {1}'.format(len(L8_bands), '11'))\n else:\n L8_bands = None\n self.logger.critical('Error decompressing %s', str(self.scene))\n raise workflowException('Error passing original band files')\n\n filename = L8_bands[0]\n title, info1, info2, info3 = self.scene.decodeProduct(filename)\n\n # log band file information\n self.logger.info(' ')\n self.logger.info('%s', title)\n self.logger.info('%s', info1)\n self.logger.info('%s', info2)\n self.logger.info('%s', info3)\n self.logger.info(' ')\n\n return L8_bands", "def _create_model(n_bins=10, alpha=0.1, tol=0.1, contamination=0.1):\n n_bins = int(n_bins)\n\n hbos = HBOS(\n n_bins=n_bins,\n alpha=alpha,\n tol=tol,\n contamination=contamination\n )\n\n print('Created Model: {}'.format(hbos))\n\n return hbos", "def __init__(self,make,model,year):\n super().__init__(make,model,year)\n # adicionando atributo especifico dessa classe\n self.batery_size = Batery(100)", "def _create_device(self):\n project_page = 'https://garage.maemo.org/projects/brisa'\n self.device = Device('urn:schemas-upnp-org:device:BinaryLight:1',\n self.server_name,\n manufacturer='Brisa Team. 
Embedded Laboratory '\\\n 'and INdT Brazil',\n manufacturer_url=project_page,\n model_name='Binary Light Device',\n model_description='A UPnP Binary Light Device',\n model_number='1.0',\n model_url=project_page)", "def bands(self, bands):\n\n self._bands = bands", "def create_blast_db(self):\n print(\"Creating blast db\")\n if self.mask:\n command = 'dustmasker -in ' + self.seq_file + ' -infmt fasta '\n command += '-outfmt maskinfo_asn1_bin -out ' + self.seq_file + '_dust.asnb'\n subprocess.check_output(command, shell=True) # identifying low-complexity regions.\n\n command = 'makeblastdb -in ' + self.seq_file + ' -input_type fasta -dbtype nucl '\n command += '-mask_data ' + self.seq_file + '_dust.asnb '\n command += '-out ' + self.seq_file + ' -title \"Whole Genome without low-complexity regions\"'\n subprocess.check_output(command, shell=True) # Overwriting the genome file.\n else:\n command = 'makeblastdb -in ' + self.seq_file + ' -input_type fasta -dbtype nucl '\n command += '-out ' + self.seq_file + ' -title \"Whole Genome unmasked\"'\n subprocess.check_output(command, shell=True)", "def __init__(self,make,model,year):\r\n\t\tsuper().__init__(make,model,year)\r\n\t\tself.battery = Battery()", "def __init__(self, make, model, year):\n super().__init__(make, model, year)\n self.battery = Battery()", "def create_sample(obj):\n # generated by rigify.utils.write_metarig\n bpy.ops.object.mode_set(mode='EDIT')\n arm = obj.data\n\n bones = {}\n\n bone = arm.edit_bones.new('Bone')\n bone.head[:] = 0.0000, 0.0000, 0.0000\n bone.tail[:] = 0.0000, 0.5000, 0.0000\n bone.roll = 0.0000\n bone.use_connect = False\n bones['Bone'] = bone.name\n\n bpy.ops.object.mode_set(mode='OBJECT')\n pbone = obj.pose.bones[bones['Bone']]\n pbone.rigify_type = 'basic.center_of_mass'\n pbone.lock_location = (False, False, False)\n pbone.lock_rotation = (False, False, False)\n pbone.lock_rotation_w = False\n pbone.lock_scale = (False, False, False)\n pbone.rotation_mode = 'QUATERNION'\n\n bpy.ops.object.mode_set(mode='EDIT')\n for bone in arm.edit_bones:\n bone.select = False\n bone.select_head = False\n bone.select_tail = False\n for b in bones:\n bone = arm.edit_bones[bones[b]]\n bone.select = True\n bone.select_head = True\n bone.select_tail = True\n arm.edit_bones.active = bone\n\n return bones", "def __init__(self, wavename):\n super(DWT_2D, self).__init__()\n wavelet = pywt.Wavelet(wavename)\n self.band_low = wavelet.rec_lo\n self.band_high = wavelet.rec_hi\n assert len(self.band_low) == len(self.band_high)\n self.band_length = len(self.band_low)\n assert self.band_length % 2 == 0\n self.band_length_half = math.floor(self.band_length / 2)", "def WaveletCreate(self, wave_id):\n raise NotImplementedError()", "def __init__(self, make, model, year):\n super().__init__(make, model, year)\n self.battery = Battery()", "def __init__(self, make, model, year):\n super().__init__(make, model, year)\n self.battery = Battery()", "def __init__(self, make, model, year):\n super().__init__(make, model, year)\n self.battery = Battery()", "def __init__(self, make, model, year):\n super().__init__(make, model, year)\n self.battery = Battery()", "def __init__(self, make, model, year):\n super().__init__(make, model, year)\n self.battery = Battery()", "def __init__(self, make, model, year):\n super().__init__(make, model, year)\n self.battery = Battery()", "def create(*args):", "def test_create(self):\n B._Base__nb_objects = 0\n\n d = {'id': 5, 'width': 3, 'height': 7, 'x': 2, 'y': 1}\n r1 = Rectangle.create(**d)\n 
self.assertEqual(r1.to_dictionary(), d)\n self.assertEqual(B._Base__nb_objects, 1)\n\n s2 = Square(5)\n d4 = s2.to_dictionary()\n s5 = Square.create(**d4)\n self.assertEqual(s5.to_dictionary(), d4)\n self.assertEqual(B._Base__nb_objects, 3)", "def new(self):\n\n if not hasattr(self, 'required_attribs'):\n self.required_attribs = []\n\n # sanity check\n for req_var in self.required_attribs:\n if req_var not in self.kwargs:\n err = \"The '%s' kwarg is required when creating new %s!\"\n msg = err % (req_var, self.collection)\n self.logger.error(msg)\n self.logger.error('Incoming kwargs dict: %s' % self.kwargs)\n raise ValueError(msg)\n\n # do it\n self.logger.warn('Creating new %s record!' % self.collection)\n\n for req_var in self.required_attribs:\n setattr(self, req_var, self.kwargs[req_var])\n\n self.created_on = datetime.now()\n self.updated_on = datetime.now()\n self.created_by = flask_login.current_user._id\n self._id = self.mdb.insert({})\n\n try:\n self.save()\n except pymongo.errors.DuplicateKeyError as e:\n self.mdb.remove({'_id': self._id})\n self.logger.error(e)\n self.logger.error('Cannot create asset: %s' % self)\n raise ValueError('Duplicate key error prevented asset creation!')", "def create_rack(self, datacenter, name, vlan_id_min, vlan_id_max, nrsq):\n log.info(\"Adding rack %s...\" % name)\n rack = Rack.builder(self.__context, datacenter) \\\n .name(name) \\\n .vlanIdMin(vlan_id_min) \\\n .vlanIdMax(vlan_id_max) \\\n .nrsq(nrsq) \\\n .build()\n rack.save()\n return rack", "def __init__(self, ebunch=None):\n super(BayesianModel, self).__init__()\n if ebunch:\n self.add_edges_from(ebunch)\n self.cpds = []\n self.cardinalities = self.get_cardinality()\n self.probs = dict()", "def __init__(self, flag_band: FlagBand,\n layer: \"datacube_ows.ows_configuration.OWSNamedLayer\") -> None:\n super().__init__({})\n self.layer = layer\n self.bands: Set[str] = set()\n self.bands.add(flag_band.pq_band)\n self.flag_bands = {flag_band.pq_band: flag_band}\n self.product_names = tuple(flag_band.pq_names)\n self.ignore_time = flag_band.pq_ignore_time\n self.declare_unready(\"products\")\n self.declare_unready(\"low_res_products\")\n self.manual_merge = flag_band.pq_manual_merge\n self.fuse_func = flag_band.pq_fuse_func\n # pyre-ignore[16]\n self.main_product = self.products_match(layer.product_names)", "def createByte(self, address: ghidra.program.model.address.Address) -> ghidra.program.model.listing.Data:\n ...", "def create(self, data, commit_sha='', status_url=None):\n if 'workflow' in data and isinstance(data['workflow'], list):\n data['workflow'] = list(set(data['workflow']))\n else:\n data['workflow'] = ['unittest', 'build', 'deploy']\n db_obj = objects.registry.Assembly()\n db_obj.update(data)\n db_obj.uuid = uuidutils.generate_uuid()\n db_obj.user_id = self.context.user\n db_obj.project_id = self.context.tenant\n db_obj.username = self.context.user_name\n\n db_obj.status = ASSEMBLY_STATES.QUEUED\n db_obj.create(self.context)\n\n plan_obj = objects.registry.Plan.get_by_id(self.context,\n db_obj.plan_id)\n\n artifacts = plan_obj.raw_content.get('artifacts', [])\n for arti in artifacts:\n self._build_artifact(assem=db_obj, artifact=arti,\n commit_sha=commit_sha,\n status_url=status_url)\n return db_obj", "def createAnd(self):\n return _libsbml.FbcOr_createAnd(self)", "def create_from(cls, backend):\n backend_config = backend.configuration()\n\n # TODO : Remove usage of config.defaults when backend.defaults() is updated.\n try:\n backend_default = backend.defaults()\n buffer = 
backend_default.buffer\n except ModelValidationError:\n try:\n buffer = backend_config.defaults.get('buffer', 0)\n except AttributeError:\n buffer = 0\n\n # system size\n n_qubits = backend_config.n_qubits\n n_registers = backend_config.n_registers\n n_uchannels = backend_config.n_uchannels\n\n # generate channels with assuming their numberings are aligned with qubits\n drives = [DriveChannel(i, buffer=buffer) for i in range(n_qubits)]\n\n measures = [MeasureChannel(i, buffer=buffer) for i in range(n_qubits)]\n\n controls = [ControlChannel(i, buffer=buffer) for i in range(n_uchannels)]\n\n acquires = [AcquireChannel(i, buffer=buffer) for i in range(n_qubits)]\n\n qubits = []\n for i in range(n_qubits):\n # TODO: get qubits <-> channels relationship from backend\n qubit = Qubit(i, drives[i], measures[i], acquires[i],\n control_channels=[] if not controls else controls)\n qubits.append(qubit)\n\n registers = [RegisterSlot(i) for i in range(n_registers)]\n # TODO: get #mem_slots from backend\n mem_slots = [MemorySlot(i) for i in range(len(qubits))]\n\n return DeviceSpecification(qubits, registers, mem_slots)", "def __init__(self):\n this = _libsbml.new_SBO()\n try: self.this.append(this)\n except: self.this = this", "def band(self):\n return self._band", "def create():\n pass", "def create_branch(self, gui_id):\n if gui_id not in self.SMGData.keys():\n self.SMGData[gui_id] = dict()\n self.SMGData[gui_id]['Mask'] = None\n self.SMGData[gui_id]['gMuns'] = None\n self.SMGData[gui_id]['phaseraw'] = None\n self.SMGData[gui_id]['deltagM'] = None\n self.SMGData[gui_id]['phasegM'] = None\n self.SMGData[gui_id]['shiftg'] = None\n self.SMGData[gui_id]['gCuns'] = None\n else:\n raise Exception('Key gui_id already exists, branch creation aborted')", "def createRecord(self):\n self.dto.getRecord().append(self.controller.createNewObj())\n print(\"Record added.\")", "def setup_b_instance(self,norm,add_ps_mask=True):\n inst_tag = self.tag + '_'+str(self.flux_array_ebin)\n b = bsm.bayesian_scan_NPTF(tag=inst_tag,nside=self.nside,work_dir='/tmp/'+self.tag+'/',psf_dir=psf_dir,nlive=700)\n # Input the data, using the external data if provided\n if self.use_external_data:\n b.load_external_data(self.f1.CTB_en_bins,[self.external_data[self.flux_array_ebin]],self.f1.CTB_exposure_maps)\n else:\n b.load_external_data(self.f1.CTB_en_bins,self.f1.CTB_count_maps,self.f1.CTB_exposure_maps)\n\n if add_ps_mask:\n b.make_mask_total(band_mask_range = [-self.band_mask,self.band_mask],mask_ring = False,ps_mask_array = self.f1.ps_mask_array)\n else:\n b.make_mask_total(band_mask_range = [-self.band_mask,self.band_mask],mask_ring = False)\n\n b.add_new_template(self.f1.template_dict)\n b.rebin_external_data(1)\n\n b.add_poiss_model('ps_model','$A_{ps}$',[0.0,3.0],False)\n b.add_poiss_model('p7','$A_{p7}$',[0.0,2.0],False)\n b.add_poiss_model('bubs','$A_{bubs}$',[0.0,2.0],False)\n b.add_poiss_model('iso','$A_{iso}$',[0.0,3.0],False)\n # Add in a fixed J_map template\n b.add_fixed_templates({'J_map':[norm*self.J_map_arr[self.flux_array_ebin]/np.mean(self.J_map_arr[self.flux_array_ebin])]})\n\n b.initiate_poissonian_edep()\n return b", "def create_balancer(ctx):\n if not self.balancer_exists():\n data = self.create_balancer()\n ctx.info('Successfully created load balancer {}:'.format(self.get_balancer_name()))\n ctx.pp.pprint(data)\n else:\n ctx.info('Load balancer {} already exists, nothing to create.'.format(\n self.get_balancer_name()\n ))", "def startRubberBand(self, point, color=[220,0,0], thickness=4.0, alpha=255,\n 
linestyle='=', fill=None, name='QIVRubberBand'):\n\n # create the rubberband object\n self.rubberBandObj = QIVPolygonItem([], color=color, thickness=thickness,\n alpha=alpha, linestyle=linestyle, fill=fill, name=name)\n\n # set the origin\n self.rubberBandOrigin = point\n\n # set the rubber band geometry\n self.rubberBandRect = QRectF(self.rubberBandOrigin, self.rubberBandOrigin)\n self.rubberBandObj.setGeometry(self.rubberBandRect)\n\n # the the \"am rubberbanding\" state variable\n self.rubberBanding = True\n\n # add it to our scene\n self.scene.addItem(self.rubberBandObj)\n\n # add it to our list of scene items\n self.sceneItems.append(self.rubberBandObj)\n\n return self.rubberBandObj", "def __init__(self, BondsList = [], ElementsList = [], \\\n graph = Graph(), Name = None):\n BGcomponent.__init__(self)\n Graph.__init__(self)\n self.id = next(self.id_generator)\n self.__id = BondGraph.__ID\n BondGraph.__ID += 1\n self.__id += 1\n self.__bondsList = BondsList\n self.__elementsList = ElementsList\n self.__name = Name", "def __init__(self, lhs: Side, rhs: Side, rate: Rate, label=None):\n self.lhs = lhs\n self.rhs = rhs\n self.rate = rate\n self.label = label", "def __init__(self, wavename):\n super(DWT_2D_tiny, self).__init__()\n wavelet = pywt.Wavelet(wavename)\n self.band_low = wavelet.rec_lo\n self.band_high = wavelet.rec_hi\n assert len(self.band_low) == len(self.band_high)\n self.band_length = len(self.band_low)\n assert self.band_length % 2 == 0\n self.band_length_half = math.floor(self.band_length / 2)", "def create(\n self,\n kind,\n branch_match_kind=\"glob\",\n branch_pattern=\"*\",\n branch_type=None,\n users=None,\n groups=None,\n value=None,\n ):\n if branch_match_kind == \"branching_model\":\n branch_pattern = \"\"\n\n data = {\n \"kind\": kind,\n \"branch_match_kind\": branch_match_kind,\n \"pattern\": branch_pattern,\n }\n\n if branch_match_kind == \"branching_model\":\n data[\"branch_type\"] = branch_type\n\n if users is not None:\n data[\"users\"] = users\n\n if groups is not None:\n data[\"groups\"] = groups\n\n if value is not None:\n data[\"value\"] = value\n\n return self.__get_object(self.post(None, data=data))", "def __init__(self,make,model,year):\n super().__init__(make,model,year)\n self.battery = Battery()", "def inst_bp(instrument,array=\"2\"):\n\n if instrument == \"MUSTANG2\" or instrument == \"MUSTANG\":\n srms = (300*u.um).to(\"m\") # surface RMS (microns)\n ### Reference: https://science.nrao.edu/facilities/gbt/proposing/GBTpg.pdf\n EA90 = 0.36 # Aperture efficiency at 90 GHz\n ### The beam efficiencies should be taken as 1.37* Aperture Efficiency\n R90 = np.exp(-4.0*np.pi*(srms/(const.c/(9.0e10*u.s**-1))).value) #\n Gnot = EA90/R90 # Unphysical, but see documentation...\n if instrument == \"MUSTANG2\":\n flow = 75.0 # GHz\n fhig = 105.0 # GHz\n else:\n flow = 82.5 # GHz\n fhig = 97.5 # GHz\n \n farr = np.arange(flow,fhig,1.0) # frequency array.\n tran = farr*0.0 + 1.0 # Let the transmission be unity everywhere.\n Larr = const.c.value/(farr*1.0e9) # Keep calm and carry on.\n ### Old formula:\n #Ruze = Gnot * np.exp(-4.0*np.pi*(srms.value)/Larr)\n ### Correct formula: (10 April 2018)\n Ruze = Gnot * np.exp(-(4.0*np.pi*srms.value/Larr)**2)\n NRuz = Ruze / np.max(Ruze) # Normalize it\n band = tran * Ruze # Bandpass, with (unnormalized) Ruze efficiency\n \n if instrument == \"NIKA2\" or instrument == \"NIKA\":\n caldir='/home/romero/NIKA2/NIKA_SVN/Processing/Pipeline/Calibration/BP/'\n bpfile=caldir+'Transmission_2017_Jan_NIKA2_v1.fits'\n hdulist = 
fits.open(bpfile)\n\n if array == \"1H\": # 1mm (260 GHz) array, Horizontal Polarization\n tbdata = hdulist[1].data # 1H\n freq = tbdata.field(0)\n tran = tbdata.field(1)\n erro = tbdata.field(2)\n atmt = tbdata.field(3)\n cfreq1h = np.sum(freq*tran)/np.sum(tran)\n \n if array == \"1V\": # 1mm (260 GHz) array, Vertical Polarization\n tbdata = hdulist[2].data # 1V\n freq = tbdata.field(0)\n tran = tbdata.field(1)\n erro = tbdata.field(2)\n atmt = tbdata.field(3)\n cfreq1v = np.sum(freq*tran)/np.sum(tran)\n \n if array == \"2\": # 2mm (150 GHz) array\n tbdata = hdulist[3].data # 2\n freq = tbdata.field(0)\n tran = tbdata.field(1)\n erro = tbdata.field(2)\n atmt = tbdata.field(3)\n cfreq2 = np.sum(freq*tran)/np.sum(tran)\n\n ### Trim the zero-frequency listing, if any.\n gi=np.where(freq > 0)\n freq = freq[gi]\n tran = tran[gi]\n erro = erro[gi]\n atmt = atmt[gi]\n \n### Calculate Aperture efficiencies from information found at:\n### http://www.iram.es/IRAMES/mainwiki/Iram30mEfficiencies\n Beff = 0.630 # at 210 GHz\n Aeff = Beff/1.27 # See text on webpage\n srms = (66.0*u.um).to(\"m\") # surface RMS (microns)\n R210 = np.exp(-4.0*np.pi*(srms/(const.c/(2.1e11*u.s**-1))).value) #\n Gnot = Aeff/R210 # Unphysical, but see documentation...\n\n Larr = const.c.value/(freq*1.0e9) # Keep calm and carry on. \n Ruze = Gnot * np.exp(-4.0*np.pi*(srms.value)/Larr)\n NRuz = Ruze / np.max(Ruze) # Normalize it\n band = tran * Ruze # Bandpass, with (unnormalized) Ruze efficiency\n farr = freq\n \n#########################################################################\n\n if instrument == 'ACT90':\n srms = (27.0*u.um).to(\"m\") # surface RMS (microns)\n EA90 = 0.95 # I'm making this number up...\n R90 = np.exp(-4.0*np.pi*(srms/(const.c/(9.0e10*u.s**-1))).value) #\n Gnot = EA90/R90 # Unphysical, but see documentation...\n flow = 65.0 # GHz\n fhig = 125.0 # GHz\n farr = np.arange(flow,fhig,1.0) # frequency array.\n freq_ref = 90.0 # I took EA90 to be a fictitious aperature efficiency at 90 GHz\n band = ruze_eff(farr,freq_ref,EA90,srms)\n\n if instrument == 'ACT150':\n srms = (27.0*u.um).to(\"m\") # surface RMS (microns)\n EA90 = 0.95 # I'm making this number up...\n R90 = np.exp(-4.0*np.pi*(srms/(const.c/(9.0e10*u.s**-1))).value) #\n Gnot = EA90/R90 # Unphysical, but see documentation...\n flow = 120.0 # GHz\n fhig = 180.0 # GHz\n farr = np.arange(flow,fhig,1.0) # frequency array.\n freq_ref = 90.0 # I took EA90 to be a fictitious aperature efficiency at 90 GHz\n band = ruze_eff(farr,freq_ref,EA90,srms)\n\n\n return band, farr", "def __init__(self, chromosome, label, start_base, end_base,\n gstrand=None, sub_bands=None):\n\n if start_base >= end_base:\n raise ValueError('wrong band coordinates '\n '%d-%d' % (start_base, end_base))\n\n self._chromosome = chromosome\n self._label = label\n self._start_base = start_base\n self._end_base = end_base\n\n self._gstrand = gstrand\n self._sub_bands = sub_bands", "def create(self, **kwargs):\n return self.save(self.new(**kwargs))", "def __init__(self, wfr_diameter=76.2, edge_clearance=0.0, x0=0.0, y0=0.0, width=5.0, height=5.0, filename=None):\n if filename is None:\n self.wafer = semiwafer.semiWaferCells(size='3 inch',x0=x0,y0=y0,width=width,height=height,edge_clearance=edge_clearance)\n\n # self.wfr_diameter = wfr_diameter\n # self.edge_clearance = edge_clearance\n # self.x0 = x0\n # self.y0 = y0\n # self.width = width\n # self.height = height\n\n self.bands = {} # Initialize empty dictionary of resonator distribution objects\n self.cells = {} # Initialize empty 
dictionary of cell placement lists", "def minimum_bandwidth_rule_create(request, policy_id, **kwargs):\n if 'tenant_id' not in kwargs:\n kwargs['tenant_id'] = request.user.project_id\n body = {'minimum_bandwidth_rule': kwargs}\n rule = 'minimum_bandwidth_rule'\n minimum_bandwidth_rule = neutronclient(request)\\\n .create_minimum_bandwidth_rule(policy_id, body).get(rule)\n return MinimumBandwidthRule(minimum_bandwidth_rule)", "def create(self,**extra_fields):\r\n print(extra_fields)\r\n data = self.model(**extra_fields)\r\n data.save(using=self._db)", "def add_bands(self, product, bands):\n fnames = {f: self.filenames[f] for f in self.filenames if f in bands}\n #import nose.tools; nose.tools.set_trace()\n if len(fnames) > 0:\n self[product].open(filenames=fnames.values(), bandnames=fnames.keys())", "def create_book(self, title, isbn):\n # new_book = Book(title, isbn)\n # return new_book\n return Book(title, isbn)" ]
[ "0.6518197", "0.64347064", "0.5994854", "0.5890965", "0.57064843", "0.57064843", "0.5659268", "0.56355095", "0.5517736", "0.54783046", "0.5404359", "0.5397878", "0.5396625", "0.53284055", "0.5327761", "0.5327761", "0.53230125", "0.5319953", "0.52875245", "0.5253581", "0.5253239", "0.52460873", "0.52438134", "0.5210647", "0.51600164", "0.51528955", "0.5130826", "0.5125624", "0.5113172", "0.510436", "0.5093392", "0.5093392", "0.5093392", "0.509017", "0.5075563", "0.5067529", "0.50630116", "0.50579214", "0.5043785", "0.50263435", "0.5002933", "0.4993507", "0.49868584", "0.49843502", "0.49833006", "0.4973637", "0.49723202", "0.4965956", "0.4965122", "0.4959189", "0.4956257", "0.4954575", "0.49431327", "0.49363512", "0.49238744", "0.4923025", "0.49198484", "0.49143648", "0.49060687", "0.4882988", "0.48767838", "0.48762587", "0.4873599", "0.4867382", "0.4865218", "0.4865218", "0.4865218", "0.4865218", "0.4865218", "0.4865218", "0.4864147", "0.48631257", "0.48604897", "0.48576042", "0.48575708", "0.48568973", "0.48475948", "0.48310605", "0.481416", "0.48083147", "0.48065266", "0.47995472", "0.47976473", "0.4794824", "0.47889748", "0.47875485", "0.47850284", "0.4784331", "0.47779027", "0.47773263", "0.47769183", "0.4776874", "0.4776163", "0.47749013", "0.4774161", "0.47722787", "0.47686937", "0.47629765", "0.47618356", "0.47604126", "0.47583586" ]
0.0
-1
Associates the specified blobs with the given encryption keys.
def WriteBlobEncryptionKeys(
    self,
    key_names: Dict[rdf_objects.BlobID, str],
) -> None:
    self.blob_keys.update(key_names)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def volume_encryption_keys(self, volume_encryption_keys):\n\n self._volume_encryption_keys = volume_encryption_keys", "def test_blob_key_stored():\n\tbackup_and_restore(\n\t\tlambda context: put_keys(lib.SET, BLOB_KEYS, \"foobar\", True),\n\t\tNone,\n\t\tlambda context: check_keys(lib.SET, BLOB_KEYS, \"foobar\", True)\n\t)", "def ReadBlobEncryptionKeys(\n self,\n blob_ids: Collection[rdf_objects.BlobID],\n ) -> Dict[rdf_objects.BlobID, Optional[str]]:\n return dict(zip(blob_ids, map(self.blob_keys.get, blob_ids)))", "def CreateBlob(self, blob_key, blob):\n self._blobs[blobstore.BlobKey(unicode(blob_key))] = blob", "def test_blob_key():\n\tbackup_and_restore(\n\t\tlambda context: put_keys(lib.SET, BLOB_KEYS, \"foobar\", False),\n\t\tNone,\n\t\tlambda context: check_keys(lib.SET, BLOB_KEYS, \"foobar\", False)\n\t)", "def encryptByteArray(self, data, keyobj):\n raise NotImplementedError(\"Is abstract\")", "def put_keys(set_name, keys, value, send_key):\n\tfor key in keys:\n\t\tlib.write_record(set_name, key, [\"value\"], [value], send_key)", "def load_keys(self, keys: List[Union[str, jwcrypto.jwk.JWK]] = None):\n if keys:\n for key in keys:\n if isinstance(key, jwcrypto.jwk.JWK):\n self.jwk_set.add(key)\n logger.info(\"Added {0} key {1}\".format(key.key_type, key.key_id))\n else:\n pem = open(key, \"rb\").read()\n\n jwk_obj = jwcrypto.jwk.JWK.from_pem(pem)\n self.jwk_set.add(jwk_obj)\n logger.info(\"Added {0} key {1}\".format(jwk_obj.key_type, jwk_obj.key_id))", "def _create_keys(bucket_name, keys=[]):\n bucket = connection.create_bucket(bucket_name)\n\n for s in keys:\n key = bucket.new_key(s)\n key.set_contents_from_string(s)\n\n return bucket", "def store_s3_contents ( s3_conn, bucket_name, key_name, key_contents = None, key_contents_filename = None ) :\n bucket = s3_conn.get_bucket( bucket_name )\n key = boto.s3.key.Key( bucket )\n key.key = key_name\n if ( key_contents_filename ) :\n key.set_contents_from_filename( key_contents_filename )\n else :\n key.set_contents_from_string( key_contents )", "def _set_blobs(cur, params, blob_fields):\n if isinstance(blob_fields, (tuple, list)):\n for bf in blob_fields:\n field_type = None\n if isinstance(bf, dict):\n f = bf.get(\"field_name\")\n field_type = bf.get(\"field_type\", None)\n else:\n f = bf\n val = params.get(f)\n if val == None:\n continue\n if field_type in [\"blob\", \"clob\"]:\n field_type = field_type.upper()\n if field_type == None:\n if isinstance(val, (str, unicode)):\n field_type = \"CLOB\"\n else:\n field_type = \"BLOB\"\n if field_type == \"CLOB\":\n blobvalue = cur.var(cx_Oracle.CLOB, len(val))\n else:\n blobvalue = cur.var(cx_Oracle.BLOB, len(val))\n blobvalue.setvalue(0, val)\n params[f] = blobvalue\n elif isinstance(blob_fields, dict):\n for f, field_type in blob_fields.items():\n val = params.get(f)\n if val == None:\n continue\n if field_type == 'clob':\n blobvalue = cur.var(cx_Oracle.CLOB, len(val))\n elif field_type == 'blob':\n blobvalue = cur.var(cx_Oracle.BLOB, len(val))\n else:\n raise Exception(\"Unsupported blob type %s.\", field_type)\n blobvalue.setvalue(0, val)\n params[f] = blobvalue", "def WriteSignedBinaryBlobs(binary_urn: rdfvalue.RDFURN,\n blobs: Iterable[rdf_crypto.SignedBlob]):\n blob_references = rdf_objects.BlobReferences()\n current_offset = 0\n for blob in blobs:\n blob_id = data_store.BLOBS.WriteBlobWithUnknownHash(blob.SerializeToBytes())\n blob_references.items.Append(\n rdf_objects.BlobReference(\n offset=current_offset, size=len(blob.data), blob_id=blob_id))\n current_offset += 
len(blob.data)\n data_store.REL_DB.WriteSignedBinaryReferences(\n SignedBinaryIDFromURN(binary_urn), blob_references)", "def bake(self, keys):\n\n\t\tif keys is None:\n\t\t\treturn\n\n\t\tn = len(keys)\n\t\tself.encoded_keys = keys\n\n\t\tfree(self.encoded_counts)\n\t\tfree(self.encoded_log_probability)\n\n\t\tself.encoded_counts = <double*> calloc(n, sizeof(double))\n\t\tself.encoded_log_probability = <double*> calloc(n, sizeof(double))\n\t\tself.n = n\n\n\t\tfor i in range(n):\n\t\t\tkey = keys[i]\n\t\t\tself.encoded_counts[i] = 0\n\t\t\tself.encoded_log_probability[i] = self.log_dist.get(key, NEGINF)", "def __add_enclave_secrets(ledger_config, contract_id, client_keys, enclaveclients, provclients) :\n\n secrets = {}\n encrypted_state_encryption_keys = {}\n for enclaveclient in enclaveclients:\n psecrets = []\n for provclient in provclients:\n # Get a pspk:esecret pair from the provisioning service for each enclave\n sig_payload = pcrypto.string_to_byte_array(enclaveclient.enclave_id + contract_id)\n secretinfo = provclient.get_secret(enclaveclient.enclave_id,\n contract_id,\n client_keys.verifying_key,\n client_keys.sign(sig_payload))\n logger.debug(\"pservice secretinfo: %s\", secretinfo)\n\n # Add this pspk:esecret pair to the list\n psecrets.append(secretinfo)\n\n # Print all of the secret pairs generated for this particular enclave\n logger.debug('psecrets for enclave %s : %s', enclaveclient.enclave_id, psecrets)\n\n # Verify those secrets with the enclave\n esresponse = enclaveclient.verify_secrets(contract_id, client_keys.verifying_key, psecrets)\n logger.debug(\"verify_secrets response: %s\", esresponse)\n\n # Store the ESEK mapping in a dictionary key'd by the enclave's public key (ID)\n encrypted_state_encryption_keys[enclaveclient.enclave_id] = esresponse['encrypted_state_encryption_key']\n\n # Add this spefiic enclave to the contract\n add_enclave_to_contract(ledger_config,\n client_keys,\n contract_id,\n enclaveclient.enclave_id,\n psecrets,\n esresponse['encrypted_state_encryption_key'],\n esresponse['signature'])\n\n return encrypted_state_encryption_keys", "def _handle_uploads_inline(self, uploads_inline: List[str]) -> None:\n import base64\n import zlib\n files: Dict[str, str] = {}\n for filepath in uploads_inline:\n if not os.path.exists(filepath):\n raise CleanError(f'File not found: {filepath}')\n with open(filepath, 'rb') as infile:\n data = infile.read()\n data_zipped = zlib.compress(data)\n data_base64 = base64.b64encode(data_zipped).decode()\n files[filepath] = data_base64\n self._end_command_args['uploads_inline'] = files", "def put(self, keys: Union[str, Iterable], val: Union[bytes, str]):\n if hasattr(val, \"encode\"):\n val = val.encode(\"utf-8\")\n return (self.db.putVal(db=self.sdb, key=self._tokey(keys), val=val))", "def insert_many(self, conn, key, **kwargs):\n conn.zadd(key, **kwargs)", "def prepare_data_keys(primary_master_key, master_keys, algorithm, encryption_context):\n encrypted_data_keys = set()\n encrypted_data_encryption_key = None\n data_encryption_key = primary_master_key.generate_data_key(algorithm, encryption_context)\n _LOGGER.debug(\"encryption data generated with master key: %s\", data_encryption_key.key_provider)\n for master_key in master_keys:\n # Don't re-encrypt the encryption data key; we already have the ciphertext\n if master_key is primary_master_key:\n encrypted_data_encryption_key = EncryptedDataKey(\n key_provider=data_encryption_key.key_provider, encrypted_data_key=data_encryption_key.encrypted_data_key\n )\n 
encrypted_data_keys.add(encrypted_data_encryption_key)\n continue\n encrypted_key = master_key.encrypt_data_key(\n data_key=data_encryption_key, algorithm=algorithm, encryption_context=encryption_context\n )\n encrypted_data_keys.add(encrypted_key)\n _LOGGER.debug(\"encryption key encrypted with master key: %s\", master_key.key_provider)\n return data_encryption_key, encrypted_data_keys", "def put_bucket_encryption(Bucket=None, ContentMD5=None, ServerSideEncryptionConfiguration=None):\n pass", "def associate_s3_resources(memberAccountId=None, s3Resources=None):\n pass", "def _load_blob_containers(\n neo4j_session: neo4j.Session, blob_containers: List[Dict], update_tag: int,\n) -> None:\n ingest_blob_containers = \"\"\"\n UNWIND $blob_containers_list as blob\n MERGE (bc:AzureStorageBlobContainer{id: blob.id})\n ON CREATE SET bc.firstseen = timestamp(), bc.type = blob.type\n SET bc.name = blob.name,\n bc.lastupdated = $azure_update_tag,\n bc.deleted = blob.deleted,\n bc.deletedtime = blob.deleted_time,\n bc.defaultencryptionscope = blob.default_encryption_scope,\n bc.publicaccess = blob.public_access,\n bc.leasestatus = blob.lease_status,\n bc.leasestate = blob.lease_state,\n bc.lastmodifiedtime = blob.last_modified_time,\n bc.remainingretentiondays = blob.remaining_retention_days,\n bc.version = blob.version,\n bc.hasimmutabilitypolicy = blob.has_immutability_policy,\n bc.haslegalhold = blob.has_legal_hold,\n bc.leaseduration = blob.leaseDuration\n WITH bc, blob\n MATCH (bs:AzureStorageBlobService{id: blob.service_id})\n MERGE (bs)-[r:CONTAINS]->(bc)\n ON CREATE SET r.firstseen = timestamp()\n SET r.lastupdated = $azure_update_tag\n \"\"\"\n\n neo4j_session.run(\n ingest_blob_containers,\n blob_containers_list=blob_containers,\n azure_update_tag=update_tag,\n )", "def combine_keys(*keys: bytes) -> bytes:\n key = hashlib.sha3_512(keys[0]).digest()\n for k in keys[1:]:\n next_key = hashlib.sha3_512(k).digest()\n\n key = bytes([\n a ^ b\n for (a, b)\n in zip(key, next_key)\n ])\n return key", "def store_file(filename1, filename2):\n print 'Splitting ', filename1, ' into encoded comments for keys'\n file_list = read_file_into_list(filename1)\n output_file = open(filename2, 'w')\n counter_length = len(file_list)\n counter = 0\n for chunk in file_list:\n print 'Creating key ', counter, ' of ', counter_length\n counter = counter + 1\n key_id = create_key(chunk)\n output_file.write(send_key(key_id)+'\\n')\n print '--> key has been created and uploaded'\n print 'File has been successfully uploaded to ', KEYSERVER", "def encrypted_binaries(self, encrypted_binaries):\n\n self._encrypted_binaries = encrypted_binaries", "def encrypt_data ( aes_key, data ) :\n salt = Crypto.Random.new( ).read( Crypto.Cipher.AES.block_size )\n cipher = Crypto.Cipher.AES.new( aes_key, Crypto.Cipher.AES.MODE_CFB, salt )\n encrypted_data = cipher.encrypt( data )\n\n return encode_data( salt + encrypted_data )", "def add(self, keys: Iterator[int], values: Iterator[bytes], *args, **kwargs):\n redis_docs = [{'_id': i, 'values': j} for i, j in zip(keys, values)]\n\n with self.get_add_handler() as redis_handler:\n for k in redis_docs:\n redis_handler.set(k['_id'], k['values'])", "def multiple_bucket(single_bucket): # pylint: disable=redefined-outer-name\n single_bucket.insert(\"key 2\", \"value 2\")\n return single_bucket", "def _set_keys(self, listOfKeys):\n self._keys = listOfKeys", "def encrypt(cls, plaintext, aad, key, iv):", "def add_fast_rcnn_blobs(self, blobs, im_scales, roidb):\n # Sample training RoIs from each image and 
append them to the blob lists\n for im_i, entry in enumerate(roidb):\n frcn_blobs = self._sample_rois(entry, im_scales[im_i], im_i)\n for k, v in frcn_blobs.items():\n blobs[k].append(v)\n # Concat the training blob lists into tensors\n for k, v in blobs.items():\n if isinstance(v, list) and len(v) > 0:\n blobs[k] = np.concatenate(v)\n \n # Perform any final work and validity checks after the collating blobs for\n # all minibatch images\n valid = True\n \n return valid", "def join(self, blob, callback=None, expire_callback=None):", "def encrypt(project_id, location_id, key_ring_id, crypto_key_id,\n plaintext_file_name, ciphertext_file_name):\n\n # Creates an API client for the KMS API.\n kms_client = googleapiclient.discovery.build('cloudkms', 'v1')\n\n # The resource name of the CryptoKey.\n name = 'projects/{}/locations/{}/keyRings/{}/cryptoKeys/{}'.format(\n project_id, location_id, key_ring_id, crypto_key_id)\n\n # Read data from the input file.\n with io.open(plaintext_file_name, 'rb') as plaintext_file:\n plaintext = plaintext_file.read()\n\n # Use the KMS API to encrypt the data.\n crypto_keys = kms_client.projects().locations().keyRings().cryptoKeys()\n request = crypto_keys.encrypt(\n name=name,\n body={'plaintext': base64.b64encode(plaintext).decode('ascii')})\n response = request.execute()\n ciphertext = base64.b64decode(response['ciphertext'].encode('ascii'))\n\n # Write the encrypted data to a file.\n with io.open(ciphertext_file_name, 'wb') as ciphertext_file:\n ciphertext_file.write(ciphertext)\n\n print('Saved ciphertext to {}.'.format(ciphertext_file_name))", "def RegisterBlobStores():\n blob_store.REGISTRY[db_blob_store.DbBlobStore.__name__] = (\n db_blob_store.DbBlobStore)", "def encrypt_byte_list_in_str(bytearray_list, public_encryption_key_obj):\n encrypted_str_list = []\n for bytearray_str in bytearray_list:\n message_text_enc = public_encryption_key_obj.encrypt(str(bytearray_str.decode(\"utf-8\")), 16)[0]\n encrypted_str_list.append(message_text_enc)\n encrypted_message_str = \"\".join(encrypted_str_list)\n return encrypted_message_str", "def insert_data_into_storage(name, image_dict):\n\n # if S3Connection supports __enter__, and __exit__ then we can use with.\n conn = S3Connection(aws_access_key_id, aws_secret_access_key)\n bucket = conn.get_bucket('hyperionstorm')\n\n k_lrg = Key(bucket)\n k_lrg.key = \"data/%s_lrg.jpg\" % name\n\n k_dft = Key(bucket)\n k_dft.key = \"data/%s.jpg\" % name\n\n k_tiny = Key(bucket)\n k_tiny.key = \"data/%s_tiny.jpg\" % name\n\n try:\n k_lrg.set_contents_from_string(image_dict[\"large\"])\n k_dft.set_contents_from_string(image_dict[\"default\"])\n k_tiny.set_contents_from_string(image_dict[\"tiny\"])\n except Exception, exp:\n conn.close()\n return False\n\n conn.close()\n return True", "def encrypt_and_encode(data, key):\r\n return base64.urlsafe_b64encode(aes_encrypt(data, key))", "def encrypt_data(self, params):\n raise NotImplementedError", "def strEnc(data, *keys):\n r = len(data) % 4\n data += (4 - r if r else 0) * chr(0)\n encData = \"\"\n for i in range(len(data) // 4):\n tempBt = strToBt(data[i * 4: i * 4 + 4])\n for key in keys:\n for b in getKeyBytes(key):\n tempBt = enc(tempBt, b)\n encData += bt64ToHex(tempBt)\n return encData", "def pin(self, keys: Union[str, Iterable], val: Union[bytes, str]):\n if hasattr(val, \"encode\"):\n val = val.encode(\"utf-8\")\n return (self.db.setVal(db=self.sdb, key=self._tokey(keys), val=val))", "def set_blob ( self, object_class_id, object_instance_id, attribute_name, blob_file, file_name 
) :\n try :\n inputs = []\n inputs.append(open(blob_file, 'rb'))\n for input in inputs:\n binary_data = input.read()\n blobfile = self.oracle_cursor.var(cx_Oracle.BLOB)\n blobfile.setvalue(0, binary_data)\n self.oracle_cursor.callproc(\"sdb_interface_pck.setBlob\", [object_class_id, object_instance_id, attribute_name, file_name, blobfile ])\n except Exception, err:\n print \"Error storing BLOB: ERROR: \" + str(err)\n raise", "def upload_key():\n data = check_args(('cloudProvider', 'key'))\n provider = jobs.init_provider(data, True)\n key = decrypt_key(data['key'], data['username'])\n provider.save_key(key)\n return make_response()", "def add_key(self, key_list: list) -> None:\n\n for key, funct, desc in key_list:\n # Force keys to be lowercase\n key = key.lower()\n \n self.key_functs[key] = funct\n self.key_satified[key] = False\n self.key_description[key] = desc\n self.key_values[key] = None", "def derive_keys(self, master_salt, master_secret):\n\n self.sender_key = self._kdf(master_salt, master_secret, self.sender_id, 'Key')\n self.recipient_key = self._kdf(master_salt, master_secret, self.recipient_id, 'Key')\n\n self.common_iv = self._kdf(master_salt, master_secret, b\"\", 'IV')", "def signature_keys(self, signature_keys):\n\n self._signature_keys = signature_keys", "def store_keypair ( s3_infra_conn, region_name, aws_account_type, keypair_name, keypair_filename ) :\n keypair_bucket = get_admin_bucket_name( region_name = region_name )\n store_s3_contents( s3_conn = s3_infra_conn,\n bucket_name = keypair_bucket,\n key_name = get_keypair_keypath( aws_account_type ) + get_keypair_keyname( keypair_name ),\n key_contents_filename = keypair_filename )", "def ecb_encrypt(pt_bin_list, keys, rounds):\n enc_result = \"\"\n\n with multiprocessing.Pool() as p:\n enc_result = p.starmap(feistel_encrypt, zip(pt_bin_list, keys, repeat(rounds)))\n return enc_result", "def encrypt_blocks(self):\n splitted_data = self._chunk_data()\n for index, bin_data in enumerate(splitted_data):\n # Encryption start\n header = b\"Sent by Geoffroy Givry.\"\n data_to_be_encrypted = bin_data\n\n cipher = AES.new(self.key, AES.MODE_EAX)\n cipher.update(header)\n\n ciphertext, tag = cipher.encrypt_and_digest(data_to_be_encrypted)\n json_k = ['nonce', 'header', 'ciphertext', 'tag']\n json_v = [b64encode(x).decode(\"utf-8\") for x in [cipher.nonce,\n header,\n ciphertext,\n tag]]\n result = json.dumps(dict(zip(json_k, json_v)))\n with open(\"block_{:0{numb}d}.json\"\n .format(index,\n numb=len(str(self.num_of_chunks))),\n 'w') as json_file:\n json_file.write(result)\n # Encryption end", "def set_encryption(key):\n global_scope['enc'] = Encryption(key.encode())", "def store_file(filename1, filename2):\n\tprint 'Splitting ', filename1, ' into encoded comments for keys'\n\n\t# watch out return value is the name of the newly packed file now\n\tfilename1 = pack_file_to_disk(filename1)\n\n\tfile_list = read_file_into_list(filename1)\n\toutput_file = open(filename2, 'w')\n\tcounter_length = len(file_list)\n\tcounter = 0\n\tfor chunk in file_list:\n\t\tprint 'Creating key ', counter, ' of ', counter_length\n\t\tcounter = counter + 1\n\t\tkey_id = create_key(chunk)\n\t\toutput_file.write(send_key(key_id)+'\\n')\n\t\toutput_file.flush()\n\t\tprint '--> key has been created and uploaded'\n\tprint 'File has been successfully uploaded to ', KEYSERVER\n\n\t# TODO adjust with args\n\t# tempfile is created at packing stage\n\tos.system('tempfile.tmp')", "def encode(text, key):\n encrypted = []\n for i in text:\n encrypted.append(key[i])\n 
return encrypted", "def encode(self, docs: Optional[DocumentArray], parameters: Dict, **kwargs):\n if docs:\n docs_batch_generator = get_docs_batch_generator(\n docs,\n traversal_path=parameters.get(\n 'traversal_paths', self.default_traversal_path\n ),\n batch_size=parameters.get('batch_size', self.default_batch_size),\n needs_attr='blob',\n )\n self._compute_embeddings(docs_batch_generator)", "def enclosure_disks(self, enclosure_disks):\n\n self._enclosure_disks = enclosure_disks", "def encrypt(self, filename):\n\t f = Fernet(self.key)\n\t with open(filename, \"rb\") as file:\n\t # read all file data\n\t file_data = file.read()\n\t # encrypt data\n\t encrypted_data = f.encrypt(file_data)\n\t # write the encrypted file\n\t with open(filename+\".enc\", \"wb\") as file:\n\t file.write(encrypted_data)", "def get_encrypted_data_keys(self, data_key, encryption_context):\n encrypted_data_keys = [message.header.EncryptedDataKey(b'aws-kms',\n bytes(data_key['KeyId']),\n bytes(data_key['CiphertextBlob']))]\n\n for client in self.kms_clients[1:]:\n key = client.encrypt(KeyId=self.master_key_id,\n Plaintext=data_key['Plaintext'],\n EncryptionContext=encryption_context)\n encrypted_data_key = message.header.EncryptedDataKey(b'aws-kms',\n bytes(key['KeyId']),\n bytes(key['CiphertextBlob']))\n encrypted_data_keys.append(encrypted_data_key)\n\n return encrypted_data_keys", "def encrypt(key, text):\n key = _key_array(key)\n text = _text_array(text)\n aes = mxit.aes.AES()\n parts = _split(text, 16)\n encoded = []\n for part in parts:\n encoded += aes.encrypt(part, key, aes.keySize[\"SIZE_128\"])\n return encoded", "def enc(elements):\n encoded = ''\n for key, dtype, value in elements:\n binary = enc_elem(dtype, value)\n encoded += struct.pack('>HBH', key, dtype, len(binary)) + binary\n return encoded", "def _add_files(self, category, files, session, bucket=None):\n\n with session[category].make_commit('master') as commit:\n for filename, content in files.items():\n if bucket:\n commit.put_file_url(\n filename,\n 's3://%s/%s' % (bucket, content)\n )\n else:\n commit.put_file_bytes(\n filename,\n content\n )", "def _encrypt_aes_key(aes_key: bytes, receiver_public_key: RsaKey) -> bytes:\n cipher_rsa = PKCS1_OAEP.new(receiver_public_key)\n return cipher_rsa.encrypt(aes_key)", "def attach(self, storages):\n self.tracer.info(\"%s.attach method called\" % self.__class__.__name__)\n\n # reload global.ini\n self._cfg.reload()\n\n # connect to Google API\n conn = self.api_conn()\n\n # fetch the GCE zone for this host\n zone = self.get_zone(conn, HOSTNAME)\n\n for storage in storages:\n # fetch pd & dev variables from global.ini for specified partition & usage\n connectionData = self._getConnectionDataForLun(storage.get(\"partition\"), storage.get(\"usage_type\"))\n try:\n pd = connectionData[\"pd\"]\n dev = connectionData[\"dev\"]\n except:\n raise Exception(\"pd or dev not set in global.ini\")\n\n # fetch mount options from global.ini\n try:\n mount_options = connectionData[\"mountoptions\"]\n except:\n mount_options = \"\"\n\n # fetch fencing options from global.ini\n try:\n fencing = connectionData[\"fencing\"]\n except:\n fencing = \"\"\n\n # fetch the host which currently owns the disk & the file path\n pdhost = self.get_pd_host(conn, pd, zone)\n path = storage.get(\"path\")\n\n # check if the require disk is already attached somewhere. 
If it is, detach it and fence the old host\n if pdhost == HOSTNAME:\n self.tracer.info(\"disk %s is already attached to %s(%s)\" % (pd, HOSTNAME, zone))\n self.mount(dev, path, mount_options)\n continue\n elif pdhost != \"\":\n self.tracer.info(\"unable to attach %s to %s(%s) as it is still attached to %s\" % (pd, HOSTNAME, zone, pdhost))\n self.detach_pd(conn, pdhost, pd)\n if fencing.lower() == \"enabled\" or fencing.lower() == \"true\" or fencing.lower() == \"yes\":\n self.fence(conn, pdhost)\n\n # prepare payload for API call\n pdurl = self.zonal_url(zone, \"disks\", pd)\n body = {\n \"deviceName\": pd,\n \"source\": pdurl\n }\n\n # send API call to disconnect disks\n self.tracer.info(\"attempting to attach %s to %s(%s)\" % (pd, HOSTNAME, zone))\n operation = conn.instances().attachDisk(project=PROJECT, zone=zone, instance=HOSTNAME, body=body).execute()\n self.wait_for_operation(conn, operation, zone)\n\n # check if disk is attached and if so, mount the volumes\n if self.get_pd_host(conn, pd, zone) == HOSTNAME:\n self.tracer.info(\"successfully attached %s to %s(%s)\" % (pd, HOSTNAME, zone))\n self.mount(dev, path, mount_options)\n else:\n raise Exception(\"failed to attached %s to %s(%s)\" % (pd, HOSTNAME, zone))\n\n # tell HANA is all good and to continue the load process\n return 0", "def encrypt(self, sensor_data):\r\n \r\n # set encryption parameters\r\n encryption1 = aes(self.ivkey, 2, self.staticiv)\r\n encryption2 = aes(self.datakey, 2, self.iv)\r\n # encrypt data\r\n self.encrypted_data = encryption2.encrypt(sensor_data) \r\n self.encrypted_iv = encryption1.encrypt(self.iv)\r\n self.encrypted_nodeid = encryption2.encrypt(self.nodeid)\r\n \r\n self.iv = bytes(random.getrandbits(8) for _ in range(16)) # changes every time\r", "def _put_buffers(state, buffer_paths, buffers):\n for buffer_path, buffer in zip(buffer_paths, buffers):\n # we'd like to set say sync_data['x'][0]['y'] = buffer\n # where buffer_path in this example would be ['x', 0, 'y']\n obj = state\n for key in buffer_path[:-1]:\n obj = obj[key]\n obj[buffer_path[-1]] = buffer", "def encryptFromString(self, data, keyobj):\n return self.encryptByteArray(bytearray(data, 'utf-8'), keyobj)", "def set_access_control_list_blob(bucket_name, blob_name, role_type, member_type):\n\n # initialize client, get bucket, & get blob\n _, _, blob = create_client(bucket_name, blob_name)\n\n # reload fetches the current ACL from cloud storage\n blob.acl.reload()\n\n # get member type\n member = get_member_blob_level(member_type, blob)\n \n # grant role to member\n grant_role_blob_level(role_type, member)\n\n blob.acl.save()\n\n print(\n \"added {} to '{}' on blob {} in bucket {}\".format(\n member_type, role_type, blob_name, bucket_name\n )\n )", "def load_keys(self, keys):\n paths = []\n for key in keys:\n if isinstance(key, SSHKeyFile):\n if not key.is_loaded:\n paths.append(key.path)\n elif isinstance(key, str):\n paths.append(key)\n\n if paths:\n self.log.debug('Loading {:d} keys to SSH agent'.format(len(paths)))\n cmd = ['ssh-add'] + paths\n p = Popen(cmd, stdin=sys.stdin, stdout=sys.stdout, stderr=sys.stderr)\n p.wait()\n else:\n self.log.debug('All SSH keys were already loaded to SSH agent')", "def encrypt(key, plaintext, associated_data=None):\n\n cipher = AES.new(key, AES.MODE_GCM)\n if associated_data:\n cipher.update(associated_data)\n\n ciphertext, tag = cipher.encrypt_and_digest(plaintext)\n nonce = cipher.nonce\n\n return nonce, ciphertext, tag", "def push_file_to_server(cnc_bot, filename, content, encryption_key=None):\r\n 
c = content\r\n if encryption_key is not None:\r\n c = rc4.encrypt(c, encryption_key, salt_length=0) # encrypt content via rc4\r\n cfg = {'filename': filename, 'content': c}\r\n cnc_bot.host_orders(cPickle.dumps(cfg)) # upload a serialized dict\r", "def encrypt(algorithm, key, plaintext, associated_data, iv):\n encryptor = Encryptor(algorithm, key, associated_data, iv)\n ciphertext = encryptor.update(plaintext) + encryptor.finalize()\n return EncryptedData(encryptor.iv, ciphertext, encryptor.tag)", "def upload(bucket, key, content, extra_agrs):\n # validate_content(content)\n validate_bucket_name(bucket)\n validate_key_name(key)\n client = get_client()\n if extra_agrs:\n client.put_object(Body=content, Bucket=bucket, Key=key, ContentType=extra_agrs['ContentType'])\n else:\n client.put_object(Body=content, Bucket=bucket, Key=key)", "def addEntries(self, docId, entities):\n\n entities = self.__removeDuplicateEntities(entities)\n table = self.__getTable()\n with table.batch_write() as batch:\n for entity in entities:\n encodedEntity = EncodedEntity(entity)\n if encodedEntity.encoded:\n batch.put_item(data={\n 'docId': docId,\n 'entity': encodedEntity.encoded,\n 'plain': encodedEntity.plain},\n overwrite = True)", "def add_mock_s3_data(mock_s3_fs, data, time_modified=None):\n if time_modified is None:\n time_modified = datetime.utcnow()\n for bucket_name, key_name_to_bytes in data.iteritems():\n mock_s3_fs.setdefault(bucket_name, {'keys': {}, 'location': ''})\n bucket = mock_s3_fs[bucket_name]\n\n for key_name, bytes in key_name_to_bytes.iteritems():\n bucket['keys'][key_name] = (bytes, time_modified)", "def mifare_change_keys(self,address,key_a,key_b):\n if address < 128:\n trailer_address = address | 3\n else:\n trailer_address = address | 15\n data = self.mifare_read(trailer_address)\n data = key_a + data[6:10] + key_b\n self.mifare_write_standard(trailer_address,data)", "def _load_blob_services(\n neo4j_session: neo4j.Session, blob_services: List[Dict], update_tag: int,\n) -> None:\n ingest_blob_services = \"\"\"\n UNWIND $blob_services_list as bservice\n MERGE (bs:AzureStorageBlobService{id: bservice.id})\n ON CREATE SET bs.firstseen = timestamp(), bs.type = bservice.type\n SET bs.name = bservice.name,\n bs.lastupdated = $azure_update_tag\n WITH bs, bservice\n MATCH (s:AzureStorageAccount{id: bservice.storage_account_id})\n MERGE (s)-[r:USES]->(bs)\n ON CREATE SET r.firstseen = timestamp()\n SET r.lastupdated = $azure_update_tag\n \"\"\"\n\n neo4j_session.run(\n ingest_blob_services,\n blob_services_list=blob_services,\n azure_update_tag=update_tag,\n )", "def read_keys(files, split):\n for file in files:\n with open(file, 'r') as file:\n data = file.read()\n data = \"<keys>\" + data + \"</keys>\"\n\n tree = ET.fromstring(data)\n # there're two types of tags -> C: causal, R: related (mixed)\n for child in tree:\n doc_id = child.attrib['id']\n doc_tags = []\n tags = child.text.split(\"\\n\")\n for tag in tags:\n # reading causal tags\n if tag.startswith(\"C\") or tag.startswith(\"R\"):\n if \"\\t\" in tag:\n tag_var = tag.split(\"\\t\")\n else:\n tag_var = tag.split(\" \")\n orig_id = tag.replace('\\t', ' ').replace(' ', '_') + str(doc_id)\n # the first combo is always CAUSE and the second combo is EFFECT\n doc_tags.append({'p1': tag_var[1], 'p2': tag_var[2], 'split': split,\n 'original_id': orig_id})\n keys[doc_id] = doc_tags", "def do_advertize_batch(self, filename_dict, remove_files=True):\n for factory_pool in filename_dict:\n self.do_advertize_batch_one(factory_pool, 
filename_dict[factory_pool], remove_files)", "def insertKeyCtx(*args, breakdown: bool=True, exists: bool=True, history: bool=True, image1:\n Union[AnyStr, bool]=\"\", image2: Union[AnyStr, bool]=\"\", image3: Union[AnyStr,\n bool]=\"\", name: AnyStr=\"\", q=True, query=True, e=True, edit=True,\n **kwargs)->Union[AnyStr, Any]:\n pass", "def pack_keys_for_xfer(pub_nacl_key: base64 = None,\n prv_nacl_key: base64 = None,\n path=paths.nacl_keys,\n *args,\n **kwargs) -> dict:\n\n key_pack = {}\n # prv_key = NaclCipher.load_prv_key() or prv_key\n public_box = make_nacl_pub_box(pub_nacl_key, prv_nacl_key)\n\n aes_key = AES256Cipher().load_key_for_xport()\n key_pack[\"aes\"] = aes_key\n\n fernet_key = FernetCipher().load_key_for_xport()\n key_pack[\"fernet\"] = fernet_key\n\n chacha_key = XChaCha20Poly1305.load_key_for_xport()\n key_pack[\"chacha\"] = chacha_key\n\n key_pack = json.dumps(key_pack)\n enc_keys = public_box.encrypt(key_pack.encode())\n\n return enc_keys", "def add_keys_tags(self,\r\n index=None,\r\n keyset=None,\r\n addkeys=True,\r\n sequences=True):\r\n\r\n def expand (keys):\r\n\r\n \"\"\"returns variant forms of a name\"\"\"\r\n finalkeys = set()\r\n\r\n for key in keys:\r\n returnkeyset = set()\r\n\r\n if SLASH in key:\r\n has_tags = True\r\n tag_tail = key.split(SLASH)[1]\r\n key = key.split(SLASH)[0]\r\n else:\r\n has_tags = False\r\n tag_tail = EMPTYCHAR\r\n if ATSIGN in key or PERIOD not in key or PERIOD+BLANK in key or key[0].isnumeric():\r\n all_keys = [key]\r\n else:\r\n key_parts = key.split(PERIOD)\r\n if len(key_parts)==2:\r\n all_keys = [key_parts[1],\r\n key_parts[0]+BLANK+key_parts[1],\r\n key_parts[0][0]+BLANK+key_parts[1]]\r\n else:\r\n abbreviated = EMPTYCHAR\r\n for x in key_parts[0:-1]:\r\n abbreviated += x[0].upper()\r\n\r\n\r\n all_keys = [key_parts[-1],\r\n key_parts[0]+BLANK+key_parts[-1],\r\n BLANK.join(key_parts),\r\n abbreviated+BLANK+key_parts[-1]]\r\n for k in all_keys:\r\n returnkeyset.add(k+SLASH*has_tags+tag_tail)\r\n\r\n if len(returnkeyset) > 1:\r\n if input('ADD '+', '.join(returnkeyset)+' AS EQUIVALENCES?') in YESTERMS:\r\n\r\n display.noteprint(('ADDING EQUIVALENTS',', '.join(returnkeyset)))\r\n self.default_dict['equivalences'].new_class(list(returnkeyset))\r\n finalkeys.add(key.replace('.',' '))\r\n else:\r\n finalkeys.update(returnkeyset)\r\n else:\r\n finalkeys.update(returnkeyset)\r\n\r\n return finalkeys\r\n\r\n\r\n\r\n newkeyset = set()\r\n\r\n if self.name_interpret:\r\n keyset = expand(keyset)\r\n\r\n\r\n for key in keyset:\r\n key = key.strip()\r\n\r\n if key.startswith(QUESTIONMARK):\r\n #For keywords that engage with the knowledge base\r\n\r\n key = key[1:]\r\n after_slash = EMPTYCHAR\r\n if SLASH in key:\r\n after_slash = key.split(SLASH)[1]\r\n key = key.split(SLASH)[0]\r\n\r\n key += '??' 
# TO prevent index error!\r\n\r\n node,relation,other_node = key.split(QUESTIONMARK)[0], \\\r\n key.split(QUESTIONMARK)[1], \\\r\n key.split(QUESTIONMARK)[2]\r\n if node and not relation and not other_node:\r\n if not self.default_dict['generalknowledge'].node_exists(node):\r\n display.noteprint(self.default_dict['generalknowledge'].text_interpret(node))\r\n elif node and relation and other_node:\r\n if self.default_dict['generalknowledge'].relation_exists(relation):\r\n if not self.default_dict['generalknowledge'].node_exists(node):\r\n display.noteprint(self.default_dict['generalknowledge']\r\n .text_interpret(node))\r\n if not self.default_dict['generalknowledge'].node_exists(other_node):\r\n display.noteprint(self.default_dict['generalknowledge']\r\n .text_interpret(other_node))\r\n display.noteprint(self.default_dict['generalknowledge']\r\n .text_interpret(node+':'\r\n +relation+';'\r\n +other_node))\r\n else:\r\n display.noteprint(('ATTENTION!',\r\n 'RELATION not defined'))\r\n else:\r\n display.noteprint(('ATTENTION',\r\n 'Incomplete knowledge phrase!'))\r\n\r\n key = node\r\n if after_slash:\r\n key = node + '/' + after_slash\r\n\r\n\r\n if SLASH in key:\r\n # if there is a tag in the keyword\r\n\r\n if PERIOD in key:\r\n tags = key.split(SLASH)[1].split(PERIOD)\r\n else:\r\n tags = [key.split(SLASH)[1]]\r\n tagkey = key.split(SLASH)[0]\r\n for tag in tags:\r\n if RIGHTBRACKET in tag:\r\n\r\n definitions = tag.split(RIGHTBRACKET)[1:]\r\n tag = tag.split(RIGHTBRACKET)[0]\r\n\r\n if EQUAL in tag:\r\n equivalent_terms = tag.split('=')\r\n tag = equivalent_terms[0]\r\n display.noteprint(('ADDING EQUIVALENTS',', '.join(equivalent_terms)))\r\n self.default_dict['equivalences'].new_class(equivalent_terms)\r\n\r\n definitions = [tag]+definitions\r\n if len(definitions) > 1:\r\n for r in range(0, len(definitions)-1):\r\n self.default_dict['knower'].learn(definitions[r],\r\n definitions[r+1])\r\n self.display_buffer.append(alerts.LEARNED_BEG\r\n +definitions[r]\r\n +alerts.LEARNED_MIDDLE\r\n +definitions[r+1])\r\n\r\n\r\n if self.tag_dict_contains(tag):\r\n self.add_tag(tag,tagkey)\r\n\r\n else:\r\n self.initiate_new_tag(tag,tagkey)\r\n\r\n\r\n if addkeys:\r\n\r\n if SLASH in key:\r\n # adds keys to keylist\r\n\r\n if PERIOD in key:\r\n\r\n # If there are multiple tags\r\n\r\n tags = key.split(SLASH)[1].split(PERIOD)\r\n else:\r\n tags = [key.split(SLASH)[1]]\r\n tagkey = key.split(SLASH)[0]\r\n if EQUAL in tagkey:\r\n tagkey, equivalent_terms = tagkey.split(EQUAL)[0], tagkey.split(EQUAL)\r\n display.noteprint(('ADDING EQUIVALENTS',', '.join(equivalent_terms)))\r\n self.default_dict['equivalences'].new_class(equivalent_terms)\r\n for tag in tags:\r\n key = tagkey+SLASH+tag.split(RIGHTBRACKET)[0].split(EQUAL)[0]\r\n newkeyset.add(key)\r\n\r\n if self.key_dict_contains(key):\r\n self.add_key(key,index)\r\n else:\r\n self.initiate_new_key(key,index)\r\n\r\n\r\n else:\r\n # If there are no tags\r\n\r\n if EQUAL in key:\r\n key, equivalent_terms = key.split(EQUAL)[0], key.split(EQUAL)\r\n display.noteprint(('ADDING EQUIVALENTS',', '.join(equivalent_terms)))\r\n self.default_dict['equivalences'].new_class(equivalent_terms)\r\n\r\n newkeyset.add(key)\r\n if self.key_dict_contains(key):\r\n self.add_key(key,index)\r\n\r\n else:\r\n self.initiate_new_key(key,index)\r\n\r\n\r\n if sequences:\r\n\r\n # For sequences\r\n\r\n if ATSIGN in key and key[0] != ATSIGN and key[-1] !=ATSIGN:\r\n # Parses the sequence key\r\n identifier = key.split(ATSIGN)[0]\r\n seq_value = key.split(ATSIGN)[1]\r\n\r\n## 
is_sequence = True\r\n if 'date' in identifier and POUND not in seq_value:\r\n seq_value = POUND + seq_value\r\n\r\n seq_mark, seq_value, seq_type, seq_value2 = self.parse_sequence_key(seq_value)\r\n\r\n if not self.default_dict['sequences'].query(term1=identifier,action='in'):\r\n if not self.default_dict['sequences'].query(term1='#TYPE#',\r\n term2=identifier,\r\n action='in'):\r\n # Initiates a new sequence\r\n self.default_dict['sequences'].query(term1='#TYPE#',\r\n term2=identifier,\r\n term3=seq_type,\r\n action='set')\r\n self.default_dict['sequences'].query(term1=identifier,\r\n term2=seq_value,\r\n action='set')\r\n print()\r\n display.noteprint((alerts.ATTENTION,alerts.NEW_SEQUENCE+str(seq_type)))\r\n else:\r\n # For existing sequences\r\n\r\n self.default_dict['sequences'].query(term1='#TYPE#',\r\n term2=identifier,\r\n action='delete')\r\n self.default_dict['sequences'].query(term1='#TYPE#',\r\n term2=identifier,\r\n term3=seq_type,\r\n action='set')\r\n display.noteprint((alerts.ATTENTION,alerts.OVERWRITTEN+str(seq_type)))\r\n self.default_dict['sequences'].query(term1=identifier,\r\n term2=seq_value,\r\n action='set')\r\n\r\n else:\r\n x = self.default_dict['sequences'].query(term1='#TYPE#',\r\n term2=identifier,\r\n action='get')\r\n if seq_type == x:\r\n self.default_dict['sequences'].query(term1=identifier,\r\n term2=seq_value,\r\n action='set')\r\n else:\r\n temp_label = 'POSSIBLE TYPE ERROR!' + str(seq_type) + '/'\\\r\n + str(identifier) + '/' + str(seq_value) + str(x)\r\n nprint(temp_label)\r\n\r\n return newkeyset", "def encryptFile(files, key, output = None):\n\tfrom os.path import commonprefix, split, normpath, join\n\tif isString(files):\n\t\tfiles = [files]\n\tcontent = tarList(files)\n\tcyphered = encrypt(content, key)\n\tif output == None:\n\t\toutput = join(split(normpath(commonprefix(files)))[0],getTimeString() + \".\" + ENCRYPTED_EXTENSION)\n\twith open(output, 'wb') as fo:\n\t\tfo.write(cyphered)\n\treturn output", "def key_upload(self, key=None):\n raise NotImplementedError", "def _addKeys(self, keyframes, weightedTangents):\n # variables\n inAngle = Vector2D(-1, 0)\n outAngle = Vector2D(1, 0)\n\n # loop keyframes\n for keyframe in keyframes:\n # create keyframe point\n cmds.setKeyframe(self.path, time=keyframe.point.x, value=keyframe.point.y)\n\n # set keyframe tangent variable\n arguments = {\"edit\": True, \"absolute\": True, \"time\": (keyframe.point.x,)}\n\n # set weighted tangents\n cmds.keyTangent(self.path, weightedTangents=weightedTangents, **arguments)\n\n # unlock tangents if either in our out handle is not defined.\n if not keyframe.inHandle or not keyframe.outHandle:\n cmds.keyTangent(self.path, lock=False, **arguments)\n\n # add in tangent to arguments\n if keyframe.inHandle:\n arguments[\"inAngle\"] = math.degrees(inAngle.signedAngle(keyframe.inHandle))\n arguments[\"inWeight\"] = keyframe.inHandle.length()\n\n # add out tangent to arguments\n if keyframe.outHandle:\n arguments[\"outAngle\"] = math.degrees(outAngle.signedAngle(keyframe.outHandle))\n arguments[\"outWeight\"] = keyframe.outHandle.length()\n\n # set keyframe tangent\n cmds.keyTangent(self.path, **arguments)", "def batch_associate_user_stack(UserStackAssociations=None):\n pass", "def encrypt(plaintext, key, associated_data=''):\n\n iv = os.urandom(12)\n\n encryptor = Cipher(\n algorithms.AES(key), modes.GCM(iv),\n backend=default_backend()).encryptor()\n\n encryptor.authenticate_additional_data(associated_data)\n\n ciphertext = encryptor.update(plaintext) + 
encryptor.finalize()\n\n return (iv, ciphertext, encryptor.tag)", "def initiate_multipart_upload(self, key_name, headers=None,\r\n reduced_redundancy=False,\r\n metadata=None, encrypt_key=False):\r\n query_args = 'uploads'\r\n provider = self.connection.provider\r\n if headers is None:\r\n headers = {}\r\n if reduced_redundancy:\r\n storage_class_header = provider.storage_class_header\r\n if storage_class_header:\r\n headers[storage_class_header] = 'REDUCED_REDUNDANCY'\r\n # TODO: what if the provider doesn't support reduced redundancy?\r\n # (see boto.s3.key.Key.set_contents_from_file)\r\n if encrypt_key:\r\n headers[provider.server_side_encryption_header] = 'AES256'\r\n if metadata is None:\r\n metadata = {}\r\n\r\n headers = boto.utils.merge_meta(headers, metadata,\r\n self.connection.provider)\r\n response = self.connection.make_request('POST', self.name, key_name,\r\n query_args=query_args,\r\n headers=headers)\r\n body = response.read()\r\n boto.log.debug(body)\r\n if response.status == 200:\r\n resp = MultiPartUpload(self)\r\n h = handler.XmlHandler(resp, self)\r\n xml.sax.parseString(body, h)\r\n return resp\r\n else:\r\n raise self.connection.provider.storage_response_error(\r\n response.status, response.reason, body)", "def add_rpn_blobs(self, blobs, im_scales, rawdata):\n im_i = 0\n scale = im_scales[im_i]\n im_height = np.round(rawdata['height'] * scale)\n im_width = np.round(rawdata['width'] * scale)\n gt_inds = np.where(\n (rawdata['gt_classes'] > 0) & (rawdata['is_crowd'] == 0)\n )[0]\n gt_rois = rawdata['boxes'][gt_inds, :] * scale\n # TODO(rbg): gt_boxes is poorly named;\n # should be something like 'gt_rois_info'\n gt_boxes = np.zeros((len(gt_inds), 6), dtype=np.float32)\n gt_boxes[:, 0] = im_i # batch inds\n gt_boxes[:, 1:5] = gt_rois\n gt_boxes[:, 5] = rawdata['gt_classes'][gt_inds]\n im_info = np.array([[im_height, im_width, scale]], dtype=np.float32)\n blobs['im_info'].append(im_info)\n\n # Add RPN targets\n # Classical RPN, applied to a single feature level\n rpn_blobs = self._get_rpn_blobs(im_height, im_width, [self.foa], self.all_anchors, gt_rois)\n for k, v in rpn_blobs.items():\n blobs[k].append(v)\n \n #\n for k, v in blobs.items():\n if isinstance(v, list) and len(v) > 0:\n blobs[k] = np.concatenate(v)\n \n valid_keys = [\n 'has_visible_keypoints', 'boxes', 'segms', 'seg_areas', 'gt_classes',\n 'gt_overlaps', 'is_crowd', 'box_to_gt_ind_map', 'gt_keypoints'\n ]\n minimal_roidb = [{} for _ in range(1)]\n i = 0\n e = rawdata\n for k in valid_keys:\n if k in e:\n minimal_roidb[i][k] = e[k]\n # blobs['roidb'] = blob_utils.serialize(minimal_roidb)\n blobs['roidb'] = minimal_roidb\n \n \n # Always return valid=True, since RPN minibatches are valid by design\n return True", "def encrypt(key, plaintext):\n data = fk(keyGen(key)[0], ip(plaintext))\n return fp(fk(keyGen(key)[1], swapNibbles(data)))", "def encrypt(self, plaintext: bytes,\n padding: AsymmetricPadding) -> bytes:\n pass", "def send_keys(self, element, keys):\n pass", "def encrypt(plaintext: str) -> Iterable:\n return simplesubstitution.encrypt(KEY, plaintext)", "def store_blob(self, data, download_meta, blob_id=None):\n if blob_id is None:\n blob_id = str(uuid.uuid4())\n\n content_type = download_meta.get('type', 'binary/octet-stream')\n put_kwargs = dict(\n Bucket=self.bucket,\n Key=blob_id,\n Body=data,\n ContentType=content_type\n )\n if self.kms_key_id:\n put_kwargs.update({\n 'ServerSideEncryption': 'aws:kms',\n 'SSEKMSKeyId': self.kms_key_id\n })\n self.s3.put_object(**put_kwargs)\n 
download_meta['bucket'] = self.bucket\n download_meta['key'] = blob_id\n download_meta['blob_id'] = str(blob_id)", "def ctr_encrypt(pt_bin_list, keys, rounds):\n msg = pt_bin_list\n nonce = generate_random_binary(len(pt_bin_list[0])-8) # Initialization Vector\n counter = range(0,len(msg))\n enc_result = \"\"\n\n with multiprocessing.Pool() as p:\n enc_result = p.starmap(ctr_process, zip(msg, repeat(nonce), counter, keys, repeat(rounds)))\n\n enc_result.insert(0,nonce+\"00000000\") # Store padded IV to the start of ciphertext\n return enc_result", "def put(self, keys: Union[str, Iterable], val: coring.Matter):\n return (self.db.putVal(db=self.sdb,\n key=self._tokey(keys),\n val=val.qb64b))", "def encrypt_data(self, filename, data, master_pass, website): \n\n \"\"\"Concatenated extra characters in the case that the master password\n is less than 16 characters. However, this isn't a big safety trade off\n as the full length master password is hashed and checked for.\"\"\"\n concatenated_master = master_pass + \"================\"\n\n key = concatenated_master[:16].encode(\"utf-8\")\n\n cipher = AES.new(key, AES.MODE_EAX)\n\n \"\"\"A value that must never be reused for any other encryption done with\n this key saved alongside encrypted password. Converted to hexadecimal\n to be saved in DB. Later converted back to bytes to decode data\"\"\"\n nonce = cipher.nonce.hex()\n\n data_to_encrypt = data.encode(\"utf-8\")\n # again, bytes is invalid data for JSON so we convert it\n encrypted_data = cipher.encrypt(data_to_encrypt).hex()\n\n self.__save_password(filename, encrypted_data, nonce, website)", "def _encrypt_data_key(self, data_key, algorithm, encryption_context):\n # Raw key string to EncryptedData\n encrypted_wrapped_key = self.config.wrapping_key.encrypt(\n plaintext_data_key=data_key.data_key, encryption_context=encryption_context\n )\n # EncryptedData to EncryptedDataKey\n return aws_encryption_sdk.internal.formatting.serialize.serialize_wrapped_key(\n key_provider=self.key_provider,\n wrapping_algorithm=self.config.wrapping_key.wrapping_algorithm,\n wrapping_key_id=self.key_id,\n encrypted_wrapped_key=encrypted_wrapped_key,\n )", "def encrypt_download(self, uid, to_encrypt: list, filepath: str) -> str:\n # encrypt\n print(\"Encrypting data with context uid: %s\" % uid)\n #start_time = time.perf_counter()\n uid_enc = self.provider_encrypt(uid, to_encrypt)\n #end_time = time.perf_counter()\n #print(\"Encryption took\", end_time - start_time, \"seconds.\")\n assert uid_enc is not None\n print(\"Success encrypting data with uid: %s\" % uid_enc)\n # now perform download\n print(\"Downloading %s...\" % filepath)\n #start_time = time.perf_counter()\n num_bytes = self.provider_download_data(uid_enc, filepath)\n #end_time = time.perf_counter()\n #print(\"Downloading took\", end_time - start_time, \"seconds.\")\n print(\"Success downloading provider enc data with new uid: %s (%d bytes)\" %\n (uid_enc, num_bytes))\n return uid_enc", "def upload(jsonfiles):\n # clear S3 Bucket\n bucket = S3Bucket()\n bucket.clear()\n for jsonfile in jsonfiles:\n filename = os.path.basename(jsonfile)\n key = build_key(filename)\n logging.info(\"%s %s\", filename, key)\n # store json in S3 object\n bucket.store(key, jsonfile)", "def upload(self, bucket_name, key_name, fname):\n bucket = self.s3_.get_bucket(bucket_name)\n key = boto.s3.key.Key(bucket)\n with open(fname, 'rb') as infile:\n key.key = key_name\n return key.set_contents_from_file(infile)", "def combine_election_public_keys(\n election_public_keys: 
DataStore[GUARDIAN_ID, ElectionPublicKey]\n) -> ElectionJointKey:\n public_keys = map(lambda public_key: public_key.key, election_public_keys.values())\n\n return elgamal_combine_public_keys(public_keys)", "def store_blob(self, data, download_meta, blob_id=None):\n if blob_id is None:\n blob_id = uuid.uuid4()\n elif isinstance(blob_id, str):\n blob_id = uuid.UUID(blob_id)\n session = self.DBSession()\n blob = Blob(blob_id=blob_id, data=data)\n session.add(blob)\n download_meta['blob_id'] = str(blob_id)", "def setup_keys(self, dh_object, public_key, private_key):\n public_numbers = DHPublicNumbers(public_key, dh_object.parameter_numbers)\n private_numbers = DHPrivateNumbers(private_key, public_numbers)\n dh_object.private_key = private_numbers.private_key(default_backend())", "def put_bucket_encryption(self, bucket_name, configuration):\n self._client.put_bucket_encryption(\n Bucket=bucket_name,\n ServerSideEncryptionConfiguration=configuration,\n )" ]
[ "0.5732032", "0.55105233", "0.54380447", "0.5313522", "0.5286251", "0.5265419", "0.5222959", "0.52177703", "0.5150052", "0.51047397", "0.5063412", "0.50368476", "0.49367145", "0.49267337", "0.49244836", "0.4919951", "0.49074957", "0.49044165", "0.49000615", "0.4885995", "0.48736697", "0.48617673", "0.48613137", "0.48476648", "0.48404098", "0.48352095", "0.4826224", "0.48198363", "0.48062176", "0.48029706", "0.4786317", "0.47808585", "0.47610775", "0.47603518", "0.47539723", "0.4749243", "0.47317657", "0.47242388", "0.47088027", "0.46845883", "0.4672723", "0.46571985", "0.46413815", "0.46372068", "0.46371025", "0.4619184", "0.46164382", "0.46141848", "0.46135238", "0.46132067", "0.46024278", "0.4581818", "0.45727006", "0.45689353", "0.45621866", "0.45481682", "0.4547051", "0.45384476", "0.45364812", "0.45334944", "0.4510495", "0.4507507", "0.45024508", "0.4491485", "0.44910353", "0.4490435", "0.44892478", "0.44831857", "0.44729328", "0.44660684", "0.4464971", "0.44482765", "0.44368222", "0.44361472", "0.44329083", "0.443202", "0.4430409", "0.44271436", "0.4420931", "0.44154465", "0.44104946", "0.44075266", "0.43992677", "0.4397638", "0.4395132", "0.43948206", "0.4394391", "0.43925342", "0.43904278", "0.4386507", "0.43864262", "0.4375635", "0.43693334", "0.43600795", "0.43545884", "0.43466952", "0.43394437", "0.43377796", "0.4333568", "0.43131617" ]
0.7061965
0
Retrieves encryption keys associated with blobs.
def ReadBlobEncryptionKeys( self, blob_ids: Collection[rdf_objects.BlobID], ) -> Dict[rdf_objects.BlobID, Optional[str]]: return dict(zip(blob_ids, map(self.blob_keys.get, blob_ids)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_keys(self):\n bucket = self.resource.Bucket(self.bucketname)\n return [key.key for key in bucket.objects.all()]", "def get_encrypted_data_keys(self, data_key, encryption_context):\n encrypted_data_keys = [message.header.EncryptedDataKey(b'aws-kms',\n bytes(data_key['KeyId']),\n bytes(data_key['CiphertextBlob']))]\n\n for client in self.kms_clients[1:]:\n key = client.encrypt(KeyId=self.master_key_id,\n Plaintext=data_key['Plaintext'],\n EncryptionContext=encryption_context)\n encrypted_data_key = message.header.EncryptedDataKey(b'aws-kms',\n bytes(key['KeyId']),\n bytes(key['CiphertextBlob']))\n encrypted_data_keys.append(encrypted_data_key)\n\n return encrypted_data_keys", "def blobs(self):\n def blob_iterator():\n with s3conn(self.access_id, self.secret) as s3:\n key_iter = s3.list_bucket(self.bucket, prefix=self.prefix+\"/\")\n for key in key_iter:\n blob = key[len(self.prefix)+1:]\n yield blob\n return blob_iterator", "def get_key_from_blob(blob):\n keys = pgpy.PGPKey.from_blob(blob)\n logging.debug(keys)\n return keys[0]", "def GetSSHKeys():\n keydict = {}\n for rec in database.db.itervalues():\n if 'keys' in rec:\n keydict[rec['name']] = rec['keys']\n return keydict", "def get_s3_keys(client, bucket, prefix, file_type = \"png\"):\n keys = []\n resp = client.list_objects(Bucket = bucket, Prefix = prefix+'/', Delimiter='/')\n for obj in resp['Contents']:\n if re.search(\"[.]\" + file_type + \"$\", obj['Key']):\n keys.append(obj['Key'])\n return keys", "def get_keys(self):\n return list(self.public_keys.keys())", "def keys(self):\n return self.get_list(self.cloudman.list_keypairs(),\n kind=\"key\")", "def get_keys(weat_db):\n import updater\n keys = updater.list_keys(weat_db, verbose=False)\n return keys", "def account_keys(chain):\n return chain.backend.account_keys", "def encryption_key(self) -> typing.Optional[aws_cdk.aws_kms.IKey]:\n return self._values.get('encryption_key')", "def get_keys(self):\r\n\t\tlogger.debug(\"Getting the keys\")\r\n\t\t\r\n\t\treturn db.get_items('keys')", "def _gpg_keys(self) -> ListKeys:\n return self.gpg.list_keys()", "def encryption_key(self) -> typing.Optional[aws_cdk.aws_kms.IKey]:\n return jsii.get(self, \"encryptionKey\")", "def encryption_key(self) -> typing.Optional[aws_cdk.aws_kms.IKey]:\n return jsii.get(self, \"encryptionKey\")", "def get_keys(user_id):\n\n db_conn = sqlite3.connect(db_path)\n db = db_conn.cursor()\n keys = []\n try:\n for row in db.execute(\"SELECT public_key FROM public_keys WHERE username=? 
AND status=?\", [user_id, PK_STATUS_OK]):\n keys.append({\"public\": row[0]})\n db_conn.close()\n except sqlite3.IntegrityError:\n db_conn.close()\n abort(400)\n if(keys == []):\n abort(404)\n return jsonify({'user':{'username':user_id, 'keys':keys}})", "def disk_encryption_key(self) -> Optional[pulumi.Input['KeyVaultAndSecretReferenceArgs']]:\n return pulumi.get(self, \"disk_encryption_key\")", "def get_key(self):\r\n return self.__encryption_key", "def encryption_key(self) -> typing.Optional[aws_cdk.aws_kms.IKey]:\n ...", "def get_api_keys(owner):\n api.get_all(owner)", "def get_all_keys(self, headers=None, **params):\r\n key = Key(self.name, self.contained_key)\r\n return SimpleResultSet([key])", "def get_bucket_encryption(Bucket=None):\n pass", "def list_keys(self, bucket_name, prefix=None):\n url = self.endpoint + '/rest/v2/caches/' + self.cache_name + '?action=keys'\n res = self.infinispan_client.get(url, auth=self.basicAuth)\n data = res.content\n return data", "def encryption_key(self) -> bytearray:\n # Handle if encryption is disabled.\n if self.aes_on == 0:\n return None\n # Encryption is enabled so read the key and return it.\n key = bytearray(16)\n self._read_into(_REG_AES_KEY1, key)\n return key", "def get_public_keys():\n return public_keys", "def list_keys(self, label=None):\r\n _filter = NestedDict({})\r\n if label:\r\n _filter['sshKeys']['label'] = query_filter(label)\r\n\r\n return self.client['Account'].getSshKeys(filter=_filter.to_dict())", "def WriteBlobEncryptionKeys(\n self,\n key_names: Dict[rdf_objects.BlobID, str],\n ) -> None:\n self.blob_keys.update(key_names)", "def getAuthorizedKeysFiles(self, credentials):\n #pwent = self._userdb.getpwnam(credentials.username)\n #root = FilePath(pwent.pw_dir).child('.ssh')\n #files = ['authorized_keys', 'authorized_keys2']\n #return [root.child(f) for f in files]\n return self._files", "def get_all_credential_keys(filename, warn_on_readonly=True):\n multistore = _get_multistore(filename, warn_on_readonly=warn_on_readonly)\n multistore._lock()\n try:\n return multistore._get_all_credential_keys()\n finally:\n multistore._unlock()", "def get_keys(self):\r\n\r\n #using database\r\n\r\n if self.using_database:\r\n aprint('GET KEYS')\r\n value_tuple = (notebookname,)\r\n db_cursor.execute(\"SELECT keyword\"\r\n +\" FROM keys_to_indexes\"\r\n +\" WHERE notebook=?;\",\r\n value_tuple)\r\n fetched = db_cursor.fetchall()\r\n if fetched:\r\n return {key[0] for key in fetched}\r\n\r\n return set()\r\n\r\n #using shelf\r\n\r\n return self.key_dict.keys()", "def get_all_keys(self, headers=None, **params):\r\n return self._get_all([('Contents', self.key_class),\r\n ('CommonPrefixes', Prefix)],\r\n '', headers, **params)", "def keys(self):\n return self._get_storage().keys()", "def get_keys(opts):\n hosts = KnownHostsStore()\n serverkey = hosts.serverkey(opts.vip_address)\n key_store = KeyStore()\n publickey = key_store.public\n secretkey = key_store.secret\n return {\"publickey\": publickey, \"secretkey\": secretkey,\n \"serverkey\": serverkey}", "def get_matching_s3_keys(client, bucket, prefix=\"\", suffix=\"\"):\n\n for obj in get_matching_s3_objects(client, bucket, prefix, suffix):\n yield obj[\"Key\"]", "def list_keys_request(self, vault_name: str, limit: int, offset: int) -> list[dict]:\n url = f'https://{vault_name}{self.azure_cloud.suffixes.keyvault_dns}/keys'\n response = self.http_request(\n 'GET', full_url=url, resource=self.get_vault_resource(), ok_codes=[200])\n\n return self.get_entities_independent_of_pages(response, limit, 
offset, self.get_vault_resource())", "def get_keys(self) -> Dict[str, str]:\n return self.http.get(self.config.paths.keys)", "def _get_encryption_key(self, **options):\n\n return self._public_key", "def keys(self, bucket, user=None):\n raise NotImplementedError('TODO')", "def get_asymm_keys(parameters):\n\tprivate_key = parameters.generate_private_key()\n\treturn private_key,private_key.public_key()", "def list_blobs(self, prefix=''):\n return [b.name for b in self.bucket.list_blobs(prefix=prefix)]", "def keys_bucket(self):\n return self.s3.get_bucket(self.keys_bucket_name)", "async def get_keys(self, collection):\n raise NotImplementedError", "def _get_all_credential_keys(self):\n return [dict(key) for key in self._data.keys()]", "def getPublicKeys(self):\n ks = {}\n for filename in os.listdir(self.dataRoot):\n if filename[:9] == 'ssh_host_' and filename[-8:]=='_key.pub':\n try:\n k = keys.Key.fromFile(\n os.path.join(self.dataRoot, filename))\n t = common.getNS(k.blob())[0]\n ks[t] = k\n except Exception as e:\n log.msg('bad public key file %s: %s' % (filename, e))\n return ks", "def keys(self):\r\n return keys.RepoKeys(self)", "def get_matching_s3_keys(bucket, prefix='', suffix=''):\n for obj in get_matching_s3_objects(bucket, prefix, suffix):\n yield obj['Key']", "def keys(self):\n sql = u\"\"\"\n SELECT `key` FROM `{table}` WHERE 1\n \"\"\".format(table=self.name)\n\n for row in self.conn.execute(sql):\n yield row['key']", "async def list_keys(request: web.Request) -> web.Response:\n keys = [\n {'uri': '/wifi/keys/{}'.format(key.directory),\n 'id': key.directory,\n 'name': os.path.basename(key.file)} for key in wifi.list_keys()\n ]\n return web.json_response({'keys': keys}, status=200)", "def _keys(self):\n for name in listdir(abspath(self._path)):\n key, ext = splitext(name)\n if ext == \".pkl\":\n yield key", "def key_encryption_key(self) -> Optional[pulumi.Input['KeyVaultAndKeyReferenceArgs']]:\n return pulumi.get(self, \"key_encryption_key\")", "def encryption_key(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"encryption_key\")", "def get_list_of_keys(self, bucket_name=None, callback_fn=None):\n list_of_keys = []\n if not callback_fn:\n callback_fn = lambda x: print(x.key)\n if bucket_name:\n buckets = [self.s3_.get_bucket(bucket_name)]\n else:\n buckets = [b for b in self.s3_.get_all_buckets() if b.name not in self.BLACKLIST]\n for bucket in buckets:\n for key in bucket.list():\n callback_fn(key)\n list_of_keys.append(key)\n return list_of_keys", "def _get_encryption_key(self, **options):\n\n raise CoreNotImplementedError()", "async def keys(self) -> Iterable[str]:", "async def get_keys(tfm_id, token):\n\turl = f'https://api.tocuto.tk/tfm/get/keys/{tfm_id}/{token}'\n\tdata = await request_api(url)\n\n\tsuccess = data.pop('success', False)\n\terror = data.pop('error', '').capitalize()\n\tdescription = data.pop('description', 'No description was provided.')\n\n\tif not success:\n\t\tif error == 'Maintenance':\n\t\t\traise MaintenanceError('The game is under maintenance.')\n\n\t\tif error == 'Internal':\n\t\t\traise InternalError(description)\n\n\t\traise EndpointError(f'{error}: {description}')\n\n\tkeys = Keys(**data.get('server', {}), **data.get('keys', {}))\n\tif len(keys.packet) > 0 and len(keys.identification) > 0 and len(keys.msg) > 0 and keys.version != 0:\n\t\treturn keys\n\n\traise EndpointError('Something went wrong: A key is empty ! 
{}'.format(data))", "def key_encryption_key_identity(self) -> Optional[pulumi.Input['ClusterPropertiesKeyEncryptionKeyIdentityArgs']]:\n return pulumi.get(self, \"key_encryption_key_identity\")", "def get_key_list(self) -> list:\n return self.key_functs.keys()", "def test_blob_key():\n\tbackup_and_restore(\n\t\tlambda context: put_keys(lib.SET, BLOB_KEYS, \"foobar\", False),\n\t\tNone,\n\t\tlambda context: check_keys(lib.SET, BLOB_KEYS, \"foobar\", False)\n\t)", "def secret_keys(self):\n return self._secret_keys", "def describe_user_encryption_key_list(\n self,\n request: dds_20151201_models.DescribeUserEncryptionKeyListRequest,\n ) -> dds_20151201_models.DescribeUserEncryptionKeyListResponse:\n runtime = util_models.RuntimeOptions()\n return self.describe_user_encryption_key_list_with_options(request, runtime)", "def keys(self):\n if self.dtype != 'array':\n raise TypeError('Property `keys` only exists for DataSet arrays')\n return [os.path.basename(p).split('.')[0] for p in\n s3.ls(self.s3_path, suffix=self.format.lower())]", "def blobs(self):\n if not self._blobs:\n workspace = self.attributes.workspace\n # Instantiates a google client, & get all blobs in bucket\n storage_client = storage.Client(project=self._user_project)\n bucket = storage_client.bucket(workspace['bucketName'], user_project=self._user_project)\n # get subset of data\n _blobs = {}\n try:\n for b in bucket.list_blobs(fields='items(size, etag, crc32c, name, timeCreated),nextPageToken'):\n name = f\"gs://{workspace['bucketName']}/{b.name}\"\n # cache.put(name, {'size': b.size, 'etag': b.etag, 'crc32c': b.crc32c, 'time_created': b.time_created, 'name': name})\n _blobs[name] = AttrDict({'size': b.size, 'etag': b.etag, 'crc32c': b.crc32c, 'time_created': b.time_created, 'name': name})\n self._blobs = _blobs\n except Exception as e:\n print(f\"{self.id} {workspace['bucketName']} {e}\")\n self._blobs = _blobs\n return self._blobs", "def get_s3_keys(bucket, user_keys = None):\n keys = []\n if user_keys is None:\n \t\t\t\ts3 = boto3.client('s3')\n else:\n s3 = boto3.client('s3', \n aws_access_key_id = user_keys[\"AWS_ACCESS_KEY_ID\"], \n aws_secret_access_key = user_keys[\"AWS_SECRET_ACCESS_KEY\"], \n region_name = user_keys[\"REGION_NAME\"]\n ) \t \n \n resp = s3.list_objects_v2(Bucket= bucket)\n for obj in resp['Contents']:\n keys.append(obj['Key'])\n return keys", "def source_disk_encryption_key(self) -> 'outputs.CustomerEncryptionKeyResponse':\n return pulumi.get(self, \"source_disk_encryption_key\")", "def get(self):\n client = ManagePsb(credentials, databaseName)\n projection = {\n 'imageId': 0,\n \"_id\": 0\n }\n cursor = client.Filter(collection, Projection=projection)\n info = list(cursor)\n newInfo = ManageKeys(info)\n return newInfo.LikeJson()", "def keys(self, installer_context):\n return self.spec.keys(self.data, installer_context)", "def test_blob_key_stored():\n\tbackup_and_restore(\n\t\tlambda context: put_keys(lib.SET, BLOB_KEYS, \"foobar\", True),\n\t\tNone,\n\t\tlambda context: check_keys(lib.SET, BLOB_KEYS, \"foobar\", True)\n\t)", "def get_keys(self):\r\n return self._keys", "def get_keys(self):\n self._logger.info(ME + '.get_keys()')\n\n tmp_primary_keys = []\n tmp_data_keys = []\n try:\n tmp_primary_keys = config.get(ME, 'primary_keys').split(',')\n tmp_data_keys = config.get(ME, 'data_keys').split(',')\n self.index_key = config.get(ME, 'index_key') #FIXME: this is bad\n except KeyError as error_msg:\n self._logger.error(\n 'EXCEPTION: Keys missing' +\n 
'\\r\\tprimary_keys={0}'.format(','.join(tmp_primary_keys)) +\n '\\r\\tdata_keys={0}'.format(','.join(tmp_data_keys)) +\n '\\r\\tindex_key={0}'.format(self.index_key),\n exc_info=True\n )\n raise Connection.TableKeysMissing(error_msg, ME)\n\n self._logger.debug(\n 'keys validated:' + \\\n '\\r\\tprimary_keys={0}'.format(','.join(tmp_primary_keys)) +\n '\\r\\tdata_keys={0}'.format(','.join(tmp_data_keys)) +\n '\\r\\tindex_key={0}'.format(self.index_key)\n )\n return tmp_primary_keys, tmp_data_keys", "def keys(self):\n pattern = r'^\\d+-aws-billing-csv-[\\d+]{4}-[\\d+]{2}.csv$'\n for key in self.bucket.get_all_keys():\n if re.search(pattern, key.name):\n yield key", "def get_list_of_blobs(bucket_name, prefix=None, delimiter=None):\r\n\r\n # initialize client\r\n storage_client = storage.Client()\r\n\r\n # get list blobs\r\n blobs = storage_client.list_blobs(bucket_name, prefix=prefix, delimiter=delimiter)\r\n\r\n for blob in blobs:\r\n print(blob.name)\r\n\r\n if delimiter:\r\n print(\"Prefixes:\")\r\n for prefix in blobs.prefixes:\r\n print(prefix)\r\n\r\n return None", "def _index_decryption_keys(raw_keys):\n index = {}\n if raw_keys:\n for raw_key in raw_keys:\n if not raw_key:\n continue\n key = parse_key(raw_key)\n if key.type == KeyType.CSEK:\n index[key.sha256] = key\n return index", "def keys(self):\n return DeviceKeyCollection(client=self)", "def get(self, id=None):\n response = []\n publickeys = []\n if id:\n # For testing\n #if action == \"delete_key\":\n # self.delete()\n # For testing\n #elif action == \"edit_key\":\n # self.put()\n #else\n id = str(urllib.unquote(id))\n publickeys = [PublicKey.get_by_id(long(id))]\n else:\n publickeys = PublicKey.all().run(batch_size=1000)\n \n for seq, publickey in enumerate(publickeys):\n response.append({ 'key_name' : publickey.name, 'key_description' : publickey.description, \n 'key_owner' : str(publickey.owner.email()), 'created' : str(publickey.created), \n 'is_default_key' : publickey.is_default_key, 'key_id' : publickey.key().id()})\n self.response.out.write(json.dumps(response))", "def _get_encryption_key(self, obj, field_name: str):\n return hashlib.sha256(\n f'{obj.pk}::{self.get_encryption_key(obj)}::'\n f'{settings.GDPR_KEY if hasattr(settings, \"GDPR_KEY\") else settings.SECRET_KEY}::{field_name}'.encode(\n 'utf-8')).hexdigest()", "def getBlobs( self ):\n return self.__blobs;", "def download_key_from_blob(self):\n source_blob_name = \"generated-keys/{}\".format(self.service_account_email)\n destination_name = self.service_account_email\n\n # generate destination folder and file if they do not yet exist\n Path(\"downloaded-key/\").mkdir(parents=True, exist_ok=True) # creates folder if not exists\n folder = Path(\"downloaded-key/\") # folder where all the newly generated keys go\n destination_file_name = folder / \"{}\".format(destination_name) # file named after service-account name\n destination_file_name.touch(exist_ok=True)\n\n # download the file and store it locally\n storage_client = storage.Client()\n bucket = storage_client.get_bucket(self.bucket_name)\n blob = bucket.blob(source_blob_name)\n blob.download_to_filename(destination_file_name)\n\n # prints source and destination indicating successful download\n print('Encrypted key downloaded to -----> \\n {}.'.format(\n source_blob_name,\n destination_file_name))\n\n return destination_file_name", "def snapshot_encryption_key(self) -> 'outputs.CustomerEncryptionKeyResponse':\n return pulumi.get(self, \"snapshot_encryption_key\")", "def encryption(self) -> 
typing.Optional[\"BucketEncryption\"]:\n return self._values.get('encryption')", "def list_blobs(bucket_name):\n # bucket_name = \"your-bucket-name\"\n\n storage_client = storage.Client()\n\n # Note: Client.list_blobs requires at least package version 1.17.0.\n blobs = storage_client.list_blobs(bucket_name)\n\n return [blob.name for blob in blobs]", "def download_key():\n data = check_args(('cloudProvider', ))\n provider = jobs.init_provider(data, True)\n key = encrypt_key(provider.get_key(), data['username'])\n return make_response(keyName=provider.keyname, key=key)", "def read_encoders(self):\n enc_list = Drive.list()\n rtn = {}\n\n for enc in enc_list:\n rtn[enc] = self.read_encoder(enc)\n\n return rtn", "def get_public_keys(self):\n return self.control_connection.call('get_agents_publickeys')", "def list_blobs(bucket_name):\n # bucket_name = \"your-bucket-name\"\n\n storage_client = storage.Client()\n print(storage_client.current_batch)\n # Note: Client.list_blobs requires at least package version 1.17.0.\n blobs = storage_client.list_blobs(bucket_name)\n\n # print(len([1 for blob in blobs]))\n for blob in blobs:\n print(blob.name)", "def iterkeys(self, essid):\n return self.cli.essids.keys(essid).__iter__()", "def get_key_list(self, email=\"\"):\n\t\tif email:\n\t\t\twhere_clause = \" where email = '%s'\" % email\n\t\telse:\n\t\t\twhere_clause = \"\"\n\n\t\treturn self.app.db.query(\n\t\t\t\"\"\"\n\t\t\tselect\n\t\t\t\tapi_key,\n\t\t\t\towner,\n\t\t\t\tapp_name,\n\t\t\t\temail,\n\t\t\t\turl,\n\t\t\t\tcreated\n\t\t\tfrom\n\t\t\t\tapi_keys\n\t\t\t%s\n\t\t\t\"\"\" % where_clause)", "def get_public_keys(vm_):\n key_filename = config.get_cloud_config_value(\n \"ssh_public_key\", vm_, __opts__, search_global=False, default=None\n )\n if key_filename is not None:\n key_filename = os.path.expanduser(key_filename)\n if not os.path.isfile(key_filename):\n raise SaltCloudConfigError(\n \"The defined ssh_public_key '{}' does not exist\".format(key_filename)\n )\n ssh_keys = []\n with salt.utils.files.fopen(key_filename) as rfh:\n for key in rfh.readlines():\n ssh_keys.append(salt.utils.stringutils.to_unicode(key))\n\n return ssh_keys", "async def describe_user_encryption_key_list_async(\n self,\n request: dds_20151201_models.DescribeUserEncryptionKeyListRequest,\n ) -> dds_20151201_models.DescribeUserEncryptionKeyListResponse:\n runtime = util_models.RuntimeOptions()\n return await self.describe_user_encryption_key_list_with_options_async(request, runtime)", "def list_keys(self, s3_prefix_path, delimiter='/'):\n bucket_name, prefix = S3Util.get_bucket_and_key(s3_prefix_path)\n bucket = self.s3_resource.Bucket(bucket_name)\n result = bucket.meta.client.list_objects(Bucket=bucket_name,\n Prefix=prefix,\n Delimiter=delimiter)\n if result.get('CommonPrefixes') is not None:\n return [o.get('Prefix') for o in result.get('CommonPrefixes')]", "def get_key(self, object_id):\n try:\n info = self.storage.load(\"\", object_id)\n return info[\"object_key\"]\n except KeyError:\n return None", "def keys(self):\n # Collect all keys in each bucket\n all_keys = []\n for bucket in self.buckets:\n for key, value in bucket.items():\n all_keys.append(key)\n return all_keys", "def get_matching_s3_keys(\n bucket: str,\n prefix: str = \"\",\n suffix: str = \"\",\n session: Optional[boto3.Session] = None,\n) -> Iterator[str]:\n for obj in get_matching_s3_objects(bucket, prefix, suffix, session):\n if \"Key\" in obj:\n yield obj[\"Key\"]", "def cli(ctx):\n return ctx.gi.cannedkeys.get_keys()", "def 
describe_dbinstance_encryption_key(\n self,\n request: dds_20151201_models.DescribeDBInstanceEncryptionKeyRequest,\n ) -> dds_20151201_models.DescribeDBInstanceEncryptionKeyResponse:\n runtime = util_models.RuntimeOptions()\n return self.describe_dbinstance_encryption_key_with_options(request, runtime)", "def get_keys(self, yk_publicname):\n query = \"\"\"SELECT yk_publicname\n FROM yubikeys\n WHERE active = 1\"\"\"\n params = None\n if yk_publicname != 'all':\n query += ' AND yk_publicname = %s'\n params = (yk_publicname,)\n self._execute(query, params)\n return self._dictfetchall()", "def _get_s3_keys(self, keys: list = [], marker: str = '') -> list:\n response = self.s3client.list_objects(Bucket=self.bucket_name, Prefix=os.path.join(self.prefix, 'log/task_log'), Marker=marker)\n if 'Contents' in response:\n keys.extend([{'Key': content['Key'], 'LastModified': content['LastModified']} for content in response['Contents']])\n if 'IsTruncated' in response:\n return self._get_s3_keys(keys=keys, marker=keys[-1]['Key'])\n return keys", "def create_ssh_keys(self):\n self.random_ssh()\n\n return self.keys", "def all_keys(self):\n return self.derivable_keys() + self.loadable_keys()", "def read_keys(path):\n with open(path) as walletfile:\n b_keys = walletfile.read()\n p_keys = base64.b64decode(b_keys)\n return pickle.loads(p_keys)", "def keys(self, mode=None):\n if self._state == 'open':\n shelve_keys = list(self._shelve.keys())\n\n else:\n self.open()\n shelve_keys = list(self._shelve.keys())\n self.close()\n\n dict_keys = list(self._dict.keys())\n\n if mode == 'shelve':\n return shelve_keys\n\n if mode == 'dict':\n return dict_keys\n\n return shelve_keys + dict_keys" ]
[ "0.6437575", "0.6124688", "0.61170375", "0.5970704", "0.5920652", "0.5918394", "0.5872749", "0.5832748", "0.58217853", "0.58021", "0.5690256", "0.56733716", "0.5656133", "0.56275576", "0.56275576", "0.5610084", "0.56076646", "0.55941993", "0.55817664", "0.55722076", "0.55719936", "0.5542672", "0.55405426", "0.55257547", "0.5518588", "0.54986227", "0.54950535", "0.54904693", "0.5484998", "0.5480899", "0.54803586", "0.54668695", "0.5465974", "0.54585165", "0.54481506", "0.5438085", "0.543057", "0.54100513", "0.54083955", "0.5405989", "0.5384625", "0.5378961", "0.5374463", "0.53690696", "0.5361483", "0.5351318", "0.53378475", "0.53362006", "0.53305644", "0.53281957", "0.53258127", "0.53050345", "0.52965134", "0.5288518", "0.528458", "0.5280934", "0.5261961", "0.526035", "0.52600425", "0.5258682", "0.5251054", "0.52467006", "0.5245808", "0.52412784", "0.52361625", "0.5228413", "0.52104735", "0.5202298", "0.5201489", "0.5199898", "0.5194232", "0.5171955", "0.51653385", "0.51616406", "0.5157081", "0.5136602", "0.5129208", "0.5112813", "0.51077944", "0.5107403", "0.50888467", "0.5084077", "0.5083814", "0.50834614", "0.50818276", "0.5079386", "0.50703996", "0.50702655", "0.5065519", "0.5061626", "0.5061113", "0.5053787", "0.5044383", "0.5044037", "0.5032908", "0.5032772", "0.50326383", "0.5026065", "0.5023109", "0.5022309" ]
0.7701036
0
Intercept http and mock client (get_repo)
def test_branch_can_be_copied(): setup_org() setup_repo() responses.add(responses.GET, "https://api.github.com/repos/my-org/my-repo/branches/master", body=my_repo_branch, content_type='text/json', status=200) responses.add(responses.POST, "https://api.github.com/repos/my-org/my-repo/git/refs", body=my_new_ref, content_type='text/json', status=201) responses.add(responses.GET, "https://api.github.com/repos/my-org/my-repo/branches/main", body=my_repo_branch, content_type='text/json', status=200) token = '__dummy__' org = "my-org" client = GithubRestClient(token) new_branch_name = "main" repo = get_repository(client, org, "my-repo") new_branch = copy_branch(repo, repo.default_branch, new_branch_name) assert None is not new_branch
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_issue_50():\n utils.set_http_mock()\n\n client = Github(proxy_host=\"my.proxy.com\", proxy_port=9000)\n setup_args = client.request._http.called_with\n assert_equals(type(setup_args['proxy_info']), httplib2.ProxyInfo)\n assert_equals(setup_args['proxy_info'].proxy_host, 'my.proxy.com')\n assert_equals(setup_args['proxy_info'].proxy_port, 9000)\n\n utils.unset_http_mock()", "def mock_github_get(url):\n mock_repo_key = url.split(\"/\")[-1]\n\n result = requests.Response()\n result.status_code = 200\n result.encoding = \"utf-8\"\n result._content = repos[mock_repo_key].encode()\n\n return result", "def setUp(self):\n self.client = HTTPClient()\n self.method = 'GET'\n self.url = 'http://github.com/ojengwa'\n self.headers = {}", "def setUp(self):\n self.client = RequestsClient()\n self.method = 'GET'\n self.url = 'http://github.com/ojengwa'\n self.headers = {}", "def test_fetch_repositories(self):\n self.maxDiff = None\n with self.logged_in(\n access_token=\"nbiousndegoijubdognlksdngndsgmngds\",\n extra_mocks=[\n (\n \"get\",\n \"https://api.github.com/user/repos\",\n dict(\n json=self.fixtures['./tests/fixtures/repos_ponteineptique.response.json'][0],\n headers=self.fixtures['./tests/fixtures/repos_ponteineptique.response.json'][1]\n )\n ),\n (\n \"get\",\n re.compile(\"https://api.github.com/user/repos\\?.*page=2\"),\n dict(\n json=self.fixtures['./tests/fixtures/repos_ponteineptique.page2.response.json'][0],\n headers=self.fixtures['./tests/fixtures/repos_ponteineptique.page2.response.json'][1]\n )\n )\n ]\n ):\n index = self.client.get(\"/\").data.decode()\n self.assertNotIn(\"Sign-in\", index, \"We are logged in\")\n self.assertIn(\"Hi ponteineptique!\", index, \"We are logged in\")\n\n # We check\n repositories = loads(self.client.get(\"/api/hook/v2.0/user/repositories\").data.decode())\n self.assertEqual(repositories, {\"repositories\": []}, \"No repository on first get\")\n\n # We refresh by posting\n repositories = loads(self.client.post(\"/api/hook/v2.0/user/repositories\").data.decode())\n repositories[\"repositories\"] = sorted(repositories[\"repositories\"], key=lambda x: x[\"name\"])\n self.assertEqual(\n repositories,\n {\n \"repositories\": [\n {'name': 'canonical-greekLit', 'owner': 'PerseusDL'},\n {'name': 'canonical-latinLit', 'owner': 'PerseusDL'},\n {'name': 'canonical-norseLit', 'owner': 'PerseusDL'},\n {'name': 'octodog', 'owner': 'octocat'}\n ]\n },\n \"Github API is parsed correctly\"\n )\n\n with self.logged_in(\n access_token=\"nbiousndegoijubdognlksdngndsgmngds\",\n extra_mocks=[\n (\n \"get\",\n \"https://api.github.com/user/repos\",\n dict(\n json=self.fixtures['./tests/fixtures/repos_ponteineptique.response.json'][0],\n headers=self.fixtures['./tests/fixtures/repos_ponteineptique.response.json'][1]\n )\n ),\n (\n \"get\",\n re.compile(\"https://api.github.com/user/repos\\?.*page=2\"),\n dict(\n json=self.fixtures['./tests/fixtures/repos_ponteineptique.page2.alt.response.json'][0],\n headers=self.fixtures['./tests/fixtures/repos_ponteineptique.page2.alt.response.json'][1]\n )\n )\n ]\n ):\n # We check it was saved\n repositories = loads(self.client.get(\"/api/hook/v2.0/user/repositories\").data.decode())\n repositories[\"repositories\"] = sorted(repositories[\"repositories\"], key=lambda x: x[\"name\"])\n self.assertEqual(\n repositories,\n {\n \"repositories\": [\n {'name': 'canonical-greekLit', 'owner': 'PerseusDL'},\n {'name': 'canonical-latinLit', 'owner': 'PerseusDL'},\n {'name': 'canonical-norseLit', 'owner': 'PerseusDL'},\n {'name': 'octodog', 
'owner': 'octocat'}\n ]\n },\n \"When logging in back, we should have the same old repos\"\n )\n\n # We update by posting\n repositories = loads(self.client.post(\"/api/hook/v2.0/user/repositories\").data.decode())\n repositories[\"repositories\"] = sorted(repositories[\"repositories\"], key=lambda x: x[\"name\"])\n self.assertEqual(\n repositories,\n {\n \"repositories\": [\n {'name': 'canonical-greekLit', 'owner': 'PerseusDL'},\n {'name': 'canonical-latinLit', 'owner': 'PerseusDL'},\n {'name': 'canonical-norseLit', 'owner': 'PerseusDL'},\n {'name': 'octodog', 'owner': 'octocat'},\n {'name': 'oneKGreek', 'owner': 'ponteineptique'}\n ]\n },\n \"Github API is parsed correctly\"\n )\n\n # We check it was saved and cleared before\n repositories = loads(self.client.get(\"/api/hook/v2.0/user/repositories\").data.decode())\n repositories[\"repositories\"] = sorted(repositories[\"repositories\"], key=lambda x: x[\"name\"])\n self.assertEqual(\n repositories,\n {\n \"repositories\": [\n {'name': 'canonical-greekLit', 'owner': 'PerseusDL'},\n {'name': 'canonical-latinLit', 'owner': 'PerseusDL'},\n {'name': 'canonical-norseLit', 'owner': 'PerseusDL'},\n {'name': 'octodog', 'owner': 'octocat'},\n {'name': 'oneKGreek', 'owner': 'ponteineptique'}\n ]\n },\n \"Old repos should have been cleared, new ones should be there !\"\n )\n\n with self.logged_in(\n access_token=\"nbiousndegoijubdognlksdngndsgmngds\",\n extra_mocks=[\n (\n \"get\",\n \"https://api.github.com/user/repos\",\n dict(\n json=self.fixtures['./tests/fixtures/repos_ponteineptique.response.json'][0],\n headers=self.fixtures['./tests/fixtures/repos_ponteineptique.response.json'][1]\n )\n ),\n (\n \"get\",\n re.compile(\"https://api.github.com/user/repos\\?.*page=2\"),\n dict(\n json=self.fixtures['./tests/fixtures/repos_ponteineptique.page2.alt2.response.json'][0],\n headers=self.fixtures['./tests/fixtures/repos_ponteineptique.page2.alt2.response.json'][1]\n )\n )\n ]\n ):\n # We check it was saved\n repositories = loads(self.client.get(\"/api/hook/v2.0/user/repositories\").data.decode())\n repositories[\"repositories\"] = sorted(repositories[\"repositories\"], key=lambda x: x[\"name\"])\n self.assertEqual(\n repositories,\n {\n \"repositories\": [\n {'name': 'canonical-greekLit', 'owner': 'PerseusDL'},\n {'name': 'canonical-latinLit', 'owner': 'PerseusDL'},\n {'name': 'canonical-norseLit', 'owner': 'PerseusDL'},\n {'name': 'octodog', 'owner': 'octocat'},\n {'name': 'oneKGreek', 'owner': 'ponteineptique'}\n ]\n },\n \"When logging in back, we should have the same old repos\"\n )\n\n # We update by posting\n repositories = loads(self.client.post(\"/api/hook/v2.0/user/repositories\").data.decode())\n repositories[\"repositories\"] = sorted(repositories[\"repositories\"], key=lambda x: x[\"name\"])\n self.assertEqual(\n repositories,\n {\n 'repositories': [\n {'name': 'Ahab', 'owner': 'Capitains'},\n {'name': 'Capitains.github.io', 'owner': 'Capitains'},\n {'name': 'Cavern', 'owner': 'Capitains'},\n {'name': 'ahab-legacy-existdb', 'owner': 'Capitains'},\n {'name': 'ahab-legacy-python', 'owner': 'Capitains'},\n {'name': 'alignment-editor', 'owner': 'alpheios-project'},\n {'name': 'alpheios-docs', 'owner': 'alpheios-project'},\n {'name': 'alpheios-flask', 'owner': 'alpheios-project'},\n {'name': 'alpheios5', 'owner': 'alpheios-project'},\n {'name': 'angular-nemo', 'owner': 'Capitains'},\n {'name': 'arethusa', 'owner': 'alpheios-project'},\n {'name': 'arethusa-cli', 'owner': 'alpheios-project'},\n {'name': 'arethusa-configs', 'owner': 
'alpheios-project'},\n {'name': 'arethusa-example-data', 'owner': 'alpheios-project'},\n {'name': 'arethusa-experiments', 'owner': 'alpheios-project'},\n {'name': 'arethusa-ngdocs-generator', 'owner': 'alpheios-project'},\n {'name': 'arethusa-server', 'owner': 'alpheios-project'},\n {'name': 'basic-reader', 'owner': 'alpheios-project'},\n {'name': 'canonical-greekLit', 'owner': 'PerseusDL'},\n {'name': 'canonical-latinLit', 'owner': 'PerseusDL'},\n {'name': 'canonical-norseLit', 'owner': 'PerseusDL'},\n {'name': 'chrome-wrapper', 'owner': 'alpheios-project'},\n {'name': 'cookiecutter-guidelines', 'owner': 'Capitains'},\n {'name': 'cts-api', 'owner': 'alpheios-project'},\n {'name': 'ctsworklist', 'owner': 'alpheios-project'},\n {'name': 'dummy1', 'owner': 'alpheios-project'},\n {'name': 'edit-utils', 'owner': 'alpheios-project'},\n {'name': 'inflection-analysis-prototype', 'owner': 'alpheios-project'},\n {'name': 'morphlib', 'owner': 'alpheios-project'},\n {'name': 'morphwrappers', 'owner': 'alpheios-project'},\n {'name': 'nemo_arethusa_plugin', 'owner': 'alpheios-project'},\n {'name': 'schemas', 'owner': 'alpheios-project'},\n {'name': 'tei-digital-age', 'owner': 'alpheios-project'}\n ]},\n \"Github API is parsed correctly\"\n )", "def __init__(self, github_repo, module='', main_dir='main'):\n self._http_client = HttpClient()\n self._github_repo = github_repo.rstrip('/').replace('https://github.com', 'https://api.github.com/repos')\n self._main_dir = main_dir\n self._module = module.rstrip('/')", "def _get_repo(self, owner, repo):\n url = f\"{BASE_URL}/repos/{owner}/{repo}\"\n status, data, _ = self.get(url)\n if (status == 200):\n return data\n else:\n log.warning(\"GHUB\", f\"Unexpected status code {status} for request {url}.\")", "def stub_http(hass):\n mock_http_component(hass)", "def svc_protected_repo(svc_client, identity_headers, it_protected_repo_url):\n from renku.core.models.git import GitURL\n\n payload = {\n \"git_url\": it_protected_repo_url,\n \"depth\": 0,\n }\n\n response = svc_client.post(\"/cache.project_clone\", data=json.dumps(payload), headers=identity_headers)\n\n data = {\n \"project_id\": response.json[\"result\"][\"project_id\"],\n \"skip_template_update\": True,\n \"skip_docker_update\": True,\n }\n svc_client.post(\"/cache.migrate\", data=json.dumps(data), headers=identity_headers)\n\n url_components = GitURL.parse(it_protected_repo_url)\n\n with integration_repo(identity_headers, response.json[\"result\"][\"project_id\"], url_components) as repo:\n with _mock_cache_sync(repo):\n yield svc_client, identity_headers, payload, response", "def repository_create_proxy():\n pass", "def setUpClass(cls):\n cls.get_patcher = patch('requests.get')\n cls.mock = cls.get_patcher.start()\n cls.mock.return_value.json.side_effect = [\n cls.org_payload, cls.repos_payload,\n cls.org_payload, cls.repos_payload,\n ]", "def test_returns_cloned_repo_by_name(self):\n # Need to set up a git repo with origin info.\n full_path = path.join(settings.REPO_ROOT, 'test')\n envoy.run('git init {0}'.format(full_path))\n fake_origin = 'git://localhost'\n envoy.run('git -C {0} remote add origin {1}'.format(full_path,\n fake_origin))\n url = reverse(\"find\", kwargs={'name': 'test'})\n response = self.client.get(url)\n self.assertEqual(200, response.status_code)\n result = json.loads(response.content.decode())\n expected_url = settings.REPO_URL + u'test'\n self.assertEqual(result['url'], expected_url)\n self.assertEqual(result['name'], u'test')", "def svc_client_with_repo(svc_client_setup):\n 
svc_client, headers, project_id, url_components, repo = svc_client_setup\n\n response = svc_client.post(\n \"/cache.migrate\", data=json.dumps(dict(project_id=project_id, skip_docker_update=True)), headers=headers\n )\n\n assert response.json[\"result\"]\n\n with _mock_cache_sync(repo):\n yield svc_client, deepcopy(headers), project_id, url_components", "def test_get_client(self):\n pass", "def test_load_github(self):\n\n c = Client()\n response = c.get('/taric_books/github/')\n\n self.assertEqual(response.status_code, 200)", "def api_repo_get(access_key):\n repo = Repo.query.get(access_key)\n if not repo:\n return jsonify(error=\"Repo not found\"), 404\n \n if repo.is_private and 'working_repo' not in session:\n return jsonify(error=\"Unauthorized\"), 401\n elif repo.is_private and session['working_repo'] != repo.access_key:\n return jsonify(error=\"Unauthorized\"), 403\n elif repo.is_private and session['working_repo'] == repo.access_key:\n return jsonify(repo.to_json())\n else:\n return jsonify(repo.to_json())", "def http_client(http_client, base_url):\n original_fetch = http_client.fetch\n\n def _fetch(url):\n fetch = partial(original_fetch, base_url + url)\n return http_client.io_loop.run_sync(fetch)\n\n http_client.fetch = _fetch\n return http_client", "def test_public_repos_url(self, org, expected):\n with patch('client.get_json') as mock:\n instance = GithubOrgClient(org)\n mock.return_value = expected\n self.assertEqual(instance._public_repos_url, expected[\"repos_url\"])", "def test_returns_cloned_repo_by_name_auto_host(self):\n # Need to set up a git repo with origin info.\n full_path = path.join(settings.REPO_ROOT, 'test')\n envoy.run('git init {0}'.format(full_path))\n fake_origin = 'git://localhost'\n envoy.run('git -C {0} remote add origin {1}'.format(full_path,\n fake_origin))\n url = reverse(\"find\", kwargs={'name': 'test'})\n\n del settings.REPO_URL\n\n response = self.client.get(url, HTTP_HOST='test-host')\n\n self.assertEqual(200, response.status_code)\n result = json.loads(response.content.decode())\n expected_url = 'git://test-host/test'\n self.assertEqual(result['url'], expected_url)\n self.assertEqual(result['name'], u'test')", "def test_api_repo_status_get(self):\n default_api = DefaultApi(api_client=self.api_client)\n params = dlrnapi_client.Params2()\n path, method = default_api.api_repo_status_get(params)\n self.assertEqual(path, '/api/repo_status')\n self.assertEqual(method, 'GET')", "def setUpClass(self):\n self.repo = Repository(\"https://github.com/qcoumes/gitload_test.git\")\n self.repo.get_repo()", "def test_api_last_tested_repo_get(self):\n default_api = DefaultApi(api_client=self.api_client)\n params = dlrnapi_client.Params()\n path, method = default_api.api_last_tested_repo_get(params)\n self.assertEqual(path, '/api/last_tested_repo')\n self.assertEqual(method, 'GET')", "def DoStubHttp(status, mime, resp_body):\n def Request(unused_self, unused_url, method, body, headers):\n _ = method, body, headers # unused kwargs\n response = httplib2.Response({\n 'status': status,\n 'content-type': mime,\n })\n return response, resp_body\n return mock.patch('httplib2.Http.request', new=Request)", "def test_public_repos(self, mock_json):\n\n Response_payload = [{\"name\": \"Google\"}]\n mock_json.return_value = Response_payload\n\n with patch('client.GithubOrgClient._public_repos_url',\n new_callable=PropertyMock) as mock_public:\n\n mock_public.return_value = \"hello/world\"\n test_class = GithubOrgClient('test')\n result = test_class.public_repos()\n\n check = 
[rep[\"name\"] for rep in Response_payload]\n self.assertEqual(result, check)\n\n mock_public.assert_called_once()\n mock_json.assert_called_once()", "def github_request(self, path, callback, access_token=None,\n method='GET', body=None, **args):\n args[\"access_token\"] = access_token\n url = tornado.httputil.url_concat(self._API_URL + path, args)\n logging.debug('request to ' + url)\n http = tornado.httpclient.AsyncHTTPClient()\n if body is not None:\n body = tornado.escape.json_encode(body)\n logging.debug('body is' + body)\n headers = {}\n headers['User-Agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.153 Safari/537.36'\n \n http.fetch(url, callback=self.async_callback(\n self._parse_response, callback), method=method, body=body, headers=headers)", "def test_client_retrieve(self):\n pass", "def test_get(self):\n client = RestClient(host=self.host, username='')\n rest_url = 'some/url/'\n \n # Mock good response\n with responses.RequestsMock() as rsps:\n rsps.add(responses.GET, f'{self.host}/{rest_url}', status=200,\n json={'value':\"good!\"})\n r = client.get(rest_url)", "def mocked_requests_scrapping_get(*args, **kwargs):\n class MockResponse:\n def __init__(self, json_data, status_code, url):\n self.content = json_data\n self.status_code = status_code\n self.url = url\n self.cookies = {\"JSESSIONID\": \"jkghhjgjhgfjgfgjg\"}\n self.encoding = \"utf-8\"\n\n def json(self):\n return self.json_data\n\n dn = os.path.dirname(os.path.realpath(__file__))\n for url, provider in {f\"{settings.BASE_URL}/eAnnuaire/formulaire?appelRetour=true\": \"form\",\n f\"{settings.BASE_URL}/eAnnuaire/resultat\": \"suivant\",\n f\"{settings.BASE_URL}/eAnnuaire/fiche\": \"detail\"}.items():\n if args[0].startswith(url):\n with open(os.path.join(dn, \"fixtures\", f\"{provider}.html\"), \"rb\") as fp:\n return MockResponse(fp.read(), 200, args[0])", "def get(self, *args, **kwargs):\r\n url = '{0}/user/repositories/'.format(self.parent.get_url())\r\n return http.Request('GET', url), parsers.parse_json", "def callback_repo_check(self, request, uri, headers, status_code=404):\n self.assertEqual(\n request.headers['Authorization'],\n 'token {0}'.format(self.OAUTH2_TOKEN)\n )\n # Handle the new \"rerun\" repo differently\n if self.TEST_RERUN_REPO in uri:\n status_code = 404\n return (status_code, headers, json.dumps({'message': 'testing'}))", "def test_public_repos(self):\n\n instance = GithubOrgClient('do')\n self.assertEqual(instance.org, self.org_payload)\n self.assertAlmostEqual(instance._public_repos_url,\n 'https://api.github.com/orgs/google/repos')\n self.assertEqual(instance.repos_payload, self.repos_payload)\n self.assertEqual(instance.public_repos(), self.expected_repos)\n self.assertEqual(instance.public_repos(\"sdsd\"), [])\n self.mock.assert_called()", "def local_remote_repository(svc_client, tmp_path, mock_redis, identity_headers, real_sync):\n from click.testing import CliRunner\n from git.config import GitConfigParser, get_config_path\n from marshmallow import pre_load\n\n from renku.cli import cli\n from renku.core.utils.contexts import chdir\n from renku.service.config import PROJECT_CLONE_NO_DEPTH\n from renku.service.serializers import cache\n\n # NOTE: prevent service from adding an auth token as it doesn't work with local repos\n def _no_auth_format(self, data, **kwargs):\n return data[\"git_url\"]\n\n orig_format_url = cache.ProjectCloneContext.format_url\n cache.ProjectCloneContext.format_url = _no_auth_format\n\n # NOTE: mock 
owner/project so service is happy\n def _mock_owner(self, data, **kwargs):\n data[\"owner\"] = \"dummy\"\n\n data[\"name\"] = \"project\"\n data[\"slug\"] = \"project\"\n\n return data\n\n orig_set_owner = cache.ProjectCloneContext.set_owner_name\n cache.ProjectCloneContext.set_owner_name = pre_load(_mock_owner)\n\n remote_repo_path = tmp_path / \"remote_repo\"\n remote_repo_path.mkdir()\n\n remote_repo = Repo.init(remote_repo_path, bare=True)\n remote_repo_checkout_path = tmp_path / \"remote_repo_checkout\"\n remote_repo_checkout_path.mkdir()\n\n remote_repo_checkout = remote_repo.clone(str(remote_repo_checkout_path))\n\n home = tmp_path / \"user_home\"\n home.mkdir()\n\n with modified_environ(HOME=str(home), XDG_CONFIG_HOME=str(home)):\n try:\n with GitConfigParser(get_config_path(\"global\"), read_only=False) as global_config:\n global_config.set_value(\"user\", \"name\", \"Renku @ SDSC\")\n global_config.set_value(\"user\", \"email\", \"[email protected]\")\n\n # NOTE: init \"remote\" repo\n runner = CliRunner()\n with chdir(remote_repo_checkout_path):\n\n result = runner.invoke(\n cli, [\"init\", \".\", \"--template-id\", \"python-minimal\", \"--force\"], \"\\n\", catch_exceptions=False\n )\n assert 0 == result.exit_code, format_result_exception(result)\n\n remote_name = remote_repo_checkout.active_branch.tracking_branch().remote_name\n remote = remote_repo_checkout.remotes[remote_name]\n result = remote.push()\n finally:\n try:\n shutil.rmtree(home)\n except OSError: # noqa: B014\n pass\n\n payload = {\"git_url\": f\"file://{remote_repo_path}\", \"depth\": PROJECT_CLONE_NO_DEPTH}\n response = svc_client.post(\"/cache.project_clone\", data=json.dumps(payload), headers=identity_headers)\n\n assert response\n assert {\"result\"} == set(response.json.keys()), response.json\n\n project_id = response.json[\"result\"][\"project_id\"]\n assert isinstance(uuid.UUID(project_id), uuid.UUID)\n\n try:\n yield svc_client, identity_headers, project_id, remote_repo, remote_repo_checkout\n finally:\n cache.ProjectCloneContext.format_url = orig_format_url\n cache.ProjectCloneContext.set_owner_name = orig_set_owner\n\n try:\n shutil.rmtree(remote_repo_path)\n except OSError: # noqa: B014\n pass\n\n try:\n shutil.rmtree(remote_repo_checkout_path)\n except OSError: # noqa: B014\n pass", "def _call_api(self, verb, url, **request_kwargs):\n api = 'https://api.github.com{}'.format(url)\n auth_headers = {'Authorization': 'token {}'.format(self.api_token)}\n headers = {**auth_headers, **request_kwargs.pop('headers', {})}\n return getattr(requests, verb)(api, headers=headers, **request_kwargs)", "async def test_cli(\n aiohttp_client,\n otupdate_config,\n monkeypatch,\n version_file_path,\n mock_name_synchronizer,\n):\n app = await buildroot.get_app(\n name_synchronizer=mock_name_synchronizer,\n system_version_file=version_file_path,\n config_file_override=otupdate_config,\n boot_id_override=\"dummy-boot-id-abc123\",\n )\n client = await aiohttp_client(app)\n return client", "def mock_requests_get(mocker, mocked_requests_get):\n\n def _requests_get(module, content, status):\n mock_func = mocker.patch(f'{module}.requests.get')\n mock_func.return_value = mocked_requests_get(content, status)\n\n return _requests_get", "def test_get_request_normal_response(self, mock_get):\n\n # Arrange\n # Construct our mock response object, giving it relevant expected behaviours\n mock_resp_instance = MockResponse({\"msg\": \"success\"}, 200, content=\"abc\")\n mock_get.return_value = mock_resp_instance\n\n # Act\n response = 
get_request_data(self.url, json_resp=False)\n\n # Assert that the request-response cycle completed successfully.\n self.assertEqual(mock_resp_instance.status_code, 200)\n self.assertEqual(response, mock_resp_instance)", "def mock_base_api_client():\n def _make_mock_api_client(status_code=200,\n response=None,\n component='reana-server'):\n mock_http_response, mock_response = Mock(), Mock()\n mock_http_response.status_code = status_code\n mock_http_response.raw_bytes = str(response).encode()\n mock_response = response\n reana_server_client = make_mock_api_client(\n component)(mock_response, mock_http_response)\n reana_client_server_api = Client(component)\n reana_client_server_api._client = reana_server_client\n return reana_client_server_api\n return _make_mock_api_client", "def mock_requests_get(request, monkeypatch, mock_get_args):\n\n def mock_get(url, *args, **kwargs):\n mock_get_args(url, *args, **kwargs)\n\n mock_resp = MagicMock()\n if url == qml.data.data_manager.FOLDERMAP_URL:\n json_data = _folder_map\n elif url == qml.data.data_manager.DATA_STRUCT_URL:\n json_data = _data_struct\n else:\n json_data = None\n\n mock_resp.json.return_value = json_data\n if hasattr(request, \"param\"):\n mock_resp.content = request.param\n\n return mock_resp\n\n monkeypatch.setattr(qml.data.data_manager, \"get\", mock_get)\n\n return mock_get", "def test_request(self):\n client = RestClient(host=self.host, username='')\n rest_url = 'some/url/'\n\n # Mock good get response\n with responses.RequestsMock() as rsps:\n rsps.add(responses.GET, f'{self.host}/{rest_url}', status=200,\n json={'value':\"good!\"})\n r = client.request('get', rest_url)\n assert r.status_code == 200\n assert r.json()['value'] == 'good!'\n \n # Mock bad get response\n with responses.RequestsMock() as rsps:\n rsps.add(responses.GET, f'{self.host}/{rest_url}', status=401,\n json={'value':\"bad!\"})\n with raises(requests.HTTPError):\n r = client.request('get', rest_url)\n r = client.request('get', rest_url, checkstatus=False)\n assert r.status_code == 401\n assert r.json()['value'] == 'bad!'", "def mocked_requests_get():\n\n class MockResponse:\n def __init__(self, _content, _status):\n self.content = _content\n self.status_code = _status\n\n def content(self):\n return self.content\n\n return MockResponse", "def http_client(\n env=None, sim_types=None, job_run_mode=None, empty_work_dir=True, port=None\n):\n global _client\n t = sim_types or CONFTEST_DEFAULT_CODES\n if t:\n if isinstance(t, (tuple, list)):\n t = \":\".join(t)\n env.SIREPO_FEATURE_CONFIG_SIM_TYPES = t\n\n from pykern import pkconfig\n\n pkconfig.reset_state_for_testing(env)\n if _client:\n return _client\n\n from pykern import pkunit\n\n if empty_work_dir:\n pkunit.empty_work_dir()\n else:\n pkunit.work_dir()\n setup_srdb_root(cfg=env)\n\n from sirepo import modules\n\n modules.import_and_init(\"sirepo.uri\")\n _client = _TestClient(env=env, job_run_mode=job_run_mode, port=port)\n return _client", "def svc_protected_old_repo(svc_synced_client, it_protected_repo_url):\n svc_client, identity_headers, cache, user = svc_synced_client\n\n payload = {\n \"git_url\": it_protected_repo_url,\n \"depth\": 1,\n }\n\n response = svc_client.post(\"/cache.project_clone\", data=json.dumps(payload), headers=identity_headers)\n project_id = response.json[\"result\"][\"project_id\"]\n\n yield svc_client, identity_headers, project_id, cache, user", "def test_activate_repositories(self):\n self.Mokes.add_repo_to_pi()\n with 
self.logged_in(access_token=\"nbiousndegoijubdognlksdngndsgmngds\"):\n index = self.client.get(\"/\").data.decode()\n self.assertIn('href=\"/repo/PerseusDl/canonical-greekLit\"', index, \"GreekLit link should be there\")\n self.assertIn('href=\"/repo/PerseusDl/canonical-greekLit\"', index, \"LatinLit link should be there\")\n index = BeautifulSoup(self.client.get(\"/\").data.decode(), 'html.parser')\n self.assertEqual(\n len(index.select(\"#repos .repo-menu-card a\")), 0,\n \"There should be no active repo in menu\"\n )\n\n activate = self.client.put(\"/api/hook/v2.0/user/repositories/PerseusDl/canonical-greekLit\")\n self.assertEqual(activate.status_code, 200, \"Request should be positive\")\n\n index = BeautifulSoup(self.client.get(\"/\").data.decode(), 'html.parser')\n self.assertEqual(\n index.select(\"#repos .repo-menu-card a\")[0][\"href\"], \"/repo/PerseusDl/canonical-greekLit\",\n \"Active repo should be in menu\"\n )\n\n with self.logged_in(access_token=\"nbiousndegoijubdognlksdngndsgmngds\"):\n \" Relogging should be okay \"\n index = BeautifulSoup(self.client.get(\"/\").data.decode(), 'html.parser')\n self.assertEqual(\n index.select(\"#repos .repo-menu-card a\")[0][\"href\"], \"/repo/PerseusDl/canonical-greekLit\",\n \"Active repo should be in menu\"\n )\n\n # We can switch off\n activate = self.client.put(\"/api/hook/v2.0/user/repositories/PerseusDl/canonical-greekLit\")\n self.assertEqual(activate.status_code, 200, \"Request should be positive\")\n\n index = BeautifulSoup(self.client.get(\"/\").data.decode(), 'html.parser')\n self.assertEqual(len(index.select(\"#repos .repo-menu-card a\")), 0, \"There should be no active repo in menu\")\n\n # Wrong repo is 404\n activate = self.client.put(\"/api/hook/v2.0/user/repositories/PerseusDl/canonical-greekLit-fake\")\n self.assertEqual(activate.status_code, 404, \"Request should be positive\")", "def setUp(self):\n\n c = Client()\n self.response = c.get('/')\n self.content = self.response.content", "def get(self, request, repo_id, format=None):\n\n # argument check\n path = request.GET.get('p', None)\n if not path:\n error_msg = 'p invalid.'\n return api_error(status.HTTP_400_BAD_REQUEST, error_msg)\n\n name = request.GET.get('name', None)\n if not name:\n error_msg = 'name invalid.'\n return api_error(status.HTTP_400_BAD_REQUEST, error_msg)\n\n # recourse check\n repo = get_repo(repo_id)\n if not repo:\n error_msg = 'Library %s not found.' 
% repo_id\n return api_error(status.HTTP_404_NOT_FOUND, error_msg)\n\n # permission check\n if not check_folder_permission(request, repo_id, path) or \\\n not request.user.permissions.can_add_repo():\n error_msg = 'Permission denied.'\n return api_error(status.HTTP_403_FORBIDDEN, error_msg)\n\n username = request.user.username\n password = request.GET.get('password', '')\n if repo.encrypted:\n # check password for encrypted repo\n if not password:\n error_msg = 'password invalid.'\n return api_error(status.HTTP_400_BAD_REQUEST, error_msg)\n else:\n try:\n syncwerk_api.set_passwd(repo_id, username, password)\n except RpcsyncwerkError as e:\n if e.msg == 'Bad arguments':\n error_msg = 'Bad arguments'\n return api_error(status.HTTP_400_BAD_REQUEST, error_msg)\n elif e.msg == 'Incorrect password':\n error_msg = _(u'Wrong password')\n return api_error(status.HTTP_400_BAD_REQUEST, error_msg)\n elif e.msg == 'Internal server error':\n error_msg = _(u'Internal server error')\n return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)\n else:\n error_msg = _(u'Decrypt library error')\n return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)\n\n # create sub-lib for encrypted repo\n try:\n if is_org_context(request):\n org_id = request.user.org.org_id\n sub_repo_id = syncwerk_api.create_org_virtual_repo(\n org_id, repo_id, path, name, name, username, password)\n else:\n sub_repo_id = syncwerk_api.create_virtual_repo(\n repo_id, path, name, name, username, password)\n except RpcsyncwerkError as e:\n logger.error(e)\n error_msg = 'Internal Server Error'\n return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)\n else:\n # create sub-lib for common repo\n try:\n if is_org_context(request):\n org_id = request.user.org.org_id\n sub_repo_id = syncwerk_api.create_org_virtual_repo(\n org_id, repo_id, path, name, name, username)\n else:\n sub_repo_id = syncwerk_api.create_virtual_repo(\n repo_id, path, name, name, username)\n except RpcsyncwerkError as e:\n logger.error(e)\n error_msg = 'Internal Server Error'\n return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)\n\n # return Response({'sub_repo_id': sub_repo_id})\n resp = {'sub_repo_id': sub_repo_id}\n return api_response(status.HTTP_200_OK, '', resp)", "def github_api(request):\n if not request.startswith('https://api.github.com/'):\n request = 'https://api.github.com/' + request\n d = time.time() - github_api.last_time\n if d < 1:\n time.sleep(1 - d) # wait at least one second between GitHub API calls\n key = os.getenv('GITHUB_API_KEY')\n req = Request(request)\n req.add_header('User-Agent', github_api.user_agent)\n if key is not None:\n req.add_header('Authorization', 'token %s' % key)\n content = ''\n try:\n response = urlopen(req)\n content = response.read().decode()\n except HTTPError as e:\n print(request)\n print(e.reason)\n print(e.info())\n raise(e)\n github_api.last_time = time.time()\n return json.loads(content)", "def _get_client(self):\n return Github(\n base_url=github_trigger.api_endpoint(),\n login_or_token=self.auth_token if self.auth_token else github_trigger.client_id(),\n password=None if self.auth_token else github_trigger.client_secret(),\n timeout=5,\n )", "def test_legacy_client(self):\n response = self.legacy_client.request('GET', urljoin(BASE_URL, MOCK_ENDPOINT))\n self.assertEquals(response.content, RESPONSE)", "def enable_api_call():\n with mock.patch.object(\n github_api._PathMetadata, '_query_github', _original_query_github\n ):\n yield", "async def repository(self, *args, 
**kwargs):\n\n return await self._makeApiCall(self.funcinfo[\"repository\"], *args, **kwargs)", "def test_update_repository_token(self):\n self.Mokes.add_repo_to_pi()\n response = self.client.get(\"/repo/PerseusDl/canonical-latinLit\").data.decode()\n index = BeautifulSoup(response, 'html.parser')\n self.assertEqual(\n len(index.select(\".travis_env\")), 0, \"Sha should not be shown when not connected\"\n )\n\n # Update the REPO !\n response = self.client.patch(\"/api/hook/v2.0/user/repositories/PerseusDl/canonical-latinLit/token\")\n self.assertEqual(response.status_code, 401, \"Request Forbidden\")\n\n with self.logged_in(access_token=\"nbiousndegoijubdognlksdngndsgmngds\"):\n response = self.client.get(\"/repo/PerseusDl/canonical-latinLit\").data.decode()\n index = BeautifulSoup(response, 'html.parser')\n travis_env1 = index.select(\".travis_env\")\n self.assertEqual(len(travis_env1), 1, \"Sha should be shown when not connected\")\n self.assertEqual(len(travis_env1[0].text), 40)\n\n # Update the REPO !\n response = loads(self.client.patch(\"/api/hook/v2.0/user/repositories/PerseusDl/canonical-latinLit/token\")\\\n .data.decode())\n self.assertEqual(response, {\"status\": True})\n\n response = self.client.get(\"/repo/PerseusDl/canonical-latinLit\").data.decode()\n index = BeautifulSoup(response, 'html.parser')\n travis_env2 = index.select(\".travis_env\")\n self.assertEqual(len(travis_env2), 1, \"Sha should be shown when not connected\")\n self.assertEqual(len(travis_env2[0].text), 40)\n self.assertNotEqual(travis_env1[0].text, travis_env2[0].text, \"Sha should be different\")", "async def test_request(client, monkeypatch, caplog):\n mock_refresh_token_called = 0\n\n async def mock_refresh_token():\n nonlocal mock_refresh_token_called\n mock_refresh_token_called += 1\n\n monkeypatch.setattr(\n client._auth_client, 'refresh_token', mock_refresh_token)\n\n async def mock_valid_token_set():\n return False\n\n monkeypatch.setattr(client, 'valid_token_set', mock_valid_token_set)\n\n resp_text = 'ohai'\n\n with aioresponses() as mocked:\n mocked.get(conftest.API_URL, status=200, body=resp_text)\n resp = await client.request('get', conftest.API_URL)\n\n assert resp == resp_text\n\n assert 1 == mock_refresh_token_called\n assert 1 == len(mocked.requests)\n request = mocked.requests.popitem()[1][0]\n authorization_header = request.kwargs['headers']['Authorization']\n assert authorization_header == f'Bearer {client._auth_client.token}'\n assert 2 == len(caplog.records)", "def test_patch_o_auth_client(self):\n pass", "def test_org(self, input, mock):\n test_class = GithubOrgClient(input)\n mock.side_effect = Exception()\n try:\n test_class.org()\n except Exception as e:\n mock.assert_called_once_with(\n f'https://api.github.com/orgs/{input}')", "def test_offline_repo_template(base_command, mock_git):\n base_command.tools.git = mock_git\n\n mock_repo = mock.MagicMock()\n mock_remote = mock.MagicMock()\n mock_remote_head = mock.MagicMock()\n\n # Git returns a Repo, that repo can return a remote, and it has\n # heads that can be accessed. 
However, calling fetch on the remote\n # will cause a git error (error code 128).\n base_command.tools.git.Repo.return_value = mock_repo\n mock_repo.remote.return_value = mock_remote\n mock_remote.refs.__getitem__.return_value = mock_remote_head\n mock_remote.fetch.side_effect = git_exceptions.GitCommandError(\"git\", 128)\n\n cached_path = cookiecutter_cache_path(\n \"https://example.com/magic/special-template.git\"\n )\n\n # Update the cache\n cached_template = base_command.update_cookiecutter_cache(\n template=\"https://example.com/magic/special-template.git\", branch=\"special\"\n )\n\n # The cookiecutter cache location will be interrogated.\n base_command.tools.git.Repo.assert_called_once_with(cached_path)\n\n # The origin of the repo was fetched\n mock_repo.remote.assert_called_once_with(name=\"origin\")\n mock_remote.fetch.assert_called_once_with()\n\n # The right branch was accessed\n mock_remote.refs.__getitem__.assert_called_once_with(\"special\")\n\n # The remote head was checked out.\n mock_remote_head.checkout.assert_called_once_with()\n\n # The template that will be used is the original URL\n assert cached_template == cached_path", "def github_client(self):\n if getattr(self, \"_github_client\", None) is None:\n self._github_client = AsyncGitHubClient(self.log, self.client)\n return self._github_client", "def mocked_requests_get(*args, **kwargs):\n response = {'message': '',\n 'data': {\n 'cti_token': 'secret-cti-token',\n 'passivetotal_token': 'secret-passivetotal-token',\n 'passivetotal_user': '[email protected]',\n 'shodan_token': 'secret-shodan-token'\n }\n }\n return MockResponse(json.dumps(response), 200, HEADERS)", "def test_get_repo_pulled(self):\n repo = Repository(\"https://github.com/qcoumes/gitload_test.git\")\n self.assertTrue(repo.get_repo())\n self.assertTrue(os.path.exists(MEDIA_ROOT+\"/gitload_test\"))\n if (os.path.exists(MEDIA_ROOT+\"/gitload_test\")):\n shutil.rmtree(MEDIA_ROOT+\"/gitload_test\")", "def test_get_git_wrapper_returns_the_wrapper(tmp_path: str) -> None:\n Repo.init(tmp_path)\n repo = Repository(str(tmp_path))\n assert repo.get_git_wrapper() is not None\n assert repo.get_git_wrapper().git_repo == GitWrapper(tmp_path).git_repo", "def test_public_repos_with_license(self):\n instance = GithubOrgClient(\"do\")\n self.assertEqual(instance.org, self.org_payload)\n self.assertAlmostEqual(instance._public_repos_url,\n 'https://api.github.com/orgs/google/repos')\n self.assertEqual(instance.repos_payload, self.repos_payload)\n self.assertEqual(instance.public_repos(), self.expected_repos)\n self.assertEqual(instance.public_repos(\"nolicence\"), [])\n self.assertEqual(instance.public_repos(\n \"apache-2.0\"), self.apache2_repos)\n self.mock.assert_called()", "def test_client_can_load_client_requests_directly(self):\n\n req = self.httpbin.get_request_data('get_my_ip')\n self.assertEqual(req, self.httpbin.client['get_my_ip'])\n req = self.httpbin.get_request_data('get_my_headers')\n self.assertEqual(req, self.httpbin.client['get_my_headers'])\n\n req = self.httpbin_2.get_request_data('get_my_ip')\n self.assertEqual(req, self.httpbin_2.client['get_my_ip'])\n req = self.httpbin_2.get_request_data('get_my_headers')\n self.assertEqual(req, self.httpbin_2.client['get_my_headers'])", "def execute_request(path):\n headers = {\n \"Accept\": \"application/vnd.github.v3+json\"\n }\n url = \"https://api.github.com\" + path\n\n # GET https://api.github.com/<path> Accept: \"application/vnd.github.v3+json\"\n\n response = requests.get(url, headers=headers, 
timeout=GLOBAL_TIMEOUT)\n\n response.raise_for_status() # Raise a RequestException if we failed, and trigger retry\n\n return response.json()", "def get_repo_data(repo, session=None):\n url = f'{GITHUB_API_URL}/repos/{repo}'\n return get_whole_response_as_json(url, session)", "def obtain(self, *args: Any, **kwargs: Any) -> None:\n self.ensure_dir()\n\n url = self.url\n\n self.log.info(\"Cloning.\")\n # todo: log_in_real_time\n self.cmd.clone(\n url=url,\n progress=True,\n depth=1 if self.git_shallow else None,\n config={\"http.sslVerify\": False} if self.tls_verify else None,\n log_in_real_time=True,\n )\n\n self.log.info(\"Initializing submodules.\")\n self.cmd.submodule.init(\n log_in_real_time=True,\n )\n self.cmd.submodule.update(\n init=True,\n recursive=True,\n log_in_real_time=True,\n )\n\n self.set_remotes(overwrite=True)", "def setup_class(cls):\n cls.mock_get_patcher = patch('project.services.requests.get')\n cls.mock_get = cls.mock_get_patcher.start()", "def _create_repository(self, github=True, repository_plan='public-org'):\n if github:\n account = HostingServiceAccount(service_name='github',\n username='myuser')\n\n def _http_get_user(_self, url, *args, **kwargs):\n self.assertEqual(url, 'https://api.github.com/user')\n\n payload = b'{}'\n headers = {\n str('X-OAuth-Scopes'): str('admin:repo_hook, repo, user'),\n }\n\n return HostingServiceHTTPResponse(\n request=HostingServiceHTTPRequest(url=url),\n url=url,\n data=payload,\n headers=headers,\n status_code=200)\n\n service = account.service\n self.spy_on(service.client.http_get,\n call_fake=_http_get_user)\n\n service.authorize('myuser', 'mypass', None)\n self.assertTrue(account.is_authorized)\n\n service.client.http_get.unspy()\n\n repository = self.create_repository()\n repository.hosting_account = account\n repository.extra_data['repository_plan'] = repository_plan\n\n if repository_plan == 'public':\n repository.extra_data['github_public_repo_name'] = \\\n 'mypublicrepo'\n elif repository_plan == 'public-org':\n repository.extra_data['github_public_org_name'] = 'mypublicorg'\n repository.extra_data['github_public_org_repo_name'] = \\\n 'mypublicorgrepo'\n elif repository_plan == 'private':\n repository.extra_data['github_private_repo_name'] = \\\n 'myprivaterepo'\n elif repository_plan == 'private-org':\n repository.extra_data['github_private_org_name'] = \\\n 'myprivateorg'\n repository.extra_data['github_private_org_repo_name'] = \\\n 'myprivateorgrepo'\n\n repository.save()\n return repository\n else:\n return self.create_repository()", "def register_repo_check(self, body):\n httpretty.register_uri(\n httpretty.GET,\n re.compile(\n '^{url}repos/{org}/({repo}|{repo_rerun})$'.format(\n url=self.URL,\n org=self.ORG,\n repo=re.escape(self.TEST_REPO),\n repo_rerun=re.escape(self.TEST_RERUN_REPO)\n )\n ),\n body=body\n )", "def mock_status(request):\n\n install = AsyncMock()\n install.contract = CONTRACT\n install.status = request.param\n\n with patch(\"pyprosegur.installation.Installation.retrieve\", return_value=install):\n yield", "def __init__(self, user, proj):\n auth_hdr = {\"Authorization\" : \"token \" + input(\"Enter PA token: \")}\n self._session = requests.Session()\n self._session.headers.update(auth_hdr)\n self._base = self.API_ROOT + \"/repos/{}/{}\".format(user, proj)", "def test_authenticated_request(self):\n http = FakeHttp([(FakeResponse(200), {})])\n self.mock.Http.return_value = http\n self.mock.read_data_from_file.return_value = 'cached auth token'\n response, _ = http_utils.request('https://url/', 
configuration=self.config)\n\n # Ensure that all expected requests were made.\n self.assertEqual(http.replies, [])\n\n self.assertEqual(http.last_headers, {\n 'Authorization': 'cached auth token',\n 'User-Agent': 'clusterfuzz-reproduce'\n })\n self.assertEqual(response.status, 200)", "def _http_client_origin(self):\n self._setup_http_mock()\n pipeline_builder = self.sdc_builder.get_pipeline_builder()\n http_client = pipeline_builder.add_stage('HTTP Client', type='origin')\n http_client.resource_url = f'{self.http_mock.pretend_url}/{self.dataset}'\n http_client.json_content = 'ARRAY_OBJECTS'\n return http_client, pipeline_builder", "def callback_repo_create(self, request, uri, headers, status_code=201):\n # Disabling unused-argument because this is a callback with\n # required method signature.\n # pylint: disable=unused-argument\n self.assertEqual(\n request.headers['Authorization'],\n 'token {0}'.format(self.OAUTH2_TOKEN)\n )\n repo_dict = json.loads(request.body)\n self.assertTrue(\n repo_dict['name'] in [self.TEST_REPO, self.TEST_RERUN_REPO]\n )\n self.assertEqual(repo_dict['description'], self.TEST_DESCRIPTION)\n self.assertEqual(repo_dict['private'], True)\n\n return (status_code, headers, json.dumps({'html_url': 'testing'}))", "def __init__(self, client, use_stubs=True):\n super().__init__(client, use_stubs)", "def __init__(self, client, use_stubs=True):\n super().__init__(client, use_stubs)", "def __init__(self, client, use_stubs=True):\n super().__init__(client, use_stubs)", "def __init__(self, client, use_stubs=True):\n super().__init__(client, use_stubs)", "def __init__(self, client, use_stubs=True):\n super().__init__(client, use_stubs)", "def patch_client(target, mock_client=None):\n with mock.patch(target) as client_getter:\n client = mock_client or MockBox()\n client_getter.return_value = client\n yield client", "def _client(self) -> httpx.Client:\n return httpx.Client(\n base_url=self._base_url,\n headers=self._authorization_headers,\n proxies=self._proxies,\n )", "def _init_raw_client(self) -> None:\n if self.credentials:\n auth = HTTPBasicAuth(self.credentials['username'], self.credentials['password'])\n else:\n auth = None\n base_url = \"http://\" if self.untrusted else \"https://\"\n base_url += self.url\n self.raw_client = client.DockerRegistryClient(base_url=base_url, auth=auth)", "def mock_moira_client(mocker):\n return mocker.patch(\"moira_lists.moira_api.get_moira_client\", autospec=True)", "def test_client_can_load_client_page_requests_directly(self):\n\n req = self.httpbin_3.get_request_data('get_my_ip')\n\n self.assertEqual(req, self.httpbin_3.client[\"homepage\"]['get_my_ip'])\n req = self.httpbin_3.get_request_data('test_requests_patch_method')\n self.assertEqual(req, self.httpbin_3.client[\"homepage\"]['test_requests_patch_method'])\n req = self.httpbin_3.get_request_data('test_requests_delete_method')\n self.assertEqual(req, self.httpbin_3.client[\"second_page\"]['test_requests_delete_method'])\n\n req = self.httpbin_4.get_request_data('get_my_ip')\n self.assertEqual(req, self.httpbin_4.client['get_my_ip'])\n req = self.httpbin_4.get_request_data('get_user_my_agent')\n self.assertEqual(req, self.httpbin_4.client['get_user_my_agent'])\n req = self.httpbin_4.get_request_data('test_requests_put_method')\n self.assertEqual(req, self.httpbin_4.client['test_requests_put_method'])\n req = self.httpbin_4.get_request_data('test_requests_post_method')\n self.assertEqual(req, self.httpbin_4.client['test_requests_post_method'])", "async def 
test_forward_request_onboarded_noauth_get(\n hassio_noauth_client, aioclient_mock: AiohttpClientMocker, path: str\n) -> None:\n aioclient_mock.get(f\"http://127.0.0.1/{path}\", text=\"response\")\n\n resp = await hassio_noauth_client.get(f\"/api/hassio/{path}\")\n\n # Check we got right response\n assert resp.status == HTTPStatus.OK\n body = await resp.text()\n assert body == \"response\"\n\n # Check we forwarded command\n assert len(aioclient_mock.mock_calls) == 1\n # We only expect a single header.\n assert aioclient_mock.mock_calls[0][3] == {\"X-Hass-Source\": \"core.http\"}", "def create_api_handler(self):\n self.github = github3.login(username=GH_USER, password=GH_PASSWORD)\n if hasattr(self.github, 'set_user_agent'):\n self.github.set_user_agent('Jonathan Reeve: http://jonreeve.com')\n self.org = self.github.organization(login='Git-Lit')\n # FIXME: logging\n print(\"ratelimit: \" + str(self.org.ratelimit_remaining))", "def test_create_files_with_gitignore(self, mock_get):\n\n mock_resp = mock.Mock()\n mock_resp.raise_for_status = mock.Mock()\n mock_resp.content = \"IGNORE ME\\n\"\n mock_get.return_value = mock_resp\n\n opts = mock.Mock()\n opts.repo = self.repo\n opts.create_version_file = True\n opts.source = 'src'\n opts.version = '0.0.1'\n opts.version_file = None\n opts.org = \"ORG\"\n opts.desc = \"DESCRIPTION\"\n opts.templates = []\n opts.test_mode = False\n opts.history_file = 'HISTORY.md'\n opts.package = 'unittests'\n opts.develop = 'develop'\n opts.requirements = 'requirements.txt'\n opts.pypi_package_name = 'pypi.package.unittest'\n opts.python = 'python3'\n opts.gitignore_url = \"GIT_IGNORE_URL\"\n opts.add_gitignore = True\n opts.test_requirements = 'test-requirements.txt'\n version = os.path.join(self.repo, 'src', 'unittests', '__init__.py')\n os.system('rm -f {}'.format(version))\n create_files(opts)\n\n dir_list = os.listdir(self.repo)\n self.failUnless('cirrus.conf' in dir_list)\n self.failUnless('HISTORY.md' in dir_list)\n self.failUnless('MANIFEST.in' in dir_list)\n self.failUnless('setup.py' in dir_list)\n self.failUnless('.gitignore' in dir_list)\n\n gitignore = os.path.join(self.repo, '.gitignore')\n with open(gitignore, 'r') as handle:\n content = handle.read()\n self.assertEqual(content.strip(), \"IGNORE ME\")", "def test_pull_from_origin(tmpdir):\n gitwrapper.clone_from('git://github.com/Tinche/bower-cache', tmpdir)\n gitwrapper.pull_from_origin(tmpdir)", "def _request(self, url, **kwargs):\n headers = {'PRIVATE-TOKEN': self.token}\n response = make_request(self.base_url + url, headers=headers, **kwargs)\n logging.info('Requested: {0}'.format(url))\n logging.info('Method: {0}'.format(kwargs.get('method', 'GET')))\n logging.info(response.content)\n return json.loads(response.content)", "def get(self, repo: Repository):\n cache_key = self.cache_key.format(repo_id=repo.id.hex)\n\n result = redis.get(cache_key)\n if result is None:\n vcs = repo.get_vcs()\n if not vcs:\n return self.respond([])\n\n vcs.ensure()\n result = vcs.get_known_branches()\n redis.setex(cache_key, json.dumps(result), self.cache_expire)\n else:\n result = json.loads(result)\n\n return self.respond([{\"name\": r} for r in result])", "def api_client(api_client):\n assert api_client().get(\"/bin/anything/bin\").status_code == 200\n\n return api_client(disable_retry_status_list={404})", "def create_mock_client(self, fake_request_method):\n class FakeHttpLib2(object):\n pass\n\n FakeHttpLib2.request = fake_request_method\n mock_client = self.mox.CreateMock(DNSaasClient)\n mock_client.http_pool = 
pools.Pool()\n mock_client.http_pool.create = FakeHttpLib2\n mock_client.auth_token = 'token'\n return mock_client", "def setUp(self):\n self.client = api.Client(config.get_config(), api.json_handler)", "def client(pure_client, response):\n with requests_mock.Mocker() as m:\n pure_client.m = m\n yield pure_client", "def _test__import_api(self, response):\n requests_get_mock = MagicMock()\n requests_get_mock.return_value.status_code = 200\n requests_get_mock.return_value.text = response\n with patch(\"requests.get\", requests_get_mock):\n proxmox._import_api()\n self.assertEqual(proxmox.api, [{\"info\": {}}])\n return", "def test_for_client():", "def __init__(self, url='', credentials=None,\n get_credentials=True, http=None, model=None,\n log_request=False, log_response=False,\n credentials_args=None, default_global_params=None,\n additional_http_headers=None, response_encoding=None):\n url = url or self.BASE_URL\n super(SourceV1, self).__init__(\n url, credentials=credentials,\n get_credentials=get_credentials, http=http, model=model,\n log_request=log_request, log_response=log_response,\n credentials_args=credentials_args,\n default_global_params=default_global_params,\n additional_http_headers=additional_http_headers,\n response_encoding=response_encoding)\n self.projects_repos_aliases_files = self.ProjectsReposAliasesFilesService(self)\n self.projects_repos_aliases = self.ProjectsReposAliasesService(self)\n self.projects_repos_files = self.ProjectsReposFilesService(self)\n self.projects_repos_revisions_files = self.ProjectsReposRevisionsFilesService(self)\n self.projects_repos_revisions = self.ProjectsReposRevisionsService(self)\n self.projects_repos_workspaces_files = self.ProjectsReposWorkspacesFilesService(self)\n self.projects_repos_workspaces_snapshots_files = self.ProjectsReposWorkspacesSnapshotsFilesService(self)\n self.projects_repos_workspaces_snapshots = self.ProjectsReposWorkspacesSnapshotsService(self)\n self.projects_repos_workspaces = self.ProjectsReposWorkspacesService(self)\n self.projects_repos = self.ProjectsReposService(self)\n self.projects = self.ProjectsService(self)\n self.v1 = self.V1Service(self)", "def test_06_get(self, mock_gupycurl,\n mock_guexecurl, mock_msg):\n self._init()\n geturl = udocker.GetURL()\n self.assertRaises(TypeError, geturl.get)\n #\n geturl = udocker.GetURL()\n geturl._geturl = type('test', (object,), {})()\n geturl._geturl.get = self._get\n self.assertEqual(geturl.get(\"http://host\"), \"http://host\")", "def _mock_cache_sync(repo):\n from renku.service.controllers.api import mixins\n\n current_head = repo.head.ref\n\n def _mocked_repo_reset(self, project):\n \"\"\"Mock repo reset to work with mocked renku save.\"\"\"\n repo.git.reset(\"--hard\", current_head)\n\n reset_repo_function = mixins.RenkuOperationMixin.reset_local_repo\n mixins.RenkuOperationMixin.reset_local_repo = _mocked_repo_reset\n\n try:\n yield\n finally:\n mixins.RenkuOperationMixin.reset_local_repo = reset_repo_function", "def test_read_o_auth_client(self):\n pass", "def test_repository(self):\n os.environ['GITHUB_REPOSITORY'] = 'repo/owner'\n self.assertIsNone(self.env.git_url)", "def _external_request(self, method, url, *args, **kwargs):\n self.last_url = url\n if url in self.responses.keys() and method == 'get':\n return self.responses[url] # return from cache if its there\n\n headers = kwargs.pop('headers', None)\n custom = {'User-Agent': useragent}\n if headers:\n headers.update(custom)\n kwargs['headers'] = headers\n else:\n kwargs['headers'] = custom\n\n response = 
getattr(requests, method)(url, *args, **kwargs)\n\n if self.verbose:\n print(\"Got Response: %s\" % url)\n\n if response.status_code == 503:\n raise SkipThisService(\"Service returned 503 - Temporarily out of service.\")\n\n if method == 'get':\n self.responses[url] = response # cache for later\n\n self.last_raw_response = response\n return response", "def repository(\n project: Project,\n mocker: MockerFixture,\n repository_pypi_json: Path,\n local_finder: type[None],\n) -> TestRepository:\n rv = TestRepository([], project.environment, repository_pypi_json)\n mocker.patch.object(project, \"get_repository\", return_value=rv)\n return rv" ]
[ "0.66696745", "0.6602459", "0.63437665", "0.6236709", "0.6184174", "0.61553884", "0.6148627", "0.6107372", "0.5980647", "0.593272", "0.59284574", "0.5912947", "0.58175915", "0.58170277", "0.5788988", "0.5725378", "0.5725306", "0.5720646", "0.56785136", "0.5651482", "0.5646013", "0.56451887", "0.5621783", "0.56168747", "0.56158614", "0.5604532", "0.5603608", "0.55754817", "0.55160314", "0.550298", "0.5464835", "0.54642516", "0.54490644", "0.54435396", "0.5437486", "0.54347336", "0.5425634", "0.54169464", "0.5412834", "0.54063547", "0.53987443", "0.539189", "0.53905976", "0.5389424", "0.5389296", "0.5380827", "0.5361535", "0.5355044", "0.5353695", "0.5350278", "0.5336514", "0.53274137", "0.5326266", "0.53218544", "0.5320243", "0.5311637", "0.5305608", "0.5291968", "0.5290026", "0.5288134", "0.5287834", "0.5276537", "0.5262164", "0.5258653", "0.5251144", "0.5242955", "0.52385855", "0.52384263", "0.5238015", "0.5233955", "0.5233452", "0.5222553", "0.5217035", "0.5217035", "0.5217035", "0.5217035", "0.5217035", "0.5212915", "0.52078867", "0.5201969", "0.5197935", "0.51955914", "0.5193531", "0.5193461", "0.51918155", "0.51852185", "0.5178371", "0.51758647", "0.5171397", "0.5169348", "0.5168997", "0.51672435", "0.51649696", "0.5159046", "0.51558703", "0.5149112", "0.51485854", "0.5136063", "0.51359373", "0.5129657", "0.5127449" ]
0.0
-1
Intercept http and mock client (get_repo)
def test_protection_can_be_copied():
    setup_org("octocat")
    protection_url = "https://api.github.com/repos/octocat/Hello-World/branches/master/protection"
    responses.add(responses.GET, protection_url, status=200, content_type='text/json', body=branch_protection)
    put_url = "https://api.github.com/repos/octocat/Hello-World/branches/main/protection"
    responses.add(responses.PUT, put_url)
    token = '__dummy__'
    org = "octocat"
    repo = "Hello-World"
    client = GithubRestClient(token)
    success = copy_branch_protection(client, org, repo, 'master', 'main')
    assert True == success
    last_request = json.loads(responses.calls[-1].request.body)
    assert True == last_request['enforce_admins']
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_issue_50():\n utils.set_http_mock()\n\n client = Github(proxy_host=\"my.proxy.com\", proxy_port=9000)\n setup_args = client.request._http.called_with\n assert_equals(type(setup_args['proxy_info']), httplib2.ProxyInfo)\n assert_equals(setup_args['proxy_info'].proxy_host, 'my.proxy.com')\n assert_equals(setup_args['proxy_info'].proxy_port, 9000)\n\n utils.unset_http_mock()", "def mock_github_get(url):\n mock_repo_key = url.split(\"/\")[-1]\n\n result = requests.Response()\n result.status_code = 200\n result.encoding = \"utf-8\"\n result._content = repos[mock_repo_key].encode()\n\n return result", "def setUp(self):\n self.client = HTTPClient()\n self.method = 'GET'\n self.url = 'http://github.com/ojengwa'\n self.headers = {}", "def setUp(self):\n self.client = RequestsClient()\n self.method = 'GET'\n self.url = 'http://github.com/ojengwa'\n self.headers = {}", "def test_fetch_repositories(self):\n self.maxDiff = None\n with self.logged_in(\n access_token=\"nbiousndegoijubdognlksdngndsgmngds\",\n extra_mocks=[\n (\n \"get\",\n \"https://api.github.com/user/repos\",\n dict(\n json=self.fixtures['./tests/fixtures/repos_ponteineptique.response.json'][0],\n headers=self.fixtures['./tests/fixtures/repos_ponteineptique.response.json'][1]\n )\n ),\n (\n \"get\",\n re.compile(\"https://api.github.com/user/repos\\?.*page=2\"),\n dict(\n json=self.fixtures['./tests/fixtures/repos_ponteineptique.page2.response.json'][0],\n headers=self.fixtures['./tests/fixtures/repos_ponteineptique.page2.response.json'][1]\n )\n )\n ]\n ):\n index = self.client.get(\"/\").data.decode()\n self.assertNotIn(\"Sign-in\", index, \"We are logged in\")\n self.assertIn(\"Hi ponteineptique!\", index, \"We are logged in\")\n\n # We check\n repositories = loads(self.client.get(\"/api/hook/v2.0/user/repositories\").data.decode())\n self.assertEqual(repositories, {\"repositories\": []}, \"No repository on first get\")\n\n # We refresh by posting\n repositories = loads(self.client.post(\"/api/hook/v2.0/user/repositories\").data.decode())\n repositories[\"repositories\"] = sorted(repositories[\"repositories\"], key=lambda x: x[\"name\"])\n self.assertEqual(\n repositories,\n {\n \"repositories\": [\n {'name': 'canonical-greekLit', 'owner': 'PerseusDL'},\n {'name': 'canonical-latinLit', 'owner': 'PerseusDL'},\n {'name': 'canonical-norseLit', 'owner': 'PerseusDL'},\n {'name': 'octodog', 'owner': 'octocat'}\n ]\n },\n \"Github API is parsed correctly\"\n )\n\n with self.logged_in(\n access_token=\"nbiousndegoijubdognlksdngndsgmngds\",\n extra_mocks=[\n (\n \"get\",\n \"https://api.github.com/user/repos\",\n dict(\n json=self.fixtures['./tests/fixtures/repos_ponteineptique.response.json'][0],\n headers=self.fixtures['./tests/fixtures/repos_ponteineptique.response.json'][1]\n )\n ),\n (\n \"get\",\n re.compile(\"https://api.github.com/user/repos\\?.*page=2\"),\n dict(\n json=self.fixtures['./tests/fixtures/repos_ponteineptique.page2.alt.response.json'][0],\n headers=self.fixtures['./tests/fixtures/repos_ponteineptique.page2.alt.response.json'][1]\n )\n )\n ]\n ):\n # We check it was saved\n repositories = loads(self.client.get(\"/api/hook/v2.0/user/repositories\").data.decode())\n repositories[\"repositories\"] = sorted(repositories[\"repositories\"], key=lambda x: x[\"name\"])\n self.assertEqual(\n repositories,\n {\n \"repositories\": [\n {'name': 'canonical-greekLit', 'owner': 'PerseusDL'},\n {'name': 'canonical-latinLit', 'owner': 'PerseusDL'},\n {'name': 'canonical-norseLit', 'owner': 'PerseusDL'},\n {'name': 'octodog', 
'owner': 'octocat'}\n ]\n },\n \"When logging in back, we should have the same old repos\"\n )\n\n # We update by posting\n repositories = loads(self.client.post(\"/api/hook/v2.0/user/repositories\").data.decode())\n repositories[\"repositories\"] = sorted(repositories[\"repositories\"], key=lambda x: x[\"name\"])\n self.assertEqual(\n repositories,\n {\n \"repositories\": [\n {'name': 'canonical-greekLit', 'owner': 'PerseusDL'},\n {'name': 'canonical-latinLit', 'owner': 'PerseusDL'},\n {'name': 'canonical-norseLit', 'owner': 'PerseusDL'},\n {'name': 'octodog', 'owner': 'octocat'},\n {'name': 'oneKGreek', 'owner': 'ponteineptique'}\n ]\n },\n \"Github API is parsed correctly\"\n )\n\n # We check it was saved and cleared before\n repositories = loads(self.client.get(\"/api/hook/v2.0/user/repositories\").data.decode())\n repositories[\"repositories\"] = sorted(repositories[\"repositories\"], key=lambda x: x[\"name\"])\n self.assertEqual(\n repositories,\n {\n \"repositories\": [\n {'name': 'canonical-greekLit', 'owner': 'PerseusDL'},\n {'name': 'canonical-latinLit', 'owner': 'PerseusDL'},\n {'name': 'canonical-norseLit', 'owner': 'PerseusDL'},\n {'name': 'octodog', 'owner': 'octocat'},\n {'name': 'oneKGreek', 'owner': 'ponteineptique'}\n ]\n },\n \"Old repos should have been cleared, new ones should be there !\"\n )\n\n with self.logged_in(\n access_token=\"nbiousndegoijubdognlksdngndsgmngds\",\n extra_mocks=[\n (\n \"get\",\n \"https://api.github.com/user/repos\",\n dict(\n json=self.fixtures['./tests/fixtures/repos_ponteineptique.response.json'][0],\n headers=self.fixtures['./tests/fixtures/repos_ponteineptique.response.json'][1]\n )\n ),\n (\n \"get\",\n re.compile(\"https://api.github.com/user/repos\\?.*page=2\"),\n dict(\n json=self.fixtures['./tests/fixtures/repos_ponteineptique.page2.alt2.response.json'][0],\n headers=self.fixtures['./tests/fixtures/repos_ponteineptique.page2.alt2.response.json'][1]\n )\n )\n ]\n ):\n # We check it was saved\n repositories = loads(self.client.get(\"/api/hook/v2.0/user/repositories\").data.decode())\n repositories[\"repositories\"] = sorted(repositories[\"repositories\"], key=lambda x: x[\"name\"])\n self.assertEqual(\n repositories,\n {\n \"repositories\": [\n {'name': 'canonical-greekLit', 'owner': 'PerseusDL'},\n {'name': 'canonical-latinLit', 'owner': 'PerseusDL'},\n {'name': 'canonical-norseLit', 'owner': 'PerseusDL'},\n {'name': 'octodog', 'owner': 'octocat'},\n {'name': 'oneKGreek', 'owner': 'ponteineptique'}\n ]\n },\n \"When logging in back, we should have the same old repos\"\n )\n\n # We update by posting\n repositories = loads(self.client.post(\"/api/hook/v2.0/user/repositories\").data.decode())\n repositories[\"repositories\"] = sorted(repositories[\"repositories\"], key=lambda x: x[\"name\"])\n self.assertEqual(\n repositories,\n {\n 'repositories': [\n {'name': 'Ahab', 'owner': 'Capitains'},\n {'name': 'Capitains.github.io', 'owner': 'Capitains'},\n {'name': 'Cavern', 'owner': 'Capitains'},\n {'name': 'ahab-legacy-existdb', 'owner': 'Capitains'},\n {'name': 'ahab-legacy-python', 'owner': 'Capitains'},\n {'name': 'alignment-editor', 'owner': 'alpheios-project'},\n {'name': 'alpheios-docs', 'owner': 'alpheios-project'},\n {'name': 'alpheios-flask', 'owner': 'alpheios-project'},\n {'name': 'alpheios5', 'owner': 'alpheios-project'},\n {'name': 'angular-nemo', 'owner': 'Capitains'},\n {'name': 'arethusa', 'owner': 'alpheios-project'},\n {'name': 'arethusa-cli', 'owner': 'alpheios-project'},\n {'name': 'arethusa-configs', 'owner': 
'alpheios-project'},\n {'name': 'arethusa-example-data', 'owner': 'alpheios-project'},\n {'name': 'arethusa-experiments', 'owner': 'alpheios-project'},\n {'name': 'arethusa-ngdocs-generator', 'owner': 'alpheios-project'},\n {'name': 'arethusa-server', 'owner': 'alpheios-project'},\n {'name': 'basic-reader', 'owner': 'alpheios-project'},\n {'name': 'canonical-greekLit', 'owner': 'PerseusDL'},\n {'name': 'canonical-latinLit', 'owner': 'PerseusDL'},\n {'name': 'canonical-norseLit', 'owner': 'PerseusDL'},\n {'name': 'chrome-wrapper', 'owner': 'alpheios-project'},\n {'name': 'cookiecutter-guidelines', 'owner': 'Capitains'},\n {'name': 'cts-api', 'owner': 'alpheios-project'},\n {'name': 'ctsworklist', 'owner': 'alpheios-project'},\n {'name': 'dummy1', 'owner': 'alpheios-project'},\n {'name': 'edit-utils', 'owner': 'alpheios-project'},\n {'name': 'inflection-analysis-prototype', 'owner': 'alpheios-project'},\n {'name': 'morphlib', 'owner': 'alpheios-project'},\n {'name': 'morphwrappers', 'owner': 'alpheios-project'},\n {'name': 'nemo_arethusa_plugin', 'owner': 'alpheios-project'},\n {'name': 'schemas', 'owner': 'alpheios-project'},\n {'name': 'tei-digital-age', 'owner': 'alpheios-project'}\n ]},\n \"Github API is parsed correctly\"\n )", "def __init__(self, github_repo, module='', main_dir='main'):\n self._http_client = HttpClient()\n self._github_repo = github_repo.rstrip('/').replace('https://github.com', 'https://api.github.com/repos')\n self._main_dir = main_dir\n self._module = module.rstrip('/')", "def _get_repo(self, owner, repo):\n url = f\"{BASE_URL}/repos/{owner}/{repo}\"\n status, data, _ = self.get(url)\n if (status == 200):\n return data\n else:\n log.warning(\"GHUB\", f\"Unexpected status code {status} for request {url}.\")", "def stub_http(hass):\n mock_http_component(hass)", "def svc_protected_repo(svc_client, identity_headers, it_protected_repo_url):\n from renku.core.models.git import GitURL\n\n payload = {\n \"git_url\": it_protected_repo_url,\n \"depth\": 0,\n }\n\n response = svc_client.post(\"/cache.project_clone\", data=json.dumps(payload), headers=identity_headers)\n\n data = {\n \"project_id\": response.json[\"result\"][\"project_id\"],\n \"skip_template_update\": True,\n \"skip_docker_update\": True,\n }\n svc_client.post(\"/cache.migrate\", data=json.dumps(data), headers=identity_headers)\n\n url_components = GitURL.parse(it_protected_repo_url)\n\n with integration_repo(identity_headers, response.json[\"result\"][\"project_id\"], url_components) as repo:\n with _mock_cache_sync(repo):\n yield svc_client, identity_headers, payload, response", "def repository_create_proxy():\n pass", "def setUpClass(cls):\n cls.get_patcher = patch('requests.get')\n cls.mock = cls.get_patcher.start()\n cls.mock.return_value.json.side_effect = [\n cls.org_payload, cls.repos_payload,\n cls.org_payload, cls.repos_payload,\n ]", "def test_returns_cloned_repo_by_name(self):\n # Need to set up a git repo with origin info.\n full_path = path.join(settings.REPO_ROOT, 'test')\n envoy.run('git init {0}'.format(full_path))\n fake_origin = 'git://localhost'\n envoy.run('git -C {0} remote add origin {1}'.format(full_path,\n fake_origin))\n url = reverse(\"find\", kwargs={'name': 'test'})\n response = self.client.get(url)\n self.assertEqual(200, response.status_code)\n result = json.loads(response.content.decode())\n expected_url = settings.REPO_URL + u'test'\n self.assertEqual(result['url'], expected_url)\n self.assertEqual(result['name'], u'test')", "def svc_client_with_repo(svc_client_setup):\n 
svc_client, headers, project_id, url_components, repo = svc_client_setup\n\n response = svc_client.post(\n \"/cache.migrate\", data=json.dumps(dict(project_id=project_id, skip_docker_update=True)), headers=headers\n )\n\n assert response.json[\"result\"]\n\n with _mock_cache_sync(repo):\n yield svc_client, deepcopy(headers), project_id, url_components", "def test_get_client(self):\n pass", "def test_load_github(self):\n\n c = Client()\n response = c.get('/taric_books/github/')\n\n self.assertEqual(response.status_code, 200)", "def http_client(http_client, base_url):\n original_fetch = http_client.fetch\n\n def _fetch(url):\n fetch = partial(original_fetch, base_url + url)\n return http_client.io_loop.run_sync(fetch)\n\n http_client.fetch = _fetch\n return http_client", "def api_repo_get(access_key):\n repo = Repo.query.get(access_key)\n if not repo:\n return jsonify(error=\"Repo not found\"), 404\n \n if repo.is_private and 'working_repo' not in session:\n return jsonify(error=\"Unauthorized\"), 401\n elif repo.is_private and session['working_repo'] != repo.access_key:\n return jsonify(error=\"Unauthorized\"), 403\n elif repo.is_private and session['working_repo'] == repo.access_key:\n return jsonify(repo.to_json())\n else:\n return jsonify(repo.to_json())", "def test_public_repos_url(self, org, expected):\n with patch('client.get_json') as mock:\n instance = GithubOrgClient(org)\n mock.return_value = expected\n self.assertEqual(instance._public_repos_url, expected[\"repos_url\"])", "def test_returns_cloned_repo_by_name_auto_host(self):\n # Need to set up a git repo with origin info.\n full_path = path.join(settings.REPO_ROOT, 'test')\n envoy.run('git init {0}'.format(full_path))\n fake_origin = 'git://localhost'\n envoy.run('git -C {0} remote add origin {1}'.format(full_path,\n fake_origin))\n url = reverse(\"find\", kwargs={'name': 'test'})\n\n del settings.REPO_URL\n\n response = self.client.get(url, HTTP_HOST='test-host')\n\n self.assertEqual(200, response.status_code)\n result = json.loads(response.content.decode())\n expected_url = 'git://test-host/test'\n self.assertEqual(result['url'], expected_url)\n self.assertEqual(result['name'], u'test')", "def test_api_repo_status_get(self):\n default_api = DefaultApi(api_client=self.api_client)\n params = dlrnapi_client.Params2()\n path, method = default_api.api_repo_status_get(params)\n self.assertEqual(path, '/api/repo_status')\n self.assertEqual(method, 'GET')", "def setUpClass(self):\n self.repo = Repository(\"https://github.com/qcoumes/gitload_test.git\")\n self.repo.get_repo()", "def test_api_last_tested_repo_get(self):\n default_api = DefaultApi(api_client=self.api_client)\n params = dlrnapi_client.Params()\n path, method = default_api.api_last_tested_repo_get(params)\n self.assertEqual(path, '/api/last_tested_repo')\n self.assertEqual(method, 'GET')", "def DoStubHttp(status, mime, resp_body):\n def Request(unused_self, unused_url, method, body, headers):\n _ = method, body, headers # unused kwargs\n response = httplib2.Response({\n 'status': status,\n 'content-type': mime,\n })\n return response, resp_body\n return mock.patch('httplib2.Http.request', new=Request)", "def test_public_repos(self, mock_json):\n\n Response_payload = [{\"name\": \"Google\"}]\n mock_json.return_value = Response_payload\n\n with patch('client.GithubOrgClient._public_repos_url',\n new_callable=PropertyMock) as mock_public:\n\n mock_public.return_value = \"hello/world\"\n test_class = GithubOrgClient('test')\n result = test_class.public_repos()\n\n check = 
[rep[\"name\"] for rep in Response_payload]\n self.assertEqual(result, check)\n\n mock_public.assert_called_once()\n mock_json.assert_called_once()", "def github_request(self, path, callback, access_token=None,\n method='GET', body=None, **args):\n args[\"access_token\"] = access_token\n url = tornado.httputil.url_concat(self._API_URL + path, args)\n logging.debug('request to ' + url)\n http = tornado.httpclient.AsyncHTTPClient()\n if body is not None:\n body = tornado.escape.json_encode(body)\n logging.debug('body is' + body)\n headers = {}\n headers['User-Agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.153 Safari/537.36'\n \n http.fetch(url, callback=self.async_callback(\n self._parse_response, callback), method=method, body=body, headers=headers)", "def test_client_retrieve(self):\n pass", "def test_get(self):\n client = RestClient(host=self.host, username='')\n rest_url = 'some/url/'\n \n # Mock good response\n with responses.RequestsMock() as rsps:\n rsps.add(responses.GET, f'{self.host}/{rest_url}', status=200,\n json={'value':\"good!\"})\n r = client.get(rest_url)", "def mocked_requests_scrapping_get(*args, **kwargs):\n class MockResponse:\n def __init__(self, json_data, status_code, url):\n self.content = json_data\n self.status_code = status_code\n self.url = url\n self.cookies = {\"JSESSIONID\": \"jkghhjgjhgfjgfgjg\"}\n self.encoding = \"utf-8\"\n\n def json(self):\n return self.json_data\n\n dn = os.path.dirname(os.path.realpath(__file__))\n for url, provider in {f\"{settings.BASE_URL}/eAnnuaire/formulaire?appelRetour=true\": \"form\",\n f\"{settings.BASE_URL}/eAnnuaire/resultat\": \"suivant\",\n f\"{settings.BASE_URL}/eAnnuaire/fiche\": \"detail\"}.items():\n if args[0].startswith(url):\n with open(os.path.join(dn, \"fixtures\", f\"{provider}.html\"), \"rb\") as fp:\n return MockResponse(fp.read(), 200, args[0])", "def get(self, *args, **kwargs):\r\n url = '{0}/user/repositories/'.format(self.parent.get_url())\r\n return http.Request('GET', url), parsers.parse_json", "def callback_repo_check(self, request, uri, headers, status_code=404):\n self.assertEqual(\n request.headers['Authorization'],\n 'token {0}'.format(self.OAUTH2_TOKEN)\n )\n # Handle the new \"rerun\" repo differently\n if self.TEST_RERUN_REPO in uri:\n status_code = 404\n return (status_code, headers, json.dumps({'message': 'testing'}))", "def test_public_repos(self):\n\n instance = GithubOrgClient('do')\n self.assertEqual(instance.org, self.org_payload)\n self.assertAlmostEqual(instance._public_repos_url,\n 'https://api.github.com/orgs/google/repos')\n self.assertEqual(instance.repos_payload, self.repos_payload)\n self.assertEqual(instance.public_repos(), self.expected_repos)\n self.assertEqual(instance.public_repos(\"sdsd\"), [])\n self.mock.assert_called()", "def local_remote_repository(svc_client, tmp_path, mock_redis, identity_headers, real_sync):\n from click.testing import CliRunner\n from git.config import GitConfigParser, get_config_path\n from marshmallow import pre_load\n\n from renku.cli import cli\n from renku.core.utils.contexts import chdir\n from renku.service.config import PROJECT_CLONE_NO_DEPTH\n from renku.service.serializers import cache\n\n # NOTE: prevent service from adding an auth token as it doesn't work with local repos\n def _no_auth_format(self, data, **kwargs):\n return data[\"git_url\"]\n\n orig_format_url = cache.ProjectCloneContext.format_url\n cache.ProjectCloneContext.format_url = _no_auth_format\n\n # NOTE: mock 
owner/project so service is happy\n def _mock_owner(self, data, **kwargs):\n data[\"owner\"] = \"dummy\"\n\n data[\"name\"] = \"project\"\n data[\"slug\"] = \"project\"\n\n return data\n\n orig_set_owner = cache.ProjectCloneContext.set_owner_name\n cache.ProjectCloneContext.set_owner_name = pre_load(_mock_owner)\n\n remote_repo_path = tmp_path / \"remote_repo\"\n remote_repo_path.mkdir()\n\n remote_repo = Repo.init(remote_repo_path, bare=True)\n remote_repo_checkout_path = tmp_path / \"remote_repo_checkout\"\n remote_repo_checkout_path.mkdir()\n\n remote_repo_checkout = remote_repo.clone(str(remote_repo_checkout_path))\n\n home = tmp_path / \"user_home\"\n home.mkdir()\n\n with modified_environ(HOME=str(home), XDG_CONFIG_HOME=str(home)):\n try:\n with GitConfigParser(get_config_path(\"global\"), read_only=False) as global_config:\n global_config.set_value(\"user\", \"name\", \"Renku @ SDSC\")\n global_config.set_value(\"user\", \"email\", \"[email protected]\")\n\n # NOTE: init \"remote\" repo\n runner = CliRunner()\n with chdir(remote_repo_checkout_path):\n\n result = runner.invoke(\n cli, [\"init\", \".\", \"--template-id\", \"python-minimal\", \"--force\"], \"\\n\", catch_exceptions=False\n )\n assert 0 == result.exit_code, format_result_exception(result)\n\n remote_name = remote_repo_checkout.active_branch.tracking_branch().remote_name\n remote = remote_repo_checkout.remotes[remote_name]\n result = remote.push()\n finally:\n try:\n shutil.rmtree(home)\n except OSError: # noqa: B014\n pass\n\n payload = {\"git_url\": f\"file://{remote_repo_path}\", \"depth\": PROJECT_CLONE_NO_DEPTH}\n response = svc_client.post(\"/cache.project_clone\", data=json.dumps(payload), headers=identity_headers)\n\n assert response\n assert {\"result\"} == set(response.json.keys()), response.json\n\n project_id = response.json[\"result\"][\"project_id\"]\n assert isinstance(uuid.UUID(project_id), uuid.UUID)\n\n try:\n yield svc_client, identity_headers, project_id, remote_repo, remote_repo_checkout\n finally:\n cache.ProjectCloneContext.format_url = orig_format_url\n cache.ProjectCloneContext.set_owner_name = orig_set_owner\n\n try:\n shutil.rmtree(remote_repo_path)\n except OSError: # noqa: B014\n pass\n\n try:\n shutil.rmtree(remote_repo_checkout_path)\n except OSError: # noqa: B014\n pass", "def _call_api(self, verb, url, **request_kwargs):\n api = 'https://api.github.com{}'.format(url)\n auth_headers = {'Authorization': 'token {}'.format(self.api_token)}\n headers = {**auth_headers, **request_kwargs.pop('headers', {})}\n return getattr(requests, verb)(api, headers=headers, **request_kwargs)", "async def test_cli(\n aiohttp_client,\n otupdate_config,\n monkeypatch,\n version_file_path,\n mock_name_synchronizer,\n):\n app = await buildroot.get_app(\n name_synchronizer=mock_name_synchronizer,\n system_version_file=version_file_path,\n config_file_override=otupdate_config,\n boot_id_override=\"dummy-boot-id-abc123\",\n )\n client = await aiohttp_client(app)\n return client", "def mock_requests_get(mocker, mocked_requests_get):\n\n def _requests_get(module, content, status):\n mock_func = mocker.patch(f'{module}.requests.get')\n mock_func.return_value = mocked_requests_get(content, status)\n\n return _requests_get", "def test_get_request_normal_response(self, mock_get):\n\n # Arrange\n # Construct our mock response object, giving it relevant expected behaviours\n mock_resp_instance = MockResponse({\"msg\": \"success\"}, 200, content=\"abc\")\n mock_get.return_value = mock_resp_instance\n\n # Act\n response = 
get_request_data(self.url, json_resp=False)\n\n # Assert that the request-response cycle completed successfully.\n self.assertEqual(mock_resp_instance.status_code, 200)\n self.assertEqual(response, mock_resp_instance)", "def mock_base_api_client():\n def _make_mock_api_client(status_code=200,\n response=None,\n component='reana-server'):\n mock_http_response, mock_response = Mock(), Mock()\n mock_http_response.status_code = status_code\n mock_http_response.raw_bytes = str(response).encode()\n mock_response = response\n reana_server_client = make_mock_api_client(\n component)(mock_response, mock_http_response)\n reana_client_server_api = Client(component)\n reana_client_server_api._client = reana_server_client\n return reana_client_server_api\n return _make_mock_api_client", "def mock_requests_get(request, monkeypatch, mock_get_args):\n\n def mock_get(url, *args, **kwargs):\n mock_get_args(url, *args, **kwargs)\n\n mock_resp = MagicMock()\n if url == qml.data.data_manager.FOLDERMAP_URL:\n json_data = _folder_map\n elif url == qml.data.data_manager.DATA_STRUCT_URL:\n json_data = _data_struct\n else:\n json_data = None\n\n mock_resp.json.return_value = json_data\n if hasattr(request, \"param\"):\n mock_resp.content = request.param\n\n return mock_resp\n\n monkeypatch.setattr(qml.data.data_manager, \"get\", mock_get)\n\n return mock_get", "def test_request(self):\n client = RestClient(host=self.host, username='')\n rest_url = 'some/url/'\n\n # Mock good get response\n with responses.RequestsMock() as rsps:\n rsps.add(responses.GET, f'{self.host}/{rest_url}', status=200,\n json={'value':\"good!\"})\n r = client.request('get', rest_url)\n assert r.status_code == 200\n assert r.json()['value'] == 'good!'\n \n # Mock bad get response\n with responses.RequestsMock() as rsps:\n rsps.add(responses.GET, f'{self.host}/{rest_url}', status=401,\n json={'value':\"bad!\"})\n with raises(requests.HTTPError):\n r = client.request('get', rest_url)\n r = client.request('get', rest_url, checkstatus=False)\n assert r.status_code == 401\n assert r.json()['value'] == 'bad!'", "def mocked_requests_get():\n\n class MockResponse:\n def __init__(self, _content, _status):\n self.content = _content\n self.status_code = _status\n\n def content(self):\n return self.content\n\n return MockResponse", "def http_client(\n env=None, sim_types=None, job_run_mode=None, empty_work_dir=True, port=None\n):\n global _client\n t = sim_types or CONFTEST_DEFAULT_CODES\n if t:\n if isinstance(t, (tuple, list)):\n t = \":\".join(t)\n env.SIREPO_FEATURE_CONFIG_SIM_TYPES = t\n\n from pykern import pkconfig\n\n pkconfig.reset_state_for_testing(env)\n if _client:\n return _client\n\n from pykern import pkunit\n\n if empty_work_dir:\n pkunit.empty_work_dir()\n else:\n pkunit.work_dir()\n setup_srdb_root(cfg=env)\n\n from sirepo import modules\n\n modules.import_and_init(\"sirepo.uri\")\n _client = _TestClient(env=env, job_run_mode=job_run_mode, port=port)\n return _client", "def test_activate_repositories(self):\n self.Mokes.add_repo_to_pi()\n with self.logged_in(access_token=\"nbiousndegoijubdognlksdngndsgmngds\"):\n index = self.client.get(\"/\").data.decode()\n self.assertIn('href=\"/repo/PerseusDl/canonical-greekLit\"', index, \"GreekLit link should be there\")\n self.assertIn('href=\"/repo/PerseusDl/canonical-greekLit\"', index, \"LatinLit link should be there\")\n index = BeautifulSoup(self.client.get(\"/\").data.decode(), 'html.parser')\n self.assertEqual(\n len(index.select(\"#repos .repo-menu-card a\")), 0,\n \"There should be no 
active repo in menu\"\n )\n\n activate = self.client.put(\"/api/hook/v2.0/user/repositories/PerseusDl/canonical-greekLit\")\n self.assertEqual(activate.status_code, 200, \"Request should be positive\")\n\n index = BeautifulSoup(self.client.get(\"/\").data.decode(), 'html.parser')\n self.assertEqual(\n index.select(\"#repos .repo-menu-card a\")[0][\"href\"], \"/repo/PerseusDl/canonical-greekLit\",\n \"Active repo should be in menu\"\n )\n\n with self.logged_in(access_token=\"nbiousndegoijubdognlksdngndsgmngds\"):\n \" Relogging should be okay \"\n index = BeautifulSoup(self.client.get(\"/\").data.decode(), 'html.parser')\n self.assertEqual(\n index.select(\"#repos .repo-menu-card a\")[0][\"href\"], \"/repo/PerseusDl/canonical-greekLit\",\n \"Active repo should be in menu\"\n )\n\n # We can switch off\n activate = self.client.put(\"/api/hook/v2.0/user/repositories/PerseusDl/canonical-greekLit\")\n self.assertEqual(activate.status_code, 200, \"Request should be positive\")\n\n index = BeautifulSoup(self.client.get(\"/\").data.decode(), 'html.parser')\n self.assertEqual(len(index.select(\"#repos .repo-menu-card a\")), 0, \"There should be no active repo in menu\")\n\n # Wrong repo is 404\n activate = self.client.put(\"/api/hook/v2.0/user/repositories/PerseusDl/canonical-greekLit-fake\")\n self.assertEqual(activate.status_code, 404, \"Request should be positive\")", "def svc_protected_old_repo(svc_synced_client, it_protected_repo_url):\n svc_client, identity_headers, cache, user = svc_synced_client\n\n payload = {\n \"git_url\": it_protected_repo_url,\n \"depth\": 1,\n }\n\n response = svc_client.post(\"/cache.project_clone\", data=json.dumps(payload), headers=identity_headers)\n project_id = response.json[\"result\"][\"project_id\"]\n\n yield svc_client, identity_headers, project_id, cache, user", "def setUp(self):\n\n c = Client()\n self.response = c.get('/')\n self.content = self.response.content", "def get(self, request, repo_id, format=None):\n\n # argument check\n path = request.GET.get('p', None)\n if not path:\n error_msg = 'p invalid.'\n return api_error(status.HTTP_400_BAD_REQUEST, error_msg)\n\n name = request.GET.get('name', None)\n if not name:\n error_msg = 'name invalid.'\n return api_error(status.HTTP_400_BAD_REQUEST, error_msg)\n\n # recourse check\n repo = get_repo(repo_id)\n if not repo:\n error_msg = 'Library %s not found.' 
% repo_id\n return api_error(status.HTTP_404_NOT_FOUND, error_msg)\n\n # permission check\n if not check_folder_permission(request, repo_id, path) or \\\n not request.user.permissions.can_add_repo():\n error_msg = 'Permission denied.'\n return api_error(status.HTTP_403_FORBIDDEN, error_msg)\n\n username = request.user.username\n password = request.GET.get('password', '')\n if repo.encrypted:\n # check password for encrypted repo\n if not password:\n error_msg = 'password invalid.'\n return api_error(status.HTTP_400_BAD_REQUEST, error_msg)\n else:\n try:\n syncwerk_api.set_passwd(repo_id, username, password)\n except RpcsyncwerkError as e:\n if e.msg == 'Bad arguments':\n error_msg = 'Bad arguments'\n return api_error(status.HTTP_400_BAD_REQUEST, error_msg)\n elif e.msg == 'Incorrect password':\n error_msg = _(u'Wrong password')\n return api_error(status.HTTP_400_BAD_REQUEST, error_msg)\n elif e.msg == 'Internal server error':\n error_msg = _(u'Internal server error')\n return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)\n else:\n error_msg = _(u'Decrypt library error')\n return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)\n\n # create sub-lib for encrypted repo\n try:\n if is_org_context(request):\n org_id = request.user.org.org_id\n sub_repo_id = syncwerk_api.create_org_virtual_repo(\n org_id, repo_id, path, name, name, username, password)\n else:\n sub_repo_id = syncwerk_api.create_virtual_repo(\n repo_id, path, name, name, username, password)\n except RpcsyncwerkError as e:\n logger.error(e)\n error_msg = 'Internal Server Error'\n return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)\n else:\n # create sub-lib for common repo\n try:\n if is_org_context(request):\n org_id = request.user.org.org_id\n sub_repo_id = syncwerk_api.create_org_virtual_repo(\n org_id, repo_id, path, name, name, username)\n else:\n sub_repo_id = syncwerk_api.create_virtual_repo(\n repo_id, path, name, name, username)\n except RpcsyncwerkError as e:\n logger.error(e)\n error_msg = 'Internal Server Error'\n return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)\n\n # return Response({'sub_repo_id': sub_repo_id})\n resp = {'sub_repo_id': sub_repo_id}\n return api_response(status.HTTP_200_OK, '', resp)", "def github_api(request):\n if not request.startswith('https://api.github.com/'):\n request = 'https://api.github.com/' + request\n d = time.time() - github_api.last_time\n if d < 1:\n time.sleep(1 - d) # wait at least one second between GitHub API calls\n key = os.getenv('GITHUB_API_KEY')\n req = Request(request)\n req.add_header('User-Agent', github_api.user_agent)\n if key is not None:\n req.add_header('Authorization', 'token %s' % key)\n content = ''\n try:\n response = urlopen(req)\n content = response.read().decode()\n except HTTPError as e:\n print(request)\n print(e.reason)\n print(e.info())\n raise(e)\n github_api.last_time = time.time()\n return json.loads(content)", "def _get_client(self):\n return Github(\n base_url=github_trigger.api_endpoint(),\n login_or_token=self.auth_token if self.auth_token else github_trigger.client_id(),\n password=None if self.auth_token else github_trigger.client_secret(),\n timeout=5,\n )", "def test_legacy_client(self):\n response = self.legacy_client.request('GET', urljoin(BASE_URL, MOCK_ENDPOINT))\n self.assertEquals(response.content, RESPONSE)", "def enable_api_call():\n with mock.patch.object(\n github_api._PathMetadata, '_query_github', _original_query_github\n ):\n yield", "async def repository(self, *args, 
**kwargs):\n\n return await self._makeApiCall(self.funcinfo[\"repository\"], *args, **kwargs)", "def test_update_repository_token(self):\n self.Mokes.add_repo_to_pi()\n response = self.client.get(\"/repo/PerseusDl/canonical-latinLit\").data.decode()\n index = BeautifulSoup(response, 'html.parser')\n self.assertEqual(\n len(index.select(\".travis_env\")), 0, \"Sha should not be shown when not connected\"\n )\n\n # Update the REPO !\n response = self.client.patch(\"/api/hook/v2.0/user/repositories/PerseusDl/canonical-latinLit/token\")\n self.assertEqual(response.status_code, 401, \"Request Forbidden\")\n\n with self.logged_in(access_token=\"nbiousndegoijubdognlksdngndsgmngds\"):\n response = self.client.get(\"/repo/PerseusDl/canonical-latinLit\").data.decode()\n index = BeautifulSoup(response, 'html.parser')\n travis_env1 = index.select(\".travis_env\")\n self.assertEqual(len(travis_env1), 1, \"Sha should be shown when not connected\")\n self.assertEqual(len(travis_env1[0].text), 40)\n\n # Update the REPO !\n response = loads(self.client.patch(\"/api/hook/v2.0/user/repositories/PerseusDl/canonical-latinLit/token\")\\\n .data.decode())\n self.assertEqual(response, {\"status\": True})\n\n response = self.client.get(\"/repo/PerseusDl/canonical-latinLit\").data.decode()\n index = BeautifulSoup(response, 'html.parser')\n travis_env2 = index.select(\".travis_env\")\n self.assertEqual(len(travis_env2), 1, \"Sha should be shown when not connected\")\n self.assertEqual(len(travis_env2[0].text), 40)\n self.assertNotEqual(travis_env1[0].text, travis_env2[0].text, \"Sha should be different\")", "async def test_request(client, monkeypatch, caplog):\n mock_refresh_token_called = 0\n\n async def mock_refresh_token():\n nonlocal mock_refresh_token_called\n mock_refresh_token_called += 1\n\n monkeypatch.setattr(\n client._auth_client, 'refresh_token', mock_refresh_token)\n\n async def mock_valid_token_set():\n return False\n\n monkeypatch.setattr(client, 'valid_token_set', mock_valid_token_set)\n\n resp_text = 'ohai'\n\n with aioresponses() as mocked:\n mocked.get(conftest.API_URL, status=200, body=resp_text)\n resp = await client.request('get', conftest.API_URL)\n\n assert resp == resp_text\n\n assert 1 == mock_refresh_token_called\n assert 1 == len(mocked.requests)\n request = mocked.requests.popitem()[1][0]\n authorization_header = request.kwargs['headers']['Authorization']\n assert authorization_header == f'Bearer {client._auth_client.token}'\n assert 2 == len(caplog.records)", "def test_patch_o_auth_client(self):\n pass", "def test_org(self, input, mock):\n test_class = GithubOrgClient(input)\n mock.side_effect = Exception()\n try:\n test_class.org()\n except Exception as e:\n mock.assert_called_once_with(\n f'https://api.github.com/orgs/{input}')", "def test_offline_repo_template(base_command, mock_git):\n base_command.tools.git = mock_git\n\n mock_repo = mock.MagicMock()\n mock_remote = mock.MagicMock()\n mock_remote_head = mock.MagicMock()\n\n # Git returns a Repo, that repo can return a remote, and it has\n # heads that can be accessed. 
However, calling fetch on the remote\n # will cause a git error (error code 128).\n base_command.tools.git.Repo.return_value = mock_repo\n mock_repo.remote.return_value = mock_remote\n mock_remote.refs.__getitem__.return_value = mock_remote_head\n mock_remote.fetch.side_effect = git_exceptions.GitCommandError(\"git\", 128)\n\n cached_path = cookiecutter_cache_path(\n \"https://example.com/magic/special-template.git\"\n )\n\n # Update the cache\n cached_template = base_command.update_cookiecutter_cache(\n template=\"https://example.com/magic/special-template.git\", branch=\"special\"\n )\n\n # The cookiecutter cache location will be interrogated.\n base_command.tools.git.Repo.assert_called_once_with(cached_path)\n\n # The origin of the repo was fetched\n mock_repo.remote.assert_called_once_with(name=\"origin\")\n mock_remote.fetch.assert_called_once_with()\n\n # The right branch was accessed\n mock_remote.refs.__getitem__.assert_called_once_with(\"special\")\n\n # The remote head was checked out.\n mock_remote_head.checkout.assert_called_once_with()\n\n # The template that will be used is the original URL\n assert cached_template == cached_path", "def github_client(self):\n if getattr(self, \"_github_client\", None) is None:\n self._github_client = AsyncGitHubClient(self.log, self.client)\n return self._github_client", "def mocked_requests_get(*args, **kwargs):\n response = {'message': '',\n 'data': {\n 'cti_token': 'secret-cti-token',\n 'passivetotal_token': 'secret-passivetotal-token',\n 'passivetotal_user': '[email protected]',\n 'shodan_token': 'secret-shodan-token'\n }\n }\n return MockResponse(json.dumps(response), 200, HEADERS)", "def test_get_repo_pulled(self):\n repo = Repository(\"https://github.com/qcoumes/gitload_test.git\")\n self.assertTrue(repo.get_repo())\n self.assertTrue(os.path.exists(MEDIA_ROOT+\"/gitload_test\"))\n if (os.path.exists(MEDIA_ROOT+\"/gitload_test\")):\n shutil.rmtree(MEDIA_ROOT+\"/gitload_test\")", "def test_client_can_load_client_requests_directly(self):\n\n req = self.httpbin.get_request_data('get_my_ip')\n self.assertEqual(req, self.httpbin.client['get_my_ip'])\n req = self.httpbin.get_request_data('get_my_headers')\n self.assertEqual(req, self.httpbin.client['get_my_headers'])\n\n req = self.httpbin_2.get_request_data('get_my_ip')\n self.assertEqual(req, self.httpbin_2.client['get_my_ip'])\n req = self.httpbin_2.get_request_data('get_my_headers')\n self.assertEqual(req, self.httpbin_2.client['get_my_headers'])", "def test_get_git_wrapper_returns_the_wrapper(tmp_path: str) -> None:\n Repo.init(tmp_path)\n repo = Repository(str(tmp_path))\n assert repo.get_git_wrapper() is not None\n assert repo.get_git_wrapper().git_repo == GitWrapper(tmp_path).git_repo", "def test_public_repos_with_license(self):\n instance = GithubOrgClient(\"do\")\n self.assertEqual(instance.org, self.org_payload)\n self.assertAlmostEqual(instance._public_repos_url,\n 'https://api.github.com/orgs/google/repos')\n self.assertEqual(instance.repos_payload, self.repos_payload)\n self.assertEqual(instance.public_repos(), self.expected_repos)\n self.assertEqual(instance.public_repos(\"nolicence\"), [])\n self.assertEqual(instance.public_repos(\n \"apache-2.0\"), self.apache2_repos)\n self.mock.assert_called()", "def execute_request(path):\n headers = {\n \"Accept\": \"application/vnd.github.v3+json\"\n }\n url = \"https://api.github.com\" + path\n\n # GET https://api.github.com/<path> Accept: \"application/vnd.github.v3+json\"\n\n response = requests.get(url, headers=headers, 
timeout=GLOBAL_TIMEOUT)\n\n response.raise_for_status() # Raise a RequestException if we failed, and trigger retry\n\n return response.json()", "def get_repo_data(repo, session=None):\n url = f'{GITHUB_API_URL}/repos/{repo}'\n return get_whole_response_as_json(url, session)", "def obtain(self, *args: Any, **kwargs: Any) -> None:\n self.ensure_dir()\n\n url = self.url\n\n self.log.info(\"Cloning.\")\n # todo: log_in_real_time\n self.cmd.clone(\n url=url,\n progress=True,\n depth=1 if self.git_shallow else None,\n config={\"http.sslVerify\": False} if self.tls_verify else None,\n log_in_real_time=True,\n )\n\n self.log.info(\"Initializing submodules.\")\n self.cmd.submodule.init(\n log_in_real_time=True,\n )\n self.cmd.submodule.update(\n init=True,\n recursive=True,\n log_in_real_time=True,\n )\n\n self.set_remotes(overwrite=True)", "def setup_class(cls):\n cls.mock_get_patcher = patch('project.services.requests.get')\n cls.mock_get = cls.mock_get_patcher.start()", "def _create_repository(self, github=True, repository_plan='public-org'):\n if github:\n account = HostingServiceAccount(service_name='github',\n username='myuser')\n\n def _http_get_user(_self, url, *args, **kwargs):\n self.assertEqual(url, 'https://api.github.com/user')\n\n payload = b'{}'\n headers = {\n str('X-OAuth-Scopes'): str('admin:repo_hook, repo, user'),\n }\n\n return HostingServiceHTTPResponse(\n request=HostingServiceHTTPRequest(url=url),\n url=url,\n data=payload,\n headers=headers,\n status_code=200)\n\n service = account.service\n self.spy_on(service.client.http_get,\n call_fake=_http_get_user)\n\n service.authorize('myuser', 'mypass', None)\n self.assertTrue(account.is_authorized)\n\n service.client.http_get.unspy()\n\n repository = self.create_repository()\n repository.hosting_account = account\n repository.extra_data['repository_plan'] = repository_plan\n\n if repository_plan == 'public':\n repository.extra_data['github_public_repo_name'] = \\\n 'mypublicrepo'\n elif repository_plan == 'public-org':\n repository.extra_data['github_public_org_name'] = 'mypublicorg'\n repository.extra_data['github_public_org_repo_name'] = \\\n 'mypublicorgrepo'\n elif repository_plan == 'private':\n repository.extra_data['github_private_repo_name'] = \\\n 'myprivaterepo'\n elif repository_plan == 'private-org':\n repository.extra_data['github_private_org_name'] = \\\n 'myprivateorg'\n repository.extra_data['github_private_org_repo_name'] = \\\n 'myprivateorgrepo'\n\n repository.save()\n return repository\n else:\n return self.create_repository()", "def mock_status(request):\n\n install = AsyncMock()\n install.contract = CONTRACT\n install.status = request.param\n\n with patch(\"pyprosegur.installation.Installation.retrieve\", return_value=install):\n yield", "def register_repo_check(self, body):\n httpretty.register_uri(\n httpretty.GET,\n re.compile(\n '^{url}repos/{org}/({repo}|{repo_rerun})$'.format(\n url=self.URL,\n org=self.ORG,\n repo=re.escape(self.TEST_REPO),\n repo_rerun=re.escape(self.TEST_RERUN_REPO)\n )\n ),\n body=body\n )", "def __init__(self, user, proj):\n auth_hdr = {\"Authorization\" : \"token \" + input(\"Enter PA token: \")}\n self._session = requests.Session()\n self._session.headers.update(auth_hdr)\n self._base = self.API_ROOT + \"/repos/{}/{}\".format(user, proj)", "def test_authenticated_request(self):\n http = FakeHttp([(FakeResponse(200), {})])\n self.mock.Http.return_value = http\n self.mock.read_data_from_file.return_value = 'cached auth token'\n response, _ = http_utils.request('https://url/', 
configuration=self.config)\n\n # Ensure that all expected requests were made.\n self.assertEqual(http.replies, [])\n\n self.assertEqual(http.last_headers, {\n 'Authorization': 'cached auth token',\n 'User-Agent': 'clusterfuzz-reproduce'\n })\n self.assertEqual(response.status, 200)", "def _http_client_origin(self):\n self._setup_http_mock()\n pipeline_builder = self.sdc_builder.get_pipeline_builder()\n http_client = pipeline_builder.add_stage('HTTP Client', type='origin')\n http_client.resource_url = f'{self.http_mock.pretend_url}/{self.dataset}'\n http_client.json_content = 'ARRAY_OBJECTS'\n return http_client, pipeline_builder", "def callback_repo_create(self, request, uri, headers, status_code=201):\n # Disabling unused-argument because this is a callback with\n # required method signature.\n # pylint: disable=unused-argument\n self.assertEqual(\n request.headers['Authorization'],\n 'token {0}'.format(self.OAUTH2_TOKEN)\n )\n repo_dict = json.loads(request.body)\n self.assertTrue(\n repo_dict['name'] in [self.TEST_REPO, self.TEST_RERUN_REPO]\n )\n self.assertEqual(repo_dict['description'], self.TEST_DESCRIPTION)\n self.assertEqual(repo_dict['private'], True)\n\n return (status_code, headers, json.dumps({'html_url': 'testing'}))", "def __init__(self, client, use_stubs=True):\n super().__init__(client, use_stubs)", "def __init__(self, client, use_stubs=True):\n super().__init__(client, use_stubs)", "def __init__(self, client, use_stubs=True):\n super().__init__(client, use_stubs)", "def __init__(self, client, use_stubs=True):\n super().__init__(client, use_stubs)", "def __init__(self, client, use_stubs=True):\n super().__init__(client, use_stubs)", "def patch_client(target, mock_client=None):\n with mock.patch(target) as client_getter:\n client = mock_client or MockBox()\n client_getter.return_value = client\n yield client", "def _client(self) -> httpx.Client:\n return httpx.Client(\n base_url=self._base_url,\n headers=self._authorization_headers,\n proxies=self._proxies,\n )", "def _init_raw_client(self) -> None:\n if self.credentials:\n auth = HTTPBasicAuth(self.credentials['username'], self.credentials['password'])\n else:\n auth = None\n base_url = \"http://\" if self.untrusted else \"https://\"\n base_url += self.url\n self.raw_client = client.DockerRegistryClient(base_url=base_url, auth=auth)", "def mock_moira_client(mocker):\n return mocker.patch(\"moira_lists.moira_api.get_moira_client\", autospec=True)", "def test_client_can_load_client_page_requests_directly(self):\n\n req = self.httpbin_3.get_request_data('get_my_ip')\n\n self.assertEqual(req, self.httpbin_3.client[\"homepage\"]['get_my_ip'])\n req = self.httpbin_3.get_request_data('test_requests_patch_method')\n self.assertEqual(req, self.httpbin_3.client[\"homepage\"]['test_requests_patch_method'])\n req = self.httpbin_3.get_request_data('test_requests_delete_method')\n self.assertEqual(req, self.httpbin_3.client[\"second_page\"]['test_requests_delete_method'])\n\n req = self.httpbin_4.get_request_data('get_my_ip')\n self.assertEqual(req, self.httpbin_4.client['get_my_ip'])\n req = self.httpbin_4.get_request_data('get_user_my_agent')\n self.assertEqual(req, self.httpbin_4.client['get_user_my_agent'])\n req = self.httpbin_4.get_request_data('test_requests_put_method')\n self.assertEqual(req, self.httpbin_4.client['test_requests_put_method'])\n req = self.httpbin_4.get_request_data('test_requests_post_method')\n self.assertEqual(req, self.httpbin_4.client['test_requests_post_method'])", "async def 
test_forward_request_onboarded_noauth_get(\n hassio_noauth_client, aioclient_mock: AiohttpClientMocker, path: str\n) -> None:\n aioclient_mock.get(f\"http://127.0.0.1/{path}\", text=\"response\")\n\n resp = await hassio_noauth_client.get(f\"/api/hassio/{path}\")\n\n # Check we got right response\n assert resp.status == HTTPStatus.OK\n body = await resp.text()\n assert body == \"response\"\n\n # Check we forwarded command\n assert len(aioclient_mock.mock_calls) == 1\n # We only expect a single header.\n assert aioclient_mock.mock_calls[0][3] == {\"X-Hass-Source\": \"core.http\"}", "def create_api_handler(self):\n self.github = github3.login(username=GH_USER, password=GH_PASSWORD)\n if hasattr(self.github, 'set_user_agent'):\n self.github.set_user_agent('Jonathan Reeve: http://jonreeve.com')\n self.org = self.github.organization(login='Git-Lit')\n # FIXME: logging\n print(\"ratelimit: \" + str(self.org.ratelimit_remaining))", "def test_create_files_with_gitignore(self, mock_get):\n\n mock_resp = mock.Mock()\n mock_resp.raise_for_status = mock.Mock()\n mock_resp.content = \"IGNORE ME\\n\"\n mock_get.return_value = mock_resp\n\n opts = mock.Mock()\n opts.repo = self.repo\n opts.create_version_file = True\n opts.source = 'src'\n opts.version = '0.0.1'\n opts.version_file = None\n opts.org = \"ORG\"\n opts.desc = \"DESCRIPTION\"\n opts.templates = []\n opts.test_mode = False\n opts.history_file = 'HISTORY.md'\n opts.package = 'unittests'\n opts.develop = 'develop'\n opts.requirements = 'requirements.txt'\n opts.pypi_package_name = 'pypi.package.unittest'\n opts.python = 'python3'\n opts.gitignore_url = \"GIT_IGNORE_URL\"\n opts.add_gitignore = True\n opts.test_requirements = 'test-requirements.txt'\n version = os.path.join(self.repo, 'src', 'unittests', '__init__.py')\n os.system('rm -f {}'.format(version))\n create_files(opts)\n\n dir_list = os.listdir(self.repo)\n self.failUnless('cirrus.conf' in dir_list)\n self.failUnless('HISTORY.md' in dir_list)\n self.failUnless('MANIFEST.in' in dir_list)\n self.failUnless('setup.py' in dir_list)\n self.failUnless('.gitignore' in dir_list)\n\n gitignore = os.path.join(self.repo, '.gitignore')\n with open(gitignore, 'r') as handle:\n content = handle.read()\n self.assertEqual(content.strip(), \"IGNORE ME\")", "def test_pull_from_origin(tmpdir):\n gitwrapper.clone_from('git://github.com/Tinche/bower-cache', tmpdir)\n gitwrapper.pull_from_origin(tmpdir)", "def _request(self, url, **kwargs):\n headers = {'PRIVATE-TOKEN': self.token}\n response = make_request(self.base_url + url, headers=headers, **kwargs)\n logging.info('Requested: {0}'.format(url))\n logging.info('Method: {0}'.format(kwargs.get('method', 'GET')))\n logging.info(response.content)\n return json.loads(response.content)", "def get(self, repo: Repository):\n cache_key = self.cache_key.format(repo_id=repo.id.hex)\n\n result = redis.get(cache_key)\n if result is None:\n vcs = repo.get_vcs()\n if not vcs:\n return self.respond([])\n\n vcs.ensure()\n result = vcs.get_known_branches()\n redis.setex(cache_key, json.dumps(result), self.cache_expire)\n else:\n result = json.loads(result)\n\n return self.respond([{\"name\": r} for r in result])", "def api_client(api_client):\n assert api_client().get(\"/bin/anything/bin\").status_code == 200\n\n return api_client(disable_retry_status_list={404})", "def create_mock_client(self, fake_request_method):\n class FakeHttpLib2(object):\n pass\n\n FakeHttpLib2.request = fake_request_method\n mock_client = self.mox.CreateMock(DNSaasClient)\n mock_client.http_pool = 
pools.Pool()\n mock_client.http_pool.create = FakeHttpLib2\n mock_client.auth_token = 'token'\n return mock_client", "def setUp(self):\n self.client = api.Client(config.get_config(), api.json_handler)", "def client(pure_client, response):\n with requests_mock.Mocker() as m:\n pure_client.m = m\n yield pure_client", "def _test__import_api(self, response):\n requests_get_mock = MagicMock()\n requests_get_mock.return_value.status_code = 200\n requests_get_mock.return_value.text = response\n with patch(\"requests.get\", requests_get_mock):\n proxmox._import_api()\n self.assertEqual(proxmox.api, [{\"info\": {}}])\n return", "def test_for_client():", "def __init__(self, url='', credentials=None,\n get_credentials=True, http=None, model=None,\n log_request=False, log_response=False,\n credentials_args=None, default_global_params=None,\n additional_http_headers=None, response_encoding=None):\n url = url or self.BASE_URL\n super(SourceV1, self).__init__(\n url, credentials=credentials,\n get_credentials=get_credentials, http=http, model=model,\n log_request=log_request, log_response=log_response,\n credentials_args=credentials_args,\n default_global_params=default_global_params,\n additional_http_headers=additional_http_headers,\n response_encoding=response_encoding)\n self.projects_repos_aliases_files = self.ProjectsReposAliasesFilesService(self)\n self.projects_repos_aliases = self.ProjectsReposAliasesService(self)\n self.projects_repos_files = self.ProjectsReposFilesService(self)\n self.projects_repos_revisions_files = self.ProjectsReposRevisionsFilesService(self)\n self.projects_repos_revisions = self.ProjectsReposRevisionsService(self)\n self.projects_repos_workspaces_files = self.ProjectsReposWorkspacesFilesService(self)\n self.projects_repos_workspaces_snapshots_files = self.ProjectsReposWorkspacesSnapshotsFilesService(self)\n self.projects_repos_workspaces_snapshots = self.ProjectsReposWorkspacesSnapshotsService(self)\n self.projects_repos_workspaces = self.ProjectsReposWorkspacesService(self)\n self.projects_repos = self.ProjectsReposService(self)\n self.projects = self.ProjectsService(self)\n self.v1 = self.V1Service(self)", "def test_06_get(self, mock_gupycurl,\n mock_guexecurl, mock_msg):\n self._init()\n geturl = udocker.GetURL()\n self.assertRaises(TypeError, geturl.get)\n #\n geturl = udocker.GetURL()\n geturl._geturl = type('test', (object,), {})()\n geturl._geturl.get = self._get\n self.assertEqual(geturl.get(\"http://host\"), \"http://host\")", "def _mock_cache_sync(repo):\n from renku.service.controllers.api import mixins\n\n current_head = repo.head.ref\n\n def _mocked_repo_reset(self, project):\n \"\"\"Mock repo reset to work with mocked renku save.\"\"\"\n repo.git.reset(\"--hard\", current_head)\n\n reset_repo_function = mixins.RenkuOperationMixin.reset_local_repo\n mixins.RenkuOperationMixin.reset_local_repo = _mocked_repo_reset\n\n try:\n yield\n finally:\n mixins.RenkuOperationMixin.reset_local_repo = reset_repo_function", "def test_read_o_auth_client(self):\n pass", "def test_repository(self):\n os.environ['GITHUB_REPOSITORY'] = 'repo/owner'\n self.assertIsNone(self.env.git_url)", "def _external_request(self, method, url, *args, **kwargs):\n self.last_url = url\n if url in self.responses.keys() and method == 'get':\n return self.responses[url] # return from cache if its there\n\n headers = kwargs.pop('headers', None)\n custom = {'User-Agent': useragent}\n if headers:\n headers.update(custom)\n kwargs['headers'] = headers\n else:\n kwargs['headers'] = custom\n\n response = 
getattr(requests, method)(url, *args, **kwargs)\n\n if self.verbose:\n print(\"Got Response: %s\" % url)\n\n if response.status_code == 503:\n raise SkipThisService(\"Service returned 503 - Temporarily out of service.\")\n\n if method == 'get':\n self.responses[url] = response # cache for later\n\n self.last_raw_response = response\n return response", "def repository(\n project: Project,\n mocker: MockerFixture,\n repository_pypi_json: Path,\n local_finder: type[None],\n) -> TestRepository:\n rv = TestRepository([], project.environment, repository_pypi_json)\n mocker.patch.object(project, \"get_repository\", return_value=rv)\n return rv" ]
[ "0.6668953", "0.6600891", "0.63424224", "0.62350804", "0.61830807", "0.6153527", "0.61463815", "0.6108667", "0.5978955", "0.59318805", "0.5927703", "0.59109044", "0.58161503", "0.5816118", "0.57876474", "0.5726691", "0.57236135", "0.5717952", "0.5676887", "0.5649835", "0.56439185", "0.56434613", "0.562346", "0.561513", "0.56149995", "0.5603356", "0.5603286", "0.55754304", "0.5515164", "0.55018806", "0.54626846", "0.5462201", "0.54488194", "0.5444237", "0.54383856", "0.54347694", "0.5425958", "0.54180086", "0.5412764", "0.5406748", "0.53983176", "0.5390254", "0.5389898", "0.5389336", "0.53870106", "0.53799856", "0.536067", "0.5354612", "0.53538764", "0.5348888", "0.5335015", "0.5328372", "0.53256696", "0.53197", "0.53192437", "0.531063", "0.5305588", "0.52893806", "0.52889496", "0.5287789", "0.52862155", "0.52758557", "0.5260315", "0.5258327", "0.52515525", "0.5241187", "0.5238898", "0.52386945", "0.52364296", "0.5235245", "0.52339244", "0.5220725", "0.5217012", "0.5217012", "0.5217012", "0.5217012", "0.5217012", "0.521362", "0.52088875", "0.5202408", "0.5197252", "0.51965696", "0.51937085", "0.51931435", "0.51902175", "0.5183949", "0.51791275", "0.5173892", "0.5171725", "0.5169946", "0.5168433", "0.5168347", "0.51654476", "0.5158792", "0.5154198", "0.5148507", "0.5146802", "0.5136024", "0.51334924", "0.5130764", "0.51251453" ]
0.0
-1
Supports sum([dataset1, dataset2, dataset3]).
def __radd__(self, other):
    if other == 0:
        return self
    else:
        return self.__add__(other)
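This record documents the standard Python idiom for making the built-in sum() work on custom objects: sum() starts its accumulation from 0, so the first addition is 0 + dataset, which dispatches to dataset.__radd__(0); returning self in that case lets the remaining datasets fold together through __add__. A minimal runnable sketch of that idiom follows; the toy Dataset class and its values field are illustrative assumptions, not part of the original record.

class Dataset:
    """Toy container used only to illustrate the __radd__ / sum() idiom."""

    def __init__(self, values):
        self.values = list(values)

    def __add__(self, other):
        # Element-wise addition of two equally sized datasets.
        return Dataset(a + b for a, b in zip(self.values, other.values))

    def __radd__(self, other):
        # sum() begins with 0; returning self here makes
        # sum([d1, d2, d3]) reduce the datasets with __add__.
        if other == 0:
            return self
        return self.__add__(other)


total = sum([Dataset([1, 2]), Dataset([3, 4]), Dataset([5, 6])])
print(total.values)  # -> [9, 12]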
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sum_datasets(dslist):\n #Assume all same length, same axis values\n newds = zeros_like(dslist[0])\n AddCifMetadata.add_standard_metadata(newds)\n title_info = \"\"\n proc_info = \"\"\"This dataset was created by summing points from multiple datasets. Points were \n assumed to coincide exactly. Data reduction information for the individual source datasets is as follows:\"\"\"\n for one_ds in dslist:\n newds += one_ds\n title_info = title_info + one_ds.title + \"+\"\n proc_info += \"\\n\\n===Dataset %s===\\n\" % str(one_ds.title) \n try:\n proc_info += one_ds.harvest_metadata(\"CIF\")[\"_pd_proc_info_data_reduction\"]\n except KeyError,AttributeError:\n pass\n newds.title = title_info[:-1] #chop off trailing '+'\n newds.axes[0] = dslist[0].axes[0]\n # Add some basic metadata based on metadata of first dataset\n newds.copy_cif_metadata(dslist[0])\n newds.add_metadata('_pd_proc_info_data_reduction',proc_info,\"CIF\")\n return newds", "def calculate_shares_cost_sum(dataset):\n cost_sum = 0\n for data in dataset:\n cost_sum += data[1]\n return cost_sum", "def add(self,*datas):\n\t\tresult = sum(datas)\n\t\treturn result", "def sum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.sum,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )", "def _generalised_sum(data, func):\n count = len(data)\n if func is None:\n total = math.fsum(data)\n else:\n total = math.fsum(func(x) for x in data)\n return count, total", "def sum(data, **kwargs):\n return Component(\n \"Sum\",\n arguments={\n 'data': Component.of(data)\n },\n options={\n \n },\n constraints=kwargs)", "def weighted_sum_ds(ds, dim=None, weights=None):\n if weights is None:\n warn('Computing sum using equal weights for all data points')\n return ds.sum(dim)\n else:\n ds.apply(weighted_sum_da, dim=dim, weights=weights)", "def sum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"sum\",\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n numeric_only=True,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.sum,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )", "def sum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"sum\",\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n numeric_only=True,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.sum,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )", "def sum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.sum,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n 
keep_attrs=keep_attrs,\n **kwargs,\n )", "def sum(cls, vectors):\n result = cls.null()\n for vector in vectors:\n result += vector\n return result", "def sum_values(self):\n raise NotImplementedError", "def data_dict_add_total(data_dict, sum_args, feat_name):\n for key in data_dict:\n data_dict[key][feat_name] = 0\n for feat in sum_args:\n if data_dict[key][feat] != 'NaN':\n data_dict[key][feat_name] += data_dict[key][feat]", "def calculate_dataset_stats(dsets):\n dsinfo = {\n 'nfiles': 0,\n 'nfilesfinished': 0,\n 'nfilesfailed': 0,\n 'nfilesmissing': 0,\n 'pctfinished': 0.0,\n 'pctfailed': 0,\n 'neventsTot': 0,\n 'neventsUsedTot': 0,\n 'neventsRemaining': 0,\n 'neventsOutput': 0,\n }\n if not dsets or len(dsets) == 0:\n return dsets, dsinfo\n\n if len(dsets) > 0:\n for ds in dsets:\n if 'datasetname' in ds and len(ds['datasetname']) > 0:\n if not str(ds['datasetname']).startswith('user'):\n scope = str(ds['datasetname']).split('.')[0]\n else:\n scope = '.'.join(str(ds['datasetname']).split('.')[:2])\n if ':' in scope:\n scope = str(scope).split(':')[0]\n ds['scope'] = scope\n\n # input primary datasets\n if 'type' in ds and ds['type'] in ['input', 'pseudo_input'] and 'masterid' in ds and ds['masterid'] is None:\n if 'nevents' in ds and ds['nevents'] is not None and int(ds['nevents']) > 0:\n dsinfo['neventsTot'] += int(ds['nevents'])\n if 'neventsused' in ds and ds['neventsused'] is not None and int(ds['neventsused']) > 0:\n dsinfo['neventsUsedTot'] += int(ds['neventsused'])\n\n if 'nfiles' in ds and int(ds['nfiles']) > 0:\n dsinfo['nfiles'] += int(ds['nfiles'])\n dsinfo['nfilesfinished'] += int(ds['nfilesfinished']) if 'nfilesfinished' in ds else 0\n dsinfo['nfilesfailed'] += int(ds['nfilesfailed']) if 'nfilesfailed' in ds else 0\n ds['percentfinished'] = round_to_n_digits(100. 
* int(ds['nfilesfinished']) / int(ds['nfiles']), 1, method='floor')\n\n # nfilesmissing is not counted in nfiles in the DB\n if 'nfilesmissing' in ds and ds['nfilesmissing'] is not None:\n dsinfo['nfilesmissing'] += int(ds['nfilesmissing'])\n\n elif 'type' in ds and ds['type'] in ('output', ) and 'streamname' in ds and ds['streamname'] is not None and ds['streamname'] == 'OUTPUT0':\n # OUTPUT0 - the first and the main steam of outputs\n dsinfo['neventsOutput'] += int(ds['nevents']) if 'nevents' in ds and ds['nevents'] and ds['nevents'] > 0 else 0\n\n dsinfo['neventsRemaining'] = dsinfo['neventsTot'] - dsinfo['neventsUsedTot']\n dsinfo['pctfinished'] = round_to_n_digits(100.*dsinfo['nfilesfinished']/dsinfo['nfiles'], 0, method='floor') if dsinfo['nfiles'] > 0 else 0\n dsinfo['pctfailed'] = round_to_n_digits(100.*dsinfo['nfilesfailed']/dsinfo['nfiles'], 0, method='floor') if dsinfo['nfiles'] > 0 else 0\n\n return dsets, dsinfo", "def all_sum(structure, name=None):\n num_replicas = get_num_replicas()\n\n if num_replicas <= 1:\n return structure\n\n tf_replicator = get_tf_replicator()\n if tf_replicator:\n return tf_replicator.all_sum(structure)\n\n elif tf.distribute.has_strategy() and tf.distribute.get_replica_context():\n return tf.distribute.get_replica_context().all_reduce(\n tf.distribute.ReduceOp.SUM, structure)\n\n elif is_tpu_replicated():\n def tpu_all_sum(tensor):\n return tpu_ops.cross_replica_sum(tensor, name=name)\n\n return nest.map_structure(tpu_all_sum, structure)\n\n return structure", "def calcSum2(data1, data2): \n \n data11 = data1[0]\n data12 = data1[1]\n \n data21 = data2[0]\n data22 = data2[1]\n \n \n err1 = [data11[i] + data21[i] for i in range(0,len(data11))]\n err2 = [data12[i] + data22[i] for i in range(0,len(data12))]\n \n \n return [err1, err2]", "def evaluate(self, dataset):\n\t\tpass", "def task_8_sum_of_ints(data: List[int]) -> int:\n return sum(data)", "def sum(*args):\n result = 0\n for i in args:\n result += i\n return result", "def sum_app(*numbers_to_sum: typing.Tuple[int, ...]):\n return sum(numbers_to_sum)", "def sum_app(*numbers_to_sum: Required[typing.Tuple[int, ...]]):\n return sum(numbers_to_sum)", "def test_sum(n, m, o, result):\n from series import sum_series\n assert sum_series(n, m, o) == result", "def get_dataset_total_list(chart_list):\n dataset_total_list = dict()\n for chart in chart_list:\n region_list = chart['dataFields']['labels']\n value_list = chart['dataFields']['values']\n\n for index, region in enumerate(region_list):\n try:\n dataset_total_list[region] += value_list[index]\n except KeyError:\n dataset_total_list[region] = value_list[index]\n\n return(dataset_total_list)", "def Concat(datasets):\n\n dataset_num = len(datasets)\n dataset = datasets[0]\n for i in range(1, dataset_num):\n dataset.concatenate(datasets[i])\n return dataset", "def add_to_dataset(self, dataset: Dataset):\n pass", "def test_array_sum_equals_one(self):\n plugin = NonLinearWeights(0.85)\n result = plugin.process(self.cube, self.coord_name)\n self.assertAlmostEqual(result.data.sum(), 1.0)", "def sum(self, dim=None):\n if dim is None:\n x = self.flatten()\n else:\n x = self.transpose(0, dim)\n\n # Add all BinarySharedTensors\n while x.size(0) > 1:\n extra = None\n if x.size(0) % 2 == 1:\n extra = x[0]\n x = x[1:]\n x0 = x[: (x.size(0) // 2)]\n x1 = x[(x.size(0) // 2) :]\n x = x0 + x1\n if extra is not None:\n x.share = torch_cat([x.share, extra.share.unsqueeze(0)])\n\n if dim is None:\n x = x.squeeze()\n else:\n x = x.transpose(0, dim).squeeze(dim)\n return x", 
"def sum(self):\n # skipna == True\n # only_numerical == True\n # skipna == True\n return self._lift(\"sum\")", "def test_sum(make_data):\n x, _, X = make_data\n\n # This replicates the input layer behaviour\n def f(**kwargs):\n return kwargs['X'], 0.0\n\n def g(**kwargs):\n return kwargs['Y'], 0.0\n\n addlayer = ab.Sum(f, g)\n\n F, KL = addlayer(X=X, Y=X)\n\n tc = tf.test.TestCase()\n with tc.test_session():\n forked = F.eval()\n orig = X.eval()\n assert forked.shape == orig.shape\n assert np.all(forked == 2 * orig)\n assert KL.eval() == 0.0", "def calcSum3(data1, data2):\n \n #SxW @high resolution\n data111 = data1[0][0]\n #SxW @low resolution\n data112 = data1[0][1]\n #phi @high resolution\n data121 = data1[1][0]\n #phi @low resolution\n data122 = data1[1][1]\n \n #SxW @high resolution\n data211 = data2[0][0]\n #SxW @low resolution\n data212 = data2[0][1]\n #phi @high resolution\n data221 = data2[1][0]\n #phi @low resolution\n data222 = data2[1][1]\n \n \n \n swh = [data111[i] + data211[i] for i in range(0,len(data111))]\n swl = [data112[i] + data212[i] for i in range(0,len(data112))]\n \n phih = [data121[i] + data221[i] for i in range(0,len(data121))]\n phil = [data122[i] + data222[i] for i in range(0,len(data122))]\n \n \n return [[swh, swl], [phih, phil]]", "def sum_of(self, names):\n vals = self.get_values(names)\n if vals is None:\n return None\n return sum(vals)", "def compute(cls, dataset):\n return dataset", "def SUM(*args):\n return _group_function(lambda x, y: x + y, *args)", "def weighted_sum(data, dim=None, weights=None):\n if isinstance(data, xr.DataArray):\n return weighted_sum_da(data, dim, weights)\n elif isinstance(data, xr.Dataset):\n return weighted_sum_ds(data, dim, weights)\n else:\n raise ValueError('Data must be an xarray Dataset or DataArray')", "def calculate_sum(self):\n\n left_sum = self.left.calculate_sum() if self.left else 0\n right_sum = self.right.calculate_sum() if self.right else 0\n return self.data + left_sum + right_sum", "def diffSquaredSum(classObj ,dataSet, mean):\r\n return np.sum((dataSet - mean)**2)", "def sum(self, other):\n if is_matrix(other):\n return self._sum_matrix(other, 1)\n elif mathutil.is_scalar(other):\n return self._sum_scalar(other, 1)\n else:\n self._logger.error(\n \"'Matrix' instance, int, float or complex expected, not '{}'\".format(type(other)))\n raise TypeError(\n \"'Matrix' instance, int, float or complex expected, not '{}'\".format(type(other)))", "def mult_and_sum(*arg_list):\r\n result = numpy.empty(arg_list[0].shape, dtype=numpy.float32)\r\n result[:] = nodata\r\n array_stack = numpy.array(arg_list[0::2])\r\n scalar_list = numpy.array(arg_list[1::2])\r\n # make a valid mask as big as a single array\r\n valid_mask = numpy.logical_and.reduce(\r\n array_stack != pop_nodata, axis=0)\r\n\r\n # mask out all invalid elements but reshape so there's still the same\r\n # number of arrays\r\n valid_array_elements = (\r\n array_stack[numpy.broadcast_to(valid_mask, array_stack.shape)])\r\n array_stack = None\r\n\r\n # sometimes this array is empty, check first before reshaping\r\n if valid_array_elements.size != 0:\r\n valid_array_elements = valid_array_elements.reshape(\r\n -1, numpy.count_nonzero(valid_mask))\r\n # multiply each element of the scalar with each row of the valid\r\n # array stack, then sum along the 0 axis to get the result\r\n result[valid_mask] = numpy.sum(\r\n (valid_array_elements.T * scalar_list).T, axis=0)\r\n scalar_list = None\r\n valid_mask = None\r\n valid_array_elements = None\r\n return result", "def 
sum(self):\n return np.dot(self.data.T, self.weights)", "def _process_matrices(self, **kwargs):\n\n if self.datasource == 'graph':\n # Must store the adj_matrix\n self.data.adjacencymat = self.data.adj_matrix()\n return self._sum_on_axis(self.data.adjacencymat, **kwargs)\n elif self.datasource == 'custom':\n return self._sum_on_axis(self.data, **kwargs)\n elif self.datasource == 'MRD':\n return {k: self._sum_on_axis(self.data.data[k].todense(), undirected=False) for k in self.data.data.keys() if k != 'pk'}", "def sum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"sum\",\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.sum,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n keep_attrs=keep_attrs,\n **kwargs,\n )", "def sum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"sum\",\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.sum,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n keep_attrs=keep_attrs,\n **kwargs,\n )", "def sum(app, args):\n if not args.key:\n db = get_db(app)\n notrans = db.transient.count()\n print(\"No Transient records: \", notrans)\n if notrans > 0:\n print(\"Total data Transient: \", nicesize(\n list(db.transient.aggregate([\n {\"$group\": {\"_id\": None,\n \"total\": {\"$sum\": \"$size\"}}}]))[0]['total']))\n print(\" No Core records: \", db.transient.count())\n return\n\n kname, kinfo = key_info(app.conf, args.key)\n res = _single_sum(app, group_by=kname, force=args.force)\n total_size = int(0)\n total_count = 0\n mgn = len(\"Total\")\n for reshost in res:\n gid = reshost['_id']\n if gid is None:\n mgn = max(4, mgn)\n else:\n mgn = max(len(str(reshost['_id'])), mgn)\n\n fms = \"{:\" + str(mgn) + \"}\\t{:>10}\\t{:>9}\"\n if args.human:\n print(\"# {}:\".format(kname))\n for reshost in res:\n total = reshost['total']\n count = reshost['count']\n total_size += int(total)\n total_count += count\n if args.human:\n total_human = nicesize(total)\n count_human = nicenumber(count)\n categ = reshost['_id']\n if categ is None:\n categ = \"<undefined>\"\n\n print(fms.format(\n categ, total_human, count_human))\n else:\n print(\"{}\\t{}\\t{}\".format(\n reshost['_id'], total, count))\n\n if args.human:\n total_size_human = nicesize(total_size)\n total_count_human = nicenumber(total_count)\n print(fms.format('', '-'*10, '-'*9))\n print(fms.format(\n \"Total\", total_size_human, total_count_human))\n else:\n print(\"Total\\t{}\\t{}\".format(total_size, total_count))", "def custom_sum(*args):\n return functools.reduce(lambda x, y: x + y, args)", "def test_sum_list_int(self):\n\n list_of_int = [1, 2, 3]\n result = sum(list_of_int)\n\n self.assertEqual(result, 6)", "def test_sum(self):\n ages = [23, 54, 12, 94, 27]\n for i, age in enumerate(ages):\n self.Person(name=\"test%s\" % i, age=age).save()\n\n assert self.Person.objects.sum(\"age\") 
== sum(ages)\n\n self.Person(name=\"ageless person\").save()\n assert self.Person.objects.sum(\"age\") == sum(ages)\n\n for i, age in enumerate(ages):\n self.Person(\n name=\"test meta%s\" % i, person_meta=self.PersonMeta(weight=age)\n ).save()\n\n assert self.Person.objects.sum(\"person_meta.weight\") == sum(ages)\n\n self.Person(name=\"weightless person\").save()\n assert self.Person.objects.sum(\"age\") == sum(ages)\n\n # test summing over a filtered queryset\n assert self.Person.objects.filter(age__gte=50).sum(\"age\") == sum(\n a for a in ages if a >= 50\n )", "def sumSet(weightedSet):\n\tsum = 0\n\tfor example in weightedSet:\n\t\tsum += example.weight\n\treturn sum", "def sum(self, values):\n return self.aggregate(values, \"sum\")", "def sum(self):\n return sum(self.values)", "def calc_sum(a, b, c, d, e):\n return (a + b + c + d + e)", "def sum(*args):\n return reduce(lambda x, y: x + y, args)", "def test_that_sum_of_four_function():\r\n A = [1, -1, 2, -2]\r\n B = [-1, 1, -2, 2]\r\n C = [-3, 3, -5, 5]\r\n D = [-3, 3, -5, 5]\r\n assert check_sum_of_four(A, B, C, D) == 6", "def add_arrays(arr1, arr2):\n if len(arr1) != len(arr2):\n return None\n\n matrix_sum = []\n for i, j in zip(arr1, arr2):\n matrix_sum.append(i + j)\n return matrix_sum", "def sum(lists) -> list:\r\n return list(np.sum(lists, 0))", "def get_sum(a,b):\n return", "def sum(cls, points, **kwargs):\n p = Point.null(**kwargs)\n for point in points:\n p += point\n return p", "def matrixSum( self, matrix0, matrix1 ):\r\n result = {}\r\n keys = sorted( set( matrix0.keys() ) )\r\n count = range( len( matrix0.keys() ) )\r\n \r\n for key in keys:\r\n result[ key ] = []\r\n for i in count:\r\n soma = matrix0[ key ][ i ] + matrix1[ key ][ i ]\r\n result[ key ].append( soma )\r\n \r\n return result", "def sum(x, reduce_instance_dims=True, name=None): # pylint: disable=redefined-builtin\n return _numeric_combine(x, np.sum, reduce_instance_dims, name)", "def custom_sum(*args):\n return functools.reduce(lambda x, y: x + y, args)", "def custom_sum(*args):\n return functools.reduce(lambda x, y: x + y, args)", "def total_exs(dataset):\n total = 0\n for article in dataset['data']:\n for para in article['paragraphs']:\n total += len(para['qas'])\n return total", "def test_local_sum_sum_dtype(self):\r\n x = tensor.tensor3(dtype='int8')\r\n y = x.sum(axis=0, dtype='int32').sum(axis=1, dtype='int64')\r\n backup = config.on_opt_error\r\n config.on_opt_error = 'raise'\r\n try:\r\n # This compilation would fail prior to fix.\r\n f = theano.function([x], y)\r\n finally:\r\n config.on_opt_error = backup", "def sum(self):\n return self.aggregate(np.sum)", "def sum(values):\n total = 0\n for i in values:\n total += i\n return total", "def sum(self, values: pdarray, skipna: bool = True) -> Tuple[groupable, pdarray]:\n k, v = self.aggregate(values, \"sum\", skipna)\n return k, cast(pdarray, v)", "def sum(tensor, axis=None):\n raise NotImplementedError", "def summation(self):\n return sum(self.read_ints())", "def prodSumNumpy(*arrays):\n return np.sum(np.prod(arrays,axis=0))", "def add_get_arrays(dataset, data):\n\n # add to dataset\n for index, array in enumerate(data):\n key = f\"array_{str(index)}\"\n dataset.add_tensor(key, array)\n\n # get from dataset\n for index, array in enumerate(data):\n key = f\"array_{str(index)}\"\n rarray = dataset.get_tensor(key)\n np.testing.assert_array_equal(\n rarray,\n array,\n \"Returned array from get_tensor not equal to tensor added to dataset\",\n )", "def sum_mixed_list(mxd_lst: List[Union[int, float]]) -> 
float:\n return sum(mxd_lst)", "def add_arrays(arr1, arr2):\n if len(arr1) != len(arr2):\n return None\n return [sum(element_wise) for element_wise in zip(arr1, arr2)]", "def weighted_sum_da(da, dim=None, weights=None):\n if weights is None:\n warn('Computing sum using equal weights for all data points')\n return da.sum(dim)\n else:\n weights, _ = validate_weights(da, dim, weights)\n return (da * weights).sum(dim)", "def GetSumOutput(self, *args):\n return _itkStatisticsImageFilterPython.itkStatisticsImageFilterID3_GetSumOutput(self, *args)", "def calculate_cost(data, centers, clusters):\n total = 0\n for i in range(len(centers)):\n total = total + np.sum(data[centers[i]][clusters[i]]) \n return total", "def sum_of_numbers(numbers):\r\n return sum(numbers)", "def sum(self):\n if self.isscalar():\n s = self.defval\n else:\n if self.defval:\n msg = \"Sum of a tensor wish defval != 0 not implemented.\"\n raise NotImplementedError(msg)\n s = 0\n for v in self.sects.values():\n s += np.sum(v)\n return s", "def add(*args):\n if any(len(error_value := group) != len(args[0]) for group in args):\n raise ValueError(f\"Given matrices {error_value} are not the same size.\")\n\n res = []\n for x in zip(*args):\n if any(len(error_value := group) != len(x[0]) for group in x):\n raise ValueError(f\"Given matrices {error_value} are not the same size.\")\n res.append([sum(y) for y in zip(*x)])\n\n return res", "def sum(self):\n return self._reduce_for_stat_function(F.sum, only_numeric=True)", "def sum_elements(arr):\n return sum(arr)", "def test_sum_cube(self):\n self.init()\n assert sum_cube(self.i64_3) == np.sum(self.i64_3)\n assert sum_cube(self.fi64_3) == np.sum(self.fi64_3)\n assert sum_cube(self.f64_3) == np.sum(self.f64_3)\n assert sum_cube(self.ff64_3) == np.sum(self.ff64_3)\n assert type(sum_cube(self.i64_3)) == int\n assert type(sum_cube(self.fi64_3)) == int\n assert type(sum_cube(self.f64_3)) == float\n assert type(sum_cube(self.ff64_3)) == float", "def addition_homework(data: Iterator[str]) -> int:\n n = final_sum(data)\n return n.magnitude", "def print_sum():\n\n sum = 0\n for item in data:\n sum += item[\"price\"]\n\n print(f\"The sum is: {sum}\")", "def op_sum(self, args):\n sum = 0\n stack_levels = len(self.stack)\n if args != None:\n stack_levels = int(args[0])\n self.require_stack(stack_levels)\n for i in range(0, stack_levels):\n sum += self.stack.pop()\n self.stack.append(sum)", "def rowsums (self):\n return self.values.sum (axis=0)", "def get_datasets_summary(rs):\n\n\tif rs == \"rs1\":\n\t\tdataset_list = db.get_engine(current_app, 'methylation_data').execute(\"SELECT * FROM datasets WHERE dataset NOT LIKE 'CEMBA_RS2_%%'\").fetchall()\n\t\tdataset_list += db.get_engine(current_app, 'snATAC_data').execute(\"SELECT * FROM datasets WHERE dataset NOT LIKE 'CEMBA_RS2_%%'\").fetchall()\n\n\t\t# This is a hack to get unique values in a list of dictionaries\n\t\tdataset_list = list({x['dataset']:x for x in dataset_list}.values());\n\t\ttotal_methylation_cell_each_dataset = db.get_engine(current_app, 'methylation_data').execute(\"SELECT dataset, COUNT(*) as `num` FROM cells WHERE dataset NOT LIKE 'CEMBA_RS2_%%' GROUP BY dataset\").fetchall()\n\t\ttotal_snATAC_cell_each_dataset = db.get_engine(current_app, 'snATAC_data').execute(\"SELECT dataset, COUNT(*) as `num` FROM cells WHERE dataset NOT LIKE 'CEMBA_RS2_%%' GROUP BY dataset\").fetchall()\n\telif rs == \"rs2\":\n\t\tdataset_list = db.get_engine(current_app, 'methylation_data').execute(\"SELECT * FROM datasets WHERE dataset LIKE 
'CEMBA_RS2_%%'\").fetchall()\n\t\ttotal_methylation_cell_each_dataset = db.get_engine(current_app, 'methylation_data').execute(\"SELECT dataset, COUNT(*) as `num` FROM cells WHERE dataset LIKE 'CEMBA_RS2_%%' GROUP BY dataset\").fetchall()\n\t\ttotal_snATAC_cell_each_dataset = db.get_engine(current_app, 'snATAC_data').execute(\"SELECT dataset, COUNT(*) as `num` FROM cells WHERE dataset LIKE 'CEMBA_RS2_%%' GROUP BY dataset\").fetchall()\n\telif rs == \"all\":\n\t\tdataset_list = db.get_engine(current_app, 'methylation_data').execute(\"SELECT * FROM datasets\").fetchall()\n\t\tdataset_list += db.get_engine(current_app, 'snATAC_data').execute(\"SELECT * FROM datasets\").fetchall()\n\t\t# This is a hack to get unique values in a list of dictionaries\n\t\tdataset_list = list({x['dataset']:x for x in dataset_list}.values());\n\t\ttotal_methylation_cell_each_dataset = db.get_engine(current_app, 'methylation_data').execute(\"SELECT dataset, COUNT(*) as `num` FROM cells GROUP BY dataset\").fetchall()\n\t\ttotal_snATAC_cell_each_dataset = db.get_engine(current_app, 'snATAC_data').execute(\"SELECT dataset, COUNT(*) as `num` FROM cells GROUP BY dataset\").fetchall()\n\telse:\n\t\treturn\n\n\ttotal_methylation_cell_each_dataset = [ {d['dataset']: d['num']} for d in total_methylation_cell_each_dataset ]\n\ttotal_methylation_cell_each_dataset = { k: v for d in total_methylation_cell_each_dataset for k, v in d.items() }\n\ttotal_snATAC_cell_each_dataset = [ {d['dataset']: d['num']} for d in total_snATAC_cell_each_dataset ]\n\ttotal_snATAC_cell_each_dataset = { k: v for d in total_snATAC_cell_each_dataset for k, v in d.items() }\n\n\tdataset_cell_counts = []\n\tfor dataset in dataset_list:\n\t\ttry:\n\t\t\tnum_snATAC_cells = total_snATAC_cell_each_dataset[dataset['dataset']]\n\t\texcept KeyError as e:\n\t\t\tnum_snATAC_cells = 0\n\n\t\tif \"RS2\" not in dataset['dataset']:\n\t\t\tbrain_region_code = dataset['dataset'].split('_')[1]\n\t\t\tresearch_segment = \"RS1\"\n\t\telse:\n\t\t\tbrain_region_code = dataset['dataset'].split('_')[2]\n\t\t\tbrain_region_code = brain_region_code[-2:]\n\t\t\tresearch_segment = \"RS2\"\n\n\t\tregions_sql = db.get_engine(current_app, 'methylation_data').execute(\"SELECT ABA_description FROM ABA_regions WHERE ABA_acronym='%s'\" % dataset['brain_region']).fetchone()\n\t\tif regions_sql is not None:\n\t\t\tABA_regions_descriptive = regions_sql['ABA_description'].replace('+', ', ')\n\t\telse:\n\t\t\tABA_regions_descriptive = \"\"\n\n\t\tif rs == \"rs1\":\n\t\t\ttry:\n\t\t\t\tdataset_cell_counts.append( {\"dataset_name\": dataset['dataset'],\n\t\t\t\t\t\t\t\t\t\t\t \"sex\": dataset['sex'],\n\t\t\t\t\t\t\t\t\t\t\t \"methylation_cell_count\": total_methylation_cell_each_dataset[dataset['dataset']],\n\t\t\t\t\t\t\t\t\t\t\t \"snATAC_cell_count\": num_snATAC_cells,\n\t\t\t\t\t\t\t\t\t\t\t \"ABA_regions_acronym\": dataset['brain_region'].replace('+', ', '),\n\t\t\t\t\t\t\t\t\t\t\t \"ABA_regions_descriptive\": ABA_regions_descriptive,\n\t\t\t\t\t\t\t\t\t\t\t \"slice\": brain_region_code,\n\t\t\t\t\t\t\t\t\t\t\t \"date_added\": str(dataset['date_online']),\n\t\t\t\t\t\t\t\t\t\t\t \"description\": dataset['description'] })\n\t\t\texcept:\n\t\t\t\tdataset_cell_counts.append( {\"dataset_name\": dataset['dataset'],\n\t\t\t\t\t\t\t\t\t\t\t \"sex\": dataset['sex'],\n\t\t\t\t\t\t\t\t\t\t\t \"methylation_cell_count\": 0,\n\t\t\t\t\t\t\t\t\t\t\t \"snATAC_cell_count\": num_snATAC_cells,\n\t\t\t\t\t\t\t\t\t\t\t \"ABA_regions_acronym\": dataset['brain_region'].replace('+', ', 
'),\n\t\t\t\t\t\t\t\t\t\t\t \"ABA_regions_descriptive\": ABA_regions_descriptive,\n\t\t\t\t\t\t\t\t\t\t\t \"slice\": brain_region_code,\n\t\t\t\t\t\t\t\t\t\t\t \"date_added\": str(dataset['date_online']),\n\t\t\t\t\t\t\t\t\t\t\t \"description\": dataset['description'] })\n\t\telse:\n\t\t\ttarget_region_sql = db.get_engine(current_app, 'methylation_data').execute(\"SELECT ABA_description FROM ABA_regions WHERE ABA_acronym='%s'\" % dataset['target_region']).fetchone()\n\t\t\tif target_region_sql is not None:\n\t\t\t\ttarget_region_descriptive = target_region_sql['ABA_description'].replace('+', ', ')\n\t\t\telse:\n\t\t\t\ttarget_region_descriptive = \"\"\n\n\t\t\ttry:\n\t\t\t\tdataset_cell_counts.append( {\"dataset_name\": dataset['dataset'],\n\t\t\t\t\t\t\t\t\t\t\t \"research_segment\": research_segment,\n\t\t\t\t\t\t\t\t\t\t\t \"sex\": dataset['sex'],\n\t\t\t\t\t\t\t\t\t\t\t \"methylation_cell_count\": total_methylation_cell_each_dataset[dataset['dataset']],\n\t\t\t\t\t\t\t\t\t\t\t \"snATAC_cell_count\": num_snATAC_cells,\n\t\t\t\t\t\t\t\t\t\t\t \"ABA_regions_acronym\": dataset['brain_region'].replace('+', ', '),\n\t\t\t\t\t\t\t\t\t\t\t \"ABA_regions_descriptive\": ABA_regions_descriptive,\n\t\t\t\t\t\t\t\t\t\t\t \"slice\": brain_region_code,\n\t\t\t\t\t\t\t\t\t\t\t \"date_added\": str(dataset['date_online']),\n\t\t\t\t\t\t\t\t\t\t\t \"description\": dataset['description'],\n\t\t\t\t\t\t\t\t\t\t\t \"target_region_acronym\": dataset['target_region'],\n\t\t\t\t\t\t\t\t\t\t\t \"target_region_descriptive\": target_region_descriptive})\n\t\t\texcept:\n\t\t\t\tdataset_cell_counts.append( {\"dataset_name\": dataset['dataset'],\n\t\t\t\t\t\t\t\t\t\t\t \"research_segment\": research_segment,\n\t\t\t\t\t\t\t\t\t\t\t \"sex\": dataset['sex'],\n\t\t\t\t\t\t\t\t\t\t\t \"methylation_cell_count\": 0,\n\t\t\t\t\t\t\t\t\t\t\t \"snATAC_cell_count\": num_snATAC_cells,\n\t\t\t\t\t\t\t\t\t\t\t \"ABA_regions_acronym\": dataset['brain_region'].replace('+', ', '),\n\t\t\t\t\t\t\t\t\t\t\t \"ABA_regions_descriptive\": ABA_regions_descriptive,\n\t\t\t\t\t\t\t\t\t\t\t \"slice\": brain_region_code,\n\t\t\t\t\t\t\t\t\t\t\t \"date_added\": str(dataset['date_online']),\n\t\t\t\t\t\t\t\t\t\t\t \"description\": dataset['description'],\n\t\t\t\t\t\t\t\t\t\t\t \"target_region_acronym\": dataset['target_region'],\n\t\t\t\t\t\t\t\t\t\t\t \"target_region_descriptive\": target_region_descriptive})\n\n\treturn json.dumps(dataset_cell_counts)", "def test_suite():\n test(sum_all_elements([1,3,1,4,3,8]) == 5)\n test(sum_all_elements([1,3,5,7]) == 16)\n test(sum_all_elements([1, -7, 10, 23]) == -6)\n test(sum_all_elements(range(1,555,2)) == 76729)", "def compare_sum(values, weights):\n return np.sum(values.numpy())", "def __init__(self, datasets: Union[str, list], **kwargs) -> None:\n if isinstance(datasets, str):\n datasets = [datasets]\n\n self._data = None\n for dataset in datasets:\n assert dataset in DATASETS, f\"[!] 
Dataset not found: {dataset}\"\n\n if self._data is None:\n self._data = DATASETS[dataset](kwargs)\n else:\n self._data = ConcatDataset([self._data, DATASETS[dataset](kwargs)])", "def sum(self, axis=None, keepdims=False, dtype=None, out=None):\n return np.add.reduce(self, out=out, axis=axis, keepdims=keepdims, dtype=dtype)", "def GetSumOutput(self, *args):\n return _itkStatisticsImageFilterPython.itkStatisticsImageFilterIUL3_GetSumOutput(self, *args)", "def sum(a,b,c):\n return (a+b+c)", "def test_accumulation(preds, targets, exact_match, f1):\n squad_metric = SQuAD()\n for pred, target in zip(preds, targets):\n squad_metric.update(preds=[pred], target=[target])\n metrics_score = squad_metric.compute()\n\n _assert_tensor(metrics_score[\"exact_match\"])\n _assert_tensor(metrics_score[\"f1\"])\n _assert_allclose(metrics_score[\"exact_match\"], torch.mean(torch.tensor(exact_match)))\n _assert_allclose(metrics_score[\"f1\"], torch.mean(torch.tensor(f1)))", "def test_add():\n data = io.create_sample_Dataset()\n tmp = data + data\n assert tmp[\"u\"][0, 0, 0] == 2.0", "def sum_values(values):\n return (sum(values))", "def test_graph_traversal_sum(self):\n # Run the test case for the given example network\n values_out1, _ = self._run_test_case_for_method(ExampleNetwork1, graph_traversal_sum,\n number_of_executions=1, length=1000)\n # 'Node 4 should be sum of 0, 1, and 2'\n np.testing.assert_array_almost_equal(\n list(values_out1[:, 4]),\n list(values_out1[:, 0] + values_out1[:, 1] + values_out1[:, 2]),\n decimal=10,\n err_msg='Network1: Node 4 should be sum of 0, 1, and 2')\n # 'Node 6 should be same as 4'\n np.testing.assert_array_almost_equal(\n list(values_out1[:, 6]),\n list(values_out1[:, 3]),\n decimal=10,\n err_msg ='Network1: Node 6 should be same as 3')\n # 'Node 9 should be sum of 3 and 4'\n np.testing.assert_array_almost_equal(\n list(values_out1[:, 9]),\n list((values_out1[:, 3] + values_out1[:, 4])),\n decimal=10,\n err_msg='Network1: Node 9 should be sum of 3 and 4')\n\n # Run the test case for the second example network\n values_out2, _ = self._run_test_case_for_method(ExampleNetwork2, graph_traversal_sum,\n number_of_executions=1, length=1000)\n # 'Node 7 should be sum of 5, 3, 9'\n np.testing.assert_array_almost_equal(\n list(values_out2[:, 7]),\n list(values_out2[:, 5] + values_out2[:, 3] + values_out2[:, 9]),\n decimal=10,\n err_msg='Network1: Node 7 should be sum of 5, 3, 9')\n # 'Node 6 should be same as 8'\n np.testing.assert_array_almost_equal(\n list(values_out2[:, 6]),\n list(values_out2[:, 8]),\n decimal=10,\n err_msg='Network1: Node 6 should be same as 8')\n # 'Node 6 should be sum of 4, 0, 1, 9'\n np.testing.assert_array_almost_equal(\n list(values_out2[:, 6]),\n list((values_out2[:, 4] + values_out2[:, 9] + values_out2[:, 0] + values_out2[:, 1])),\n decimal=10,\n err_msg='Network1: Node 9 should be sum of 3 and 4')", "def sum (self):\n return self.values.sum ()", "def sum (self):\n return self.values.sum ()", "def assert_array_sum(base_raster_path, desired_sum):\r\n base_raster = gdal.OpenEx(base_raster_path, gdal.OF_RASTER)\r\n base_band = base_raster.GetRasterBand(1)\r\n base_array = base_band.ReadAsArray()\r\n raster_sum = numpy.sum(base_array)\r\n numpy.testing.assert_almost_equal(raster_sum, desired_sum)", "def __iadd__(self,value):\n if isinstance(value,LiveStat):\n raise Exception(\"Cannot sum statistics\")\n if value.vcount < 1 or self.vcount < 1:\n raise Exception(\"Cannot sum empty statistics\")\n else:\n # sum of two considered pairwise: z_i = stat(x_i + 
y_i)\n #\n # data have different weights due to number of samples.. TODO\n self.vmin += value.vmin \n self.vmax += value.vmax\n self.vmean += value.vmean\n self.vsum += value.vsum\n # variance is sum of variance?\n self.vm2 += value.vm2\n # TODO vm3 vm4\n self.vcount = min(value.vcount,self.vcount)\n self.vcountsq = self.vcount**2\n self.dirty = True\n print (\"add Missing: M3 and M4\")\n else:\n # constant bias\n if self.vmin is not None:\n self.vmin += value\n self.vmax += value\n self.vmean += value\n self.vsum += self.vcount*value\n print (\"add Missing: M3 and M4\")\n self.dirty = True\n return self", "def sumAllValues(self,*toSkip):\n sum=0\n for counterKey in self.counters.keys():\n if not counterKey in toSkip: sum += self.counters[counterKey]\n # 026 #self.debug.mainLogger.debug(\"Sumation of all counters finished with result %i.\"%(sum))\n return sum", "def match_data(self, datasets):\n raise NotImplementedError" ]
[ "0.69257575", "0.6655116", "0.6303091", "0.6137", "0.59186286", "0.5906148", "0.580326", "0.576853", "0.576853", "0.5768271", "0.5721376", "0.5690767", "0.5661961", "0.56405276", "0.56123817", "0.56020796", "0.55628824", "0.55342525", "0.5516941", "0.55149585", "0.55093265", "0.5505355", "0.55009276", "0.54875296", "0.5472624", "0.5471613", "0.54554164", "0.5450035", "0.5434738", "0.5432699", "0.5425089", "0.5419572", "0.54140073", "0.5408545", "0.5404551", "0.53956515", "0.5379466", "0.53728867", "0.5368465", "0.53586507", "0.5355176", "0.5355176", "0.53489184", "0.53482467", "0.5346929", "0.53378475", "0.5317557", "0.5314934", "0.5310582", "0.53062195", "0.5303432", "0.52893776", "0.5287667", "0.52867997", "0.52862185", "0.52817994", "0.5274712", "0.52743846", "0.52663475", "0.52663475", "0.52585703", "0.52492636", "0.5248985", "0.5240893", "0.5240245", "0.5237153", "0.52348876", "0.5229858", "0.5229586", "0.522684", "0.5220523", "0.52071553", "0.5192969", "0.5192382", "0.51846194", "0.5179735", "0.517909", "0.5172292", "0.51658577", "0.5157842", "0.51560056", "0.51542425", "0.515123", "0.5149462", "0.5147667", "0.5146275", "0.5143743", "0.51374465", "0.51351357", "0.51349753", "0.5133057", "0.51302934", "0.5126694", "0.51228493", "0.5119775", "0.5118105", "0.5118105", "0.51060444", "0.51015675", "0.50984186", "0.5083719" ]
0.0
-1
Parses data list and returns the number of person IDs and the number of camera views.
def parse_data(self, data):\n pids = set()\n cams = set()\n for info in data:\n pids.add(info[1])\n cams.add(info[2])\n return len(pids), len(cams)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_num_cams(self, data):\n cams = set()\n for items in data:\n camid = items[2]\n cams.add(camid)\n return len(cams)", "def get_num_pids(self, data):\n pids = set()\n for items in data:\n pid = items[1]\n pids.add(pid)\n return len(pids)", "def getNumberOfReviews(self):\n try:\n count = 0\n with open(self.metadata_path, \"r\", newline='') as metadata:\n mdata = csv.reader(metadata, delimiter=' ', quotechar='|')\n for review_data in mdata:\n count += 1\n return count\n except Exception:\n print(\"Cant load metadata file\")\n traceback.print_exc()", "def get_num_cams(self, data):\n return self.parse_data(data)[1]", "def get_number_of_persons(self):\n self.__load_persons_from_file_into_memory()\n return super().get_number_of_persons()", "def people(self):\n count = self.db.query(\n 'select count(id) as people_count from \\\n (select id from staff union all select id from fellows)')\n return count.all()[0]['people_count']", "def get_num_pids(self, data):\n return self.parse_data(data)[0]", "def dataDimensions(data):\n logging.info('Number of rows of data: %s' % len(data))\n logging.info('Number of columns of data: %s' % len(data[1]))", "def _read_dataset_info(self) -> Tuple[int, int]:\n print('Reading dataset information...')\n dataset_file = os.path.join(self.dataset_dir, 'dataset.json')\n assert os.path.isfile(dataset_file), \"The dataset is not complete, the dataset.json file is missing\"\n\n with open(dataset_file) as f:\n data = json.load(f)\n\n # Get total number of frames and upsample\n # num_frames = data['valid_frames'][-1] * 2\n\n # Total frames, for now.. --> Horrible hardcoding but necessary, please change it in the future!\n if self.half_resolution:\n total_frames = {\n '181129': 64862,\n '190502': 89585,\n '190719': 87778,\n '190726': 88819,\n }\n else:\n total_frames = {\n '181129': 64862 * 2 - 1,\n '190502': 89585 * 2,\n '190719': 87778 * 2,\n '190726': 88819 * 2,\n }\n return data['n_cameras'], total_frames[self.dataset_name]", "def numberOfCamera():\n return numCams", "def count_persons(self):\n\n # full path of personData file\n path = self.pretty_print_path()\n\n data = self.person_data[[\"person_id\", \"telestatus\"]].copy()\n\n conditions = [(data[\"telestatus\"] == \"Not a Worker\"),\n (data[\"telestatus\"].isin([\"No Telecommute\",\n \"1 day a week\",\n \"2-3 days a week\",\n \"4+ days a week\",\n \"Work from Home\"]))]\n choices = [\"Non-worker\", \"Worker\"]\n data[\"telestatus\"] = np.select(conditions, choices, default=data[\"telestatus\"])\n\n counts = data.groupby([\"telestatus\"]).count()\n counts.loc['Total'] = counts.sum()\n counts.reset_index(inplace=True)\n counts['File'] = path # add file name\n\n results = counts.pivot(index=\"File\", columns=\"telestatus\", values=\"person_id\")\n\n # add percentage share\n counts[\"Total\"] = data.person_id.count()\n counts[\"share\"] = counts.person_id / counts.Total\n percent_share = counts.pivot(index=\"File\", columns=\"telestatus\", values=\"share\")\n\n results = results.append(percent_share, ignore_index=False, sort=True)\n\n cols = [\"Worker\",\n \"Non-worker\",\n \"Total\"]\n\n return results[cols]", "def data_count(self):\n return(len(self.data))", "def getPeopleIDs():\n\n\t\tpeople_ids = self.mem.getData(\"GazeAnalysis/PeopleLookingAtRobot\")\n\n\t\tif len(people_ids) == 0:\n\t\t\treturn None\n\n\t\treturn people_ids", "def get_camera_count():\n return Camera.getNumberOfCameras()", "def _process_persons(self, persons_json: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:\n print('Processing persons...')\n 
persons = []\n ids = []\n last_frame = -1\n for d in persons_json:\n frame = d['frame']\n\n # Complete if there are empty frames in between\n if frame > last_frame + 1:\n for i in range(last_frame+1, frame):\n persons.append([])\n #\n # Add persons in current frame if there are persons in it\n if 'persons' in d:\n persons_in_frame = d['persons']\n\n # Get the ids of the annotated persons\n for person in persons_in_frame:\n ids.append(person['pid'])\n\n persons.append(persons_in_frame)\n\n last_frame = frame\n\n # Add empty frames at the end if the dataset has unnanotated frames to avoid errors\n if len(persons) <= self.num_frames//2:\n for i in range(len(persons), self.num_frames//2+1):\n persons.append([])\n\n # Make sure that there are no repeated IDs\n ids = np.unique(np.array(ids, dtype=np.int))\n\n return np.array(persons, dtype=object), ids", "def _count_parties(data_set): #DEMOCRATS, THEN REPUBLICANS\r\n reps = 0\r\n dems = 0\r\n for data_point in data_set:\r\n if data_point.dat_party == \"R\": reps+=1\r\n if data_point.dat_party == \"D\": dems+=1\r\n\r\n return (dems, reps)", "def analyze_count(data):\n\n dsct_vk = pd.unique(data['vk'])\n dsct_itemid = pd.unique(data['itemid'])\n\n print 'number of user:', dsct_vk.shape\n print 'number of items:', dsct_itemid.shape\n print 'the number of ratings:', data.shape\n\n print 'unique actions:', pd.unique(data['action'])\n print 'the number of action 0:', np.sum(data['action'] == 0)\n print 'the number of action 1:', np.sum(data['action'] == 1)\n print 'the number of action 2:', np.sum(data['action'] == 2)\n print 'the number of action 3:', np.sum(data['action'] == 3)\n print 'the number of action 4:', np.sum(data['action'] == 4)\n \n time_range_item = data.groupby('itemid')['real_time'].aggregate(sum_unique)\n print 'Max Range:', np.max(time_range_item)\n print 'Mean Range:', np.mean(time_range_item)\n print 'Median Range:', np.median(time_range_item)", "def get_info(data_list):\n info = [{TEST_SAMPLE: data_elem[TEST_SAMPLE], CONTROL_SAMPLE: data_elem[CONTROL_SAMPLE],\n N_UP: len(data_elem[REG_UP].index), N_DOWN: len(data_elem[REG_DOWN].index)} for data_elem in data_list]", "def count_extracted(j_data):\n count = 0\n for record in j_data:\n tmp = {}\n desc = record['lcr_desc'].lower().split('/')\n title = desc[0]\n cat = category(title)\n if cat and 'location' in record:\n count += 1\n return count", "def _get_observation_count(self):\n observation_count = 0\n for sequence in self.seq_list:\n observation_count += sequence.shape[0] \n \n return observation_count", "def getNumberOfViews(self) -> int:\n ...", "def count_score(data: GameData) -> Tuple[int, int]:\n score_user = score_ai = 0\n\n for item in data.items:\n if not hasattr(item, \"prediction_user\"):\n return score_user, score_ai\n\n if item.prediction_user == item.ground_truth:\n score_user += 1\n\n if item.prediction_ai[0] == item.ground_truth:\n score_ai += 1\n\n return score_user, score_ai", "def people_count(self):\n return len(self.__users)", "def process_camera():\n\n pic_array = take_picture()\n detections, shapes, descriptors = detect_faces(person_database,pic_array)\n\n names = []\n\n for desc in descriptors:\n name = find_match(person_database, desc)\n names.append(name)\n\n return pic_array, names, detections, shapes, descriptors", "def count_data(self):\n try:\n ndata = len(self.x)\n logger.info(\"Number of data points: {0}\".format(ndata))\n except AttributeError:\n logger.error(\"Data object has not been defined\")\n ndata = 0\n return ndata", "def 
_parse_list(self):\n frame_path = [x.strip().split(' ') for x in open(self._image_set)] \n self.video_list = [VideoRecord(item) for item in frame_path]\n print('Sequence number/ video number:%d' % (len(self.video_list)))", "def _parse_list(self):\n frame_path = [x.strip().split(' ') for x in open(self._image_set)] \n self.video_list = [VideoRecord(item) for item in frame_path]\n print('Sequence number/ video number:%d' % (len(self.video_list)))", "def parseCommentsTotalCount(data):\n p = re.compile(r\"\\d+ Reviews\")\n\n for line in data:\n line = line.replace(\",\", \"\")\n match = re.search(p, line)\n if match != None:\n getNmbr = match.group().split(\" \")\n return int(getNmbr[0])\n return -1", "def getNumData(self):\n return len(self.data)", "def extract_work_info(self, data_items):\n result = []\n count = 0\n for data_item in data_items:\n keep = True\n if self.filters.get('min') != None and data_item['bookmarkCount'] < self.filters['min']:\n keep = False\n if self.filters.get('max') != None and data_item['bookmarkCount'] > self.filters['max']:\n keep = False\n if self.filters['multi'] == False and data_item['pageCount'] > 1:\n keep = False\n if keep:\n url = data_item['url']\n begin = url.find('img/')\n end = url.find('_master')\n url_info = url[begin + 4:end - 3] # no real source here since there might be multi images\n\n result.append({\n 'id': data_item['illustId'],\n 'name': data_item['illustTitle'], # filename\n 'username': data_item['userName'], # filename\n 'url_info': url_info, # for fetching real source\n 'count': data_item['pageCount'], # for fetching multiple images\n 'type': data_item['illustType'] # for determining picture/ugoira\n })\n count += data_item['pageCount']\n return result, count", "def get_data(self):\n data = list(IgnitionRowPredictionCVX.objects.all().order_by('-pub_date')[:self.num_ticks].values())\n two_hours = data[::-1]\n num_players_data = [[elem['num_players_{}'.format(key)] for elem in two_hours] for key in self.keys]\n return num_players_data", "def count_data(path):\n matcher = re.compile(r'[0-9]+\\.json')\n match = lambda name: bool(matcher.match(name))\n names = os.listdir(path)\n n_data = len(list(filter(match, names)))\n return n_data", "def so_data_statistics(data_file):\n with open(data_file, \"r\") as f:\n data = json.load(f)\n\n answer_to_num_questions = collections.Counter()\n comment_to_num_questions = collections.Counter()\n num_comments = 0\n num_answers = 0\n num_questions = len(data)\n\n for q in data:\n q = json.loads(q)\n q_comments = 0\n q_comments += len(q[\"comments\"])\n q_answers = len(q[\"answers\"])\n for a in q[\"answers\"]:\n q_comments += len(a[\"comments\"])\n\n answer_to_num_questions[q_answers] += 1\n comment_to_num_questions[q_comments] += 1\n\n num_comments += q_comments\n num_answers += q_answers\n\n print \"Num comments: {0}, Num answers: {1}, Num_questions: {2}\".format(\n num_comments, num_answers, num_questions)\n print \"-\" * 10\n print \"Answers map: \", answer_to_num_questions\n print \"Comments map: \", comment_to_num_questions\n\n return num_comments, num_answers, num_questions, answer_to_num_questions, \\\n comment_to_num_questions", "def get_faces_nr(self):\r\n\r\n logger.debug('Getting number of faces in each frame')\r\n\r\n if len(self.tracked_faces) == 0:\r\n\r\n # Try to load YAML file\r\n\r\n if os.path.exists(self.track_path):\r\n\r\n print 'Loading YAML file with tracking results'\r\n logger.debug('Loading YAML file with tracking results')\r\n\r\n with open(self.track_path) as f:\r\n\r\n 
self.tracked_faces = yaml.load(f)\r\n\r\n print 'YAML file with tracking results loaded'\r\n logger.debug('YAML file with tracking results loaded')\r\n\r\n else:\r\n\r\n print 'Warning! No tracking results found!'\r\n logger.warning('No tracking results found!')\r\n\r\n return\r\n\r\n self.faces_nr = {}\r\n\r\n for segment_dict in self.tracked_faces:\r\n\r\n frame_list = segment_dict[c.FRAMES_KEY]\r\n\r\n for frame_dict in frame_list:\r\n\r\n frame_name = frame_dict[c.SAVED_FRAME_NAME_KEY]\r\n\r\n if frame_name in self.faces_nr:\r\n\r\n self.faces_nr[frame_name] += 1\r\n\r\n else:\r\n\r\n self.faces_nr[frame_name] = 1\r\n\r\n # Save YAML file\r\n\r\n utils.save_YAML_file(self.faces_nr_path, self.faces_nr)", "def get_num_datasets(self, data):\n dsets = set()\n for items in data:\n dsetid = items[3]\n dsets.add(dsetid)\n return len(dsets)", "def count_data_items(fileids, train=True):\n sizes = 28000 if train else 22500\n return len(fileids) * sizes", "def count_list_4(my_list):\r\n\tcount = 0\r\n\t\r\n\tfor l in my_list:\r\n\t\tif l == 4:\r\n\t\t\tcount += 1\r\n\t\t\t\r\n\treturn count", "def __len__(self):\n return len(self.data_list)", "def __len__(self) -> int:\n import h5py\n\n with h5py.File(\n os.path.join(self.root, self.data_dir, self.img_file_name), \"r\"\n ) as f:\n num_datapoints: int = f[self.split][\"pv_log\"].shape[0]\n\n return num_datapoints", "def countDataSize(self,filename):\n \n d = h5py.File(filename,'r')\n features = d['spectrometer/features'][:]\n select = self.selectData(features.astype(float), self.ifeature, d)\n N = len(features[select])\n d.close()\n\n N = (N//self.offsetLen) * self.offsetLen\n\n N = N*self.Nfeeds\n\n self.chunks += [[int(self.Nsamples), int(self.Nsamples+N)]]\n self.datasizes += [int(N/self.Nfeeds)]\n self.Nsamples += int(N)", "def count(self):\n \n return len(self.img_lst)", "def processData(data):\n ids, instances, labels = [], [], []\n for i in data:\n idField = int(i[0])\n instance = i[1:-1]\n label = i[-1]\n ids.append(idField)\n instances.append(instance)\n labels.append(label)\n\n ids = np.array(ids)\n instances = np.array(instances)\n labels = np.array(labels)\n\n return (ids, instances, labels)", "def count_data_points(conversation, parse_text, i, **kwargs):\n data = conversation.temp_dataset.contents['X']\n num_elements = len(data)\n\n parse_op = gen_parse_op_text(conversation)\n\n if len(parse_op) > 0:\n description_text = f\" where <b>{parse_op}</b>\"\n else:\n description_text = \"\"\n\n message = f\"There are <b>{num_elements} items</b> in the data{description_text}.\"\n\n message += \"<br><br>\"\n message += \"Let me know if you want to see their ids.\"\n ids = list(data.index)\n rest_of_text = str(ids)\n conversation.store_followup_desc(rest_of_text)\n return message, 1", "def get_data_ninstances(self):\n return self.data_ninstances", "def numberActivities(self):\n if self.use_dic:\n nb_data = self.dic.keys()\n nb_act = (self.dic[nb_data[0]]).keys()\n return len(nb_data)*len(nb_act)\n else:\n return -1", "def data_info(data):\n filename = data[\"filename\"]\n X_var = data[\"X_var\"]\n Y_var = data[\"Y_var\"]\n X,Y = read_file(filename,X_var,Y_var)\n input_dim = len(X_var)\n output_dim = len(Y_var)\n return X,Y,input_dim,output_dim", "def get_data(self):\n data = list(IgnitionRowPredictionOLS.objects.all().order_by('-pub_date')[:self.num_ticks].values())\n two_hours = data[::-1]\n num_players_data = [[elem['num_players_{}'.format(key)] for elem in two_hours] for key in self.keys]\n return num_players_data", "def 
iterate_data(self):\n if \"single\" in self.dataset_name:\n # Index 0 for list of sentence lengths, index 1 for list of token lengths\n self.stat_dict = {'question': [[], []], 'summary': [[], []], 'article': [[], []]}\n for answer_id in self.data:\n summary = self.data[answer_id]['summary']\n articles = self.data[answer_id]['articles']\n question = self.data[answer_id]['question']\n if args.tokenize:\n self._get_token_cnts(summary, 'summary')\n self._get_token_cnts(articles, 'article')\n self._get_token_cnts(question, 'question')\n self._write_stats(\"token_counts\")\n\n if \"multi\" in self.dataset_name:\n self.stat_dict = {'question': [[], []], 'summary': [[], []], 'article': [[], []]}\n for q_id in self.data:\n summary = self.data[q_id]['summary']\n question = self.data[q_id]['question']\n if args.tokenize:\n self._get_token_cnts(summary, 'summary')\n self._get_token_cnts(question, 'question')\n question = self.data[q_id]['question']\n for answer_id in self.data[q_id]['articles']:\n articles = self.data[q_id]['articles'][answer_id][0]\n if args.tokenize:\n self._get_token_cnts(articles, 'article')\n self._write_stats(\"token_counts\")\n\n if self.dataset_name == \"complete_dataset\":\n self.stat_dict = {'urls': [], 'sites': []}\n article_dict = {}\n print(\"Counting answers, sites, unique urls, and tokenized counts of unique articles\")\n answer_cnt = 0\n for q_id in self.data:\n for a_id in self.data[q_id]['answers']:\n answer_cnt += 1\n url = self.data[q_id]['answers'][a_id]['url']\n article = self.data[q_id]['answers'][a_id]['article']\n if url not in article_dict:\n article_dict[url] = article\n self.stat_dict['urls'].append(url)\n assert \"//\" in url, url\n site = url.split(\"//\")[1].split(\"/\")\n self.stat_dict['sites'].append(site[0])\n print(\"# of Answers:\", answer_cnt)\n print(\"Unique articles: \", len(article_dict)) # This should match up with count written to file\n self._write_stats(\"full collection\")\n\n # Get token/sent averages of unique articles\n if args.tokenize:\n self.stat_dict = {'article': [[], []]}\n for a in article_dict:\n self._get_token_cnts(article_dict[a], 'article')\n self._write_stats(\"token_counts\")", "def count(self):\r\n return self.data_array.size", "def getCounts():\n for area in AREAS:\n print(area['name'])\n lat = area['lat']\n lng = area['lng']\n count = utils.getExtracted(countCrimes, lat, lng)\n print('count: %s' % count)\n if type(count) is list:\n area['count'] = count[0]\n return AREAS", "def member_count(self):\n\n url = '{}/members'.format(self.url)\n headers = {\n 'User-Agent': 'GeoUsage (https://github.com/geopython/GeoUsage)'\n }\n\n LOGGER.debug('Fetching URL: {}'.format(url))\n response = requests.post(url,\n headers=headers,\n data={'adminpw': self.password})\n LOGGER.debug('Parsing HTML')\n\n element = re.search(r'(\\d+) members total', response.text).group(0)\n members = int(element.split('members total')[0].strip())\n\n return members", "def countDataSize(self,filename):\n \n try:\n d = h5py.File(filename,'r')\n except:\n print(filename)\n return \n\n N = 0\n scan_edges = d['level2/Statistics/scan_edges'][:]\n for (start,end) in scan_edges:\n N += (end-start)//self.offsetLen * self.offsetLen\n d.close()\n\n N = N*self.Nfeeds\n\n self.chunks += [[int(self.Nsamples), int(self.Nsamples+N)]]\n self.datasizes += [int(N/self.Nfeeds)]\n self.Nsamples += int(N)", "def getNumViews(self):\n\n # Compute number of views of each 2D points\n self.num_views = np.sum( np.sum(self.pts2D, axis = 0) != 0, 1 )\n return self.num_views", "def 
pobj_counts(pcode_obj):\n pcode = (pcode_obj.asDict())['pcode'][0] # no multiple pcode blocks - no delimiter\n counts = {'galleries': 0, 'spreads': 0, 'layouts': 0, 'panelgroups': 0}\n # , 'panels': 0, 'skips': 0 }\n galleries = pcode.pop('gallery', '')\n counts['galleries'] = len(galleries)\n for gallery in galleries:\n spreads = gallery.pop('spread', '')\n counts['spreads'] += len(spreads)\n for spread in spreads:\n layouts = spread.pop('layout', '')\n counts['layouts'] += len(layouts)\n for layout in layouts:\n panelgroups = layout.pop('panelgroup', '')\n counts['panelgroups'] += len(panelgroups)\n return counts", "def load_data(self):\n super(MudderyObjectCreater, self).load_data()\n \n data = self.get_data_record()\n if not data:\n return\n \n # set common object's info\n self.obj_list = {}\n\n for obj in data.obj_list.split(\",\"):\n obj_key = \"\"\n number = 0\n arg = obj.split(\":\", 1)\n if len(arg) == 1:\n obj_key = arg[0]\n number = 1\n elif len(arg) >= 2:\n obj_key = arg[0]\n number = int(arg[1])\n\n self.obj_list[obj_key] = number", "def preview_count(self, conditions):\r\n\r\n url = '{0}/{1}'.format(self.get_url(), 'preview/count')\r\n\r\n view = {'view': conditions.copy()}\r\n request = http.Request('POST', url, view)\r\n return request, parsers.parse_json", "def get_total_cameras(self) -> int:\n return self.num_cameras", "def readImages(self):\r\n\r\n #Read the file camera.csv for the image file name\r\n lines = [line.strip() for line in open(self.cameraFile)]\r\n i = 0;\r\n\tself.centers = []\r\n\tself.lefts = []\r\n\tself.rights = []\r\n\r\n for line in lines:\r\n info = line.split(',')\r\n \r\n\r\n if info[0] == 'seq':\r\n i += 1\r\n continue\r\n \r\n if info[4] == 'left_camera':\r\n self.lefts.append(info)\r\n if info[4] == 'center_camera':\r\n self.centers.append(info)\r\n if info[4] == 'right_camera':\r\n self.rights.append(info)\r\n i += 1\r\n\r\n print \"Total Frames: %d \" % (len(self.centers))", "def getViewPortUserCount(self):\n logger.debug('Getting map view port user count...')\n elements = get_elements_by_css(\".leaflet-marker-icon.srcCluster\")\n users = 0\n for element in elements:\n users += int(get_text(element))\n return users", "def get_about_count_results(soup):\n title = soup.find('div', {'id': 'gs_ab_md'})\n if title:\n title = title.find('div', {'class': 'gs_ab_mdw'})\n if title:\n count_papers = title.text\n if count_papers:\n count_papers = count_papers.split(' ')[1].replace(',', '')\n else:\n count_papers = len(soup.find_all('h3', class_=\"gs_rt\"))\n try:\n int(count_papers)\n except:\n count_papers = title.text.split(' ')[0].replace(',', '')\n else:\n count_papers = len(soup.find_all('h3', class_=\"gs_rt\"))\n return int(count_papers)", "def get_data_info(data_path):\n if data_path.endswith('.npz'):\n data = np.load(data_path)\n labels = data['labels'][...]\n example_img = data['images'][:data['ids'][1]]\n out = {'size': len(labels),\n 'num_classes': len(np.unique(labels)),\n 'is_segment': False\n }\n elif data_path.endswith('.csv') or data_path.endswith('.txt'): # list of datasets\n lst = pd.read_csv(data_path)\n base_dir = os.path.dirname(data_path)\n segment0 = os.path.join(base_dir, lst['path'].tolist()[0])\n data = np.load(segment0)\n example_img = data['images'][:data['ids'][1]]\n out = {'size': sum(lst['N'].tolist()),\n 'num_classes': int(lst['num_classes'][0]),\n 'is_segment': True\n }\n else:\n raise TypeError(\"Error! 
dataset not supported.\")\n vocab_size = max(example_img) + 1\n out['vocab_size'] = vocab_size\n out['SEP'] = vocab_size - 3\n out['SOS'] = vocab_size - 2\n out['EOS'] = vocab_size - 1\n return out", "def N_POINTS(self) -> int:\n try:\n with self.fs.open(\n self.get_url().replace(\".\" + self.erddap.response, \".ncHeader\")\n ) as of:\n ncHeader = of.read().decode(\"utf-8\")\n lines = [line for line in ncHeader.splitlines() if \"row = \" in line][0]\n return int(lines.split(\"=\")[1].split(\";\")[0])\n except Exception:\n pass", "def total_data(map_index, next_sse_index, ss_def, contacts_def):\n no_of_contacts = 0\n contacts_true = contacts_def.keys()\n start, end = ss_def[next_sse_index][3], ss_def[next_sse_index][4]\n for i in range(start, end + 1):\n if i in contacts_true:\n contacts = contacts_def[i]\n for contact in contacts:\n for index in map_index:\n tstart, tend = ss_def[index][3], ss_def[index][4]\n if contact in range(tstart, tend + 1):\n no_of_contacts += 1\n return no_of_contacts", "def split_show_views(line):\n # split the input line in word and count on the comma\n show,views=line.split(\",\")\n # turn the count to an integer \n views=int(views)\n return (show, views)", "def lens_json(filename):\n lens = []\n with open(filename, \"r\") as f:\n print(\"Loading JSON...\")\n data = json.load(f)\n summaries = data[\"summaries\"]\n hyps = map(lambda s: s[\"hypothesis\"], summaries)\n for h in hyps:\n lens.append(len(h.split()))\n\n return lens", "def get_data(self):\n data = list(IgnitionRowPredictionTobit.objects.all().order_by('-pub_date')[:self.num_ticks].values())\n two_hours = data[::-1]\n num_players_data = [[elem['num_players_{}'.format(key)] for elem in two_hours] for key in self.keys]\n return num_players_data", "def info(self, list: list[int], /) -> list[int]:", "def get_statistic():\n\n data = get_data_from_URL(url)[\"data\"]\n results = dict()\n\n for genre in data:\n # get information about one genre\n genre_url = f\"{url}/{genre['id']}/artists\"\n genre_data = get_data_from_URL(genre_url)[\"data\"]\n\n nb_fan = 0\n for artist in genre_data:\n # get information about one artist (number of fans)\n art_data = get_data_from_URL(\n f'https://api.deezer.com/artist/{artist[\"id\"]}')\n nb_fan += art_data[\"nb_fan\"]\n\n # add to dictionary received information\n results[genre[\"name\"]] = (len(genre_data), nb_fan)\n\n return results", "async def people(self, context):\n collection = db['people']\n person_count = []\n count_dict = {}\n for person in collection.find({}, {'_id': 0, 'person': 1}):\n person_count.append(person['person'])\n for person in list(set(person_count)):\n count_dict[person] = person_count.count(person)\n person_print = [f'`{k.capitalize()}: {v}`\\t' for k, v in sorted(count_dict.items())]\n\n await context.send('Current Image Totals:\\n')\n await context.send(''.join(person_print))", "def parse_line(line):\n fields = line.split(',')\n age = int(fields[2])\n num_friends = int(fields[3])\n\n return age, num_friends", "def Points_Counting(self):\n return len(self.__traectory_list)", "def count_support(projection):\n\tprev_id = -1\n\tsize = 0\n\tfor p in projection:\n\t\tif prev_id != p.id:\n\t\t\tprev_id = p.id\n\t\t\tsize += 1\n\treturn size", "def n_items(self) -> int:\n return len(self._data_vars)", "def _get_item_lengths(self) -> List[int]:\n return [len(x[0]) for x in self.data]", "def num_of_images(self):\n return len(self.data['image_infos'])", "def get_data(self):\n data = 
list(IgnitionRow.objects.all().order_by('-pub_date')[:self.num_ticks].values())\n two_hours = data[::-1]\n #two_hours = data\n num_players_data = [[max(min(elem['num_players_{}'.format(key)],50),0) for elem in two_hours] for key in self.keys]\n return num_players_data", "def get_number_of_activities(self):\n self.__load_activities_from_file_into_memory()\n return super().get_number_of_activities()", "def _count_shapes(self, shape_data : dict) -> dict:\n shape_count = {}\n for item in shape_data:\n item_shape_type = item.get('shapeType')\n if item_shape_type not in shape_count:\n shape_count[item_shape_type] = 1\n else:\n shape_count[item_shape_type] += 1\n return shape_count", "def count_exsiting_data(target_dir, check_all_number=True):\n num_color = len(glob.glob(osp.join(target_dir, '*', 'color', '*png')))\n\n if not check_all_number:\n num_color\n\n num_depth = len(glob.glob(osp.join(target_dir, '*', 'depth', '*npy')))\n num_camera_info = len(glob.glob(osp.join(\n target_dir, '*', 'camera_info', '*yaml')))\n\n assert num_color == num_depth == num_camera_info,\\\n '{} num_color:{} num_depth:{}, num_camera_info:{}'.format(\n target_dir, num_color, num_depth, num_camera_info)\n\n return num_color", "def display_players_list_length(players_list):\r\n print('Nombre de joueurs importés: ', len(players_list))", "def get_cameras_number():\n lib.initlib()\n return lib.is_GetNumberOfCameras()", "def find_people(data):\n youngest_idx = 0\n oldest_idx = 0\n\n for index, item in enumerate(data):\n if item['age'] < data[youngest_idx]['age'] and item['age'] > 0:\n youngest_idx = index\n if item['age'] > data[oldest_idx]['age'] and item['age'] < 80:\n oldest_idx = index\n return youngest_idx, oldest_idx", "def _get_count(results):\n return len(results)", "def count(self):\n return len(self.read_ints())", "def viewdata(data):\n\n print('_' * 50)\n print('Number of Results: ' + str(data[0]['numResults']))\n print('\\nSearchURL: ' + data[0]['searchURL'])\n print('_' * 50)\n\n i = 1\n for m in data[1]:\n print(str(i) + '. 
')\n for n in m:\n print(str(n) + ': ' + str(m[n]))\n i += 1\n print('\\n')", "def count(self):\n return self.data_container.count", "def count(self):\n return len(self.wallpapers)", "def total_candidate_mentions(self):\n total_candidate_mentions = self.sentence_data().loc[:, self.candidates].sum(axis = 0, skipna = True)\n total_candidate_mentions = total_candidate_mentions.to_frame(name = 'count').rename_axis('candidate').reset_index()\n \n self._total_candidate_mentions = total_candidate_mentions\n \n return self._total_candidate_mentions", "def resultCounter(detections):\n counter = 0\n for attribute, value in classIterator(detections):\n if 'crease' in attribute:\n counter += len(value)\n return counter", "def _get_reviewers_and_counts(project_name, from_datetime):\n reviewers_info = []\n for reviewer in _get_reviewers(project_name, from_datetime):\n reviewer_name = reviewer.full_name\n review_count = _get_reviewer_change_count(reviewer, project_name,\n from_datetime)\n comment_count = _get_reviewer_comment_count(reviewer, project_name,\n from_datetime)\n reviewers_info.append(\n _create_reviewer_info(reviewer_name, review_count,\n comment_count))\n\n return reviewers_info", "def construct_personal_group_data(content_list_of_dictionaries, own_id, own_uname, their_uname, own_avurl, their_avurl):\n\tfor dictionary in content_list_of_dictionaries:\n\t\tis_own_blob = True if dictionary['id'] == str(own_id) else False \n\t\twhich_blob = dictionary.get(\"which_blob\",None) # identifies 'nor' (normal), 'res' (response), 'action', 'notif' (notification) blobs\n\t\tif is_own_blob:\n\t\t\tdictionary[\"username\"] = own_uname\n\t\t\tdictionary[\"av_url\"] = own_avurl\n\t\telse:\n\t\t\tdictionary[\"username\"] = their_uname\n\t\t\tdictionary[\"av_url\"] = their_avurl\n\t\tif which_blob == 'res':\n\t\t\tdictionary[\"res_time\"] = float(dictionary[\"res_time\"])\n\t\t\tif is_own_blob:\n\t\t\t\tdictionary[\"t_username\"] = their_uname \n\t\t\t\tdictionary[\"t_av_url\"] = their_avurl\n\t\t\telse:\n\t\t\t\tdictionary[\"t_username\"] = own_uname\n\t\t\t\tdictionary[\"t_av_url\"] = own_avurl\n\t\telif which_blob in ('action','notif'):\n\t\t\tif is_own_blob:\n\t\t\t\tdictionary[\"t_username\"] = their_uname \n\t\t\t\tdictionary[\"t_av_url\"] = their_avurl\n\t\t\telse:\n\t\t\t\tdictionary[\"t_username\"] = own_uname\n\t\t\t\tdictionary[\"t_av_url\"] = own_avurl\n\t\telse:\n\t\t\t\"\"\"\n\t\t\tDegree of completeness (of retrieved metadata):\n\n\t\t\t'0': no metadata retrieved\n\t\t\t'1': just image retrieved\n\t\t\t'2': just title retrieved\n\t\t\t'3': just desc retrieved\n\t\t\t'4': just img and img_dim retrieved\n\t\t\t'5': just desc and img retrieved\n\t\t\t'6': just title and img retrieved\n\t\t\t'7': just desc and title retrieved\n\t\t\t'8': just title, img and img_dim retrieved\n\t\t\t'9': just desc, img and img_dim retrieved\n\t\t\t'10': just desc, title and img retrieved\n\t\t\t'11': desc, title, img and img_dim retrieved\n\t\t\t\"\"\"\n\t\t\tnormal_chat = []\n\t\t\tfor i in range(1,int(dictionary[\"idx\"])+1):\n\t\t\t\tidx = str(i)\n\t\t\t\tdoc = 'doc'+idx\n\t\t\t\thas_url_meta = doc in dictionary\n\t\t\t\tif has_url_meta and dictionary['type'+idx] == 'text':\n\t\t\t\t\tmeta_complete = dictionary[doc]\n\t\t\t\t\t# add meta_complete in every 5th index (i.e. 
tup.5)\n\t\t\t\t\t# add meta_data in this order: url, desc, title, img, img_hw_ratio, 'yt' - youtube (add empty index in case data doesn't exist - useful in personal_group.html)\n\t\t\t\t\tif meta_complete == '1':\n\t\t\t\t\t\t# just image retrieved\n\t\t\t\t\t\tnormal_chat.append((dictionary['status'+idx], idx, 'text', dictionary['text'+idx], float(dictionary['time'+idx]),'1',\\\n\t\t\t\t\t\t\tdictionary['url'+idx],'','',dictionary['url_img'+idx],'',dictionary['yt'+idx]))\n\t\t\t\t\telif meta_complete == '2':\n\t\t\t\t\t\t# just title retrieved\n\t\t\t\t\t\tnormal_chat.append((dictionary['status'+idx], idx, 'text', dictionary['text'+idx], float(dictionary['time'+idx]),'2',\\\n\t\t\t\t\t\t\tdictionary['url'+idx],'',dictionary['url_title'+idx],'','',dictionary['yt'+idx]))\n\t\t\t\t\telif meta_complete == '3':\n\t\t\t\t\t\t# just desc retrieved\n\t\t\t\t\t\tnormal_chat.append((dictionary['status'+idx], idx, 'text', dictionary['text'+idx], float(dictionary['time'+idx]),'3',\\\n\t\t\t\t\t\t\tdictionary['url'+idx], dictionary['url_desc'+idx],'','','',dictionary['yt'+idx]))\n\t\t\t\t\telif meta_complete == '4':\n\t\t\t\t\t\t# img and img_dim\n\t\t\t\t\t\tnormal_chat.append((dictionary['status'+idx], idx, 'text', dictionary['text'+idx], float(dictionary['time'+idx]),'4',\\\n\t\t\t\t\t\t\tdictionary['url'+idx],'','',dictionary['url_img'+idx],dictionary['url_hw_ratio'+idx],dictionary['yt'+idx]))\n\t\t\t\t\telif meta_complete == '5':\n\t\t\t\t\t\t# desc and img\n\t\t\t\t\t\tnormal_chat.append((dictionary['status'+idx], idx, 'text', dictionary['text'+idx], float(dictionary['time'+idx]),'5',\\\n\t\t\t\t\t\t\tdictionary['url'+idx], dictionary['url_desc'+idx],'',dictionary['url_img'+idx],'',dictionary['yt'+idx]))\n\t\t\t\t\telif meta_complete == '6':\n\t\t\t\t\t\t# title and img\n\t\t\t\t\t\tnormal_chat.append((dictionary['status'+idx], idx, 'text', dictionary['text'+idx], float(dictionary['time'+idx]),'6',\\\n\t\t\t\t\t\t\tdictionary['url'+idx],'',dictionary['url_title'+idx],dictionary['url_img'+idx],'',dictionary['yt'+idx]))\n\t\t\t\t\telif meta_complete == '7':\n\t\t\t\t\t\t# desc and title\n\t\t\t\t\t\tnormal_chat.append((dictionary['status'+idx], idx, 'text', dictionary['text'+idx], float(dictionary['time'+idx]),'7',\\\n\t\t\t\t\t\t\tdictionary['url'+idx],dictionary['url_desc'+idx],dictionary['url_title'+idx],'','',dictionary['yt'+idx]))\n\t\t\t\t\telif meta_complete == '8':\n\t\t\t\t\t\t# title, img and img_dim\n\t\t\t\t\t\tnormal_chat.append((dictionary['status'+idx], idx, 'text', dictionary['text'+idx], float(dictionary['time'+idx]),'8',\\\n\t\t\t\t\t\t\tdictionary['url'+idx],'',dictionary['url_title'+idx],dictionary['url_img'+idx],dictionary['url_hw_ratio'+idx],\\\n\t\t\t\t\t\t\tdictionary['yt'+idx]))\n\t\t\t\t\telif meta_complete == '9':\n\t\t\t\t\t\t# desc, img and img_dim\n\t\t\t\t\t\tnormal_chat.append((dictionary['status'+idx], idx, 'text', dictionary['text'+idx], float(dictionary['time'+idx]),'9',\\\n\t\t\t\t\t\t\tdictionary['url'+idx],dictionary['url_desc'+idx],'',dictionary['url_img'+idx],dictionary['url_hw_ratio'+idx],\\\n\t\t\t\t\t\t\tdictionary['yt'+idx]))\n\t\t\t\t\telif meta_complete == '10':\n\t\t\t\t\t\t# desc, title and img\n\t\t\t\t\t\tnormal_chat.append((dictionary['status'+idx], idx, 'text', dictionary['text'+idx], float(dictionary['time'+idx]),'10',\\\n\t\t\t\t\t\t\tdictionary['url'+idx],dictionary['url_desc'+idx],dictionary['url_title'+idx],dictionary['url_img'+idx],'',\\\n\t\t\t\t\t\t\tdictionary['yt'+idx]))\n\t\t\t\t\telif meta_complete == '11':\n\t\t\t\t\t\t# desc, 
title, img and img_dim\n\t\t\t\t\t\tnormal_chat.append((dictionary['status'+idx], idx, 'text', dictionary['text'+idx], float(dictionary['time'+idx]),'11',\\\n\t\t\t\t\t\t\tdictionary['url'+idx],dictionary['url_desc'+idx],dictionary['url_title'+idx],dictionary['url_img'+idx],\\\n\t\t\t\t\t\t\tdictionary['url_hw_ratio'+idx],dictionary['yt'+idx]))\n\t\t\t\t\telse:\n\t\t\t\t\t\t# no meaningful metadata\n\t\t\t\t\t\tnormal_chat.append((dictionary['status'+idx], idx, 'text', dictionary['text'+idx], float(dictionary['time'+idx])))\n\t\t\t\telif has_url_meta and dictionary['type'+idx] == 'img':\n\t\t\t\t\tmeta_complete = dictionary[doc]\n\t\t\t\t\t# add meta_complete in each 11th index (i.e. tup.11)\n\t\t\t\t\t# add meta_data in this order: url, desc, title, img, img_hw_ratio, 'yt' - youtube (add empty index in case data doesn't exist - useful in personal_group.html)\n\t\t\t\t\tif meta_complete == '1':\n\t\t\t\t\t\t# just image retrieved\n\t\t\t\t\t\tnormal_chat.append((dictionary['status'+idx], idx, 'img', dictionary['img'+idx], float(dictionary['time'+idx]), \\\n\t\t\t\t\t\t\tdictionary['img_s_caption'+idx],dictionary['img_caption'+idx],dictionary['hidden'+idx],dictionary['img_width'+idx],\\\n\t\t\t\t\t\t\tdictionary['img_hw_ratio'+idx],dictionary['img_id'+idx],'1',dictionary['url'+idx],'','',dictionary['url_img'+idx],'',\\\n\t\t\t\t\t\t\tdictionary['yt'+idx]))\n\t\t\t\t\telif meta_complete == '2':\n\t\t\t\t\t\t# just title retrieved\n\t\t\t\t\t\tnormal_chat.append((dictionary['status'+idx], idx, 'img', dictionary['img'+idx], float(dictionary['time'+idx]), \\\n\t\t\t\t\t\t\tdictionary['img_s_caption'+idx],dictionary['img_caption'+idx],dictionary['hidden'+idx],dictionary['img_width'+idx],\\\n\t\t\t\t\t\t\tdictionary['img_hw_ratio'+idx],dictionary['img_id'+idx],'2',dictionary['url'+idx],'',dictionary['url_title'+idx],'','',\\\n\t\t\t\t\t\t\tdictionary['yt'+idx]))\n\t\t\t\t\telif meta_complete == '3':\n\t\t\t\t\t\t# just desc retrieved\n\t\t\t\t\t\tnormal_chat.append((dictionary['status'+idx], idx, 'img', dictionary['img'+idx], float(dictionary['time'+idx]), \\\n\t\t\t\t\t\t\tdictionary['img_s_caption'+idx],dictionary['img_caption'+idx],dictionary['hidden'+idx],dictionary['img_width'+idx],\\\n\t\t\t\t\t\t\tdictionary['img_hw_ratio'+idx],dictionary['img_id'+idx],'3',dictionary['url'+idx],dictionary['url_desc'+idx],'','','',\\\n\t\t\t\t\t\t\tdictionary['yt'+idx]))\n\t\t\t\t\telif meta_complete == '4':\n\t\t\t\t\t\t# img and img_dim\n\t\t\t\t\t\tnormal_chat.append((dictionary['status'+idx], idx, 'img', dictionary['img'+idx], float(dictionary['time'+idx]), \\\n\t\t\t\t\t\t\tdictionary['img_s_caption'+idx],dictionary['img_caption'+idx],dictionary['hidden'+idx],dictionary['img_width'+idx],\\\n\t\t\t\t\t\t\tdictionary['img_hw_ratio'+idx],dictionary['img_id'+idx],'4',dictionary['url'+idx],'','',dictionary['url_img'+idx],\\\n\t\t\t\t\t\t\tdictionary['url_hw_ratio'+idx],dictionary['yt'+idx]))\n\t\t\t\t\telif meta_complete == '5':\n\t\t\t\t\t\t# desc and img\n\t\t\t\t\t\tnormal_chat.append((dictionary['status'+idx], idx, 'img', dictionary['img'+idx], float(dictionary['time'+idx]), \\\n\t\t\t\t\t\t\tdictionary['img_s_caption'+idx],dictionary['img_caption'+idx],dictionary['hidden'+idx],dictionary['img_width'+idx],\\\n\t\t\t\t\t\t\tdictionary['img_hw_ratio'+idx],dictionary['img_id'+idx],'5',dictionary['url'+idx],dictionary['url_desc'+idx],'',\\\n\t\t\t\t\t\t\tdictionary['url_img'+idx],'',dictionary['yt'+idx]))\n\t\t\t\t\telif meta_complete == '6':\n\t\t\t\t\t\t# title and 
img\n\t\t\t\t\t\tnormal_chat.append((dictionary['status'+idx], idx, 'img', dictionary['img'+idx], float(dictionary['time'+idx]), \\\n\t\t\t\t\t\t\tdictionary['img_s_caption'+idx],dictionary['img_caption'+idx],dictionary['hidden'+idx],dictionary['img_width'+idx],\\\n\t\t\t\t\t\t\tdictionary['img_hw_ratio'+idx],dictionary['img_id'+idx],'6',dictionary['url'+idx],'',dictionary['url_title'+idx],\\\n\t\t\t\t\t\t\tdictionary['url_img'+idx],'',dictionary['yt'+idx]))\n\t\t\t\t\telif meta_complete == '7':\n\t\t\t\t\t\t# desc and title\n\t\t\t\t\t\tnormal_chat.append((dictionary['status'+idx], idx, 'img', dictionary['img'+idx], float(dictionary['time'+idx]), \\\n\t\t\t\t\t\t\tdictionary['img_s_caption'+idx],dictionary['img_caption'+idx],dictionary['hidden'+idx],dictionary['img_width'+idx],\\\n\t\t\t\t\t\t\tdictionary['img_hw_ratio'+idx],dictionary['img_id'+idx],'7',dictionary['url'+idx],dictionary['url_desc'+idx],\\\n\t\t\t\t\t\t\tdictionary['url_title'+idx],'','',dictionary['yt'+idx]))\n\t\t\t\t\telif meta_complete == '8':\n\t\t\t\t\t\t# title, img and img_dim\n\t\t\t\t\t\tnormal_chat.append((dictionary['status'+idx], idx, 'img', dictionary['img'+idx], float(dictionary['time'+idx]), \\\n\t\t\t\t\t\t\tdictionary['img_s_caption'+idx],dictionary['img_caption'+idx],dictionary['hidden'+idx],dictionary['img_width'+idx],\\\n\t\t\t\t\t\t\tdictionary['img_hw_ratio'+idx],dictionary['img_id'+idx],'8',dictionary['url'+idx],'',dictionary['url_title'+idx],\\\n\t\t\t\t\t\t\tdictionary['url_img'+idx],dictionary['url_hw_ratio'+idx],dictionary['yt'+idx]))\n\t\t\t\t\telif meta_complete == '9':\n\t\t\t\t\t\t# desc, img and img_dim\n\t\t\t\t\t\tnormal_chat.append((dictionary['status'+idx], idx, 'img', dictionary['img'+idx], float(dictionary['time'+idx]), \\\n\t\t\t\t\t\t\tdictionary['img_s_caption'+idx],dictionary['img_caption'+idx],dictionary['hidden'+idx],dictionary['img_width'+idx],\\\n\t\t\t\t\t\t\tdictionary['img_hw_ratio'+idx],dictionary['img_id'+idx],'9',dictionary['url'+idx],dictionary['url_desc'+idx],'',\\\n\t\t\t\t\t\t\tdictionary['url_img'+idx],dictionary['url_hw_ratio'+idx],dictionary['yt'+idx]))\n\t\t\t\t\telif meta_complete == '10':\n\t\t\t\t\t\t# desc, title and img\n\t\t\t\t\t\tnormal_chat.append((dictionary['status'+idx], idx, 'img', dictionary['img'+idx], float(dictionary['time'+idx]), \\\n\t\t\t\t\t\t\tdictionary['img_s_caption'+idx],dictionary['img_caption'+idx],dictionary['hidden'+idx],dictionary['img_width'+idx],\\\n\t\t\t\t\t\t\tdictionary['img_hw_ratio'+idx],dictionary['img_id'+idx],'10',dictionary['url'+idx],dictionary['url_desc'+idx],\\\n\t\t\t\t\t\t\tdictionary['url_title'+idx],dictionary['url_img'+idx],'',dictionary['yt'+idx]))\n\t\t\t\t\telif meta_complete == '11':\n\t\t\t\t\t\t# desc, title, img and img_dim\n\t\t\t\t\t\tnormal_chat.append((dictionary['status'+idx], idx, 'img', dictionary['img'+idx], float(dictionary['time'+idx]), \\\n\t\t\t\t\t\t\tdictionary['img_s_caption'+idx],dictionary['img_caption'+idx],dictionary['hidden'+idx],dictionary['img_width'+idx],\\\n\t\t\t\t\t\t\tdictionary['img_hw_ratio'+idx],dictionary['img_id'+idx],'11',dictionary['url'+idx],dictionary['url_desc'+idx],\\\n\t\t\t\t\t\t\tdictionary['url_title'+idx],dictionary['url_img'+idx],dictionary['url_hw_ratio'+idx],dictionary['yt'+idx]))\n\t\t\t\t\telse:\n\t\t\t\t\t\t# no meaningful metadata\n\t\t\t\t\t\tnormal_chat.append((dictionary['status'+idx], idx, 'img', dictionary['img'+idx], float(dictionary['time'+idx]), 
\\\n\t\t\t\t\t\t\tdictionary['img_s_caption'+idx],dictionary['img_caption'+idx],dictionary['hidden'+idx],dictionary['img_width'+idx],\\\n\t\t\t\t\t\t\tdictionary['img_hw_ratio'+idx],dictionary['img_id'+idx]))\n\t\t\t\telif dictionary['type'+idx] == 'text':\n\t\t\t\t\tnormal_chat.append((dictionary['status'+idx], idx, 'text', dictionary['text'+idx], float(dictionary['time'+idx])))\n\t\t\t\telif dictionary['type'+idx] == 'img':\n\t\t\t\t\tnormal_chat.append((dictionary['status'+idx], idx, 'img', dictionary['img'+idx], float(dictionary['time'+idx]), \\\n\t\t\t\t\t\tdictionary['img_s_caption'+idx],dictionary['img_caption'+idx],dictionary['hidden'+idx],dictionary['img_width'+idx],\\\n\t\t\t\t\t\tdictionary['img_hw_ratio'+idx],dictionary['img_id'+idx]))\n\t\t\t\telif dictionary['type'+idx] == 'shared_img':\n\t\t\t\t\tnormal_chat.append((dictionary['status'+idx], idx, 'shared_img', dictionary['shared_img'+idx], float(dictionary['time'+idx]), \\\n\t\t\t\t\t\tdictionary['img_s_caption'+idx],dictionary['img_caption'+idx],dictionary['hidden'+idx],dictionary['img_width'+idx],\\\n\t\t\t\t\t\tdictionary['img_hw_ratio'+idx],dictionary['img_id'+idx],dictionary['owner_uname'+idx].decode('utf-8')))\n\t\t\t\telse:\n\t\t\t\t\t# append nothing - this case shouldn't arise\n\t\t\t\t\tpass\n\t\t\tdictionary[\"iterator\"] = normal_chat\n\treturn content_list_of_dictionaries", "def member_count(self):\n return len(self.members)", "def elemCount(memoryManager, paramsList):\n handleEmpty(paramsList, \"count elements of\")\n head = paramsList[0]\n \n if not validateList(head):\n raise Exception('Tried to get element count of non-list')\n \n def countHelper(head):\n if head == None:\n return 0\n\n if type(head) == float:\n return 1\n elif (validateList(head)):\n acum = 0\n for e in head:\n acum += countHelper(e)\n return acum\n return 0\n\n size = countHelper(head)\n return [float(size)]", "def get_camera_metadata(self):\n return self.camera.getHeight(), self.camera.getWidth(), 4 # channels", "def __len__(self):\n self.__load_persons_from_file_into_memory()\n return super().__len__()", "def _count(self):\n if self._count_valid:\n return self._total_results\n\n result = self._cb.get_object(self._doc_class.urlobject.format(self._cb.credentials.org_key))\n results = result.get(\"results\", [])\n\n self._total_results = len(results)\n self._count_valid = True\n\n return self._total_results", "def count_gender(data):\n data = column_to_list(data, -2)\n male = data.count(\"Male\")\n female = data.count(\"Female\")\n return [male, female]", "def count_elements_in_dataset(dataset):\n return dataset.count()", "def trainCount(\n trainData, \n questionType,\n questionDict,\n questionIdict, \n objDict, \n objIdict,\n numAns):\n count_wa = np.zeros((len(objIdict), numAns))\n count_a = np.zeros((numAns))\n objIds = extractObjId(\n trainData[0], \n questionType, \n questionDict, \n questionIdict)\n for i in range(objIds.shape[0]):\n objId = objIds[i]\n obj = questionIdict[objId - 1]\n ansId = trainData[1][i, 0]\n objId2 = objDict[obj]\n count_wa[objId2, ansId] += 1\n count_a[ansId] += 1\n # Add UNK count\n count_a[-1] += 1\n return count_wa, count_a", "def getViewPortAppCount(self):\n logger.debug('Getting map view port app count...')\n elements = get_elements_by_css(\".leaflet-marker-icon.dstCluster\")\n users = 0\n for element in elements:\n users += int(get_text(element))\n return users" ]
[ "0.6483834", "0.5873225", "0.56902164", "0.559616", "0.5581715", "0.55694574", "0.55310565", "0.5526454", "0.5519276", "0.5515513", "0.55004495", "0.543157", "0.53432065", "0.5338561", "0.5330016", "0.5312213", "0.5268463", "0.5256779", "0.52174443", "0.520235", "0.5201785", "0.5200194", "0.517756", "0.51623195", "0.5141766", "0.51362413", "0.51362413", "0.5127095", "0.5126265", "0.51216716", "0.5096596", "0.5082647", "0.5077629", "0.506917", "0.506894", "0.50657755", "0.50609237", "0.50468314", "0.5040548", "0.50389063", "0.5018998", "0.5014177", "0.5013131", "0.5009027", "0.500726", "0.50043094", "0.5002028", "0.49941435", "0.49901167", "0.49889818", "0.4956754", "0.49448854", "0.492521", "0.4917445", "0.49116713", "0.49102327", "0.4905235", "0.49050504", "0.48993725", "0.4899213", "0.4864357", "0.48605558", "0.4860036", "0.48597085", "0.48592576", "0.48557115", "0.48547235", "0.48513734", "0.4850389", "0.48445022", "0.48433304", "0.4842963", "0.48397297", "0.4836078", "0.48277584", "0.4822936", "0.48176917", "0.48056147", "0.4803911", "0.47999752", "0.4796851", "0.47936755", "0.4792293", "0.4785532", "0.47832987", "0.4775163", "0.47746527", "0.4772083", "0.477009", "0.47592154", "0.47495052", "0.47459903", "0.4741745", "0.4735382", "0.47343278", "0.4733224", "0.4732199", "0.47315577", "0.47300616", "0.472716" ]
0.6852167
0
Returns the number of training person identities.
def get_num_pids(self, data):
    return self.parse_data(data)[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def training_set_count(self) -> int:\n return pulumi.get(self, \"training_set_count\")", "def get_number_of_training(self):\n return self.n_train", "def num_training_examples(self):", "def get_nb_personne(self):\n return self.nb_personne", "def participant_count(self) -> int:\n return self.participants.count() + 1", "def get_num_train_samples(self):\n raise NotImplementedError", "def num_train_instances(self):\n raise NotImplementedError()", "def people_count(self):\n return len(self.__users)", "def number_of_employees(self) -> object:\n return self._number_of_employees", "def studNumber(self):\n return len(self.students)", "def get_speaker_number(self):\n return len(self.metadata)", "def num_train_samples(self):\n if self._num_training_samples is None:\n for key, value in self._training_data.items():\n self._num_training_samples[key] = len(value[0])\n return self._num_training_samples", "def getTotalIndividualCount(self):\r\n return self._n", "def number_of_players(self) -> int:\n return self.param.number_of_players", "def get_num_sequence(self):\n return len(self.study_list)", "def Nprofiles(self):\n return self._nprofiles", "def GetTrainSampleCount(self) :\r\n\t\ttry :\r\n\t\t\tself.DB_Cursor.execute(self.SQLCMDs['SampleSetCount'],(0,))\r\n\t\t\tCurSampleCount = self.DB_Cursor.fetchone()[0]\r\n\t\texcept Exception as detail:\r\n\t\t\tlogging.error(\"Failed to get count of training samples in database: %s\"%detail)\r\n\t\treturn CurSampleCount", "def people(self):\n count = self.db.query(\n 'select count(id) as people_count from \\\n (select id from staff union all select id from fellows)')\n return count.all()[0]['people_count']", "def get_number_of_persons(self):\n self.__load_persons_from_file_into_memory()\n return super().get_number_of_persons()", "def __len__(self):\n if self.settype == \"train\":\n return 64000\n else:\n return len(self.list_ids)", "def get_number_of_testing(self):\n return self.n_test", "def get_n_features(self):\n # +1 due to dummy bit\n return self.model.n_latent_features + 1", "def num_annuli(self) -> int:\n return self._num_ann", "def noOfPlayers(self):\n\t\tnumber = 0\n\t\tfor n in range(6):\n\t\t\tif self.playerList[n] != None:\n\t\t\t\tnumber = number + 1\n\t\treturn number", "def get_teacher_num(self):\n return self.teacher_set.all().count()", "def number_of_iterations(self):\n return self._solution.nit", "def n_train(self):\n return self.factors[0].shape[0]", "def nr_features(self):\n if self.is_predict_only:\n return clib.xlinear_get_int_attr(self.model_chain, \"nr_features\")\n else:\n return self.model_chain[0].nr_features", "def count_explorations():\n return exp_models.ExplorationModel.get_exploration_count()", "def Numtrials(self):\n\t\treturn self._get_attribute('numtrials')", "def IterationCount(self):\r\n\t\treturn self._get_attribute('iterationCount')", "def getN(self)->int:\n return self.n", "def n(self):\n return len(self.marks)", "def get_count_train(datagen):\n y_train = np.array([])\n\n for i in range(len(datagen)):\n y_train = np.append(y_train, datagen[i][1])\n \n age_train, count_train = np.unique(y_train, return_counts=True)\n \n return count_train", "def get_Iu(uid):\n try:\n return len(trainSet.ur[trainSet.to_inner_uid(uid)])\n except ValueError: # user was not part of the trainset\n return 0", "def num_inducing(self) -> tf.Tensor:\n raise NotImplementedError", "def node_count(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"node_count\")", "def node_count(self) -> pulumi.Input[int]:\n return pulumi.get(self, 
\"node_count\")", "def count_indications(self) -> int:\n return self._count_model(Indication)", "def get_num_petals(self):\n return self._num_petals", "def numberOfPlayers(self):\r\n return len(self.playerPreparers)", "def count(self):\n return int()", "def count(self):\n return int()", "def get_Iu(uid):\n try:\n return len(trainset.ur[trainset.to_inner_uid(uid)])\n except ValueError: # user was not part of the trainset\n return 0", "def __len__(self):\n if self.TRAIN_BOOL is True:\n count = len(self.dict_batch_1[b'data'])\n count += len(self.dict_batch_2[b'data'])\n count += len(self.dict_batch_3[b'data'])\n count += len(self.dict_batch_4[b'data'])\n count += len(self.dict_batch_5[b'data'])\n else:\n count = len(self.dict_batch_test[b'data'])\n return count", "def _get_observation_count(self):\n observation_count = 0\n for sequence in self.seq_list:\n observation_count += sequence.shape[0] \n \n return observation_count", "def num_enrolled_in(cls, course_id):\r\n enrollment_number = CourseEnrollment.objects.filter(course_id=course_id, is_active=1).count()\r\n\r\n return enrollment_number", "def present_voter_cnt(self):\n\n return len(self._present_voters())", "def node_count(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"node_count\")", "def node_count(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"node_count\")", "def __len__(self) -> int:\n return self._num_ann", "def n_evidence(self):\n return self.state.get('n_evidence', 0)", "def get_training_index():\n return list(range(0, 305))", "def node_count(self) -> int:\n return pulumi.get(self, \"node_count\")", "def get_num_nodes(profileDict):\n assert isinstance(profileDict, dict)\n\n return profileDict[\"info\"][\"number_of_nodes\"]", "def num_years():\n years = movies['Year']\n return ('num_years', years.nunique())", "def get_num_tigers(self) -> int:\n return len(self.get_all_tiger_positions())", "def get_country_count():\n numbers=country_populations.split('\\n')\n count_numbers= len(numbers)-1\n return count_numbers", "def num_rows(self) -> str:\n return pulumi.get(self, \"num_rows\")", "def get_num_features(self):\r\n \r\n return len(self[0]['x'])", "def node_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"node_count\")", "def n_users(self):\n if self._n_users is None:\n self._n_users = len(self.user_unique_vals)\n return self._n_users", "def count_players(definition):\n _, player_definition = parse_player_definition(definition)\n return (int(player_definition['left_players']) +\n int(player_definition['right_players']))", "def get_number_of_movies(self):\n raise NotImplementedError", "def count(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"count\")", "def num_node_features(self) -> int:\n data, _, _ = self[0]\n if hasattr(data, 'num_node_features'):\n return data.num_node_features\n raise AttributeError(f\"'{data.__class__.__name__}' object has no \"\n f\"attribute 'num_node_features'\")", "def get_annotation_count(self):\n return self._num_annos", "def num_eval_instances(self):\n return self.num_train_instances // 4", "def count(self) -> int:\n return pulumi.get(self, \"count\")", "def _num_nodes(self):\n return len(self._nid2partid)", "def getNumod_byerSentences(self): # !!! 
Need to rewrite this to match graph\n return len(self.__document)", "def candidate_count(self):\n return self.candidate_set.count()", "def candidate_count(self):\n return self.candidate_set.count()", "def get_num_features(self, ndim: int) -> int:\n nb_features = 0\n for feature_group in self.features_group_list:\n nb_features += feature_group.num_features(ndim)\n return nb_features", "def countSites(self):\n self.ni = len(self.sites)\n return self.ni", "def get_num_psus(self):\n return len(self._psu_list)", "def node_count(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"node_count\")", "def get_count(username):\n return get_contributor(username)[\"count\"]", "def get_num_plants(self) -> int:\r\n\r\n return len(self.plants)", "def get_num_instances(self):\n return len( self.get_instances_ids() )", "def num_examples(self):\r\n raise NotImplementedError", "def getNrFeatures(self):\n return self.featureNames.size", "def N ( self ) :\n return self.__N", "def getNeuronCount(self):\n\t\treturn self.loader.getNeuronCount()", "def sentence_count(self):\n count = 0\n for line in self.lines:\n if '.' in line:\n count += 1\n if count == 0:\n count = 1\n return count\n #return line.count('.')\n #else:\n #return 1", "def npks(self):\n try:\n return self.xp.size\n except Exception:\n return 0", "def num_feature(self):\n if self.handle is None:\n raise AttributeError('Model not loaded yet')\n out = ctypes.c_size_t()\n _check_call(_LIB.TreeliteQueryNumFeature(self.handle, ctypes.byref(out)))\n return out.value", "def get_number_of_models():\n return 8", "def get_number_of_instances(model):\n if model is None:\n return 0\n else:\n return float(len(model[0].split('d'))-2)", "def num_node_features(self):\n return self[0].num_node_features", "def get_number_of_validation(self):\n return self.n_valid", "def __len__(self):\n if self._train:\n return len(self._train_data)\n return len(self._test_data)", "def __len__(self):\n if self._train:\n return len(self._train_data)\n return len(self._test_data)", "def get_train_index():\n data_size = (NUM_CLASS - 1) * NUM_DATA_PER_CLASS\n return np.array([i for i in range(0, data_size)])", "def get_no_of_annotations(database, label, train_vids_all):\n count = 0\n for vid in train_vids_all:\n for ann in database[vid]['annotations']:\n if ann['label'] == label:\n count += 1\n return count", "def __len__(self):\n return self.nb_iterations", "def get_model_count(self):\n return len(self._model_start_i)", "def number_of_nodes(self):\n return int(self._data['number_of_nodes'])", "def __len__(self):\r\n return len(self.train_data)", "def N(self) -> int:\n return self.params.N" ]
[ "0.7130858", "0.7117933", "0.67800456", "0.664693", "0.63542926", "0.63365936", "0.6317348", "0.62643784", "0.62291634", "0.60861695", "0.6079785", "0.6076607", "0.604624", "0.6041788", "0.5999601", "0.59986293", "0.5977678", "0.5972249", "0.5956189", "0.59479755", "0.5901012", "0.5896225", "0.58803135", "0.5879439", "0.5878438", "0.5862683", "0.58518845", "0.5851409", "0.5836727", "0.58006096", "0.57884115", "0.578511", "0.5768536", "0.5763156", "0.5750875", "0.5745219", "0.574517", "0.574517", "0.57379144", "0.5732589", "0.5719292", "0.5708624", "0.5708624", "0.5706858", "0.56896424", "0.5677542", "0.56690955", "0.5667871", "0.56647277", "0.56647277", "0.56603914", "0.5658328", "0.5656308", "0.56518716", "0.5630991", "0.5626172", "0.56224835", "0.56210095", "0.5621008", "0.5605096", "0.56036806", "0.5598959", "0.5597386", "0.5588134", "0.5574157", "0.5561737", "0.5560016", "0.55579656", "0.55554354", "0.5543206", "0.5539226", "0.5535148", "0.5535148", "0.5519252", "0.5516343", "0.55143833", "0.551304", "0.55075735", "0.55058736", "0.5503213", "0.55011964", "0.5483589", "0.5483133", "0.547837", "0.54764456", "0.54737794", "0.5472339", "0.5469952", "0.54641104", "0.5463475", "0.5461319", "0.546112", "0.546112", "0.54597086", "0.54577225", "0.545555", "0.5452451", "0.5452196", "0.54507065", "0.5449485" ]
0.5565302
65
Returns the number of training cameras.
def get_num_cams(self, data):
    return self.parse_data(data)[1]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_total_cameras(self) -> int:\n return self.num_cameras", "def get_cameras_number():\n lib.initlib()\n return lib.is_GetNumberOfCameras()", "def ncameras(self):\n n = ct.c_long()\n self.lib.GetAvailableCameras(ct.pointer(n))\n return n.value", "def numberOfCamera():\n return numCams", "def get_camera_count():\n return Camera.getNumberOfCameras()", "def get_number_of_training(self):\n return self.n_train", "def num_training_examples(self):", "def num_train_instances(self):\n raise NotImplementedError()", "def get_num_of_images(self):", "def count_camera_connection(limit=10):\n print(\"searching cameras...\")\n valid_cameras = []\n\n for camera_number in range(limit):\n cap_cam = cv2.VideoCapture(camera_number, cv2.CAP_DSHOW)\n if cap_cam.isOpened():\n valid_cameras.append(camera_number)\n\n print(len(valid_cameras), \"cameras available now.\")\n return len(valid_cameras)", "def training_set_count(self) -> int:\n return pulumi.get(self, \"training_set_count\")", "def numberOfPlayers(self):\r\n return len(self.playerPreparers)", "def get_num_train_images(hparams):\n num_images_map = {\n 'imagenet': 1281167,\n 'cifar10': 50000,\n }\n if hparams.input_data.input_fn not in num_images_map:\n raise ValueError(\n f'Unknown dataset size for input_fn {hparams.input_data.input_fn}')\n\n num_images = num_images_map[hparams.input_data.input_fn]\n\n if hparams.input_data.max_samples > 0:\n return min(num_images, hparams.input_data.max_samples)\n return num_images", "def get_num_epochs(self):\n return self.__num_epochs", "def num_eval_instances(self):\n return self.num_train_instances // 4", "def get_num_instances(self):\n return len( self.get_instances_ids() )", "def num_train_samples(self):\n if self._num_training_samples is None:\n for key, value in self._training_data.items():\n self._num_training_samples[key] = len(value[0])\n return self._num_training_samples", "def num_training_steps(self) -> int:\n if self.trainer.max_steps:\n return self.trainer.max_steps\n\n limit_batches = self.trainer.limit_train_batches\n batches = len(self.train_dataloader())\n batches = (\n min(batches, limit_batches)\n if isinstance(limit_batches, int)\n else int(limit_batches * batches)\n )\n\n num_devices = max(1, self.trainer.num_gpus, self.trainer.num_processes)\n if self.trainer.tpu_cores:\n num_devices = max(num_devices, self.trainer.tpu_cores)\n\n effective_accum = self.trainer.accumulate_grad_batches * num_devices\n return (batches // effective_accum) * self.trainer.max_epochs", "def _getNumcam( self, bSeed ):\n\n\t\treturn ( ( bSeed >> 20 ) & 0xF ) + 1", "def number_of_players(self) -> int:\n return self.param.number_of_players", "def num_streams(self):\n self._num_streams = self.lib.iperf_get_test_num_streams(self._test)\n return self._num_streams", "def get_number_of_models():\n return 8", "def num_epochs(self):\n return len(self._history)", "def num_of_classes(self):\n return len(self.classes_())", "def num_of_classes(self):\n return len(self.classes_())", "def total_train_batches(self) -> int:\n return self.trainer.num_training_batches", "def size(self):\r\n return len(self._train_datas)", "def num_launches(self):\n return len(self.launches)", "def num_of_images(self):\n return len(self.data['image_infos'])", "def num_devices(self):\n\t\t\treturn cuda.Device.count()", "def num_layers(self): # -> int:\n ...", "def get_num_train_samples(self):\n raise NotImplementedError", "def count(self):\n \n return len(self.img_lst)", "def num_layers(self):\n return self._num_layers", "def 
NumberOfRobots(self):\n\t\treturn len(self.__robotList)", "def num_runs(self):\n return len(self._h5[RUNS])", "def getNumberOfImages(self):\n\t\treturn self.numberOfImages", "def n_tracers(self):\n # Extract parameters\n pzs = self.params[0]\n return len(pzs)", "def n_tracers(self):\n # Extract parameters\n pzs = self.params[0]\n return len(pzs)", "def get_cameras_list():\n lib.initlib()\n return lib.is_GetCameraList()", "def get_num_classes(self):", "def get_number_of_movies(self):\n raise NotImplementedError", "def noOfPlayers(self):\n\t\tnumber = 0\n\t\tfor n in range(6):\n\t\t\tif self.playerList[n] != None:\n\t\t\t\tnumber = number + 1\n\t\treturn number", "def num_classes(self):\n return len(self.classes)", "def n_train(self):\n return self.factors[0].shape[0]", "def get_image_count(self):\n return self._num_images", "def getNeuronCount(self):\n\t\treturn self.loader.getNeuronCount()", "def numberOfClasses(self):\n classes = self.classesAndFrames()\n return len(classes.keys())", "def get_num_frames(self):\n return self._frames.shape[0]", "def get_train_data_size(self):\n return len(self.pipeline.data['train'])", "def __len__(self):\n return math.ceil(self.number_of_images / self.batch_size)", "def _num_layers(model):\n\n if hasattr(model, \"n_layers\"): # eg. xlm\n num_layers = model.n_layers\n elif hasattr(model, \"layer\"): # eg. xlnet\n num_layers = len(model.layer)\n elif hasattr(model, \"encoder\"): # eg. bert\n num_layers = len(model.encoder.layer)\n elif hasattr(model, \"transformer\"): # eg. sentence_transformers models\n num_layers = len(model.transformer.layer)\n else:\n raise ValueError(f\"Not supported model {model} to obtain number of layers\")\n\n return num_layers", "def get_num_eval_images(hparams):\n num_images_map = {\n 'imagenet': 50000,\n 'cifar10': 10000,\n }\n if hparams.input_data.input_fn not in num_images_map:\n raise ValueError(\n f'Unknown dataset size for input_fn {hparams.input_data.input_fn}')\n\n return num_images_map[hparams.input_data.input_fn]", "def num_classes(self):\n\t\treturn len(self.classes)", "def count(self):\n return len(self.wallpapers)", "def __len__(self):\n return int(np.ceil(self.total_frame_count / self.batch_size))", "def num_classes(self):\n return self._num_classes", "def device_count() -> int:\n return flow._oneflow_internal.CudaGetDeviceCount()", "def get_face_sets_number(self):\n return len(self.content_eval.get(\"face_sets_info\"))", "def num_classes():\n return NUM_CLASSES", "def num_classes(self):\n\t\t\treturn len(self.classes)", "def get_num_cams(self, data):\n cams = set()\n for items in data:\n camid = items[2]\n cams.add(camid)\n return len(cams)", "def epoch(self):\n return len(self.history)", "def count_models(self):\n return len(self.model_list)", "def get_num_episodes(self) -> int:\n return len(self.episodes)", "def num_frames(self):\n return self._first_rgb.shape[1]", "def num_faces(self):\n return self._top_exp.number_of_faces()", "def num_examples_per_epoch(mode):\n if mode == tf.estimator.ModeKeys.TRAIN:\n return 45000\n return 5000", "def num_models():\n N = input(\"How many models would you like to test?\")\n N = int(N)\n return N", "def num_classes_a(self):\r\n return self._num_classes_a", "def get_ncores(self):\n return self._ncores", "def __len__(self):\n return int(np.ceil(len(self.image_filenames) / (self.batch_size)))", "def get_model_count(self):\n return len(self._model_start_i)", "def numRunningProcesses():\n try:\n proc = subprocess.run(\"ps -Af|grep -i \\\"online2-wav-nnet3-latgen-faster\\\"\", 
stdout=subprocess.PIPE, shell=True)\n np = (len(str(proc.stdout).split(\"\\\\n\")) - 3)\n if(np == None):\n np = 0\n return np\n except Exception as e:\n\t\t Tools.writeException(\"numRunningProcesses\", e)\n return -1", "def num_examples_per_epoch(mode):\n if mode == tf.estimator.ModeKeys.TRAIN:\n return 1281167\n return 50000", "def __len__(self):\n if self.TRAIN_BOOL is True:\n count = len(self.dict_batch_1[b'data'])\n count += len(self.dict_batch_2[b'data'])\n count += len(self.dict_batch_3[b'data'])\n count += len(self.dict_batch_4[b'data'])\n count += len(self.dict_batch_5[b'data'])\n else:\n count = len(self.dict_batch_test[b'data'])\n return count", "def number_of_launches(self):\n return self._number_of_launches", "def number_performers(self):\n return len(self.touches['device_id'].unique().tolist())", "def num_carns(self):\n return self._num_carns", "def number_of_electrodes(self):\n return self._pre_kernel.shape[1]", "def test_num_layers(self):\n\t\tdetails = self.watcher.describe()\n\t\tprint(\"Testing Keras on VGG16\")\n\t\tprint(details)\n\t\tself.assertEqual(len(details), 16)", "def _get_n_players(env):\n return len(env.action_space.spaces)", "def __len__(self):\n return self.num_images", "def __len__(self):\n return self.num_images", "def __len__(self):\n return self.num_images", "def __len__(self):\n return self.num_images", "def __len__(self):\n return self.num_images", "def num_examples_per_epoch(self):\n\t\tif self.subset == 'train':\n\t\t\treturn 50000\n\t\tif self.subset == 'validation':\n\t\t\treturn 10000", "def count(train_dir):\r\n path = train_dir\r\n count = 0\r\n for fn in os.listdir(path): #fn 表示的是文件名\r\n count = count + 1\r\n return count", "def num_parameters(self) -> int:\n if vocabulary.is_empty(self.vocab, self.config.features.configured_namespaces):\n self._LOGGER.warning(\n \"At least one vocabulary of your features is still empty! \"\n \"The number of trainable parameters usually depends on the size of your vocabulary.\"\n )\n return sum(p.numel() for p in self._model.parameters())", "def numberOfImages(self):\n return len(self.imageList)", "def Nprofiles(self):\n return self._nprofiles", "def get_number_of_devices(self):\n return self.drt_manager.get_number_of_devices()", "def number_of_eyes(img):\n return eye_cascade.detectMultiScale(img).__len__()", "def get_parameter_numbers(self) -> int:\n # TODO(jeikeilim): return the number of parameter list of each layers.\n n_param = sum([x.numel() for x in self.model.parameters()])\n return n_param", "def num_layers(self):\n\n return 2 + self.num_hidden_layers", "def num_parameters(self) -> int:\n if self._model:\n return self._model.num_parameters()\n return 0", "def GetNumberOfNetworks(self):\n return len(self.LastScan)", "def get_num_classes(self):\n return len(self.class_map_dict)", "def __len__(self):\n if self._train:\n return len(self._train_data)\n return len(self._test_data)", "def __len__(self):\n if self._train:\n return len(self._train_data)\n return len(self._test_data)" ]
[ "0.8353758", "0.80624014", "0.7873767", "0.7767602", "0.7643237", "0.7341902", "0.73000854", "0.71402013", "0.67232484", "0.6706012", "0.6674748", "0.6601394", "0.65858144", "0.6535598", "0.6527929", "0.64885134", "0.6479306", "0.6477255", "0.6447356", "0.6396966", "0.63825214", "0.6381122", "0.6370293", "0.6354671", "0.6354671", "0.63516134", "0.63508695", "0.63286597", "0.6318209", "0.6313222", "0.6302274", "0.6294521", "0.6290887", "0.6279451", "0.62776154", "0.62758183", "0.62692297", "0.62556356", "0.62556356", "0.6251444", "0.62511796", "0.62509507", "0.6246265", "0.62458545", "0.6239167", "0.6236513", "0.62305456", "0.6215822", "0.6205895", "0.61983985", "0.6194577", "0.6186385", "0.61784905", "0.6158513", "0.6152257", "0.6136921", "0.61358476", "0.61238754", "0.6115061", "0.6098346", "0.6086712", "0.60823685", "0.60789114", "0.60731214", "0.6066376", "0.60588586", "0.60558176", "0.60533506", "0.60513633", "0.6048465", "0.6042334", "0.60359305", "0.6034509", "0.60334927", "0.6032129", "0.60214496", "0.6021122", "0.60210353", "0.6007114", "0.60016114", "0.5999843", "0.599569", "0.59874", "0.59874", "0.59874", "0.59874", "0.59874", "0.5981968", "0.598071", "0.5980465", "0.5973533", "0.59676725", "0.5959559", "0.59471273", "0.5943706", "0.5932778", "0.592966", "0.59287095", "0.59226114", "0.59209496", "0.59209496" ]
0.0
-1
Combines train, query and gallery in a dataset for training.
def combine_all(self):
    combined = copy.deepcopy(self.train)

    def _combine_data(data):
        for img_path, pid, camid in data:
            if pid in self._junk_pids:
                continue
            #pdb.set_trace()
            pid = self.dataset_name + "_" + str(pid)
            camid = self.dataset_name + "_" + str(camid)
            combined.append((img_path, pid, camid))

    _combine_data(self.query)
    _combine_data(self.gallery)

    self.train = combined
    self.num_train_pids = self.get_num_pids(self.train)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def combine_all(self):\n if self._train_only:\n return\n\n combined = copy.deepcopy(self.train)\n\n # relabel pids in gallery (query shares the same scope)\n g_pids = set()\n for items in self.gallery:\n pid = items[1]\n if pid in self._junk_pids:\n continue\n g_pids.add(pid)\n pid2label = {pid: i for i, pid in enumerate(g_pids)}\n\n def _combine_data(data):\n for img_path, pid, camid, dsetid in data:\n if pid in self._junk_pids:\n continue\n pid = pid2label[pid] + self.num_train_pids\n combined.append((img_path, pid, camid, dsetid))\n\n _combine_data(self.query)\n _combine_data(self.gallery)\n\n self.train = combined\n self.num_train_pids = self.get_num_pids(self.train)", "def train(self, training_data):\n pass", "def build_all_datasets(\n cfg, tokenizer, train_valid_test_num_samples,\n):\n train_dataset = RetroQAFineTuneDataset(\n cfg.train_ds.get('file_name'),\n tokenizer,\n cfg.train_ds.get('answer_only_loss'),\n tokenizer.pad_id,\n cfg.train_ds.get('seq_length'),\n cfg.train_ds.get('add_bos'),\n cfg.train_ds.get('add_eos'),\n train_valid_test_num_samples[0],\n cfg.train_ds.get('seed'),\n cfg.train_ds.get('neighbors'),\n )\n val_dataset = RetroQAFineTuneDataset(\n cfg.val_ds.get('file_name'),\n tokenizer,\n cfg.val_ds.get('answer_only_loss'),\n tokenizer.pad_id,\n cfg.val_ds.get('seq_length'),\n cfg.val_ds.get('add_bos'),\n cfg.val_ds.get('add_eos'),\n train_valid_test_num_samples[1],\n cfg.val_ds.get('seed'),\n cfg.val_ds.get('neighbors'),\n )\n test_dataset = RetroQAFineTuneDataset(\n cfg.test_ds.get('file_name'),\n tokenizer,\n cfg.test_ds.get('answer_only_loss'),\n tokenizer.pad_id,\n cfg.test_ds.get('seq_length'),\n cfg.test_ds.get('add_bos'),\n cfg.test_ds.get('add_eos'),\n train_valid_test_num_samples[2],\n cfg.test_ds.get('seed'),\n cfg.test_ds.get('neighbors'),\n )\n\n return train_dataset, val_dataset, test_dataset", "def setup(self):\n # TODO check if need both dataset together\n self.train_dataset = ABSADataset(data_path=self.train_path, mode=self.in_mode, task=self.task, \n tokenizer=self.tokenizer, vocab=\"bert\")\n self.vocabulary = self.train_dataset.vocabulary\n\n self.eval_dataset = ABSADataset(data_path=self.dev_path, mode=self.in_mode, task=self.task,\n tokenizer=self.tokenizer, vocab=self.vocabulary)\n #self.train_restaurant = ABSADataset(data_path=RESTAURANT_TRAIN)\n #self.eval_restaurant = ABSADataset(data_path=RESTAURANT_DEV)", "def make_dataset():\n\n\tnumberOfTrials = dataset_params.num_of_samples\n\tnumberOfTrials_train = int(numberOfTrials*0.8)\n\tnumberOfTrials_test = int(numberOfTrials*0.2)\n\n\tprint(\"==================================================\")\n\tprint(\"1. Generating Train images ......\")\n\tprint(\"\\nTrain image per variation\", numberOfTrials_train)\n\tmakeDataset(numberOfTrials_train, \"train\")\n\n\tprint(\"==================================================\")\n\tprint(\"2. 
Generating Test images ......\")\n\tprint(\"\\nTest image per variation\", numberOfTrials_test)\n\tmakeDataset(numberOfTrials_test, \"test\")\n\n\tprint(\"==================================================\")\n\tprint(\"Done!!!\")", "def build_data_set(self):\n if not self.assert_data_correct():\n self.download_all_data()\n self.unpack_rename_data()\n self.split_data_characters()\n self.clean_data_fragments()\n self.create_font_data()\n if not self.assert_train_augmented():\n self.augment_train_data()\n if not self.assert_style_data_correct():\n self.download_style_data()\n self.unpack_rename_data()", "def run_train_test_split():\n # Load all documents\n conn = sq.connect(config.DB_FILE)\n documents = pd.read_sql_query('select pubmed_id, review_id, included, title, abstract from article ', conn)\n\n # Identify unique review IDs\n review_ids = documents['review_id'].unique()\n\n # Set seed for random sampling\n np.random.seed(2)\n\n # List of Reviews in the partial data set and full data set\n partial_set = list(np.random.choice(review_ids, 10, replace=False))\n full_set = list(review_ids.copy())\n\n # Load array (X) and labels (Y) of all documents\n with (open(config.DOC_TERM_MATRIX, \"rb\")) as openfile:\n X = pickle.load(openfile)\n\n y = documents['included']\n\n # Train-test split of the partial dataset\n train_test_split(X, y, partial_set, 'min_max', 'partial', review_ids)\n train_test_split(X, y, partial_set, 'tf_idf', 'partial', review_ids)\n\n # Train-test split of the full dataset\n train_test_split(X, y, full_set, 'min_max', 'full', review_ids)\n train_test_split(X, y, full_set, 'tf_idf', 'full', review_ids)", "def data_creator(config):\n train_dataset, val_dataset = LinearDataset(2, 5), LinearDataset(2, 5)\n train_loader = DataLoader(train_dataset, batch_size=config[\"batch_size\"])\n val_loader = DataLoader(val_dataset, batch_size=config[\"batch_size\"])\n return train_loader, val_loader", "def _generate_datasets(self):\n\n degrade_test = False\n if self._opts['degrade_step'] == 'test':\n degrade_test = True\n\n use_trainset_for_tests = UseTrainForTest.IDENTICAL # can be different in few shot workflow\n\n train_dataset, test_dataset = self._gen_datasets_with_options(self._opts['train_classes'],\n self._opts['test_classes'],\n is_superclass=self._opts['superclass'],\n class_proportion=self._opts['class_proportion'],\n degrade_test=degrade_test,\n degrade_type=self._opts['degrade_type'], # only relevant if degrade_test = True\n degrade_val=self._opts['min_val'], # only relevant if degrade_test = True\n recurse_train=self._is_train_recursive(),\n recurse_test=self._is_inference_recursive(),\n num_batch_repeats=self._opts['num_repeats'],\n recurse_iterations=self._opts['recurse_iterations'],\n evaluate_step=self._opts['evaluate'],\n use_trainset_for_tests=use_trainset_for_tests,\n invert_images=self._opts['invert_images'],\n min_val=self._opts['min_val'])\n return train_dataset, test_dataset", "def get_training_and_testing_sets(data, Y):\r\n data = pd.concat([data, Y], axis=1)\r\n x,y=data.shape\r\n train_X_sub1=data[0:x//6]\r\n dev_X_sub1 = data[x//6:x//6 + x//12]\r\n test_X_sub1 = data[x//6 + x//12:x//3]\r\n\r\n train_X_sub2 = data[x//3:x//3+x//6]\r\n dev_X_sub2 = data[x//6 + x//3:x//3 + x//6 + x//12]\r\n test_X_sub2 = data[x//3 + x//6 + x//12:2*x//3]\r\n\r\n train_X_sub3 = data[2*x//3:(2*x//3) +x//6]\r\n dev_X_sub3 = data[x//6 + 2*x//3: (2*x//3) + x//6 + x//12]\r\n test_X_sub3 = data[2*x//3 + x//6 + x//12:x]\r\n\r\n train_X=train_X_sub1.append(train_X_sub2,ignore_index = 
True)\r\n train_X =train_X.append(train_X_sub3,ignore_index = True)\r\n dev_X= dev_X_sub1.append(dev_X_sub2,ignore_index = True)\r\n dev_X = dev_X.append(dev_X_sub3,ignore_index = True)\r\n test_X = test_X_sub1.append(test_X_sub2,ignore_index = True)\r\n test_X = test_X.append(test_X_sub3,ignore_index = True)\r\n\r\n\r\n train_X = util.shuffle(train_X)\r\n train_X = train_X.reset_index(drop=True)\r\n\r\n dev_X = util.shuffle(dev_X)\r\n dev_X = dev_X.reset_index(drop=True)\r\n\r\n test_X = util.shuffle(test_X)\r\n test_X = test_X.reset_index(drop=True)\r\n\r\n train_X_final=train_X\r\n dev_X_final = dev_X\r\n test_X_final = test_X\r\n x, y = train_X_final.shape\r\n train_X = train_X_final.iloc[:, 0:y - 1]\r\n train_Y = train_X_final.iloc[:, y - 1]\r\n\r\n x, y = test_X_final.shape\r\n test_X = test_X_final.iloc[:, 0:y - 1]\r\n test_Y = test_X_final.iloc[:, y - 1]\r\n\r\n x, y = dev_X_final.shape\r\n dev_X = dev_X_final.iloc[:, 0:y - 1]\r\n dev_Y = dev_X_final.iloc[:, y - 1]\r\n\r\n return train_X, train_Y, dev_X,dev_Y,test_X, test_Y", "def prepare_data(self,d):\n train_loaders, train_iters = {}, {}\n unlabeled_loaders, unlabeled_iters = {}, {}\n for domain in opt.domains:\n #CONVERT TO FLOAT32\n features, target = torch.from_numpy(d[domain].X.todense().astype('float32')), torch.from_numpy(d[domain].y)#.reshape((-1,1))\n train = data_utils.TensorDataset(features,target)\n train_loaders[domain] = DataLoader(train, opt.batch_size, shuffle = True)\n train_iters[domain] = iter(train_loaders[domain])\n for domain in opt.unlabeled_domains:\n features, target = torch.from_numpy(d[domain].X.todense().astype('float32')), torch.from_numpy(d[domain].y)#.reshape(-1,1))\n uset = data_utils.TensorDataset(features,target)\n unlabeled_loaders[domain] = DataLoader(uset,opt.batch_size, shuffle = True)\n unlabeled_iters[domain] = iter(unlabeled_loaders[domain])\n \n return train_loaders, train_iters, unlabeled_loaders, unlabeled_iters", "def train(self, trainData):\n pass", "def build_dataset(self):\n print(\"reading data of images currently , please wait......\")\n x_train, y_train, _ = get_images(self.train_directory)\n x_test, y_test, _ = get_images(self.test_directory)\n x_train, y_train = image_subset(self.num_classes, x_train, y_train)\n x_test, y_test = image_subset(self.num_classes, x_test, y_test)\n x_train = x_train.astype('float32')\n x_test = x_test.astype('float32')\n self.x_train = x_train / 255\n self.x_test = x_test / 255\n self.y_train = utils.to_categorical(y_train, self.num_classes)\n self.y_test = utils.to_categorical(y_test, self.num_classes)", "def build_dataset_search(self, dataset_dir, is_training, is_shuffle):\n pass", "def Train_data():\n print (\"loading train data ...\")\n time_start = time.time()\n data_root = '/media/keziwen/86AA9651AA963E1D'\n with h5py.File(join(data_root, './data/train_real2.h5')) as f:\n data_real = f['train_real'][:]\n num, nt, ny, nx = data_real.shape\n data_real = np.transpose(data_real, (0, 1, 3, 2))\n with h5py.File(join(data_root, './data/train_imag2.h5')) as f:\n data_imag = f['train_imag'][:]\n num, nt, ny, nx = data_imag.shape\n data_imag = np.transpose(data_imag, (0, 1, 3, 2))\n data = data_real+1j*data_imag\n num_train = 15000\n num_validate = 2000\n train_data = data[0:num_train]\n validate_data = data[num_train:num_train+num_validate]\n\n train_data = np.random.permutation(train_data)\n\n time_end = time.time()\n print ('dataset has been created using {}s'.format(time_end-time_start))\n return train_data, validate_data", "def 
process_dataset(self):\n\n logger.info('\\n')\n logger.info('=' * 40)\n logger.info('=\\t DeepRank Data Set')\n logger.info('=')\n logger.info('=\\t Training data')\n for f in self.train_database:\n logger.info(f'=\\t -> {f}')\n logger.info('=')\n if self.valid_database:\n logger.info('=\\t Validation data')\n for f in self.valid_database:\n logger.info(f'=\\t -> {f}')\n logger.info('=')\n if self.test_database:\n logger.info('=\\t Test data')\n for f in self.test_database:\n logger.info(f'=\\t -> {f}')\n logger.info('=')\n logger.info('=' * 40 + '\\n')\n sys.stdout.flush()\n\n # check if the files are ok\n self.check_hdf5_files(self.train_database)\n\n if self.valid_database:\n self.valid_database = self.check_hdf5_files(\n self.valid_database)\n\n if self.test_database:\n self.test_database = self.check_hdf5_files(\n self.test_database)\n\n # create the indexing system\n # alows to associate each mol to an index\n # and get fname and mol name from the index\n self.create_index_molecules()\n\n # get the actual feature name\n if self.mapfly:\n self.get_raw_feature_name()\n else:\n self.get_mapped_feature_name()\n\n # get the pairing\n self.get_pairing_feature()\n\n # get grid shape\n self.get_grid_shape()\n\n # get the input shape\n self.get_input_shape()\n\n # get renormalization factor\n if self.normalize_features or self.normalize_targets or self.clip_features:\n if self.mapfly:\n self.compute_norm()\n else:\n self.get_norm()\n\n logger.info('\\n')\n logger.info(\" Data Set Info:\")\n logger.info(\n f' Augmentation : {self.use_rotation} rotations')\n logger.info(\n f' Training set : {self.ntrain} conformations')\n logger.info(\n f' Validation set : {self.nvalid} conformations')\n logger.info(\n f' Test set : {self.ntest} conformations')\n logger.info(f' Number of channels : {self.input_shape[0]}')\n logger.info(f' Grid Size : {self.data_shape[1]}, '\n f'{self.data_shape[2]}, {self.data_shape[3]}')\n sys.stdout.flush()", "def prepare_data(self):\n data = self._get_dataset(self.hparams.dataset_path)\n label_encoder = data[\"label_encoder\"]\n del data[\"label_encoder\"]\n\n click.secho(\"Building inputs and labels.\", fg=\"yellow\")\n datasets = {\n \"train\": defaultdict(list),\n \"valid\": defaultdict(list),\n \"test\": defaultdict(list),\n }\n for dataset_name, dataset in data.items():\n for sample in dataset:\n instance = self.build_input(\n self.tokenizer, sample[\"text\"], label_encoder, sample[\"label\"]\n )\n for input_name, input_array in instance.items():\n datasets[dataset_name][input_name].append(input_array)\n\n click.secho(\"Padding inputs and building tensors.\", fg=\"yellow\")\n tensor_datasets = {\"train\": [], \"valid\": [], \"test\": []}\n for dataset_name, dataset in datasets.items():\n dataset = self.pad_dataset(dataset, padding=self.tokenizer.pad_index)\n for input_name in MODEL_INPUTS:\n if input_name == \"labels\":\n tensor = torch.tensor(dataset[input_name], dtype=torch.float32)\n else:\n tensor = torch.tensor(dataset[input_name])\n tensor_datasets[dataset_name].append(tensor)\n\n self.train_dataset = TensorDataset(*tensor_datasets[\"train\"])\n self.valid_dataset = TensorDataset(*tensor_datasets[\"valid\"])\n self.test_dataset = TensorDataset(*tensor_datasets[\"test\"])\n click.secho(\n \"Train dataset (Batch, Candidates, Seq length): {}\".format(\n self.train_dataset.tensors[0].shape\n ),\n fg=\"yellow\",\n )\n click.secho(\n \"Valid dataset (Batch, Candidates, Seq length): {}\".format(\n self.valid_dataset.tensors[0].shape\n ),\n fg=\"yellow\",\n )\n 
click.secho(\n \"Test dataset (Batch, Candidates, Seq length): {}\".format(\n self.test_dataset.tensors[0].shape\n ),\n fg=\"yellow\",\n )", "def train(self, data):\n pass", "def get_data_set(train=True):\n\n # 1\n train_or_test = \"train\" if train == True else \"test\"\n data_path = os.path.join(data_dir, \"aclImdb\",train_or_test)\n\n # 2\n pos_glob_pattern = os.path.join(data_path, \"pos\", \"*.txt\")\n neg_glob_pattern = os.path.join(data_path, \"neg\", \"*.txt\")\n pos_file_path_seq = glob.glob(pos_glob_pattern)\n neg_file_path_seq = glob.glob(neg_glob_pattern)\n\n # 3\n pos_dataset = [text_to_one_line(path) for path in pos_file_path_seq]\n neg_dataset = [text_to_one_line(path) for path in neg_file_path_seq]\n x = pos_dataset + neg_dataset\n y = [1.0] * len(pos_dataset) + [0.0] * len(neg_dataset)\n\n return x, y", "def _preload_all_samples(self):\n if self.mode in ['train_noval', 'train_with_val']:\n\n self._images_train, self._labels_train = [], []\n desc = \"Loading train image pairs & flows\"\n with tqdm(total=len(self._img_trn_path), desc=desc, ascii=True, ncols=100) as pbar:\n for n, image_path in enumerate(self._img_trn_path):\n pbar.update(1)\n label_path = self._lbl_trn_path[n]\n image, label = self._load_sample(image_path, label_path)\n self._labels_train.append(label)\n self._images_train.append(image)\n\n if self.mode == 'train_with_val':\n self._images_val, self._labels_val = [], []\n desc = \"Loading val image pairs & flows\"\n with tqdm(total=len(self._img_val_path), desc=desc, ascii=True, ncols=100) as pbar:\n for n, image_path in enumerate(self._img_val_path):\n pbar.update(1)\n label_path = self._lbl_val_path[n]\n image, label = self._load_sample(image_path, label_path, preprocess=False)\n self._labels_val.append(label)\n self._images_val.append(image)\n\n if self.opts['tb_test_imgs'] is True:\n self._images_test = []\n desc = \"Loading test samples\"\n with tqdm(total=len(self._img_tst_path), desc=desc, ascii=True, ncols=100) as pbar:\n for image_path in self._img_tst_path:\n pbar.update(1)\n self._images_test.append(self._load_sample(image_path, preprocess=False))\n\n elif self.mode in ['val', 'val_notrain']:\n\n self._images_val, self._labels_val = [], []\n desc = \"Loading val image pairs & flows\"\n with tqdm(total=len(self._img_val_path), desc=desc, ascii=True, ncols=100) as pbar:\n for n, image_path in enumerate(self._img_val_path):\n pbar.update(1)\n label_path = self._lbl_val_path[n]\n image, label = self._load_sample(image_path, label_path, preprocess=False)\n self._labels_val.append(label)\n self._images_val.append(image)\n\n elif self.mode == 'test':\n self._images_test = []\n desc = \"Loading test samples\"\n with tqdm(total=len(self._img_tst_path), desc=desc, ascii=True, ncols=100) as pbar:\n for image_path in self._img_tst_path:\n pbar.update(1)\n self._images_test.append(self._load_sample(image_path, preprocess=False))", "def MakeDataSetFiles(dirname):\n\n\n if not os.path.exists(dirname):\n os.mkdir(dirname)\n if not os.path.exists(os.path.join(dirname, 'train')):\n os.mkdir(os.path.join(dirname, 'train'))\n if not os.path.exists(os.path.join(dirname, 'test')):\n os.mkdir(os.path.join(dirname, 'test'))\n data_train = fetch_20newsgroups(subset='train', categories=None, shuffle=True, random_state=42)\n data_test = fetch_20newsgroups(subset='test', categories=None, shuffle=True, random_state=42)\n\n if dirname[-1] == '/' or dirname[-1] == '\\\\':\n dirname = dirname[:-1]\n \n Util.WriteClassFile(data_train.target, os.path.join(dirname, 
'train_classes.txt'))\n Util.WriteClassFile(data_test.target,os.path.join(dirname, 'test_classes.txt'))\n\n\n train_counter = 0;\n for doc in data_train.data:\n filename = 'train_' + str(train_counter).zfill(5);\n f = file(os.path.join(dirname, 'train', filename), 'w');\n f.write(doc.encode('ascii', 'ignore'));\n f.close();\n train_counter = train_counter + 1;\n\n test_counter = 0;\n for doc in data_test.data:\n filename = 'test_' + str(test_counter).zfill(5);\n f = file(os.path.join(dirname, 'test', filename), 'w');\n f.write(doc.encode('ascii', 'ignore'));\n f.close();\n test_counter = test_counter + 1;\n\n class_index = file(os.path.join(dirname, 'class_label_index.txt'), 'w')\n for label in data_train.target_names:\n class_index.write(label + '\\n')\n class_index.close()", "def __add__(self, other):\n train = copy.deepcopy(self.train)\n\n for img_path, pid, camid, dsetid in other.train:\n pid += self.num_train_pids\n camid += self.num_train_cams\n dsetid += self.num_datasets\n train.append((img_path, pid, camid, dsetid))\n\n ###################################\n # Note that\n # 1. set verbose=False to avoid unnecessary print\n # 2. set combineall=False because combineall would have been applied\n # if it was True for a specific dataset; setting it to True will\n # create new IDs that should have already been included\n ###################################\n if isinstance(train[0][0], str):\n return ImageDataset(\n train,\n self.query,\n self.gallery,\n transform=self.transform,\n mode=self.mode,\n combineall=False,\n verbose=False\n )\n else:\n return VideoDataset(\n train,\n self.query,\n self.gallery,\n transform=self.transform,\n mode=self.mode,\n combineall=False,\n verbose=False,\n seq_len=self.seq_len,\n sample_method=self.sample_method\n )", "def setup_datasets(self):\r\n\r\n train_transform = transforms.Compose(\r\n [\r\n transforms.Resize(self.crop_size),\r\n transforms.RandomRotation(degrees=self.random_angle, resample=Image.BILINEAR),\r\n transforms.RandomResizedCrop(\r\n size=self.crop_size, scale=(1-self.random_scale, 1+self.random_scale), ratio=(1, 1)),\r\n transforms.RandomHorizontalFlip(),\r\n transforms.ToTensor(),\r\n transforms.Normalize(\r\n mean=[0.485, 0.456, 0.406],\r\n std=[0.229, 0.224, 0.225]\r\n )\r\n ]\r\n )\r\n val_transform = transforms.Compose(\r\n [\r\n transforms.Resize(self.crop_size),\r\n transforms.CenterCrop(self.crop_size),\r\n transforms.ToTensor(),\r\n transforms.Normalize(\r\n mean=[0.485, 0.456, 0.406],\r\n std=[0.229, 0.224, 0.225]\r\n )\r\n ]\r\n )\r\n\r\n train_dataset = CocoDatasetPairs(\r\n root_dir=self.coco_path,\r\n set_name='train2014',\r\n transform=train_transform,\r\n dataset_size_ratio=self.dataset_size_ratio\r\n )\r\n train_subset_dataset = Subset(train_dataset, range(0, len(train_dataset), 5*self.dataset_size_ratio))\r\n val_dataset = CocoDatasetPairs(\r\n root_dir=self.coco_path,\r\n set_name='val2014',\r\n transform=val_transform,\r\n )\r\n\r\n train_loader = DataLoader(\r\n train_dataset,\r\n batch_size=self.batch_size,\r\n shuffle=True,\r\n num_workers=self.num_workers\r\n )\r\n train_subset_loader = DataLoader(\r\n train_subset_dataset,\r\n batch_size=self.batch_size,\r\n shuffle=False,\r\n num_workers=self.num_workers\r\n )\r\n val_loader = DataLoader(\r\n val_dataset,\r\n batch_size=self.batch_size,\r\n shuffle=False,\r\n num_workers=self.num_workers\r\n )\r\n return train_loader, train_subset_loader, val_loader", "def get_dataloader(data_folder, model_name, data_name, size=\"default\"):\n training_set = None\n 
validation_set = None\n\n if model_name == \"Howe_Patterson\":\n if data_name == \"combined\":\n partition = []\n for data_fold in data_folder:\n partition.append(load_obj(os.path.join(data_fold, 'data_partition.pkl')))\n\n elif size == \"small\":\n partition = load_obj(os.path.join(data_folder, 'data_partition_small.pkl'))\n elif size == \"tiny\":\n partition = load_obj(os.path.join(data_folder, 'data_partition_tiny.pkl'))\n else:\n partition = load_obj(os.path.join(data_folder, 'data_partition.pkl'))\n\n\n if data_name == \"SHHS\":\n training_set = Dataset_full_SHHS(partition['train'], data_folder)\n validation_set = Dataset_full_SHHS(partition['validation'], data_folder)\n elif data_name == \"snooze\":\n training_set = Dataset_full(partition['train'], data_folder)\n validation_set = Dataset_full(partition['validation'], data_folder)\n elif data_name == \"philips\":\n training_set = Dataset_Philips_full(partition['train'], data_folder)\n validation_set = Dataset_Philips_full(partition['validation'], data_folder)\n elif data_name == \"HMC\":\n print(\"{} not implemented data\".format(data_name))\n exit()\n elif data_name == \"combined\":\n training_set = ConcatDataset(\n Dataset_full(partition[0]['train'], data_folder[0], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False),\n Dataset_full_SHHS(partition[1]['train'], data_folder[1], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False))\n validation_set = ConcatDataset(\n Dataset_full(partition[0]['validation'], data_folder[0], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False),\n Dataset_full_SHHS(partition[1]['validation'], data_folder[1], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False))\n else:\n print(\"{} wrong data for dataloader\".format(data_name))\n exit()\n elif model_name == \"Deep_Sleep\":\n if data_name == \"combined\":\n partition = []\n for data_fold in data_folder:\n partition.append(load_obj(os.path.join(data_fold, 'data_partition.pkl')))\n\n elif size == \"small\":\n partition = load_obj(os.path.join(data_folder, 'data_partition_small.pkl'))\n elif size == \"tiny\":\n partition = load_obj(os.path.join(data_folder, 'data_partition_tiny.pkl'))\n else:\n partition = load_obj(os.path.join(data_folder, 'data_partition.pkl'))\n\n if data_name == \"SHHS\":\n training_set = Dataset_full_SHHS(partition['train'], data_folder, downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False)\n validation_set = Dataset_full_SHHS(partition['validation'], data_folder, downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False)\n elif data_name == \"snooze\":\n training_set = Dataset_full(partition['train'], data_folder, downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False)\n validation_set = Dataset_full(partition['validation'], data_folder, downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False)\n elif data_name == \"philips\":\n training_set = Dataset_Philips_full(partition['train'], data_folder, downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False)\n validation_set = Dataset_Philips_full(partition['validation'], data_folder, downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False)\n elif data_name == \"HMC\":\n print(\"{} not implemented data\".format(data_name))\n exit()\n elif data_name == \"combined\":\n # TODO combined dataset https://discuss.pytorch.org/t/train-simultaneously-on-two-datasets/649/17\n training_set = 
ConcatDataset(\n Dataset_full(partition[0]['train'], data_folder[0], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False),\n Dataset_full_SHHS(partition[1]['train'], data_folder[1], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False))\n validation_set = ConcatDataset(\n Dataset_full(partition[0]['validation'], data_folder[0], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False),\n Dataset_full_SHHS(partition[1]['validation'], data_folder[1], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False))\n else:\n print(\"{} wrong data for dataloader\".format(data_name))\n exit()\n elif model_name == \"ConvNet_IID\":\n if data_name == \"combined\":\n partition = []\n for data_fold in data_folder:\n partition.append(load_obj(os.path.join(data_fold, 'data_partition_IID_windows.pkl')))\n else:\n partition = load_obj(os.path.join(data_folder, 'data_partition_IID_windows.pkl'))\n if data_name == \"SHHS\":\n training_set = Dataset_IID_window_SHHS(partition['train'], data_folder)\n validation_set = Dataset_IID_window_SHHS(partition['validation'], data_folder)\n elif data_name == \"snooze\":\n training_set = Dataset_IID_window(partition['train'], data_folder)\n validation_set = Dataset_IID_window(partition['validation'], data_folder)\n elif data_name == \"philips\":\n print(\"{} not implemented data\".format(data_name))\n exit()\n elif data_name == \"HMC\":\n print(\"{} not implemented data\".format(data_name))\n exit()\n elif data_name == \"combined\":\n training_set = ConcatDataset(\n Dataset_IID_window(partition[0]['train'], data_folder[0]),\n Dataset_IID_window_SHHS(partition[1]['train'], data_folder[1]))\n validation_set = ConcatDataset(\n Dataset_IID_window(partition[0]['validation'], data_folder[0]),\n Dataset_IID_window_SHHS(partition[1]['validation'], data_folder[1]))\n else:\n print(\"{} wrong data for dataloader\".format(data_name))\n exit()\n\n else:\n print(\"{} wrong model for dataloader\".format(model_name))\n exit()\n\n return training_set, validation_set", "def prepare_data_for_training(args):\n # Form the train/test splits and write them to disk\n dataset = data.Dataset(args)\n # get image classes and image counts in each class\n label_map = dataset.get_class_info()\n class_count = len(list(label_map.values()))\n # split the data and store it in log dir\n df_train, df_test = dataset.split_dataset()\n\n # perform dataset augmentations\n image_data = augment.Augmentation(args)\n # get the data gens for training and test images\n train_data_gen, _ = image_data.map_fn_train(df_train)\n test_data_gen, _ = image_data.map_fn_test(df_test)\n\n return train_data_gen, test_data_gen, df_train, df_test, class_count", "def load_dataset():\n\n\n train_dd_loader = DailyDialogLoader(PATH_TO_TRAIN_DATA, load=False)\n train_dataloader = DataLoader(train_dd_loader, batch_size=16, shuffle=True, num_workers=0,\n collate_fn=PadCollate())\n\n test_dd_loader = DailyDialogLoader(PATH_TO_TEST_DATA, load=True)\n test_dataloader = DataLoader(test_dd_loader, batch_size=1, shuffle=False, num_workers=0,\n collate_fn=PadCollate())\n\n assert train_dd_loader.vocabulary.n_words == test_dd_loader.vocabulary.n_words\n\n return train_dd_loader, train_dataloader, test_dataloader", "def train_all(X_train_fuse, Y_train, X_dev_fuse, Y_dev, R_train, R_dev, hyperparams):", "def InitDataset(self):\n train_txt = 'ImageSets/Main/train.txt'\n val_txt = 'ImageSets/Main/val.txt'\n annotations = \"Annotations\"\n jpegimages = \"JPEGImages\"\n 
images_path = train_txt if (self.is_train) else val_txt \n images_path = readTxt(os.path.join(self.path, images_path))\n images_path.pop(-1)\n # rawdata format: [path_2_image, path_2_xml]\n rawData = list()\n for each in images_path:\n xml = os.path.join(self.path, annotations, each + '.xml')\n jpeg = os.path.join(self.path, jpegimages, each + '.jpg')\n rawData.append([jpeg, xml])\n return rawData", "def train(self, training_data, cfg, **kwargs):\n pass", "def train(self, batch_training=False):\n raise NotImplementedError", "def get_train_csv(self):\n try:\n self.train_article = pd.read_csv(constants.DATA_DIR / 'knn_article_tags.csv')\n except FileNotFoundError:\n train = pd.Series([])\n for csv_file in os.listdir(constants.CLEAN_DIR):\n if csv_file in self.article_feat_csvs:\n df = pd.read_csv(constants.CLEAN_DIR / csv_file)\n feat = csv_file[8:-4]\n g = df.dropna(axis=0).groupby(\"id\")[self.tag_ref[feat]]\n if train.empty:\n train = g.apply(lambda x: list(x.astype(str).str.lower()))\n else:\n g = g.apply(lambda x: list(x.astype(str).str.lower()))\n train = train.combine(g, lambda x1, x2: list(set(x1+x2)), fill_value=[])\n\n train = pd.DataFrame({'id':train.index, 'tags':train.values})\n train.to_csv(constants.DATA_DIR / 'knn_article_tags.csv', header=True)\n self.train_article = train\n\n try:\n self.train_image = pd.read_csv(constants.DATA_DIR / 'knn_image_tags.csv')\n except FileNotFoundError:\n train = pd.Series([])\n for csv_file in os.listdir(constants.CLEAN_DIR):\n if csv_file in self.image_feat_csvs:\n df = pd.read_csv(constants.CLEAN_DIR / csv_file)\n feat = csv_file[6:-4]\n g = df.dropna(axis=0).groupby(\"id\")[self.tag_ref[feat]]\n if train.empty:\n train = g.apply(lambda x: list(x.astype(str).str.lower()))\n else:\n g = g.apply(lambda x: list(x.astype(str).str.lower()))\n train = train.combine(g, lambda x1, x2: list(set(x1+x2)), fill_value=[])\n\n train = pd.DataFrame({'id':train.index, 'tags':train.values})\n train.to_csv(constants.DATA_DIR / 'knn_image_tags.csv', header=True)\n self.train_image = train", "def get_data_loaders(args, tokenizer):\n personachat = get_dataset(tokenizer, args.dataset_path, args.dataset_cache, args.train_lang)\n _ = personachat.pop(\"test\", None)\n logger.info(\"Build inputs and labels\")\n datasets = {\"train\": [], \"valid\": []}\n\n if args.train_lang in [\"En\", \"Fr\", \"It\", \"Id\", \"Jp\", \"Ko\", \"Zh\"]: #monolingual data\n for dataset_name, dataset in personachat.items():\n for dial in dataset[args.train_lang]: #dial: {\"persona\":[], \"history\":[], \"response\":str}\n instance = build_input_from_segments(dial[\"persona\"], dial[\"history\"][-args.max_turns:], dial[\"response\"], tokenizer, lm_labels = True)\n datasets[dataset_name].append(instance) \n else: #multilingual data\n for dataset_name, dataset in personachat.items():\n for lang, dials in dataset.items():\n for dial in dials: #dial: {\"persona\":[], \"history\":[], \"response\":str}\n instance = build_input_from_segments(dial[\"persona\"], dial[\"history\"][-args.max_turns:], dial[\"response\"], tokenizer, lang_id=\"<{}>\".format(lang.lower()), lm_labels = True)\n datasets[dataset_name].append(instance) #all langs together\n\n\n logger.info(\"Build train and validation dataloaders\")\n train_dataset = DatasetTrain(datasets[\"train\"])\n valid_dataset = DatasetTrain(datasets[\"valid\"])\n\n #logger.info(\"Build train and validation dataloaders\")\n #train_dataset, valid_dataset = TensorDataset(*tensor_datasets[\"train\"]), TensorDataset(*tensor_datasets[\"valid\"])\n 
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset) if args.distributed else None\n valid_sampler = torch.utils.data.distributed.DistributedSampler(valid_dataset) if args.distributed else None\n train_loader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size, shuffle=(not args.distributed), collate_fn=collate_fn)\n valid_loader = DataLoader(valid_dataset, sampler=valid_sampler, batch_size=args.valid_batch_size, shuffle=False, collate_fn=collate_fn)\n\n # logger.info(\"Train dataset (Batch, Candidates, Seq length): {}\".format(train_dataset.tensors[0].shape))\n # #logger.info(\"Train dataset (Batch, Candidates, Seq length): {}\".format(train_dataset.tensors[1].shape))\n # logger.info(\"Valid dataset (Batch, Candidates, Seq length): {}\".format(valid_dataset.tensors[0].shape))\n logger.info(\"Train dataset length: {}\".format(len(train_dataset)))\n logger.info(\"Valid dataset length: {}\".format(len(valid_dataset)))\n return train_loader, valid_loader, train_sampler, valid_sampler", "def train(self):\n # >>> YOUR ANSWER HERE\n\n fake_docs = []\n fake_words = []\n fake_words_freq = {}\n real_docs = []\n real_words = []\n real_words_freq = {}\n\n # load fake data of the training dataset, store the docs and words\n fake_data = open(self.train_data['fake']).readlines()\n for sentence in fake_data:\n preprocess_sentence = sentence.strip()\n fake_docs.append(preprocess_sentence)\n fake_words.extend(preprocess_sentence.split())\n\n # load real data of the training dataset, store the docs, words and word frequencies.\n real_data = open(self.train_data['real']).readlines()\n for sentence in real_data:\n preprocess_sentence = sentence.strip()\n real_docs.append(preprocess_sentence)\n real_words.extend(preprocess_sentence.split())\n\n # remove stop words if necessary\n if self.REMOVE_STOPWORDS:\n fake_words = [word for word in fake_words if word not in self.stopwords]\n real_words = [word for word in real_words if word not in self.stopwords]\n\n # calculate all words' frequency\n for word in fake_words:\n self.vocabulary.add(word)\n fake_words_freq[word] = fake_words_freq.get(word, 0) + 1\n for word in real_words:\n self.vocabulary.add(word)\n real_words_freq[word] = real_words_freq.get(word, 0) + 1\n\n # pre-calculate the number of all docs, the number of docs per class and words frequency per class for\n # calculation in the training loop.\n n_doc = len(fake_docs) + len(real_docs)\n n_class = {'fake': len(fake_docs), 'real': len(real_docs)}\n big_doc_dict = {'fake': fake_words_freq, 'real': real_words_freq}\n fake_words_num = 0\n real_words_num = 0\n for w in self.vocabulary:\n fake_words_num += fake_words_freq.get(w, 0)\n real_words_num += real_words_freq.get(w, 0)\n words_frequency_per_class = {'fake': fake_words_num, 'real': real_words_num}\n\n # Training\n for c in self.classes:\n self.logprior[c] = math.log(n_class[c] / n_doc)\n for w in self.vocabulary:\n count_w_c = big_doc_dict[c].get(w, 0)\n log_likelihood = math.log((count_w_c + 1) / (len(self.vocabulary) + words_frequency_per_class[c]))\n self.loglikelihood[(w, c)] = log_likelihood\n # >>> END YOUR ANSWER", "def load_dataset(display = False, flag=\"train\"):\n\n if flag ==\"train\":\n print(\"Loading training set image X ...\")\n train_X_data = DataUtils(filename=trainfile_X).getImage()\n print(\"Loading training set label y ...\")\n train_y_data = DataUtils(filename=trainfile_y).getLabel()\n print(\"size of training set X = \", train_X_data.shape)\n print(\"size of training set y 
= \", train_y_data.shape) \n\n if display:\n path_trainset = \"MNIST_dataset/imgs_train\"\n if not os.path.exists(path_trainset):\n os.mkdir(path_trainset)\n outImg(train_X_data, train_y_data, 30, out_path)\n DataUtils(outpath=path_trainset).outImg(train_X_data, train_y_data, 30)\n\n return train_X_data, train_y_data\n \n elif flag == \"test\":\n print(\"Loading test set image X ...\")\n test_X_data = DataUtils(testfile_X).getImage()\n print(\"Loading test set label y ...\")\n test_y_data = DataUtils(testfile_y).getLabel()\n print(\"size of test set X = \", test_X_data.shape)\n print(\"size of test set y = \", test_y_data.shape)\n\n if display:\n path_testset = \"MNIST_dataset/imgs_test\"\n if not os.path.exists(path_testset):\n os.mkdir(path_testset)\n DataUtils(outpath=path_testset).outImg(test_X_data, test_y_data, 30)\n\n return test_X_data, test_y_data", "def run():\r\n \r\n LABEL = data.LabelField(use_vocab=True)\r\n TEXT = data.Field(sequential=True, tokenize=lambda x:x.split(), lower=True, fix_length=config.MAX_LENGTH)\r\n\r\n### 1/5\r\n dataset = data.TabularDataset(path=config.TRAIN_DATASET_FNAME, \r\n format='csv', \r\n fields=[('text', TEXT),('label', LABEL)], \r\n skip_header=True)\r\n # split the dataset, 8:2\r\n train_dataset, valid_dataset = dataset.split(split_ratio=[0.8,0.2], random_state=random.getstate())\r\n \r\n test_data = data.TabularDataset(path=config.TEST_DATASET_FNAME,\r\n format='csv', \r\n fields=[('text', TEXT),('label', LABEL)], \r\n skip_header=True)\r\n \r\n### 2\r\n# train_dataset = data.TabularDataset(path=config.TRAIN_DATASET_FNAME, \r\n# format='csv', \r\n# fields=[('text', TEXT),('label', LABEL)], \r\n# skip_header=True) \r\n# valid_dataset = data.TabularDataset(path=config.VAL_DATASET_FNAME, \r\n# format='csv', \r\n# fields=[('text', TEXT),('label', LABEL)], \r\n# skip_header=True) \r\n \r\n# test_data = data.TabularDataset(path=config.TEST_DATASET_FNAME,\r\n# format='csv', \r\n# fields=[('text', TEXT),('label', LABEL)], \r\n# skip_header=True)\r\n \r\n### 3/4\r\n# train_dataset = data.TabularDataset(path=config.TRAIN_DATASET_FNAME, \r\n# format='csv', \r\n# fields=[('text', TEXT),('label', LABEL)], \r\n# skip_header=True) \r\n \r\n# dataset = data.TabularDataset(path=config.TEST_DATASET_FNAME, \r\n# format='csv', \r\n# fields=[('text', TEXT),('label', LABEL)], \r\n# skip_header=True)\r\n# # split the dataset, 5:5\r\n# valid_dataset, test_data = dataset.split(split_ratio=[0.5,0.5], random_state=random.getstate())\r\n\r\n### 5\r\n\r\n\r\n\r\n # load embeddings\r\n vectors_data = load_vectors(config.EMBEDDING_FNAME)\r\n\r\n TEXT.build_vocab(train_dataset, vectors=vectors_data)\r\n LABEL.build_vocab(train_dataset)\r\n print ('vector size:',TEXT.vocab.vectors.size())\r\n embedding_pretrained_matrix = TEXT.vocab.vectors\r\n \r\n # create torch device\r\n print(\"To device...\")\r\n USE_CUDA = torch.cuda.is_available()\r\n device = torch.device(\"cuda\" if USE_CUDA else \"cpu\")\r\n\r\n train_it, valid_it = data.BucketIterator.splits((train_dataset, valid_dataset),\r\n batch_sizes=(config.TRAIN_BATCH_SIZE,config.VAL_BATCH_SIZE), \r\n device=device, \r\n sort_key=lambda x: len(x.text),\r\n sort_within_batch=False,\r\n shuffle=True,\r\n repeat=False)\r\n test_it = data.BucketIterator(test_data, \r\n batch_size=config.TEST_BATCH_SIZE, \r\n sort_key=lambda x: len(x.text), \r\n shuffle=False,\r\n device=device)\r\n \r\n \r\n # fetch model\r\n vocab_size = len(TEXT.vocab) # TEXT.vocab.vectors.size()\r\n# pretrained_vec = TEXT.vocab.vectors\r\n \r\n # selecte 
network \r\n x = import_module('networks.'+config.NETWORK)\r\n model = x.Model(vocab_size,embedding_pretrained=embedding_pretrained_matrix)\r\n \r\n # send model to device\r\n model.to(device)\r\n\r\n # initialize Adam optimizer\r\n optimizer = torch.optim.Adam(model.parameters(), lr=config.LEARNING_RATE)\r\n\r\n # if you have multiple GPUs, model model to DataParallel to use multiple GPUs\r\n if torch.cuda.device_count() > 1:\r\n model = nn.DataParallel(model)\r\n \r\n params_list = []\r\n # train and validate for all epochs\r\n for epoch in range(config.EPOCHS):\r\n epoch_start_time = time.time()\r\n\r\n ###----Train--------\r\n train_outputs, train_labels, train_loss = engine.train_fn(train_it, model, optimizer, device)\r\n train_outputs = torch.Tensor(train_outputs)\r\n _, train_predicted = torch.max(train_outputs, dim=1)\r\n train_parameters_dict = metrics_func.performance_evaluation_func(train_predicted,train_labels,epoch=str(epoch))\r\n # save train paremeters\r\n params_list.append(train_parameters_dict)\r\n train_f1 = train_parameters_dict['f1_score_macro']\r\n train_prec = train_parameters_dict['precision_macro']\r\n train_recall = train_parameters_dict['precision_macro']\r\n print('\\n')\r\n print(f\" Train Epoch: {epoch}, F1 = {train_f1},precision = {train_prec},recall = {train_recall}\")\r\n ###------------\r\n \r\n # validate\r\n val_outputs, val_labels, valid_loss = engine.evaluate_fn(valid_it, model, device)\r\n val_outputs = torch.Tensor(val_outputs)\r\n _, val_predicted = torch.max(val_outputs, dim=1) \r\n # calculate evaluation paremeters\r\n val_parameters_dict = metrics_func.performance_evaluation_func(val_predicted, val_labels, epoch=str(epoch),flag='val')\r\n # save evaluation paremeters\r\n params_list.append(val_parameters_dict)\r\n \r\n val_f1 = val_parameters_dict['f1_score_macro']\r\n val_prec = val_parameters_dict['precision_macro']\r\n val_recall = val_parameters_dict['recall_macro']\r\n print(f\"Val Epoch: {epoch},F1 = {val_f1},precision = {val_prec}, recall = {val_recall}\")\r\n \r\n ###-------Test-----------------------\r\n test_outputs, test_labels, test_loss = engine.evaluate_fn(test_it, model, device)\r\n test_outputs = torch.Tensor(test_outputs)\r\n _, test_predicted = torch.max(test_outputs, dim=1) \r\n # calculate evaluation paremeters\r\n test_parameters_dict = metrics_func.performance_evaluation_func(test_predicted, test_labels, epoch=str(epoch),flag='test')\r\n # save evaluation paremeters\r\n params_list.append(test_parameters_dict)\r\n \r\n test_f1 = test_parameters_dict['f1_score_macro']\r\n test_prec = test_parameters_dict['precision_macro']\r\n test_recall = test_parameters_dict['recall_macro']\r\n print(f\"test Epoch: {epoch},F1 = {test_f1},precision = {test_prec}, recall = {test_recall}\")\r\n \r\n lr_scheduler = LRScheduler(optimizer)\r\n lr_scheduler(valid_loss)\r\n \r\n \r\n # simple early stopping\r\n# val_f1 = float(val_f1)\r\n #f1 = (float(train_f1) + float(val_f1)) / 2\r\n val_loss = float(valid_loss)\r\n early_stopping(val_loss, model)\r\n if early_stopping.early_stop:\r\n print(\"Early stopping\")\r\n break\r\n # 获得 early stopping 时的模型参数\r\n# model.load_state_dict(torch.load('checkpoint.pt'))\r\n\r\n# save_model_func(model, epoch, path='outputs')\r\n \r\n metrics_func.save_parameters_txt(params_list)", "def train(self):\n for doc, label in zip(self.train_docs(), self.train_labels()):\n yield doc, label", "def get_dataset(image_folder: str, img_size: str, self_training: bool = False, no_augmentation: bool = False, valid_dir: str = 
None):\n \n primary_img_paths = glob.glob(image_folder + os.sep + \"*/*.jpg\")\n primary_img_paths += glob.glob(image_folder + os.sep + \"*/*.png\")\n if valid_dir is None:\n \n y = [os.path.basename(os.path.dirname(path)) for path in primary_img_paths]\n\n train_img_paths, test_img_paths, _, _ = train_test_split(primary_img_paths, y, \n stratify = y, \n test_size = 1 - TRAIN_RATIO)\n #primary_img_paths = undersample(primary_img_paths)\n \n SIZE = len(primary_img_paths)\n shuffle(primary_img_paths)\n \n TRAIN = int(SIZE*TRAIN_RATIO)\n TEST = SIZE - TRAIN\n \n if self_training:\n print(\"Using predictions on unlabelled data in train set!\".rjust(70, \"#\").ljust(90, \"#\"))\n secondary_img_path = glob.glob(\"data/secondary_dataset\" + os.sep + \"*/*.jpg\")\n shuffle(secondary_img_path)\n\n #train_img_paths = primary_img_paths[:TRAIN] + secondary_img_path\n train_img_paths += secondary_img_path\n #else:\n # train_img_paths = primary_img_paths[:TRAIN]\n \n #test_img_paths = primary_img_paths[TRAIN:]\n TRAIN = len(train_img_paths) # For display purpose\n \n if self_training:\n TRAIN += len(secondary_img_path) # For display purpose\n else:\n train_img_paths = glob.glob(image_folder + os.sep + \"*/*.jpg\") + glob.glob(image_folder + os.sep + \"*/*.png\")\n test_img_paths = glob.glob(valid_dir + os.sep + \"*/*.jpg\") + glob.glob(valid_dir + os.sep + \"*/*.png\")\n TRAIN = len(train_img_paths)\n TEST = len(test_img_paths)\n\n label_names = os.listdir(image_folder)\n if no_augmentation:\n train_dataset = CustomImageDataset(train_img_paths, get_test_transforms(img_size), label_names)\n else:\n train_dataset = CustomImageDataset(train_img_paths, get_train_transforms(img_size), label_names)\n test_dataset = CustomImageDataset(test_img_paths, get_test_transforms(img_size), label_names)\n class_to_idx = train_dataset.class_to_idx\n \n # Create DataLoader for training\n train_dataloader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True)\n test_dataloader = DataLoader(test_dataset, batch_size=BATCH_SIZE)\n \n \n \n weights = get_class_weights(train_img_paths, class_to_idx, label_names) # For balancing dataset using inverse-frequency\n \n\n print(f\"Number of classes {NUM_CLASSES}, Train size: {TRAIN} images, Test size: {TEST} images, Batch size: {BATCH_SIZE}, Image size: {img_size}x{img_size}\")\n return train_dataloader, test_dataloader, class_to_idx, weights", "def prepare_data(self, *args, **kwargs):\n # get paths to train and test splits\n _split_paths = [os.path.join(self.path_to_data, split)\n for split in os.listdir(self.path_to_data)]\n\n # for each split [train, test]\n for _path in _split_paths:\n _img_classes = os.listdir(_path) # get subfolders representing each class\n self.splits[os.path.basename(_path)] = []\n\n # get the images in pairs with its corresponding class\n for _class in _img_classes:\n _data = self.get_img_text_pair(os.path.join(_path, _class))\n\n if os.path.basename(_path) == 'train':\n self.weights[self.encode_label(_class)] = len(_data)\n self.splits[os.path.basename(_path)].extend(_data)", "def dataloader(self):\n\n # load / split data\n train_data = self.data.get_train_data()\n if self.args.use_dev:\n train_data, dev_data = self.data.split_data(train_data)\n test_data = self.data.get_test_data()\n\n #print(train_data[0])\n #print(dev_data[0])\n #print(test_data[0])\n\n # build dataset\n train_dataset = self.loader.build_dataset(\n train_data, \n self.args.train_max_seq_len)\n train_loader = self.loader.build_dataloader(\n train_dataset, 'train')\n\n 
test_dataset = self.loader.build_dataset(\n test_data,\n self.args.eval_max_seq_len)\n test_loader = self.loader.build_dataloader(\n test_dataset, 'test')\n\n if self.args.use_dev:\n dev_dataset = self.loader.build_dataset(\n dev_data,\n self.args.eval_max_seq_len)\n dev_loader = self.loader.build_dataloader(\n dev_dataset, 'dev')\n return train_loader, dev_loader, test_loader\n else:\n return train_loader, test_loader", "def _build_datasets(self):\n self._build_datasets_sis3302()\n self._build_datasets_sis3305()", "def set_data():\r\n #if not os.path.exists(filepath):\r\n #download_data()\r\n metadata = read(filepath + flist[-1])\r\n ndata = metadata['num_cases_per_batch']\r\n ndim = metadata['num_vis']\r\n\r\n data, train, test = {}, {}, {}\r\n data['labels'] = metadata['label_names']\r\n data['ntraindata'] = metadata['num_cases_per_batch'] * (len(flist) - 2)\r\n data['ntestdata'] = metadata['num_cases_per_batch']\r\n data['ndim'] = metadata['num_vis']\r\n\r\n train['x'], train['y'] = convert_train(data['ntraindata'], data['ndim'])\r\n\r\n testdata = read(filepath + flist[-2])\r\n test['x'] = testdata['data']\r\n test['y'] = testdata['labels']\r\n\r\n data['train'], data['test'] = train, test\r\n save_pkl(data)", "def creates_data_loader():\n dataset_faces = FaceDataset(\n IMG_DIR, transform=transform_train, face=True)\n\n dataset_no_faces = FaceDataset(\n IMG_DIR, transform=transform_train, face=False)\n\n datasets_faces_split = train_val_test(dataset_faces, 0.2, 0.0)\n datasets_no_faces_split = train_val_test(dataset_no_faces, 0.2, 0.0)\n\n datasets = {}\n datasets[\"train\"] = datasets_faces_split[\"train\"] + \\\n datasets_no_faces_split[\"train\"]\n datasets[\"test\"] = datasets_no_faces_split[\"test\"]\n datasets[\"val\"] = datasets_faces_split[\"val\"] + \\\n datasets_no_faces_split[\"val\"]\n\n train_loader = DataLoader(dataset=datasets[\"train\"], batch_size=BATCH_SIZE,\n num_workers=NUM_WORKERS, pin_memory=PIN_MEMORY, shuffle=True, drop_last=False)\n\n val_loader = DataLoader(dataset=datasets[\"val\"], batch_size=BATCH_SIZE,\n num_workers=NUM_WORKERS, pin_memory=PIN_MEMORY, shuffle=True, drop_last=False)\n return train_loader, val_loader", "def prepare_data(dataset, train_ratio=0.8, input_dim=None, seed=10):\n # Retrieve main path of project\n dirname = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\n\n # Download and store dataset at chosen location\n if dataset == 'Cora' or dataset == 'PubMed' or dataset == 'Citeseer':\n path = os.path.join(dirname, 'data')\n data = Planetoid(path, name=dataset, split='full')[0]\n data.name = dataset\n data.num_classes = (max(data.y)+1).item()\n # data.train_mask, data.val_mask, data.test_mask = split_function(data.y.numpy())\n # data = Planetoid(path, name=dataset, split='public', transform=T.NormalizeFeatures(), num_train_per_class=20, num_val=500, num_test=1000)\n\n elif dataset == 'Amazon':\n path = os.path.join(dirname, 'data', 'Amazon')\n data = Amazon(path, 'photo')[0]\n data.name = dataset\n data.num_classes = (max(data.y)+1).item()\n data.train_mask, data.val_mask, data.test_mask = split_function(\n data.y.numpy(), seed=seed)\n # Amazon: 4896 train, 1224 val, 1530 test\n \n elif dataset in ['syn1', 'syn2', 'syn4', 'syn5']: \n data = synthetic_data(\n dataset, dirname, train_ratio, input_dim)\n \n elif dataset == 'syn6':\n data = gc_data(dataset, dirname, train_ratio)\n\n elif dataset == 'Mutagenicity':\n data = gc_data(dataset, dirname, train_ratio)\n\n return data", "def _process_dataset(all_train_img, all_train_label, 
all_test_img, all_test_label):\n # Read all training and test images and set the correct path\n train_files = tf.io.gfile.listdir(all_train_img)\n test_files = tf.io.gfile.listdir(all_test_img)\n all_train_class_path = [os.path.join(all_train_img, f) for f in train_files]\n all_test_img_path = [os.path.join(all_test_img, f) for f in test_files]\n # Since Labels start at 1, substract -1 for correct indices with starting '0'\n label_np_test = read_labels_txt(all_test_label) - 1\n synsets_np_train = read_labels_mat(all_train_label)\n\n all_train_img_path = []\n label_np_train = []\n for folder in all_train_class_path:\n img_class_files = tf.io.gfile.listdir(folder)\n synset = os.path.basename(os.path.normpath(folder))\n label_train = synsets_np_train.index(synset)\n for f in img_class_files:\n all_train_img_path.append(os.path.join(folder, f))\n label_np_train.append(label_train)\n\n # Create the Datasets for training and test images with corresponding labels\n path_ds_train = tf.data.Dataset.from_tensor_slices((all_train_img_path, label_np_train))\n img_label_ds_train = path_ds_train.map(_process_image)\n path_ds_test = tf.data.Dataset.from_tensor_slices((all_test_img_path, label_np_test))\n img_label_ds_test = path_ds_test.map(_process_image)\n\n print(img_label_ds_train)\n print(img_label_ds_test)\n\n # Check an example image if necessary\n # example, = img_label_ds_test.take(1)\n for i in range(5):\n example, = img_label_ds_train.take(1)\n image, label = example[0], example[1]\n plt.figure(i)\n if image.shape[2] == 1:\n plt.imshow(tf.squeeze(image), cmap='gray')\n else:\n plt.imshow(image/255)\n print(\"Label: {}\".format(label.numpy()))\n plt.show()\n\n return img_label_ds_train, img_label_ds_test", "def load_dataset(self):\n\n train_path = os.path.join(self.dataset_path, 'images_background')\n validation_path = os.path.join(self.dataset_path, 'images_evaluation')\n\n # First let's take care of the train alphabets\n for alphabet in os.listdir(train_path):\n if alphabet[0] == '.':\n continue\n alphabet_path = os.path.join(train_path, alphabet)\n\n current_alphabet_dictionary = {}\n\n for character in os.listdir(alphabet_path):\n if character[0] == '.':\n continue\n character_path = os.path.join(alphabet_path, character)\n\n current_alphabet_dictionary[character] = os.listdir(\n character_path)\n\n self.train_dictionary[alphabet] = current_alphabet_dictionary\n\n # Now it's time for the validation alphabets\n for alphabet in os.listdir(validation_path):\n alphabet_path = os.path.join(validation_path, alphabet)\n if alphabet[0] == '.':\n continue\n\n current_alphabet_dictionary = {}\n\n for character in os.listdir(alphabet_path):\n if character[0] == '.':\n continue\n character_path = os.path.join(alphabet_path, character)\n\n current_alphabet_dictionary[character] = os.listdir(\n character_path)\n\n self.evaluation_dictionary[alphabet] = current_alphabet_dictionary", "def get_datasets(data):\n train_dataset, test_dataset = None, None\n data_dir = '../data'\n\n if data == 'fmnist':\n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=[0.2860], std=[0.3530])])\n train_dataset = datasets.FashionMNIST(data_dir, train=True, download=True, transform=transform)\n test_dataset = datasets.FashionMNIST(data_dir, train=False, download=True, transform=transform)\n \n elif data == 'fedemnist':\n train_dir = '../data/Fed_EMNIST/fed_emnist_all_trainset.pt'\n test_dir = '../data/Fed_EMNIST/fed_emnist_all_valset.pt'\n train_dataset = torch.load(train_dir)\n test_dataset = 
torch.load(test_dir) \n \n elif data == 'cifar10':\n transform_train = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(mean=(0.4914, 0.4822, 0.4465), std=(0.2023, 0.1994, 0.2010)),\n ])\n transform_test = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(mean=(0.4914, 0.4822, 0.4465), std=(0.2023, 0.1994, 0.2010)),\n ])\n train_dataset = datasets.CIFAR10(data_dir, train=True, download=True, transform=transform_train)\n test_dataset = datasets.CIFAR10(data_dir, train=False, download=True, transform=transform_test)\n train_dataset.targets, test_dataset.targets = torch.LongTensor(train_dataset.targets), torch.LongTensor(test_dataset.targets) \n \n return train_dataset, test_dataset", "def load_data(opt=\"mnist\"):\n if opt == \"mnist\":\n train, test = tf.keras.datasets.mnist.load_data()\n \n x_train, y_train = train\n x_test, y_test = test\n \n x_train = x_train.reshape(x_train.shape[0], 28 * 28)\n x_test = x_test.reshape(x_test.shape[0], 28 * 28)\n \n y_train = y_train.astype(np.int)\n y_test = y_test.astype(np.int)\n for i in range(len(y_train)):\n y_train[i] = 1 if y_train[i] % 2 == 0 else -1\n for i in range(len(y_test)):\n y_test[i] = 1 if y_test[i] % 2 == 0 else -1\n\n elif opt == \"covertype\":\n df = pd.read_csv(\"covtype.data\", header=None)\n x = df.iloc[:, 0:54].values\n y = df[54].values\n for i in range(len(y)):\n y[i] = 1 if y[i] % 2 == 0 else -1\n x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)\n \n else:\n logging.error(\"Unknown dataset!!\")\n\n logging.info(\"train data shape: {}\".format(x_train.shape))\n logging.info(\"test data shape: {}\".format(x_test.shape))\n return (x_train, y_train), (x_test, y_test)", "def train(ds, **kwargs):\n# {{{\n\n t_REPS = [ds.grand['representations'][tr] for tr in ds.data['trainers']]\n t_VALS = [ds.grand['values'][tr] for tr in ds.data['trainers']]\n\n # For convenience, set the mean of the training values to 0\n t_AVG = np.mean(t_VALS)\n t_VALS = np.subtract(t_VALS,t_AVG)\n\n # model determination (`s` and `l` hypers, then `a` coefficients)\n # {{{\n # train the hypers\n if ds.data['hypers']:\n print(\"Loading hyperparameters from Dataset.\")\n s = ds.data['s']\n l = ds.data['l']\n else:\n if 'k' in kwargs:\n k = kwargs['k']\n else:\n k = ds.setup['M']\n s, l = find_hypers(t_VALS,t_REPS,k)\n ds.data['hypers'] = True\n ds.data['s'] = s\n ds.data['l'] = l\n\n # train for alpha\n if ds.data['a']:\n print(\"Loading coefficients from Dataset.\") \n alpha = np.asarray(ds.data['a'])\n else:\n print(\"Model training using s = {} and l = {} . . 
.\".format(s,l))\n alpha = train_a(t_REPS,t_VALS,s,l)\n ds.data['a'] = alpha.tolist()\n # }}}\n\n return ds, t_AVG", "def load_dataset(self):\n # Get all the files in the directory\n file_list = self.get_file_list()\n\n # Concatenate the data corresponding to a list of files\n data = self.concatenate_file_data(file_list)\n\n # Shuffle the data and create the training and the validation datasets\n data = self.shuffle_data_dictionary(data)\n self.training_dataset, self.validation_dataset = self.split_data_into_training_and_validation(data)", "def _train(self):\r\n lr, hr = self.sess.run(self.val_batch)\r\n res = self.sess.run(\r\n [self.train, self.merged,\r\n self.GAN.g_loss, self.GAN.mse_loss, self.GAN.g_gan_loss,\r\n self.GAN.d_loss, self.GAN.d_loss_real, self.GAN.d_loss_fake],\r\n feed_dict={\r\n self.GAN.g_images: lr,\r\n self.GAN.d_images: hr,\r\n self.GAN.is_training: True\r\n })\r\n\r\n return res[1:]", "def load_all(): \n training_data = dict() \n for i in range(7):\n training_data[i+1] = load_data(i+1) \n\n return training_data", "def get_dataloaders(data_dir,train_batch_size,val_batch_size,aug_flag):\n # Create the dataset object.\n transformed_dataset = PersonDataset(data_dir,False)\n # dataloader for train and validation\n validation_split = 0.2\n shuffle_dataset = True\n #random seed to keep the train-val split constant for inference purpose\n random_seed= 42\n # create indices for training and validation splits.\n dataset_size = len(transformed_dataset)\n # we create the indices using python range function and store it into a list\n indices = list(range(dataset_size))\n split = int(np.floor(validation_split*dataset_size))\n if shuffle_dataset:\n np.random.seed(random_seed)\n np.random.shuffle(indices)\n train_indices,val_indices = indices[split:],indices[:split]\n # create dataloaders...\n train_sampler = SubsetRandomSampler(train_indices)\n val_sampler = SubsetRandomSampler(val_indices)\n train_aug,val_aug = aug_flag,False\n train_loader = DataLoader(PersonDataset(data_dir,train_aug), batch_size=train_batch_size, shuffle=False, num_workers=0,sampler = train_sampler)\n val_loader = DataLoader(PersonDataset(data_dir,val_aug), batch_size=val_batch_size, shuffle=False, num_workers=0,sampler = val_sampler)\n\n # dictionary for data loaders..\n dataloaders = {\"train\" :train_loader,\n \"val\":val_loader\n }\n return dataloaders", "def train(self, training_data, training_labels, validation_data, validation_labels):\n abstract", "def separate(self):\n print(\"start dataset separating\")\n sum = 0\n for i in tqdm(range(len(self.itemlen))):\n il = self.itemlen[i]\n if il < 3:\n sum += il\n continue\n rarr = list(range(sum, sum+il))\n random.shuffle(rarr)\n self.train.append({\n 'input': self.input[rarr[0]],\n 'label': self.label[i]\n })\n self.val.append({\n 'input': self.input[rarr[1]],\n 'label': self.label[i]\n })\n for j in range(2, len(rarr)):\n self.test.append({\n 'input': self.input[rarr[j]],\n 'label': self.label[i]\n })\n sum += il", "def train(self):\n for data_tier in self.data_tiers:\n fd = open(self.data_path + '/training_data_' + data_tier + '.json', 'r')\n self.preprocessed_data[data_tier] = json.load(fd)\n fd.close()\n tot = len(self.preprocessed_data[data_tier]['features'])\n p = int(math.ceil(tot*0.8))\n training_features = np.array(self.preprocessed_data[data_tier]['features'][:p])\n trend_training_classifications = np.array(self.preprocessed_data[data_tier]['trend_classifications'][:p])\n avg_training_classifications = 
np.array(self.preprocessed_data[data_tier]['avg_classifications'][:p])\n t1 = datetime.datetime.utcnow()\n self.clf_trend[data_tier].fit(training_features, trend_training_classifications)\n self.clf_avg[data_tier].fit(training_features, avg_training_classifications)\n t2 = datetime.datetime.utcnow()\n td = t2 - t1\n self.logger.info('Training %s for data tier %s took %s', self.name, data_tier, str(td))\n joblib.dump(self.clf_trend[data_tier], self.data_path + '/' + self.name + '_trend_' + data_tier + '.pkl')\n joblib.dump(self.clf_avg[data_tier], self.data_path + '/' + self.name + '_avg_' + data_tier + '.pkl')", "def __test_and_train(self):\n f = open(\"all_data_info.csv\")\n reader = csv.DictReader(f, delimiter=\",\")\n data = []\n for line in reader:\n if line['artist_group'] == \"train_and_test\" and line[\"in_train\"] == \"False\":\n # the img's artist is in training set\n # but the img is in test set only\n data.append((line['artist'], line['new_filename']))\n\n return data", "def train(self, batch):\n pass", "def load_data(batch_size=batch_size):\n trainset = LibriSpeechDataset(training_set, int(LIBRISPEECH_SAMPLING_RATE * n_seconds))\n testset = LibriSpeechDataset(validation_set, int(LIBRISPEECH_SAMPLING_RATE * n_seconds), stochastic=False)\n\n train_loader = DataLoader(trainset, batch_size=batch_size, num_workers=1, shuffle=True, drop_last=True)\n test_loader = DataLoader(testset, batch_size=1, num_workers=1, drop_last=True)\n\n return train_loader, test_loader", "def dtrain(directory):\n return dataset(directory, 'train-images-idx3-ubyte',\n 'train-labels-idx1-ubyte')", "def __init__(self, opt):\n BaseDataset.__init__(self, opt)\n self.dir_A = os.path.join(opt.dataroot, opt.phase5 + 'A') # create a path '/path/to/data/trainA'\n #self.dir_B20 = os.path.join(opt.dataroot, opt.phase + 'B20') # create a path '/path/to/data/trainB1'\n\n #self.dir_B2 = os.path.join(opt.dataroot, opt.phase + 'B2') # create a path '/path/to/data/trainB2'\n self.A_paths = sorted(make_dataset(self.dir_A, opt.max_dataset_size)) # load images from '/path/to/data/trainA'\n #self.B20_paths = sorted(make_dataset(self.dir_B20, opt.max_dataset_size)) # load images from '/path/to/data/trainB'\n #self.B2_paths = sorted(make_dataset(self.dir_B2, opt.max_dataset_size)) # load images from '/path/to/data/trainB'\n self.A_size = len(self.A_paths) # get the size of dataset A\n # self.B20_size = len(self.B20_paths) # get the size of dataset B\n #self.B2_size = len(self.B2_paths)\n\n #self.dir_A50 = os.path.join(opt.dataroot, opt.phase + 'A50') # create a path '/path/to/data/trainA'\n self.dir_B50 = os.path.join(opt.dataroot, opt.phase5 + 'B50') # create a path '/path/to/data/trainB1'\n #self.dir_B2 = os.path.join(opt.dataroot, opt.phase + 'B2') # create a path '/path/to/data/trainB2'\n #self.A50_paths = sorted(make_dataset(self.dir_A50, opt.max_dataset_size)) # load images from '/path/to/data/trainA'\n self.B50_paths = sorted(make_dataset(self.dir_B50, opt.max_dataset_size)) # load images from '/path/to/data/trainB'\n #self.B2_paths = sorted(make_dataset(self.dir_B2, opt.max_dataset_size)) # load images from '/path/to/data/trainB'\n #self.A50_size = len(self.A50_paths) # get the size of dataset A\n self.B50_size = len(self.B50_paths) # get the size of dataset B\n #self.B2_size = len(self.B2_paths)\n\n self.dir_B100 = os.path.join(opt.dataroot, opt.phase5 + 'B100') # create a path '/path/to/data/trainB1'\n self.B100_paths = sorted(\n make_dataset(self.dir_B100, opt.max_dataset_size)) # load images from 
'/path/to/data/trainB'\n self.B100_size = len(self.B100_paths) # get the size of dataset B\n\n self.dir_B150 = os.path.join(opt.dataroot, opt.phase5 + 'B150') # create a path '/path/to/data/trainB1'\n self.B150_paths = sorted(\n make_dataset(self.dir_B150, opt.max_dataset_size)) # load images from '/path/to/data/trainB'\n self.B150_size = len(self.B150_paths) # get the size of dataset B\n\n\n\n self.dir_m0 = os.path.join(opt.dataroot, 'mask_0') # create a path '/path/to/data/trainB1'\n self.m0_paths = sorted(\n make_dataset(self.dir_m0, opt.max_dataset_size)) # load images from '/path/to/data/trainB'\n self.m0_size = len(self.m0_paths) # get the size of dataset B\n\n\n self.dir_m50 = os.path.join(opt.dataroot, 'mask_50') # create a path '/path/to/data/trainB1'\n self.m50_paths = sorted(\n make_dataset(self.dir_m50, opt.max_dataset_size)) # load images from '/path/to/data/trainB'\n self.m50_size = len(self.m50_paths) # get the size of dataset B\n\n\n\n self.dir_m100 = os.path.join(opt.dataroot, 'mask_100') # create a path '/path/to/data/trainB1'\n self.m100_paths = sorted(\n make_dataset(self.dir_m100, opt.max_dataset_size)) # load images from '/path/to/data/trainB'\n self.m100_size = len(self.m100_paths) # get the size of dataset B\n\n\n self.dir_m150 = os.path.join(opt.dataroot, 'mask_150') # create a path '/path/to/data/trainB1'\n self.m150_paths = sorted(\n make_dataset(self.dir_m150, opt.max_dataset_size)) # load images from '/path/to/data/trainB'\n self.m150_size = len(self.m150_paths) # get the size of dataset B\n\n\n \n\n btoA = self.opt.direction == 'BtoA'\n input_nc = self.opt.output_nc if btoA else self.opt.input_nc # get the number of channels of input image\n output_nc = self.opt.input_nc if btoA else self.opt.output_nc # get the number of channels of output image\n self.transform_A = get_transform(self.opt, grayscale=(input_nc == 1))\n self.transform_B = get_transform(self.opt, grayscale=(output_nc == 1))", "def build_enru_custom_test(self):\n train_data_file = self.data_dir + '/' + enru_paracrawl\n eval_data_file = self.data_dir + '/' + enru_newscomm\n train_data = tf.data.experimental.CsvDataset(\n [train_data_file],\n record_defaults=[tf.string, tf.string],\n compression_type='GZIP',\n field_delim='\\t',\n use_quote_delim=False)\n train_data = train_data.cache() # only read once\n eval_data = tf.data.experimental.CsvDataset(\n [eval_data_file],\n record_defaults=[tf.string, tf.string],\n compression_type='GZIP',\n field_delim='\\t',\n use_quote_delim=False)\n\n eval_data = eval_data.skip(9000).take(10000)\n eval_data = eval_data.cache()\n def to_features_dict(eng, rus):\n return {'inputs': eng, 'targets': rus}\n\n train_data = train_data.map(to_features_dict)\n eval_data = eval_data.map(to_features_dict)\n\n self.default_builder_obj = None\n\n return train_data, eval_data", "def prepare_train(self) -> Tuple[ZLIMGS, ZLIMGS, ZLIMGS, ZLIMGS]:\n\n if self.setting == 'setting1':\n warnings.warn(\"Please note that Setting 1 should not use train eval dataset! 
\"\n \"Because its training set only contain normal samples!\")\n\n with open(self.json_path) as fp:\n ids_json = json.load(fp)\n ids_train_normal = ids_json['normal']['train']\n ids_train_defect = ids_json['defect']['train']\n\n # train\n zlimgs_train_normal = self._create_zl_imgs_given_ids(ids=ids_train_normal,\n subset=CONFIG[self.setting]['normal_train'],\n ann_type=CONFIG[self.setting]['ann_train'])\n zlimgs_train_defect = self._create_zl_imgs_given_ids(ids=ids_train_defect,\n subset=CONFIG[self.setting]['defect_train'],\n ann_type=CONFIG[self.setting]['ann_train'])\n\n # train eval\n zlimgs_train_eval_normal = self._create_zl_imgs_given_ids(ids=ids_train_normal,\n subset=CONFIG[self.setting]['normal_train'],\n ann_type=CONFIG[self.setting]['ann_eval'])\n zlimgs_train_eval_defect = self._create_zl_imgs_given_ids(ids=ids_train_defect,\n subset=CONFIG[self.setting]['defect_train'],\n ann_type=CONFIG[self.setting]['ann_eval'])\n\n return zlimgs_train_normal, zlimgs_train_defect, zlimgs_train_eval_normal, zlimgs_train_eval_defect", "def load_data_wrapper():\r\n \r\n global training_inputs, training_results\r\n global validation_inputs, validation_results\r\n global test_inputs, test_results\r\n global num_samples, numpixels, num_test_samples\r\n \r\n tr_d, va_d, te_d = load_data()\r\n \r\n num_samples=len(tr_d[0])\r\n training_inputs=zeros([num_samples,numpixels])\r\n training_results=zeros([num_samples,10]) \r\n for j in range(num_samples):\r\n training_inputs[j,:] = reshape(tr_d[0][j], (numpixels))\r\n training_results[j,:] = vectorized_result(tr_d[1][j])\r\n# validation_inputs = [reshape(x, (numpixels)) for x in va_d[0]]\r\n# validation_results = [vectorized_result(y) for y in va_d[1]]\r\n\r\n num_test_samples=len(te_d[0])\r\n test_inputs=zeros([num_test_samples,numpixels])\r\n test_results=zeros([num_test_samples,10]) \r\n for j in range(num_test_samples):\r\n test_inputs[j,:] = reshape(te_d[0][j], (numpixels))\r\n test_results[j,:] = vectorized_result(te_d[1][j])", "def make_training_set(ind_list, training_data): \n \n exp = training_data[ind_list[0]] \n X_train = exp[0]\n u_train = exp[1] \n\n for i in ind_list[1:]: \n exp = training_data[i]\n X_train = np.append(X_train, exp[0], axis=0)\n u_train = np.append(u_train, exp[1], axis=0)\n\n return X_train, u_train", "def data(self, train=True, batch_size=2):\n if train:\n elements = self.prepare_batch(self.training_albums)\n else:\n elements = self.prepare_batch(self.validation_albums)\n\n while len(elements) > 0:\n # Collect the batch\n batch = []\n for _ in range(min(batch_size, len(elements))):\n batch.append(elements.pop())\n\n # Get same sequence size for all elements of the batch\n albums, labels = self.batchify(batch)\n yield albums, labels", "def build_paracrawl_only(self):\n logging.info('Building news commentary only dataset')\n logging.info(self.configs[PARACRAWL])\n builder = tfds.builder(WMT_BASE_DATASET_NAME,\n config=self.configs[PARACRAWL],\n data_dir=self.data_dir)\n self.default_builder_obj = builder\n shard_spec = self.build_shard_spec(self.paracrawl_size, False)\n logging.info('Training on TFDS dataset %s with split %s',\n WMT_BASE_DATASET_NAME, 'train' + shard_spec)\n train_data = builder.as_dataset(split='train' + shard_spec,\n shuffle_files=self.shuffle_train_files)\n # _, eval_data = self.build_newscomment_only()\n _, eval_data = self.build_newscomment_ft()\n return train_data, eval_data", "def train(self, trainFilenames):\n\n\t\tstartIndex = len(self.documents)\n\t\tendIndex = startIndex + 
len(trainFilenames)\n\t\tself.documents += trainFilenames\n\n\t\tX = [[i] for i in range(startIndex, endIndex)]\n\t\tY = [isAroused(f) for f in trainFilenames]\n\n\t\tself.knn.fit(np.array(X), np.array(Y))", "def train(self):\n self.ae_train(self.net0, self.ae0_optimizer, self.train0_loader, self.val_loader, name='Net0')\n self.ae_train(self.net1, self.ae1_optimizer, self.train1_loader, self.val_loader, name='Net1')\n self.ae_train(self.net2, self.ae2_optimizer, self.train2_loader, self.val_loader, name='Net2')\n\n self.classifier_train(self.net0, self.optimizer0, self.train0_loader, self.val_loader, name='Net0')\n self.classifier_train(self.net1, self.optimizer1, self.train1_loader, self.val_loader, name='Net1')\n self.classifier_train(self.net2, self.optimizer2, self.train2_loader, self.val_loader, name='Net2')", "def build_dataloaders(dataset, batch_size, train_test_split=0.1, train_shuffle=True, eval_shuffle=True):\n # 데이터셋 길이\n dataset_len = len(dataset)\n\n # 학습, 평가 데이터 나누기\n eval_len = int(dataset_len * train_test_split)\n train_len = dataset_len - eval_len\n\n train_dataset, eval_dataset = random_split(dataset, (train_len, eval_len))\n\n train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=train_shuffle)\n eval_loader = DataLoader(eval_dataset, batch_size=batch_size, shuffle=eval_shuffle)\n\n\n logging.info(f'''train_dataloader size: {len(train_loader.dataset)} | shuffle: {train_shuffle}\n eval_dataloader size: {len(eval_loader.dataset)} | shuffle: {eval_shuffle}''')\n\n return train_loader, eval_loader", "def buildDataSet():\n (x_train_origin, y_train_origin), (x_test_origin, y_test_origin) = mnist.load_data()\n\n assert K.image_data_format() == 'channels_last'\n x_train_origin = x_train_origin.reshape(x_train_origin.shape[0], h, w, 1)\n x_test_origin = x_test_origin.reshape(x_test_origin.shape[0], h, w, 1)\n\n dataset_train = []\n dataset_test = []\n\n #Sorting images by classes and normalize values 0=>1\n for n in range(nb_classes):\n images_class_n = np.asarray([row for idx,row in enumerate(x_train_origin) if y_train_origin[idx]==n])\n dataset_train.append(images_class_n/255)\n\n images_class_n = np.asarray([row for idx,row in enumerate(x_test_origin) if y_test_origin[idx]==n])\n dataset_test.append(images_class_n/255)\n\n return dataset_train,dataset_test,x_train_origin,y_train_origin,x_test_origin,y_test_origin", "def train(self, trainingData, trainingLabels, validationData, validationLabels): \n \n # might be useful in your code later...\n # this is a list of all features in the training set.\n self.features = list(set([ f for datum in trainingData for f in datum.keys() ]));\n \n if (self.automaticTuning):\n kgrid = [0.001, 0.01, 0.05, 0.1, 0.5, 1, 5, 10, 20, 50]\n else:\n kgrid = [self.k]\n \n self.trainAndTune(trainingData, trainingLabels, validationData, validationLabels, kgrid)", "def train_step(self):\r\n batch_images = next(self.data_loader.next_batch())\r\n _, loss, summary, ea = self.sess.run([self.model.train_op, self.model.total_loss, self.model.merged, self.model.euclidean_a_p],\r\n feed_dict={self.model.input: batch_images, self.model.is_training: True})\r\n \r\n return loss, summary", "def get_dataset(self):\n\n trainset = datasets.SVHN('datasets/SVHN/train/', split='train', transform=self.train_transforms,\n target_transform=None, download=True)\n valset = datasets.SVHN('datasets/SVHN/test/', split='test', transform=self.val_transforms,\n target_transform=None, download=True)\n extraset = datasets.SVHN('datasets/SVHN/extra', split='extra', 
transform=self.train_transforms,\n target_transform=None, download=True)\n\n trainset = torch.utils.data.ConcatDataset([trainset, extraset])\n\n return trainset, valset", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n # self.recognizer.train()\n self.detector.train()\n self.shared_conv.train()", "def train():\n pass", "def split_train_and_test_with_py_datasets(data_set, batch_size=cfg['batch_size'], test_size=0.2, num_works=4,\n pin_memory=True):\n num_dataset = len(data_set)\n indices = list(range(num_dataset))\n split = int(np.floor(test_size * num_dataset))\n\n train_idx, test_idx = indices[split:], indices[:split]\n train_sampler = SubsetRandomSampler(train_idx)\n test_sampler = SubsetRandomSampler(test_idx)\n\n train_loader = torch.utils.data.DataLoader(\n dataset=data_set, batch_size=batch_size, sampler=train_sampler, num_workers=num_works,\n pin_memory=pin_memory\n )\n\n test_loader = torch.utils.data.DataLoader(\n dataset=data_set, batch_size=batch_size, sampler=test_sampler, num_workers=num_works,\n pin_memory=pin_memory\n )\n\n return train_loader, test_loader", "def _load_split_data(self, dataset_path):\n for i, prefix in enumerate(['train', 'dev', 'test']):\n filename = os.path.join(dataset_path, '{}.txt'.format(prefix))\n knowledge, src, tgt = self._load_multi_data(filename)\n self.group_text_data[0].append(knowledge)\n self.group_text_data[1].append(src)\n self.group_text_data[2].append(tgt)", "def train(self):\n acc_time = []\n data_test = self.val_data[0][0][0]\n labels_test = self.val_data[0][0][1]\n for i, train_batch in enumerate(self.dataset):\n \n writerDIM = SummaryWriter('runs/experiment_DIM'+str(i))\n data,labels, t = train_batch\n\n index_tr,index_cv,coreset = data_split(data.shape[0],777)\n\n # adding eventual replay patterns to the current batch\n if i == 0:\n ext_mem = [data[coreset], labels[coreset]]\n dataC = np.concatenate((data[index_tr], data[index_cv]),axis=0)\n labC = np.concatenate((labels[index_tr],labels[index_cv]),axis=0)\n else:\n dataP = ext_mem[0]\n labP = ext_mem[1]\n\n ext_mem = [\n np.concatenate((data[coreset], ext_mem[0])),\n np.concatenate((labels[coreset], ext_mem[1]))]\n if self.replay:\n dataC = np.concatenate((data[index_tr], data[index_cv],dataP),axis=0)\n labC = np.concatenate((labels[index_tr],labels[index_cv],labP),axis=0)\n else:\n dataC = np.concatenate((data[index_tr], data[index_cv]),axis=0)\n labC = np.concatenate((labels[index_tr],labels[index_cv]),axis=0)\n\n\n\n print(\"----------- batch {0} -------------\".format(i))\n print(\"Task Label: \", t)\n trC,cvC = data_split_Tr_CV(dataC.shape[0],777)\n\n train_set = LoadDataset(dataC,labC,transform=self.tr,indices=trC)\n val_set = LoadDataset(dataC,labC,transform=self.tr,indices=cvC)\n print('Training set: {0} \\nValidation Set {1}'.format(train_set.__len__(),val_set.__len__()))\n batch_size=32\n train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True)\n valid_loader = DataLoader(val_set, batch_size=batch_size, shuffle=False)\n dataloaders = {'train':train_loader,'val':valid_loader}\n \n ####### Set hyperparameters for the training\n if i ==0: \n prior = False\n ep=40\n dim_model = DIM_model(batch_s=32,num_classes =128,feature=True) \n dim_model.to(self.device)\n classifierM = _classifier(n_input=128,n_class=50,n_neurons=[256,256,128])\n classifierM = classifierM.to(self.device)\n writer = SummaryWriter('runs/experiment_C'+str(i))\n lr_new = 0.00001\n 
lrC=0.0001\n \n else:\n prior = True\n ep=6\n \n lr_new =0.000005\n lrC = 0.00005\n\n optimizer = torch.optim.Adam(dim_model.parameters(),lr=lr_new)\n scheduler = lr_scheduler.StepLR(optimizer,step_size=40,gamma=0.1) #there is also MultiStepLR\n\n tr_dict_enc = {'ep':ep,'writer':writerDIM,'best_loss':1e10,'t_board':True,\n 'gamma':.5,'beta':.5,'Prior_Flag':prior,'discriminator':classifierM} \n tr_dict_cl = {'ep':40,'writer':writer,'best_loss':1e10,'t_board':True,'gamma':1}#40\n\n if i==0 and self.load:\n print('Load DIM model weights first step')\n dim_model.load_state_dict(torch.load(self.path + 'weights/weightsDIM_T0.pt'))\n else:\n ############################## Train Encoder########################################\n dim_model,self.stats = trainEnc_MI(self.stats,dim_model, optimizer, scheduler,dataloaders,self.device,tr_dict_enc)\n ####################################################################################\n if i==0:\n torch.save(dim_model.state_dict(), self.path + 'weights/weightsDIM_T'+str(i)+'.pt')\n\n ####\n #Conversion of image into latent space representation for classifier training\n ####\n dim_model.requires_grad_(False)\n for phase in ['train','val']:\n dataF = None\n labF = None\n for inputs, labels in dataloaders[phase]:\n torch.cuda.empty_cache()\n if len(inputs.shape)==5:\n\n inputs = inputs[:,:,:,:,0].to(self.device)\n else:\n inputs = inputs.to(self.device)\n\n _,_,pred = dim_model(inputs)\n pred_l = pred.data.cpu().numpy()\n if dataF is None:\n dataF = pred_l\n labF = labels.data.cpu().numpy()\n else:\n dataF = np.concatenate((dataF,pred_l),axis=0)\n labF = np.concatenate((labF,labels.data.cpu().numpy()),axis=0)\n\n if phase == 'train':\n dataTr_f = dataF\n labTr_f = labF\n else:\n dataCv_f = dataF\n labCv_f = labF\n \n dim_model.requires_grad_(True)\n train_set = LoadFeat(dataTr_f,labTr_f)\n val_set = LoadFeat(dataCv_f,labCv_f)\n batch_size=32\n\n train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True)\n valid_loader = DataLoader(val_set, batch_size=batch_size, shuffle=False)\n dataloaderC = {'train':train_loader,'val':valid_loader}\n\n optimizerC = torch.optim.Adam(classifierM.parameters(),lr=lrC)\n schedulerC = lr_scheduler.StepLR(optimizerC,step_size=40,gamma=0.1)\n classifierM.requires_grad_(True)\n\n ############################## Train Classifier ########################################\n classifierM,self.stats = train_classifier(self.stats,classifierM, optimizerC, schedulerC,dataloaderC,self.device,tr_dict_cl) \n #################################### #################################### ##############\n\n torch.save(classifierM.state_dict(), self.path + 'weights/weightsC_T'+str(i)+'.pt')\n dim_model.eval()\n classifierM.eval()\n #### Cross_val Testing\n \n test_set = LoadDataset(data_test,labels_test,transform=self.trT)\n batch_size=32\n test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=False)\n score= []\n\n for inputs, labels in test_loader:\n torch.cuda.empty_cache()\n inputs = inputs.to(self.device)\n labels = labels.to(self.device) \n _,_,ww =dim_model(inputs)\n pred = classifierM(ww)\n pred_l = pred.data.cpu().numpy()\n score.append(np.sum(np.argmax(pred_l,axis=1)==labels.data.cpu().numpy())/pred_l.shape[0])\n print('TEST PERFORMANCES:', np.asarray(score).mean())\n acc_time.append(np.asarray(score).mean())\n del test_set,test_loader\n \n self.dim_model = dim_model\n self.classifierM = classifierM\n acc_time = np.asarray(acc_time)\n return self.stats,acc_time", "def load_dataset(data_dir='flowers'):\n train_dir = 
data_dir + '/train'\n valid_dir = data_dir + '/valid'\n test_dir = data_dir + '/test'\n \n # Apply transformations on training set, leave alone validation and testing sets:\n data_transforms = {\n \"training\" : transforms.Compose([transforms.RandomRotation(30),\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.RandomVerticalFlip(),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])]),\n # For validation and tesing sets, since they are the \"unseen\" data that used to measure the model performance, so they should not be applied by any transformations, however, resizing is stil needed.\n \"validation\" : transforms.Compose([transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])]),\n \"testing\" : transforms.Compose([transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])])\n }\n \n # Load datasets with ImageFolder:\n image_datasets = {\n \"training\" : datasets.ImageFolder(train_dir, transform = data_transforms[\"training\"]),\n \"validation\" : datasets.ImageFolder(valid_dir, transform = data_transforms[\"validation\"]),\n \"testing\" : datasets.ImageFolder(test_dir, transform = data_transforms[\"testing\"])\n }\n \n # Using the image datasets and the trainforms, define the dataloaders: \n dataloaders = {\n \"training\" : torch.utils.data.DataLoader(image_datasets[\"training\"], batch_size = 64, shuffle = True),\n \"validation\" : torch.utils.data.DataLoader(image_datasets[\"validation\"], batch_size = 64),\n \"testing\" : torch.utils.data.DataLoader(image_datasets[\"testing\"], batch_size = 64)\n }\n \n return (dataloaders['training'],\n dataloaders['validation'],\n dataloaders['testing'],\n image_datasets['training'],\n image_datasets['validation'],\n image_datasets['testing'])", "def _split_data(self):\n\n # Set training data\n self.train_data = torchvision.datasets.ImageFolder(\n os.path.join(self.path, 'train'),\n transform=self._transform()\n )\n self.classes = self.train_data.classes\n\n # Set validation data\n self.val_data = torchvision.datasets.ImageFolder(\n os.path.join(self.path, 'test'),\n transform=self._transform(train=False)\n )", "def loadData(self):\n batch_size = 256\n \n #if self.conv_sg == True:\n # batch_size = 1 \n \n download = True\n root = self.root + self.dataset\n if self.dataset == \"MNIST\": \n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])\n trainset = torchvision.datasets.MNIST(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.MNIST(root, train=False, download=download, transform=transform)\n \n if self.dataset == \"CIFAR10\":\n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465,), (0.2023, 0.1994, 0.2010,))])\n trainset = torchvision.datasets.CIFAR10(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.CIFAR10(root, train=False, download=download, transform=transform)\n \n if self.dataset == \"CIFAR100\":\n transform = transforms.Compose([transforms.ToTensor()])\n trainset = torchvision.datasets.CIFAR100(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.CIFAR100(root, train=False, download=download, transform=transform)\n \n \n trainloader = 
torch.utils.data.DataLoader(trainset, batch_size = batch_size,\n shuffle=False, num_workers=0, pin_memory = False)\n \n testloader = torch.utils.data.DataLoader(testset, batch_size= batch_size,\n shuffle=False, num_workers=2, pin_memory = False)\n \n return trainloader, testloader", "def build_enru_custom(self):\n train_data_file = self.data_dir + '/' + enru_paracrawl\n eval_data_file = self.data_dir + '/' + enru_newscomm\n train_data = tf.data.experimental.CsvDataset(\n [train_data_file],\n record_defaults=[tf.string, tf.string],\n compression_type='GZIP',\n field_delim='\\t',\n use_quote_delim=False)\n train_data = train_data.cache() # only read once\n eval_data = tf.data.experimental.CsvDataset(\n [eval_data_file],\n record_defaults=[tf.string, tf.string],\n compression_type='GZIP',\n field_delim='\\t',\n use_quote_delim=False)\n\n eval_data = eval_data.take(3000)\n eval_data = eval_data.cache()\n def to_features_dict(eng, rus):\n return {'inputs': eng, 'targets': rus}\n\n train_data = train_data.map(to_features_dict)\n eval_data = eval_data.map(to_features_dict)\n\n self.default_builder_obj = None\n\n return train_data, eval_data", "def load_data():\n data = gzip.open(\"mnist.pkl.gz\", \"rb\")\n train_set, valid_set, test_set = cPickle.load(data)\n data.close()\n\n # Combine validation and train folds to recreate the master 60k set.\n new_images = numpy.concatenate((train_set[0], valid_set[0]))\n new_labels = numpy.concatenate((train_set[1], valid_set[1]))\n\n train_set = (new_images, new_labels)\n \n return (train_set, test_set)", "def initSets(self):\n data_frame = pd.read_csv(self.train_file, header=None)\n data_frame = data_frame.drop(columns=self.drop_cols)\n features = data_frame.iloc[:, :-1].values\n labels = data_frame.iloc[:, -1].values\n if self.test_file is None:\n self.train_feat, self.test_feat, self.train_labl, self.test_labl = train_test_split(features, labels, test_size=self.test_size)\n else:\n data_frame = pd.read_csv(self.test_file, header=None)\n data_frame = data_frame.drop(columns=self.drop_cols)\n self.train_feat, _, self.train_labl, _ = train_test_split(features, labels, test_size=self.test_size)\n features = data_frame.iloc[:, :-1].values\n labels = data_frame.iloc[:, -1].values\n _, self.test_feat, _, self.test_labl = train_test_split(features, labels, test_size=self.test_size)\n # kfold = KFold(n_splits=3)\n # self.train_index, self.test_index = kfold.split(features,labels)", "def load_data(params):\n train_df = pd.read_csv(os.path.join(*[dataset_path, params['tokenizer'] + '_train_' + params['stemming'] + '.csv']))\n dev_df = pd.read_csv(os.path.join(*[dataset_path, params['tokenizer'] + '_dev_' + params['stemming'] + '.csv']))\n train_data, label_encode = data_prep(train_df, params, if_resample=True)\n dev_data, _ = data_prep(dev_df, params)\n return train_data, dev_data, label_encode", "def load_data(self):\n sets = ['train', 'val']\n images = []\n labels = []\n self.labels_dic = {}\n file = open(self.path + 'wnids.txt')\n train_labels = file.read().split()\n if self.train:\n for fn in range(self.num_classes):\n f = train_labels[fn]\n for i in os.listdir(self.path + 'train/' + f + '/images/'):\n images.append(Image.open(self.path + 'train/' + f + '/images/' + i))\n labels.append(f)\n #image label n link to folder names of TinyImageNet\n self.labels_dic[f] = fn\n\n else:\n for fn in range(self.num_classes):\n f = train_labels[fn]\n self.labels_dic[f] = fn\n file_val = open(self.path + 'val/val_annotations.txt')\n val_labels = file_val.read().split('\\n')\n 
for im in val_labels:\n im_data = im.split(\"\t\")[:2]\n if len(im_data) < 2:\n continue\n if im_data[1] in self.labels_dic:\n images.append(Image.open(self.path + 'val/images/' + im_data[0]))\n labels.append(im_data[1])\n\n self.images = images\n self.labels = labels", "def _load_training_and_test_sets(normalize):\n class_labels = []\n test_labels = []\n norm = None\n if normalize == True:\n norm = loading.get_normalize_vector()\n\n for i in range(0, 10):\n [training, test] = loading.load_number_set(i, 0.7, norm_vector=norm)\n labels = [str(i)] * training.shape[0]\n tlabels = [str(i)] * test.shape[0]\n if i == 0:\n train_points = training\n test_points = test\n else:\n train_points = np.concatenate((train_points, training), axis = 0)\n test_points = np.concatenate((test_points, test), axis = 0)\n class_labels.extend(labels)\n test_labels.extend(tlabels)\n\n return train_points, test_points, class_labels, test_labels", "def __split_dataset(self):\n self.train, self.valid, _, _ = train_test_split(self.data, self.data, test_size=0.2)\n self.valid, self.test, _, _ = train_test_split(self.valid, self.valid, test_size=0.5)", "def split_data_metrics_learning(cfg):\n actual_pose = cfg['actual_pose']\n target = cfg['target']\n person_ids = cfg['person_ids']\n \n # Split train and val data based on the person ids.\n all_ids = np.arange(1, 21)\n val_ids = cfg['val_ids']\n train_ids = set(all_ids).symmetric_difference(val_ids)\n \n anchor_gallery_split_size = cfg['anchor_gallery_split_size']\n window_width = cfg['window_width']\n overlap = cfg['overlap']\n random_state = cfg['random_state']\n \n # Get only the training set data and the label.\n X_train, y_train = get_req_ids(actual_pose, target, train_ids, person_ids)\n \n # Select the evaluation data that measures the performance of the model on the training set.\n train_accuracy_ids = random.sample(train_ids, len(val_ids))\n X_train_acc, y_train_acc = get_req_ids(actual_pose, target, train_accuracy_ids, person_ids)\n \n # Anchor/Gallery set split for the training set.\n X_train_gal, X_train_anchor, y_train_gal, y_train_anchor = train_test(X_train = X_train_acc, y_train = y_train_acc, \n test_size=anchor_gallery_split_size, \n random_state=random_state, stratify=y_train_acc)\n \n # Subsample the gait sequences of the anchor/gallery set of the training set based on the window width and the overlap.\n X_train_gal, y_train_gal = subsample(cfg, X_train_gal, y_train_gal, window_width=window_width, overlap=overlap)\n X_train_anchor, y_train_anchor = subsample(cfg, X_train_anchor, y_train_anchor, window_width=window_width, overlap=overlap)\n \n # Get only the validation set data and the label.\n X_val, y_val = get_req_ids(actual_pose, target, val_ids, person_ids)\n \n # Anchor/Gallery set split for the validation set.\n X_val_gal, X_val_anchor, y_val_gal, y_val_anchor = train_test(X_train = X_val, \n y_train = y_val, \n test_size=anchor_gallery_split_size, \n random_state=random_state, \n stratify=y_val)\n \n \n # If data augmentation parameter is set to True in the configuration dictionary, data augmentation is done for the training set.\n if cfg['augment_data']:\n X_train, y_train = augment_data(X_train, y_train)\n \n # Subsample the gait sequences of the whole training set based on the window width and the overlap.\n X_train, y_train = subsample(cfg, X_train, y_train, window_width=window_width, overlap=overlap)\n \n # Subsample the gait sequences of the anchor/gallery set of the validation set based on the window width and the overlap.\n X_val_gal, 
y_val_gal = subsample(cfg, X_val_gal, y_val_gal, window_width=window_width, overlap=overlap)\n X_val_anchor, y_val_anchor = subsample(cfg, X_val_anchor, y_val_anchor, window_width=window_width, overlap=overlap)\n \n # Concatenate the gallery and anchor set of the validation data and label as a whole. This is just to maintain the train-val uniformity and \n # is not used anywhere in the project.\n X_val, y_val = np.concatenate((X_val_gal, X_val_anchor)), np.concatenate((y_val_gal, y_val_anchor))\n \n return X_train, X_val, X_train_gal, X_train_anchor, X_val_gal, X_val_anchor, y_train, y_val, y_train_gal, y_train_anchor, y_val_gal, y_val_anchor", "def get_dldata(filepath, dlTrainCorpusPath, dlTestCorpusPath, seed=2018, batch_size=16):\r\n\tf = open(\"record/synthetic and academic datasets/testcases_train.pkl\",'rb') #get the testcase ids of train sets and test sets\r\n\ttestcases += pickle.load(f) \r\n\tf.close()\r\n\r\n\tf = open(\"record/synthetic and academic datasets/testcases_test.pkl\",'rb')\r\n\ttestcases += pickle.load(f)\r\n\tf.close()\r\n\t\r\n print(\"produce train dataset...\") \r\n N = 6\r\n num = list(range(N))\r\n for i in num:\r\n train_set = [[], [], [], [], [], []]\r\n for folder_train in folders_train[int(i*len(folders_train)/N) : int((i+1)*len(folders_train)/N)]:\r\n if not folder_train in os.listdir(filepath):\r\n continue\r\n print(\"\\r\"+str(folder_train), end='')\r\n for filename in os.listdir(os.path.join(filepath, folder_train)):\r\n f = open(filepath + folder_train + '/' + filename, 'rb')\r\n data = pickle.load(f)\r\n f.close()\r\n if len(data[0][0]) > MAXLEN:\r\n data[2] = [x for x in data[2] if x <= MAXLEN]\r\n data[0] = cutdata(data[0][0])\r\n if data[0] == None:\r\n continue \r\n for n in range(len(data)):\r\n train_set[n].append(data[n])\r\n train_set[-1].append(folder_train+\"/\"+filename)\r\n f_train = open(dlTrainCorpusPath + \"train_\" + str(i)+ \"_0818.pkl\", 'wb')\r\n pickle.dump(train_set, f_train)\r\n f_train.close()\r\n\r\n del train_set \r\n gc.collect() \r\n\r\n print(\"\\nproduce test dataset...\")\r\n N = 6\r\n num = list(range(N))\r\n for i in num:\r\n test_set = [[], [], [], [], [], []]\r\n for folder_test in folders_test[int(i*len(folders_test)/N) : int((i+1)*len(folders_test)/N)]:\r\n if not folder_test in os.listdir(filepath):\r\n continue\r\n print(\"\\r\"+str(folder_test), end='')\r\n for filename in os.listdir(os.path.join(filepath, folder_test)):\r\n f = open(filepath + folder_test + '/' + filename, 'rb')\r\n data = pickle.load(f)\r\n f.close()\r\n if len(data[0][0]) > MAXLEN:\r\n data[2] = [x for x in data[2] if x <= MAXLEN]\r\n data[0] = cutdata(data[0][0])\r\n if data[0] == None:\r\n continue \r\n for n in range(len(data)):\r\n test_set[n].append(data[n])\r\n test_set[-1].append(folder_test+\"/\"+filename)\r\n \r\n f_test = open(dlTestCorpusPath + \"test_\" + str(i)+ \"_0124.pkl\", 'wb')\r\n pickle.dump(test_set, f_test)\r\n f_test.close()\r\n\r\n del test_set\r\n gc.collect()\r\n return", "def prepare_train_dataset(name, reso, batch_size=32):\r\n transform = transforms.Compose([\r\n transforms.RandomResizedCrop(size=reso, interpolation=3),\r\n transforms.ColorJitter(brightness=1.5, saturation=1.5, hue=0.2),\r\n transforms.RandomVerticalFlip(),\r\n transforms.ToTensor()\r\n ])\r\n\r\n path = config.datasets[name]\r\n\r\n if name == 'coco':\r\n img_datasets = CocoDataset(root=path['train_imgs'], annFile=path['train_anno'], transform=transform)\r\n dataloder = torch.utils.data.DataLoader(img_datasets, batch_size=batch_size, 
num_workers=4, shuffle=True, collate_fn=CocoDataset.collate_fn)\r\n elif name == 'voc':\r\n img_datasets = VocDataset(train_list=path['train_imgs'], transform=transform)\r\n dataloder = torch.utils.data.DataLoader(img_datasets, batch_size=batch_size, num_workers=4, shuffle=True, collate_fn=VocDataset.collate_fn)\r\n\r\n return img_datasets, dataloder", "def pre_train(self, dataset, **kwargs):\n\n pass", "def train(self, dataset, model_dir):\n raise NotImplementedError", "def load_data(self):\n with open('data/fordTrain.csv') as f:\n data = csv.reader(f, delimiter=',')\n train = [x for i, x in enumerate(data) if i > 0] \n # Extract features and target variable separately\n trainx = [x[3:] for x in train]\n trainy = [x[2] for x in train]\n\n with open('data/fordTest.csv') as f:\n data = csv.reader(f, delimiter=',')\n testx = [x[3:] for i, x in enumerate(data) if i > 0] \n\n with open('data/Solution.csv') as f:\n data = csv.reader(f, delimiter=',')\n testy = [x[2] for i, x in enumerate(data) if i > 0] \n\n # Extract features and target variable, convert to numpy array\n trainx = np.asarray(trainx, dtype=np.float32)\n trainy = np.asarray(trainy, dtype=np.int8)\n testx = np.asarray(testx, dtype=np.float32)\n testy = np.asarray(testy, dtype=np.int8)\n\n # Return training and test sets\n trainSet = Dataset(trainx, trainy)\n testSet = Dataset(testx, testy)\n return trainSet, testSet", "def train(dataset, epochs, discriminator, generator, generatorOptimizer, discriminatorOptimizer, seed, checkpoint, checkpoint_prefix):\n for epoch in range(epochs):\n start = time.time()\n\n for image_batch in tqdm(dataset):\n train_step(image_batch, generator, discriminator, generatorOptimizer, discriminatorOptimizer)\n #2nd for()\n\n #Produce images for the GIF\n display.clear_output(wait=True)\n generate_and_save_images(generator, epoch + 1, seed)\n\n #save model every 5 epochs\n if (epoch + 1) % 5 == 0:\n print(\"Saving checkpoint.\")\n checkpoint.save(file_prefix = checkpoint_prefix)\n #if() \n\n print(\"time for epoch {} is {} sec\".format(epoch + 1, time.time()-start))\n #1st for\n \n #Generate after final epoch\n display.clear_output(wait=True)\n generator.save(\"CatGeneratorColor.model\")\n discriminator.save(\"CatDiscriminatorColor.model\")" ]
[ "0.69598746", "0.67574775", "0.6717249", "0.66458243", "0.66314375", "0.65966815", "0.65671974", "0.65517074", "0.65076643", "0.64948964", "0.64792395", "0.6467504", "0.6462277", "0.64389426", "0.64315575", "0.641234", "0.6410118", "0.6398891", "0.63852507", "0.63804436", "0.6378063", "0.6377822", "0.63619894", "0.63600135", "0.6359427", "0.63402456", "0.6321699", "0.63047343", "0.63024265", "0.6298678", "0.6269708", "0.6265931", "0.6262738", "0.6261476", "0.625437", "0.6253035", "0.6238361", "0.6230293", "0.6229381", "0.6224981", "0.62206906", "0.6220419", "0.6216796", "0.62107366", "0.6207568", "0.6205551", "0.61922437", "0.6191198", "0.61861515", "0.6184911", "0.61834085", "0.617938", "0.61791337", "0.61769736", "0.61710197", "0.6168765", "0.61650944", "0.61648154", "0.61639464", "0.61610234", "0.61593825", "0.61558604", "0.6151101", "0.61505747", "0.6148528", "0.61482304", "0.61461955", "0.61439496", "0.6142426", "0.61421216", "0.6131843", "0.61301774", "0.61283803", "0.6126988", "0.6126988", "0.6126988", "0.6126988", "0.6126988", "0.61256844", "0.61203873", "0.61144555", "0.61111933", "0.6108909", "0.6106601", "0.61040246", "0.6101854", "0.60966897", "0.6093975", "0.6092409", "0.60912764", "0.6082511", "0.6081719", "0.6080508", "0.6077146", "0.6077001", "0.60716504", "0.6068432", "0.606599", "0.6054989", "0.6054579" ]
0.69694567
0
Checks if required files exist before going deeper.
def check_before_run(self, required_files):
    if isinstance(required_files, str):
        required_files = [required_files]

    for fpath in required_files:
        if not os.path.exists(fpath):
            raise RuntimeError('"{}" is not found'.format(fpath))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_check(self):\n for required_file in self._required_files:\n # Check if required files are there\n # FIXME Sometimes it doesn't work :?\n if required_file not in self.files:\n self.valid = False", "def check_missing_files(self):\n files = [getattr(self, attr) for attr in self._required]\n try:\n utilities.check_missing_files(files)\n except utilities.MissingConstraintError as err:\n err.message += \"\\nSkipping {}\\n\".format(self.__class__.__name__)\n raise err", "def check_before_run(self, required_files):\n if isinstance(required_files, str):\n required_files = [required_files]\n\n for fpath in required_files:\n if not osp.exists(fpath):\n raise RuntimeError('\"{}\" is not found'.format(fpath))", "def check_for_missing_files(self, path):\n return None", "def test_check():\n for f in cfg.required_files:\n assert os.path.isfile(f)", "def _check_file_not_used(self):\n module_files = set(self._get_module_files())\n referenced_files = set(self._get_manifest_referenced_files()).union(\n set(self._get_xml_referenced_files())\n )\n excluded_dirs = ['static', 'test', 'tests', 'migrations']\n no_referenced_files = [\n f for f in (module_files - referenced_files)\n if f.split(os.path.sep)[0] not in excluded_dirs\n ]\n self.msg_args = no_referenced_files\n return not no_referenced_files", "def checkInstall(self, abort=False):\n \n for i in self.reqfiles:\n found = False\n files = []\n for j in i:\n if( os.path.exists( self.realPath() + \"/\" + j )):\n found = True\n else:\n files.append( self.realPath() + \"/\" + j )\n if( not found ):\n if( abort ):\n if( len( files ) > 1 ):\n self.abort( \"At least one of these files: \" + str(files) + \"\\n\" \\\n + \"is required for using this installation of \" + self.name )\n else:\n self.abort( \"Required file not found: \" + str(files) )\n return False\n return True", "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.train_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir))\n if not osp.exists(self.probe_gallery_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.probe_gallery_dir))", "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.train_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir))\n if not osp.exists(self.query_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.query_dir))\n if not osp.exists(self.gallery_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.gallery_dir))", "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.train_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir))\n if not osp.exists(self.query_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.query_dir))\n if not osp.exists(self.gallery_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.gallery_dir))", "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.train_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir))\n if not osp.exists(self.query_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.query_dir))\n if not 
osp.exists(self.gallery_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.gallery_dir))", "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.train_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir))\n if not osp.exists(self.query_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.query_dir))\n if not osp.exists(self.gallery_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.gallery_dir))", "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.train_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir))\n if not osp.exists(self.query_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.query_dir))\n if not osp.exists(self.gallery_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.gallery_dir))", "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.train_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir))\n if not osp.exists(self.query_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.query_dir))\n if not osp.exists(self.gallery_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.gallery_dir))", "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.train_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir))\n if not osp.exists(self.query_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.query_dir))\n if not osp.exists(self.gallery_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.gallery_dir))", "def check_requirements(self):\n if not os.path.isfile(self.file_path):\n _logger.error(\"File not found\")\n _logger.error(ex)\n raise\n _logger.info(\"File notifier check passed\")", "def _check_before_run(self):\n if not osp.exists(self.root):\n raise RuntimeError(\"'{}' is not available\".format(self.root))\n if not osp.exists(self.train_name_path):\n raise RuntimeError(\"'{}' is not available\".format(self.train_name_path))\n if not osp.exists(self.test_name_path):\n raise RuntimeError(\"'{}' is not available\".format(self.test_name_path))\n if not osp.exists(self.track_train_info_path):\n raise RuntimeError(\"'{}' is not available\".format(self.track_train_info_path))\n if not osp.exists(self.track_test_info_path):\n raise RuntimeError(\"'{}' is not available\".format(self.track_test_info_path))\n if not osp.exists(self.query_IDX_path):\n raise RuntimeError(\"'{}' is not available\".format(self.query_IDX_path))", "def CheckPrerequisites(_):\n _LocalDataPath(RUN_SCRIPT)\n _LocalDataPath(CONFIG_FILE)", "def _check_before_run(self):\n\t\tif not osp.exists(self.dataset_dir):\n\t\t\traise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n\t\tif not osp.exists(self.train_dir):\n\t\t\traise RuntimeError(\"'{}' is not available\".format(self.train_dir))\n\t\tif not osp.exists(self.query_dir):\n\t\t\traise RuntimeError(\"'{}' is not available\".format(self.query_dir))\n\t\tif not osp.exists(self.gallery_dir):\n\t\t\traise RuntimeError(\"'{}' is not available\".format(self.gallery_dir))", "def _check_before_run(self):\n if 
not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.train_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir))\n if not osp.exists(self.list_query_path):\n raise RuntimeError(\"'{}' is not available\".format(self.list_query_path))\n if not osp.exists(self.list_gallery_path):\n raise RuntimeError(\"'{}' is not available\".format(self.list_gallery_path))", "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.data_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.data_dir))\n if not osp.exists(self.split_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.split_dir))", "def _check_required_directories(self) -> None:\n\n if self._all_stages:\n for stage in self._all_stages:\n stage_cfg = self._app_cfg['stages'][stage]\n processor_cfg = stage_cfg['configuration']\n\n # Populate all the directories requested in the configuration.\n for dir_key, dir_id in processor_cfg['dirs'].items():\n dir_path_value = os.path.join(self._data_dir_path, self._app_cfg['dir-paths'][dir_id])\n # Rebuild the key by replacing 'id' with 'path'\n dir_path_key = dir_key.replace('id', 'path')\n processor_cfg[dir_path_key] = dir_path_value\n\n # Create the directory if it doesn't exist.\n self._validate_path(dir_path_value)\n\n # Add the temporary directory.\n processor_cfg['tmp-dir-path'] = self._tmp_dir_path\n\n del processor_cfg['dirs']", "def contains(\n path: str,\n required: list\n ) -> bool:\n for file in path.iterdir():\n try:\n required.remove(file.name)\n except ValueError:\n pass\n\n if required:\n return False\n else:\n return True", "def checkRequiredDependencies(self):\n \n # skip dependency check for downloading only\n if( self.downloadOnly ):\n return\n\n # hard dependencies\n for req in self.reqmodules:\n if( self.parent.module(req) == None ):\n # check if there is an auto detected module\n if( self.parent.module(req, True) == None ):\n self.abort( self.name + \" requires \" + req \\\n + \" and it wasn't found in your config file!!\" )\n else:\n # use auto detected module\n self.parent.use( self.parent.module(req, True) )\n self.parent.module( req ).init()\n\n print self.name + \": auto-detected \" + req + \" version \" + self.parent.module( req ).version\n \n # build only dependencies\n if( self.mode == \"install\" ):\n mods = self.reqmodules_buildonly + self.reqmodules_external\n for req in mods:\n if( self.parent.module(req) == None ):\n # check if there is an auto detected module\n if( self.parent.module(req, True) == None ):\n self.abort( req + \" not found in your config file!! \" + self.name \\\n + \" cannot be built without \" + req )\n else:\n # use auto detected module\n self.parent.use( self.parent.module(req, True) )\n self.parent.module( req ).init()\n\n print \" - \" + self.name + \": auto-detected \" + req + \" version \" + self.parent.module( req ).version", "def check_prerequisites(self, env):\n super(PopLog, self).check_prerequisites(env)\n print(\" Checking prerequisites for : {0}\".format(self.__class__.__name__))\n \n for inFile in self._expectedInFiles:\n rc, err_msg = cesmEnvLib.checkFile('{0}/{1}'.format(env['WORKDIR'],inFile), 'read')\n if not rc:\n print('{0}... 
continuing with additional plots.'.format(err_msg))", "def files_exist(self):\n\n passed = []\n warned = []\n failed = []\n ignored = []\n\n # NB: Should all be files, not directories\n # List of lists. Passes if any of the files in the sublist are found.\n #: test autodoc\n try:\n _, short_name = self.nf_config[\"manifest.name\"].strip(\"\\\"'\").split(\"/\")\n except ValueError:\n log.warning(\"Expected manifest.name to be in the format '<repo>/<pipeline>'. Will assume it is '<pipeline>'.\")\n short_name = self.nf_config[\"manifest.name\"].strip(\"\\\"'\").split(\"/\")\n\n files_fail = [\n [\".gitattributes\"],\n [\".gitignore\"],\n [\".nf-core.yml\"],\n [\".editorconfig\"],\n [\".prettierignore\"],\n [\".prettierrc.yml\"],\n [\"CHANGELOG.md\"],\n [\"CITATIONS.md\"],\n [\"CODE_OF_CONDUCT.md\"],\n [\"CODE_OF_CONDUCT.md\"],\n [\"LICENSE\", \"LICENSE.md\", \"LICENCE\", \"LICENCE.md\"], # NB: British / American spelling\n [\"nextflow_schema.json\"],\n [\"nextflow.config\"],\n [\"README.md\"],\n [os.path.join(\".github\", \".dockstore.yml\")],\n [os.path.join(\".github\", \"CONTRIBUTING.md\")],\n [os.path.join(\".github\", \"ISSUE_TEMPLATE\", \"bug_report.yml\")],\n [os.path.join(\".github\", \"ISSUE_TEMPLATE\", \"config.yml\")],\n [os.path.join(\".github\", \"ISSUE_TEMPLATE\", \"feature_request.yml\")],\n [os.path.join(\".github\", \"PULL_REQUEST_TEMPLATE.md\")],\n [os.path.join(\".github\", \"workflows\", \"branch.yml\")],\n [os.path.join(\".github\", \"workflows\", \"ci.yml\")],\n [os.path.join(\".github\", \"workflows\", \"linting_comment.yml\")],\n [os.path.join(\".github\", \"workflows\", \"linting.yml\")],\n [os.path.join(\"assets\", \"email_template.html\")],\n [os.path.join(\"assets\", \"email_template.txt\")],\n [os.path.join(\"assets\", \"sendmail_template.txt\")],\n [os.path.join(\"assets\", f\"nf-core-{short_name}_logo_light.png\")],\n [os.path.join(\"conf\", \"modules.config\")],\n [os.path.join(\"conf\", \"test.config\")],\n [os.path.join(\"conf\", \"test_full.config\")],\n [os.path.join(\"docs\", \"images\", f\"nf-core-{short_name}_logo_light.png\")],\n [os.path.join(\"docs\", \"images\", f\"nf-core-{short_name}_logo_dark.png\")],\n [os.path.join(\"docs\", \"output.md\")],\n [os.path.join(\"docs\", \"README.md\")],\n [os.path.join(\"docs\", \"README.md\")],\n [os.path.join(\"docs\", \"usage.md\")],\n [os.path.join(\"lib\", \"nfcore_external_java_deps.jar\")],\n [os.path.join(\"lib\", \"NfcoreTemplate.groovy\")],\n [os.path.join(\"lib\", \"Utils.groovy\")],\n [os.path.join(\"lib\", \"WorkflowMain.groovy\")],\n ]\n\n files_warn = [\n [\"main.nf\"],\n [os.path.join(\"assets\", \"multiqc_config.yml\")],\n [os.path.join(\"conf\", \"base.config\")],\n [os.path.join(\"conf\", \"igenomes.config\")],\n [os.path.join(\".github\", \"workflows\", \"awstest.yml\")],\n [os.path.join(\".github\", \"workflows\", \"awsfulltest.yml\")],\n [os.path.join(\"lib\", f\"Workflow{short_name[0].upper()}{short_name[1:]}.groovy\")],\n [\"modules.json\"],\n [\"pyproject.toml\"],\n ]\n\n # List of strings. 
Fails / warns if any of the strings exist.\n files_fail_ifexists = [\n \"Singularity\",\n \"parameters.settings.json\",\n \".nf-core.yaml\", # yml not yaml\n os.path.join(\"bin\", \"markdown_to_html.r\"),\n os.path.join(\"conf\", \"aws.config\"),\n os.path.join(\".github\", \"workflows\", \"push_dockerhub.yml\"),\n os.path.join(\".github\", \"ISSUE_TEMPLATE\", \"bug_report.md\"),\n os.path.join(\".github\", \"ISSUE_TEMPLATE\", \"feature_request.md\"),\n os.path.join(\"docs\", \"images\", f\"nf-core-{short_name}_logo.png\"),\n \".markdownlint.yml\",\n \".yamllint.yml\",\n os.path.join(\"lib\", \"Checks.groovy\"),\n os.path.join(\"lib\", \"Completion.groovy\"),\n os.path.join(\"lib\", \"Workflow.groovy\"),\n ]\n files_warn_ifexists = [\".travis.yml\"]\n\n # Remove files that should be ignored according to the linting config\n ignore_files = self.lint_config.get(\"files_exist\", [])\n\n def pf(file_path):\n return os.path.join(self.wf_path, file_path)\n\n # First - critical files. Check that this is actually a Nextflow pipeline\n if not os.path.isfile(pf(\"nextflow.config\")) and not os.path.isfile(pf(\"main.nf\")):\n failed.append(\"File not found: nextflow.config or main.nf\")\n raise AssertionError(\"Neither nextflow.config or main.nf found! Is this a Nextflow pipeline?\")\n\n # Files that cause an error if they don't exist\n for files in files_fail:\n if any([f in ignore_files for f in files]):\n continue\n if any([os.path.isfile(pf(f)) for f in files]):\n passed.append(f\"File found: {self._wrap_quotes(files)}\")\n else:\n failed.append(f\"File not found: {self._wrap_quotes(files)}\")\n\n # Files that cause a warning if they don't exist\n for files in files_warn:\n if any([f in ignore_files for f in files]):\n continue\n if any([os.path.isfile(pf(f)) for f in files]):\n passed.append(f\"File found: {self._wrap_quotes(files)}\")\n else:\n warned.append(f\"File not found: {self._wrap_quotes(files)}\")\n\n # Files that cause an error if they exist\n for file in files_fail_ifexists:\n if file in ignore_files:\n continue\n if os.path.isfile(pf(file)):\n failed.append(f\"File must be removed: {self._wrap_quotes(file)}\")\n else:\n passed.append(f\"File not found check: {self._wrap_quotes(file)}\")\n\n # Files that cause a warning if they exist\n for file in files_warn_ifexists:\n if file in ignore_files:\n continue\n if os.path.isfile(pf(file)):\n warned.append(f\"File should be removed: {self._wrap_quotes(file)}\")\n else:\n passed.append(f\"File not found check: {self._wrap_quotes(file)}\")\n\n # Files that are ignoed\n for file in ignore_files:\n ignored.append(f\"File is ignored: {self._wrap_quotes(file)}\")\n\n return {\"passed\": passed, \"warned\": warned, \"failed\": failed, \"ignored\": ignored}", "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.data_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.data_dir))\n if not osp.exists(self.raw_mat_path):\n raise RuntimeError(\"'{}' is not available\".format(self.raw_mat_path))\n if not osp.exists(self.split_new_det_mat_path):\n raise RuntimeError(\"'{}' is not available\".format(self.split_new_det_mat_path))\n if not osp.exists(self.split_new_lab_mat_path):\n raise RuntimeError(\"'{}' is not available\".format(self.split_new_lab_mat_path))", "def check_paths( self ):\n check_a = utility_code.checkDirectoryExistence( self.PATH_TO_SOURCE_FILE_DIRECTORY )\n check_b = utility_code.checkDirectoryExistence( 
self.PATH_TO_ARCHIVES_ORIGINALS_DIRECTORY )\n check_c = utility_code.checkDirectoryExistence( self.PATH_TO_ARCHIVES_PARSED_DIRECTORY )\n check_d = utility_code.checkDirectoryExistence( self.PATH_TO_PARSED_ANNEX_DATA_DIRECTORY )\n check_e = utility_code.checkDirectoryExistence( self.PATH_TO_PARSED_ANNEX_COUNT_DIRECTORY )\n if check_a == 'exists' and check_b == 'exists' and check_c == 'exists' and check_d == 'exists' and check_e == 'exists':\n log.debug( 'path check passed' )\n else:\n message='path check failed; quitting'\n log.error( message )\n sys.exit( message )\n return", "def check_file_existence(self, files):\n if len(files) == 0:\n if self._nonfile_error is True:\n raise FileNotFound(\"No files are found.\")\n else:\n self._logger.info(\"No files are found. Nothing to do.\")\n return\n self._logger.info(\"Files found %s\" % files)", "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.train_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir))\n if not osp.exists(self.test_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.test_dir))", "async def check_files(hass):\n # Verify that the user downloaded all files.\n base = f\"{hass.config.path()}/custom_components/{DOMAIN}/\"\n missing = []\n for file in REQUIRED_FILES:\n fullpath = \"{}{}\".format(base, file)\n if not os.path.exists(fullpath):\n missing.append(file)\n\n if missing:\n _LOGGER.critical(\"The following files are missing: %s\", str(missing))\n returnvalue = False\n else:\n returnvalue = True\n\n return returnvalue", "def self_check(self):\n out = \"Loaded components\\n\"\n for package_name, package in sorted(self.packages.items()):\n out += \"\\t%s:\\n\" % package_name\n for c, fd in sorted(package.components.iteritems()):\n out += \"\\t\\t%s (%s)\\n\" % (c, fd.filename)\n\n LOG.info(out)\n\n for p in self.packages.values():\n for f in p.files:\n for id in f.requires:\n # This throws if it doesn't find something.\n try:\n self.get(id)\n except:\n LOG.exception(\"Error in: \" + f.filename)\n raise", "def require_found(cls,path):\n if not os.path.exists(path):\n raise exceptions.PathNotFoundError(path)", "def sanity_check(hdf):\n required_paths = ['Analyses', 'UniqueGlobalKey', 'Analyses/EventDetection_000']\n try:\n for p in required_paths:\n if p not in hdf:\n return False\n return True\n except:\n return False", "def test_ensure_dir_exists(self):\n pass", "def exists():\n check50.include(\"data\")\n check50.exists(\"adventure.py\")\n check50.exists(\"room.py\")", "def _checkIntegrity(self):\n return (\n os.path.isfile(os.path.join(self._root, 'processed/train.pkl'))\n and os.path.isfile(os.path.join(self._root, 'processed/test.pkl')))", "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.train_name_path):\n raise RuntimeError(\"'{}' is not available\".format(self.train_name_path))\n if not osp.exists(self.test_name_path):\n raise RuntimeError(\"'{}' is not available\".format(self.test_name_path))\n if not osp.exists(self.track_train_info_path):\n raise RuntimeError(\"'{}' is not available\".format(self.track_train_info_path))\n if not osp.exists(self.track_test_info_path):\n raise RuntimeError(\"'{}' is not available\".format(self.track_test_info_path))\n if not osp.exists(self.query_IDX_path):\n raise RuntimeError(\"'{}' is not 
available\".format(self.query_IDX_path))", "def _check_before_run(self):\r\n if not osp.exists(self.dataset_dir):\r\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\r\n if not osp.exists(self.train_dir):\r\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir))", "def _file_needs_to_be_created(self, file_path, quiet=False):\n if not self._args.check_for_existing_files:\n return True\n if os.path.exists(file_path):\n if not quiet:\n sys.stderr.write(\n \"File %s exists. Skipping its generation.\\n\" % file_path\n )\n return False\n return True", "def autodetect_files(self):\n if self._is_valid_requirements_file('requirements.txt'):\n self.filenames.append('requirements.txt')\n\n if self._is_valid_requirements_file('requirements.pip'): # pragma: nocover\n self.filenames.append('requirements.pip')\n\n if os.path.isdir('requirements'):\n for filename in os.listdir('requirements'):\n file_path = os.path.join('requirements', filename)\n if self._is_valid_requirements_file(file_path):\n self.filenames.append(file_path)\n self._check_inclusions_recursively()", "def check_files(self) -> None:\n notfound = False\n give_neuro_data_hint = False\n fullpaths = [f for f, _ in self.input_sources]\n if self.target_sources is not None:\n fullpaths.extend([f for f, _ in self.target_sources])\n for p in fullpaths:\n if not os.path.exists(p):\n print('{} not found.'.format(p))\n notfound = True\n if 'neuro_data_cdhw' in p:\n give_neuro_data_hint = True\n if give_neuro_data_hint:\n print('\\nIt looks like you are referencing the neuro_data_cdhw dataset.\\n'\n 'To install the neuro_data_xzy dataset to the default location, run:\\n'\n ' $ wget https://github.com/ELEKTRONN/elektronn.github.io/releases/download/neuro_data_cdhw/neuro_data_cdhw.zip\\n'\n ' $ unzip neuro_data_cdhw.zip -d ~/neuro_data_cdhw')\n if notfound:\n print('\\nPlease fetch the necessary dataset and/or '\n 'change the relevant file paths in the network config.')\n sys.stdout.flush()\n sys.exit(1)", "def _check_integrity(self):\n root = self.root\n for scene_name in self.scene_list:\n if not(os.path.isdir(os.path.join(root,scene_name)) and \n os.path.isdir(os.path.join(root,scene_name, images_dir)) and\n os.path.isfile(os.path.join(root,scene_name,annotation_filename))):\n return False\n return True", "def _check_integrity(self):\n root = self.root\n for scene_name in self.scene_list:\n if not(os.path.isdir(os.path.join(root,scene_name)) and \n os.path.isdir(os.path.join(root,scene_name, images_dir)) and\n os.path.isfile(os.path.join(root,scene_name,annotation_filename))):\n return False\n return True", "def check_all():\n for name, module in sorted(sys.modules.items()): # module files\n filepath = getattr(module, '__file__', None)\n if filepath is None:\n # we land here when a module is an attribute of another module\n # i.e., it exists twice in the sys.modules table, once as its\n # canonical representation, and again having been imported\n # within another module\n continue\n filepath = filepath.endswith(\".pyc\") and filepath[:-1] or filepath\n check_one(filepath)\n\n for filepath in extras: # additional files\n check_one(filepath)", "def _is_fluxcd_app_compliant(path):\n mandatory_components = (\"base\", constants.APP_ROOT_KUSTOMIZE_FILE)\n check_mandatory = all(comp in os.listdir(path)\n for comp in mandatory_components)\n return check_mandatory", "def create_required_paths(self) -> None:\n\n for required_path in self.required_paths:\n required_path.mkdir(parents=True, exist_ok=True)", "def 
check_requirements():\n debug(\"check_requirements\")\n needed = Requirements(Project).find_missing_requirements()\n if needed:\n info(\"Please add the following to your %s file:\\n\" % 'requirements.txt')\n info(\"\\n\".join(str(needed)))\n else:\n info(\"Your %s includes all known herringlib task requirements\" % 'requirements.txt')", "def check_helpers(self):\n paths = self.get_helper_out_paths()\n\n for p in paths:\n full_path = p + \".data-00000-of-00001\"\n file = Path(full_path)\n if not file.exists():\n return False\n\n return True", "def checkOptionalDependencies(self):\n \n # skip dependency check for downloading only\n if( self.downloadOnly ):\n return\n\n # soft dependencies\n failed = []\n for opt in self.optmodules:\n mod = self.parent.module(opt)\n if( mod == None ):\n failed.append(opt)\n \n # remove soft dependencies that were not found\n self.buildWithout(failed)", "def valid(self):\r\n if self.dir_exists and self.files_exist:\r\n return True\r\n else:\r\n return False", "def sanity_check(loadfile, queryfile):\n if not os.path.exists(loadfile):\n print(\"Error: The path for the load csv file does not exist\")\n return False\n\n if not os.path.exists(queryfile): \n print(\"Error: The path for the query csv file does not exist\")\n return False\n\n\n \"\"\"\n if not os.path.exists(\"/plots\"):\n os.mkdir(\"/plots\")\n \"\"\" \n return True", "def check_build_exists(self):\n path = self.base_dir + \"/\" + self.app_name + \"/\" + \"build\"\n from django_swagger_utils.core.utils.check_path_exists import check_path_exists\n build_dir = check_path_exists(path)\n if build_dir:\n raise Exception(\"Build Directory Already Exist, please run update_specs_build\")", "def _verify(self) -> None:\n # Check if the files already exist\n if os.path.exists(os.path.join(self.root, self.image_root)):\n return\n\n # Check if .zip files already exists (if so extract)\n exists = []\n for filename, md5 in zip(self.filenames, self.md5s):\n filepath = os.path.join(self.root, filename)\n if os.path.isfile(filepath):\n if self.checksum and not check_integrity(filepath, md5):\n raise RuntimeError(\"Dataset found, but corrupted.\")\n exists.append(True)\n extract_archive(filepath)\n else:\n exists.append(False)\n\n if all(exists):\n return\n\n # Check if the user requested to download the dataset\n raise RuntimeError(\n \"Dataset not found in `root` directory, either specify a different\"\n + \" `root` directory or manually download the dataset to this directory.\"\n )", "def test_script_exists(self):\n get_files=os.listdir(\"../../taxonomy/src_files\")\n self.assertIn(\"validate_match_batch.py\", get_files)", "def __check_exists(self):\n\n return os.path.exists(os.path.join(self.__path, 'train_images_tensor.pt')) and \\\n os.path.exists(os.path.join(self.__path, 'train_labels_tensor.pt')) and \\\n os.path.exists(os.path.join(self.__path, 'test_images_tensor.pt')) and \\\n os.path.exists(os.path.join(self.__path, 'test_labels_tensor.pt'))", "def check_out_files_exist(self):\n for filetype in self.filetypes:\n filename = self.out_filename(filetype)\n if not filename.is_file():\n log.error('MISSING: {}'.format(filename))\n return False\n\n return True", "def check_out_files_exist(self):\n for filetype in self.filetypes:\n filename = self.out_filename(filetype)\n if not filename.is_file():\n log.error('MISSING: {}'.format(filename))\n return False\n\n return True", "def check_out_files_exist(self):\n for filetype in self.filetypes:\n filename = self.out_filename(filetype)\n if not filename.is_file():\n 
log.error('MISSING: {}'.format(filename))\n return False\n\n return True", "def requirement_missing(script):\n if \"requires\" in script:\n if script[\"requires\"] is None:\n return False\n for package in script[\"requires\"].split():\n try:\n pkg_resources.working_set.require(package)\n except Exception:\n return True\n return False", "def checkRequiredConfigs(self):\n containmentFolder = self.getAbsContainmentFolder()\n rootFileName = self.app.config.exhale_args[\"rootFileName\"]\n rootFileTitle = self.app.config.exhale_args[\"rootFileTitle\"]\n doxygenStripFromPath = self.app.config.exhale_args[\"doxygenStripFromPath\"]\n\n # validate that the containmentFolder was created\n assert os.path.isdir(containmentFolder)\n # validate that {containmentFolder}/{rootFileName} was created\n assert os.path.isfile(os.path.join(containmentFolder, rootFileName))\n # validate that the title was included\n with open(os.path.join(containmentFolder, rootFileName), \"r\") as root:\n root_contents = root.read()\n root_heading = \"{0}\\n{1}\".format(\n rootFileTitle,\n exhale.utils.heading_mark(rootFileTitle, exhale.configs.SECTION_HEADING_CHAR)\n )\n assert root_heading in root_contents\n\n # TODO: validate doxygenStripFromPath\n if doxygenStripFromPath: # this is only here to avoid a flake8 fail on a todo\n pass", "def check_comps(root, comps):\n for key, comp in comps.items():\n\n filename = os.path.join(root, comp['filename'])\n if not os.path.isfile(filename):\n warnings.warn(\n 'The file {0} could not be found'.format(filename))", "def _need_generate(paths):\r\n if not os.path.exists(paths.generated_dir):\r\n return True\r\n\r\n if not os.path.exists(paths.index_file):\r\n return True\r\n\r\n # Use the index file to determine if regeneration is necessary\r\n with open(paths.index_file, 'r',newline='\\n') as index_file:\r\n indexed = [item for item in\r\n index_file.read().split('\\n') if len(item) != 0 and\r\n not item.startswith(\"#\")]\r\n return indexed != paths.resource_files", "def checkFiles( filenames ):\n \n missing = []\n for filename in filenames:\n if not os.path.exists( filename ):\n missing.append( filename )\n\n if missing:\n raise ValueError( \"missing files: %s\" % \",\".join(missing) )", "def test_not_present_file(self):\n\t\ttry:\n\t\t\tmain.Main(['input/abc.txt']).run()\n\t\texcept:\n\t\t\tself.assertTrue(True)", "def validate_files(dir, files_to_merge):\r\n for path in files_to_merge:\r\n pathname = dir.joinpath(path)\r\n if not pathname.exists():\r\n raise Exception(\"I18N: Cannot generate because file not found: {0}\".format(pathname))", "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))", "def shouldhave(self, thisfile):\n if not os.path.isfile(thisfile):\n self.logtxt(\"ERROR: expected file (%s/%s) does not exist!\" %\n (os.getcwd(), thisfile), 'error')", "def exists(self):\n basedir = os.path.dirname(self.path)\n\n for filename in self.files:\n path = os.path.join(basedir, filename)\n if not os.path.exists(path):\n return False\n\n return True", "def test_script_exists(self):\n get_files=os.listdir(\"../../taxonomy/src_files\")\n self.assertIn(\"write_qiime_train_db.py\", get_files)", "def check_tree(cls, root, require_init_py=True):\n if os.path.basename(root) == '__pycache__':\n return\n\n if not os.path.isfile(os.path.join(root, '__init__.py')):\n # Not a Python package directory\n if require_init_py:\n raise util.UserError(\n f\"No __init__.py file in '{root}'\")\n else:\n return\n\n 
# First, check for the case where a .py file and a directory\n # have the same name (without the extension). This can't be\n # handled, so just raise an exception\n found = set()\n for filename in os.listdir(root):\n path = os.path.join(root, filename)\n if os.path.isfile(path):\n filename, ext = os.path.splitext(filename)\n if ext == '.py':\n found.add(filename)\n\n for dirname in os.listdir(root):\n path = os.path.join(root, dirname)\n if os.path.isdir(path):\n if dirname in found:\n raise util.UserError(\n \"Found a directory and python file with same name in \"\n \"benchmark tree: '{0}'\".format(path))\n cls.check_tree(path, require_init_py=False)", "def testFilesExist(self):\n \n for year in range(2007,2013):\n self.assertTrue(os.path.exists(\"./IncomeHistogram_\"+ str(year)+\".pdf\"), \"A histogram didn't save to output.\")\n self.assertTrue(os.path.exists(\"./LogIncomeHistogram_\"+ str(year)+\".pdf\"), \"A histogram didn't save to output.\")\n self.assertTrue(os.path.exists(\"./IncomeBoxplot(log)_\"+ str(year)+\".pdf\"), \"A boxplot didn't save to output.\") \n self.assertTrue(os.path.exists(\"./results.txt\"), \"Results file doesn't exist.\")", "def _checkIntegrity(self):\n return (\n os.path.isfile(os.path.join(self._root, 'relu5-3/train.pkl'))\n and os.path.isfile(os.path.join(self._root, 'relu5-3/test.pkl')))", "def check_file_exist(self):\n return False", "def santityCheckInitialization(self):\r\n\r\n for obj in self.config[\"repos\"]:\r\n if not isdir(obj[\"path\"]):\r\n print(\"ERROR : Initialization Failed missing {} at path {}\".format(obj[\"name\"], obj[\"path\"]))", "def _check_dependencies(self):\n imgmin = exists('imgmin')\n image_optim = exists('image_optim')\n\n if not imgmin or not image_optim:\n puts(p('Dependencies have not been installed:'))\n\n message = 'imgmin - https://github.com/rflynn/imgmin'\n message = s('✓ ' + message) if imgmin else e('✗ ' + message)\n puts(message)\n\n message = 'image_optim - http://rubygems.org/gems/image_optim'\n message = s('✓ ' + message) if image_optim else e('✗ ' + message)\n puts(message)\n\n sys.exit(0)", "def validate_paths(paths):\n for (name, path) in paths.items():\n if not os.path.exists(path):\n raise MissingPath(path, name)", "def test_exists(self):\n self.assertTrue(os.path.exists(__file__) == self._system.exists(__file__))", "def assertFilePresent(self, root_path, path):\n full_path = os.path.join(root_path, path)\n self.assertTrue(os.path.exists(full_path))", "def check_config(outconfig):\n self.log.info(\"Checking if all the necessary files exist.\")\n\n # Perform necessary checks\n\n log.info(\"All necessary files exist for {} configuration.\".format(outconfig[\"Flavor\"]))\n\n return", "def check_needs_upload(self, path):\n if self.upload_always:\n return True\n fn = '/'.join([self.hdfs_home, '.knitDeps', os.path.basename(path)])\n if self.hdfs and self.hdfs.exists(fn):\n st = os.stat(path)\n size = st.st_size\n t = st.st_mtime\n info = self.hdfs.info(fn)\n if info['size'] == size and t < info['last_mod']:\n return False\n else:\n return True\n else:\n return True", "def check_prerequisites(self):\n status_command = 'gulp --version > /dev/null 2>&1'\n not_found = subprocess.call(\n status_command, **self.subprocess_args(shell=True)) == 127\n if not_found:\n install_commands = [\n 'Either add gulp to package.json or install globally using:'\n ' `sudo npm install -g gulp`']\n raise base_installer.MissingPrerequisiteError(\n 'The `gulp` command was not found.', install_commands=install_commands)", "def 
_test_folder_existance(self, task_specific_folders):\n for folder in (\n self._pathcreator.required_base_folders() + task_specific_folders\n ):\n if not os.path.exists(folder):\n self._write_err_msg_and_quit(\n \"Error! Folder '%s' does not exist! Is the given project \"\n \"folder name correct?\\n\" % folder\n )", "def _check(self, config: Dict):\n if 'path' not in config:\n raise FileNotFoundError(\"File not found.\")", "def VerifyExists(self, files):\n for file in files:\n if not os.path.exists(file):\n self.ErrorMsg(\"%s does not exist\" % file)\n return False\n return True", "def check_files(self, data_path):\n files = os.listdir(data_path)\n\n if 'test_batch' not in files:\n return False\n\n if 'batches.meta' not in files:\n return False\n\n for i in range(1, 6):\n if 'data_batch_{}'.format(i) not in files:\n return False\n\n return True", "def test_files_present(self, changes_file):\n for filename in changes_file.get_files():\n log.debug('Looking whether %s was actually uploaded' % filename)\n if os.path.isfile(os.path.join(pylons.config['debexpo.upload.incoming'], filename)):\n log.debug('%s is present' % filename)\n else:\n log.critical('%s is not present; importing cannot continue' % filename)\n raise OSError(\"Missing file %s in incoming\" % (filename))\n\n return True", "def test_script_exists(self):\n get_files=os.listdir(\"../../taxonomy/src_files\")\n self.assertIn(\"rdp_lineage_to_tax.py\", get_files)", "def checking_path():\n path = Path(\"phonebook\")\n try:\n path.mkdir(parents=True, exist_ok=False)\n except FileExistsError:\n pass\n else:\n pass", "def gather_required_files(filename):\n # open the file, while ignoring encoding errors (usually comments)\n encoding = open_guess_encoding(filename)\n with open(filename, encoding=encoding, errors='surrogateescape') as fp:\n config = MugenParser()\n config.read_string(fp.read())\n\n # go through each section and store any options that look like filenames\n required = set()\n for section in config.sections():\n section = config[section]\n options = set(find_asset(normpath(v)) for k, v in section.items()\n if filename_regex.match(v))\n required.update(options)\n\n # check other def files, then search them and add the results\n root = dirname(filename)\n for child_file in required.copy():\n name, ext = os.path.splitext(child_file)\n if ext.lower() == '.def':\n path = join(root, child_file)\n required.update(gather_required_files(path))\n\n # TODO: this is not implemented\n # mugen does checking against many paths, so we need\n # to emulate that the if we want to check for missing files\n # finally, go through the potential files and verify they exist\n # for child_file in required.copy():\n # path = join(root, child_file)\n # if not os.path.exists(path):\n # required.remove(child_file)\n\n return required", "def _check_jars_exist(self):\n junit_path = self.junit4_junit_path or _parse_from_classpath(\n _junit4_runner.JUNIT4_JAR_PATTERN, CLASSPATH\n )\n hamcrest_path = self.junit4_hamcrest_path or _parse_from_classpath(\n _junit4_runner.HAMCREST_JAR_PATTERN, CLASSPATH\n )\n for raw_path in (junit_path, hamcrest_path):\n if not pathlib.Path(raw_path).is_file():\n raise plug.PlugError(\n \"{} is not a file, please check the filepath you \"\n \"specified\".format(raw_path)\n )", "def ensure_modules_exist(found_modules: list, package_path: 'Path') -> None:\n\n for current_module in found_modules:\n\n module_path = package_path.joinpath(current_module + '.py')\n\n # Check result here to make sure module exists\n\n try:\n\n with 
open(str(module_path), 'r') as module_file:\n\n logging.getLogger('GUM Dispenser').info('Successfully opened ' + current_module)\n\n except FileNotFoundError:\n\n raise SourceModuleNotFoundError('The module named ' + current_module + ', specified in __init__.py, ' +\n 'does not exist')\n\n logging.getLogger('GUM Dispenser').info('All specified modules exist')", "def check_sanity(params):\n \n for dpath in ['input_dir','output_dir']:\n if path.isdir(params[dpath]) == False:\n print('ERROR: Cannot find directory '+params[dpath])\n exit()\n \n if path.isfile(params['star_file']) == False:\n print('ERROR: Cannot find star file '+params['star_file'])\n exit()", "def _check_missing_files_in_folder(self, expected_list_of_files):\n missing_files = [\n file_name for file_name in expected_list_of_files if self.folder_path / file_name not in self._ome_tif_files\n ]\n assert (\n not missing_files\n ), f\"Some of the TIF image files at '{self.folder_path}' are missing. The list of files that are missing: {missing_files}\"", "def built_file_must_exist(self, name, type=None, **kw):\n return self.must_exist(self.built_file_path(name, type, **kw))", "def check_path_existence(list_of_files, raise_exception=True):\n\n # flatten the list\n if isinstance(list_of_files, str):\n list_of_files = [list_of_files]\n while isinstance(list_of_files[0], list):\n tmp = [item for sublist in list_of_files for item in sublist]\n list_of_files = tmp\n\n # check if paths exist\n for path in list_of_files:\n if not os.path.exists(path):\n if raise_exception:\n raise AttributeError('Path does not exist: ' + path)\n else:\n print('Path does not exist: ' + path)\n return False\n return True", "def checkForFiles(quickLogger, fileList):\n \n for filename in fileList:\n if(not os.path.exists(filename)):\n quickLogger.critical(filename+\" doesn't exist.\")\n raise FileNotFound", "def test_unnecessary_files(self):\n path = os.path.join(BASE_DIR, \"tests\", \"fixtures\", \"test_unnecessary_files.zip\")\n zip_file = zipfile.ZipFile(path)\n\n with self.assertRaises(UnnecessaryFiles) as context:\n get_shapefile(zip_file)\n the_exception = context.exception\n self.assertEqual(UNNECESSARY_FILE, the_exception.message)", "def _check_missing_readme(self):\n self.msg_args = (self.config.readme_template_url,)\n for readme in DFTL_README_FILES:\n if os.path.isfile(os.path.join(self.module_path, readme)):\n return True\n return False", "def check_init_files_and_folders():\n\t#['cascade_wimb_bus_front_100_stages_1000_pos_3000_neg.xml', 'cascade_wimb_bus_front_33_stages_1000_pos_3000_neg_wrong.xml', 'color_detect_2.py', 'dedupe.py', 'detect_image_group_ku.py', 'detect_shape_5.py', 'get_cam_id_2.py', 'get_image_8.py', 'gui_hsv.py', 'knaps.py', 'knapsack_2.py', 'maps.html', 'program_detect_rectangle.zip', 'start_capture.py']\n\tfile_list=[\n\t#'cascade_wimb_bus_front_100_stages_1000_pos_3000_neg.xml', \n\t'models/cascade_wimb_bus_front_33_stages_1000_pos_3000_neg_wrong.xml', \n\t#'color_detect_2.py', \n\t#'dedupe.py', \n\t'detect_bus_haar_group.py', \n\t#'detect_shape_5.py', \n\t'get_cam_detail.py', \n\t'get_image.py', \n\t#'gui_hsv.py', \n\t#'knaps.py', \n\t#'knapsack_2.py', \n\t#'maps.html', \n\t#'program_detect_rectangle.zip', \n\t'start_wimb.py',\n\t'g.php',\n\t]\n\tdirectory_list=[\n\t'images',\n\t'images_bgs',\n\t'images_bgs_mask',\n\t#'images_bgs_result',\n\t'images_color',\n\t'images_haar',\n\t'images_haar_result',\n\t'images_number',\n\t'images_number_result',\n\t'models',\n\t'images_old',\n\t'text_number',\n\t]\n\t\n\tfor file_name in 
file_list: print 'file '+file_name+' existed: '+str(os.path.isfile(file_name))\n\tfor directory_name in directory_list: \n\t\tprint 'directory '+directory_name+' existed: '+str(os.path.isdir(directory_name))\n\t\tif not os.path.isdir(directory_name): \n\t\t\tos.makedirs(directory_name)\n\t\tif \"images\" in directory_name: shutil.copy(path+'/g.php',path+'/'+directory_name+'/g.php')" ]
[ "0.7485762", "0.7367102", "0.7227755", "0.70789534", "0.70283055", "0.69488746", "0.6946507", "0.672183", "0.6712726", "0.6712726", "0.6712726", "0.6712726", "0.6712726", "0.6712726", "0.6712726", "0.6698533", "0.66718006", "0.66628796", "0.6643828", "0.6639172", "0.66012675", "0.6586276", "0.6498097", "0.64865094", "0.64790297", "0.6473663", "0.6420706", "0.6394797", "0.63715583", "0.6335449", "0.63146996", "0.63066006", "0.630081", "0.628743", "0.6284837", "0.62792397", "0.62705386", "0.6267903", "0.6267134", "0.6251483", "0.62501", "0.62367797", "0.6224477", "0.6224477", "0.6214352", "0.62133574", "0.62005615", "0.6190365", "0.6175553", "0.61689365", "0.61468256", "0.61412686", "0.61383593", "0.612328", "0.6112088", "0.61009586", "0.6091614", "0.6091614", "0.6091614", "0.6056011", "0.605502", "0.60524404", "0.60442924", "0.6036787", "0.60107327", "0.5995174", "0.59747714", "0.5966033", "0.5954388", "0.5951466", "0.5947865", "0.5945993", "0.5940364", "0.5940254", "0.5935325", "0.59170663", "0.5916317", "0.59031403", "0.59030265", "0.58956844", "0.5886398", "0.58853275", "0.58805525", "0.58764076", "0.5874376", "0.58728534", "0.58725816", "0.58650994", "0.58449423", "0.5844807", "0.58235973", "0.58165026", "0.58134186", "0.5809175", "0.58021945", "0.5797822", "0.57894254", "0.5782512", "0.57805485", "0.57802355" ]
0.7092478
3
Perform an insert or update.
def _do_upsert(self, conn, item, spider):
    query_check = "select * from %s where url = %%s" % spider.name
    conn.execute(query_check, (item['url'], ))
    result = conn.fetchone()
    if result:
        query_udpate = "UPDATE %s SET price=%ss" % spider.name
        conn.execute(query_udpate, (item['price']))
        log.msg("Item updated in db: %s" % item, level=log.DEBUG)
    else:
        query_insert = "INSERT INTO %s (title, company, description, price, status, image, url, category) VALUES (%%s, %%s, %%s, %%s, %%s, %%s, %%s, %%s)" % spider.name
        conn.execute(query_insert, (item['title'], item['company'], item['description'], item['price'], item['status'], item['image'], item['url'], item['category']))
        log.msg("Item stored in db: %s" % item, level=log.DEBUG)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def insert_or_update(self, table, record):\n try:\n request = s.query(table=table, query={'sys_id': record['sys_id']})\n #request.get_single()\n response = request.update(record)\n print >> sys.stderr, 'update'\n except NoResults:\n # Record does not exist so create it\n response = self.snow.insert(table=table, payload=record)\n print >> sys.stderr, 'create'\n return response", "def insert_or_update(self, table, connection, row):\n\n # find line, if it exist\n dbrow = self.find(connection, table, row)\n\n # TODO XXX use actual database function instead of this stupid thing\n now = datetime.datetime.now()\n\n column_names = table.columns.keys()\n\n # UpdatedAt field configured ? Let's set the value in source hash\n if self.updated_at_field in column_names:\n row[self.updated_at_field] = now # XXX not pure ...\n\n # Update logic\n if dbrow:\n if not UPDATE in self.allowed_operations:\n raise ProhibitedOperationError('UPDATE operations are not allowed by this transformation.')\n\n query = table.update().values(\n **{col: row.get(col)\n for col in self.get_columns_for(column_names, row, dbrow)}\n ).where(and_(*(getattr(table.c, col) == row.get(col) for col in self.discriminant)))\n\n # INSERT\n else:\n if not INSERT in self.allowed_operations:\n raise ProhibitedOperationError('INSERT operations are not allowed by this transformation.')\n\n if self.created_at_field in column_names:\n row[self.created_at_field] = now # XXX UNPURE\n else:\n if self.created_at_field in row:\n del row[self.created_at_field] # UNPURE\n\n query = table.insert().values(**{col: row.get(col) for col in self.get_columns_for(column_names, row)})\n\n # Execute\n try:\n connection.execute(query)\n except Exception:\n connection.rollback()\n raise\n\n # Increment stats TODO\n # if dbrow:\n # self._output._special_stats[UPDATE] += 1\n # else:\n # self._output._special_stats[INSERT] += 1\n\n # If user required us to fetch some columns, let's query again to get their actual values.\n if self.fetch_columns and len(self.fetch_columns):\n if not dbrow:\n dbrow = self.find(row)\n if not dbrow:\n raise ValueError('Could not find matching row after load.')\n\n for alias, column in self.fetch_columns.items():\n row[alias] = dbrow[column]\n\n return row", "def try_insert_or_update(insert_function, data):\n with database.engine.begin() as connection:\n if len(data) > 0:\n connection.execute(insert_function, data)", "def execute_insert(self,insert):\n try:\n self.cursor.execute(insert)\n self.connection.commit()\n except Exception as error:\n self.connection.rollback()\n raise error", "def insert(self, sql):\n try:\n # Execute the SQL command\n self.cursor.execute(sql)\n # Commit your changes in the database\n self.db.commit()\n except:\n # Rollback in case there is any error\n self.db.rollback()", "def test_insert_or_update_query(self):\n\n row = (\n 'source',\n 'signal',\n 'time_type',\n 'geo_type',\n 'time_value',\n 'geo_value',\n 'value',\n 'stderr',\n 'sample_size',\n )\n mock_connector = MagicMock()\n database = Database()\n database.connect(connector_impl=mock_connector)\n\n database.insert_or_update(*row)\n\n connection = mock_connector.connect()\n cursor = connection.cursor()\n self.assertTrue(cursor.execute.called)\n\n sql, args = cursor.execute.call_args[0]\n self.assertEqual(args, row)\n\n sql = sql.lower()\n self.assertIn('insert into', sql)\n self.assertIn('`covidcast`', sql)\n self.assertIn('unix_timestamp', sql)\n self.assertIn('on duplicate key update', sql)", "def _insert_op(self, op):", "async def 
_insert_stmt(self):\n raise NotImplementedError", "def upsert(self, variable_value=None, commit=False):\n statement = UPDATE if self.exists else INSERT\n self.oxdb.execute(\n statement,\n variable_value or datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S'),\n self.variable_name, commit=commit)\n self.select()", "def insert(self):\n self.getDbRecord().insert()\n\n return", "def _upsert(cursor, table, data, pk):\n stamped = table in ('game', 'drive', 'play')\n update_set = ['%s = %s' % (k, '%s') for k, _ in data]\n if stamped:\n update_set.append('time_updated = NOW()')\n update_set = ', '.join(update_set)\n\n insert_fields = [k for k, _ in data]\n insert_places = ['%s' for _ in data]\n if stamped:\n insert_fields.append('time_inserted')\n insert_fields.append('time_updated')\n insert_places.append('NOW()')\n insert_places.append('NOW()')\n insert_fields = ', '.join(insert_fields)\n insert_places = ', '.join(insert_places)\n\n pk_cond = ' AND '.join(['%s = %s' % (k, '%s') for k, _ in pk])\n q = '''\n UPDATE %s SET %s WHERE %s;\n ''' % (table, update_set, pk_cond)\n q += '''\n INSERT INTO %s (%s)\n SELECT %s WHERE NOT EXISTS (SELECT 1 FROM %s WHERE %s)\n ''' % (table, insert_fields, insert_places, table, pk_cond)\n\n values = [v for _, v in data]\n pk_values = [v for _, v in pk]\n try:\n cursor.execute(q, values + pk_values + values + pk_values)\n except psycopg2.ProgrammingError as e:\n raise e", "def save(self):\n if self.id is None:\n self._insert()\n else:\n self._update()", "def _insert(\n self,\n objs,\n fields,\n returning_fields=None,\n raw=False,\n using=None,\n on_conflict=None,\n update_fields=None,\n unique_fields=None,\n ):\n self._for_write = True\n if using is None:\n using = self.db\n query = sql.InsertQuery(\n self.model,\n on_conflict=on_conflict,\n update_fields=update_fields,\n unique_fields=unique_fields,\n )\n query.insert_values(fields, objs, raw=raw)\n return query.get_compiler(using=using).execute_sql(returning_fields)", "def save(self):\n ret = False\n\n # we will only use the primary key if it hasn't been modified\n pk = None\n if self.schema.pk.name not in self.modified_fields:\n pk = self.pk\n\n if pk:\n ret = self.update()\n else:\n ret = self.insert()\n\n return ret", "def _insert(self, object_arr):\n _object = None\n\n try:\n if not self._is_session_valid():\n self._reset_session()\n for obj in object_arr:\n obj.setdefault(\"mode\", \"add\")\n\n _object = obj[\"instance\"]\n if obj[\"mode\"] == \"merge\":\n self._merge(_object)\n elif obj[\"mode\"] == \"add\":\n self._add(_object)\n elif obj[\"mode\"] == \"merge_by_query\":\n self._merge_by_query(obj)\n else:\n raise NotImplementedError(\"Invalid mode: {mode}\".format(mode=obj[\"mode\"]))\n self._commit()\n except DatabaseError.ConnectionError:\n raise\n except Exception:\n self._rollback()\n self._reset_session()\n raise", "def _query_insert(self, sql, data=None):\n\n conn = psycopg2.connect(self.connect_args)\n cur = conn.cursor()\n cur.execute(sql, data)\n conn.commit()\n cur.close()\n conn.close()", "def do_insert_data(self, *args):\n print(\"Provide data to insert\")\n self.connection_obj.insert_into_table(**self.__class__.populate_data())\n print(\"Data Insertion Successful\")", "def insert_execute(self, insert_data):\n self.execute(query=self.db_insert_schema.format(self.table_name), data=insert_data)", "def _insert_if_possible(self, query, values):\n try:\n self.cursor.execute(query, values)\n self.cnx.commit()\n return True\n except mysql.connector.errors.IntegrityError:\n self.cnx.rollback()\n return 
False", "def insert(self, data):\r\n pass", "def _do_upsert(self, conn, item, spider):\n id = self._get_id(item)\n now = datetime.utcnow().replace(microsecond=0).isoformat(' ')\n\n conn.execute(\"\"\"SELECT EXISTS(\n SELECT 1 FROM products WHERE id = %s\n )\"\"\", (id, ))\n ret = conn.fetchone()[0]\n\n if ret:\n conn.execute(\"\"\"\n UPDATE products\n SET url=%s, title=%s, picture=%s, price=%s, brand=%s, store=%s, id_store=%s, updated=%s, tag1=%s, tag2=%s, tag3=%s, tag4=%s, tag5=%s\n WHERE id=%s\n \"\"\", (item['url'], item['title'], item['picture'], item['price'], item['brand'], item['store'], item['id_store'], now, item['tag1'], item['tag2'] , item['tag3'], item['tag4'], item['tag5'], id))\n spider.log(\"Item updated in db: %s %r\" % (id, item))\n else:\n conn.execute(\"\"\"\n INSERT INTO products (id, url, title, picture, price, brand, store, id_store, updated, tag1, tag2, tag3, tag4, tag5)\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\n \"\"\", (id, item['url'], item['title'], item['picture'], item['price'], item['brand'], item['store'], item['id_store'], now, item['tag1'], item['tag2'] , item['tag3'], item['tag4'], item['tag5']))\n spider.log(\"Item stored in db: %s %r\" % (id, item))", "def execute(self, sql, val=()):\n cursor = self.__db.cursor()\n try:\n cursor.execute(sql, val)\n self.__db.commit()\n except Exception as e:\n self.__db.rollback()\n raise e", "def _execute_insert(self, insertQuery, insertValues):\n with self as plasticDB:\n cursor = plasticDB.connection.cursor()\n cursor.execute(insertQuery, insertValues)\n return cursor.lastrowid", "def _insert_if_new(cursor, table, data, **kwargs):\n pk_only_data = _subdict(METADATA_PRIMARY_KEYS[table], data, enforce_key_presence=True)\n if not _exists(cursor, table, pk_only_data):\n log('inserting new {}...'.format(table), end='')\n result = _insert_dict(cursor, table, data, **kwargs)\n log('done.')\n return result", "def test_upsert_user(self):\n db = database.Database()\n db.upsert_user('nick', 100, 100)\n\n the_args, _ = db._cursor.execute.call_args\n sql = the_args[0]\n expected_sql = 'INSERT INTO quota_violations (username, triggered, last_notified)\\n VALUES (%s, %s, %s)\\n ON CONFLICT (username)\\n DO UPDATE SET\\n (triggered, last_notified)\\n = (EXCLUDED.triggered, EXCLUDED.last_notified);\\n '\n\n self.assertEqual(sql, expected_sql)", "def update(self, sql):\n try:\n # Execute the SQL command\n self.cursor.execute(sql)\n # Commit your changes in the database\n self.db.commit()\n except:\n # Rollback in case there is any error\n self.db.rollback()", "def post_single(self, obj):\n\t\timport MySQLdb\n\t\tobj_copy = {}\n\t\t\n\t\tcolumns = self.db.columns(obj['type'])\n\t\t# copy valid columns\n\t\tfor c in columns:\n\t\t\tif obj.get(c):\n\t\t\t\tobj_copy[c] = obj.get(c)\n\n\t\tparts = {\n\t\t\t'tab': obj['type'],\n\t\t\t'cmd': self.post_action(obj)\n\t\t}\n\n\t\tif parts['cmd'] in ('insert', 'replace'):\n\t\t\tparts['cols'] = '`, `'.join(obj_copy.keys())\n\t\t\tparts['vals'] = ('%s,'*len(obj_copy))[:-1]\n\t\t\tquery = \"\"\"%(cmd)s into `%(tab)s`(`%(cols)s`) \n\t\t\t\tvalues (%(vals)s)\"\"\" % parts\n\t\telse:\n\t\t\tparts['set'] = ', '.join(['`%s`=%s' % (key, '%s') for key in obj_copy.keys()])\n\t\t\tparts['name'] = obj['name'].replace(\"'\", \"\\'\")\n\t\t\tquery = \"\"\"update `%(tab)s` set %(set)s where name='%(name)s'\"\"\" % parts\n\t\t\n\t\tself.db.sql(query, tuple(obj_copy.values()))", "def db_upsert(self, force_insert=False):\n\n if self.table.search((Query().name == self.name)):\n if 
force_insert:\n # self.already_exist = False\n self.table.update({'name': self.name, 'positions': self.positions}, Query().id == self.id)\n\n else:\n # self.already_exist = True\n return \"Already Exist\"\n else:\n self.table.insert({\n 'id': self.id,\n 'name': self.name,\n 'positions': self.positions\n }) # insert the given data\n\n return \"\"", "def insert(self, tname, valdict, cols = None):\n icmd, vals = make_insert_command(tname, valdict, cols)\n self.write_curs.execute(icmd, vals)", "def updateOrInsert(cls, session, itemData):\n\n existingItem = Item.lookup(session, itemData['identifiers'])\n\n if existingItem is not None:\n logger.debug('Found existing item by identifier')\n existingItem.update(session, itemData)\n outItem = existingItem\n else:\n logger.debug('Inserting new item record')\n outItem = Item.createItem(session, itemData)\n\n return outItem", "def save(self):\n if not connection.connected:\n raise Exception('Not connected to the database.')\n if not self._retrieved:\n self.insert()\n self._retrieved = True\n else:\n self.update()", "def db_upsert(self, force_insert=False):\n\n if self.table.search((Query().name == self.name)):\n if force_insert:\n # self.already_exist = False\n self.table.update({'name': self.name, 'cartesian_coords': self.cartesian_coords, 'polar_cords': self.polar_coords}, Query().id == self.id)\n\n else:\n # self.already_exist = True\n return \"Already Exist\"\n else:\n self.table.insert({\n 'id': self.id,\n 'name': self.name,\n 'cartesian_coords': self.cartesian_coords,\n 'polar_cords': self.polar_coords\n }) # insert the given data\n\n return \"\"", "def Save(self) -> None:\n self.__conn.commit()", "def insert_to_db(self, query):\n try:\n q = self.connection.execute(query)\n except Exception:\n self.print_std_error()", "def single_insert(conn, insert_req):\n cursor = conn.cursor()\n try:\n cursor.execute(insert_req)\n conn.commit()\n except (Exception, psycopg2.DatabaseError) as error:\n print(\"Error: %s\" % error)\n conn.rollback()\n cursor.close()\n return 1\n cursor.close()", "def data_insertion(self, data_dict: Dict):\n\n #self.__create_db()\n self.__create_table()\n\n self.current_state = self.system.insert().values(\n timestamp = data_dict['timestamp'],\n vibration_sensor = data_dict['vibration_sensor'],\n flow = data_dict['flow'],\n pressure = data_dict['pressure'],\n power_consumption = data_dict['power_consumption'],\n failure_times = data_dict['failure_times'],\n operational = data_dict['operational']\n )\n\n self.connection.execute(self.current_state)\n\n if self.max_table_size is not None:\n self.__cleanup_dt()", "def insert_record(self):\r\n try:\r\n db.session.add(self)\r\n db.session.commit()\r\n return {\"error\": False, \"id\": self.id}\r\n except exc.SQLAlchemyError as e: # pragma: no cover\r\n # print(e)\r\n # print(sys.exc_info())\r\n db.session.rollback()\r\n return {\"error\": True}\r\n finally:\r\n db.session.close()", "async def save(self, force_insert=False, force_update=False,\n update_fields=None, connection=None):\n if force_insert and force_update:\n raise ValueError(\n 'Cannot use force_insert and force_update together')\n\n db = self.objects\n state = self.__getstate__()\n\n # Remove any fields are in the state but should not go into the db\n for f in self.__excluded_fields__:\n state.pop(f, None)\n\n # Replace any renamed fields\n for old_name, new_name in self.__renamed_fields__.items():\n state[new_name] = state.pop(old_name)\n\n table = db.table\n async with db.connection(connection) as conn:\n if force_update 
or (self._id and not force_insert):\n\n # If update fields was given, only pass those\n if update_fields is not None:\n # Replace any update fields with the appropriate name\n renamed = self.__renamed_fields__\n update_fields = (renamed.get(f, f) for f in update_fields)\n\n # Replace update fields with only those given\n state = {f: state[f] for f in update_fields}\n\n q = table.update().where(\n table.c[self.__pk__] == self._id).values(**state)\n r = await conn.execute(q)\n if not r.rowcount:\n log.warning(\n f'Did not update \"{self}\", either no rows with '\n f'pk={self._id} exist or it has not changed.')\n else:\n if not self._id:\n # Postgres errors if using None for the pk\n state.pop(self.__pk__, None)\n q = table.insert().values(**state)\n r = await conn.execute(q)\n\n # Don't overwrite if force inserting\n if not self._id:\n if hasattr(r, 'lastrowid'):\n self._id = r.lastrowid # MySQL\n else:\n self._id = await r.scalar() # Postgres\n\n # Save a ref to the object in the model cache\n db.cache[self._id] = self\n self.__restored__ = True\n return r", "def insert(self, *args):\n self.insert_count += 1\n self.total_ops += 1\n return super(BulkOperator, self).insert(*args)", "def save(self):\r\n if self.instance is None:\r\n raise CQLEngineException(\"DML Query intance attribute is None\")\r\n assert type(self.instance) == self.model\r\n\r\n nulled_fields = set()\r\n if self.instance._has_counter or self.instance._can_update():\r\n return self.update()\r\n else:\r\n insert = InsertStatement(self.column_family_name, ttl=self._ttl, timestamp=self._timestamp)\r\n for name, col in self.instance._columns.items():\r\n val = getattr(self.instance, name, None)\r\n if col._val_is_null(val):\r\n if self.instance._values[name].changed:\r\n nulled_fields.add(col.db_field_name)\r\n continue\r\n insert.add_assignment_clause(AssignmentClause(\r\n col.db_field_name,\r\n col.to_database(getattr(self.instance, name, None))\r\n ))\r\n\r\n # skip query execution if it's empty\r\n # caused by pointless update queries\r\n if not insert.is_empty:\r\n self._execute(insert)\r\n\r\n # delete any nulled columns\r\n self._delete_null_columns()", "def add_or_update(self, object):\n self.lock.acquire()\n result = self.__Session.merge(object)\n self.__Session.commit()\n self.lock.release()\n return result", "def insert(self):\n pass", "def insert(self):\n db.session.add(self)\n db.session.commit()", "def insert(self):\n db.session.add(self)\n db.session.commit()", "def insert(self):\n db.session.add(self)\n db.session.commit()", "def commit(self):\n if not getattr(self, '_id', None):\n return self._create()\n res = self._update()\n self._dirty = False\n return res", "def test_upsert_w_returning(self, connection):\n\n data = self.tables.data\n\n initial_data = [\n {\"x\": \"x1\", \"y\": \"y1\", \"z\": 4},\n {\"x\": \"x2\", \"y\": \"y2\", \"z\": 8},\n ]\n ids = connection.scalars(\n data.insert().returning(data.c.id), initial_data\n ).all()\n\n upsert_data = [\n {\n \"id\": ids[0],\n \"x\": \"x1\",\n \"y\": \"y1\",\n },\n {\n \"id\": 32,\n \"x\": \"x19\",\n \"y\": \"y7\",\n },\n {\n \"id\": ids[1],\n \"x\": \"x5\",\n \"y\": \"y6\",\n },\n {\n \"id\": 28,\n \"x\": \"x9\",\n \"y\": \"y15\",\n },\n ]\n\n stmt = provision.upsert(\n config,\n data,\n (data,),\n lambda inserted: {\"x\": inserted.x + \" upserted\"},\n )\n\n result = connection.execute(stmt, upsert_data)\n\n eq_(\n result.all(),\n [\n (ids[0], \"x1 upserted\", \"y1\", 4),\n (32, \"x19\", \"y7\", 5),\n (ids[1], \"x5 upserted\", \"y2\", 8),\n (28, \"x9\", 
\"y15\", 5),\n ],\n )", "def _execute(self, db):\n raise NotImplementedError", "def test_upsert_w_returning(self, connection):\n\n data = self.tables.data\n\n initial_data = [\n {\"x\": \"x1\", \"y\": \"y1\", \"z\": 4},\n {\"x\": \"x2\", \"y\": \"y2\", \"z\": 8},\n ]\n ids = connection.scalars(\n data.insert().returning(data.c.id), initial_data\n ).all()\n\n upsert_data = [\n {\n \"id\": ids[0],\n \"x\": \"x1\",\n \"y\": \"y1\",\n },\n {\n \"id\": 32,\n \"x\": \"x19\",\n \"y\": \"y7\",\n },\n {\n \"id\": ids[1],\n \"x\": \"x5\",\n \"y\": \"y6\",\n },\n {\n \"id\": 28,\n \"x\": \"x9\",\n \"y\": \"y15\",\n },\n ]\n\n stmt = provision.upsert(\n config,\n data,\n (data,),\n set_lambda=lambda inserted: {\"x\": inserted.x + \" upserted\"},\n )\n\n result = connection.execute(stmt, upsert_data)\n\n eq_(\n result.all(),\n [\n (ids[0], \"x1 upserted\", \"y1\", 4),\n (32, \"x19\", \"y7\", 5),\n (ids[1], \"x5 upserted\", \"y2\", 8),\n (28, \"x9\", \"y15\", 5),\n ],\n )", "def insert(self, doc_or_docs):\n return self.database.connection.request.insert_documents(\n self.database.name, self.name, doc_or_docs)", "def _doing(self, data):\n curr = self.conn.cursor()\n curr.executemany(self.sql, data)\n self.conn.commit()\n curr.close()", "def upsert(\n path, table, json_file, pk, nl, csv, tsv, batch_size, alter, not_null, default\n):\n insert_upsert_implementation(\n path,\n table,\n json_file,\n pk,\n nl,\n csv,\n tsv,\n batch_size,\n alter=alter,\n upsert=True,\n not_null=not_null,\n default=default,\n )", "def save(self):\n model = type(self)\n\n if not self._in_db: # insert\n id = model.insert(**self.data).execute()\n\n if id is not None:\n self.data[model.primarykey.name] = id # set primarykey value\n self.set_in_db(True)\n self._cache = self.data.copy() # sync cache after saving\n return id\n else: # update\n # only update changed data\n dct = dict(set(self.data.items()) - set(self._cache.items()))\n\n if self._id is None:\n raise PrimaryKeyValueNotFound # need its primarykey value to track this instance\n\n if dct:\n query = model.at(self._id).update(**dct)\n rows_affected = query.execute()\n else:\n rows_affected = 0L\n self._cache = self.data.copy() # sync cache after saving\n return rows_affected", "def execute(self):\r\n global db_runtime_context\r\n if db_runtime_context.current_db is None:\r\n print(\"!Failed to execute query because no database is selected!\")\r\n return None \r\n\r\n self.tableName = self.tableName.lower()\r\n \r\n if self.tableName is not None:\r\n\r\n update_table = db_runtime_context.current_db.getTableByName(self.tableName)\r\n\r\n if update_table is not None:\r\n pass\r\n else:\r\n print(\"!Failed to execute query on table\", self.tableName, \"because it does not exist!\")\r\n return None \r\n\r\n # Check for a lock\r\n if not db_runtime_context.current_db.isWritable(update_table.tableName):\r\n print(f\"Error: Table {update_table.tableName} is locked!\")\r\n return\r\n\r\n\r\n\r\n db_runtime_context.current_db.tables[self.tableName].update(self.targets, self.conditions)\r\n\r\n db_runtime_context.current_db.successfulTransactions += 1", "def upsert(cursor, primary, values, table):\n\t# type: (Cursor, dict, dict, str) -> bool\n\n\t# use INSERT ... 
ON DUPLICATE KEY UPDATE instead?\n\n\tif not primary:\n\t\traise ValueError(\"Empty primary mapping would result in an empty WHERE condition which would affect all rows\")\n\n\tset_str = \",\".join(\"{}=?\".format(k) for k in values.keys())\n\twhere_str = \" AND \".join(\"{}=?\".format(k) for k in primary.keys())\n\n\tcursor.execute(\"UPDATE {} SET {} WHERE {}\".format(table, set_str, where_str), # nosec\n\t\tchain(values.values(), primary.values())\n\t)\n\n\tif cursor.rowcount == 0:\n\t\tinto_str = \",\".join(chain(primary.keys(), values.keys()))\n\t\tvalues_str = \",\".join(repeat(\"?\", len(primary) + len(values)))\n\t\tcursor.execute(\"INSERT INTO {} ({}) VALUES ({})\".format(table, into_str, values_str), # nosec\n\t\t\tchain(primary.values(), values.values())\n\t\t)\n\t\treturn True\n\n\treturn False", "def save(self):\n if self.id:\n self.update()\n else:\n self.create()", "def insert(self, table, element):\n\n update = self.update(table, element)\n if update:\n return update\n\n fields = []\n values = []\n for key in element.keys():\n fields.append(key)\n values.append(element[key])\n result = self.__insert(table, fields, values)\n return result", "def insert(self, query: str, *args, **kwargs):\n cursor = self._cursor()\n try:\n self._execute(cursor, query, args, kwargs)\n return cursor.lastrowid\n finally:\n cursor.close()", "def insert_update_mysql_query(self, query):\n mysql_cursor = self.mysql_conn.cursor()\n try:\n mysql_cursor.execute(query)\n self.mysql_conn.commit()\n self.mysql_conn.close()\n response = \"SUCCESS\"\n except Exception, ex:\n self.mysql_conn.rollback()\n self.mysql_conn.close()\n response = \"ERROR: \" + str(ex)\n return response", "async def put(self, key, data, table_name='opsdroid_default'):\n _LOGGER.debug(\"Putting %s into PostgreSQL table %s\", key, table_name)\n\n json_data = json.dumps(data, cls=JSONEncoder)\n\n async with self.connection.transaction():\n key_already_exists = await self.get(key, table_name=table_name)\n if key_already_exists:\n await self.connection.execute(\n \"UPDATE {} SET data = $2 WHERE key = $1\".format(table_name),\n key, json_data\n )\n else:\n await self.connection.execute(\n \"INSERT INTO {} VALUES ($1, $2)\".format(table_name),\n key, json_data\n )", "def insert(\n path,\n table,\n json_file,\n pk,\n nl,\n csv,\n tsv,\n batch_size,\n alter,\n ignore,\n replace,\n truncate,\n not_null,\n default,\n):\n insert_upsert_implementation(\n path,\n table,\n json_file,\n pk,\n nl,\n csv,\n tsv,\n batch_size,\n alter=alter,\n upsert=False,\n ignore=ignore,\n replace=replace,\n truncate=truncate,\n not_null=not_null,\n default=default,\n )", "def insert(self, query, callback=None, query_args=None):\r\n data = self.db.execute(query, query_args)\r\n return data", "def execute(self,data):\n\n try:\n\n start = time.time()\n\n self.cursor.executemany(self.operation, data)\n\n end = time.time()\n\n logger.info(\"Operation [{}] took {:.3f} seconds; {} operations processed\".format(self.operation, end-start, len(data)))\n\n except Exception, exc:\n\n # Not so typical: handle integrity constraints (generate warnings)\n if exc.__class__.__name__ != \"IntegrityError\":\n raise\n\n self.conn.rollback()\n\n for record in data:\n\n try:\n self.cursor.execute(self.operation, record)\n self.conn.commit()\n\n except Exception, exc:\n\n # This record is the culprit\n if exc.__class__.__name__ != \"IntegrityError\":\n logger.error(\"Exception [{}] occurred inserting record {}\".format(exc.message, record))\n logger.error(\"Operation was: 
{}\".format(self.operation))\n raise\n\n error_msg = str(exc.message).rstrip()\n logger.warn( \"Integrity error (\\\"{}\\\"); data={}\".format(error_msg, record) )\n\n else:\n # If all goes well, we just need a single commit\n self.conn.commit()", "def write(engine, table_name, src, return_id=True, do_update=False):\n query = build_upsert_query(engine, table_name, src, do_update=do_update)\n LOGGER.debug(query)\n engine.execute(query)\n if return_id:\n return fetch_id(engine, table_name, src)", "def write(self):\n if not self._table: raise ValueError ( \"_table is Null\" )\n if self._isnew:\n for m in self._modified_values:\n self._modified_values[m] = self._table[m].val_py2sql(self._modified_values[m])\n \n try:\n rec = CFG.CX.insert ( CFG.DB.SCHEMA + \".\" + self._table.name,\n self._modified_values )\n #this will automatically re-read the data from the db, to take all changes\n #done by triggers and default values into account.\n self._objectid = rec['objectid']\n\n #print \"Record # {0} inserted into {1}.\".format(self._objectid, self._table.name)\n self.raiseEvent ( \"record_added\", self )\n \n except pg.DatabaseError, e:\n print \"Error inserting record.\"\n raise Record.DataManipulationError ( \"Inserting a new record into '{0}'\".format(self._table.name),\n str(self._modified_values),\n e)\n elif self._ismodified:\n \n for m in self._modified_values: \n self._modified_values[m] = self._table[m].val_py2sql(self._modified_values[m])\n \n self._modified_values['objectid'] = self._objectid\n del self.TextCache[self._objectid]\n try:\n rec = CFG.CX.update ( CFG.DB.SCHEMA + \".\" + self._table.name,\n self._modified_values )\n self.read() \n self.raiseEvent ( \"record_saved\", self )\n except pg.DatabaseError, e:\n print \"Error updating record\"\n raise Record.DataManipulationError ( \"Updating record {1} of '{0}'\".format(self._table.name, self._objectid),\n str(self._modified_values),\n e)", "def insert(self, fields, values):\n sql = self.generate_insert_sql(fields, values)\n self.sqlhistory.append(sql)\n return self.sql_insert(sql)", "def upsert(self, ctx, data, keys = []):\n\n # TODO: Check for AutoIncrement in keys, shall not be used\n\n # If keys\n qfilter = {}\n if (len(keys) > 0):\n for key in keys:\n try:\n qfilter[key] = data[key]\n except KeyError as e:\n raise Exception(\"Could not find attribute '%s' in data when storing row data: %s\" % (key, data))\n else:\n pk = self.pk(ctx)\n qfilter[pk[\"name\"]] = data[pk[\"name\"]]\n\n # Do lookup\n if len(qfilter) > 0:\n\n row = self.lookup(ctx, qfilter)\n\n if (row):\n # Check row is identical\n for c in self.columns:\n if c[\"type\"] != \"AutoIncrement\":\n v1 = row[c['name']]\n v2 = data[c['name']]\n if c[\"type\"] == \"Date\":\n v1 = row[c['name']].strftime('%Y-%m-%d')\n v2 = data[c['name']].strftime('%Y-%m-%d')\n if (isinstance(v1, str) or isinstance(v2, str)):\n if (not isinstance(v1, str)): v1 = str(v1)\n if (not isinstance(v2, str)): v2 = str(v2)\n if (v1 != v2):\n if (c[\"name\"] not in self._lookup_changed_fields):\n logger.warn(\"%s updating an entity that exists with different attributes, overwriting (field=%s, existing_value=%s, tried_value=%s)\" % (self, c[\"name\"], v1, v2))\n #self._lookup_changed_fields.append(c[\"name\"])\n\n # Update the row\n row = self.update(ctx, data, keys)\n return row\n\n row_with_id = self.insert(ctx, data)\n return row_with_id", "def insert(self, data, table, **kwargs):\n logging.info(f'Inserting into `{table}`')\n\n try:\n data.to_sql(table, self.engine, **kwargs)\n try:\n 
self.execute(f'ALTER TABLE `{table}` ADD PRIMARY KEY (`id`);')\n except:\n pass\n return True\n except:\n logging.exception('Something went wrong inserting. Check trace.')\n return False", "def upsert_user(user_id, nick_name, first_name, last_name):\n if execute_select(get_user_sql, (user_id,)):\n execute_insert(update_user_sql, (nick_name, first_name, last_name, user_id))\n else:\n execute_insert(add_user_sql, (user_id, nick_name, first_name, last_name))", "def insert(self):\n ret = True\n\n schema = self.schema\n fields = self.depopulate(False)\n\n q = self.query\n q.set_fields(fields)\n pk = q.insert()\n if pk:\n fields = q.fields\n fields[schema.pk.name] = pk\n self._populate(fields)\n\n else:\n ret = False\n\n return ret", "def send_insert(self, statement):\n if self.protocol is None:\n raise OperationalError(\"MySQLx Connection not available\")\n msg_type, msg = self.protocol.build_insert(statement)\n self.protocol.send_msg(msg_type, msg)\n ids = None\n if isinstance(statement, AddStatement):\n ids = statement.ids\n return Result(self, ids)", "def save(self, data):\n query = \"INSERT INTO {} (title, body, meetup_id, user_id) \\\n VALUES('{}','{}','{}', '{}') RETURNING *\".format(self.table, data['title'], data['body'], data['meetup_id'], data['user_id'])\n self.cur.execute(query)\n result = self.cur.fetchone()\n self.conn.commit()\n return result", "def insert(self,table,values):\n self.connect.execute(self.insert_disc[table],values)\n self.connect.commit()", "def insert(q, *params):\n db = Database()\n db.cur.execute(q, *params)\n ret_id = db.cur.lastrowid\n db.con.close()\n return ret_id", "def insert_or_update_instance(collection, instance, verbose: bool) -> str:\n now = datetime.now(pytz.utc)\n # also use photo number to identify\n # therefore the db has to be dropped first\n if instance.get(\"Photo\", None):\n existing = collection.find_one(\n {\"UID\": instance[\"UID\"], \"Photo\": instance[\"Photo\"]}\n )\n if not existing:\n existing = collection.find_one({\"UID\": instance[\"UID\"]})\n else:\n existing = collection.find_one({\"UID\": instance[\"UID\"]})\n if not existing:\n instance[\"created\"] = now\n collection.insert_one(instance)\n return \"inserted\"\n else:\n instance[\"modified\"] = now\n if verbose:\n print(\"updated UID:\", instance[\"UID\"])\n print(\"genotype before update:\", existing[\"genotype\"])\n print(\"genotype after update:\", instance[\"genotype\"])\n collection.update_one({\"UID\": instance[\"UID\"]}, {\"$set\": instance})\n return \"updated\"", "def touch(self, connection=None):\n self.create_marker_table()\n\n if connection is None:\n connection = self.connect()\n connection.autocommit = True # if connection created here, we commit it here\n\n connection.cursor().execute(\n \"\"\"INSERT INTO {marker_table} (update_id, target_table)\n VALUES (%s, %s)\n ON DUPLICATE KEY UPDATE\n update_id = VALUES(update_id)\n \"\"\".format(marker_table=self.marker_table),\n (self.update_id, self.table)\n )\n # make sure update is properly marked\n assert self.exists(connection)", "def insert(cls, values):\n result = yield InsertQuery(cls, values).execute()\n returnValue(result)", "def rpc_database_insert_row(self, keys, values):\n\t\tif not isinstance(keys, (list, tuple)):\n\t\t\tkeys = (keys,)\n\t\tif not isinstance(values, (list, tuple)):\n\t\t\tvalues = (values,)\n\t\tassert len(keys) == len(values)\n\t\ttable_name = self.path.split('/')[-2]\n\t\tfor key, value in zip(keys, values):\n\t\t\tassert key in DATABASE_TABLES[table_name]\n\t\ttable = 
DATABASE_TABLE_OBJECTS.get(table_name)\n\t\tassert table\n\t\tsession = db_manager.Session()\n\t\trow = table()\n\t\tfor key, value in zip(keys, values):\n\t\t\tsetattr(row, key, value)\n\t\tsession.add(row)\n\t\tsession.close()\n\t\treturn", "def upsert(self, context: dict=None) -> None:\n assert(context.get('data', None) is not None)\n\n data = context.get('data', None)\n\n logging.info(f\"Upsert: {data}\")\n try:\n user_record = self.table.first(formula=f\"username='{data['username']}'\")\n user_id = user_record['id'] if user_record else None\n if user_id:\n self.table.update(user_id, fields=data, replace=True, typecast=True)\n else:\n self.table.create(fields=data, typecast=True)\n except Exception as ex:\n self.close_database()\n raise DatabaseError({\n \"code\": f\"Airtable exception\",\n \"description\": f'Database: `{self.db_name}`\\n`upsert({data})`\\nEnsure DB entities exist',\n \"message\": str(ex),\n }, 500)", "async def insert(self, args: Dict[str, Any]):\n keys = \", \".join(args.keys())\n values = \", \".join([f\"${i + 1}\" for i in range(len(args))])\n\n conn: Connection\n async with self.db_pool.acquire() as conn:\n await conn.execute(\n f\"INSERT INTO {self.table_name} \"\n f\"({keys}) VALUES ({values})\",\n *args.values(),\n )", "def insert(self, data):\n\n if not data:\n raise ValueError('invalid data')\n\n # TODO: validate and insert data into model", "def DBExecute( DB, sql, *args ):\n DB.execute( sql, args )\n DB.commit()", "def insert(self, json_data, batch=None):\n headers = {'Content-type': 'application/json;charset=UTF-8'}\n url = '/'.join([self.url, self.db])\n time.sleep(0.01)\n if batch:\n r = requests.post(url, data=json_data, headers=headers, params={'batch': 'ok'})\n else:\n r = requests.post(url, data=json_data, headers=headers) \n time.sleep(0.01)\n if not r.status_code in (201, 202):\n raise Exception(\"HTTP \" + str(r.status_code))", "def insert(self):\n sql = u'INSERT INTO %s' % self.table()\n keys = []\n values = []\n format_values = []\n for field in self.fields():\n attr = object.__getattribute__(self, field)\n if attr.auto_value:\n continue\n keys.append(field)\n format_values.append(attr.format)\n values.append(attr._value)\n keys_str = u'( %s )' % u', '.join(keys)\n values_str = u'VALUES( %s )' % u', '.join(format_values)\n sql = '%s %s %s;' % (sql, keys_str, values_str)\n connection.execute(sql, values)\n primary_k = self.__class__.get_primary()\n primary = object.__getattribute__(self, primary_k)\n primary.value = connection.connection.insert_id()", "def test_update(self):\n query = \"insert into cds values(%s,%s,%s,%s)\"\n values = (156098,\"haha\",\"haha 5\",2)\n self.a.insert(query, values)\n query1 = \"update cds set Quantity=%s where id=%s\"\n values1 = (5, 156098)\n self.a.update(query1, values1)\n query2 = \"select * from cds where id=156609\"", "def upsert(self, obj):\r\n url = '{0}/upsert'.format(self.get_url())\r\n request = http.Request('PUT', url, self.wrap_object(obj))\r\n\r\n return request, parsers.parse_empty", "def db_insert_or_get(model, defaults=None, **kwargs):\n instance = model.query.filter_by(**kwargs).first()\n if instance:\n return instance, True\n else:\n kwargs.update(defaults or {})\n instance = model(**kwargs)\n db.session.add(instance)\n return instance, False", "def insert_record(self, record, session):\n try:\n session.add(record)\n session.commit()\n session.close()\n return True\n except:\n\n logging.exception(\"http record cannot be added to db \" \":Time: \" + str(datetime.datetime.now()))\n return False", 
"def execute(self, sql, *args, **kwgs):\n curr = self.conn.cursor()\n curr.execute(sql, *args, **kwgs)\n self.conn.commit()\n curr.close()", "def put(self, *args, **kwargs):\n self.before_put(*args, **kwargs)\n\n super(DatastoreModel, self).put(*args, **kwargs)\n\n self.after_put(*args, **kwargs)", "def upsert(self, key, values=[]):\n\n return self._put(\"\", key, values)", "def _do_commit(self):", "def save(self):\n pk = self.get_pk()\n if pk and not self._is_new_record and self._edited_fields:\n set_vars = self.get_field_dict(fields=self._edited_fields)\n self.update(**set_vars).filter(**{self.get_pk_name(): pk}).execute()\n elif self._is_new_record:\n insert_vars = self.get_field_dict()\n if self._meta.auto_increment:\n insert_vars.pop(self.get_pk_name())\n new_pk = self.insert(**insert_vars).execute()\n if self._meta.auto_increment:\n self.set_pk(new_pk)\n self.set_new_record_state(False)\n elif not pk and not self._is_new_record:\n raise ValueError('[Model.save] Primary key is not defined ' +\n 'while the data is stored')\n self._edited_fields.clear()", "def main(req: func.HttpRequest, products: func.Out[func.SqlRowList]) -> func.HttpResponse:\n\n # Note that this expects the body to be an array of JSON objects which\n # have a property matching each of the columns in the table to upsert to.\n body = json.loads(req.get_body())\n rows = func.SqlRowList(map(lambda r: func.SqlRow.from_dict(r), body))\n products.set(rows)\n\n return func.HttpResponse(\n body=req.get_body(),\n status_code=201,\n mimetype=\"application/json\"\n )", "def Insert(self):\n sql = 'INSERT INTO %s ( %s ) VALUES ( %s )' % (\n self.table_name,\n ', '.join(self.values),\n ', '.join(['?' for _ in self.values])\n )\n return Database().Execute(sql, tuple(self.values.values()))", "def execute(self, sql):\n return self.db.execute(sql)", "def singleInsert(self, table_name, fields, field_values, field_types=[]):\n if not self.checkTable(table_name):\n self.createTable(table_name, fields, field_types)\n self.transactionInsert(table_name, fields, field_values)\n self.transactionEnd()", "def _commit(self):\n if self.__session is not None:\n self.__session.commit()", "def upsert(self):\n\n if self.cluster:\n self.cluster.upsert()\n else:\n super().upsert()", "def insert(self, key, value):\n\n\t\t# If the key already exists in redis, then return\n\t\tif self.checkIfExists(key):\n\t\t\traise Exception(\"Key/Value pair already exists in Redis\")\n\t\t\n\t\t# Otherwise, insert into Redis\n\t\telse:\n\t\t\tself.db.set(key, value)" ]
[ "0.6802625", "0.6760875", "0.67485595", "0.66731614", "0.66411924", "0.621796", "0.6103661", "0.60958934", "0.6045349", "0.6037797", "0.6003509", "0.60019463", "0.5977113", "0.59489703", "0.5928445", "0.5923789", "0.59120774", "0.5863009", "0.58497566", "0.582207", "0.58096737", "0.5793914", "0.57874864", "0.57560474", "0.57493865", "0.5745387", "0.5731762", "0.57243073", "0.5719773", "0.5691863", "0.56897044", "0.5682738", "0.56806237", "0.5679718", "0.5672705", "0.5623065", "0.56211734", "0.5612116", "0.5609685", "0.5596028", "0.55935216", "0.55900663", "0.5588966", "0.5588966", "0.5588966", "0.55740756", "0.55723464", "0.5569582", "0.55640155", "0.55496436", "0.55484384", "0.5545712", "0.55440503", "0.55410737", "0.55396724", "0.55387306", "0.553838", "0.55353236", "0.5528774", "0.5527005", "0.5526206", "0.55222154", "0.55218977", "0.5518103", "0.55171907", "0.55140126", "0.5512622", "0.55094063", "0.55092245", "0.5504349", "0.54949546", "0.54946864", "0.54893655", "0.54858255", "0.5481186", "0.5479077", "0.54766846", "0.5471057", "0.54583687", "0.54582524", "0.54467535", "0.5444211", "0.54434013", "0.54232424", "0.54187036", "0.541838", "0.54148966", "0.54112357", "0.5404379", "0.5401902", "0.5394517", "0.5393844", "0.5387189", "0.53773046", "0.53739756", "0.5361619", "0.53528166", "0.535122", "0.5345997", "0.53402466" ]
0.54526764
80
Assigns every training point a weight equal to 1/N, where N is the number of training points. Returns a dictionary mapping points to weights.
def initialize_weights(training_points):
    N = len(training_points)
    ans = {}
    for p in training_points:
        ans[p] = make_fraction(1, N)
    return ans
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def default_weights(n):\n return np.array([1/n for _ in range(n)])", "def uniform_weights(n):\n return np.ones((n, 1)) / n", "def update_weights(point_to_weight, misclassified_points, error_rate):\n for p in point_to_weight:\n if p in misclassified_points:\n point_to_weight[p] *= make_fraction(1,2)*make_fraction(1, error_rate)\n else:\n point_to_weight[p] *= make_fraction(1,2)*make_fraction(1, 1-error_rate)\n return point_to_weight", "def _generate_weights(self):\n weights = [random.uniform(0, 1) for x in range(self.num_weights)]\n return self._normalize_weights(weights)", "def weighting_generator(distance_set):\n weights = [1/dist for dist in distance_set]\n return weights", "def weighting_generator(distance_set):\n weights = [1/dist for dist in distance_set]\n return weights", "def weights_initializer(self):\n self.weights = [np.random.normal(0, 1 / np.sqrt(x), (x, y)) for x, y in list(zip(self.structure[1:], self.structure[:-1]))]", "def get_sample_weights(self):\n target_to_weight = {}\n for target, count in self.class_count.items():\n target_to_weight[target] = self.total / count\n\n sample_weights = []\n for _, target in self.imgs:\n sample_weights.append(target_to_weight[target])\n\n return sample_weights", "def normalize_weights(self):\n total_weight = sum(self.weights)\n self.norm_weights = self.weights / float(total_weight)", "def normalize_weights(self):\n \n # Set negative weights to zero\n # Normalize to sum to one.\n \n\n\n self.new_weight=[]\n for i in self._weights:\n if any(i < 0 for i in self._weights):\n self.new_weight = [0,1]\n\n elif all(i == 0 for i in self._weights):\n i = 1/len(self._weights)\n self.new_weight.append(i)\n else:\n i = i/sum(self._weights)\n self.new_weight.append(i)\n\n # If the weights are all zeros, set weights equal to 1/k, where k is the number\n # of components.\n self._weights = self.new_weight\n self._weights = np.round(self._weights,3)", "def normalize(self, weights):\n tot = sum(weights)\n newW = [-1] * self.numParticles\n for i in range(len(weights)):\n newW[i] = weights[i] / tot\n return newW", "def _make_sample_weights(n_repetitions, n_samples, random_state):\n sample_weights = np.zeros((n_repetitions, n_samples), dtype=np.float64)\n\n for repetition in range(n_repetitions):\n sample_weights[repetition] = random_state.rand(n_samples) + 1\n\n return sample_weights", "def weightGenerate(self):\n\t\tfor i in range(0, self.numberOfInput):\n\t\t\tself.weight.append(random.random()-0.5)", "def __get_train_weights(train_data: List[TrainSample]) -> np.array:\n train_weights = []\n for sample in train_data:\n num_answers = sample.exam_episode.get_all_answers({sample.player}, sys.maxsize)\n train_weights.append(1 / len(num_answers))\n return np.array(train_weights)", "def init_weights(self):\n \n self.w = np.random.randn(self.D) / np.sqrt(self.D)", "def _reweight(self):\n self._seed_weights = [self._graph.degree(seed) for seed in self._seeds]\n weight_sum = np.sum(self._seed_weights)\n self._seed_weights = [float(weight)/weight_sum for weight in self._seed_weights]", "def weights(self) -> List[float]:", "def calculate_weights():\n weights = {}\n\n\n # estimate run time of step 1 (fast sweep)\n f_range = sweeper_script.settings['stop'] - sweeper_script.settings['start']\n N_samples = sweeper_script.settings['samplecount']\n df = f_range / N_samples\n\n t = N_samples / df\n\n weights['quick scan'] = t\n\n # estimate run time of step 2 (high res sweep)\n df = self.settings['high_res_df']\n N_samples = self.settings['high_res_N']\n\n t = N_samples 
/ df\n\n weights['high res scan'] = t\n\n\n total_time = sum([v for k, v in weights.iteritems()])\n\n weights = {k: v/total_time for k, v in weights.iteritems()}\n\n print('weights',weights)\n\n return weights", "def update_weights(self, alpha, ind):\n inside = -alpha * self.labels * self.predictions[ind, :]\n new_weights = self.weights * np.exp(inside)\n self.weights = new_weights / np.sum(new_weights)", "def _buildWeights(self):\r\n # Compute the spatial tree\r\n kd = spatial.cKDTree(self.XYin)\r\n \r\n # Perform query on all of the points in the grid\r\n dist,self.ind=kd.query(self.XYout,distance_upper_bound=self.maxdist,k=self.NNear)\r\n \r\n self.Nc = np.size(self.ind,axis=0)\r\n print '%d interpolation points.'%self.Nc\r\n # Now loop through and get the weights for each point\r\n self.W = np.zeros((self.NNear,self.Nc))\r\n\r\n # Print percentages\r\n p0=0\r\n pstep=5\r\n for ii in range(0,self.Nc):\r\n \r\n if self.verbose:\r\n pfinish = float(ii)/float(self.Nc)*100.0\r\n if pfinish> p0:\r\n print '%3.1f %% complete...'%pfinish\r\n p0+=pstep\r\n \r\n W = self.getWeights(dist[ii,:],self.XYin[self.ind[ii,:],0],self.XYin[self.ind[ii,:],1])\r\n self.W[:,ii] = W.T", "def get_weights(train_generator):\n classes = list(train_generator.class_indices.values())\n cw = class_weight.compute_class_weight('balanced',\n np.unique(classes),\n train_generator.classes)\n m = min(cw)\n cw = [(el / m) for el in cw]\n\n return dict(zip(classes, cw))", "def getWeights(self, gameState, actton):\n\t\treturn {'successorScore': 1.0}", "def make_epochs_per_sample(weights, n_epochs):\n result = -1.0 * np.ones(weights.shape[0], dtype=np.float64)\n n_samples = n_epochs * (weights / weights.max())\n result[n_samples > 0] = float(n_epochs) / n_samples[n_samples > 0]\n return result", "def assign_tf_weight(self, actor_series):\n counter = Counter()\n for each in actor_series:\n counter[each] += 1\n total = sum(counter.values())\n for each in counter:\n counter[each] = (counter[each]/total)\n return dict(counter)", "def get_weights(self):", "def init_weights(self, dims):\n self.W = np.random.normal(size=dims) * 0.0001", "def update_weights(x_train, y_train, weights, learning_rate):\r\n predictions = compute_prediction(x_train, weights)\r\n weights_delta = np.dot(x_train.T, y_train - predictions)\r\n m = y_train.shape[0]\r\n weights += learning_rate / float(m) * weights_delta\r\n return weights", "def make_weights_for_balanced_classes(self):\n\n count = [0] * self.get_num_classes()\n\n # label = self.class_map_dict[self.meta_data.loc[image_id]['dx']]\n # labels = [self.class_map_dict[l] for l in self.get_labels()]\n\n labels = self.get_labels()\n\n # Count how many instances there are for each class\n for l in labels:\n count[l] += 1\n\n weight_per_class = [0.] 
* self.get_num_classes()\n\n N = float(sum(count))\n\n # Assign a weight which is inversely proportional to class frequency\n for i in range(self.get_num_classes()):\n weight_per_class[i] = N/float(count[i])\n\n # Save results for debugging purposes\n self._weight_per_class = weight_per_class\n\n # Now assign a weight to each data point\n weight = [0] * len(labels)\n\n for idx, val in enumerate(labels):\n weight[idx] = weight_per_class[val]\n\n return weight", "def init_weights_(self):\n raise NotImplementedError", "def init_weights(self):\n for i in range(5):\n default_init_weights(getattr(self, f'conv{i+1}'), 0.1)", "def totalWeighting(distance, count, data, n):\n\n weighting = (data)*(distance)*count\n weighting = weighting/(np.sum(np.sum(weighting))) \n return weighting", "def set_weights_iter_average(self):\n self.nn.set_param_values(np.average(self.w_after_iter, axis=0))", "def _init_weights(self):\n nn.init.xavier_normal_(self.out.weight)", "def d_weights(n):\n i = np.arange(1, n)\n return np.sqrt(n / (i * (n - i)))", "def update_weights(self):\n self._weights = self._weights + self.update_weights_value", "def _generate_p(self):\n self._values, weights = zip(*self._weights.items())\n cumsum = list(itertools.accumulate(weights))\n total = cumsum[-1]\n self._p = [i / total for i in cumsum]", "def initialize_weights_xavier(self):\n\t\tself.weights = [np.random.uniform(-1/sqrt(size1), 1/sqrt(size1)) for size1, size2 in zip(self.sizes[:-1], self.sizes[1:])]\n\t\tself.biases = [np.zeros([size, ]) for size in self.sizes[1:]]", "def update_weights(self):\n\t\tpass", "def initializeWeights(n_in,n_out):\r\n \r\n epsilon = sqrt(6) / sqrt(n_in + n_out + 1);\r\n W = (np.random.rand(n_out, n_in + 1)*2* epsilon) - epsilon;\r\n return W", "def initializeWeights(n_in,n_out):\r\n \r\n epsilon = sqrt(6) / sqrt(n_in + n_out + 1);\r\n W = (np.random.rand(n_out, n_in + 1)*2* epsilon) - epsilon;\r\n return W", "def weighted_matrix(point: np.mat, training_data_x: np.mat, bandwidth: float) -> np.mat:\r\n # m is the number of training samples\r\n m, n = np.shape(training_data_x)\r\n # Initializing weights as identity matrix\r\n weights = np.mat(np.eye((m)))\r\n # calculating weights for all training examples [x(i)'s]\r\n for j in range(m):\r\n diff = point - training_data[j]\r\n weights[j, j] = np.exp(diff * diff.T / (-2.0 * bandwidth ** 2))\r\n return weights", "def set_weights(self, indices):\n\n x1 = 0\n x2 = 0\n for i in indices:\n data = self.dataset[i]\n label = data['label']\n\n if label == 0:\n x1 += 1\n else:\n x2 += 1\n\n self.weight = torch.tensor([x1/x2]).to(self.device)\n self.criterion = nn.BCEWithLogitsLoss(pos_weight=self.weight)", "def _mutate_weights(self, weights):\n return weights + normal(loc=0, scale=self.standard_deviation, size=weights.shape[0])", "def my_assign_weights(context, data):\n pass", "def _weight_initializer(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1.0)\n nn.init.constant_(m.bias, 0.0)", "def weightedMean(points, weights):\r\n\t\tweightedSum = sum([a*b for a,b in zip(points, weights)])\r\n\t\ttotalWeight = sum(weights)\r\n\t\treturn weightedSum / totalWeight", "def default_weight_initializer(self):\n self.biases = [np.random.randn(y, 1) for y in self.sizes[1:]]\n self.weights = [np.random.randn(y, x)/np.sqrt(x)\n for x, y in zip(self.sizes[:-1], self.sizes[1:])]", "def init_weights(self):\n # Initialize weights\n self.apply(self._init_weights)\n # 
Tie weights if needed\n self.tie_weights()", "def initializeWeights(n_in,n_out):\n epsilon = sqrt(6) / sqrt(n_in + n_out + 1);\n W = (np.random.rand(n_out, n_in + 1)*2* epsilon) - epsilon;\n return W", "def initializeWeights(n_in,n_out):\n epsilon = sqrt(6) / sqrt(n_in + n_out + 1);\n W = (np.random.rand(n_out, n_in + 1)*2* epsilon) - epsilon;\n return W", "def initializeWeights(n_in,n_out):\n epsilon = sqrt(6) / sqrt(n_in + n_out + 1);\n W = (np.random.rand(n_out, n_in + 1)*2* epsilon) - epsilon;\n return W", "def initializeWeights(n_in,n_out):\n \n epsilon = sqrt(6) / sqrt(n_in + n_out + 1);\n W = (np.random.rand(n_out, n_in + 1)*2* epsilon) - epsilon;\n return W", "def __call__(self, points):\n denom = sum([self.weights[i]/(points - self.xint[i]) for i in range(self.n)])\n numer = sum([self.yint[i]*self.weights[i]/(points - self.xint[i]) for i in range(self.n)])\n return numer/denom", "def __train_projection__(self):\n\n copied_train_data = np.copy(self.train_data)\n\n for curr_train_sample in tqdm(copied_train_data,\n disable=not self.verbose,\n postfix=f'Model training...'):\n\n assert len(curr_train_sample.shape) == 1, \\\n f'Flatten your input! Now dim is: {curr_train_sample.shape}'\n\n self.weights += curr_train_sample.reshape(-1, 1) @ self.__get_inverse_flatten__(curr_train_sample)\n\n print(self.weights)\n # self.weights = self.weights / len(self.train_data)\n\n print(self.weights)", "def _generate_weights(self):\n weights = []\n for i in range(1, len(self.layers) - 1):\n weights.append(2 * np.random.random(\n (self.layers[i - 1] + 1, self.layers[i] + 1)) - 1)\n weights.append(2 * np.random.random(\n (self.layers[i] + 1, self.layers[i + 1])) - 1)\n return weights", "def init_weights(self, num_features):\n for each_label in self.valid_labels:\n self.weights[each_label] = np.zeros(num_features)", "def _init_weights(self):\n for layer in self.modules():\n if isinstance(layer, (nn.Conv1d, nn.Linear)):\n nn.init.xavier_uniform_(layer.weight)\n if layer.bias is not None:\n nn.init.constant_(layer.bias, 0)\n elif isinstance(layer, nn.BatchNorm1d):\n nn.init.constant_(layer.weight, 1)\n nn.init.constant_(layer.bias, 0)", "def _init_weights(self):\n for layer in self.modules():\n if isinstance(layer, (nn.Conv1d, nn.Linear)):\n nn.init.xavier_uniform_(layer.weight)\n if layer.bias is not None:\n nn.init.constant_(layer.bias, 0)\n elif isinstance(layer, nn.BatchNorm1d):\n nn.init.constant_(layer.weight, 1)\n nn.init.constant_(layer.bias, 0)", "def _init_weights(self):\n for layer in self.modules():\n if isinstance(layer, (nn.Conv1d, nn.Linear)):\n nn.init.xavier_uniform_(layer.weight)\n if layer.bias is not None:\n nn.init.constant_(layer.bias, 0)\n elif isinstance(layer, nn.BatchNorm1d):\n nn.init.constant_(layer.weight, 1)\n nn.init.constant_(layer.bias, 0)", "def _weight_edges(self):\n weights = {}\n for edge in self.network.edges():\n weights[edge] = self.distance_matrix[edge]\n nx.set_edge_attributes(self.network, 'weight', weights)", "def ComputeWeights(self, p_float=..., p_float=..., p_float=..., *args, **kwargs):\n ...", "def calc_specialist_weights(numsamps):\n weights = 1.0/numsamps\n weights[np.isinf(weights)] = 0.0\n return np.max(numsamps)*weights", "def get_probability(letters, n):\n return {l: c/n for l, c in letters.items()}", "def initialize_weights(self):\n # compute lmda.\n self.lmda = float(self.c)/float(self.N)\n # bias term. 
This should not get a regularization penalty.\n self.bias = {}\n # weight vector\n self.w = {}\n # lastW[j] = k, indicates that feature j was last updated at time k.\n self.lastW = {}\n for lbl in self.labels:\n self.bias[lbl] = 0\n self.w[lbl] = {}\n self.lastW[lbl] = {}\n pass", "def _weighted(self):\n return self.dataset.weighted(self.probability)", "def _weight_initializer(self):\n for m in self.modules():\n if isinstance(m, nn.ConvTranspose2d):\n nn.init.kaiming_normal_(m.weight)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1.0)\n nn.init.constant_(m.bias, 0.0)", "def _initialize_weights(self):\n pass", "def _create_weight_shaped_variables(self, nodes_per_layer, mean=None, stddev=None, name_prefix=\"Weights\", trainable=True):\n weights = []\n\n for layer_idx in range(1, len(nodes_per_layer)):\n num_in = nodes_per_layer[layer_idx-1]\n num_out = nodes_per_layer[layer_idx]\n shape = [num_in, num_out]\n\n if stddev:\n initial = tf.truncated_normal(shape=shape, stddev=stddev, mean=mean if mean else 0.0)\n else:\n initial = tf.constant(0.0, shape=shape)\n\n W = tf.Variable(\n initial,\n name=name_prefix + str(layer_idx),\n trainable=trainable\n )\n weights.append(W)\n\n return weights", "def generate_weights(sizes):\n weights = {}\n weights[\"w\"] = []\n weights[\"b\"] = []\n for i in range(len(sizes)-2):\n weights[\"w\"].append(np.random.randn(sizes[i], sizes[i+1]))\n weights[\"b\"].append(np.random.randn(sizes[i+1]))\n weights[\"w_final\"] = np.random.randn(sizes[-2], sizes[-1])/np.sqrt(sizes[-1])\n weights[\"b_final\"] = np.random.randn(sizes[-1])\n return weights", "def test_uniform_weight(self):\n knn = Knn(n_neighbors=3)\n distances = np.array([2,.3,4])\n weights = knn._uniform_weights(distances)\n assert np.allclose(weights, np.array([[1,2], [1,.3], [1,4]])), \"uniform_weights are not correct\"", "def average_weights(w):\n w_avg = copy.deepcopy(w[0])\n for key in w_avg.keys():\n for i in range(1, len(w)):\n w_avg[key] += w[i][key]\n w_avg[key] = torch.div(w_avg[key], len(w))\n return w_avg", "def average_weights(w):\n w_avg = copy.deepcopy(w[0])\n for key in w_avg.keys():\n for i in range(1, len(w)):\n w_avg[key] += w[i][key]\n w_avg[key] = torch.div(w_avg[key], len(w))\n return w_avg", "def average_weights(w):\n w_avg = copy.deepcopy(w[0])\n for key in w_avg.keys():\n for i in range(1, len(w)):\n w_avg[key] += w[i][key]\n w_avg[key] = torch.div(w_avg[key], len(w))\n return w_avg", "def _compute_weights(self):\n with variable_scope.variable_scope('compute_weights'):\n self.layer.W = nn_impl.l2_normalize(\n self.layer.v, axis=self.norm_axes) * self.layer.g", "def _compute_weights(self):\n with variable_scope.variable_scope('compute_weights'):\n self.layer.W = nn_impl.l2_normalize(\n self.layer.v, axis=self.norm_axes) * self.layer.g", "def weights(self):\r\n\t\treturn None", "def normalize(counts):\n numvals = sum(counts.itervalues())\n if numvals <= 0:\n return counts\n res = dict()\n for (k,cnt) in counts.iteritems():\n res[k] = float(cnt)/float(numvals)\n return res", "def getWeights(self, gameState, action):\n return {'successorScore': 1.0}", "def normalize_weights(self, w):\n n = w.astype(np.float64, copy=True)\n c = float(np.sum(w))\n n /= c\n return n", "def init_weights(self):\r\n constant_init(self.sampling_offsets, 0.)\r\n thetas = torch.arange(\r\n self.num_heads,\r\n dtype=torch.float32) * (2.0 * math.pi / self.num_heads)\r\n grid_init = torch.stack([thetas.cos(), thetas.sin()], -1)\r\n grid_init = (grid_init /\r\n grid_init.abs().max(-1, 
keepdim=True)[0]).view(\r\n self.num_heads, 1, 1,\r\n 2).repeat(1, self.num_levels, self.num_points, 1)\r\n for i in range(self.num_points):\r\n grid_init[:, :, i, :] *= i + 1\r\n\r\n self.sampling_offsets.bias.data = grid_init.view(-1)\r\n constant_init(self.attention_weights, val=0., bias=0.)\r\n xavier_init(self.value_proj, distribution='uniform', bias=0.)\r\n xavier_init(self.output_proj, distribution='uniform', bias=0.)\r\n self._is_init = True", "def linkweights(self, nnodes):\n link_graph = zeros(nnodes)\n for node_index, weight in self.weights.items():\n link_graph[node_index] = weight\n return link_graph", "def normalize(dictionary, num):\n for key in dictionary.keys():\n dictionary[key] = float(dictionary[key])/num\n return dictionary", "def weights(self):\n return self.__weights", "def weights(self):\n return self.__weights", "def weights(self):\n return self.__weights", "def weights(self):\n return self.__weights", "def weights(self):\n return self.__weights", "def sampleWeight(self):\r\n x=random.random()\r\n i = 0\r\n n = len(self.weights)-1\r\n cummulativeWeight = 0\r\n #Distribute the exploration weight evenly among all the actions that have been\r\n #taken up to this point in time by any of the users\r\n if len(self.sampledActions) == 0:\r\n explorationWeight = 0\r\n else:\r\n explorationWeight = self.explorationFund / len(self.sampledActions)\r\n #Compute the normalization factor. If no action has been sampled by this user yet,\r\n #then each action k has weight eta*pi_k, where pi_k is the weight of k in the\r\n #prior distribution. Then, the normalization factor is the sum(eta*pi_k) for all k,\r\n #which is equal to eta*sum(pi_k), which is just eta, since the sum of the previous\r\n #weights has to add up to 1.\r\n #If one or more actions have been already sampled, the normalization factor is the\r\n #sum of 1) the weights already in self.weights, 2) the exploration fund, and 3) the\r\n #weights of the actions that are not yet in self.weights. 
Each one of these actions\r\n #has weight eta*pi_k (because it hasn't been sampled yet), so the total weight of the\r\n #mass of actions not yet in self.weights is eta*(1-sum(pi_l)), where the sum is over all\r\n #the weights already in self.weights\r\n if n < 0:\r\n normalizationFactor = self.priorBelief\r\n else:\r\n normalizationFactor = sum(self.weights) + self.explorationFund + \\\r\n self.priorBelief*(1-self.priorTopicDistr.cummulative[n])\r\n #Keep getting the next weight until the combined mass of the weights is less than the\r\n #random number x\r\n while True:\r\n w = self.__getitem__(i)\r\n if i in self.sampledActions:\r\n w += explorationWeight\r\n cummulativeWeight += w\r\n if x <= cummulativeWeight/normalizationFactor:\r\n if i not in self.sampledActions:\r\n self.sampledActions.append(i)\r\n return w\r\n i += 1", "def trainable_weights(self):\n self._trainable_weights = list(filter(lambda x: x.requires_grad, self.get_parameters(expand=True)))\n return self._trainable_weights", "def _calc_train_class_prb(self, labels_list=None):\n if not labels_list:\n return {}\n\n n = len(labels_list)\n label_num = len(self.labels)\n prb = {}\n for l in self.labels:\n # tmp = (l, sum(1 if v == l else 0 for k, v in train_data)/n)\n prb[l] = (labels_list.count(l) + 1.0) / (n + label_num)\n return prb", "def initialize_weights(self, n_features, random=True):\n\t\tif random:\n\t\t\tlimit = 1 / math.sqrt(n_features)\n\t\t\tself.weights = np.random.uniform(-limit, limit, (n_features, ))\n\t\telse:\n\t\t\tself.weights = np.zeros(n_features)", "def pertube_distribution_weight(self, random_state, n_samples, multipler):\n curr_sample_weight = np.ones((n_samples,), dtype=np.float64)\n indices = _generate_sample_indices(random_state, n_samples)\n sample_counts = bincount(indices, minlength=n_samples)\n curr_sample_weight *= np.power(sample_counts, multipler)\n return curr_sample_weight", "def sampler_weights(dataset):\n class_counts = [0, 0]\n for index in range(len(dataset)):\n _, label = dataset[index]\n class_counts[label] += 1\n\n divisor = 2 * class_counts[0] * class_counts[1]\n sample_weights = (class_counts[1] / divisor, class_counts[0] / divisor)\n weights = []\n for index in range(len(dataset)):\n _, label = dataset[index]\n weights.append(sample_weights[label])\n\n num_samples = 2 * min(class_counts[0], class_counts[1])\n return weights, num_samples", "def getWeights(self, gameState, action):\n\n return {\n 'successorScore': 1.0\n }", "def initialize_weights(self):\n tf.nest.map_structure(\n weights_initializer.WeightsInitializer.initialize_layer_or_model,\n self._layer_nest)", "def get_weights(self):\n weights = {}\n for idx, layer in enumerate(self.model.layers):\n if len(layer.get_weights())>0:\n weights[idx] = layer.get_weights()[0]\n else:\n weights[idx] = [] \n return weights", "def init_weights(self):\n # Initialize weights\n self.apply(self._init_weights)", "def weights(self):\n return self._weights", "def weights(self):\n return self._weights", "def weights(self):\n return self._weights" ]
[ "0.6437919", "0.6396687", "0.624045", "0.61508226", "0.6023781", "0.6023781", "0.5951131", "0.59024775", "0.58835477", "0.58784914", "0.5861894", "0.58606887", "0.5855883", "0.5820768", "0.5816859", "0.5815168", "0.58121", "0.57764375", "0.57757556", "0.5774974", "0.5753624", "0.57255155", "0.5718852", "0.5710818", "0.5690943", "0.56426436", "0.5634572", "0.5632477", "0.5605354", "0.56023407", "0.5599023", "0.558802", "0.55664617", "0.5563343", "0.55499023", "0.55460227", "0.5538548", "0.5538526", "0.5532791", "0.5532791", "0.55290127", "0.552771", "0.5517118", "0.5514163", "0.55123353", "0.5509948", "0.55080086", "0.5507458", "0.55063456", "0.55063456", "0.55063456", "0.55003256", "0.54942745", "0.5488792", "0.54843736", "0.54728216", "0.54610884", "0.54610884", "0.54610884", "0.5449757", "0.54451823", "0.5441626", "0.5423328", "0.5418409", "0.5418216", "0.540428", "0.53988945", "0.53945595", "0.53833044", "0.53780484", "0.53714097", "0.53714097", "0.53714097", "0.5370978", "0.5370978", "0.5364521", "0.5359676", "0.5357273", "0.5354244", "0.5351129", "0.5350181", "0.5350062", "0.53495306", "0.53495306", "0.53495306", "0.53495306", "0.53495306", "0.53476393", "0.5343981", "0.5338938", "0.5338274", "0.5325136", "0.53185445", "0.53168124", "0.53165764", "0.53153247", "0.53150773", "0.53136814", "0.53136814", "0.53136814" ]
0.8002009
0
Given a dictionary mapping training points to their weights, and another dictionary mapping classifiers to the training points they misclassify, returns a dictionary mapping classifiers to their error rates.
def calculate_error_rates(point_to_weight, classifier_to_misclassified):
    ans = {}
    for c in classifier_to_misclassified:
        misclassified = classifier_to_misclassified[c]
        ans[c] = 0
        for p in misclassified:
            ans[c] += point_to_weight[p]
    return ans
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_weights(point_to_weight, misclassified_points, error_rate):\n for p in point_to_weight:\n if p in misclassified_points:\n point_to_weight[p] *= make_fraction(1,2)*make_fraction(1, error_rate)\n else:\n point_to_weight[p] *= make_fraction(1,2)*make_fraction(1, 1-error_rate)\n return point_to_weight", "def pollster_errors(pollster_predictions, state_edges_actual):\n # declare errors to be the return object which is a dict\n errors = dict()\n # traverse the predictions to calculate the error\n for key in pollster_predictions:\n \terrors[key] = average_error(pollster_predictions[key], state_edges_actual)\n return errors", "def pollster_errors(pollster_predictions, state_edges_actual):\r\n\r\n totalAverage = {} #Create an empty dictionary\r\n\r\n for k in pollster_predictions:\r\n states = pollster_predictions[k]\r\n for j in states:\r\n if j in state_edges_actual: \r\n average = average_error(pollster_predictions[k], state_edges_actual)\r\n totalAverage[k] = average \r\n #Map each pollster to its calculated average error of each state\r\n\r\n return totalAverage", "def outliersMetrics(y_true, y_pred, weights):\n\td = np.abs(y_true - y_pred)\n\toutliers = (y_pred < 0.5 * y_true) | (y_pred > 2 * y_true)\n\treturn {\n\t\t\"RMSE\": rmseWeighted(d, weights),\n\t\t\"RMSE without outliers\": rmseWeighted(d[~outliers], weights[~outliers]),\n\t\t\"RMSE of outliers\": rmseWeighted(d[outliers], weights[outliers]),\n\t\t\"Outliers ratio\": l1Weighted(outliers, weights),\n\t}", "def prepare_loss_weights(training_endpoints, loss_weights=None):\n if loss_weights is None:\n for e in training_endpoints:\n e.loss_weight = 1.\n elif isinstance(loss_weights, collections.abc.Mapping):\n generic_utils.check_for_unexpected_keys(\n 'loss_weights', loss_weights,\n [e.output_name for e in training_endpoints])\n for e in training_endpoints:\n e.loss_weight = loss_weights.get(e.output_name, 1.)\n elif isinstance(loss_weights, list):\n if len(loss_weights) != len(training_endpoints):\n raise ValueError('When passing a list as loss_weights, '\n 'it should have one entry per model output. 
'\n 'The model has ' + str(len(training_endpoints)) +\n ' outputs, but you passed loss_weights=' +\n str(loss_weights))\n for w, e in zip(loss_weights, training_endpoints):\n e.loss_weight = w\n else:\n raise TypeError('Could not interpret loss_weights argument: ' +\n str(loss_weights) + ' - expected a list of dicts.')", "def get_error_rate(self, points, labelled_centroids):\n classified_incorrect = 0\n for (label, point) in points:\n classified_label = self.classify_point(point, labelled_centroids)\n if classified_label != label:\n classified_incorrect +=1\n error_rate = classified_incorrect / float(len(points))\n return error_rate", "def compute_py(x,weights,labels):\n y_test, scores = predict(x,weights,labels)\n denom_sum = logsumexp(scores.values())\n\n final_dict = {}\n for label in labels:\n final_dict[label] = np.exp(scores[label] - denom_sum)\n # hint: you should use clf_base.predict and logsumexp\n return final_dict", "def evaluate_bias(ds: List[str], predicted: List[GENDER]) -> Dict:\r\n assert (len(ds) == len(predicted)) # must have same length to create tuples\r\n\r\n conf_dict = defaultdict(lambda: defaultdict(lambda: 0))\r\n total = defaultdict(lambda: 0) # increment values if we have any gender\r\n pred_cnt = defaultdict(lambda: 0)\r\n correct_cnt = defaultdict(lambda: 0) # increment values if true_gender == predicted_gender\r\n\r\n count_unknowns = defaultdict(lambda: 0)\r\n\r\n for (gold_gender, word_ind, sent, profession), pred_gender in zip(ds, predicted):\r\n # # IMPORTANTE NOTE :\r\n # need to works with .name of GENDER object for an unknown reason\r\n\r\n if isinstance(pred_gender, str): # can happen in spacy languages 'fr', 'es' or 'it\r\n pred_gender = SPACY_GENDER_TYPES[pred_gender]\r\n\r\n # tuples of values in ds and values in predicted\r\n if pred_gender.name == GENDER.ignore.name:\r\n continue # skip analysis of ignored words\r\n\r\n gold_gender = WB_GENDER_TYPES[gold_gender] # allows Winobias gender type conversion\r\n\r\n if pred_gender.name == GENDER.unknown.name:\r\n count_unknowns[gold_gender] += 1 # increment values for any unknown pred_gender\r\n\r\n profession = profession.lower()\r\n\r\n total[gold_gender] += 1\r\n\r\n if pred_gender.name == gold_gender.name:\r\n correct_cnt[gold_gender] += 1\r\n\r\n pred_cnt[pred_gender.name] += 1\r\n\r\n conf_dict[gold_gender][pred_gender] += 1\r\n\r\n all_total = sum(total.values())\r\n\r\n output_dict = {} # init output dictionnary\r\n # Compute metrics\r\n accuracy = round((sum(correct_cnt.values()) / all_total) * 100, 1) # compute accuracy\r\n output_dict['acc'] = accuracy\r\n\r\n if (total[GENDER.male] == 0) | (pred_cnt[GENDER.male.name] == 0): # Avoid ZeroDivisionError\r\n output_dict['f1_male'] = None\r\n else:\r\n recall_male = round((correct_cnt[GENDER.male] / total[GENDER.male]) * 100, 1) # compute metrics for male\r\n prec_male = round((correct_cnt[GENDER.male] / pred_cnt[GENDER.male.name]) * 100, 1)\r\n f1_male = round(calc_f1(prec_male, recall_male), 1)\r\n output_dict['f1_male'] = f1_male\r\n\r\n if (total[GENDER.female] == 0) | (pred_cnt[GENDER.female.name] == 0): # Avoid ZeroDivisionError\r\n output_dict['f1_female'] = None\r\n else:\r\n recall_female = round((correct_cnt[GENDER.female] / total[GENDER.female]) * 100, 1) # calcul metrics for female\r\n prec_female = round((correct_cnt[GENDER.female] / pred_cnt[GENDER.female.name]) * 100, 1)\r\n f1_female = round(calc_f1(prec_female, recall_female), 1)\r\n output_dict['f1_female'] = f1_female\r\n\r\n output_dict['unk_male'] = 
count_unknowns[GENDER.male]\r\n output_dict['unk_female'] = count_unknowns[GENDER.female]\r\n output_dict['unk_neutral'] = count_unknowns[GENDER.neutral]\r\n\r\n return output_dict", "def _training_errors(self):\n feed_dict = dict()\n feed_dict[self.model.get_layer('input')] = self.x_train\n for id_ in self.task_ids.keys():\n feed_dict[self.model.get_layer(id_ + '-ground-truth')] = self.y_train[id_]\n errors = {}\n for task_id, loss_type in self.task_ids.iteritems():\n if loss_type is LossTypes.mse:\n errors[task_id] = np.sqrt(self.model.get_layer(task_id + '-loss')\n .eval(session=self.sess, feed_dict=feed_dict))\n elif loss_type is LossTypes.cross_entropy:\n predictions = tf.argmax(self.model.get_layer(task_id + '-prediction'), 1)\n targets = tf.argmax(self.model.get_layer(task_id + '-ground-truth'), 1)\n correct_predictions = tf.equal(targets, predictions)\n accuracy_tensor = tf.reduce_mean(tf.cast(correct_predictions, tf.float32))\n accuracy = accuracy_tensor.eval(session=self.sess, feed_dict=feed_dict)\n errors[task_id] = 1. - accuracy\n return errors", "def getGradientWeights(y_train):\n cls_indices, event_count = np.unique(np.array(y_train), return_counts=True)\n min_class = min(event_count)\n return {cls_index: float(min_class) / cls_count\n for cls_index, cls_count in zip(cls_indices, event_count)}", "def digits_make_classifiers_to_misclassified(X,Y,classifiers,ids_to_points):\n\toutput = {key: [] for key in classifiers}\n\tN = len(X)\n\tfor cf in classifiers:\n\t\tfor i in range(N):\n\t\t\tcf_classification = cf[2](X[i])\n\t\t\tif cf_classification != Y[i]:\n\t\t\t\t# output[cf].append(X[i])\n\t\t\t\toutput[cf].append(adaboost.key_from_value(ids_to_points,X[i]))\n\n\treturn output", "def trainWeakClassifier(trainingSamples, weights, feature):\n #compute feature values\n featureValues = []\n positiveOrNegative = []\n for sample in trainingSamples:\n featureValues.append(feature.computeScore(sample[0], 0, 0))\n positiveOrNegative.append(sample[1])\n \n #zip with weights and sort by feature value\n featureValues = zip(featureValues, weights, positiveOrNegative)\n featureValues = sorted(featureValues, key=lambda tup: tup[0])\n \n #sum all weights of the positive and negative samples\n negativeWeightsTotal = 0\n positiveWeightsTotal = 0\n for value in featureValues:\n if value[2] == 1:\n positiveWeightsTotal += value[1]\n else:\n negativeWeightsTotal += value[1]\n \n #find the feature with the smallest error\n bestFeatureIndex = 0\n bestFeatureError = 1e10\n negativeWeightsSoFar = 0\n positiveWeightsSoFar = 0\n positiveOnTheLeft = 0\n positivesTotal = 0\n for i in range(0, len(featureValues)):\n error1 = positiveWeightsSoFar-negativeWeightsSoFar+negativeWeightsTotal\n error2 = negativeWeightsSoFar-positiveWeightsSoFar+positiveWeightsTotal\n error = min([error1, error2])\n \n if bestFeatureError > error:\n bestFeatureError = error\n bestFeatureIndex = i\n positiveOnTheLeft = positivesTotal\n \n if featureValues[i][2] == 1:\n positiveWeightsSoFar += featureValues[i][1]\n positivesTotal += 1\n else:\n negativeWeightsSoFar += featureValues[i][1]\n \n #count how much samples are there on the right\n positiveOnTheRight = positivesTotal - positiveOnTheLeft\n \n #determine the polarity and threshold\n polarity = -1\n threshold = featureValues[bestFeatureIndex][0]\n if positiveOnTheLeft > positiveOnTheRight:\n polarity = 1\n else:\n polarity = -1\n \n #build and return a weak classifier\n return WeakClassifier(feature, threshold, polarity)", "def computeErrorRate(test_set, 
words_likely_tags):\n # initiate vars\n known_words = {} # those two dictionaries are optional, just for debuging\n unknown_words = {} # those two dictionaries are optional, just for debuging\n correct_predictions = 0\n total_predictions = 0\n correct_unknown_predictions = 0\n total_unknown_predictions = 0\n\n for i in range(len(test_set)): # iterate sentences\n test_sent = test_set[i]\n for j in range(len(test_sent)): # iterate words in sent\n w = test_sent[j][WORD]\n t = test_sent[j][TAG]\n\n # known words\n if w in words_likely_tags:\n if w in known_words:\n known_words[w][COUNTER_SHOWS] += 1\n if t == words_likely_tags[w]: # same tag\n known_words[w][COUNTER_EQUAL] += 1\n correct_predictions += 1\n else:\n if t == words_likely_tags[w]: # same tag\n known_words[w] = {COUNTER_SHOWS: 1, COUNTER_EQUAL: 1}\n correct_predictions += 1\n else:\n known_words[w] = {COUNTER_SHOWS: 1, COUNTER_EQUAL: 0}\n\n total_predictions += 1\n # unknown words\n else: # w not in words_likely_tags, treat w as unknown_word\n if w in unknown_words:\n unknown_words[w][COUNTER_SHOWS] += 1\n if t == UNKNOWN_TAG:\n # same tag as our model predicts for unknown words\n unknown_words[w][COUNTER_EQUAL] += 1\n correct_unknown_predictions += 1\n else:\n if t == UNKNOWN_TAG: # same tag\n unknown_words[w] = {COUNTER_SHOWS: 1, COUNTER_EQUAL: 1}\n correct_unknown_predictions += 1\n else:\n unknown_words[w] = {COUNTER_SHOWS: 1, COUNTER_EQUAL: 0}\n\n total_unknown_predictions += 1\n\n # print('correct_predictions......... = ', correct_predictions)\n # print('total_predictions........... = ', total_predictions)\n # print('correct_unknown_predictions. = ', correct_unknown_predictions)\n # print('total_unknown_predictions... = ', total_unknown_predictions)\n err_rate_known = 1 - correct_predictions/total_predictions\n err_rate_unknown = 1 - correct_unknown_predictions/total_unknown_predictions\n # total_err = err_rate_known + err_rate_unknown\n tot_pred = total_predictions + total_unknown_predictions\n corr_pred = correct_predictions + correct_unknown_predictions\n total_err = 1 - corr_pred/tot_pred\n\n return err_rate_known, err_rate_unknown, total_err", "def compute_coupling_losses(experiments, logits_1, logits_2,\n estimated_couplings):\n test_losses = {}\n for name, coupling in estimated_couplings.items():\n loss_value = jnp.sum(\n coupling * experiments[0].coupling_loss_matrix_fn(logits_1, logits_2))\n test_losses[name] = loss_value\n return test_losses", "def class_metrics_for_ths(y_true: np.array, y_pred_scores: np.array, lim_ths: Tuple) -> Dict:\n fpr, tpr, ths = roc_curve(y_true, y_pred_scores)\n # TH generation to search for best cutoff\n metrics_th = np.linspace(min(lim_ths), max(lim_ths), 20)\n y_pred_ths = [predict_label(y_pred_scores, th) for th in metrics_th]\n return {\n 'accuracy': [accuracy_score(y_true, y_pred) for y_pred in y_pred_ths],\n 'balanced_accuracy': [balanced_accuracy_score(y_true, y_pred) for y_pred in y_pred_ths],\n 'precision': [precision_score(y_true, y_pred) for y_pred in y_pred_ths],\n 'recall': [recall_score(y_true, y_pred) for y_pred in y_pred_ths],\n 'f1': [f1_score(y_true, y_pred) for y_pred in y_pred_ths],\n 'roc_auc': roc_auc_score(y_true, y_pred_scores),\n 'fpr': fpr,\n 'tpr': tpr,\n 'roc_ths': ths,\n 'metrics_ths': metrics_th\n }", "def _compute_keypoint_estimation_losses(self, task_name, input_height,\n input_width, prediction_dict,\n per_pixel_weights):\n kp_params = self._kp_params_dict[task_name]\n heatmap_key = get_keypoint_name(task_name, KEYPOINT_HEATMAP)\n offset_key = 
get_keypoint_name(task_name, KEYPOINT_OFFSET)\n regression_key = get_keypoint_name(task_name, KEYPOINT_REGRESSION)\n depth_key = get_keypoint_name(task_name, KEYPOINT_DEPTH)\n heatmap_loss = self._compute_kp_heatmap_loss(\n input_height=input_height,\n input_width=input_width,\n task_name=task_name,\n heatmap_predictions=prediction_dict[heatmap_key],\n classification_loss_fn=kp_params.classification_loss,\n per_pixel_weights=per_pixel_weights)\n offset_loss = self._compute_kp_offset_loss(\n input_height=input_height,\n input_width=input_width,\n task_name=task_name,\n offset_predictions=prediction_dict[offset_key],\n localization_loss_fn=kp_params.localization_loss)\n reg_loss = self._compute_kp_regression_loss(\n input_height=input_height,\n input_width=input_width,\n task_name=task_name,\n regression_predictions=prediction_dict[regression_key],\n localization_loss_fn=kp_params.localization_loss)\n\n loss_dict = {}\n loss_dict[heatmap_key] = (\n kp_params.keypoint_heatmap_loss_weight * heatmap_loss)\n loss_dict[offset_key] = (\n kp_params.keypoint_offset_loss_weight * offset_loss)\n loss_dict[regression_key] = (\n kp_params.keypoint_regression_loss_weight * reg_loss)\n if kp_params.predict_depth:\n depth_loss = self._compute_kp_depth_loss(\n input_height=input_height,\n input_width=input_width,\n task_name=task_name,\n depth_predictions=prediction_dict[depth_key],\n localization_loss_fn=kp_params.localization_loss)\n loss_dict[depth_key] = kp_params.keypoint_depth_loss_weight * depth_loss\n return loss_dict", "def compute_metrics(self, results: list) -> Dict[str, float]:\n logger: MMLogger = MMLogger.get_current_instance()\n\n # pred_coords: [N, K, D]\n pred_coords = np.concatenate(\n [result['pred_coords'] for result in results])\n if pred_coords.ndim == 4 and pred_coords.shape[1] == 1:\n pred_coords = np.squeeze(pred_coords, axis=1)\n # gt_coords: [N, K, D]\n gt_coords = np.stack([result['gt_coords'] for result in results])\n # mask: [N, K]\n mask = np.concatenate([result['mask'] for result in results])\n # action_category_indices: Dict[List[int]]\n action_category_indices = defaultdict(list)\n for idx, result in enumerate(results):\n action_category = result['action'].split('_')[0]\n action_category_indices[action_category].append(idx)\n\n error_name = self.mode.upper()\n\n logger.info(f'Evaluating {self.mode.upper()}...')\n metrics = dict()\n\n metrics[error_name] = keypoint_mpjpe(pred_coords, gt_coords, mask,\n self.ALIGNMENT[self.mode])\n\n for action_category, indices in action_category_indices.items():\n metrics[f'{error_name}_{action_category}'] = keypoint_mpjpe(\n pred_coords[indices], gt_coords[indices], mask[indices])\n\n return metrics", "def eval_det(pred_all, gt_all, ovthresh=0.25, use_07_metric=False):\n pred = {} # map {classname: pred}\n gt = {} # map {classname: gt}\n for img_id in pred_all.keys():\n for classname, bbox, score in pred_all[img_id]:\n if classname not in pred: pred[classname] = {}\n if img_id not in pred[classname]:\n pred[classname][img_id] = []\n if classname not in gt: gt[classname] = {}\n if img_id not in gt[classname]:\n gt[classname][img_id] = []\n pred[classname][img_id].append((bbox,score))\n for img_id in gt_all.keys():\n for classname, bbox in gt_all[img_id]:\n if classname not in gt: gt[classname] = {}\n if img_id not in gt[classname]:\n gt[classname][img_id] = []\n gt[classname][img_id].append(bbox)\n\n rec = {}\n prec = {}\n ap = {}\n for classname in gt.keys():\n print('Computing AP for class: ', classname)\n rec[classname], prec[classname], 
ap[classname] = eval_det_cls(pred[classname], gt[classname], ovthresh, use_07_metric)\n print(classname, ap[classname])\n \n return rec, prec, ap", "def classification(i, classifiers, results_proba, x_train, x_test, y_train, y_test):\n\t\ttmp_l = dict()\n\t\tfor clf in classifiers:\n\t\t\tclf.fit(x_train, y_train)\n\t\t\ttry:\n\t\t\t\tbest_clf = clf.best_estimator_\n\t\t\texcept AttributeError:\n\t\t\t\tbest_clf = clf\n\t\t\tname = best_clf.__class__.__name__\n\t\t\ttrain_predictions_proba = clf.predict_proba(x_test)\n\t\t\ttrain_predictions_proba = train_predictions_proba[:, 1]\n\t\t\tresults_proba[name].update({i: train_predictions_proba})\n\t\t\ttmp_l[name] = {str(i): best_clf.score(x_test, y_test)}\n\t\treturn results_proba, tmp_l", "def metrics(self, weights=None):\n yt = self.ytrue\n yp = self.ypred\n w = weights\n\n if yt.size > 0:\n mse = skm.mean_squared_error(yt, yp, sample_weight=w)\n rmse = np.sqrt(mse)\n mae = skm.mean_absolute_error(yt, yp, sample_weight=w)\n median_absolute_error = skm.median_absolute_error(yt, yp)\n r2_score = skm.r2_score(yt, yp, sample_weight=w)\n ev_score = skm.explained_variance_score(yt, yp, sample_weight=w)\n max_error = skm.max_error(yt, yp)\n support = len(yt)\n else:\n mse = 0.0\n rmse = 0.0\n mae = 0.0\n median_absolute_error = 0.0\n r2_score = 0.0\n ev_score = 0.0\n max_error = 0.0\n support = 0\n\n return {\n \"mean_squared_error\": mse,\n \"root_mean_squared_error\": rmse,\n \"mean_absolute_error\": mae,\n \"median_absolute_error\": median_absolute_error,\n \"r2_score\": r2_score,\n \"explained_variance_score\": ev_score,\n \"max_error\": max_error,\n \"support\": support,\n }", "def _compute_metrics(hits_or_lcs: int, pred_len: int, target_len: int) ->Dict[str, Tensor]:\n precision = hits_or_lcs / pred_len\n recall = hits_or_lcs / target_len\n if precision == recall == 0.0:\n return dict(precision=tensor(0.0), recall=tensor(0.0), fmeasure=tensor(0.0))\n fmeasure = 2 * precision * recall / (precision + recall)\n return dict(precision=tensor(precision), recall=tensor(recall), fmeasure=tensor(fmeasure))", "def loss(self, key_roi_feats: Tensor, ref_roi_feats: Tensor,\n key_sampling_results: List[SamplingResult],\n ref_sampling_results: List[SamplingResult],\n gt_match_indices_list: List[Tensor]) -> dict:\n key_track_feats = self(key_roi_feats)\n ref_track_feats = self(ref_roi_feats)\n\n losses = self.loss_by_feat(key_track_feats, ref_track_feats,\n key_sampling_results, ref_sampling_results,\n gt_match_indices_list)\n return losses", "def generate_loss_weight_criteria(num_traits, num_classes=num_classes, weighted_loss=True, top_only=None):\n\tif (weighted_loss):\n\t\tprint(\"use different weight for each class\")\n\t\tprint(\"put more focus on locus with more significant DM p-values\")\n\t\tif (top_only):\n\t\t\tprint(\"weighted loss for the top class only...\")\n\t\t\tclass_weight_each_output_dict = dict([(i, 1) for i in range(0, num_classes-1)])\n\t\t\tclass_weight_each_output_dict[num_classes-1] = num_classes\n\t\t\tclass_weight_dict = dict([(\"out%i\"%i, class_weight_each_output_dict) for i in range(1, num_traits+1)])\n\t\telse:\n\t\t\tclass_weight_each_output_dict = dict([(i, i+1) for i in range(0, num_classes)])\n\t\t\tclass_weight_dict = dict([(\"out%i\"%i, class_weight_each_output_dict) for i in range(1, num_traits+1)])\n\telse:\n\t\tprint(\"use balanced weight for each class\")\n\t\tclass_weight_each_output_dict = dict([(i, 1) for i in range(0, num_classes)])\n\t\tclass_weight_dict = dict([(\"out%i\"%i, class_weight_each_output_dict) for i 
in range(1, num_traits+1)])\n\treturn class_weight_dict", "def generate_loss_weight_criteria(num_traits, num_classes=num_classes, weighted_loss=True, top_only=None):\n\tif (weighted_loss):\n\t\tprint(\"use different weight for each class\")\n\t\tprint(\"put more focus on locus with more significant DM p-values\")\n\t\tif (top_only):\n\t\t\tprint(\"weighted loss for the top class only...\")\n\t\t\tclass_weight_each_output_dict = dict([(i, 1) for i in range(0, num_classes-1)])\n\t\t\tclass_weight_each_output_dict[num_classes-1] = num_classes\n\t\t\tclass_weight_dict = dict([(\"out%i\"%i, class_weight_each_output_dict) for i in range(1, num_traits+1)])\n\t\telse:\n\t\t\tclass_weight_each_output_dict = dict([(i, i+1) for i in range(0, num_classes)])\n\t\t\tclass_weight_dict = dict([(\"out%i\"%i, class_weight_each_output_dict) for i in range(1, num_traits+1)])\n\telse:\n\t\tprint(\"use balanced weight for each class\")\n\t\tclass_weight_each_output_dict = dict([(i, 1) for i in range(0, num_classes)])\n\t\tclass_weight_dict = dict([(\"out%i\"%i, class_weight_each_output_dict) for i in range(1, num_traits+1)])\n\treturn class_weight_dict", "def mixed_prob( means,stds,weights,validt):", "def get_loss(self, outputs, heatmaps, masks, offsets, offset_weights):\n losses = dict()\n for idx in range(len(outputs)):\n pred_heatmap, pred_offset = outputs[idx]\n heatmap_weight = masks[idx].view(masks[idx].size(0), masks[idx].size(1), -1)\n losses['loss_hms'] = losses.get('loss_hms', 0) + self.loss(pred_heatmap, heatmaps[idx], heatmap_weight)\n losses['loss_ofs'] = losses.get('loss_ofs', 0) + self.offset_loss(pred_offset, offsets[idx], offset_weights[idx])\n return losses", "def compute_metrics(meters):\n metrics = {m: vs.avg for m, vs in meters.items()}\n metrics = {m: v if isinstance(v, float) else v.item() for m, v in metrics.items()}\n return metrics", "def _validation_errors(self):\n feed_dict = dict()\n feed_dict[self.model.get_layer('input')] = self.x_validate\n for id_ in self.task_ids.keys():\n feed_dict[self.model.get_layer(id_ + '-ground-truth')] = self.y_validate[id_]\n errors = {}\n for task_id, loss_type in self.task_ids.iteritems():\n if loss_type is LossTypes.mse:\n errors[task_id] = np.sqrt(self.model.get_layer(task_id + '-loss')\n .eval(session=self.sess, feed_dict=feed_dict))\n elif loss_type is LossTypes.cross_entropy:\n predictions = tf.argmax(self.model.get_layer(task_id + '-prediction'), 1)\n targets = tf.argmax(self.model.get_layer(task_id + '-ground-truth'), 1)\n correct_predictions = tf.equal(predictions, targets)\n accuracy_tensor = tf.reduce_mean(tf.cast(correct_predictions, tf.float32))\n accuracy = accuracy_tensor.eval(session=self.sess, feed_dict=feed_dict)\n errors[task_id] = 1. 
- accuracy\n return errors", "def test_weighted_accuracy(self):\n total_accuracy, weights = losses.weighted_accuracy(\n logits=self.logits, targets=self.targets, weights=self.targets >= 0)\n\n expected_accuracy = 2 / 3\n\n self.assertEqual(weights, 3)\n self.assertAlmostEqual(total_accuracy / weights, expected_accuracy)", "def compute_relation_metrics(wp_classifications):\n \n output = {}\n for relation in wp_classifications.keys():\n preds = []\n labels = []\n preds += [1] * len(wp_classifications[relation][\"true_positives\"])\n labels += [1] * len(wp_classifications[relation][\"true_positives\"])\n preds += [1] * len(wp_classifications[relation][\"false_positives\"])\n labels += [0] * len(wp_classifications[relation][\"false_positives\"])\n preds += [0] * len(wp_classifications[relation][\"false_negatives\"])\n labels += [1] * len(wp_classifications[relation][\"false_negatives\"])\n \n output[relation] = {}\n output[relation][\"recall\"] = recall_score(labels, preds)\n output[relation][\"precision\"] = precision_score(labels, preds)\n output[relation][\"f1\"] = f1_score(labels, preds)\n \n return output", "def get_performance(model, data_sets, set_names):\n results = {}\n for (x, y), name in zip(data_sets, set_names):\n y_hat = model.predict(x)\n sample_weight = get_sample_weight(y)\n # should be same as balanced accuracy\n # acc = accuracy_score(y_true=y, y_pred=y_hat.argmax(axis=1), sample_weight=sample_weight)\n acc = balanced_accuracy_score(y_true=y, y_pred=y_hat.argmax(axis=1))\n loss = log_loss(y_true=y, y_pred=y_hat.astype(np.float64), sample_weight=sample_weight)\n results[name] = {\n 'accuracy': acc,\n 'loss': loss,\n }\n return results", "def compare_dictionaries(d1, d2):\n score = 0\n total = 0\n \n for i in d1:\n total = total + d1[i]\n for i in d2:\n if i in d1:\n if total == 0:\n score = score\n else:\n probablility = (d1[i] / total)\n score = score + (math.log10(probablility) * d2[i])\n else:\n if total == 0:\n score = score\n else:\n score = score + ((0.5 / total) * d2[i])\n return score", "def regression_scoring(y_true,y_pred,regression_metrics,optional_kwargs = {}, **kwargs):\n \n # classificaiton dict to hold scores\n regression_scores = {}\n \n # for loop through classification metrics\n for metric in regression_metrics:\n \n # grab the score from the custom function\n score = metric_score(y_true,y_pred,\n metric,optional_kwargs, **kwargs)\n \n # unpack the dictionary and append to scores dictonary\n for key, value in score.items():\n regression_scores[key] = value\n \n return regression_scores", "def evaluate(attr_categories, print_errors=True):\n e, y = get_labeled_data('./data/labeled_attributes/test_set')\n \n errors = []\n p = []\n i = 0\n for attr in e: \n p.append(category_mapping[attr_categories[attr]])\n if p[i] != y[i]:\n errors.append((attr, inverse_category_mapping[p[i]],\n inverse_category_mapping[y[i]]))\n i += 1\n\n if print_errors:\n print \"SOME ERRORS: \"\n print \"In the form (attribute, prediction, target)\"\n for elem in errors[:25]:\n print elem\n\n print 80 * '='\n print \"METRICS:\"\n print \"Precision = Recall = f1 = \" + \\\n str(float(sum([1 for i in range(len(y)) if y[i] == p[i]])) / len(y))", "def no_weighted_mv(K_train, K_val, alphas, y_train, y_val, K_test):\n \n prob = 1/len(K_train)\n y_tr_pred = np.zeros(y_train.shape[0])\n y_val_pred = np.zeros(y_val.shape[0])\n y_te_pred = np.zeros(1000)\n \n for i in range(len(K_train)):\n \n y_tr_i= K_train[i] @ alphas[i]\n y_val_i= K_val[i] @ alphas[i]\n \n #err = error(y_train, y_tr_i)\n #if 
err == 0:\n # err = 10\n #else:\n # err = gamma * np.log((1-err)/err)\n \n #errors += [err]\n \n y_tr_pred += prob * y_tr_i\n y_val_pred += prob * y_val_i\n y_te_pred += prob * (K_test[i] @ alphas[i])\n \n #print(\"Assigned Weights : \", errors)\n print(f\"Training score : {1 - error(y_train, y_tr_pred)}\")\n print(f\"Validation score : {1 - error(y_val, y_val_pred)}\")\n \n return(y_te_pred)", "def calculate_class_probabilities(summaries, input_vector):\n probabilities = {}\n\n for class_key, class_summary in summaries.iteritems():\n # initialize the probability for the class to 1 to\n # prevent keyerrors\n probabilities[class_key] = float(1)\n\n for (mean, stdev), input_val in zip(class_summary, input_vector):\n attribute_probability = calculate_probability(input_val, mean, stdev)\n probabilities[class_key] *= attribute_probability\n\n return probabilities", "def clacError(classifier,weightedSet):\n\terror = 0\n\tfor ex in weightedSet:\n\t\tif classifier.test(ex) != ex.label:\n\t\t\terror += ex.weight\n\treturn error", "def classify_links(links_dist_dict, num_classes=5):\r\n\r\n # define thresholds\r\n all_dists = sorted(links_dist_dict.values())\r\n links_interval = int(len(all_dists) / num_classes)\r\n thresholds = []\r\n count_classes = 1\r\n while count_classes < num_classes:\r\n cur_idx = count_classes * links_interval\r\n thresholds.append(all_dists[cur_idx])\r\n count_classes += 1\r\n\r\n # build return dictionary\r\n ret_dict = {}\r\n for cur_link_id in list(links_dist_dict.keys()):\r\n cur_dist = links_dist_dict[cur_link_id]\r\n cur_pot_class = 1\r\n while cur_pot_class < num_classes:\r\n if cur_dist < thresholds[cur_pot_class-1]:\r\n ret_dict[cur_link_id] = cur_pot_class\r\n break\r\n cur_pot_class += 1\r\n if cur_pot_class == num_classes:\r\n ret_dict[cur_link_id] = cur_pot_class\r\n\r\n return ret_dict", "def error_rate_impurity(X_valid_encoded, X_valid, y_valid, k=18):\n errors = 0\n impurities = 0\n for i, x_enc in enumerate(X_valid_encoded):\n top_k_indices = ann.knn(x_enc, X_valid_encoded, k)\n label = y_valid[i]\n votes_against = 0\n for index in top_k_indices:\n if label != y_valid[index]:\n votes_against += 1\n if votes_against > math.ceil(k / 2):\n errors += 1\n impurities += votes_against\n error_rate = errors * 100. 
/ X_valid.shape[0]\n impurity = impurities / (X_valid.shape[0] * k)\n return error_rate, impurity", "def node_loss_dict():\n\n from .losses import node_kinematics, node_primary, node_type\n\n losses = {\n \"kinematics\" : node_kinematics.NodeKinematicsLoss,\n \"kinematics_edl\" : node_kinematics.NodeEvidentialKinematicsLoss,\n \"kinematics_attn\": node_kinematics.NodeTransformerLoss,\n \"primary\" : node_primary.NodePrimaryLoss,\n \"type\" : node_type.NodeTypeLoss\n }\n\n return losses", "def calc_class_weights(self):\n y = self.train_eval_data[\"sentiment\"]\n self.class_weights = {}\n classes = np.unique(y)\n for cls in classes:\n self.class_weights[cls] = len(y) / (len(classes) * (y == cls).sum())", "def calcRMSE(labelsAndPreds):\n meanOfSqErrors = labelsAndPreds.map(lambda (x,y): squaredError(x,y)).mean()\n \n return math.sqrt(meanOfSqErrors)", "def weighted_mv(K_train, K_val, alphas, y_train, y_val, K_test, gamma= 1/2 ):\n errors = []\n y_tr_pred = np.zeros(y_train.shape[0])\n y_val_pred = np.zeros(y_val.shape[0])\n y_te_pred = np.zeros(1000)\n \n for i in range(len(K_train)):\n \n y_tr_i= K_train[i] @ alphas[i]\n y_val_i= K_val[i] @ alphas[i]\n \n err = error(y_train, y_tr_i)\n if err == 0:\n err = 10\n else:\n err = gamma * np.log((1-err)/err)\n \n errors += [err]\n \n y_tr_pred += err * y_tr_i\n y_val_pred += err * y_val_i\n y_te_pred += err * (K_test[i] @ alphas[i])\n \n print(\"Assigned Weights : \", errors)\n print(f\"Training score : {1 - error(y_train, y_tr_pred)}\")\n print(f\"Validation score : {1 - error(y_val, y_val_pred)}\")\n return(y_te_pred)", "def keypoint_loss(self, pred_keypoints_2d, gt_keypoints_2d, openpose_weight, gt_weight):\n conf = gt_keypoints_2d[:, :, -1].unsqueeze(-1).clone()\n conf[:, :25] *= openpose_weight\n conf[:, 25:] *= gt_weight\n loss = (conf * self.criterion_keypoints(pred_keypoints_2d, gt_keypoints_2d[:, :, :-1])).mean()\n return loss", "def error_rate(dataset, estimates):\n\tincorrect = 0.0\n\tcount = 0.0\n\n\tfor idx in range(len(estimates)):\n\t\testimate = estimates[idx]\n\t\t_, actual = dataset[idx]\n\t\tfor e, a in zip(estimate, actual):\n\t\t\tcount += 1.0\n\t\t\tif e != a:\n\t\t\t\tincorrect += 1.0\n\t\n\treturn incorrect / count", "def loss_and_metrics_one_pair(self, params, rng):\n key_pq, key_samples = jax.random.split(rng)\n p_logits, q_logits = self.logit_pair_distribution_fn(key_pq)\n\n def sample_loss(key_sample):\n \"\"\"Computes loss for a single sample of a relaxed pair of outcomes.\"\"\"\n q_kwargs = dict(transpose=True) if self.use_transpose else {}\n soft_p = self.model.apply(\n params, p_logits, key_sample, method=self.model.sample_relaxed)\n soft_q = self.model.apply(\n params,\n q_logits,\n key_sample,\n method=self.model.sample_relaxed,\n **q_kwargs)\n coupling_loss_matrix = self.coupling_loss_matrix_fn(p_logits, q_logits)\n coupling_loss = jnp.sum(soft_p[:, None] * soft_q[None, :] *\n coupling_loss_matrix)\n\n return coupling_loss\n\n loss_samples = jax.vmap(sample_loss)(\n jax.random.split(key_samples, self.inner_num_samples))\n loss = jnp.mean(loss_samples)\n return loss, {\"loss\": loss}", "def get_loss_weights(self, speakers, type='fair'):\r\n mean_losses = np.mean(self.history, axis=1)\r\n \"\"\"Sort lowest to highest\"\"\"\r\n order_indices = np.argsort(mean_losses)\r\n \"\"\"Create weights as in Dr. 
Hasegawa-Johnson's slides (weight is number of classes performing better)\r\n We add one to each so that every class has some weight in the loss\"\"\"\r\n weights = np.linspace(1, mean_losses.shape[0], mean_losses.shape[0])\r\n \"\"\"Assign the weights according to the proper order\"\"\"\r\n class_weights = {}\r\n for index, i in enumerate(order_indices):\r\n class_weights[i] = weights[index]\r\n \"\"\"Now grab the correct weight for each speaker\"\"\"\r\n loss_weights = []\r\n for speaker in speakers:\r\n loss_weights.append(class_weights[self.s2i[speaker]])\r\n if type == 'fair':\r\n \"\"\"Add in the lambda weighting for fair and unfair training\"\"\"\r\n unfair_weights = np.ones(shape=(len(loss_weights, )))\r\n loss_weights = np.asarray(loss_weights)\r\n\r\n \"\"\"Lambda part\"\"\"\r\n loss_weights = config.train.fairness_lambda * loss_weights + (1-config.train.fairness_lambda) * unfair_weights\r\n\r\n elif type == 'unfair':\r\n \"\"\"All class losses are weighted evenly, unfair\"\"\"\r\n loss_weights = np.ones(shape=(len(loss_weights,)))\r\n\r\n loss_weights = torch.from_numpy(loss_weights)\r\n loss_weights = self.fix_tensor(loss_weights)\r\n return loss_weights", "def loss(self,\n inputs: Tuple[Tensor],\n batch_data_samples: OptSampleList,\n train_cfg: ConfigType = {}) -> dict:\n\n pred_coords, pred_heatmaps = self.forward(inputs)\n keypoint_labels = torch.cat(\n [d.gt_instance_labels.keypoint_labels for d in batch_data_samples])\n keypoint_weights = torch.cat([\n d.gt_instance_labels.keypoint_weights for d in batch_data_samples\n ])\n gt_heatmaps = torch.stack(\n [d.gt_fields.heatmaps for d in batch_data_samples])\n\n input_list = [pred_coords, pred_heatmaps]\n target_list = [keypoint_labels, gt_heatmaps]\n # calculate losses\n losses = dict()\n\n loss_list = self.loss_module(input_list, target_list, keypoint_weights)\n\n loss = loss_list[0] + loss_list[1]\n\n if self.lambda_t > 0:\n mh = MessageHub.get_current_instance()\n cur_epoch = mh.get_info('epoch')\n if cur_epoch >= self.lambda_t:\n loss = loss_list[0]\n\n losses.update(loss_kpt=loss)\n\n # calculate accuracy\n _, avg_acc, _ = keypoint_pck_accuracy(\n pred=to_numpy(pred_coords),\n gt=to_numpy(keypoint_labels),\n mask=to_numpy(keypoint_weights) > 0,\n thr=0.05,\n norm_factor=np.ones((pred_coords.size(0), 2), dtype=np.float32))\n\n acc_pose = torch.tensor(avg_acc, device=keypoint_labels.device)\n losses.update(acc_pose=acc_pose)\n\n return losses", "def gaussian_weighted_score(\n keypoint_scores, distances, keypoint_std_dev, bboxes):\n # Get ymin, xmin, ymax, xmax bounding box coordinates.\n # Shape: [batch_size, num_instances]\n ymin, xmin, ymax, xmax = tf.unstack(bboxes, axis=2)\n\n # shape: [num_keypoints]\n keypoint_std_dev = tf.constant(keypoint_std_dev)\n\n # shape: [batch_size, num_instances]\n sigma = cn_assigner._compute_std_dev_from_box_size( # pylint: disable=protected-access\n ymax - ymin, xmax - xmin, min_overlap=0.7)\n # shape: [batch_size, num_instances, num_keypoints]\n sigma = keypoint_std_dev[tf.newaxis, tf.newaxis, :] * sigma[:, :, tf.newaxis]\n (_, _, max_candidates, _) = _get_shape(distances, 4)\n # shape: [batch_size, num_instances, max_candidates, num_keypoints]\n sigma = tf.tile(\n sigma[:, :, tf.newaxis, :], multiples=[1, 1, max_candidates, 1])\n\n gaussian_map = tf.exp((-1 * distances * distances) / (2 * sigma * sigma))\n return keypoint_scores[:, tf.newaxis, :, :] * gaussian_map", "def rank_err(data_mod, mod_dict):\n # checks that order of patients is identical for both models, and that correct data 
dictionary is passed\n np.testing.assert_array_equal(mod_dict['linear'].Y, mod_dict['rbf'].Y, err_msg='lin and rbf models not trained on same data')\n np.testing.assert_array_equal(mod_dict['linear'].X, mod_dict['rbf'].X, err_msg='lin and rbf models not trained on same data')\n np.testing.assert_array_equal(mod_dict['rbf'].X, data_mod['XA'], err_msg='mogp models and data dictionary not trained on same data') \n\n err_dict = {}\n for key in mod_dict.keys():\n err_dict[key] = calc_rmse_full_mod(data=data_mod, model_type=key, mod=mod_dict[key])\n\n return err_dict", "def boosting(train_data, dim, t):\n w = []\n w.append([float(1) / float(len(train_data))] * len(train_data))\n\n # Store models in m, models are stored as a tuple with the w_vector as well\n # as the t_vector\n\n m = []\n\n for i in range(t):\n print(\"Iteration \" + str(i + 1) + str(\":\"))\n t_vec, w_vec, error = binary_classifier(train_data, dim, w[i])\n alpha = 0.5 * math.log(float(1 - error) / float(error))\n print(\"Error = \" + str(error))\n print(\"Alpha = \" + str(alpha))\n if error >= 0.5:\n break\n # Add model only if it has error rate less than 0.5\n m.append((t_vec, w_vec, alpha))\n\n is_increase_weights_printed = False\n is_decrease_weights_printed = False\n factor_to_increase = 0\n factor_to_decrease = 0\n # Update weights by figuring out which points that are misclassified\n w.append([0] * len(train_data))\n for j in range(len(train_data)):\n if np.dot(train_data[j][0:dim], w_vec) > t_vec:\n if train_data[j][dim] == -1:\n # misclassified\n w[i + 1][j] = float(w[i][j]) / float(2 * error)\n if not is_increase_weights_printed:\n factor_to_increase = float(1) / float(2 * error)\n is_increase_weights_printed = True\n else:\n # correctly classified\n w[i + 1][j] = float(w[i][j]) / float(2 * (1 - error))\n if not is_decrease_weights_printed:\n factor_to_decrease = float(1) / float(2 * (1 - error))\n is_decrease_weights_printed = True\n else:\n if train_data[j][dim] == 1:\n # misclassified\n w[i + 1][j] = float(w[i][j]) / float(2 * error)\n if not is_increase_weights_printed:\n factor_to_increase = float(1) / float(2 * error)\n is_increase_weights_printed = True\n else:\n # correctly classified\n w[i + 1][j] = float(w[i][j]) / float(2 * (1 - error))\n if not is_decrease_weights_printed:\n factor_to_decrease = float(1) / float(2 * (1 - error))\n is_decrease_weights_printed = True\n\n print(\"Factor to increase weights = \" + str(factor_to_increase))\n print(\"Factor to decrease weights = \" + str(factor_to_decrease))\n\n return m", "def compare_recall_precisions_from_predictions(true, score_dict, **kwargs):\n pr = OrderedDict()\n for name, score in score_dict.items():\n p, r, threshold = precision_recall_curve(true, score)\n pr[name] = [p, r]\n compare_recall_precision_graph(pr, **kwargs)", "def loss_by_feat(\n self,\n cls_scores: List[Tensor],\n bbox_preds: List[Tensor],\n centernesses: List[Tensor],\n batch_gt_instances: InstanceList,\n batch_img_metas: List[dict],\n batch_gt_instances_ignore: OptInstanceList = None) -> dict:\n featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n assert len(featmap_sizes) == self.prior_generator.num_levels\n\n device = cls_scores[0].device\n anchor_list, valid_flag_list = self.get_anchors(\n featmap_sizes, batch_img_metas, device=device)\n\n cls_reg_targets = self.get_targets(\n anchor_list,\n valid_flag_list,\n batch_gt_instances,\n batch_img_metas,\n batch_gt_instances_ignore=batch_gt_instances_ignore)\n\n (anchor_list, labels_list, label_weights_list, bbox_targets_list,\n 
bbox_weights_list, avg_factor) = cls_reg_targets\n avg_factor = reduce_mean(\n torch.tensor(avg_factor, dtype=torch.float, device=device)).item()\n\n losses_cls, losses_bbox, loss_centerness, \\\n bbox_avg_factor = multi_apply(\n self.loss_by_feat_single,\n anchor_list,\n cls_scores,\n bbox_preds,\n centernesses,\n labels_list,\n label_weights_list,\n bbox_targets_list,\n avg_factor=avg_factor)\n\n bbox_avg_factor = sum(bbox_avg_factor)\n bbox_avg_factor = reduce_mean(bbox_avg_factor).clamp_(min=1).item()\n losses_bbox = list(map(lambda x: x / bbox_avg_factor, losses_bbox))\n return dict(\n loss_cls=losses_cls,\n loss_bbox=losses_bbox,\n loss_centerness=loss_centerness)", "def loss(self,\n inputs: Union[Tensor, Tuple[Tensor]],\n batch_data_samples: OptSampleList,\n train_cfg: ConfigType = {}) -> dict:\n\n pred_outputs = self.forward(inputs)\n\n lifting_target_label = torch.cat([\n d.gt_instance_labels.lifting_target_label\n for d in batch_data_samples\n ])\n trajectory_weights = torch.cat([\n d.gt_instance_labels.trajectory_weights for d in batch_data_samples\n ])\n\n # calculate losses\n losses = dict()\n loss = self.loss_module(pred_outputs, lifting_target_label,\n trajectory_weights.unsqueeze(-1))\n\n losses.update(loss_traj=loss)\n\n # calculate accuracy\n _, avg_acc, _ = keypoint_pck_accuracy(\n pred=to_numpy(pred_outputs),\n gt=to_numpy(lifting_target_label),\n mask=to_numpy(trajectory_weights) > 0,\n thr=0.05,\n norm_factor=np.ones((pred_outputs.size(0), 3), dtype=np.float32))\n\n mpjpe_traj = torch.tensor(avg_acc, device=lifting_target_label.device)\n losses.update(mpjpe_traj=mpjpe_traj)\n\n return losses", "def eval_all(cls_prob, dtrain):\n #determine the top k predictions\n labels = dtrain.get_label()\n top_k = cls_prob.argsort(axis = 1)[:,::-1][:,:5]\n# top_k = cls_prob.argsort(axis = 1)[:,:k:-1]\n #convert true values and compared with predictions to check for equality\n labels = labels[:, None]\n return 'error', 1-ndcg(top_k, labels)/len(labels)", "def compare_dictionaries(d1, d2):\n score = 0\n total = 0\n for key in d1:\n total += d1[key]\n for item in d2:\n if item in d1:\n score += d2[item] * math.log(d1[item]/total)\n else:\n score += d2[item] * math.log(0.5/total)\n return score", "def init_metrics(losses):\n metrics = {'train': {}, 'valid': {}}\n for mode, d in metrics.items():\n metrics[mode] = dict([(k, 0) for k in losses])\n\n return metrics", "def compute_evidence_weighted_aggregated_veracity_score(\n gold: Dict[Tuple[int, str], Dict],\n pred: Dict[Tuple[int, str], Dict],\n elementwise_evidence_f1: Dict[Tuple[int, str], float],\n elementwise_evidence_f1_corrected: Dict[Tuple[int, str], float],\n) -> Dict:\n\n accuracies_passages: List[float] = []\n f1_scores_evidence: List[float] = []\n f1_scores_corrected_evidence: List[float] = []\n\n keys: List[Any] = list(gold.keys())\n\n for key in keys:\n gold_sample: Dict = gold[key]\n pred_sample: Dict = pred[key]\n\n gold_passage_label: str = gold_sample['labels']['passage']\n predicted_passage_label: str = pred_sample['predicted']\n\n accuracies_passages.append(get_instance_accuracy(gold_passage_label, predicted_passage_label))\n f1_scores_evidence.append(elementwise_evidence_f1[key])\n f1_scores_corrected_evidence.append(elementwise_evidence_f1_corrected[key])\n\n return {\n 'ev_weighted_accuracy': np.mean(np.array(accuracies_passages) * np.array(f1_scores_evidence)),\n 'ev_weighted_accuracy_corrected': np.mean(\n np.array(accuracies_passages) * np.array(f1_scores_corrected_evidence)\n )\n }", "def 
fixture_rnafusion_validated_metrics() -> Dict[str, str]:\n return {\n \"gc_content\": \"51.7\",\n \"ribosomal_bases\": \"65.81\",\n \"q20_rate\": \"97.48\",\n \"q30_rate\": \"92.95\",\n \"mapped_reads\": \"96.53\",\n \"rin\": \"10.0\",\n \"input_amount\": \"300.0\",\n \"insert_size\": \"N/A\",\n \"insert_size_peak\": \"N/A\",\n \"mean_length_r1\": \"99.0\",\n \"million_read_pairs\": \"75.0\",\n \"bias_5_3\": \"1.07\",\n \"pct_adapter\": \"12.01\",\n \"duplicates\": \"14.86\",\n \"mrna_bases\": \"85.97\",\n \"pct_surviving\": \"99.42\",\n \"uniquely_mapped_reads\": \"91.02\",\n }", "def bdrate(metric_set1, metric_set2):\n rate1 = [x[0] for x in metric_set1]\n psnr1 = [x[1] for x in metric_set1]\n rate2 = [x[0] for x in metric_set2]\n psnr2 = [x[1] for x in metric_set2]\n\n log_rate1 = map(lambda x: math.log(x), rate1)\n log_rate2 = map(lambda x: math.log(x), rate2)\n\n # Best cubic poly fit for graph represented by log_ratex, psrn_x.\n p1 = numpy.polyfit(psnr1, log_rate1, 3)\n p2 = numpy.polyfit(psnr2, log_rate2, 3)\n\n # Integration interval.\n min_int = max([min(psnr1),min(psnr2)])\n max_int = min([max(psnr1),max(psnr2)])\n\n # find integral\n p_int1 = numpy.polyint(p1)\n p_int2 = numpy.polyint(p2)\n\n # Calculate the integrated value over the interval we care about.\n int1 = numpy.polyval(p_int1, max_int) - numpy.polyval(p_int1, min_int)\n int2 = numpy.polyval(p_int2, max_int) - numpy.polyval(p_int2, min_int)\n\n # Calculate the average improvement.\n avg_exp_diff = (int2 - int1) / (max_int - min_int)\n\n # In really bad formed data the exponent can grow too large.\n # clamp it.\n if avg_exp_diff > 200 :\n avg_exp_diff = 200\n\n # Convert to a percentage.\n avg_diff = (math.exp(avg_exp_diff) - 1) * 100\n\n return avg_diff", "def compute_map(scores, num_instances):\r\n pr = []\r\n for i, score in enumerate(scores):\r\n score = score[np.argsort(-score[:, 1])] # sort by confidence score\r\n FP = 0\r\n TP = 0\r\n pr_ = []\r\n for prediction in score:\r\n if prediction[0]:\r\n TP += 1\r\n else:\r\n FP += 1\r\n pr_.append([TP/(TP+FP), (TP/num_instances)])\r\n pr.append(pr_)\r\n pr = np.array(pr)\r\n\r\n pinterps = [] # lists of interpolated precisions for every confidence level\r\n ranks = np.linspace(0, 1, 11)\r\n idxs_interpolations = [] # list of indexes of the interpolated precisions, just to plot the recall\r\n for pr_ in pr:\r\n pinterp = []\r\n idxs = []\r\n last_idx = -1\r\n for rank in ranks:\r\n idx = (np.abs(pr_[:, 1] - rank)).argmin() # find the closest recall to the rank\r\n\r\n if rank > pr_[idx, 1]: # this makes sure we are taking the closest recall at the right of the rank\r\n if idx+1 < pr_[:, 0].shape[0]:\r\n idx += 1\r\n interpolated_precision = np.max(pr_[idx:, 0]) # find the max precision within the interval\r\n if idx == last_idx: # just some checks for when the recall doesn't exist\r\n pinterp[-1] = 0\r\n idxs[-1] = 0\r\n pinterp.append(0)\r\n idxs.append(0)\r\n else:\r\n pinterp.append(interpolated_precision)\r\n idxs.append(idx)\r\n last_idx = idx\r\n pinterps.append(pinterp)\r\n idxs_interpolations.append(idxs)\r\n APs = np.array(pinterps).mean(axis=1) # the AP is the average of the interpolated precisions\r\n mAP = APs.mean() # mAP is the mean of all the APs\r\n\r\n return pr, pinterps, idxs_interpolations, mAP, APs", "def _compute_losses(discriminator, d_real, d_fake, interpolated_x, interpolated_c):\n wasserstein_distance = tf.reduce_mean(d_real) - tf.reduce_mean(d_fake)\n\n gradient_penalty_x = wgan.compute_gradient_penalty(\n lambda interpolated: 
discriminator(interpolated, interpolated_c),\n interpolated_x\n )\n \n gradient_penalty_c = wgan.compute_gradient_penalty(\n lambda interpolated: discriminator(interpolated_x, interpolated),\n interpolated_c\n )\n\n g_loss = tf.reduce_mean(d_fake)\n d_loss = wasserstein_distance + (\n wgan.GRADIENT_PENALTY_LAMBDA * gradient_penalty_x +\n wgan.GRADIENT_PENALTY_LAMBDA * gradient_penalty_c\n )\n\n return g_loss, d_loss", "def _combine_dict(train_dict, eval_dict, train_idx, eval_idx):\n combine_dict = {}\n for key, val in train_dict.items():\n if isinstance(val, dict):\n if key not in eval_dict:\n logger.warning(f'{key} not found in `eval_dict`, skipping...')\n continue\n combine_dict[key] = _combine_dict(train_dict[key], eval_dict[key], train_idx, eval_idx)\n else:\n if key.replace('train', 'eval') not in eval_dict:\n logger.warning(f\"{key.replace('train', 'eval')} not found in `eval_dict`, skipping...\")\n continue\n train_arr = val\n eval_arr = eval_dict[key.replace('train', 'eval')]\n assert train_arr.shape[1] == eval_arr.shape[1], f\"Trial lengths for {key} and {key.replace('train', 'eval')} don't match\"\n assert train_arr.shape[2] == eval_arr.shape[2], f\"Number of channels for {key} and {key.replace('train', 'eval')} don't match\"\n full_arr = np.empty((train_arr.shape[0] + eval_arr.shape[0], train_arr.shape[1], train_arr.shape[2]))\n full_arr[train_idx] = train_arr\n full_arr[eval_idx] = eval_arr\n combine_dict[key] = full_arr\n return combine_dict", "def get_pred_class_probs(self, pred_mu, pred_sigma):", "def compute_weighted_accuracy(logits, targets, weights=None):\n if logits.ndim != targets.ndim + 1:\n raise ValueError('Incorrect shapes. Got shape %s logits and %s targets' %\n (str(logits.shape), str(targets.shape)))\n loss = jnp.equal(jnp.argmax(logits, axis=-1), targets)\n normalizing_factor = jnp.prod(jnp.array(logits.shape[:-1]))\n if weights is not None:\n loss = loss * weights\n normalizing_factor = weights.sum()\n\n return loss.sum(), normalizing_factor", "def compare_dictionaries(d1, d2):\n score = 0\n total = 0\n\n for element in d1:\n total += d1[element]\n\n for item in d2:\n if item in d1:\n score += math.log(d1[item]/total) * (d2[item])\n else:\n score += math.log(0.5/total) * (d2[item])\n return score", "def loss(self, prediction_dict, groundtruth_lists):\r\n pass", "def get_weights(train_generator):\n classes = list(train_generator.class_indices.values())\n cw = class_weight.compute_class_weight('balanced',\n np.unique(classes),\n train_generator.classes)\n m = min(cw)\n cw = [(el / m) for el in cw]\n\n return dict(zip(classes, cw))", "def error_in_assigned_energy(predictions, ground_truth):\n errors = {}\n both_sets_of_meters = iterate_through_submeters_of_two_metergroups(\n predictions, ground_truth)\n for pred_meter, ground_truth_meter in both_sets_of_meters:\n sections = pred_meter.good_sections()\n ground_truth_energy = ground_truth_meter.total_energy(sections=sections)\n predicted_energy = pred_meter.total_energy(sections=sections)\n errors[pred_meter.instance()] = np.abs(ground_truth_energy - predicted_energy)\n return pd.Series(errors)", "def compute_losses(self, predictions, targets):\n smpl_weight = targets['target_smpl_weight']\n\n losses = {}\n if self.loss_beta is not None:\n losses['loss_beta'] = self.loss_beta(\n predictions['pred_shape'] * smpl_weight,\n targets['target_beta'] * smpl_weight)\n if self.loss_theta is not None:\n pred_pose = rotmat_to_quat(predictions['pred_pose']).reshape(\n -1, 96)\n losses['loss_theta'] = self.loss_theta(\n 
pred_pose * smpl_weight * targets['target_theta_weight'],\n targets['target_theta'] * smpl_weight *\n targets['target_theta_weight'])\n if self.loss_twist is not None:\n losses['loss_twist'] = self.loss_twist(\n predictions['pred_phi'] * targets['target_twist_weight'],\n targets['target_twist'] * targets['target_twist_weight'])\n if self.loss_uvd is not None:\n pred_uvd = predictions['pred_uvd_jts']\n target_uvd = targets['target_uvd_29'][:, :pred_uvd.shape[1]]\n target_uvd_weight = targets['target_weight_29'][:, :pred_uvd.\n shape[1]]\n losses['loss_uvd'] = self.loss_uvd(\n 64 * predictions['pred_uvd_jts'],\n 64 * target_uvd,\n target_uvd_weight,\n avg_factor=target_uvd_weight.sum())\n\n return losses", "def fit(self, inputs, targets):\n\n # Forward propagate inputs through network\n final_inputs = self.forwardProp(inputs)\n\n # Calculate output error with selected error function, default == 'difference'\n output_errors = self.errorFunc(targets, self.finalActivations)\n\n # Backwards propagate\n self.backProp(inputs, output_errors, final_inputs, targets)\n\n return output_errors", "def compute_metrics(self, outputs: List[Dict[str, torch.Tensor]]) -> dict:\n distance_pos, distance_neg = [], []\n for minibatch in outputs:\n minibatch = minibatch[\"val_prediction\"]\n src_embedding = minibatch[\"src_sentemb\"]\n ref_embedding = minibatch[\"ref_sentemb\"]\n pos_embedding = minibatch[\"pos_sentemb\"]\n neg_embedding = minibatch[\"neg_sentemb\"]\n\n distance_src_pos = F.pairwise_distance(pos_embedding, src_embedding)\n distance_ref_pos = F.pairwise_distance(pos_embedding, ref_embedding)\n harmonic_distance_pos = (2 * distance_src_pos * distance_ref_pos) / (\n distance_src_pos + distance_ref_pos\n )\n distance_pos.append(harmonic_distance_pos)\n\n distance_src_neg = F.pairwise_distance(neg_embedding, src_embedding)\n distance_ref_neg = F.pairwise_distance(neg_embedding, ref_embedding)\n harmonic_distance_neg = (2 * distance_src_neg * distance_ref_neg) / (\n distance_src_neg + distance_ref_neg\n )\n distance_neg.append(harmonic_distance_neg)\n\n return {\n \"kendall\": self.metrics.compute(\n torch.cat(distance_pos), torch.cat(distance_neg)\n )\n }", "def _rouge_l_score(pred: Sequence[str], target: Sequence[str]) ->Dict[str, Tensor]:\n pred_len, target_len = len(pred), len(target)\n if 0 in (pred_len, target_len):\n return dict(precision=tensor(0.0), recall=tensor(0.0), fmeasure=tensor(0.0))\n lcs: int = _lcs(pred, target)\n return _compute_metrics(lcs, pred_len, target_len)", "def compute(cls, observation, prediction, distances):\n\n errors = collections.OrderedDict()\n\n for i in range (0, len(distances)):\n if 'mean_AP1_amp_strong_propagating_at_'+str(distances[i])+'um' in observation.keys() or 'mean_AP1_amp_weak_propagating_at_'+str(distances[i])+'um' in observation.keys():\n p_value = prediction['model_AP1_amp_at_'+str(distances[i])+'um']['mean']\n o_mean = observation['mean_AP1_amp_strong_propagating_at_'+str(distances[i])+'um']\n o_std = observation['std_AP1_amp_strong_propagating_at_'+str(distances[i])+'um']\n\n try:\n error = abs(p_value - o_mean)/o_std\n error = assert_dimensionless(error)\n except (TypeError,AssertionError) as e:\n error = e\n errors['AP1_amp_strong_propagating_at_'+str(distances[i])] = error\n\n\n o_mean = observation['mean_AP1_amp_weak_propagating_at_'+str(distances[i])+'um']\n o_std = observation['std_AP1_amp_weak_propagating_at_'+str(distances[i])+'um']\n\n try:\n error = abs(p_value - o_mean)/o_std\n error = assert_dimensionless(error)\n except 
(TypeError,AssertionError) as e:\n error = e\n errors['AP1_amp_weak_propagating_at_'+str(distances[i])] = error\n\n else:\n p_value = prediction['model_AP1_amp_at_'+str(distances[i])+'um']['mean']\n o_mean = observation['mean_AP1_amp_at_'+str(distances[i])+'um']\n o_std = observation['std_AP1_amp_at_'+str(distances[i])+'um']\n\n try:\n error = abs(p_value - o_mean)/o_std\n error = assert_dimensionless(error)\n except (TypeError,AssertionError) as e:\n error = e\n errors['AP1_amp_at_'+str(distances[i])] = error\n\n for i in range (0, len(distances)): # to keep better order: first all AP1, then all APlast\n p_value_l = prediction['model_APlast_amp_at_'+str(distances[i])+'um']['mean']\n o_mean_l = observation['mean_APlast_amp_at_'+str(distances[i])+'um']\n o_std_l = observation['std_APlast_amp_at_'+str(distances[i])+'um']\n\n try:\n error_l = abs(p_value_l - o_mean_l)/o_std_l\n error_l = assert_dimensionless(error_l)\n except (TypeError,AssertionError) as e:\n error_l = e\n errors['APlast_amp_at_'+str(distances[i])] = error_l\n\n score_sum_strong_propagating = 0.0\n score_sum_weak_propagating = 0.0\n\n for key, value in errors.iteritems():\n if 'strong' not in key:\n score_sum_weak_propagating += value\n for key, value in errors.iteritems():\n if 'weak' not in key:\n score_sum_strong_propagating += value\n return [score_sum_strong_propagating, score_sum_weak_propagating], errors", "def trainAndTune(self, trainingData, trainingLabels, validationData, validationLabels, kgrid):\n no_of_examples = len(trainingLabels)\n prior_prob = dict(Counter(trainingLabels))\n for key in prior_prob.keys():\n prior_prob[key] = prior_prob[key] / float(no_of_examples)\n\n self.prior_prob = prior_prob\n\n likelihoods = dict()\n for cls, prob in prior_prob.items():\n # initializing the dictionary\n likelihoods[cls] = defaultdict(list)\n\n for cls, prob in prior_prob.items():\n # taking samples of only 1 class at a time\n row_indices = list()\n for index, value in enumerate(trainingLabels):\n if value == cls:\n row_indices.append(index)\n\n subset = list()\n for index in row_indices:\n subset.append(trainingData[index])\n\n for r in range(len(subset)):\n for key, value in subset[r].items():\n likelihoods[cls][key].append(value)\n\n classes = [key for key in prior_prob]\n self.classes = classes\n _like = likelihoods\n for cls in classes:\n for key, value in likelihoods[cls].items():\n likelihoods[cls][key] = self._occurrences(likelihoods[cls][key])\n\n self.likelihoods = likelihoods\n\n # results = {}\n # correct = 0\n # for itr in range(len(validationData)):\n # for cls in classes:\n # class_probability = prior_prob[cls]\n # for key, value in validationData[itr].items():\n # relative_feature_values = likelihoods[cls][key]\n # class_probability *= relative_feature_values.get(validationData[itr][key], 0.01)\n #\n # results[cls] = class_probability\n #\n # norm_factor = 0.0\n #\n # for key, value in results.items():\n # norm_factor += value\n #\n # for key in results:\n # try:\n # results[key] = results[key]/norm_factor\n # except ZeroDivisionError:\n # pass\n #\n # if (list(results.keys())[list(results.values()).index(max([value for key, value in results.items()]))]) == validationLabels[itr]:\n # correct += 1\n #\n # print \"validation accuracy: {}%\".format((correct/float(len(validationLabels))) * 100)", "def classify(trainX, trainY, testX, testY):\n trainC = getClasses(trainY)\n P = estimatePosterior(trainX, trainC, testX)\n E = fit(testX, P)\n (e_rate, se, interval) = error.confidenceInterval(testY, E)\n return (P, 
E, e_rate, se, interval)", "def _compute_losses_and_predictions_dicts(\r\n model, features, labels,\r\n add_regularization_loss=True):\r\n model_lib.provide_groundtruth(model, labels)\r\n preprocessed_images = features[fields.InputDataFields.image]\r\n\r\n prediction_dict = model.predict(\r\n preprocessed_images,\r\n features[fields.InputDataFields.true_image_shape],\r\n **model.get_side_inputs(features))\r\n prediction_dict = ops.bfloat16_to_float32_nested(prediction_dict)\r\n\r\n losses_dict = model.loss(\r\n prediction_dict, features[fields.InputDataFields.true_image_shape])\r\n losses = [loss_tensor for loss_tensor in losses_dict.values()]\r\n if add_regularization_loss:\r\n # TODO(kaftan): As we figure out mixed precision & bfloat 16, we may\r\n ## need to convert these regularization losses from bfloat16 to float32\r\n ## as well.\r\n regularization_losses = model.regularization_losses()\r\n if regularization_losses:\r\n regularization_losses = ops.bfloat16_to_float32_nested(\r\n regularization_losses)\r\n regularization_loss = tf.add_n(\r\n regularization_losses, name='regularization_loss')\r\n losses.append(regularization_loss)\r\n losses_dict['Loss/regularization_loss'] = regularization_loss\r\n\r\n total_loss = tf.add_n(losses, name='total_loss')\r\n losses_dict['Loss/total_loss'] = total_loss\r\n\r\n return losses_dict, prediction_dict", "def compare_dictionaries(d1, d2):\r\n score = 0\r\n gef = 0\r\n for z in d1:\r\n gef += d1[z]\r\n total = gef\r\n \r\n for x in d2:\r\n if x in d1:\r\n score += math.log(d1[x] / total) * d2[x] \r\n else:\r\n score += math.log(0.5/total) * d2[x]\r\n return score", "def _compute_kp_heatmap_loss(self, input_height, input_width, task_name,\n heatmap_predictions, classification_loss_fn,\n per_pixel_weights):\n gt_keypoints_list = self.groundtruth_lists(fields.BoxListFields.keypoints)\n gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes)\n gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)\n gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes)\n\n assigner = self._target_assigner_dict[task_name]\n (keypoint_heatmap, num_instances_per_kp_type,\n valid_mask_batch) = assigner.assign_keypoint_heatmap_targets(\n height=input_height,\n width=input_width,\n gt_keypoints_list=gt_keypoints_list,\n gt_weights_list=gt_weights_list,\n gt_classes_list=gt_classes_list,\n gt_boxes_list=gt_boxes_list)\n flattened_valid_mask = _flatten_spatial_dimensions(valid_mask_batch)\n flattened_heapmap_targets = _flatten_spatial_dimensions(keypoint_heatmap)\n # Sum over the number of instances per keypoint types to get the total\n # number of keypoints. 
Note that this is used to normalized the loss and we\n # keep the minimum value to be 1 to avoid generating weird loss value when\n # no keypoint is in the image batch.\n num_instances = tf.maximum(\n tf.cast(tf.reduce_sum(num_instances_per_kp_type), dtype=tf.float32),\n 1.0)\n loss = 0.0\n # Loop through each feature output head.\n for pred in heatmap_predictions:\n pred = _flatten_spatial_dimensions(pred)\n unweighted_loss = classification_loss_fn(\n pred,\n flattened_heapmap_targets,\n weights=tf.ones_like(per_pixel_weights))\n # Apply the weights after the loss function to have full control over it.\n loss += unweighted_loss * per_pixel_weights * flattened_valid_mask\n loss = tf.reduce_sum(loss) / (\n float(len(heatmap_predictions)) * num_instances)\n return loss", "def interpolate_loss_calculation(self, interpolates):\n _, fake_scores = self.D(interpolates)\n return fake_scores", "def fit_predict(self, train_x: pd.DataFrame, train_y: pd.Series, test_x: pd.DataFrame, test_y: pd.Series) -> dict:\n self.evaluator.fit(train_x, train_y, test_x, test_y)\n predictions = self.evaluator.predict(test_x)\n print(predictions)\n metrics = metrics_stat(predictions, test_y)\n return metrics", "def predict_state_edges(pollster_predictions, pollster_errors):\r\n\r\n dict = {} #Create an empty dictionary\r\n pollster_state_prediction = pivot_nested_dict(pollster_predictions)\r\n for state in pollster_state_prediction:\r\n dict[state] = average_edge(pollster_state_prediction[state], pollster_errors)\r\n return dict\r\n \r\n #Iterate through each state and calculate the predicted edge for each state\r", "def compute_metrics(self, target, data, weight):\n pred = self.predict(data, weight)\n assert len(pred) == len(target)\n # Calculate the mis-classification rate:\n N = len(pred)\n pred = np.reshape(pred, (N,))\n target = np.reshape(target, (N,))\n nb_misclass = np.count_nonzero(target - pred)\n return nb_misclass / N", "def score_items(X, U, mu,\n scoremethod='lowhigh',\n missingmethod='none',\n feature_weights=[]):\n\n # Use U to model and then reconstruct the data in X.\n # 1. Project all data in X into space defined by U,\n # then reconstruct it.\n if missingmethod.lower() != 'ignore':\n # All missing values should have been replaced with 0,\n # or non-existent.\n # 1a. Subtract the mean and project onto U\n proj = np.dot(U.T, (X - mu))\n # 1b. Reconstruct by projecting back up and adding mean\n reproj = np.dot(U, proj) + mu\n # 1c. Compute the residual\n #print('X:', X.T)\n #print('reproj:', reproj.T)\n err = X - reproj\n #print('err:', err.T)\n #input()\n \n else:\n # Missing method must be 'ignore' (Brand 2002)\n (err, reproj) = compute_error_with_missing(X, U, mu)\n\n # 2. 
Compute reconstruction error\n if scoremethod == 'low': # Blank out all errors > 0\n err[err>0] = 0\n elif scoremethod == 'high': # Blank out all errors < 0\n err[err<0] = 0\n else: # default, count everything\n pass\n \n # Weight features if requested\n if len(feature_weights) > 0:\n for i in range(len(feature_weights)):\n err[i,:] = err[i,:] * feature_weights[i]\n\n if missingmethod.lower() == 'ignore':\n # Only tally error for observed features.\n # This means that items with missing values are not penalized\n # for those features, which is probably the best we can do.\n scores = np.nansum(np.array(np.power(err, 2)), axis=0)\n else:\n scores = np.sum(np.array(np.power(err, 2)), axis=0)\n\n #print('scores:', scores)\n #print('reproj:', reproj)\n #input()\n return (scores, reproj)", "def test_accuracy(self):\n total_accuracy, weights = losses.weighted_accuracy(\n logits=self.logits, targets=self.targets)\n\n expected_accuracy = 2 / 3\n\n self.assertEqual(weights, 3)\n self.assertAlmostEqual(total_accuracy / weights, expected_accuracy)", "def gradients(self):\n error = self.error()\n input_ = self.get_cache('input')\n error_stacked = np.reshape(error, (error.shape[0], 1, error.shape[1]))\n input_stacked = np.reshape(input_, (input_.shape[0], input_.shape[1], 1))\n return {\n 'bias': np.sum(error, axis=0),\n 'weight': np.sum(np.matmul(input_stacked, error_stacked), axis=0)\n }", "def classify(X, y, X_validate):\r\n # Logistic Regression\r\n lr = sklearn.linear_model.LogisticRegression()\r\n # Gradient Boosting Machine\r\n gb = sklearn.ensemble.GradientBoostingClassifier()\r\n # Random Forest\r\n rf = sklearn.ensemble.RandomForestClassifier()\r\n # Support Vector Machine\r\n svm = sklearn.svm.SVC()\r\n\r\n classifiers = {\r\n 'Logistic Regression': lr,\r\n 'Random Forest': rf,\r\n 'Gradient Boosting': gb,\r\n 'Support Vector': svm\r\n }\r\n predictions = {}\r\n for name, classifier in classifiers.items():\r\n classifier.fit(X,y)\r\n predictions[name] = classifier.predict(X_validate) \r\n return predictions", "def eval_metrics_for_multiclass(self, predicted_answers):\n total_correct_in_all = 0\n total_pred_in_all = len(predicted_answers)\n # initial a dict for total correct in topK counting.\n total_correct_in_topK = dict([(i, 0) for i in self.topK_list])\n total_pred_in_topK = dict([(i, 0) for i in self.topK_list])\n max_topK = max(self.topK_list)\n label_pred = []\n label_true = []\n label_weights = []\n digits = 3\n metrics = {}\n\n for e_id, sample in predicted_answers.iteritems():\n # get all correct ids\n correct_label_indices = sample['correct_labels']\n # current case, we only have a majority lable for the correct label\n label_true.append(correct_label_indices[0])\n # counting all correct for each sample\n total_correct_in_all += len(correct_label_indices)\n # select topK\n sorted_probs_max_topK = sorted(sample['pred_probs'], reverse=True, key=lambda x: x['prob'])[:max_topK]\n top1_pred = sorted_probs_max_topK[0]\n label_pred.append(top1_pred['label_index'])\n\n # for all topK predictions\n for i in range(len(sorted_probs_max_topK)):\n pred = sorted_probs_max_topK[i]\n for topK in self.topK_list:\n if i >= topK:\n continue\n else:\n total_pred_in_topK[topK] += 1\n if pred['label_index'] in correct_label_indices:\n total_correct_in_topK[topK] += 1\n\n if total_correct_in_all != 0:\n # recall@K\n recall_at_K = dict([(k, total_correct_in_topK[k] / (total_correct_in_all * 1.0)) for k in self.topK_list])\n # assign recall@K into metrics\n for k, v in recall_at_K.items():\n # Jie\n # 1 
means the greater the better.\n # -1 means the smaller the better.\n metrics['R@{}'.format(k)] = (1, v)\n\n self.logger.info('total_correct_in_all = {}, correct_in_topK = {}, recall@K = {}'.format(total_correct_in_all, sorted(total_correct_in_topK.items()), sorted(recall_at_K.items())))\n # here return all the p,r,f for each label, then we compute the micro average later.\n p, r, f1, s = precision_recall_fscore_support(label_true, label_pred, beta=1.0, labels=range(self.num_classes), average=None)\n total_s = np.sum(s)\n p_micro, r_micro, f1_micro, _ = precision_recall_fscore_support(label_true, label_pred, beta=1.0, labels=range(self.num_classes), average='micro')\n last_lines_heading = ['macro / total', 'weighted_mac / total', 'micro / total']\n target_names = self.classes\n name_width = max(len(cn) for cn in target_names)\n width = max(name_width, max([len(x) for x in last_lines_heading]), digits)\n\n headers = [\"precision\", \"recall\", \"f1-score\", \"support\"]\n head_fmt = u'{:>{width}s} ' + u' {:>9}' * len(headers)\n report = head_fmt.format(u'', *headers, width=width)\n report += u'\\n\\n'\n row_fmt = u'{:>{width}s} ' + u' {:>9.{digits}f}' * 3 + u' {:>9}\\n'\n rows = zip(target_names, p, r, f1, s)\n for row in rows:\n label_weights.append(row[4])\n report += row_fmt.format(*row, width=width, digits=digits)\n metrics['P_{}'.format(row[0])] = (1, row[1])\n metrics['R_{}'.format(row[0])] = (1, row[2])\n metrics['F1_{}'.format(row[0])] = (1, row[3])\n report += u'\\n'\n\n # compute macro averages\n p_macro = np.average(p, weights = None)\n r_macro = np.average(r, weights = None)\n f1_macro = np.average(f1, weights = None)\n metrics['P_{}'.format(\"macro\")] = (1, p_macro)\n metrics['R_{}'.format(\"macro\")] = (1, r_macro)\n metrics['F1_{}'.format(\"macro\")] = (1, f1_macro)\n report += row_fmt.format(last_lines_heading[0],\n p_macro,\n r_macro,\n f1_macro,\n total_s,\n width=width, digits=digits)\n\n # compute weighted macro average\n label_weights = map(lambda x : x/(total_s * 1.0), label_weights)\n p_weighted_average = np.average(p, weights = label_weights)\n r_weighted_average = np.average(r, weights = label_weights)\n f1_weighted_average = np.average(f1, weights = label_weights)\n metrics['P_{}'.format(\"weighted_macro\")] = (1, p_weighted_average)\n metrics['R_{}'.format(\"weighted_macro\")] = (1, r_weighted_average)\n metrics['F1_{}'.format(\"weighted_macro\")] = (1, f1_weighted_average)\n report += row_fmt.format(last_lines_heading[1],\n p_weighted_average,\n r_weighted_average,\n f1_weighted_average,\n total_s,\n width=width, digits=digits)\n # micro average\n metrics['P_{}'.format(\"micro\")] = (1, p_micro)\n metrics['R_{}'.format(\"micro\")] = (1, r_micro)\n metrics['F1_{}'.format(\"micro\")] = (1, f1_micro)\n report += row_fmt.format(last_lines_heading[2],\n p_micro,\n r_micro,\n f1_micro,\n total_s,\n width=width, digits=digits)\n\n self.logger.info(\"P,R,F1 report as follows:\\n {}\".format(report))\n # only plot it at dev and test time, not during training.\n if self.gen_confusing_matrix:\n\n self.logger.info(\"Generate confusing matrix photo.\")\n # Compute confusion matrix\n conf_matrix = confusion_matrix(label_true, label_pred)\n np.set_printoptions(precision=2)\n\n # Plot non-normalized confusion matrix\n plt.figure()\n self.plot_confusion_matrix(conf_matrix, classes=self.brief_classes, ori_fmt='d',\n title='Confusion matrix, without normalization')\n wo_norm_fig_path = os.path.join(self.result_dir, '{}_wo_norm.png'.format(self.result_prefix))\n 
plt.savefig(wo_norm_fig_path)\n\n # Plot normalized confusion matrix\n plt.figure()\n self.plot_confusion_matrix(conf_matrix, classes=self.brief_classes, ori_fmt='d', normalize=True,\n title='Normalized confusion matrix')\n\n norm_fig_path = os.path.join(self.result_dir, '{}_w_norm.png'.format(self.result_prefix))\n plt.savefig(norm_fig_path)\n\n else:\n self.logger.warn('invalid total_correct_in_all')\n\n return metrics", "def _compute_kp_regression_loss(self, input_height, input_width, task_name,\n regression_predictions, localization_loss_fn):\n gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes)\n gt_keypoints_list = self.groundtruth_lists(fields.BoxListFields.keypoints)\n gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes)\n gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)\n # keypoint regression offset loss.\n assigner = self._target_assigner_dict[task_name]\n (batch_indices, batch_regression_offsets,\n batch_weights) = assigner.assign_joint_regression_targets(\n height=input_height,\n width=input_width,\n gt_keypoints_list=gt_keypoints_list,\n gt_classes_list=gt_classes_list,\n gt_weights_list=gt_weights_list,\n gt_boxes_list=gt_boxes_list)\n\n loss = 0.0\n for prediction in regression_predictions:\n batch_size, out_height, out_width, _ = _get_shape(prediction, 4)\n reshaped_prediction = tf.reshape(\n prediction, shape=[batch_size, out_height, out_width, -1, 2])\n reg_prediction = cn_assigner.get_batch_predictions_from_indices(\n reshaped_prediction, batch_indices)\n unweighted_loss = localization_loss_fn(\n reg_prediction,\n batch_regression_offsets,\n weights=tf.expand_dims(tf.ones_like(batch_weights), -1))\n # Apply the weights after the loss function to have full control over it.\n loss += batch_weights * tf.reduce_sum(unweighted_loss, axis=1)\n\n loss = tf.reduce_sum(loss) / (\n float(len(regression_predictions)) *\n tf.maximum(tf.reduce_sum(batch_weights), 1.0))\n return loss", "def misclassification_criterion_rerr(instance, adv_imgs, labels, imgs):\n pred, det = instance.predict(imgs)\n acc_err_bool = ~torch.argmax(pred, dim=1).eq(labels.to(instance.device)) & det\n adv_pred, adv_det = instance.predict(torch.clamp(adv_imgs, 0, 1))\n robust_err_bool = ~torch.argmax(adv_pred, dim=1).eq(labels.to(instance.device)) & adv_det\n mask = det | adv_det\n err = acc_err_bool | robust_err_bool\n return err, mask", "def pollster_to_weight(pollster, pollster_errors):\r\n if pollster not in pollster_errors:\r\n weight = average_error_to_weight(DEFAULT_AVERAGE_ERROR)\r\n else:\r\n weight = average_error_to_weight(pollster_errors[pollster])\r\n return weight", "def get_weighted_loss(pos_weights, neg_weights, epsilon=1e-7):\n def weighted_loss(y_true, y_pred):\n \"\"\"\n Return weighted loss value. 
\n\n Args:\n y_true (Tensor): Tensor of true labels, size is (num_examples, num_classes)\n y_pred (Tensor): Tensor of predicted labels, size is (num_examples, num_classes)\n Returns:\n loss (Tensor): overall scalar loss summed across all classes\n \"\"\"\n # initialize loss to zero\n loss = 0.0\n \n ### START CODE HERE (REPLACE INSTANCES OF 'None' with your code) ###\n\n for i in range(len(pos_weights)):\n \n # for each class, add average weighted loss for that class \n pos_avg = -1*(pos_weights[i]*y_true[:,i]*tf.keras.backend.log(y_pred[:,i]+epsilon))\n neg_avg = -1*(neg_weights[i]*(1-y_true[:,i])*tf.keras.backend.log(1-y_pred[:,i]+epsilon))\n loss =loss + tf.keras.backend.mean(pos_avg + neg_avg) \n #complete this line\n return loss\n \n ### END CODE HERE ###\n return weighted_loss", "def compute_errors(gt, pred, selector):\n gt = gt[selector]\n pred = pred[selector]\n\n thresh = np.maximum((gt / pred), (pred / gt))\n a1 = (thresh < 1.25 ).mean()\n a2 = (thresh < 1.25 ** 2).mean()\n a3 = (thresh < 1.25 ** 3).mean()\n\n rmse = (gt - pred) ** 2\n rmse = np.sqrt(rmse.mean())\n\n rmse_log = (np.log(gt) - np.log(pred)) ** 2\n rmse_log = np.sqrt(rmse_log.mean())\n\n abs_rel = np.mean(np.abs(gt - pred) / gt)\n\n sq_rel = np.mean(((gt - pred) ** 2) / gt)\n\n return abs_rel, sq_rel, rmse, rmse_log, a1, a2, a3", "def class_probabilities(image, dict, show_debug=False):\n probabilities = {}\n for label, compare_img in dict.items():\n # equalize size\n ch = compare_img.shape[0]\n cw = compare_img.shape[1]\n img = resize(image, (ch, cw), preserve_range=True)\n\n # take diff\n diff = np.abs(img - compare_img)\n histo, _ = np.histogram(diff, bins=20)\n histo = histo[1:]\n probabilities[label] = sum(histo)\n\n # show images/diff\n if show_debug:\n plt.subplot(321)\n plt.title('orig')\n plt.imshow(image, cmap='gray')\n plt.subplot(322)\n plt.title('compare_img')\n plt.imshow(compare_img, cmap='gray')\n plt.subplot(323)\n plt.title('diff')\n plt.imshow(diff, cmap='gray')\n plt.show()\n\n return probabilities", "def get_metrics(y_true, y_pred):\n return {'acc': np.mean(y_true == y_pred)}", "def fXY_std_errs(self) -> Dict[Tuple[int, ...], Optional[float]]:\n return {tuple(es.targets): es.fXY_std_err for es in self.edges_specs}", "def get_loss(self, outputs, targets, masks, joints):\n losses = dict()\n heatmaps_losses, push_losses, pull_losses = self.loss(outputs, targets, masks, joints)\n for idx in range(len(targets)):\n if heatmaps_losses[idx] is not None:\n heatmaps_loss = heatmaps_losses[idx].mean(dim=0)\n if 'heatmap_loss' not in losses:\n losses['heatmap_loss'] = heatmaps_loss\n else:\n losses['heatmap_loss'] += heatmaps_loss\n if push_losses[idx] is not None:\n push_loss = push_losses[idx].mean(dim=0)\n if 'push_loss' not in losses:\n losses['push_loss'] = push_loss\n else:\n losses['push_loss'] += push_loss\n if pull_losses[idx] is not None:\n pull_loss = pull_losses[idx].mean(dim=0)\n if 'pull_loss' not in losses:\n losses['pull_loss'] = pull_loss\n else:\n losses['pull_loss'] += pull_loss\n return losses", "def metrics(labels, predictions, wqs, thresholds):\n sorted_groups = []\n cut_indices = []\n for q in range(1, 5):\n sorted_group = sorted(\n [(prediciton[0], label)\n for (prediciton, label, wq) in zip(predictions, labels, wqs)\n if wq == q],\n reverse=True)\n sorted_groups.append(sorted_group)\n cut_index = int(len(sorted_group) * 0.3)\n for i in range(len(sorted_group)):\n if sorted_group[i][0] <= thresholds[q - 1]:\n cut_index = i\n break\n cut_indices.append(cut_index)\n\n fprs = [\n 
get_fpr(sorted_group, cut_index)\n for (sorted_group, cut_index) in zip(sorted_groups, cut_indices)\n ]\n fairness_violation = max(fprs) - min(fprs)\n\n errs = [\n errors(sorted_group, cut_index)\n for (sorted_group, cut_index) in zip(sorted_groups, cut_indices)\n ]\n acc = 1.0 - np.sum(errs) / len(labels)\n\n return (acc, fairness_violation)", "def eval_metrics(y, pred):\n classification_error = np.sum(pred != y) / float(y.shape[0])\n return classification_error", "def evaluate(self, training_scores, original_test_scores, imitation_test_scores):\n\n #finding a threshold: third to smallest training score\n sorted_scores = np.sort(training_scores)\n threshold = sorted_scores[2]\n\n #computing the number of errors\n errors = len(np.where(original_test_scores < threshold)[0])\n errors += len(np.where(imitation_test_scores > threshold)[0])\n\n #computing the local accuracy\n accuracy = 1 - errors/(len(original_test_scores)+len(imitation_test_scores))\n return accuracy, threshold" ]
[ "0.6313117", "0.5606973", "0.55354184", "0.55307525", "0.5491061", "0.54466426", "0.5413339", "0.5376725", "0.5321494", "0.5300928", "0.5286332", "0.52586734", "0.5237726", "0.5230793", "0.52304846", "0.5226977", "0.5218962", "0.5217092", "0.51976115", "0.5183116", "0.5169867", "0.51389265", "0.51352704", "0.51352704", "0.50988704", "0.5093193", "0.5091579", "0.5073541", "0.5072125", "0.5059275", "0.50544935", "0.5047826", "0.50367975", "0.5030786", "0.5030734", "0.50182676", "0.50180024", "0.5015289", "0.50146735", "0.49988985", "0.49987295", "0.49896663", "0.49891773", "0.49743652", "0.49663758", "0.496578", "0.49613193", "0.49588147", "0.49575448", "0.49555352", "0.49455142", "0.49414837", "0.4940454", "0.49385518", "0.4936368", "0.49336487", "0.49335298", "0.49330616", "0.49269295", "0.49239537", "0.4923787", "0.49169454", "0.49117556", "0.49105114", "0.4908038", "0.49071535", "0.49068338", "0.49044955", "0.49023506", "0.49009603", "0.48952308", "0.48903716", "0.4887354", "0.488069", "0.48786047", "0.4873165", "0.48721138", "0.48664972", "0.48642036", "0.48588353", "0.48580647", "0.485215", "0.48497427", "0.48455706", "0.48433676", "0.48423082", "0.4840292", "0.48370212", "0.4836507", "0.4836263", "0.4833958", "0.48318627", "0.4827393", "0.48253447", "0.48206538", "0.4820307", "0.48197925", "0.48157623", "0.48152554", "0.48133868" ]
0.76960677
0
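Editorial aside (not part of either dataset record): several of the negative snippets above reduce to the same plain misclassification-rate computation — count the predictions that disagree with the labels and divide by the number of samples. A minimal, self-contained sketch of that pattern, using made-up arrays, is:

import numpy as np

# Made-up predictions and ground-truth labels, for illustration only.
pred = np.array([0, 1, 1, 0, 1])
y = np.array([0, 1, 0, 0, 0])

# Fraction of samples on which the prediction disagrees with the label.
error_rate = np.sum(pred != y) / float(y.shape[0])  # -> 0.4 (2 mistakes out of 5)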
Given a dictionary mapping classifiers to their error rates, returns the best classifier, or raises NoGoodClassifiersError if the best classifier has an error rate of 1/2. 'Best' means 'smallest error rate' if use_smallest_error is True, otherwise 'error rate furthest from 1/2'.
def pick_best_classifier(classifier_to_error_rate, use_smallest_error=True):
    best_classifier = None
    if use_smallest_error:
        best_classifier = min(classifier_to_error_rate, key=classifier_to_error_rate.get)
    else:
        best_classifier = max(classifier_to_error_rate, key=lambda x: abs(classifier_to_error_rate[x] - 0.5))
    if make_fraction(classifier_to_error_rate[best_classifier]) == make_fraction(1, 2):
        raise NoGoodClassifiersError
    # find a classifier that comes before this one alphabetically
    for c in classifier_to_error_rate:
        if use_smallest_error and classifier_to_error_rate[c] == classifier_to_error_rate[best_classifier]:
            if c < best_classifier:
                best_classifier = c
        if not use_smallest_error:
            error = make_fraction(abs(classifier_to_error_rate[best_classifier] - 0.5))
            check_error = make_fraction(abs(classifier_to_error_rate[c] - 0.5))
            if error == check_error:
                if c < best_classifier:
                    best_classifier = c
    return best_classifier
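Illustrative usage sketch (editorial addition, not part of the dataset record): the error-rate dictionary below is made up, and the sketch assumes that pick_best_classifier above, the make_fraction helper, and the NoGoodClassifiersError exception from the surrounding lab code are all in scope.

# Hypothetical error rates for three weak classifiers.
error_rates = {"h1": 0.3, "h2": 0.55, "h3": 0.8}

# Smallest error rate wins: h1 at 0.3.
assert pick_best_classifier(error_rates, use_smallest_error=True) == "h1"

# Error rate furthest from 1/2 wins: h3, since |0.8 - 0.5| = 0.3 beats |0.3 - 0.5| = 0.2.
assert pick_best_classifier(error_rates, use_smallest_error=False) == "h3"

# A best classifier sitting exactly at 1/2 is no better than chance, so the call raises.
try:
    pick_best_classifier({"h1": 0.5, "h2": 0.5})
except NoGoodClassifiersError:
    pass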
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_best_classifier(data, possible_classifiers, target_classifier):\n best_disorder_score = 10000000\n best_classifier = None\n try:\n for classifier in possible_classifiers:\n total_disorder = average_test_disorder(data, classifier, target_classifier)\n if total_disorder < best_disorder_score:\n best_classifier = classifier\n best_disorder_score = total_disorder\n else:\n pass\n if best_classifier!=None:\n branches = split_on_classifier(data, best_classifier)\n if len(branches) == 1:\n raise NoGoodClassifiersError\n else:\n return best_classifier\n except Exception as e:\n raise NoGoodClassifiersError", "def _fetch_best_weak_classifier(self, weighted_patches):\n min_error = 2.\n print \"Training and measuring error for %d classifiers\" % len(self.all_classifiers),\n dec = .05\n i = 0\n for wc in self.all_classifiers:\n i += 1\n wc.train(weighted_patches)\n if wc.error < min_error:\n min_error = wc.error\n ret = wc\n if i > dec * len(self.all_classifiers):\n dec += .05\n print \".\",\n print \"[DONE]\"\n return ret", "def __get_best_score(scores):\n best = max(scores.items(), key=operator.itemgetter(1))[0]\n print(\"The best classification for this corpus is: \" + str(best))\n return best", "def get_acceptable_classification_metrics(label_array):\n\n # this is a classifier so the acceptable objective\n # functions definitely include those metrics that\n # are specifically for classification and also\n # the unweighted kappa metrics\n acceptable_metrics = CLASSIFICATION_ONLY_METRICS | UNWEIGHTED_KAPPA_METRICS\n\n # now let us consider which other metrics may also\n # be acceptable depending on whether the labels\n # are strings or (contiguous) integers/floats\n label_type = label_array.dtype.type\n\n # CASE 1: labels are strings, then no other metrics\n # are acceptable\n if issubclass(label_type, (np.object_, str)):\n pass\n\n # CASE 2: labels are integers or floats; the way\n # it works in SKLL, it's guaranteed that\n # class indices will be sorted in the same order\n # as the class labels therefore, ranking metrics\n # such as various correlations should work fine.\n elif issubclass(label_type, (int,\n np.int32,\n np.int64,\n float,\n np.float32,\n np.float64)):\n acceptable_metrics.update(CORRELATION_METRICS)\n\n # CASE 3: labels are numerically contiguous integers\n # this is a special sub-case of CASE 2 which\n # represents ordinal classification. Only in this\n # case, weighted kappas -- where the distance\n # between the class labels has a special\n # meaning -- can be allowed. This is because\n # class indices are always contiguous and all\n # metrics in SKLL are computed in the index\n # space, not the label space. 
Note that floating\n # point numbers that are equivalent to integers\n # (e.g., [1.0, 2.0, 3.0]) are also acceptable.\n if contiguous_ints_or_floats(label_array):\n acceptable_metrics.update(WEIGHTED_KAPPA_METRICS)\n\n # if there are any custom metrics registered, include them too\n if len(_CUSTOM_METRICS) > 0:\n acceptable_metrics.update(_CUSTOM_METRICS)\n\n return acceptable_metrics", "def get_best_thresholds(labels, test_y, outputs, plot=False):\n t_max = [0] * len(labels)\n f_max = [0] * len(labels)\n\n for i, label in enumerate(labels):\n ts = []\n fs = []\n\n for t in np.linspace(0.1, 0.99, num=50):\n p, r, f, _ = precision_recall_fscore_support(test_y[:,i], np.where(outputs[:,i]>t, 1, 0), average='micro')\n ts.append(t)\n fs.append(f)\n if f > f_max[i]:\n f_max[i] = f\n t_max[i] = t\n\n if plot:\n print(f'LABEL: {label}')\n print(f'f_max: {f_max[i]}')\n print(f't_max: {t_max[i]}')\n\n plt.scatter(ts, fs)\n plt.show()\n \n return t_max, f_max", "def find_best_classifier(x_train, x_test, y_train, y_test):\n max_depth, _ = find_best_parameters(\n 'max_depth', list(range(1, 30)),\n x_train, x_test, y_train, y_test)\n print(\"Best max_depth t: \", max_depth)\n min_samples_split, _ = find_best_parameters(\n 'min_samples_split', list(range(2, 400)),\n x_train, x_test, y_train, y_test)\n min_samples_split = int(min_samples_split)\n print(\"Best min samples split: \", min_samples_split)\n min_samples_leaf, _ = find_best_parameters(\n 'min_samples_leaf', list(range(2, 200)),\n x_train, x_test, y_train, y_test)\n min_samples_leaf = int(min_samples_leaf)\n print(\"Best sample leaf: \", min_samples_leaf)\n max_leaf_nodes, _ = find_best_parameters(\n 'max_leaf_nodes', list(range(2, 150)),\n x_train, x_test, y_train, y_test)\n max_leaf_nodes = int(max_leaf_nodes)\n print(\"Best max leaf nodes split: \", max_leaf_nodes)\n min_impurity_decrease, _ = find_best_parameters(\n 'min_impurity_decrease', np.arange(0.0005, 0.1, 0.0005),\n x_train, x_test, y_train, y_test)\n print(\"Best min impurity decrease: \", min_impurity_decrease)\n clf = DecisionTreeClassifier(\n min_impurity_decrease=min_impurity_decrease,\n max_depth=max_depth,\n min_samples_leaf=min_samples_leaf,\n max_leaf_nodes=max_leaf_nodes,\n min_samples_split=min_samples_split,\n random_state=0)\n clf = clf.fit(x_train, y_train)\n return clf", "def classify(self, doc, default=None):\n probs = {}\n \n # Find the category with the highest probability\n max = Decimal(0)\n for cat in self.categories():\n probs[cat] = self.prob(doc, cat)\n if probs[cat] > max: \n max = probs[cat]\n best = cat\n\n if max == 0.0:\n return default\n \n # Make sure the probability exceeds threshold*next best\n for cat in probs:\n if cat == best:\n continue\n if probs[cat]*Decimal(str(self.get_threshold(best)))>probs[best]:\n return default\n \n print probs[best]\n return best", "def get_best_model(res, model_type):\n if model_type == 'classification':\n # the best classification model according to f1 metric\n best = sorted([(k, v['f1']) for k, v in res.items()], key=lambda x: -x[1])[0]\n # the best regression model according to mse metric\n elif model_type == 'regression':\n best = sorted([(k, v['mse']) for k, v in res.items()], key=lambda x: x[1])[0]\n\n return best", "def best_cv_training(args: dict):\n assert len(args.learning_rates) == args.s, (\n \"learning_rates should be of size s\"\n )\n np.random.seed(args.seed) # set seed\n dataloader_pairs = get_dataloaders(\n args.folder, args.dimensions, args.batch_size, args.s, args.num_workers\n )\n train_losses, 
val_losses, metrics = [], [], []\n best_metric = -1\n final_model = None\n for i, pair in enumerate(dataloader_pairs):\n mod, train_loss, val_loss, metric = train(args, i, pair)\n train_losses.append(train_loss)\n val_losses.append(val_loss)\n metrics.append(metric)\n if metric > best_metric:\n best_metric = metric\n final_model = mod\n else:\n del mod # remove from GPU resources\n return final_model, best_metric", "def classify(data, labels, (train_idx, test_idx), classifier=None):\r\n\r\n assert classifier is not None, \"Why would you pass not classifier?\"\r\n\r\n # Data scaling based on training set\r\n scaler = SupervisedStdScaler() #SupervisedRobustScaler() # # \r\n scaler.fit(data[train_idx,:], labels[train_idx], label=-1)\r\n #scaler.fit(data[train_idx,:], labels[train_idx])\r\n data_train = scaler.transform(data[train_idx,:])\r\n data_test = scaler.transform(data[test_idx,:])\r\n try:\r\n classifier.fit(data_train, labels[train_idx])\r\n \r\n \r\n confMat = confusion_matrix(labels[test_idx],\r\n classifier.predict(data_test))\r\n if confMat.shape == (1,1):\r\n if all(labels[test_idx] == -1):\r\n confMat = np.array([[confMat[0], 0], [0, 0]], dtype=confMat.dtype)\r\n else:\r\n confMat = np.array([[0, 0], [0, confMat[0]]], dtype=confMat.dtype)\r\n confMatRate = confMat / np.tile(np.sum(confMat, axis=1).astype('float'), (2,1)).transpose()\r\n totalErr = (confMat[0, 1] + confMat[1, 0]) / float(confMat.sum())\r\n #if type(classifier) not in [type(None), DummyClassifier]:\r\n if hasattr(classifier,'param_grid'): \r\n #isinstance(classifier, GridSearchCV) or \\\r\n # isinstance(classifier, RandomizedSearchCV):\r\n fitted_model = classifier.best_estimator_\r\n else:\r\n fitted_model = copy.copy(classifier) \r\n return confMatRate, totalErr, fitted_model\r\n except np.linalg.linalg.LinAlgError as e:\r\n # sahil added statement to raise the error instead of returning nun values\r\n print e.message\r\n raise e\r\n # return np.array([[np.nan, np.nan], [np.nan, np.nan]]), np.nan, None\r", "def select_best_results(results, metric='mean_auc', best=\"max\"):\n best_metric = 0\n best_model = ''\n for model, result in results.items():\n if best == \"max\":\n is_better = result[metric] > best_metric\n elif best == \"min\":\n is_better = result[metric] < best_metric\n else:\n raise Exception('best must be either min or max')\n\n if is_better:\n best_metric = result[metric]\n best_model = model\n\n output = {\n 'model': best_model,\n 'hyper_params': results[best_model]['hyper_params'],\n metric: best_metric\n }\n\n return output", "def get_best_classifier(self, score_target, dataset_id=None,\n datarun_id=None, method=None,\n hyperpartition_id=None):\n classifiers = self.get_classifiers(dataset_id=dataset_id,\n datarun_id=datarun_id,\n method=method,\n hyperpartition_id=hyperpartition_id,\n status=ClassifierStatus.COMPLETE)\n\n if '_judgment_metric' not in score_target:\n score_target += '_judgment_metric'\n\n if not classifiers:\n return None\n return max(classifiers, key=attrgetter(score_target))", "def getBest(self, category):\n if category == 'Accuracy':\n index = np.argmax(self.trainAcc)\n elif category == 'Error':\n index = np.argmin(self.trainError)\n\n return self.trainError[index], self.trainAcc[index], self.w[index]", "def find_best_classifier(classifiers, X_t, y_t, X_v, y_v, params, jobs):\n\n # Initialize result storage\n clfs_return = []\n train_scores = []\n test_scores = []\n\n # Loop through classifiers\n for classifier in classifiers:\n # Grid search, calibrate, and test the 
classifier\n classifier, train_score, test_score = train_calibrate_predict(\n classifier, X_t, y_t, X_v, y_v, params[classifier], jobs)\n\n # Append the result to storage\n clfs_return.append(classifier)\n train_scores.append(train_score)\n test_scores.append(test_score)\n\n # Return storage\n return clfs_return, train_scores, test_scores", "def select_best_th(metrics_dict: Dict, metric: str):\n max_metric_ix = np.argmax(metrics_dict[metric])\n return metrics_dict['metrics_ths'][max_metric_ix]", "def bestOf(predictorList):\n assert predictorList != [], \"Predictor list is empty!\"\n bestList = []\n bestRate = -1.0\n for p in predictorList:\n if p.successRate > bestRate:\n bestList = [p]\n bestRate = p.successRate\n elif p.successRate == bestRate:\n bestList.append(p)\n return bestList", "def test_format_best_results(self):\r\n # Single category.\r\n exp = ('Method name\\tNumber of categories\\tCategories\\t'\r\n 'rho statistics\\nBEST\\t1\\tDOB = 1\\t(-0.001, \\'1\\')\\n')\r\n obs = format_best_results({'method_name': 'BEST', 'num_vars': 1,\r\n 'vars': ['DOB = 1'],\r\n 'rho_vals': [(-0.0010, '1')]})\r\n self.assertEqual(obs, exp)\r\n\r\n # Multiple categories.\r\n exp = ('Method name\\tNumber of categories\\tCategories\\t'\r\n 'rho statistics\\nBEST\\t2\\tDOB = 1, pH = 2\\t(-0.001, \\'1\\'), '\r\n '(0.99909, \\'1,2\\')\\n')\r\n obs = format_best_results({'method_name': 'BEST', 'num_vars': 2,\r\n 'vars': ['DOB = 1', 'pH = 2'],\r\n 'rho_vals': [(-0.0010, '1'),\r\n (0.99909, '1,2')]})\r\n self.assertEqual(obs, exp)", "def getBestFittedModel( models, features ):\r\n\r\n\tvalidModels = []\r\n\tclusteringScores = []\r\n\tfor model in models:\r\n\t\t#Skip mono cluster models\r\n\t\tif st.getNbClusters( model ) < 2: continue\r\n\t\tvalidModels.append( model )\r\n\t\tlabels = model.labels_\r\n\t\tclusteringScore = evaluateClusters(features, labels)\r\n\t\tclusteringScores.append( clusteringScore)\r\n\tif len(clusteringScores) == 0: return False, -1\r\n\tbestScoreIndex = np.argmax(clusteringScores)\r\n\treturn validModels[bestScoreIndex], clusteringScores[bestScoreIndex]", "def multiclass_roc(\n preds: Tensor,\n target: Tensor,\n num_classes: int,\n thresholds: Optional[Union[int, List[float], Tensor]] = None,\n ignore_index: Optional[int] = None,\n validate_args: bool = True,\n) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]:\n if validate_args:\n _multiclass_precision_recall_curve_arg_validation(num_classes, thresholds, ignore_index)\n _multiclass_precision_recall_curve_tensor_validation(preds, target, num_classes, ignore_index)\n preds, target, thresholds = _multiclass_precision_recall_curve_format(\n preds, target, num_classes, thresholds, ignore_index\n )\n state = _multiclass_precision_recall_curve_update(preds, target, num_classes, thresholds)\n return _multiclass_roc_compute(state, num_classes, thresholds)", "def log_best_performer(self) -> None:\n best = self.get_highest_accuracy()\n self.logger.info(f\"\\n\\nThe model with the highest accuracy {best[0]} has the following characteristics: \\n\")\n for k, v in best[1].items():\n if k != 'best_performer':\n self.logger.info(f\"{k} : {v}\")\n else:\n self.logger.info(f\"Best Accuracy: {v['Accuracy']}\")\n self.logger.info(\"Features used: \")\n for f in v['Labels']:\n self.logger.info(f)\n for nw, w in v['Vars'].items():\n self.logger.info(f\"{nw}: {w}\")", "def get_best_result(log_dict, mode=\"regress\"):\n logs = OrderedDict()\n sum_score = 0\n for log_item in log_dict:\n csv_log = 
utils.parse_csv_log(log_dict[log_item])\n if mode == \"regress\":\n logs[log_item] = np.min(csv_log[\"val_mean_squared_error\"])\n sum_score += logs[log_item]\n elif mode == \"class\":\n logs[log_item] = np.max(csv_log[\"val_accuracy\"])\n sum_score += logs[log_item]\n elif mode == \"binary\":\n logs[log_item] = np.max(csv_log[\"val_binary_accuracy\"])\n sum_score += logs[log_item]\n\n return logs, sum_score", "def _get_best_metrics(self,\n metric_type: str,\n scores: list,\n greater_is_better: bool = True,\n verbose: bool = True) -> Tuple[int, int]:\n if greater_is_better:\n best_score = max(scores)\n else:\n best_score = min(scores)\n\n best_index = scores.index(best_score)\n best_threshold = self.search_space[best_index]\n self.optimized_metrics.update(\n {\n metric_type: {\n 'best_score': best_score,\n 'best_threshold': best_threshold,\n 'all_scores': scores,\n },\n },\n )\n if verbose:\n print(f'best {metric_type}: {best_score} occurs at threshold {best_threshold}')\n return best_score, best_threshold", "def is_best(self, metric: float) -> bool:", "def findMinMaxFeatures(pl_accuracy, pw_accuracy, sl_accuracy, sw_accuracy):\r\n \r\n All_Accuracies = {pl_accuracy: 'Petal length', \r\n pw_accuracy: 'Petal width', \r\n sl_accuracy: 'Sepal length', \r\n sw_accuracy: 'Sepal width'}\r\n \r\n worst_accuracy = All_Accuracies[min(All_Accuracies.keys())]\r\n \r\n best_accuracy = All_Accuracies[max(All_Accuracies.keys())]\r\n\r\n return worst_accuracy, best_accuracy", "def class_metrics_for_ths(y_true: np.array, y_pred_scores: np.array, lim_ths: Tuple) -> Dict:\n fpr, tpr, ths = roc_curve(y_true, y_pred_scores)\n # TH generation to search for best cutoff\n metrics_th = np.linspace(min(lim_ths), max(lim_ths), 20)\n y_pred_ths = [predict_label(y_pred_scores, th) for th in metrics_th]\n return {\n 'accuracy': [accuracy_score(y_true, y_pred) for y_pred in y_pred_ths],\n 'balanced_accuracy': [balanced_accuracy_score(y_true, y_pred) for y_pred in y_pred_ths],\n 'precision': [precision_score(y_true, y_pred) for y_pred in y_pred_ths],\n 'recall': [recall_score(y_true, y_pred) for y_pred in y_pred_ths],\n 'f1': [f1_score(y_true, y_pred) for y_pred in y_pred_ths],\n 'roc_auc': roc_auc_score(y_true, y_pred_scores),\n 'fpr': fpr,\n 'tpr': tpr,\n 'roc_ths': ths,\n 'metrics_ths': metrics_th\n }", "def personal_best(scores):\n return max(scores)", "def eval_all(cls_prob, dtrain):\n #determine the top k predictions\n labels = dtrain.get_label()\n top_k = cls_prob.argsort(axis = 1)[:,::-1][:,:5]\n# top_k = cls_prob.argsort(axis = 1)[:,:k:-1]\n #convert true values and compared with predictions to check for equality\n labels = labels[:, None]\n return 'error', 1-ndcg(top_k, labels)/len(labels)", "def get_k_best(data_dict, features_list, k):\n data = featureFormat(data_dict, features_list)\n labels, features = targetFeatureSplit(data)\n\n k_best = SelectKBest(k=k)\n k_best.fit(features, labels)\n scores = k_best.scores_\n print(scores)\n unsorted_pairs = zip(features_list[1:], scores)\n sorted_pairs = list(reversed(sorted(unsorted_pairs, key=lambda x: x[1])))\n k_best_features = dict(sorted_pairs[:k])\n print (\"{0} best features: {1}\\n\".format(k, k_best_features.keys(), scores))\n return k_best_features", "def get_best_model(self, d_model_info, metric='F1', delta_auc_th=0.03, verbose=False):\n # select valid models (abs(auc_train - auc_test)<0.03)\n valid_model = {}\n for key, param in d_model_info.items():\n if param['metrics']['delta_auc'] <= delta_auc_th:\n valid_model[key] = param\n\n # Best model according 
to selected metric\n if len(valid_model.keys()) > 0:\n best_model_idx = max(valid_model, key=lambda x: valid_model[x].get('metrics').get(metric))\n if verbose:\n print(' >', len(valid_model.keys()), ' valid models |auc(train)-auc(test)|<=' + str(delta_auc_th))\n print(' > best model : ' + str(best_model_idx))\n else:\n best_model_idx = None\n print('0 valid model')\n\n return best_model_idx, list(valid_model.keys())", "def get_k_best(data_dict, features_list, k):\r\n data = featureFormat(data_dict, features_list)\r\n labels, features = targetFeatureSplit(data)\r\n k_best = SelectKBest(k=k)\r\n k_best.fit(features, labels)\r\n scores = k_best.scores_\r\n print(scores)\r\n unsorted_pairs = zip(features_list[1:], scores)\r\n sorted_pairs = list(reversed(sorted(unsorted_pairs, key=lambda x: x[1])))\r\n k_best_features = dict(sorted_pairs[:k])\r\n print (\"{0} best features: {1}\\n\".format(k, k_best_features.keys(), scores))\r\n return k_best_features", "def _get_best(self, populations, func):\n best = None\n for population in populations:\n for item in population:\n if not best:\n best = item\n elif func.fit(*item) > func.fit(*best):\n best = item\n return best", "def get_k_best(data_dict, feature_list, num_features):\n data = featureFormat(data_dict, feature_list)\n target, features = targetFeatureSplit(data)\n\n clf = SelectKBest(k = num_features)\n clf = clf.fit(features, target)\n feature_weights = {}\n for idx, feature in enumerate(clf.scores_):\n feature_weights[feature_list[1:][idx]] = feature\n best_features = sorted(feature_weights.items(), key = lambda k: k[1], reverse = True)[:num_features]\n new_features = []\n for k, v in best_features:\n new_features.append(k)\n return new_features", "def top10_accuracy_scorer(estimator, X, y):\n # predict the probabilities across all possible labels for rows in our training set\n probas = estimator.predict_proba(X)\n \n # get the indices for top 10 predictions for each row; these are the last ten in each row\n # Note: We use argpartition, which is O(n), vs argsort, which uses the quicksort algorithm \n # by default and is O(n^2) in the worst case. 
We can do this because we only need the top ten\n # partitioned, not in sorted order.\n # Documentation: https://numpy.org/doc/1.18/reference/generated/numpy.argpartition.html\n \n \n top10_idx = np.argpartition(probas, -10, axis=1)[:, -10:]\n \n # index into the classes list using the top ten indices to get the class names\n top10_preds = estimator.classes_[top10_idx]\n\n # check if y-true is in top 10 for each set of predictions\n mask = top10_preds == y.reshape((y.size, 1))\n \n # take the mean\n top_10_accuracy = mask.any(axis=1).mean()\n \n return top_10_accuracy", "def get_best_model(x_train, y_train):\n # calculate class weights\n class_weights = class_weight.compute_class_weight('balanced', np.unique(y_train),\n y_train)\n # convert to dict\n class_weights = dict(enumerate(class_weights))\n # encode label data\n y_train = to_categorical(y_train)\n\n return get_model(x_train, y_train, 256, 3, 'adamax', 'normal', class_weights)", "def classify(train=None, test=None, data=None, res_dir=\"res/\", disp=True, outfilename=None):\n utils.print_success(\"Comparison of differents classifiers\")\n if data is not None:\n train_features = data[\"train_features\"]\n train_groundtruths = data[\"train_groundtruths\"]\n test_features = data[\"test_features\"]\n test_groundtruths = data[\"test_groundtruths\"]\n else:\n train = utils.abs_path_file(train)\n test = utils.abs_path_file(test)\n train_features, train_groundtruths = read_file(train)\n test_features, test_groundtruths = read_file(test)\n if not utils.create_dir(res_dir):\n res_dir = utils.abs_path_dir(res_dir)\n classifiers = {\n \"RandomForest\": RandomForestClassifier()\n # \"RandomForest\": RandomForestClassifier(n_estimators=5),\n # \"KNeighbors\":KNeighborsClassifier(3),\n # \"GaussianProcess\":GaussianProcessClassifier(1.0 * RBF(1.0), warm_start=True),\n # \"DecisionTree\":DecisionTreeClassifier(max_depth=5),\n # \"MLP\":MLPClassifier(),\n # \"AdaBoost\":AdaBoostClassifier(),\n # \"GaussianNB\":GaussianNB(),\n # \"QDA\":QuadraticDiscriminantAnalysis(),\n # \"SVM\":SVC(kernel=\"linear\", C=0.025),\n # \"GradientBoosting\":GradientBoostingClassifier(),\n # \"ExtraTrees\":ExtraTreesClassifier(),\n # \"LogisticRegression\":LogisticRegression(),\n # \"LinearDiscriminantAnalysis\":LinearDiscriminantAnalysis()\n }\n for key in classifiers:\n utils.print_success(key)\n clf = classifiers[key]\n utils.print_info(\"\\tFit\")\n clf.fit(train_features, train_groundtruths)\n utils.print_info(\"\\tPredict\")\n predictions = clf.predict(test_features)\n\n if outfilename is not None:\n with open(outfilename, \"w\") as filep:\n for gt, pred in zip(test_groundtruths, predictions):\n filep.write(gt + \",\" + pred + \"\\n\")\n\n # Global\n data = [key]\n data.append(str(precision_score(test_groundtruths, predictions, average='weighted')))\n data.append(str(recall_score(test_groundtruths, predictions, average='weighted')))\n data.append(str(f1_score(test_groundtruths, predictions, average='weighted')))\n data = \",\".join(data)\n if disp:\n print(data)\n else:\n with open(res_dir + \"global.csv\", \"a\") as filep:\n filep.write(data + \",\\n\")\n # Local\n for index, tag in enumerate(list(set(train_groundtruths))):\n precision = precision_score(test_groundtruths, predictions, average=None)\n recall = recall_score(test_groundtruths, predictions, average=None)\n f1 = f1_score(test_groundtruths, predictions, average=None)\n line = key + \",\" + str(precision[index]) + \",\" + str(recall[index]) + \",\" + str(f1[index])\n if disp:\n print(line)\n else:\n with 
open(res_dir + \"tag_\" + tag + \".csv\", \"a\") as filep:\n filep.write(line + \",\\n\")\n return predictions", "def fit_best(self, test_range, apply=True, get_scores=False, verbose=True):\n if isinstance(test_range, int):\n n_clusters = range(2, test_range + 1)\n elif isinstance(test_range, Iterable):\n n_clusters = sorted(list(test_range))\n else:\n raise ValueError(\"test_range must be an integer or iterable\")\n\n im_reshaped = self._im_small.reshape(-1, 3)\n\n sil_scores = []\n for centers in n_clusters:\n km = KMeans(n_clusters=centers)\n pred = km.fit_predict(im_reshaped)\n sil = silhouette_score(im_reshaped, pred)\n sil_scores.append((centers, sil))\n\n best_idx = np.argmax([x[1] for x in sil_scores])\n best_clusters = sil_scores[best_idx][0]\n\n if apply:\n self.fit(n_colors=best_clusters)\n\n if verbose:\n print(f'Best n_clusters: {best_clusters}')\n\n if get_scores:\n return sil_scores", "def find_best_face(faces_dict):\n\n prefix_msg_response = \"The best face is from:\"\n no_valid_urls_msg = \"Please insert valid URLs\"\n if faces_dict:\n max_face_item = max(faces_dict.values(), key=itemgetter(1)) # Finds the image that is the common one,\n # and that has the largest face.\n max_face_image = max_face_item[2]\n max_face_top = max_face_item[3]\n max_face_left = max_face_item[4]\n return f\"{prefix_msg_response} {max_face_image}. The face top is: {max_face_top} and left: {max_face_left}\"\n return no_valid_urls_msg", "def get_best_recall_metrics(self,\n verbose: bool = True) -> Tuple[int, int]:\n recall_scores = list()\n for i in self.search_space:\n classes = self.convert_classes(threshold=i)\n recall_scores.append(recall_score(classes, self.y_true))\n best_recall_score, best_recall_threshold = self._get_best_metrics(\n metric_type='precision_score',\n scores=recall_scores,\n greater_is_better=True,\n verbose=verbose\n )\n return best_recall_score, best_recall_threshold", "def calc_openset_classification(data_outlier_probs, num_classes, num_outlier_threshs=50):\n\n dataset_outliers = []\n threshs = []\n\n # loop through each rejection prior value and evaluate the percentage of the dataset being considered as\n # statistical outliers, i.e. each data point's outlier probability > rejection prior.\n for i in range(num_outlier_threshs - 1):\n outlier_threshold = (i + 1) * (1.0 / num_outlier_threshs)\n threshs.append(outlier_threshold)\n\n dataset_outliers.append(0)\n total_dataset = 0\n\n for j in range(num_classes):\n total_dataset += len(data_outlier_probs[j])\n\n for k in range(len(data_outlier_probs[j])):\n if data_outlier_probs[j][k] > outlier_threshold:\n dataset_outliers[i] += 1\n\n dataset_outliers[i] = dataset_outliers[i] / float(total_dataset)\n\n return {\"thresholds\": threshs, \"outlier_percentage\": dataset_outliers}", "def show_bestScore(train_set, test_set):\n start = input(\"Do you have p, d and q values defined? 
\")\n if start == \"No\" or start == \"no\" or start == \"N\" or start == \"n\":\n print(\"Please define p, d, q values and retry.\")\n else:\n print(\"Finding out...\")\n target = [values for values in train_set]\n testVals = [values for values in test_set]\n target = train_set.astype(\"float32\")\n testVals = test_set.astype(\"float32\")\n score = [10000, (0, 0, 0)]\n for p in pList:\n for d in dList:\n for q in qList:\n order = (p, d, q)\n model = SARIMAX(target, order=order)\n fit = model.fit(disp=False)\n preds = fit.forecast(len(test_set))\n error = mean_absolute_error(testVals, preds)\n if score[0] != 0 and error < score[0]:\n score.pop()\n score.pop()\n score.append(error)\n score.append(order)\n\n best_score, best_order = score[0], score[1]\n out = print(\"Best SARIMAX: MAE = %.f :: Order = %s\" %\n (best_score, best_order))\n if not best_score:\n print(\"Invalid or missing value for MAE. Please retry.\")\n elif not best_order:\n print(\"Invalid or missing order of values. Please retry.\")\n else:\n return out # Best MAE = 700 :: Order = (8, 3, 1)", "def tuning_with_scaling(self, distance_funcs, scaling_classes, x_train, y_train, x_val, y_val):\n self.best_k = None\n self.best_distance_function = None\n self.best_model = None\n bestf = float(\"-inf\")\n self.best_scaler = None\n \n for s in scaling_classes.keys():\n if s == \"min_max_scale\":\n minmax_scaler = MinMaxScaler()\n x_t = minmax_scaler(x_train)\n x_v = minmax_scaler(x_val)\n else:\n normal_scaler = NormalizationScaler()\n x_t = normal_scaler(x_train)\n x_v = normal_scaler(x_val)\n for d in distance_funcs.keys():\n for k in range(1,min(len(x_train),30),2):\n knnmodel = KNN(k, distance_funcs[d])\n knnmodel.train(x_t, y_train)\n pred = knnmodel.predict(x_v)\n f1 = f1_score(y_val,pred)\n if f1>bestf:\n bestk = k\n bestf = f1\n bestd = d\n bests = s\n bestmodel = knnmodel\n \n \n # You need to assign the final values to these variables\n self.best_k = bestk\n self.best_distance_function = bestd\n self.best_scaler = bests\n self.best_model = bestmodel\n #raise NotImplementedError", "def get_best_specificity_metrics(self,\n verbose: bool = True) -> Tuple[int, int]:\n specificity_scores = list()\n for i in self.search_space:\n classes = self.convert_classes(threshold=i)\n tn, fp, fn, tp = confusion_matrix(self.y_true, classes).ravel()\n specificity = tn / (tn + fp)\n specificity_scores.append(specificity)\n best_specificity_score, best_specificity_threshold = self._get_best_metrics(\n metric_type='specificity_score',\n scores=specificity_scores,\n greater_is_better=True,\n verbose=verbose\n )\n return best_specificity_score, best_specificity_threshold", "def best_t(precisions, recalls, thresholds):\n f1 = [2 * (precisions[i] * recalls[i]) / (precisions[i] + recalls[i]) for i in range(0, len(thresholds))]\n return thresholds[np.argmax(f1)]", "def getHighestRank_Naive(self):\n\n # filter out low confidences\n maxConfidence = max(self.Predictors, key=operator.attrgetter('confidence'))\n p = [p for p in self.Predictors if p.confidence >= maxConfidence.confidence]\n \n if len(p) == 1:\n # only one predictor has high confidence\n chosenPredictor = p[0]\n elif len(p) > 1:\n # many predictors has high confidence. 
look for highest wins\n maxScore = max(p, key=operator.attrgetter('scoreWins'))\n \n# maxScore = 0\n# for pred in p:\n# maxScore = max(maxScore, pred.scoreWins - pred.scoreLosts) \n \n predictors = p\n p = [p for p in predictors if p.scoreWins >= maxScore.scoreWins]\n \n if len(p) == 1:\n chosenPredictor = p[0]\n elif len(p) > 1:\n # there are ties. look for lowest losts\n maxScore = min(p, key=operator.attrgetter('scoreLosts'))\n predictors = p\n p = [p for p in predictors if p.scoreLosts == maxScore]\n \n if len(p) == 1:\n chosenPredictor = p[-1]\n elif len(p) > 1:\n # choose at random\n random = rps.random() % len(p)\n chosenPredictor = p[random]\n \n if len(p) == 0:\n maxConfidence = max(self.Predictors, key=operator.attrgetter('confidence'))\n p = [p for p in self.Predictors if p.confidence >= maxConfidence.confidence]\n \n random = rps.random() % len(p)\n chosenPredictor = p[random]\n else:\n # confidences are low. look for highest wins\n maxScore = max(self.Predictors, key=operator.attrgetter('scoreWins'))\n p = [p for p in self.Predictors if p.scoreWins == maxScore]\n \n if len(p) == 1:\n chosenPredictor = p[0]\n elif len(p) > 1:\n # choose at random\n random = rps.random() % len(p)\n chosenPredictor = p[random]\n else:\n # choose at random\n random = rps.random() % len(self.Predictors)\n chosenPredictor = self.Predictors[random]\n \n if Debug:\n maxScore = max([p.scoreWins for p in self.Predictors]) \n print(\"max score: %f \" % (maxScore), end=\"\") \n maxScore = max([p.confidence for p in self.Predictors]) \n print(\"max confidence: %f \" % (maxScore), end=\"\") \n print(\"chosen predictor: %s\" % (chosenPredictor.name))\n #input()\n\n \n rankConfidence = chosenPredictor.confidence\n return chosenPredictor, rankConfidence", "def score_classifier(\n X,\n y,\n clf,\n nfeats=None,\n scoring=default_scorers,\n score_aggreg=default_score_aggreg,\n scale=None,\n decompose=None,\n select=None,\n decompose_params={},\n nfolds=10,\n shuffle=True,\n random_fold_state=None,\n include_train_stats=False,\n):\n # give scoring and score_aggreg elements some names\n scoring = scoring or default_scorers\n scoring = mk_scoring_dict(scoring)\n score_aggreg = score_aggreg or default_score_aggreg\n score_aggreg = mk_score_aggreg_dict(score_aggreg)\n\n if nfeats is None:\n nfeats = np.shape(X)[1]\n\n # X = X[:, :nfeats]\n\n stratified_k_fold = StratifiedKFold(\n y, n_folds=nfolds, shuffle=shuffle, random_state=random_fold_state\n )\n score_info = list()\n for train, test in stratified_k_fold:\n d = dict()\n\n X_train, X_test, y_train, y_test = X[train], X[test], y[train], y[test]\n\n if include_train_stats:\n d['train_pts'] = np.shape(X_train)[0]\n d['train_nfeats'] = np.shape(X_train)[1]\n\n pipeline_steps = list()\n if scale: # preprocessing.StandardScaler(), preprocessing.MinMaxScaler()\n pipeline_steps.append(('scale', scale))\n if decompose:\n pipeline_steps.append(('decompose', decompose))\n if select:\n pipeline_steps.append(('select', feature_selection.SelectKBest(k=nfeats)))\n else:\n X = X[:, :nfeats]\n\n pipeline_steps.append(('clf', clf))\n\n pipeline = Pipeline(steps=pipeline_steps)\n\n pipeline.fit(X_train, y_train)\n y_pred = pipeline.predict(X_test)\n\n for score_name, score_fun in scoring.items():\n d[score_name] = score_fun(y_test, y_pred)\n score_info.append(d)\n\n # return score_info\n score_info = pd.DataFrame(score_info)\n score_result = pd.Series()\n for score_aggreg_name, score_aggreg_fun in score_aggreg.items():\n t = score_info.apply(score_aggreg_fun)\n t.set_axis(\n 
axis=0,\n labels=[\n mk_aggreg_score_name(score_aggreg_name, score_name)\n for score_name in t.index.values\n ],\n )\n score_result = score_result.append(t)\n\n return score_result", "def _max_like_est(self, x):\n if self.method in [None, \"wass\", \"reg\", \"freg\", \"sparse\"]:\n return _comp_prob_rule(x, self.mean_, self.prec_,\n self.logdet_, self.priors_, self.rule)\n elif \"kl\" == self.method.lower():\n return self._KL(x)\n elif \"mean\" == self.method.lower():\n return self._mean(x)\n elif \"fr\" == self.method.lower():\n return self._FR(x)", "def personal_best(scores):\n# return sorted(scores, reverse=True)[0]\n return max(scores)", "def calc_scores(y_true, y_pred, mltype, metrics=None):\n scores = OrderedDict()\n\n if mltype == 'cls': \n scores['auroc'] = sklearn.metrics.roc_auc_score(y_true, y_pred)\n scores['f1_score'] = sklearn.metrics.f1_score(y_true, y_pred, average='micro')\n scores['acc_blnc'] = sklearn.metrics.balanced_accuracy_score(y_true, y_pred)\n\n elif mltype == 'reg':\n scores['r2'] = sklearn.metrics.r2_score(y_true=y_true, y_pred=y_pred)\n scores['mean_absolute_error'] = sklearn.metrics.mean_absolute_error(y_true=y_true, y_pred=y_pred)\n scores['median_absolute_error'] = sklearn.metrics.median_absolute_error(y_true=y_true, y_pred=y_pred)\n scores['mean_squared_error'] = sklearn.metrics.mean_squared_error(y_true=y_true, y_pred=y_pred)\n scores['auroc_reg'] = reg_auroc(y_true=y_true, y_pred=y_pred)\n\n # # https://scikit-learn.org/stable/modules/model_evaluation.html\n # for metric_name, metric in metrics.items():\n # if isinstance(metric, str):\n # scorer = sklearn.metrics.get_scorer(metric_name) # get a scorer from string\n # scores[metric_name] = scorer(ydata, pred)\n # else:\n # scores[metric_name] = scorer(ydata, pred)\n\n return scores", "def get_best_thres(self, data, label, score_func = f1_score):\n pred_prob = self.model.predict(data)\n best_score = 0\n for i_thres in range(0, 100):\n pred_label = [int(i > (i_thres / 100.0)) for i in pred_prob]\n fs = score_func(label, pred_label)\n if best_score < fs:\n best_score = fs\n self.config.thres = i_thres / 100.0\n print ('best score: %0.2f best_thres: %0.2f' % (best_score, self.config.thres))", "def classify(trainX, trainY, testX, testY):\n trainC = getClasses(trainY)\n P = estimatePosterior(trainX, trainC, testX)\n E = fit(testX, P)\n (e_rate, se, interval) = error.confidenceInterval(testY, E)\n return (P, E, e_rate, se, interval)", "def cross_validate(k, original_x_train, original_y_train, label, features: List[str],\n features_to_encode: List[str], balance_ratio: float,\n classifiers: List[ClassifierMixin]) \\\n -> Tuple[List[Tuple[Type[ClassifierMixin], Dict]], Type[ClassifierMixin]]:\n\n X, y = balance_train(original_x_train, original_y_train, label, balance_ratio)\n skf = StratifiedKFold(n_splits=k)\n val_scores = []\n for classifier in classifiers:\n print('Doing ', classifier.__class__)\n clf = make_pipeline(FeatureEncoder(features_to_encode, features), classifier)\n val_scores.append((classifier.__class__,\n model_selection.cross_validate(clf, X, y, scoring=('f1_weighted'),\n cv=skf, n_jobs=-1)))\n best_classifier_class = max([(mod, median(res['test_score'])) for mod, res in val_scores],\n key=lambda x: x[1])[0]\n return val_scores, best_classifier_class", "def get_best_face_recognition(recognitions, desired_label, probability_threshold=4.0):\n\n rospy.logdebug(\"get_best_face_recognition: recognitions = {}\".format(recognitions))\n\n # Only take detections with operator\n # detections = []\n # The old 
implementation took, for each recognition, the (label, prob) pairs where label==desired_label.\n # Other pairs in the same distribution may have higher probability.\n # When the best_recognition is picked, it picked the recognition where the probability for the desired_label is hhighest comapared to other recognitions. BUT: a recognitions highest probability may be for a different label\n # because the selection only compares matching labels, not looking at the probability of non-matching pairs.\n # For example: we have 2 recognitions.\n # in recognition 1, A has 50%, desired_label has 30%, B has 20%.\n # in recognition 2, B has 60%, desired_label has 35%, A has 5%.\n # Then, recognition 2 has the highest probability for the desired_label and is thus picked.\n # Because we take the [0]'th index of the distribution, that name is B\n #\n # Solution: because the probability distributions are sorted, just take the probability distribution where the desired label has the highest probability.\n #for recog in recognitions:\n # for cp in recog.categorical_distribution.probabilities:\n # if cp.label == desired_label:\n # detections.append((recog, cp.probability))\n\n # Sort based on probability\n #if detections:\n # sorted_detections = sorted(detections, key=lambda det: det[1])\n # best_detection = sorted_detections[0][0] # A CategoricalDistribution in a Recognition is already ordered, max prob is at [0]\n #else:\n # best_detection = None\n\n rospy.loginfo(\"Probability threshold %.2f\", probability_threshold)\n for index, recog in enumerate(recognitions):\n rospy.loginfo(\"{index}: {dist}\".format(index=index,\n dist=[(cp.label, \"{:.2f}\".format(cp.probability)) for cp in recog.categorical_distribution.probabilities]))\n\n matching_recognitions = [recog for recog in recognitions if \\\n recog.categorical_distribution.probabilities and \\\n recog.categorical_distribution.probabilities[0].label == desired_label]\n\n if matching_recognitions:\n best_recognition = max(matching_recognitions, key=lambda recog: recog.categorical_distribution.probabilities[0].probability)\n return best_recognition if best_recognition.categorical_distribution.probabilities[0].probability > probability_threshold else None\n else:\n return None # TODO: Maybe so something smart with selecting a recognition where the desired_label is not the most probable for a recognition?", "def error_rate(predictions, labels):\n return 100.0 - (\n 100.0 *\n np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1)) /\n predictions.shape[0])", "def fit_best_classifier(docs, labels, best_result):\n clf = LogisticRegression()\n ans = [tokenize(doc, best_result['punct']) for doc in docs]\n X, vocab = vectorize(ans, best_result['features'], best_result['min_freq'])\n clf.fit(X,labels)\n return clf, vocab", "def classify(self, data):\n score_mappings = self.score(data)\n\n #update this logic to return max or the first thing in sorted list\n\n # score_mappings[\"2\"] = 0.009015777610818933 \n\n # print(score_mappings)\n\n max_value = score_mappings[max(score_mappings, key=score_mappings.get)]\n\n # print(max_value)\n\n score_mappings = dict(filter(lambda x: x[1] == max_value, score_mappings.items()))\n\n # print(score_mappings)\n\n return sorted(score_mappings)[0]", "def adaboost(training_points, classifier_to_misclassified,\n use_smallest_error=True, mistake_tolerance=0, max_rounds=INF):\n point_to_weight = initialize_weights(training_points)\n H = [] # (classifier, voting_power)\n\n while True:\n # exit conditions\n if is_good_enough(H, 
training_points, classifier_to_misclassified, mistake_tolerance):\n break\n if max_rounds == 0:\n break\n classifier_to_error_rate = calculate_error_rates(point_to_weight, classifier_to_misclassified)\n best_classifier = None\n try:\n best_classifier = pick_best_classifier(classifier_to_error_rate, use_smallest_error)\n except NoGoodClassifiersError:\n break\n\n max_rounds -= 1\n error_rate = classifier_to_error_rate[best_classifier]\n\n H.append((best_classifier, calculate_voting_power(error_rate)))\n\n point_to_weight = update_weights(point_to_weight, classifier_to_misclassified[best_classifier], error_rate)\n return H", "def get_k_best(data_dict, features_list, k):\n\n data = featureFormat(data_dict, features_list)\n labels, features = targetFeatureSplit(data)\n\n k_best = SelectKBest(k='all')\n k_best.fit(features, labels)\n scores = k_best.scores_\n unsorted_pairs = zip(features_list[1:], scores)\n sorted_pairs = list(reversed(sorted(unsorted_pairs, key=lambda x: x[1])))\n k_best_features = dict(sorted_pairs[:k])\n return k_best_features", "def error_rate(predictions, labels):\n return 100.0 - (\n 100.0 *\n np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1)) /\n predictions.shape[0])", "def multi_metric_scorer():\n\n scoring = {'AUC': 'roc_auc',\n 'Accuracy': 'accuracy',\n\n 'Balanced_accuracy': make_scorer(\n recall_score,\n pos_label=None,\n average='macro',\n sample_weight=None\n ),\n 'Sensitivity': make_scorer(\n recall_score,\n pos_label=1,\n average='binary',\n sample_weight=None\n ),\n 'Specificity': make_scorer(\n recall_score,\n pos_label=0,\n average='binary',\n sample_weight=None\n ),\n 'F1': make_scorer(\n f1_score, average='weighted'\n ),\n 'PPV': make_scorer(\n precision_score,\n pos_label=1,\n average='binary'\n ),\n 'NPV': make_scorer(\n precision_score, \n pos_label=0, \n average='binary'\n ),\n 'Brier_score': 'brier_score_loss'}\n\n return scoring", "def _loss_smaller(best_eval_result, current_eval_result):\n default_key = metric_keys.MetricKeys.LOSS\n if not best_eval_result or default_key not in best_eval_result:\n raise ValueError(\n 'best_eval_result cannot be empty or no loss is found in it.')\n\n if not current_eval_result or default_key not in current_eval_result:\n raise ValueError(\n 'current_eval_result cannot be empty or no loss is found in it.')\n\n return best_eval_result[default_key] > current_eval_result[default_key]", "def get_tp_score(val_loss, best_val_loss, num_classes=10):\n random_loss = math.log(num_classes)\n\n return (random_loss - val_loss) / (random_loss - best_val_loss)", "def _get_best_ratios(self, context, sensitivities, target_ratio):\n _logger.info('_get_best_ratios for pruning ratie: {}'.format(\n target_ratio))\n\n def func(params, x):\n a, b, c, d = params\n return a * x * x * x + b * x * x + c * x + d\n\n def error(params, x, y):\n return func(params, x) - y\n\n def slove_coefficient(x, y):\n init_coefficient = [10, 10, 10, 10]\n coefficient, loss = leastsq(error, init_coefficient, args=(x, y))\n return coefficient\n\n min_loss = 0.\n max_loss = 0.\n\n # step 1: fit curve by sensitivities\n coefficients = {}\n for param in sensitivities:\n losses = np.array([0] * 5 + sensitivities[param]['loss'])\n precents = np.array([0] * 5 + sensitivities[param][\n 'pruned_percent'])\n coefficients[param] = slove_coefficient(precents, losses)\n loss = np.max(losses)\n max_loss = np.max([max_loss, loss])\n\n # step 2: Find a group of ratios by binary searching.\n flops = context.eval_graph.flops()\n model_size = 
context.eval_graph.numel_params()\n ratios = []\n while min_loss < max_loss:\n loss = (max_loss + min_loss) / 2\n _logger.info(\n '-----------Try pruned ratios while acc loss={:.4f}-----------'.\n format(loss))\n ratios = []\n # step 2.1: Get ratios according to current loss\n for param in sensitivities:\n coefficient = copy.deepcopy(coefficients[param])\n coefficient[-1] = coefficient[-1] - loss\n roots = np.roots(coefficient)\n for root in roots:\n min_root = 1\n if np.isreal(root) and root > 0 and root < 1:\n selected_root = min(root.real, min_root)\n ratios.append(selected_root)\n _logger.info('Pruned ratios={}'.format(\n [round(ratio, 3) for ratio in ratios]))\n # step 2.2: Pruning by current ratios\n param_shape_backup = {}\n self._prune_parameters(\n context.eval_graph,\n context.scope,\n sensitivities.keys(),\n ratios,\n context.place,\n only_graph=True,\n param_shape_backup=param_shape_backup)\n\n pruned_flops = 1 - (float(context.eval_graph.flops()) / flops)\n pruned_size = 1 - (float(context.eval_graph.numel_params()) /\n model_size)\n _logger.info('Pruned flops: {:.4f}'.format(pruned_flops))\n _logger.info('Pruned model size: {:.4f}'.format(pruned_size))\n for param in param_shape_backup.keys():\n context.eval_graph.var(param).set_shape(param_shape_backup[\n param])\n\n # step 2.3: Check whether current ratios is enough\n if abs(pruned_flops - target_ratio) < 0.015:\n break\n if pruned_flops > target_ratio:\n max_loss = loss\n else:\n min_loss = loss\n return sensitivities.keys(), ratios", "def error_rate(predictions, labels):\n return 100.0 - (\n 100.0 *\n numpy.sum(numpy.argmax(predictions, 1) == labels) /\n predictions.shape[0])", "def scoring(estimator, features_test, labels_test):\n pred = estimator.predict(features_test)\n p = metrics.precision_score(labels_test, pred, average='micro')\n r = metrics.recall_score(labels_test, pred, average='micro')\n if p > 0.3 and r > 0.3:\n return metrics.f1_score(labels_test, pred, average='macro')\n return 0", "def most_probable_class(text, weights):\n\n pos_weights = weights['positive']\n neg_weights = weights['negative']\n neu_weights = weights['neutral']\n features = calculate_features(text)\n pos_numerator = 0.0\n neg_numerator = 0.0\n neu_numerator = 0.0\n denominator = 0.0\n for f in features:\n if f in pos_weights and f in neg_weights and f in neu_weights:\n pos_numerator += pos_weights[f] * features[f]\n neg_numerator += neg_weights[f] * features[f]\n neu_numerator += neu_weights[f] * features[f]\n denominator += pos_numerator + neg_numerator + neu_numerator\n else:\n pos_numerator += 0\n neg_numerator += 0\n neu_numerator += 0\n denominator += pos_numerator + neg_numerator + neu_numerator\n\n pos_prob = (\"positive\", exp(pos_numerator))# /exp(denominator))\n neg_prob = (\"negative\", exp(neg_numerator))# /exp(denominator))\n neu_prob = (\"neutral\", exp(neu_numerator))# /exp(denominator))\n return max(neu_prob, neg_prob, pos_prob, key=lambda x: x[1])", "def find_best_threshold(model_profits):\n max_model = None\n max_threshold = None\n max_profit = None\n for model, profits, thresholds in model_profits:\n max_index = np.argmax(profits)\n if not max_model or profits[max_index] > max_profit:\n max_model = model\n max_threshold = thresholds[max_index]\n max_profit = profits[max_index]\n return max_model, max_threshold, max_profit", "def get_preferred_form( percents ) :\n # If a given forms occurrs 70% of the cases (for 2 forms) or more, it is \n # considered preferred\n # TODO: Test an entropy-based measure for choosing among 
the forms\n PRED_THRESHOLD = .9 - .15 * len(percents)\n max_like = (0, None)\n for form in list(percents.keys()) :\n if percents[form] >= PRED_THRESHOLD and percents[form] > max_like[0]:\n max_like = (percents[ form ], form )\n return max_like[ 1 ] # No preferred form", "def error_rate(predictions, labels):\n return 100.0 - (\n 100.0 *\n np.sum(np.argmax(predictions, 1) == labels) /\n predictions.shape[0])", "def get_best( self ):\n if len(self.listScore) < 1:\n if self.bMinimumIsBest: return 9999,\"Unknown\"\n else: return -1,\"Unknown\"\n return self.listScore[0]", "def _evaluate_classifer(self, classifier: object, X_test: np.ndarray, y_test: np.ndarray, scaler: StandardScaler, optimal_threshold: float, beta: float, calculate_confusion_matrix:bool = False) -> tuple:\n\n # If the data was scaled in the pipeline the scaler will be not none othersie (none) don't scale the data\n if scaler is not None:\n X_test = scaler.transform(X_test)\n\n # get probabilities for positive class\n y_pred = classifier.predict_proba(X_test)[:,1]\n\n # predict based on optimal_threshold\n threshold_predictions = [1 if y > optimal_threshold else 0 for y in y_pred]\n\n # calculate scores\n fb_score = fbeta_score(y_test, threshold_predictions, beta=beta)\n balanced_accurcacy = balanced_accuracy_score(y_test, threshold_predictions)\n\n if calculate_confusion_matrix:\n conf_mat = confusion_matrix(y_test, threshold_predictions)\n return fb_score, balanced_accurcacy, conf_mat\n\n return fb_score, balanced_accurcacy", "def classification(i, classifiers, results_proba, x_train, x_test, y_train, y_test):\n\t\ttmp_l = dict()\n\t\tfor clf in classifiers:\n\t\t\tclf.fit(x_train, y_train)\n\t\t\ttry:\n\t\t\t\tbest_clf = clf.best_estimator_\n\t\t\texcept AttributeError:\n\t\t\t\tbest_clf = clf\n\t\t\tname = best_clf.__class__.__name__\n\t\t\ttrain_predictions_proba = clf.predict_proba(x_test)\n\t\t\ttrain_predictions_proba = train_predictions_proba[:, 1]\n\t\t\tresults_proba[name].update({i: train_predictions_proba})\n\t\t\ttmp_l[name] = {str(i): best_clf.score(x_test, y_test)}\n\t\treturn results_proba, tmp_l", "def best_model_based_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor):\n\tmodel_based_score = 0\n\tscaling_factors = [\"0.25*mean\", \"0.5*mean\", \"median\", \"1.25*mean\", \"1.5*mean\"]\n\t# scaling_factors = [\"0.5*mean\", \"median\"]\n\tmodel_based_selector = None\n\tmodel_based_train_features_selected = None\n\tmodel_based_test_features_selected = None\n\n\tfor factor in scaling_factors:\n\t\tprint(factor)\n\t\ttemp_model_based_selector = SelectFromModel(RandomForestRegressor(n_estimators=100), threshold=factor)\n\t\ttemp_model_based_selector.fit(train_features, train_similarity_target)\n\t\ttemp_model_based_train_features_selected = temp_model_based_selector.transform(train_features)\n\t\ttemp_model_based_test_features_selected = temp_model_based_selector.transform(test_features)\n\n\t\tregressor.fit(temp_model_based_train_features_selected, train_similarity_target)\n\n\t\ttemp_score = regressor.score(temp_model_based_test_features_selected, test_similarity_target)\n\t\tprint(\"The score on the selected features (Model Based Selector): %.3f\" % temp_score)\n\n\t\tif temp_score > model_based_score:\n\t\t\tmodel_based_score = temp_score\n\t\t\tmodel_based_selector = temp_model_based_selector\n\t\t\tmodel_based_train_features_selected = temp_model_based_train_features_selected\n\t\t\tmodel_based_test_features_selected = 
temp_model_based_test_features_selected\n\n\tmodel_based_mask = model_based_selector.get_support()\n\tprint(\"This is the model based mask: \")\n\tprint(model_based_mask)\n\n\treturn model_based_selector, model_based_score, model_based_train_features_selected, model_based_test_features_selected, model_based_mask", "def error_rate(predictions, labels):\n return 100.0 - (\n 100.0 *\n np.sum(np.argmax(predictions, 1) == labels) /\n predictions.shape[0])", "def optimize(\n # trials,\n random_state=SEED):\n\n space = {\n 'max_depth': scope.int(hp.uniform('max_depth', 5, 15)),\n 'subsample': hp.uniform('subsample', 0.03, 1),\n 'learning_rate' : hp.loguniform('learning_rate', np.log(0.005), np.log(0.5)) - 0.0001,\n 'colsample_bytree': hp.uniform('colsample_bytree', 0.3, 1),\n 'reg_alpha': hp.loguniform('reg_alpha', np.log(0.005), np.log(5)) - 0.0001,\n 'reg_lambda': hp.loguniform('reg_lambda', np.log(1), np.log(5)),\n 'bagging_freq': hp.choice('bagging_freq', [0, 1]),\n 'num_leaves': scope.int(hp.uniform('num_leaves', 10, 128)),\n 'n_estimators': 1000,\n 'boosting': 'gbdt',\n 'objective': 'multiclass',\n 'num_class': 12,\n 'metric': 'None',\n 'is_unbalance': 'true',\n # 'min_data_per_group': 1000,\n 'verbose': -1,\n 'random_seed': 42,\n \n }\n\n # Use the fmin function from Hyperopt to find the best hyperparameters\n best = fmin(score_model, space, algo=tpe.suggest,\n # trials=trials,\n max_evals=hyperopt_niters)\n return best", "def generate_loss_weight_criteria(num_traits, num_classes=num_classes, weighted_loss=True, top_only=None):\n\tif (weighted_loss):\n\t\tprint(\"use different weight for each class\")\n\t\tprint(\"put more focus on locus with more significant DM p-values\")\n\t\tif (top_only):\n\t\t\tprint(\"weighted loss for the top class only...\")\n\t\t\tclass_weight_each_output_dict = dict([(i, 1) for i in range(0, num_classes-1)])\n\t\t\tclass_weight_each_output_dict[num_classes-1] = num_classes\n\t\t\tclass_weight_dict = dict([(\"out%i\"%i, class_weight_each_output_dict) for i in range(1, num_traits+1)])\n\t\telse:\n\t\t\tclass_weight_each_output_dict = dict([(i, i+1) for i in range(0, num_classes)])\n\t\t\tclass_weight_dict = dict([(\"out%i\"%i, class_weight_each_output_dict) for i in range(1, num_traits+1)])\n\telse:\n\t\tprint(\"use balanced weight for each class\")\n\t\tclass_weight_each_output_dict = dict([(i, 1) for i in range(0, num_classes)])\n\t\tclass_weight_dict = dict([(\"out%i\"%i, class_weight_each_output_dict) for i in range(1, num_traits+1)])\n\treturn class_weight_dict", "def generate_loss_weight_criteria(num_traits, num_classes=num_classes, weighted_loss=True, top_only=None):\n\tif (weighted_loss):\n\t\tprint(\"use different weight for each class\")\n\t\tprint(\"put more focus on locus with more significant DM p-values\")\n\t\tif (top_only):\n\t\t\tprint(\"weighted loss for the top class only...\")\n\t\t\tclass_weight_each_output_dict = dict([(i, 1) for i in range(0, num_classes-1)])\n\t\t\tclass_weight_each_output_dict[num_classes-1] = num_classes\n\t\t\tclass_weight_dict = dict([(\"out%i\"%i, class_weight_each_output_dict) for i in range(1, num_traits+1)])\n\t\telse:\n\t\t\tclass_weight_each_output_dict = dict([(i, i+1) for i in range(0, num_classes)])\n\t\t\tclass_weight_dict = dict([(\"out%i\"%i, class_weight_each_output_dict) for i in range(1, num_traits+1)])\n\telse:\n\t\tprint(\"use balanced weight for each class\")\n\t\tclass_weight_each_output_dict = dict([(i, 1) for i in range(0, num_classes)])\n\t\tclass_weight_dict = dict([(\"out%i\"%i, 
class_weight_each_output_dict) for i in range(1, num_traits+1)])\n\treturn class_weight_dict", "def is_better(self, curr, best, **kwargs):\r\n score_threshold = kwargs.pop('score_threshold', 1e-3)\r\n relative_eps = 1.0 + score_threshold\r\n return curr >= best*relative_eps", "def find_best_k_and_metric(data):\n metrics_and_scores = []\n possible_metrics = [euclidean_distance, manhattan_distance, hamming_distance, cosine_distance]\n for k in range(1, len(data)):\n for metric in possible_metrics:\n cross_validation_score = cross_validate(data, k, metric)\n metrics_and_scores.append([k, metric, cross_validation_score])\n sorted_metrics = sorted(metrics_and_scores, key = lambda item:item[2])\n return (sorted_metrics[-1][0], sorted_metrics[-1][1])", "def fit_best_classifier(docs, labels, best_result):\n ###TODO\n #print('best_result = ',best_result)\n #print('labels = ',labels)\n \n #step 1 -> call tokenize\n #keys = ['punct','features','min_freq','accuracy']\n tokens_list = [tokenize(d,best_result['punct']) for d in docs]\n \n #step 2 -> call vectorize \n #vocabulary = {} \n X, vocab = vectorize(tokens_list, best_result['features'], best_result['min_freq'], vocab=None) # vocab = None\n \n #step 3 -> do LogisticRegression\n clf = LogisticRegression()\n clf.fit(X,labels)\n \n #predictions = clf.predict(X)\n #print('Testing accuracy=%f' %\n #accuracy_score(labels, predictions))\n\n return (clf,vocab) #sending clf too ***", "def test_scikit_learn_compatibility():\n\n # sklearn tests in:\n # https://github.com/scikit-learn/scikit-learn/blob/main/sklearn/utils/estimator_checks.py\n\n skip_tests = {\n \"check_dtype_object\", # the error message required to pass is too specific and incorrect for us\n \"check_classifiers_one_label\", # TODO: fix this! We should accept 1 category\n \"check_classifiers_regression_target\", # we're more permissive and convert any y values to str\n \"check_supervised_y_no_nan\", # error message too specific\n \"check_supervised_y_2d\", # we ignore useless added dimensions\n \"check_fit2d_predict1d\", # we accept 1d for predict\n \"check_fit2d_1sample\", # TODO: we allow fitting on 1 sample, but this kind of input is likely a bug from the caller, so change this\n \"check_regressors_no_decision_function\", # TODO: fix this!\n }\n for estimator, check_func in check_estimator(\n ExplainableBoostingClassifier(), generate_only=True\n ):\n f = check_func.func\n module = f.__module__\n shortname = f.__name__\n fullname = f\"{module}.{shortname}\"\n if shortname not in skip_tests:\n try:\n check_func(estimator)\n except BaseException as e:\n print(fullname)\n print(f\"{type(e).__name__}: {e}\")\n print()\n\n for estimator, check_func in check_estimator(\n ExplainableBoostingRegressor(), generate_only=True\n ):\n f = check_func.func\n module = f.__module__\n shortname = f.__name__\n fullname = f\"{module}.{shortname}\"\n if shortname not in skip_tests:\n try:\n check_func(estimator)\n except BaseException as e:\n print(fullname)\n print(f\"{type(e).__name__}: {e}\")\n print()\n\n for estimator, check_func in check_estimator(\n DPExplainableBoostingClassifier(), generate_only=True\n ):\n f = check_func.func\n module = f.__module__\n shortname = f.__name__\n fullname = f\"{module}.{shortname}\"\n if shortname not in skip_tests:\n try:\n check_func(estimator)\n except BaseException as e:\n print(fullname)\n print(f\"{type(e).__name__}: {e}\")\n print()\n\n for estimator, check_func in check_estimator(\n DPExplainableBoostingRegressor(), generate_only=True\n ):\n f = check_func.func\n 
module = f.__module__\n shortname = f.__name__\n fullname = f\"{module}.{shortname}\"\n if shortname not in skip_tests:\n try:\n check_func(estimator)\n except BaseException as e:\n print(fullname)\n print(f\"{type(e).__name__}: {e}\")\n print()", "def trainWeakClassifier(trainingSamples, weights, feature):\n #compute feature values\n featureValues = []\n positiveOrNegative = []\n for sample in trainingSamples:\n featureValues.append(feature.computeScore(sample[0], 0, 0))\n positiveOrNegative.append(sample[1])\n \n #zip with weights and sort by feature value\n featureValues = zip(featureValues, weights, positiveOrNegative)\n featureValues = sorted(featureValues, key=lambda tup: tup[0])\n \n #sum all weights of the positive and negative samples\n negativeWeightsTotal = 0\n positiveWeightsTotal = 0\n for value in featureValues:\n if value[2] == 1:\n positiveWeightsTotal += value[1]\n else:\n negativeWeightsTotal += value[1]\n \n #find the feature with the smallest error\n bestFeatureIndex = 0\n bestFeatureError = 1e10\n negativeWeightsSoFar = 0\n positiveWeightsSoFar = 0\n positiveOnTheLeft = 0\n positivesTotal = 0\n for i in range(0, len(featureValues)):\n error1 = positiveWeightsSoFar-negativeWeightsSoFar+negativeWeightsTotal\n error2 = negativeWeightsSoFar-positiveWeightsSoFar+positiveWeightsTotal\n error = min([error1, error2])\n \n if bestFeatureError > error:\n bestFeatureError = error\n bestFeatureIndex = i\n positiveOnTheLeft = positivesTotal\n \n if featureValues[i][2] == 1:\n positiveWeightsSoFar += featureValues[i][1]\n positivesTotal += 1\n else:\n negativeWeightsSoFar += featureValues[i][1]\n \n #count how much samples are there on the right\n positiveOnTheRight = positivesTotal - positiveOnTheLeft\n \n #determine the polarity and threshold\n polarity = -1\n threshold = featureValues[bestFeatureIndex][0]\n if positiveOnTheLeft > positiveOnTheRight:\n polarity = 1\n else:\n polarity = -1\n \n #build and return a weak classifier\n return WeakClassifier(feature, threshold, polarity)", "def classify_ar(classifier_params, X_model, Y_model, X_observation, bad_pixels, **kwargs):\n # Encode labels\n le = LabelEncoder()\n le.fit(Y_model)\n Y_model = le.transform(Y_model)\n # Split training and test data\n X_train, X_test, Y_train, Y_test = train_test_split(\n X_model, Y_model, test_size=kwargs.get('test_size', 0.33))\n # Fit classifier\n clf = RandomForestClassifier(**classifier_params)\n clf.fit(X_train, Y_train)\n test_error = 1. 
- clf.score(X_test, Y_test)\n # Classify observations\n Y_observation = clf.predict(X_observation)\n Y_observation_prob = clf.predict_proba(X_observation)\n # Frequency map\n data = np.empty(bad_pixels.shape)\n data[bad_pixels] = np.nan\n data[~bad_pixels] = Y_observation\n class_map = data.copy()\n # Probability maps\n probability_maps = {}\n for i, c in enumerate(le.inverse_transform(clf.classes_)):\n data = np.empty(bad_pixels.shape)\n data[bad_pixels] = np.nan\n data[~bad_pixels] = Y_observation_prob[:, i]\n probability_maps[c] = data.copy()\n\n return class_map, probability_maps, clf, test_error", "def optimize_xgb(X_train, y_train, max_evals=10, cv=None, scorer='neg_mean_squared_error', seed=42):\n assert cv is not None\n\n space = {\n \"n_estimators\": hp.quniform(\"n_estimators\", 100, 1000, 10),\n \"max_depth\": hp.quniform(\"max_depth\", 1, 8, 1),\n \"learning_rate\": hp.loguniform(\"learning_rate\", -5, 1),\n \"subsample\": hp.uniform(\"subsample\", 0.8, 1),\n \"gamma\": hp.quniform(\"gamma\", 0, 100, 1)\n }\n\n objective_fn = partial(train_xgb,\n X_train=X_train, y_train=y_train, \n scorer=scorer, \n cv=cv,\n seed=seed)\n\n trials = Trials()\n best = fmin(fn=objective_fn,\n space=space,\n algo=tpe.suggest,\n max_evals=max_evals,\n trials=trials)\n\n # evaluate the best model on the test set\n return best, trials", "def get_k_best(data_dict, features_list, k):\n \n data = featureFormat(data_dict, features_list)\n labels, features = targetFeatureSplit(data) \n\n k_best = SelectKBest(k=k).fit(features, labels)\n scores = k_best.scores_\n pairs = zip(scores, features_list[1:])\n pairs.sort(reverse = True)\n pairs_sorted = [(v2,v1) for v1,v2 in pairs]\n k_best_features = dict(pairs_sorted[:k])\n pprint(pairs_sorted)\n return k_best_features", "def find_best_features(year, features, sex, age, heavy):\r\n print 'find_best_features(year=%d,features=%s,sex=%s,age=%s,heavy=%s)' % (year, features, sex,\r\n age, heavy)\r\n X, y, keys = getXy_by_features(year, features, sex, age)\r\n title = 'features=%s,sex=%s,age=%s,year=%d' % (features,sex,age,year) \r\n results, n_samples = select_features.get_most_predictive_feature_set(title, X, y, keys, heavy)\r\n return results, n_samples, keys", "def extractBestAlgorithms(args = algs2009, f_factor=2,\n target_lb=1e-8, target_ub=1e22):\n\n # TODO: use pproc.TargetValues class as input target values\n # default target values:\n targets = pproc.TargetValues(\n 10**np.arange(np.log10(max((1e-8, target_lb))),\n np.log10(target_ub) + 1e-9, 0.2))\n # there should be a simpler way to express this to become the\n # interface of this function\n\n print 'Loading algorithm data from given algorithm list...\\n' \n\n verbose = True\n dsList, sortedAlgs, dictAlg = pproc.processInputArgs(args, verbose=verbose)\n\n print 'This may take a while (depending on the number of algorithms)'\n\n selectedAlgsPerProblem = {}\n for f, i in pproc.dictAlgByFun(dictAlg).iteritems():\n for d, j in pproc.dictAlgByDim(i).iteritems():\n selectedAlgsPerProblemDF = []\n best = BestAlgSet(j)\n \n for i in range(0, len(best.target)):\n t = best.target[i]\n # if ((t <= target_ub) and (t >= target_lb)):\n if toolsstats.in_approximately(t,\n targets((f, d), discretize=True)):\n # add best for this target:\n selectedAlgsPerProblemDF.append(best.algs[i])\n \n # add second best or all algorithms that have an ERT\n # within a factor of f_factor of the best:\n secondbest_ERT = np.infty\n secondbest_str = ''\n secondbest_included = False \n for astring in j:\n currdictalg = 
dictAlg[astring].dictByDim()\n if currdictalg.has_key(d):\n curralgdata = currdictalg[d][f-1] \n currERT = curralgdata.detERT([t])[0]\n if (astring != best.algs[i]):\n if (currERT < secondbest_ERT):\n secondbest_ERT = currERT\n secondbest_str = astring\n if (currERT <= best.detERT([t])[0] * f_factor):\n selectedAlgsPerProblemDF.append(astring)\n secondbest_included = True\n if not (secondbest_included) and (secondbest_str != ''):\n selectedAlgsPerProblemDF.append(secondbest_str)\n \n if len(selectedAlgsPerProblemDF) > 0:\n selectedAlgsPerProblem[(d, f)] = selectedAlgsPerProblemDF\n \n print 'pre-processing of function', f, 'done.' \n \n print 'loading of best algorithm(s) data done.'\n \n countsperalgorithm = {}\n for (d, f) in selectedAlgsPerProblem:\n print 'dimension:', d, ', function:', f\n setofalgs = set(selectedAlgsPerProblem[d,f])\n \n # now count how often algorithm a is best for the extracted targets\n for a in setofalgs:\n # use setdefault to initialize with zero if a entry not existant:\n countsperalgorithm.setdefault((d, a), 0) \n countsperalgorithm[(d,a)] += selectedAlgsPerProblem[d,f].count(a)\n \n selectedalgsperdimension = {}\n for (d,a) in sorted(countsperalgorithm):\n if not selectedalgsperdimension.has_key(d):\n selectedalgsperdimension[d] = []\n selectedalgsperdimension[d].append((countsperalgorithm[(d,a)], a))\n \n for d in sorted(selectedalgsperdimension):\n print d, 'D:'\n for (count, alg) in sorted(selectedalgsperdimension[d], reverse=True):\n print count, alg\n print '\\n'\n \n \n print \" done.\"\n \n return selectedalgsperdimension", "def get_bests(self):\n set_names = [\"training\", \"hp_selection\", \"validation\"]\n run_tec_conf_set = recursivedict()\n validation = self._campaign_configuration['General']['validation']\n hp_selection = self._campaign_configuration['General']['hp_selection']\n if (validation, hp_selection) in {(\"All\", \"All\"), (\"Extrapolation\", \"All\"), (\"All\", \"HoldOut\"), (\"HoldOut\", \"All\"), (\"HoldOut\", \"HoldOut\"), (\"Extrapolation\", \"HoldOut\")}:\n # For each run, for each technique the best configuration\n run_tec_best_conf = recursivedict()\n\n # Hyperparameter search\n for conf in self._exp_confs:\n run = int(conf.get_signature()[0].replace(\"run_\", \"\"))\n technique = conf.technique\n run_tec_conf_set[run][technique][str(conf.get_signature()[4:])] = conf.mapes\n # First experiment for this technique or better than the current best\n if technique not in run_tec_best_conf[run] or conf.mapes[\"hp_selection\"] < run_tec_best_conf[run][technique].mapes[\"hp_selection\"]:\n run_tec_best_conf[run][technique] = conf\n\n # Print results for each run\n for run in range(0, self._campaign_configuration['General']['run_num']):\n self._logger.info(\"-->Printing results for run %s\", str(run))\n overall_run_best = None\n # Print data of single techniques\n for technique in run_tec_best_conf[run]:\n temp = run_tec_best_conf[run][technique]\n self._logger.info(\"---Best result for %s - Configuration is %s - (Training MAPE is %f - HP Selection MAPE is %f) - Validation MAPE is %f\", technique, temp.get_signature()[4:], temp.mapes[\"training\"], temp.mapes[\"hp_selection\"], temp.mapes[\"validation\"])\n\n # Compute which is the best technique\n if not overall_run_best or temp.mapes[\"hp_selection\"] < overall_run_best.mapes[\"hp_selection\"]:\n overall_run_best = temp\n best_model_description = overall_run_best.print_model()\n self._logger.info(\"<--Overall best result is %s %s - (Training MAPE is %f - HP Selection MAPE is %f) - 
Validation MAPE is %f\", overall_run_best.get_signature()[3:], \"(\" + best_model_description + \")\" if best_model_description else \"\", overall_run_best.mapes[\"training\"], overall_run_best.mapes[\"hp_selection\"], overall_run_best.mapes[\"validation\"])\n\n elif (validation, hp_selection) in {(\"KFold\", \"All\"), (\"KFold\", \"HoldOut\")}:\n folds = float(self._campaign_configuration['General']['folds'])\n # For each run, for each fold, for each technique, the best configuration\n run_fold_tec_best_conf = recursivedict()\n\n # Hyperparameter search inside each fold\n for conf in self._exp_confs:\n run = int(conf.get_signature()[0].replace(\"run_\", \"\"))\n fold = int(conf.get_signature()[1].replace(\"f\", \"\"))\n technique = conf.technique\n if \"hp_selection\" not in run_tec_conf_set[run][technique][str(conf.get_signature_string()[4:])]:\n for set_name in set_names:\n run_tec_conf_set[run][technique][str(conf.get_signature_string()[4:])][set_name] = 0\n for set_name in set_names:\n run_tec_conf_set[run][technique][str(conf.get_signature_string()[4:])][set_name] = run_tec_conf_set[run][technique][str(conf.get_signature_string()[4:])][set_name] + conf.mapes[set_name] / folds\n # First experiment for this fold+technique or better than the current best\n if technique not in run_fold_tec_best_conf[run][fold] or conf.mapes[\"hp_selection\"] < run_fold_tec_best_conf[run][fold][technique].mapes[\"hp_selection\"]:\n run_fold_tec_best_conf[run][fold][technique] = conf\n\n # Aggregate different folds (only the value of the mapes)\n run_tec_set = recursivedict()\n for run in run_fold_tec_best_conf:\n for fold in run_fold_tec_best_conf[run]:\n for tec in run_fold_tec_best_conf[run][fold]:\n if \"hp_selection\" not in run_tec_set[run][technique]:\n for set_name in set_names:\n run_tec_set[run][tec][set_name] = 0\n for set_name in set_names:\n run_tec_set[run][tec][set_name] = run_fold_tec_best_conf[run][fold][tec].mapes[set_name]\n # Print results for each run\n for run in range(0, self._campaign_configuration['General']['run_num']):\n self._logger.info(\"Printing results for run %s\", str(run))\n overall_run_best = ()\n # Print data of single techniques\n for technique in run_tec_set[run]:\n self._logger.info(\"---Best result for %s - (Training MAPE is %f - HP Selection MAPE is %f) - Validation MAPE is %f\", technique, run_tec_set[run][technique][\"training\"], run_tec_set[run][technique][\"hp_selection\"], run_tec_set[run][technique][\"validation\"])\n\n # Compute which is the best technique\n if not overall_run_best or run_tec_set[run][technique][\"hp_selection\"] < overall_run_best[1][\"hp_selection\"]:\n overall_run_best = (technique, run_tec_set[run][technique])\n\n self._logger.info(\"---Overall best result is %s - (Training MAPE is %f - HP Selection MAPE is %f) - Validation MAPE is %f\", overall_run_best[0], overall_run_best[1][\"training\"], overall_run_best[1][\"hp_selection\"], overall_run_best[1][\"validation\"])\n\n # Overall best will contain as first argument the technique with the best (across runs) average (across folds) mape on validation; now we consider on all the runs and on all the folds the configuraiton of this technique with best validation mape\n\n elif (validation, hp_selection) in {(\"All\", \"KFold\"), (\"HoldOut\", \"KFold\"), (\"Extrapolation\", \"KFold\")}:\n folds = float(self._campaign_configuration['General']['folds'])\n # For each run, for each technique, for each configuration, the aggregated mape\n run_tec_conf_set = recursivedict()\n\n # Hyperparameter 
search aggregating over folders\n for conf in self._exp_confs:\n run = int(conf.get_signature()[0].replace(\"run_\", \"\"))\n fold = int(conf.get_signature()[2].replace(\"f\", \"\"))\n technique = conf.technique\n configuration = str(conf.get_signature()[4:])\n if \"hp_selection\" not in run_tec_conf_set[run][technique][configuration]:\n for set_name in set_names:\n run_tec_conf_set[run][technique][configuration][set_name] = 0\n for set_name in set_names:\n run_tec_conf_set[run][technique][configuration][set_name] = run_tec_conf_set[run][technique][configuration][set_name] + conf.mapes[set_name] / folds\n\n # Select the best configuration for each technique across different folders\n run_tec_best_conf = recursivedict()\n for run in run_tec_conf_set:\n for tec in run_tec_conf_set[run]:\n for conf in run_tec_conf_set[run][tec]:\n if tec not in run_tec_best_conf[run] or run_tec_conf_set[run][tec][conf][\"hp_selection\"] < run_tec_best_conf[run][tec][1][\"hp_selection\"]:\n run_tec_best_conf[run][tec] = (conf, run_tec_conf_set[run][tec][conf])\n\n # Print results for each run\n for run in range(0, self._campaign_configuration['General']['run_num']):\n self._logger.info(\"Printing results for run %s\", run)\n overall_run_best = () # (technique, configuration, mapes)\n # Print data of single techniques\n for technique in run_tec_best_conf[run]:\n temp = run_tec_best_conf[run][technique]\n self._logger.info(\"---Best result for %s - Configuration is %s - (Training MAPE is %f - HP Selection MAPE is %f) - Validation MAPE is %f\", technique, temp[0], temp[1][\"training\"], temp[1][\"hp_selection\"], temp[1][\"validation\"])\n\n # Compute which is the best technique\n if not overall_run_best or temp[1][\"hp_selection\"] < overall_run_best[2][\"hp_selection\"]:\n overall_run_best = (technique, temp[0], temp[1])\n\n self._logger.info(\"---Overall best result is %s %s - (Training MAPE is %f - HP Selection MAPE is %f) - Validation MAPE is %f\", overall_run_best[0], overall_run_best[1], overall_run_best[2][\"training\"], overall_run_best[2][\"hp_selection\"], overall_run_best[2][\"validation\"])\n\n elif (validation, hp_selection) in {(\"KFold\", \"KFold\")}:\n folds = float(self._campaign_configuration['General']['folds'])\n # For each run, for each external fold, for each technique, the aggregated mape\n run_efold_tec_conf_set = recursivedict()\n\n # Hyperparameter search aggregating over internal folders\n for conf in self._exp_confs:\n run = int(conf.get_signature()[0].replace(\"run_\", \"\"))\n ext_fold = int(conf.get_signature()[2].replace(\"f\", \"\"))\n technique = conf.technique\n configuration = str(conf.get_signature()[4:])\n if \"hp_selection\" not in run_tec_conf_set[run][technique][configuration]:\n for set_name in set_names:\n run_tec_conf_set[run][technique][configuration][set_name] = 0\n for set_name in set_names:\n run_tec_conf_set[run][technique][configuration][set_name] = run_tec_conf_set[run][technique][configuration][set_name] + (conf.mapes[set_name] / (folds * folds))\n if configuration not in run_efold_tec_conf_set[run][ext_fold][technique]:\n for set_name in set_names:\n run_efold_tec_conf_set[run][ext_fold][technique][configuration][set_name] = 0\n for set_name in set_names:\n run_efold_tec_conf_set[run][ext_fold][technique][configuration][set_name] = run_efold_tec_conf_set[run][ext_fold][technique][configuration][set_name] + (conf.mapes[set_name] / (folds * folds))\n\n # Select the best configuration for each technique in each external fold across different internal folders\n 
run_efold_tec_best_conf = recursivedict()\n for run in run_efold_tec_conf_set:\n for efold in run_efold_tec_conf_set[run]:\n for tec in run_efold_tec_conf_set[run][efold]:\n for conf in run_efold_tec_conf_set[run][efold][tec]:\n if conf not in run_efold_tec_best_conf[run][efold][tec] or run_efold_tec_conf_set[run][efold][tec][conf][\"hp_selection\"] < run_efold_tec_best_conf[run][efold][tec][1][\"hp_selection\"]:\n run_efold_tec_best_conf[run][efold][tec] = (conf, run_efold_tec_conf_set[run][efold][tec][conf], run_efold_tec_conf_set[run][efold][tec][conf])\n\n # Aggregate on external folds\n run_tec_set = recursivedict()\n for run in run_efold_tec_best_conf:\n for efold in run_efold_tec_best_conf[run]:\n for tec in run_efold_tec_best_conf[run][efold]:\n if \"hp_selection\" not in run_tec_set[run][tec]:\n for set_name in set_names:\n run_tec_set[run][tec][set_name] = 0\n for set_name in set_names:\n run_tec_set[run][tec][set_name] = run_tec_set[run][tec][set_name] + run_efold_tec_best_conf[run][efold][tec][1][set_name]\n\n # Print results for each run\n for run in range(0, self._campaign_configuration['General']['run_num']):\n self._logger.info(\"Printing results for run %s\", run)\n overall_run_best = ()\n # Print data of single techniques\n for technique in run_tec_set[run]:\n self._logger.info(\"---Best result for %s - (Training MAPE is %f - HP Selection MAPE is %f) - Validation MAPE is %f\", technique, run_tec_set[run][technique][\"training\"], run_tec_set[run][technique][\"hp_selection\"], run_tec_set[run][technique][\"validation\"])\n\n # Compute which is the best technique\n if not overall_run_best or run_tec_set[run][technique][\"hp_selection\"] < overall_run_best[1][\"hp_selection\"]:\n overall_run_best = (technique, run_tec_set[run][technique])\n\n self._logger.info(\"---Overall best result is %s - (Training MAPE is %f - HP Selection MAPE is %f) - Validation MAPE is %f\", overall_run_best[0], overall_run_best[1][\"training\"], overall_run_best[1][\"hp_selection\"], overall_run_best[1][\"validation\"])\n\n else:\n self._logger.error(\"Unexpected combination: %s\", str((validation, hp_selection)))\n sys.exit(1)\n best_confs = {}\n best_technique = None\n for conf in self._exp_confs:\n technique = conf.technique\n if technique not in best_confs or conf.mapes[\"validation\"] < best_confs[technique].mapes[\"validation\"]:\n best_confs[technique] = conf\n for technique in best_confs:\n if not best_technique or best_confs[technique].mapes[\"validation\"] < best_confs[best_technique].mapes[\"validation\"]:\n best_technique = technique\n if bool(self._campaign_configuration['General']['details']):\n for run in run_tec_conf_set:\n for tec in run_tec_conf_set[run]:\n for conf in run_tec_conf_set[run][tec]:\n assert \"hp_selection\" in run_tec_conf_set[run][tec][conf]\n assert \"validation\" in run_tec_conf_set[run][tec][conf], \"training MAPE not found for \" + str(run) + str(tec) + str(conf)\n self._logger.info(\"Run %s - Technique %s - Conf %s - Training MAPE %f - Test MAPE %f\", str(run), ec.enum_to_configuration_label[tec], str(conf), run_tec_conf_set[run][tec][conf][\"hp_selection\"], run_tec_conf_set[run][tec][conf][\"validation\"])\n return best_confs, best_technique", "def run_classification_models(train,test,metric_file_path,classes):\n metric_names = ['accuracy','weightedRecall','weightedPrecision']\n f = open(metric_file_path,'w')\n f.write('model,'+','.join(metric_names)+'\\n')\n name = 'Logistic Regression'\n model = LogisticRegression()\n param_grid = ParamGridBuilder()\\\n 
.addGrid(model.regParam,[0,.25,.5]) \\\n .addGrid(model.elasticNetParam,[0,.25,.5])\\\n .build()\n model_cv = CrossValidator(\n estimator = model,\n estimatorParamMaps = param_grid,\n evaluator = MulticlassClassificationEvaluator(),\n numFolds = 3,\n seed = 7).fit(train)\n best_model = model_cv.bestModel\n print name\n print '\\t Best regParam (lambda): %.2f'%best_model._java_obj.getRegParam()\n print '\\t Best elasticNetparam (alpha): %.2f'%best_model._java_obj.getElasticNetParam()\n eval_model(f,name,model_cv,test,MulticlassClassificationEvaluator,metric_names)\n name = 'Decision Tree'\n model = DecisionTreeClassifier(seed=7)\n param_grid = ParamGridBuilder()\\\n .addGrid(model.maxDepth,[5,10,15]) \\\n .addGrid(model.maxBins,[8,16,32])\\\n .build()\n model_cv = CrossValidator(\n estimator = model,\n estimatorParamMaps = param_grid,\n evaluator = MulticlassClassificationEvaluator(),\n numFolds = 3,\n seed = 7).fit(train)\n best_model = model_cv.bestModel \n print name\n print '\\t Best maxDepth: %d'%best_model._java_obj.getMaxDepth()\n print '\\t Best maxBins: %d'%best_model._java_obj.getMaxBins()\n eval_model(f,name,model_cv,test,MulticlassClassificationEvaluator,metric_names)\n name = 'Random Forest'\n model = RandomForestClassifier(seed=7)\n param_grid = ParamGridBuilder()\\\n .addGrid(model.maxDepth,[5,10,15]) \\\n .addGrid(model.numTrees,[10,15,20])\\\n .build()\n model_cv = CrossValidator(\n estimator = model,\n estimatorParamMaps = param_grid,\n evaluator = MulticlassClassificationEvaluator(),\n numFolds = 3,\n seed = 7).fit(train)\n best_model = model_cv.bestModel \n print name\n print '\\t Best maxDepth: %d'%best_model._java_obj.getMaxDepth()\n print '\\t Best numTrees: %d'%best_model._java_obj.getNumTrees()\n eval_model(f,name,model_cv,test,MulticlassClassificationEvaluator,metric_names)\n name = 'One vs Rest'\n model = OneVsRest(classifier=LogisticRegression()).fit(train)\n print name\n eval_model(f,name,model,test,MulticlassClassificationEvaluator,metric_names)\n name = 'Naive Bayes'\n model = NaiveBayes()\n param_grid = ParamGridBuilder()\\\n .addGrid(model.smoothing,[.5,1,2])\\\n .build()\n model_cv = CrossValidator(\n estimator = model,\n estimatorParamMaps = param_grid,\n evaluator = MulticlassClassificationEvaluator(),\n numFolds = 3,\n seed = 7).fit(train)\n best_model = model_cv.bestModel \n print name\n print '\\t Best smoothing: %.1f'%best_model._java_obj.getSmoothing()\n eval_model(f,name,model_cv,test,MulticlassClassificationEvaluator,metric_names)\n if classes == 2:\n name = 'Gradient Boosted Trees'\n model = GBTClassifier(seed=7).fit(train)\n print name\n eval_model(f,name,model,test,MulticlassClassificationEvaluator,metric_names)\n name = 'Linear Support Vector Machine'\n model = LinearSVC().fit(train)\n print name\n eval_model(f,name,model,test,MulticlassClassificationEvaluator,metric_names) \n f.close()", "def get_best_sensitivity_metrics(self,\n verbose: bool = True) -> Tuple[int, int]:\n sensitivity_scores = list()\n for i in self.search_space:\n classes = self.convert_classes(threshold=i)\n tn, fp, fn, tp = confusion_matrix(self.y_true, classes).ravel()\n sensitivity = tp / (tp + fn)\n sensitivity_scores.append(sensitivity)\n best_sensitivity_score, best_sensitivity_threshold = self._get_best_metrics(\n metric_type='sensitivity_score',\n scores=sensitivity_scores,\n greater_is_better=True,\n verbose=verbose\n )\n return best_sensitivity_score, best_sensitivity_threshold", "def find_best_k(X_train, y_train, X_test, y_test, min_k=1, max_k=25):\n best_k = 0\n 
best_score = 0.0\n for k in range(min_k, max_k+1, 2):\n knn = KNeighborsClassifier(n_neighbors=k)\n knn.fit(X_train, y_train)\n preds = knn.predict(X_test)\n f1 = f1_score(y_test, preds)\n if f1 > best_score:\n best_k = k\n best_score = f1\n print(\"Best Value for k: {}\".format(best_k))\n print(\"F1-Score: {}\".format(best_score))", "def _filter_to_most_specific(self, graph, classlist):\n candidates = {}\n for brickclass in classlist:\n sc_query = f\"SELECT ?subclass WHERE {{ ?subclass rdfs:subClassOf+ <{brickclass}> }}\"\n subclasses = set([x[0] for x in graph.query(sc_query)])\n # if there are NO subclasses of 'brickclass', then it is specific\n if len(subclasses) == 0:\n candidates[brickclass] = 0\n continue\n # 'subclasses' are the subclasses of 'brickclass'. If any of these appear in\n # 'classlist', then we know that 'brickclass' is not the most specific\n intersection = set(classlist).intersection(subclasses)\n if len(intersection) == 1 and brickclass in intersection:\n candidates[brickclass] = 1\n else:\n candidates[brickclass] = len(intersection)\n most_specific = None\n mincount = float(\"inf\")\n for specific, score in candidates.items():\n if score < mincount:\n most_specific = specific\n mincount = score\n return most_specific", "def loss(self, class_weights):\n losses = self.model_args.get('loss', 'categorical_crossentropy')\n\n if type(losses) is str:\n multi_loss = False\n losses = {losses: 1.0}\n elif type(losses) is dict:\n multi_loss = True\n\n if class_weights is not None:\n class_weights = tf.convert_to_tensor(class_weights, dtype=tf.float32)\n\n # custom 'ordinal' loss option\n if 'ordinal_squared_error' in losses.keys():\n k = float(self.data.num_classes)\n a = tf.expand_dims(tf.range(0, k, dtype=tf.float32), axis=-1)\n k_factor = tf.constant((k+1)/k, shape=[1,1], name='k_factor')\n min_regr = tf.constant(-0.5, shape=[1,1], name='min_regression_value')\n\n def ordinal_loss(y_true, y_pred):\n y_estimate = tf.tensordot(y_pred, a, [[-1], [0]])\n y_estimate = k_factor * y_estimate + min_regr # scale to range [-0.5, k+0.5]\n y_values = tf.cast(tf.argmax(y_true, -1), dtype=y_estimate.dtype)\n\n min_class = tf.convert_to_tensor(0.0, dtype=y_estimate.dtype)\n max_class = tf.convert_to_tensor( k, dtype=y_estimate.dtype)\n sqr_error = tf.square(y_values - tf.squeeze(tf.clip_by_value(y_estimate, min_class, max_class)))\n\n if class_weights is not None:\n weight_vec = tf.gather(class_weights, tf.argmax(y_true, -1))\n sqr_error *= weight_vec\n\n return tf.reduce_mean(sqr_error)\n\n if not multi_loss:\n return ordinal_loss\n\n if 'categorical_crossentropy' in losses.keys():\n # TODO: option for clipping?\n def categorical_loss(y_true, y_pred):\n epsilon_ = tf.convert_to_tensor(1e-5, dtype=y_pred.dtype)\n y_pred = tf.clip_by_value(y_pred, epsilon_, 1. 
- epsilon_)\n\n cross_entropy = -tf.reduce_sum(y_true * tf.math.log(y_pred), axis=-1)\n\n if class_weights is not None:\n weight_vec = tf.gather(class_weights, tf.argmax(y_true, -1))\n cross_entropy *= weight_vec\n\n return cross_entropy\n\n if not multi_loss:\n return categorical_loss\n\n # weighted multi-loss option\n if multi_loss:\n def weighted_loss(y_true, y_pred):\n ord_weight = tf.constant(losses['ordinal_squared_error'], shape=[1,1])\n cat_weight = tf.constant(losses['categorical_crossentropy'], shape=[1,1])\n loss = ord_weight * ordinal_loss(y_true, y_pred) \\\n + cat_weight * categorical_loss(y_true, y_pred)\n return loss\n return weighted_loss", "def model_performance(yname):\n y, features, X = get_model_features(yname)\n rfr = RandomForestRegressor(max_features=None, warm_start=True,\n oob_score=True, random_state=RANDOM_STATE)\n from scipy.stats import spearmanr, pearsonr\n min_estimators = 50\n max_estimators = 500\n rfr_error = OrderedDict()\n for i in range(min_estimators, max_estimators + 1):\n rfr.set_params(n_estimators=i)\n rfr.fit(X, y)\n oob = rfr.oob_score_\n y_pred = rfr.oob_prediction_\n sp = spearmanr(y, y_pred)\n pe = pearsonr(y, y_pred)\n feat_imp = rfr.feature_importances_\n rfr_error[i] = {'error':oob, \n 'spearman': sp, \n 'pearson': pe, \n 'feat_imp': feat_imp}\n print(i, '\\n\\toob: ', oob, '\\n\\tspearman: ', sp.correlation)\n print('\\tpearson: ', pe[0])\n print()\n\n#*************************** Plots *************************************************\n\n if yname == \"net\":\n color = \"orange\"\n y_label = \"Net Construction\"\n elif yname == \"scale_const\":\n color = \"green\"\n y_label = \"Scaled Construction Value\"\n elif yname == \"scale_demo\":\n color = \"purple\"\n y_label = \"Scaled Demolition Value\"\n x = list(rfr_error.keys())\n y_error = [rfr_error[k]['error'] for k in rfr_error.keys()]\n y_sp = [rfr_error[k]['spearman'].correlation for k in rfr_error.keys()]\n y_pe = [rfr_error[k]['pearson'][0] for k in rfr_error.keys()]\n plt.figure(figsize=(12,8))\n plt.subplot(311)\n plt.plot(x, y_error, label=\"OOB Accuracy\", color=color, linewidth=2.25)\n plt.ylabel(\"OOB\", fontsize=16)\n plt.yticks(fontsize=12) \n plt.tight_layout()\n plt.title(\"OOB Accuracy\", fontsize=18) \n plt.subplot(312)\n plt.plot(x, y_sp, label=\"Spearman's R\", color=color, linewidth=2.25)\n #plt.xlabel(\"n_estimators\")\n plt.ylabel(\"R\", fontsize=16)\n plt.yticks(fontsize=12) \n plt.tight_layout(pad=1.75)\n plt.title(\"Spearman's Rho\", fontsize=18)\n plt.subplot(313)\n plt.plot(x, y_pe, color=color, linewidth=2.25)\n plt.ylabel(\"p\", fontsize=16)\n plt.yticks(fontsize=12) \n plt.xlabel(\"n_estimators\")\n plt.tight_layout(pad=1.75)\n plt.title(\"Pearson's R\", fontsize=18)\n plt.savefig(fig_home+\"/model_performance_{}\".format(yname))\n \n\n #Y actual vs Y predicted\n x = rfr.oob_prediction_\n m, b = np.polyfit(x, y, 1)\n avg_error = np.average(y_error)\n# x_label_pos = np.percentile(rfr.oob_prediction_, 40)\n #y_label_pos = np.percentile(y, 99)\n error_str = \"Average OOB Accuracy\\n{:.{prec}f}\".format(avg_error, prec=3)\n fig = plt.figure(figsize=(12,8))\n ax = fig.add_subplot(111)\n ax.scatter(rfr.oob_prediction_, y, color=color, s=25)\n ax.annotate(error_str, xy=(0,0), xytext=(0.2, 0.8), \n fontsize=16, ha=\"center\", va=\"center\", textcoords=\"axes fraction\")\n #plt.figure(figsize=(12,8))\n #plt.scatter(rfr.oob_prediction_, y, color=color, s=25)\n #plt.text(, error_str, ha=\"center\", va=\"center\", fontsize=16)\n plt.ylabel(y_label, fontsize=16)\n 
plt.yticks(fontsize=12)\n plt.xlabel('OOB Prediction', fontsize=16)\n plt.xticks(fontsize=12)\n plt.title('Y-Predicted vs Y-Actual', fontsize=18)\n plt.plot(x, m*x+ b, '-', color='black')\n plt.savefig(fig_home+\"/prediction_vs_actual_{}\".format(yname))", "def get_worst_score(nssms, truth_ccm, scoring_func, truth_ad=None, subchallenge=\"SC2\", larger_is_worse=True):\n\n if subchallenge is 'SC3':\n if truth_ad is None:\n raise ValueError('truth_ad must not be None when scoring SC3')\n else:\n if larger_is_worse:\n return max(get_bad_score(nssms, truth_ccm, scoring_func, truth_ad, 'OneCluster', subchallenge),\n get_bad_score(nssms, truth_ccm, scoring_func, truth_ad, 'NCluster', subchallenge))\n else:\n return min(get_bad_score(nssms, truth_ccm, scoring_func, truth_ad, 'OneCluster', subchallenge),\n get_bad_score(nssms, truth_ccm, scoring_func, truth_ad, 'NCluster', subchallenge))\n\n elif subchallenge is 'SC2': \n if larger_is_worse:\n return max(get_bad_score(nssms, truth_ccm, scoring_func, truth_ad, 'OneCluster', subchallenge),\n get_bad_score(nssms, truth_ccm, scoring_func, truth_ad, 'NCluster', subchallenge))\n else:\n return min(get_bad_score(nssms, truth_ccm, scoring_func, truth_ad, 'OneCluster', subchallenge),\n get_bad_score(nssms, truth_ccm, scoring_func, truth_ad, 'NCluster', subchallenge))\n\n else:\n raise ValueError('Subchallenge must be one of SC2 or SC3')", "def personal_best(scores: list) -> int:\n return max(scores)", "def _resolve_objective_function(self) -> Scorer:\n\n objective = self.cfg_.objective\n if objective == 'accuracy':\n return make_scorer(ex.accuracy_score_round_inputs)\n if objective.startswith('precision'):\n if objective.endswith('macro'):\n return make_scorer(ex.precision_score_round_inputs,\n average='macro')\n elif objective.endswith('weighted'):\n return make_scorer(ex.precision_score_round_inputs,\n average='weighted')\n if objective.startswith('f1'):\n if objective.endswith('macro'):\n return make_scorer(ex.f1_score_round_inputs,\n average='macro')\n elif objective.endswith('weighted'):\n return make_scorer(ex.f1_score_round_inputs,\n average='weighted')\n elif objective.endswith('least_frequent'):\n return make_scorer(ex.f1_score_least_frequent_round_inputs)\n if objective == 'pearson_r':\n return make_scorer(pearson)\n if objective == 'spearman':\n return make_scorer(spearman)\n if objective == 'kendall_tau':\n return make_scorer(kendall_tau)\n if objective.startswith('uwk'):\n if objective == 'uwk':\n return make_scorer(ex.kappa_round_inputs)\n return make_scorer(ex.kappa_round_inputs,\n allow_off_by_one=True)\n if objective.startswith('lwk'):\n if objective == 'lwk':\n return make_scorer(ex.kappa_round_inputs,\n weights='linear')\n return make_scorer(ex.kappa_round_inputs,\n weights='linear',\n allow_off_by_one=True)\n if objective.startswith('qwk'):\n if objective == 'qwk':\n return make_scorer(ex.kappa_round_inputs,\n weights='quadratic')\n return make_scorer(ex.kappa_round_inputs,\n weights='quadratic',\n allow_off_by_one=True)\n return objective", "def ratings_best(Y, titles):\n ratings_lists = data_proc.ratings_for_movies(Y)\n\n # Possible rating values\n rating_values = list(range(1, 6))\n\n # avg_ratings[i] = average rating of movie i\n # avg_ratings = [float(sum(ratings_list)) / len(ratings_list)\n # for ratings_list in ratings_lists]\n # Disfavor highly rated movies with few ratings:\n # http://stats.stackexchange.com/a/6361\n avg_ratings = [float(sum(ratings_list) + sum(rating_values))\n / (len(ratings_list) + len(rating_values))\n for 
ratings_list in ratings_lists]\n best_ids = data_proc.top_indices(avg_ratings, 10)\n best_titles = [titles[id] for id in best_ids]\n\n # ratings_table[i][j] is the number of rating_values[j] ratings for\n # movie with id i\n ratings_table = \\\n [[sum([rating_list[1] == id and rating_list[2] == rating_val\n for rating_list in Y])\n for rating_val in rating_values]\n for id in best_ids]\n\n sns.heatmap(ratings_table,\n annot=True, fmt='d', # Annotate data values\n yticklabels=best_titles, xticklabels=rating_values)\n plt.yticks(rotation=0) # Rotate yticks horizontally\n plt.xlabel('Rating')\n plt.title('Ratings of highest rated movies')\n plt.tight_layout() # Make room for long ytick labels\n plt.show()", "def strategy_best(cookies, cps, history, time_left, build_info):\n info = build_info.clone()\n best_choice = None\n best_ratio = 0.0\n choices = info.build_items()\n for item in choices:\n ratio = max_return(cookies, cps, time_left, info.get_cost(item), info.get_cps(item))\n\n if ratio >= best_ratio:\n best_choice = item\n best_ratio = ratio\n print best_ratio\n\n if (time_left * cps + cookies) < info.get_cost(best_choice):\n return None\n\n return best_choice", "def TrainSTRidge(\n TrainR,\n TrainY,\n TestR,\n TestY,\n lam,\n d_tol,\n maxit=200,\n STR_iters=200,\n l0_penalty=0.0,\n print_best_tol=False,\n):\n from sklearn.linear_model import Ridge\n\n D = TrainR.shape[1]\n\n # Set up the initial tolerance and l0 penalty\n d_tol = float(d_tol)\n tol = d_tol\n if l0_penalty == None:\n l0_penalty = 0.001 * np.linalg.cond(TrainR)\n\n # Get the standard least squares estimator\n w = np.zeros((D, 1))\n w_best = Ridge(alpha=0.0, fit_intercept=False).fit(TrainR, TrainY).coef_.T\n err_best = np.linalg.norm(\n TestY - TestR.dot(w_best), 2\n ) + l0_penalty * np.count_nonzero(w_best)\n tol_best = 0\n\n errors = [\n np.linalg.norm(TestY - TestR.dot(w_best), 2)\n + l0_penalty * np.count_nonzero(w_best)\n ]\n tolerances = [0]\n\n # Now increase tolerance until test performance decreases\n for iter in range(maxit):\n\n # Get a set of coefficients and error\n w = STRidge(TrainR, TrainY, lam, STR_iters, tol)\n err = np.linalg.norm(TestY - TestR.dot(w), 2) + l0_penalty * np.count_nonzero(w)\n\n errors.append(err)\n\n # Has the accuracy improved?\n if err <= err_best:\n err_best = err\n w_best = w\n tol_best = tol\n tol = tol + d_tol\n\n else:\n tol = max([0, tol - 2 * d_tol])\n d_tol = 2 * d_tol / (maxit - iter)\n tol = tol + d_tol\n tolerances.append(tol)\n\n if print_best_tol:\n print(\"Optimal tolerance: \" + str(tol_best))\n return w_best", "def checkBest(data):\n global filename, hyp\n if data.newBest is True:\n bestReps = hyp['bestReps']\n rep = np.tile(data.best[-1], bestReps)\n fitVector = batchMpiEval(rep, gen=None, sp_count=None, sameSeedForEachIndividual=False)\n trueFit = np.mean(fitVector)\n if trueFit > data.best[-2].fitness: # Actually better!\n data.best[-1].fitness = trueFit\n data.fit_top[-1] = trueFit\n data.bestFitVec = fitVector\n else: # Just lucky!\n prev = hyp['save_mod']\n data.best[-prev:] = data.best[-prev]\n data.fit_top[-prev:] = data.fit_top[-prev]\n data.newBest = False\n return data" ]
[ "0.65992683", "0.6348734", "0.60387164", "0.56676745", "0.5634381", "0.5621993", "0.55739546", "0.55624396", "0.5539638", "0.5454468", "0.5443642", "0.5441887", "0.5408296", "0.53730357", "0.531757", "0.526106", "0.5247903", "0.52269906", "0.52153784", "0.52098244", "0.5195273", "0.5189636", "0.5176431", "0.51694185", "0.51669157", "0.5161891", "0.5161404", "0.514498", "0.5135008", "0.51219034", "0.5068449", "0.50561", "0.5052308", "0.5043046", "0.50289357", "0.5003854", "0.50014025", "0.49825224", "0.4955453", "0.4949262", "0.49486923", "0.49378467", "0.49315742", "0.49255973", "0.49242318", "0.49228537", "0.4922428", "0.4916493", "0.49158946", "0.4914703", "0.49145204", "0.49067807", "0.49056584", "0.49021688", "0.488893", "0.48840183", "0.48811752", "0.4880468", "0.48802647", "0.48734385", "0.48685524", "0.48616862", "0.48589805", "0.48539135", "0.48443264", "0.48385137", "0.48329717", "0.48329085", "0.48246083", "0.48217314", "0.48186052", "0.4812012", "0.4811628", "0.48090985", "0.48074996", "0.48074996", "0.48064607", "0.47994864", "0.47983712", "0.47947732", "0.4793627", "0.47853637", "0.4783375", "0.47761598", "0.47715923", "0.4769894", "0.47691742", "0.47595355", "0.47569793", "0.47569636", "0.47555375", "0.47546968", "0.47453007", "0.47378188", "0.47376034", "0.4734669", "0.472617", "0.47256964", "0.47253317", "0.47234043" ]
0.80622786
0
Given a classifier's error rate (a number), returns the voting power (aka alpha, or coefficient) for that classifier.
def calculate_voting_power(error_rate):
    if error_rate == 0:
        return INF
    if error_rate == 1:
        return -INF
    return 0.5*ln(make_fraction(1-error_rate, error_rate))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def exponential_vote(score, category):\n status = \"\"\n\n try:\n max_vote = constants.MAX_VOTE[category]\n except:\n max_vote = constants.MAX_TASK_REQUEST\n\n else:\n power = constants.EXP_POWER\n weight = pow(\n score / 100.0,\n power - (score / 100.0 * (power - 1.0))) * max_vote\n\n return float(weight)", "def priority(self, error):\n\n return (error + self.epsilon) ** self.alpha", "def get_alpha(self, error_rate, func='default'):\n return 0.5 * np.log((1. - error_rate) / error_rate)", "def k(rating):\n if rating < 100: return 40\n if rating < 200: return 20\n return 10", "def penaltyFactor(m):\n \"\"\" m in GeV \"\"\"\n if m*m>0.71:\n return math.pow(m*m/0.71,-4)\n else:\n return 1", "def error_rate(predictions, labels):\n return 100.0 - (\n 100.0 *\n np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1)) /\n predictions.shape[0])", "def error_rate(predictions, labels):\n return 100.0 - (\n 100.0 *\n np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1)) /\n predictions.shape[0])", "def error_rate(predictions, labels):\n return 100.0 - (100*(np.sum(predictions == labels)/float(predictions.shape[0]*predictions.shape[1])))", "def error_rate(predictions, labels):\n return 100.0 - (\n 100.0 *\n np.sum(np.argmax(predictions, 1) == labels) /\n predictions.shape[0])", "def error_rate(predictions, labels):\n return 100.0 - (\n 100.0 *\n np.sum(np.argmax(predictions, 1) == labels) /\n predictions.shape[0])", "def precision(classifier_output, true_labels):\n\n # TODO: finish this.\n conf_matrix = confusion_matrix(classifier_output, true_labels)\n return conf_matrix[0][0]/(conf_matrix[0][0] + conf_matrix[1][0])", "def error_rate(predictions, labels):\n return 100.0 - (\n 100.0 *\n numpy.sum(numpy.argmax(predictions, 1) == labels) /\n predictions.shape[0])", "def get_exponential_detection_thresholds():\n \n m = utils.MAX_DETECTION_THRESHOLD\n n = utils.NUM_DETECTION_THRESHOLDS\n y = np.exp(np.log(m) / n)\n return y ** np.arange(1, n + 1)", "def Probability(rating1, rating2):\n return 1.0 * 1.0 / (1 + 1.0 * math.pow(10, 1.0 * (rating1 - rating2) / 400))", "def calculate_perplexity(loss):\n return math.exp(float(loss)) if loss < 300 else float(\"inf\")", "def DRate_j(eta,Pap,Pec,exp_loss_jt):\n return (1 + Pap)*(1 - (1 - 2*Pec)*exp_loss_jt)", "def power(self):\r\n return self.model * self.percent / 100", "def test_error_rate(self):\n # For the penalty, the default loss is hinge.\n expected_signed_penalty_labels = (self._penalty_labels > 0.0) * 2.0 - 1.0\n expected_penalty_numerator = np.sum(\n np.maximum(\n 0.0,\n 1.0 - expected_signed_penalty_labels * self._penalty_predictions) *\n self._penalty_weights * self._penalty_predicate)\n expected_penalty_denominator = np.sum(\n self._penalty_weights * self._penalty_predicate)\n expected_penalty_value = (\n expected_penalty_numerator / expected_penalty_denominator)\n\n # For the constraint, the default loss is zero-one.\n expected_signed_constraint_labels = (\n (self._constraint_labels > 0.0) * 2.0 - 1.0)\n expected_constraint_numerator = np.sum(\n (0.5 * (1.0 - expected_signed_constraint_labels * np.sign(\n self._constraint_predictions))) * self._constraint_weights *\n self._constraint_predicate)\n expected_constraint_denominator = np.sum(\n self._constraint_weights * self._constraint_predicate)\n expected_constraint_value = (\n expected_constraint_numerator / expected_constraint_denominator)\n\n actual_expression = binary_rates.error_rate(self.context)\n self.check_rates(expected_penalty_value, expected_constraint_value,\n actual_expression)", 
"def exponentialLearningRate(base):\n def function(t):\n return base ** (t-1)\n return function", "def balanced_accuracy(self):\n return 0.5 * (self.sensitivity + self.specificity)", "def get_error_rates():\n return [ERR_RT * i for i in range(int((1 / ERR_RT) / 4))] # error up to 25%", "def error_coefficient(self,tree,mode='exact'):\n from numpy import dot\n from sympy import Rational, simplify\n code=elementary_weight_str(tree)\n A,b,c = self.A,self.b,self.c\n\n if A.dtype == object:\n exec('coeff = simplify({} - Rational(1, {}))'.format(code, tree.density()))\n else:\n exec(\"coeff = ({} - 1.0 / {})\".format(code, tree.density()))\n return locals()[\"coeff\"] / tree.symmetry()", "def bike_multiplier(bikelane, bicycle_pref):\n result = bicycle_pref * 0.8\n if bikelane=='Yes':\n return -result\n return 0.0", "def get_discounted_r(self, r, i):\n return self.gamma**i * r", "def learnign_rate_examples():\n #######\n bad_larning_rate = 0.1\n not_bad_learning_rate = 1e-4\n good_learning_rate = 1e-3\n #######\n return bad_larning_rate, not_bad_learning_rate, good_learning_rate", "def fv(rate, n_years):\n return pow(1 + rate, n_years)", "def realistic_error_rate(predictions, labels, predicted_hardness):\n # # print (predicted_hardness)\n # predicted_hardness = predicted_hardness / np.sum(predicted_hardness)\n # # print (np.argmax(predictions, 1) == labels)\n # # print (np.multiply(np.argmax(predictions, 1) == labels, np.squeeze(predicted_hardness)))\n # return 100.0 - 100 * np.sum(np.multiply(np.argmax(predictions, 1) == labels, np.squeeze(predicted_hardness)))\n # # return 100.0 - (\n # # 100.0 *\n # # np.sum(np.argmax(predictions, 1) == labels) /\n # # predictions.shape[0])\n print (np.sum(predicted_hardness))\n return 100.0 - 100 * (np.sum(np.multiply(np.argmax(predictions, 1) == labels, np.squeeze(predicted_hardness))) / np.sum(predicted_hardness))", "def simulate_power(self):\n if self.p_treatment - self.p_control < 0:\n thresh = 1 - self.alpha\n else:\n thresh = self.alpha\n\n try:\n p_crit = self.norm_null.ppf(1 - thresh)\n beta = self.norm_alt.cdf(p_crit)\n except:\n self.norm_distribution()\n p_crit = self.norm_null.ppf(1 - thresh)\n beta = self.norm_alt.cdf(p_crit)\n\n power = (1 - beta) if self.p_treatment > self.p_control else beta\n self.power = power\n\n return power", "def majority_vote():\n iris = datasets.load_iris()\n x_vals, y_vals = iris.data[50:, [1, 2]], iris.target[50:]\n labenc = LabelEncoder()\n y_vals = labenc.fit_transform(y_vals)\n x_train, x_test, y_train, y_test = train_test_split(x_vals, y_vals,\n test_size=0.5, random_state=1)\n\n clf1 = LogisticRegression(penalty='l2', C=0.001, random_state=0)\n clf2 = DecisionTreeClassifier(max_depth=1, criterion='entropy', random_state=0)\n clf3 = KNeighborsClassifier(n_neighbors=1, p=2, metric='minkowski')\n pipe1 = Pipeline([['sc', StandardScaler()], ['clf', clf1]])\n pipe3 = Pipeline([['sc', StandardScaler()], ['clf', clf3]])\n clf_labels = ['Logistic Regression', 'Decision Tree', 'KNN']\n\n # Majority Rule (hard) Voting\n mv_clf = MajorityVoteClassifier(classifiers=[pipe1, clf2, pipe3])\n\n clf_labels += ['Majority Voting']\n all_clf = [pipe1, clf2, pipe3, mv_clf]\n print('10-fold cross validation:\\n')\n for clf, label in zip(all_clf, clf_labels):\n scores = cross_val_score(estimator=clf, X=x_train, y=y_train, cv=10, scoring='roc_auc')\n print(\"ROC AUC: %0.2f (+/- %0.2f) [%s]\" % (scores.mean(), scores.std(), label))\n\n colors = ['black', 'orange', 'blue', 'green']\n linestyles = [':', '--', '-.', '-']\n for clf, label, 
clr, lin_style in zip(all_clf, clf_labels, colors, linestyles):\n # assuming the label of the positive class is 1\n y_pred = clf.fit(x_train, y_train).predict_proba(x_test)[:, 1]\n fpr, tpr, _ = roc_curve(y_true=y_test, y_score=y_pred)\n print(y_pred)\n roc_auc = auc(x=fpr, y=tpr)\n plt.plot(fpr, tpr, color=clr, linestyle=lin_style,\n label='%s (auc = %0.2f)' % (label, roc_auc))\n\n plt.legend(loc='lower right')\n plt.plot([0, 1], [0, 1], linestyle='--', color='gray', linewidth=2)\n\n plt.xlim([-0.1, 1.1])\n plt.ylim([-0.1, 1.1])\n plt.grid()\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.tight_layout()\n plt.savefig(IMG_PATH + 'roc.png', dpi=300)\n plt.close()\n\n stdc = StandardScaler()\n x_train_std = stdc.fit_transform(x_train)\n all_clf = [pipe1, clf2, pipe3, mv_clf]\n x_min = x_train_std[:, 0].min() - 1\n x_max = x_train_std[:, 0].max() + 1\n y_min = x_train_std[:, 1].min() - 1\n y_max = x_train_std[:, 1].max() + 1\n xxx, yyy = np.meshgrid(np.arange(x_min, x_max, 0.1), np.arange(y_min, y_max, 0.1))\n _, axarr = plt.subplots(nrows=2, ncols=2, sharex='col', sharey='row', figsize=(7, 5))\n for idx, clf, ttt in zip(product([0, 1], [0, 1]), all_clf, clf_labels):\n clf.fit(x_train_std, y_train)\n z_vals = clf.predict(np.c_[xxx.ravel(), yyy.ravel()])\n z_vals = z_vals.reshape(xxx.shape)\n axarr[idx[0], idx[1]].contourf(xxx, yyy, z_vals, alpha=0.3)\n axarr[idx[0], idx[1]].scatter(x_train_std[y_train == 0, 0], x_train_std[y_train == 0, 1],\n c='blue', marker='^', s=50)\n axarr[idx[0], idx[1]].scatter(x_train_std[y_train == 1, 0], x_train_std[y_train == 1, 1],\n c='red', marker='o', s=50)\n axarr[idx[0], idx[1]].set_title(ttt)\n plt.text(-3.5, -4.5, s='Sepal width [standardized]', ha='center', va='center', fontsize=12)\n plt.text(-10.5, 4.5, s='Petal length [standardized]', ha='center', va='center',\n fontsize=12, rotation=90)\n plt.tight_layout()\n plt.savefig(IMG_PATH + 'voting_panel.png', bbox_inches='tight', dpi=300)\n # print(mv_clf.get_params())\n params = {'decisiontreeclassifier__max_depth': [1, 2],\n 'pipeline-1__clf__C': [0.001, 0.1, 100.0]}\n grid = GridSearchCV(estimator=mv_clf, param_grid=params, cv=10, scoring='roc_auc')\n grid.fit(x_train, y_train)\n\n for params, mean_score, scores in grid.cv_results_:\n print(\"%0.3f+/-%0.2f %r\" % (mean_score, scores.std() / 2, params))\n print('Best parameters: %s' % grid.best_params_)\n print('Accuracy: %.2f' % grid.best_score_)", "def error_rate(self):\n\n\t\treturn theano.tensor.mean(theano.tensor.neq(\n\t\t\tself.get_symbolic_predicted_labels(),\n\t\t\tself.symbolic_output))", "def compute_perplexity(self,loss: float):\n return math.exp(loss)", "def average_error_to_weight(error):\r\n return error ** (-2)", "def __pow__(self, exponent):\n return self.runtime.pow(self, exponent)", "def eval_power(parse_result):\r\n # `reduce` will go from left to right; reverse the list.\r\n parse_result = reversed(\r\n [k for k in parse_result\r\n if isinstance(k, numbers.Number)] # Ignore the '^' marks.\r\n )\r\n # Having reversed it, raise `b` to the power of `a`.\r\n power = reduce(lambda a, b: b ** a, parse_result)\r\n return power", "def coefficient(self) -> float:\n ...", "def get_capacitor(self):\n cap = 0.5 * self.metric_.logdet()\n return cap", "def average_error_to_weight(error):\n return error ** (-2)", "def get_rating(mpg):\n if mpg < 14:\n return 1\n elif mpg < 15:\n return 2\n elif mpg < 17:\n return 3\n elif mpg < 20:\n return 4\n elif mpg < 24:\n return 5\n elif mpg < 27:\n return 6\n elif mpg < 31:\n return 
7\n elif mpg < 37:\n return 8\n elif mpg < 45:\n return 9\n return 10", "def calculate_error_rates(point_to_weight, classifier_to_misclassified):\n ans = {}\n for c in classifier_to_misclassified:\n misclassified = classifier_to_misclassified[c]\n ans[c] = 0\n for p in misclassified:\n ans[c] += point_to_weight[p]\n return ans", "def get_tp_score(val_loss, best_val_loss, num_classes=10):\n random_loss = math.log(num_classes)\n\n return (random_loss - val_loss) / (random_loss - best_val_loss)", "def approximate_loss(self, x, x_recon, v_dist, eps=1e-3):\n log_alpha = v_dist.logits\n bce = F.binary_cross_entropy(x_recon, x.view(-1, 784), reduction='sum')\n num_class = torch.tensor(self.latent_dim).float()\n probs = torch.softmax(log_alpha, dim=-1) # alpha_i / alpha_sum\n kl = torch.sum(probs * (num_class * (probs + eps)).log(), dim=-1).sum()\n return bce, kl", "def precision_score(y_true, y_pred):\n return ((y_true == 1) * (y_pred == 1)).sum() / (y_pred == 1).sum()", "def penalty(self):\n assert len(self.weights) == len(self.means), \"Dimensions!\"\n out = np.exp(self.data['riskfree'] * self.data['maturity'])\n for weight, mean in zip(self.weights, self.means):\n out -= weight * np.exp(mean * self.data['maturity'])\n return (out**2).mean()**.5", "def get_loss(fidelity):\n\n return 1 - np.sqrt(fidelity)", "def beta_r(r):\n return 1.", "def classProbs(observation, tree, classes):\n res = classify(observation, tree) #res = results\n total = sum(res.values())\n probs = []\n for c in classes:\n if c in res.keys():\n probs.append(float(res[c])/total)\n else:\n probs.append(0)\n return probs", "def get_crossover_accept_rate(n: int):\n return 1 / (2 ** n)", "def chance(currCost, newCost, currTemperature, maxTemperature, avg):\n temps = currTemperature / maxTemperature\n if newCost <= currCost:\n return 1\n try:\n p = math.exp(-8*(newCost-currCost)/(temps * avg))\n except: #catching divide by 0 errors lol\n p = 0\n return p", "def calculateErrorRate(numCorrect, numWrong):\n return np.round((numWrong)/(numCorrect+numWrong),3)", "def get_power(self) -> float:\n\n #:READ[n][:CHANnel[m]][:SCALar]: POWer[:DC]?\n return float(self._inst.query(\":READ:POW?\"))", "def precision(y_true, y_pred):\n tp = true_positive(y_true, y_pred)\n fp = false_positive(y_true, y_pred)\n precision = tp / (tp + fp)\n return precision", "def expect(self, rating, other_rating_val):\n # http://www.chess-mind.com/en/elo-system\n diff = float(other_rating_val) - float(rating.value)\n f_factor = 2 * self.beta # rating disparity\n return 1. / (1 + 10 ** (diff / f_factor))", "def expect(self, rating, other_rating_val):\n # http://www.chess-mind.com/en/elo-system\n diff = float(other_rating_val) - float(rating.value)\n f_factor = 2 * self.beta # rating disparity\n return 1. 
/ (1 + 10 ** (diff / f_factor))", "def get_penalty(state, action, winrate_predictor):\n if violate_rule(state, action):\n return -1 \n return 0", "def score_coefficient(self):\n return self.predictor._score_coefficient", "def kaiserord(ripple, width):\n A = abs(ripple) # in case somebody is confused as to what's meant\n if (A>50):\n beta = 0.1102*(A-8.7)\n elif (A>21):\n beta = 0.5842*(A-21)**0.4 + 0.07886*(A-21)\n else:\n beta = 0.0\n N = (A-8)/2.285/(pi*width)\n return ceil(N), beta", "def _lonely_coefficient(replies):\n return 1/((replies+1)**0.3)", "def __logprob__(self, cv, vsense):\n return 1.0 / (1.0 + np.exp(-np.dot(cv, vsense)))", "def __pow__(self, ???):", "def __rpow__(self, ???):", "def calculate_sensitivity(n_excesses, n_background, alpha):\n significance = n_excesses / np.sqrt(n_background * alpha)\n sensitivity = 5 / significance * 100 # percentage of Crab\n\n return sensitivity", "def get_duct_linear_heat_loss_coefficient() -> float:\n return 0.49", "def targetGateErrorRate(pGate, pRef, dimension):\n return ((1 - (pGate / pRef)) * (dimension - 1)) / dimension", "def error_rate_impurity(X_valid_encoded, X_valid, y_valid, k=18):\n errors = 0\n impurities = 0\n for i, x_enc in enumerate(X_valid_encoded):\n top_k_indices = ann.knn(x_enc, X_valid_encoded, k)\n label = y_valid[i]\n votes_against = 0\n for index in top_k_indices:\n if label != y_valid[index]:\n votes_against += 1\n if votes_against > math.ceil(k / 2):\n errors += 1\n impurities += votes_against\n error_rate = errors * 100. / X_valid.shape[0]\n impurity = impurities / (X_valid.shape[0] * k)\n return error_rate, impurity", "def tf_error_rate_impurity(X_encoded, X, y, k=18):\n err_impr_ = tf.map_fn(\n fn=lambda x: votes_and_error(x, X_encoded, y, k),\n elems=tf.concat(\n [X_encoded, tf.expand_dims(tf.cast(y, X_encoded.dtype), axis=1)], axis=1\n ),\n fn_output_signature=tf.TensorSpec(shape=(2, ), dtype=tf.int32))\n acc_err_impr_ = tf.reduce_mean(tf.cast(err_impr_, tf.float32), axis=0)\n impr_ = acc_err_impr_[0] / tf.cast(k, tf.float32)\n errr_ = acc_err_impr_[1] * 100.\n return errr_, impr_", "def get_coeff(self):\n return bernoulli(self.degree+1) / factorial(self.degree + 1)", "def getCoolerPower(self):\n return 90.0", "def get_power(self):\r\n x = self.query('POW?')\r\n if x == None: return None\r\n return float(x)", "def get_learning_rate():\n return 0.00001", "def multiplier(self) :\n\t\ttry :\n\t\t\treturn self._multiplier\n\t\texcept Exception as e:\n\t\t\traise e", "def get_multiplier(quality):\n\n if quality == \"low\":\n return 5\n elif quality == \"medium\":\n return 6\n elif quality == \"good\":\n return 7\n elif quality == \"high\":\n return 8\n return 6", "def max_error(y_true, y_pred):\n ...", "def precision(y_test, y_pred):\n\treturn precision_score(y_test, y_pred)", "def ensemble_accuracy(n_classifiers, accuracy):\n k_start = int(math.ceil(n_classifiers / 2.0))\n probs = [comb(n_classifiers, k) *\n accuracy**k *\n (1 - accuracy)**(n_classifiers - k)\n for k in range(k_start, n_classifiers + 1)]\n return sum(probs)", "def _eval_coeff(self, pt):\n val = 1\n for a in self.args:\n val *= a.coeff(pt)\n return val", "def get_failure_rate(self) -> float:\n return self.failurerate", "def rash_mult(condition, tolerance):\n return 1/(1+sympy.exp(condition/tolerance))", "def evaluate(labels, predictions):\n pos = 0\n neg = 0\n true_pos_rate = 0\n true_neg_rate = 0\n\n for i in range(len(labels)):\n if labels[i] == 1:\n pos += 1\n else:\n neg += 1\n if predictions[i] == labels[i]:\n if predictions[i] == 1:\n 
true_pos_rate += 1\n else:\n true_neg_rate += 1\n \n sensitivity = true_pos_rate / pos\n specificity = true_neg_rate / neg\n\n return (sensitivity, specificity)", "def kl_policy(self):\n r = .5 * (np.trace(np.dot(self.behavior_policy.precision, self.target_policy.noise))\n - self.behavior_policy.dim_A - np.log(np.linalg.det(self.target_policy.noise) / np.linalg.det(self.behavior_policy.noise)))\n\n dtheta = (self.behavior_policy.theta - self.target_policy.theta)\n da = np.dot(dtheta, self.mu.T)\n m = float(np.sum(\n da * np.dot(self.target_policy.precision, da))) / self.mu.shape[0]\n\n r += .5 * m\n return r", "def get_pred_class_probs(self, pred_mu, pred_sigma):", "def __pow__( self, power ):\r\n\t\tif ( power > 0 ):\r\n\t\t\treturn fraction( self.numerator ** power, self.denominator ** power )\r\n\t\tif ( power < 0 ):\r\n\t\t\treturn fraction( self.denominator ** abs( power ), self.numerator ** abs( power ) )\r\n\t\treturn 1", "def constantLearningRate(rate):\n def function(t):\n return rate\n return function", "def error_coeffs(self,p):\n import nodepy.rooted_trees as rt\n forest=rt.list_trees(p)\n err_coeffs=[]\n for tree in forest:\n err_coeffs.append(self.error_coefficient(tree))\n return snp.array(err_coeffs)", "def predictability(self):\n temp = self.probs\n for n in range(10):\n temp = temp.dot(temp)\n final = temp[0,:]\n #Let's assume that all words have unique initial letters\n probs = map(len, self.words)\n probs = array(probs)\n probs = (probs + self.probs.max(1)-1)/probs\n return sum(final*probs)", "def result(self):\n prec_value = self.precision.result()\n recall_value = self.recall.result()\n return 2 * math_ops.div_no_nan(prec_value * recall_value,\n prec_value + recall_value)", "def MSE(ratings, range):\n\n def squared_err(pair):\n (r, rP) = pair\n return (r-rP)**2\n\n return (1/len(ratings)) * sum(map(squared_err, ratings))", "def sensitivity(self):\n return self.recall", "def getFactor(currency):", "def prob(self, feature_index, feature_value, class_):\r\n\r\n deviation = self.conditional_prob[class_][feature_index][1]\r\n mean = self.conditional_prob[class_][feature_index][0]\r\n\r\n val1 = math.pow((feature_value - mean), 2)\r\n val1 = val1/math.pow(deviation, 2)\r\n\r\n val2 = 2*math.pi*math.pow(deviation, 2)\r\n val2 = 1/(math.sqrt(val2))\r\n\r\n probability = val2 * math.exp(-val1)\r\n\r\n return probability", "def get_power(self):\r\n return self.p", "def classificationError(p_m1):\n p_m2 = 1 - p_m1\n E = 1 - max(p_m1, p_m2) # You could do E=min(p_m1, p_m2) instead \n return E", "def success_chance(dc,modifier=0,adv=False,disadv=False):\r\n if adv:\r\n return 1-((dc-modifier-1)/20)**2\r\n elif disadv:\r\n return (1-(dc-modifier-1)/20)**2\r\n return 1-(dc-modifier-1)/20", "def confidence_coefficient( confidence_level, dimensions=1 ):\n return np.sqrt(chi2.ppf(confidence_level, df=dimensions))", "def get_instance_accuracy(gold_label: str, predicted_label: str) -> float:\n if gold_label == predicted_label:\n return 1.0\n else:\n return 0.0", "def expose(self, rating):\n k = self.mu / self.sigma\n return rating.mu - k * rating.sigma", "def __pow__(self,power):\n return Factor().__build( VarSet(self.v) , np.power(self.t,power) )", "def objective(params):\n\t# hyperopt casts as float\n\tparams['num_boost_round'] = int(params['num_boost_round'])\n\tparams['num_leaves'] = int(params['num_leaves'])\n\n\t# need to be passed as parameter\n\tparams['is_unbalance'] = True\n\tparams['verbose'] = -1\n\tparams['seed'] = 1\n\n\tcv_result = 
lgb.cv(\n\t\tparams,\n\t\tdtrain,\n\t\tnum_boost_round=params['num_boost_round'],\n\t\tmetrics='binary_logloss',\n\t\tnfold=3,\n\t\tearly_stopping_rounds=20,\n\t\tstratified=False)\n\tearly_stop_dict[objective.i] = len(cv_result['binary_logloss-mean'])\n\terror = round(cv_result['binary_logloss-mean'][-1], 4)\n\tobjective.i+=1\n\treturn error", "def predict_movie_rating(pss_score: float)->int:\n return int(round(pss_score))", "def minkowski_loss(y_true, y_pred):\n r = 1 #often 0.4\n return K.mean(K.pow(K.abs(y_pred-y_true),r))", "def power(num, exponent):\n return num ** exponent" ]
[ "0.6483174", "0.60067403", "0.58584934", "0.57399225", "0.5617777", "0.55273247", "0.5520179", "0.5502458", "0.5488472", "0.54872227", "0.5482623", "0.5475527", "0.5460961", "0.5439238", "0.54219", "0.54015714", "0.53638995", "0.53533477", "0.53349483", "0.5307904", "0.529213", "0.5283464", "0.52704", "0.52585274", "0.5249965", "0.5246826", "0.52288735", "0.5212087", "0.52117145", "0.5206323", "0.5204183", "0.51993906", "0.51806927", "0.5177731", "0.5170423", "0.5164337", "0.51619244", "0.5150218", "0.5140412", "0.5140081", "0.51275796", "0.51202005", "0.51163936", "0.5105485", "0.5103643", "0.50974447", "0.50874835", "0.508519", "0.50836116", "0.50806123", "0.50731283", "0.5072704", "0.5072704", "0.50681996", "0.5066166", "0.5065799", "0.5059792", "0.505436", "0.5042584", "0.50339425", "0.5032263", "0.5031081", "0.5028776", "0.5016159", "0.5005113", "0.50032115", "0.49999022", "0.4993108", "0.49915674", "0.49904266", "0.49900827", "0.4983079", "0.4975164", "0.49749264", "0.4971791", "0.4967677", "0.4963251", "0.4963153", "0.49615225", "0.49604103", "0.4956023", "0.4955574", "0.49554506", "0.49495086", "0.49493185", "0.49487084", "0.4947664", "0.4945009", "0.4944863", "0.49352083", "0.4930535", "0.49275598", "0.49269322", "0.49261525", "0.49227354", "0.49179545", "0.49177063", "0.49128222", "0.49125135", "0.49121606" ]
0.7368655
0
Given an overall classifier H, a list of all training points, and a dictionary mapping classifiers to the training points they misclassify, returns a set containing the training points that H misclassifies. H is represented as a list of (classifier, voting_power) tuples.
def get_overall_misclassifications(H, training_points, classifier_to_misclassified): misclassified = [] for p in training_points: score = 0 for tup in H: c = tup[0] voting_power = tup[1] if p in classifier_to_misclassified[c]: score -= voting_power else: score += voting_power if score <= 0: misclassified.append(p) return set(misclassified)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def misclassified_training_points(svm):\n wrong = []\n for point in svm.training_points:\n if point.classification is not classify(svm, point):\n wrong.append(point)\n return set(wrong)", "def digits_make_classifiers_to_misclassified(X,Y,classifiers,ids_to_points):\n\toutput = {key: [] for key in classifiers}\n\tN = len(X)\n\tfor cf in classifiers:\n\t\tfor i in range(N):\n\t\t\tcf_classification = cf[2](X[i])\n\t\t\tif cf_classification != Y[i]:\n\t\t\t\t# output[cf].append(X[i])\n\t\t\t\toutput[cf].append(adaboost.key_from_value(ids_to_points,X[i]))\n\n\treturn output", "def is_good_enough(H, training_points, classifier_to_misclassified, mistake_tolerance=0):\n misclassified = get_overall_misclassifications(H, training_points, classifier_to_misclassified)\n if len(misclassified) > mistake_tolerance:\n return False\n return True", "def condenseData(trainX, trainY):\n # get euclidean distance matrix\n edm = squareform(pdist(trainX))\n \n # initialize prototype subset\n ntrain = trainX.shape[0]\n classes = np.unique(trainY)\n condensedIdx = np.zeros(ntrain).astype(bool)\n \n for cls in classes:\n mask = trainY == cls\n rep = np.random.randint(0, np.sum(mask))\n condensedIdx[np.where(mask)[0][rep]] = True\n \n # slice edm to include only prototype subset\n edm_p = edm[condensedIdx]\n \n # label remaining points using 1-NN\n labels_t = trainY[condensedIdx]\n labels_h = labels_t[np.argmin(edm_p, 0)]\n\n # iterate over remaining points\n for i in range(ntrain):\n # if point is misclassified, add to prototype subset\n if labels_h[i] != trainY[i]: \n condensedIdx[i] = True\n edm_p = edm[condensedIdx]\n labels_t = trainY[condensedIdx]\n labels_h = labels_t[np.argmin(edm_p, 0)] # 1-NN w/new prototype\n\n return np.where(condensedIdx)[0]", "def supervisedMLClassify(sim_vec_dict, true_match_set):\n\n num_folds = 3 # Number of classifiers to create\n\n class_match_set = set()\n class_nonmatch_set = set()\n\n try:\n import numpy\n import sklearn.tree\n except:\n print('Either the \"numpy\" or \"sklearn\" modules is not installed! Aborting.')\n print('')\n\n return set(), set() # Return two empty sets so program continues\n\n import random\n\n print('Supervised decision tree classification of %d record pairs' % \\\n (len(sim_vec_dict)))\n\n # Generate the training data sets (similarity vectors plus class labels\n # (match or non-match)\n #\n num_train_rec = len(sim_vec_dict)\n num_features = len(list(sim_vec_dict.values())[0])\n\n print(' Number of training records and features: %d / %d' % \\\n (num_train_rec, num_features))\n\n all_train_data = numpy.zeros([num_train_rec, num_features])\n all_train_class = numpy.zeros(num_train_rec)\n\n rec_pair_id_list = []\n\n num_pos = 0\n num_neg = 0\n\n i = 0\n for (rec_id1, rec_id2) in sim_vec_dict:\n rec_pair_id_list.append((rec_id1, rec_id2))\n sim_vec = sim_vec_dict[(rec_id1, rec_id2)]\n\n all_train_data[:][i] = sim_vec\n\n if (rec_id1, rec_id2) in true_match_set:\n all_train_class[i] = 1.0\n num_pos += 1\n else:\n all_train_class[i] = 0.0\n num_neg += 1\n i += 1\n\n num_all = num_pos + num_neg # All training examples\n\n num_train_select = int(2. 
/ 3 * num_all) # Select 2/3 for training\n num_test_select = num_all - num_train_select\n\n print(' Number of positive and negative training records: %d / %d' % \\\n (num_pos, num_neg))\n print('')\n\n class_list = [] # List of the generated classifiers\n\n for c in range(num_folds):\n\n train_index_list = random.sample(xrange(num_all), num_train_select)\n\n train_data = numpy.zeros([num_train_select, num_features])\n train_class = numpy.zeros(num_train_select)\n test_data = numpy.zeros([num_test_select, num_features])\n test_class = numpy.zeros(num_test_select)\n\n # Copy similarities and class labels\n #\n train_ind = 0\n test_ind = 0\n\n for i in range(num_all):\n\n if (i in train_index_list):\n train_data[:][train_ind] = all_train_data[:][i]\n train_class[train_ind] = all_train_class[i]\n train_ind += 1\n else:\n test_data[:][test_ind] = all_train_data[:][i]\n test_class[test_ind] = all_train_class[i]\n test_ind += 1\n\n # Now build and train the classifier\n #\n decision_tree = sklearn.tree.DecisionTreeClassifier()\n decision_tree.fit(train_data, train_class)\n\n # Now use the trained classifier on the testing data to see how accurate\n # it is\n #\n class_predict = decision_tree.predict(test_data)\n\n num_corr = 0\n num_wrong = 0\n\n for i in range(len(class_predict)):\n if (class_predict[i] == test_class[i]):\n num_corr += 1\n else:\n num_wrong += 1\n\n print(' Classifier %d gets %d correct and %d wrong' % \\\n (c, num_corr, num_wrong))\n\n class_list.append(decision_tree)\n\n # Now use the trained classifiers to classify all record pairs\n #\n num_match_class_list = [0] * num_all # Count how often a record pair is\n # classified as a match\n\n for decision_tree in class_list:\n\n class_predict = decision_tree.predict(all_train_data) # Classify all pairs\n\n for i in range(num_all):\n num_match_class_list[i] += class_predict[i]\n\n assert num_match_class_list[i] <= num_folds, num_match_class_list[i]\n\n for i in range(num_all):\n rec_id_pair = rec_pair_id_list[i]\n\n # More '1' (match) classifications than '0' (non-match ones)\n #\n if (float(num_match_class_list[i]) / num_folds > 0.5):\n class_match_set.add(rec_id_pair)\n else:\n class_nonmatch_set.add(rec_id_pair)\n\n print('')\n\n print(' Classified %d record pairs as matches and %d as non-matches' % \\\n (len(class_match_set), len(class_nonmatch_set)))\n print('')\n\n return class_match_set, class_nonmatch_set", "def adaboost(training_points, classifier_to_misclassified,\n use_smallest_error=True, mistake_tolerance=0, max_rounds=INF):\n point_to_weight = initialize_weights(training_points)\n H = [] # (classifier, voting_power)\n\n while True:\n # exit conditions\n if is_good_enough(H, training_points, classifier_to_misclassified, mistake_tolerance):\n break\n if max_rounds == 0:\n break\n classifier_to_error_rate = calculate_error_rates(point_to_weight, classifier_to_misclassified)\n best_classifier = None\n try:\n best_classifier = pick_best_classifier(classifier_to_error_rate, use_smallest_error)\n except NoGoodClassifiersError:\n break\n\n max_rounds -= 1\n error_rate = classifier_to_error_rate[best_classifier]\n\n H.append((best_classifier, calculate_voting_power(error_rate)))\n\n point_to_weight = update_weights(point_to_weight, classifier_to_misclassified[best_classifier], error_rate)\n return H", "def _pointset_from_tuples(self, *tuples):\n newset = set()\n for t in tuples:\n newset.add(Point(*t))\n return newset", "def irrelevant_features(features):\n irrelevant = []\n for vec in set(features):\n if 
(features[vec].count(0)/len(indtf_features[vec])) < 0.1:\n irrelevant.append(vec)\n return irrelevant", "def __get_homes_not_on_intersections(x_intercepts: list, y_intercepts: list, homes: list) -> set:\n return {(x, y) for x, y in homes if x not in x_intercepts or y not in y_intercepts}", "def em(indiv_dict, hyplo_collection):\r\n\thyplo_dict=defaultdict(float)\r\n\tres=[]\r\n\tres_pairs=[]\r\n\tstart_fre= np.random.dirichlet(np.ones(len(hyplo_collection)),size=1)[0]\r\n\ti=0\r\n\tfor x in hyplo_collection:\r\n\t\thyplo_dict[x]=start_fre[i]\r\n\t\ti+=1\r\n\tprev_dict=hyplo_dict\r\n\thyplo_dict=phase_cnt(indiv_dict, hyplo_dict)\r\n\twhile True:\r\n\t\tif check_converge(prev_dict, hyplo_dict)==False:\r\n\t\t\tprev_dict=hyplo_dict\r\n\t\t\thyplo_dict=phase_cnt(indiv_dict, hyplo_dict)\r\n\t\telse:\r\n\t\t\tbreak\r\n\tfor k,v in indiv_dict.iteritems():\r\n\t\tpair=get_best(v, hyplo_dict)\r\n\t\tres+=pair\r\n\t\tres_pairs.append(pair)\r\n\tkey_list=indiv_dict.keys()\r\n\treturn list(set(res)), res_pairs", "def evaluate_features(X, y, clf=None):\n if clf is None:\n clf = LogisticRegression()\n \n probas = cross_val_predict(clf, X, y, cv=StratifiedKFold(random_state=8), \n n_jobs=-1, method='predict_proba', verbose=2)\n pred_indices = np.argmax(probas, axis=1)\n classes = np.unique(y)\n preds = classes[pred_indices]\n print('Log loss: {}'.format(log_loss(y, probas)))\n print('Accuracy: {}'.format(accuracy_score(y, preds)))\n skplt.plot_confusion_matrix(y, preds)", "def split_on_classifier(data, classifier):\n feature_values = []\n for point in data:\n classification = classifier.classify(point)\n if classification not in feature_values:\n feature_values.append(classification)\n classification_dict = {}\n for feature_value in feature_values:\n classification_dict[feature_value] = []\n for point in data:\n classification = classifier.classify(point)\n classification_dict[classification].append(point)\n return classification_dict", "def undersample_mlc(y):\n\n ns_per_class = y.sum(axis=0)\n target_per_class = np.min(ns_per_class)\n\n ixes: List[int] = []\n for next_class_ix in np.argsort(ns_per_class):\n candidates_pos = np.where(y[:, next_class_ix] >= 0.5)[0]\n candidates_neg = np.where(y[:, next_class_ix] <= 0.5)[0]\n\n n_have_pos = y[ixes].sum(axis=0)[next_class_ix]\n n_have_neg = len(ixes) - n_have_pos\n\n n_needed_pos = target_per_class - n_have_pos\n n_needed_neg = target_per_class - n_have_neg\n\n if n_needed_pos > 0:\n ixes += list(npr.choice(candidates_pos, size=n_needed_pos))\n if n_needed_neg > 0:\n ixes += list(npr.choice(candidates_neg, size=n_needed_neg))\n\n return ixes", "def evaluate_SURF(x,y,NN,feature,inst,data,multiclass_map,maxInst):\r\n diff = 0\r\n if not data.discretePhenotype: #if continuous phenotype\r\n same_class_bound=data.phenSD #boundary to determine similarity between classes for continuous attributes\r\n \r\n if data.attributeInfo[feature][0]: #Continuous Attribute\r\n #determining boundaries for continuous attributes\r\n min_bound=data.attributeInfo[feature][1][0]\r\n max_bound=data.attributeInfo[feature][1][1]\r\n \r\n diff_hit=0 #initializing the score to 0\r\n diff_miss=0\r\n \r\n count_hit=0\r\n count_miss=0\r\n \r\n if data.discretePhenotype:\r\n if len(data.phenotypeList) > 2: #multiclass endpoint\r\n class_Store = {}\r\n missClassPSum = 0\r\n for each in multiclass_map:\r\n if each != y[inst]: #Store all miss classes\r\n class_Store[each] = [0,0] #stores cout_miss and diff_miss\r\n missClassPSum += multiclass_map[each]\r\n \r\n for i in range(len(NN)): #for 
all nearest neighbors\r\n if x[inst][feature]!=data.labelMissingData and x[NN[i]][feature]!=data.labelMissingData: # add appropriate normalization.\r\n if y[inst]==y[NN[i]]: #HIT\r\n count_hit+=1\r\n if x[inst][feature]!=x[NN[i]][feature]:\r\n if data.attributeInfo[feature][0]: #Continuous Attribute\r\n diff_hit-=abs(x[inst][feature]-x[NN[i]][feature])/(max_bound-min_bound)\r\n else:#Discrete\r\n diff_hit-=1\r\n else: #MISS\r\n for missClass in class_Store:\r\n if y[NN[i]] == missClass:\r\n class_Store[missClass][0] += 1\r\n if x[inst][feature]!=x[NN[i]][feature]:\r\n if data.attributeInfo[feature][0]: #Continuous Attribute\r\n class_Store[missClass][1]+=abs(x[inst][feature]-x[NN[i]][feature])/(max_bound-min_bound)\r\n else:#Discrete\r\n class_Store[missClass][1]+=1\r\n \r\n #Corrects for both multiple classes, as well as missing data.\r\n missSum = 0 \r\n for each in class_Store:\r\n missSum += class_Store[each][0]\r\n missAverage = missSum/float(len(class_Store))\r\n \r\n hit_proportion=count_hit/float(len(NN)) #Correcting for Missing Data.\r\n for each in class_Store:\r\n diff_miss += (multiclass_map[each]/float(missClassPSum))*class_Store[each][1]\r\n \r\n diff = diff_miss*hit_proportion\r\n miss_proportion=missAverage/float(len(NN))\r\n diff += diff_hit*miss_proportion\r\n \r\n else: #Binary Class Problem\r\n for i in range(len(NN)): #for all nearest neighbors\r\n if x[inst][feature]!=data.labelMissingData and x[NN[i]][feature]!=data.labelMissingData: # add appropriate normalization.\r\n \r\n if y[inst]==y[NN[i]]: #HIT\r\n count_hit+=1\r\n if x[inst][feature]!=x[NN[i]][feature]:\r\n if data.attributeInfo[feature][0]: #Continuous Attribute\r\n diff_hit-=abs(x[inst][feature]-x[NN[i]][feature])/(max_bound-min_bound)\r\n else:#Discrete\r\n diff_hit-=1\r\n else: #MISS\r\n count_miss+=1\r\n if x[inst][feature]!=x[NN[i]][feature]:\r\n if data.attributeInfo[feature][0]: #Continuous Attribute\r\n diff_miss+=abs(x[inst][feature]-x[NN[i]][feature])/(max_bound-min_bound)\r\n else:#Discrete\r\n diff_miss+=1 \r\n\r\n #Take hit/miss inbalance into account (coming from missing data)\r\n hit_proportion=count_hit/float(len(NN))\r\n miss_proportion=count_miss/float(len(NN))\r\n \r\n diff=diff_hit*miss_proportion + diff_miss*hit_proportion #applying weighting scheme to balance the scores \r\n \r\n else: #continuous endpoint\r\n for i in range(len(NN)): #for all nearest neighbors\r\n if x[inst][feature]!=data.labelMissingData and x[NN[i]][feature]!=data.labelMissingData: # add appropriate normalization.\r\n \r\n if abs(y[inst]-y[NN[i]])<same_class_bound: #HIT\r\n count_hit+=1 \r\n if x[inst][feature]!=x[NN[i]][feature]:\r\n if data.attributeInfo[feature][0]: #Continuous Attribute\r\n diff_hit-=abs(x[inst][feature]-x[NN[i]][feature])/(max_bound-min_bound)\r\n else:#Discrete\r\n diff_hit-=1\r\n else: #MISS\r\n count_miss+=1\r\n if x[inst][feature]!=x[NN[i]][feature]:\r\n if data.attributeInfo[feature][0]: #Continuous Attribute\r\n diff_miss+=abs(x[inst][feature]-x[NN[i]][feature])/(max_bound-min_bound)\r\n else:#Discrete\r\n diff_miss+=1\r\n\r\n #Take hit/miss inbalance into account (coming from missing data, or inability to find enough continuous neighbors)\r\n hit_proportion=count_hit/float(len(NN))\r\n miss_proportion=count_miss/float(len(NN))\r\n \r\n diff=diff_hit*miss_proportion + diff_miss*hit_proportion #applying weighting scheme to balance the scores \r\n \r\n return diff", "def evalute_subset(X_train, X_test, y_train, y_test):\r\n clf = KNeighborsClassifier(n_neighbors=3)\r\n clf.fit(X_train, 
y_train) \r\n y_pred = clf.predict(X_test)\r\n return accuracy_score(y_test, y_pred)", "def evaluate_features(X, y, clfKey):\n\n clf = {\n 'LogReg': LogisticRegression(),\n 'SDG_Mh': SGDClassifier(loss=\"modified_huber\", penalty=\"l2\", max_iter=5),\n 'SDG_Log': SGDClassifier(loss=\"log\", penalty=\"l2\", max_iter=5),\n 'Tree': RandomForestClassifier(n_estimators=1000, max_depth=5, verbose=1)\n }\n\n probabilities = cross_val_predict(clf[clfKey], X, y, cv=StratifiedKFold(n_splits=2, random_state=8),\n n_jobs=-1, method='predict_proba', verbose=2)\n predicted_indices = np.argmax(probabilities, axis=1)\n classes = np.unique(y)\n predicted = classes[predicted_indices]\n print('Log loss: {}'.format(log_loss(y, probabilities)))\n print('Accuracy: {}'.format(accuracy_score(y, predicted)))\n skplt.metrics.plot_confusion_matrix(y, predicted, normalize=True)\n plt.show()", "def test__load_training_set():\n classifier = classifier_module.Classifier(None)\n set = classifier._load_training_set('test')\n for i in range(0, 5):\n signal_list = set[i]\n assert signal_list[0].get_x() == 1.0 + i * 0.028\n assert signal_list[0].get_y() == 1.00 - i * i * 0.20 * 0.30\n\n assert signal_list[1].get_x() == 2.0 - i * 0.011\n assert signal_list[1].get_y() == 2.00 - i * 0.020", "def remove_naked_sets_from_candidates(c, *args, naked_sets=defaultdict(list)):\n for d in args:\n for k, v in d.items():\n for coord in v:\n c[coord] = [n for n in c[coord] if n not in k]\n naked_sets[coord].extend(list(k))\n return c, dict(naked_sets)", "def prune(candidate_aspect_list, min_sup):\n l_k = deepcopy(candidate_aspect_list)\n for key, value in list(l_k.items()):\n if value < min_sup:\n del l_k[key]\n return l_k", "def remove_features(sets_x, unused_features):\n\n # initiate empty list for return variable\n significant_x = [] \n\n # iterate through subsets and their corresponding insignificant features\n for x, features in zip(sets_x, unused_features):\n # remove features from subset and store the result into list\n significant_x.append(np.delete(x,features,1))\n \n return significant_x", "def binary_classifications(self):\n return sorted(list(set(self.binarized[self._groundtruth])), reverse=True)", "def lostMetabolismGeneDuplicatedEnzymePairs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> Set[Tuple[Enzyme, Enzyme]]:\n # get added metabolism\n lostMetabolismEnzymes = self.lostMetabolismEnzymes(majorityPercentageCoreMetabolism).getEnzymes()\n \n # get gene-duplicated enzyme pairs\n geneDuplicated = self.childClade.geneDuplicatedEnzymePairs(majorityPercentageCoreMetabolism)\n \n # filter gene-duplicated enzyme pairs for the ones with both enzymes in the lost metabolism\n geneDuplicatedLost = set()\n \n for enzymeTuple in geneDuplicated:\n if enzymeTuple[0] in lostMetabolismEnzymes and enzymeTuple[1] in lostMetabolismEnzymes:\n geneDuplicatedLost.add(enzymeTuple)\n \n return geneDuplicatedLost", "def get_train_test_set(self, large, Prop):\n elems = int(large * Prop)\n suc = [i for i in range(0, large)]\n train = random.sample(suc, elems)\n test = list(set(suc) - set(train))\n return (train, test)", "def get_harris_points(response, min_dist = 10, threshold_percent = 0.1):\n \n # find top corner caditades above a threshold\n threshold = response.max() * threshold_percent\n response_threshold = imtools.threshold(response, threshold)\n\n # get coordinates of candidates\n coords = np.array(response_threshold.nonzero()).T\n \n # ... 
and their values\n candidate_values = [response[c[0], c[1]] for c in coords]\n \n # sort candidates\n index = np.argsort(candidate_values)\n \n # store allowed point locations in array\n allowed_locations = np.zeros(response.shape)\n allowed_locations[min_dist:-min_dist, min_dist:-min_dist] = 1\n \n # select the best points taking min_distance into account\n filtered_coords = []\n for i in index:\n if allowed_locations[coords[i,0], coords[i,1]] == 1:\n filtered_coords.append(coords[i])\n allowed_locations[(coords[i,0]-min_dist):(coords[i,0]+min_dist),\n (coords[i,1]-min_dist):(coords[i,1]+min_dist)] = 0\n \n return filtered_coords", "def get_misclassified_items(cluster: np.ndarray):\r\n # Get the indexes for the misclassified elements of the label\r\n mask_label = np.array(generate_inputs.test_labels == generate_inputs.EXPECTED_LABEL)\r\n mask_miss = np.array(generate_inputs.test_labels != generate_inputs.predictions)\r\n mask_idxs = np.argwhere(mask_miss[mask_label]).flatten()\r\n intersection = np.intersect1d(cluster, mask_idxs)\r\n return intersection", "def predict_bagging_example(x, h_ens):\r\n arr = []\r\n for y in h_ens:\r\n \t# calls predict example repeatedly and stores them in an array\r\n tst_pred = predict_example(x, h_ens[y][1])\r\n arr.append(tst_pred)\r\n # returning the maximum voted\r\n predict_egz = max(set(arr), key=arr.count)\r\n return predict_egz", "def multiclass_noisify(y, P, random_state=0):\n print(np.max(y), P.shape[0])\n assert P.shape[0] == P.shape[1]\n assert np.max(y) < P.shape[0]\n\n # row stochastic matrix\n assert_array_almost_equal(P.sum(axis=1), np.ones(P.shape[1]))\n assert (P >= 0.0).all()\n\n n_classes = len(P)\n\n m = y.shape[0]\n print(m)\n new_y = y.copy()\n\n for idx in np.arange(m):\n i = y[idx]\n # draw a vector with only an 1\n new_y[idx] = np.random.choice(n_classes, 1, p=P[i, :])[0]\n\n return new_y", "def _plot_good_pred_whit_reject(self, test: Set, title=None, fig_size=None):\r\n if fig_size is not None:\r\n fig = plt.figure(figsize=fig_size)\r\n else:\r\n fig = plt.figure()\r\n ax = fig.add_subplot(111)\r\n goodclassified_index = []\r\n for idx_preds in range(self.preds.shape[1] - 1):\r\n new_good_index = []\r\n for idx in range(self.preds.shape[0]):\r\n if self.preds[idx][0][idx_preds] == test.labels[idx] and \\\r\n self.preds[idx][1][idx_preds] != self.preds[idx][1][idx_preds + 1]:\r\n new_good_index.append(idx)\r\n if new_good_index:\r\n ax.scatter(test.features[new_good_index, 0], self.features[new_good_index, 1],\r\n label='Good classified top{0}'.format(int(idx_preds + 1)))\r\n goodclassified_index += new_good_index\r\n new_good_index = []\r\n for idx in range(self.preds.shape[0]):\r\n if self.preds[idx][0][-1] == test.labels[idx]:\r\n new_good_index.append(idx)\r\n if new_good_index:\r\n ax.scatter(test.features[new_good_index, 0], self.features[new_good_index, 1],\r\n label='Good classified top{0}'.format(int(self.preds.shape[1])))\r\n goodclassified_index += new_good_index\r\n reject_idx, misclassified_idx = ([], [])\r\n for idx in range(self.preds.shape[0]):\r\n if idx not in goodclassified_index:\r\n reject = False\r\n for idx_preds in range(self.preds.shape[1] - 1):\r\n if self.preds[idx][1][idx_preds] == self.preds[idx][1][idx_preds + 1]:\r\n reject_idx.append(idx)\r\n reject = True\r\n break\r\n if not reject:\r\n misclassified_idx.append(idx)\r\n if reject_idx:\r\n ax.scatter(test.features[reject_idx, 0], self.features[reject_idx, 1],\r\n label='Reject', c='orange', marker='^')\r\n if misclassified_idx:\r\n 
ax.scatter(test.features[misclassified_idx, 0], self.features[misclassified_idx, 1],\r\n label='Misclassified', marker='x', c='red')\r\n box = ax.get_position()\r\n ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])\r\n # Put a legend to the right of the current axis\r\n ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))\r\n if title is not None:\r\n ax.set_title(title)\r\n plt.show()", "def classify(self, data ):\n guesses = []\n for datum in data:\n vectors = util.Counter()\n for l in self.legalLabels:\n vectors[l] = self.weights[l] * datum\n guesses.append(vectors.argMax())\n return guesses", "def classify(self, data):\n guesses = []\n for datum in data:\n vectors = util.Counter()\n for l in self.legalLabels:\n vectors[l] = self.weights[l] * datum\n guesses.append(vectors.argMax())\n return guesses", "def isomorphism_representatives(self):\n result = set()\n for c in self:\n cc = c.relabel()\n if cc not in result:\n result.add(cc)\n return result", "def _reweight_and_discard_irrelevant(self, weighted_sample_pool, t):\n tmp = []\n ret = []\n wc = self.classifiers[t]\n theta_a = wc.theta_a\n theta_b = wc.theta_b\n\n norm_factor = 0\n discarded = 0\n for patch, w in weighted_sample_pool:\n response = self.h_t(patch, t)\n # if t > 3:\n # if response < theta_a or response > theta_b: # throw it away\n # discarded += 1\n # continue\n r = self.classify(patch)\n label = patch.label\n new_weight = w * np.exp(-label * r)\n\n tmp.append([patch, new_weight])\n norm_factor += new_weight\n for patch, w in tmp: # normalize weights\n normalized_weight = w / norm_factor\n ret.append([patch, normalized_weight])\n print \"Discarded %d training samples\" % discarded\n return ret", "def generateCandidates(self):\n\t\tprint(\"Candidate list:\\n\")\n\t\tkeys = list(self.prune_list.keys())\n\t\ttuple_count = len(keys[0])\n\t\tprune_list = {}\n\t\ttup = []\n\t\tfor v in comb(keys, 2):\n\t\t\ta = set(v[0])\n\t\t\tb = set(v[1])\n\t\t\t\n\t\t\t# If there's as many common element in a & b as one less than tuple_count\n\t\t\tif((len(a & b) == (tuple_count - 1)) and (tuple(a | b) not in tup)):\n\t\t\t\ttup.append(tuple(a | b))\n\t\t\t\tprint(tup[-1])\n\t\t\t\t# Update prune list\n\t\t\t\tcount = self.getSupport(tup[-1])\n\t\t\t\tif(count >= self.support):\n\t\t\t\t\tprune_list[tup[-1]] = count\n\t\treturn prune_list", "def filter(self, new_set):\n for old_set in self.itervalues():\n for feat in old_set.iterkeys():\n if feat not in new_set:\n del old_set[feat]\n return self", "def cross_validate(k, original_x_train, original_y_train, label, features: List[str],\n features_to_encode: List[str], balance_ratio: float,\n classifiers: List[ClassifierMixin]) \\\n -> Tuple[List[Tuple[Type[ClassifierMixin], Dict]], Type[ClassifierMixin]]:\n\n X, y = balance_train(original_x_train, original_y_train, label, balance_ratio)\n skf = StratifiedKFold(n_splits=k)\n val_scores = []\n for classifier in classifiers:\n print('Doing ', classifier.__class__)\n clf = make_pipeline(FeatureEncoder(features_to_encode, features), classifier)\n val_scores.append((classifier.__class__,\n model_selection.cross_validate(clf, X, y, scoring=('f1_weighted'),\n cv=skf, n_jobs=-1)))\n best_classifier_class = max([(mod, median(res['test_score'])) for mod, res in val_scores],\n key=lambda x: x[1])[0]\n return val_scores, best_classifier_class", "def train_naive_bayes_soy(train_set, classes):\n\n print('[ INFO ]: Training soy data with Naive Bayes Classifier...')\n\n class_probabilities = {}\n class_feature_probs = {}\n\n for soy_class in 
classes:\n\n feature_true_probs = {}\n feature_false_probs = {}\n\n # Find the probability that each class is in the training set\n class_probabilities[soy_class] = len(train_set[(train_set[soy_class] == 1)]) / len(train_set)\n\n # Compute the conditional feature probabilities based on the class probabilities\n # where the class is present\n class_true = train_set[(train_set[soy_class] == 1)]\n for col in class_true.columns:\n if col not in classes:\n try:\n true_true = len(class_true[(class_true[col] == 1)]) / len(class_true)\n except:\n true_true = 0\n feature_true_probs[col] = true_true\n\n # Compute the conditional feature probabilities based on the class probabilities\n # where the class is not present\n class_false = train_set[(train_set[soy_class] == 0)]\n for col in class_false.columns:\n if col not in classes:\n try:\n false_false = len(class_false[(class_false[col] == 0)]) / len(class_false)\n except:\n false_false = 0\n feature_false_probs[col] = false_false\n\n class_feature_probs[soy_class] = [feature_true_probs, feature_false_probs]\n\n return class_probabilities, class_feature_probs", "def h_t(self, x, t):\n ret = 0\n strong_classifier = self.classifiers[0:t+1]\n for wc in strong_classifier:\n ret += wc.classify(x)\n return ret", "def h_t(self, x, t):\n ret = 0\n strong_classifier = self.classifiers[0:t+1]\n for wc in strong_classifier:\n ret += wc.classify(x)\n return ret", "def MostCommonClassBaseline(training_set, test_set):\n wordtagcounts = {}\n\n for sentence in training_set:\n for (word, tag) in sentence:\n if word not in wordtagcounts:\n wordtagcounts[word] = {tag:1}\n elif tag in wordtagcounts[word]:\n wordtagcounts[word][tag] = wordtagcounts[word][tag] + 1\n elif tag not in wordtagcounts[word]:\n wordtagcounts[word][tag] = 1\n \n result_set = []\n for sentence in test_set:\n toadd = []\n for i in range(len(sentence) ):\n toadd.append( (sentence[i][0], max( wordtagcounts[ sentence[i][0] ], key=wordtagcounts[sentence[i][0] ].get ) ) )\n result_set.append( toadd )\n return result_set", "def validate_holdout(Xs, ys, X_holdout, y_holdout, non_bleeding_features, features_dir,\n scorer_type, feat_indices, result_string, learning_rate_string, features):\n # define folder to save the classifier and create it if not existing\n parent_folder = \"%s/data/fnc-1/mlp_models/\" % (path.dirname(path.dirname(path.abspath(__file__))))\n\n # create the new save folder\n save_folder = get_save_folder(parent_folder, scorer_type+\"_new\")\n\n # only pass a save folder if the classifier should be saved\n best_clf = esitmator_definitions.get_estimator(scorer_type, save_folder=save_folder)\n\n # stack all the feature vectors of all the folds\n X_train = np.vstack(tuple([Xs[i] for i in range(10)]))\n y_train = np.hstack(tuple([ys[i] for i in range(10)]))\n\n # concat non-bleeding features\n X_train, X_holdout, feat_indices_holdout = concat_non_bleeding_features(\n X_train, X_holdout,\n non_bleeding_features, features_dir, 'holdout')\n\n # test for oversampling: fits the current classifier, oversampled with a given\n # method and checks the score on the holdout set\n use_over_sampling = False\n if use_over_sampling == True:\n from imblearn.over_sampling import SMOTE\n kind = ['regular', 'borderline1', 'borderline2', 'svm']\n for m in kind:\n sm = SMOTE(kind=m)\n X_res, y_res = sm.fit_sample(X_train, y_train)\n best_clf.fit(X_res, y_res)\n y_predicted = best_clf.predict(X_holdout)\n predicted = [LABELS[int(a)] for a in y_predicted]\n actual = [LABELS[int(a)] for a in y_holdout]\n fold_score, _ 
= score_submission(actual, predicted)\n max_fold_score, _ = score_submission(actual, actual)\n score = fold_score / max_fold_score\n print(\"Score \" + m + \":\" + str(score))\n\n\n #Taken from Benjamins LSTM\n loss_monitor_file_dir = \"%s/data/fnc-1/model_results/loss_results/\" % (\n path.dirname(path.dirname(path.abspath(__file__))))\n loss_filename = loss_monitor_file_dir + str(datetime.datetime.now().strftime(\"%Y-%m-%d_%H-%M\")) + \".txt\"\n # fit the classifier\n if 'f_ext' in scorer_type:\n append_to_loss_monitor_file(\"\\n\\nFOLD holdout and classifier: \" + scorer_type + \"\\n\", loss_filename)\n append_to_loss_monitor_file(str(datetime.datetime.now()).split('.')[0], loss_filename)\n best_clf.fit(X_train, y_train, X_holdout, np.array(y_holdout), 'holdout', loss_filename)\n else:\n best_clf.fit(X_train, y_train)\n\n # predict labels\n y_predicted = best_clf.predict(X_holdout)\n predicted = [LABELS[int(a)] for a in y_predicted]\n actual = [LABELS[int(a)] for a in y_holdout]\n\n # calc FNC score\n fold_score, cm = score_submission(actual, predicted)\n max_fold_score, _ = score_submission(actual, actual)\n score = fold_score / max_fold_score\n\n # calc accuracy for related/unrelated and stances\n accuracy_stance = score_calculation.get_accuracy(y_predicted, y_holdout, stance=True)\n accuracy_related = score_calculation.get_accuracy(y_predicted, y_holdout, stance=False)\n f1_stance = score_calculation.get_f1score(y_predicted, y_holdout, stance=True)\n f1_related = score_calculation.get_f1score(y_predicted, y_holdout, stance=False)\n\n # prepare printout for final results of holdout set\n printout = printout_manager.get_holdout_printout(save_folder, accuracy_related, accuracy_stance, f1_related, f1_stance, score)\n printout += printout_manager.calculate_confusion_matrix(cm)\n print(printout) # print holdout results\n result_string += printout + \"\\n\"# add results to string that is going to be saved into a file\n\n result_file_folder = \"%s\" % (path.dirname(path.dirname(path.abspath(__file__))))\n printout_manager.save_file(result_string, result_file_folder + \"/fnc_results_holdout.txt\", \"a+\")\n\n #aligned printout for ablation:\n summary = printout_manager.get_holdout_ablation_printout(features, score,f1_stance,save_folder)\n printout_manager.save_file(summary, result_file_folder + \"/fnc_results_holdout_summary.txt\", \"a+\")\n\n # test saving and restoring model\n #filename = scorer_type + \".sav\"\n #save_model(best_clf, save_folder,filename)\n #load_clf = load_model(parent_folder + scorer_type + \"_new_0/\", filename) # the 0th folder should always exist\n #print_score_from_restored_model(load_clf, X_holdout, y_holdout)\n\n # add to special file that shows learning rate and loss of optimizer\n if isinstance(best_clf, MultiThreadingFeedForwardMLP):\n learning_rate_string += best_clf.get_learning_rates('holdout') + \"\\n\"\n\n # print feature importances\n if scorer_type == 'randomforest':\n result_file_folder = \"%s\" % (path.dirname(path.dirname(path.abspath(__file__))))\n importances = best_clf.feature_importances_\n std = np.std([tree.feature_importances_ for tree in best_clf.estimators_],\n axis=0)\n indices = np.argsort(importances)[::-1]\n feat_indices.append(feat_indices_holdout)\n\n feat_importance_string = str(feat_indices) + \"\\n\"\n for i in indices:\n feat_importance_string += str(i) + \";\" + str(importances[i]) + \";\" + str(std[i]) + \"\\n\"\n\n # save feature importances as file\n printout_manager.save_file(feat_importance_string, result_file_folder + 
\"/feat_importance_rf.txt\", \"a+\")\n\n return result_string, learning_rate_string", "def get_known(self):\n pool = set()\n\n # Add neighbours:\n pool.update(self.neighbours)\n\n # Add fingers:\n for f in SUCC_FINGERS:\n pool.update(self.best_finger_succ[f])\n for f in PRED_FINGERS:\n pool.update(self.best_finger_pred[f])\n return list(pool)", "def classify(self, data):\n guesses = []\n for datum in data:\n vectors = util.Counter()\n for l in self.legalLabels:\n vectors[l] = self.weights[l] * datum + self.bias[l]\n guesses.append(vectors.argMax())\n return guesses", "def leave_one_out_cross_validation(data_set):\r\n with_removal = []\r\n total = 0\r\n correct = 0\r\n for dat in data_set:\r\n removed = [x for x in data_set]\r\n\r\n removed.remove(dat)\r\n t = test_with_data(removed)\r\n actual = dat.dat_party\r\n classified = classify(dat, t)\r\n if actual == classified: correct+=1\r\n total+=1\r\n\r\n print(\"Accuracy after L.O.O.C.V.: \" + str(correct/total))", "def __cross_validation(self, classifier, X, y, k, stratify=True):\n\t\tif k == X.shape[0]:\t\t# leave-one-out\n\t\t\tkf = model_selection.KFold(n_splits=k)\n\t\telse:\n\t\t\tif stratify:\n\t\t\t\tkf = model_selection.StratifiedKFold(n_splits=k, shuffle=True, random_state=0)\n\t\t\telse:\n\t\t\t\tkf = model_selection.KFold(n_splits=k, shuffle=True, random_state=0)\n\n\t\t# training data and predictions for each fold\n\t\ty_train_list = []\n\t\ty_train_pred_list = []\n\t\ty_train_prob_list = []\n\t\ty_val_list = []\n\t\ty_val_pred_list = []\n\t\ty_val_prob_list = []\n\n\t\tfor train_idx, val_idx in kf.split(X, y):\n\t\t\tX_train, X_val = X.iloc[train_idx], X.iloc[val_idx]\n\t\t\ty_train, y_val = y.iloc[train_idx], y.iloc[val_idx]\n\t\t\ty_train_list.append(y_train)\n\t\t\ty_val_list.append(y_val)\n\n\t\t\t# catch convergence warning\n\t\t\twith warnings.catch_warnings():\n\t\t\t\twarnings.filterwarnings('error', category=exceptions.ConvergenceWarning)\n\t\t\t\ttry:\n\t\t\t\t\tclassifier = classifier.fit(X_train, y_train)\n\t\t\t\texcept exceptions.ConvergenceWarning:\n\t\t\t\t\tModel.counter -= 1\n\t\t\t\t\traise\n\n\t\t\ty_train_pred_list.append(classifier.predict(X_train))\n\t\t\ty_val_pred_list.append(classifier.predict(X_val))\n\t\t\ty_train_prob_list.append(classifier.predict_proba(X_train))\n\t\t\ty_val_prob_list.append(classifier.predict_proba(X_val))\n\n\t\tif k == X.shape[0]:\t\t# leave-one-out\n\t\t\ty_val = np.hstack(y_val_list)\n\t\t\ty_val_pred = np.hstack(y_val_pred_list)\n\t\t\ty_val_prob = np.vstack(y_val_prob_list)\n\n\t\t\treturn ModelMetrics(classifier, y_train_list, y_train_pred_list, y_train_prob_list, 'cv'), \\\n\t\t\t\tModelMetrics(classifier, y_val, y_val_pred, y_val_prob, 'loo')\n\t\telse:\n\t\t\treturn ModelMetrics(classifier, y_train_list, y_train_pred_list, y_train_prob_list, 'cv'), \\\n\t\t\t\tModelMetrics(classifier, y_val_list, y_val_pred_list, y_val_prob_list, 'cv')", "def classify(list_of_sets, sort=True):\n classifier = Classifier(sort=sort)\n classifier.update(list_of_sets)\n return classifier.getClasses(), classifier.getMapping()", "def classify(self, data ):\n\t\tguesses = []\n\t\tfor datum in data:\n\t\t\tvectors = util.Counter()\n\t\t\tfor l in self.legalLabels:\n\t\t\t\tvectors[l] = self.weights[l] * datum\n\t\t\tguesses.append(vectors.argMax())\n\t\treturn guesses", "def classify(self, data ):\n\t\tguesses = []\n\t\tfor datum in data:\n\t\t\tvectors = util.Counter()\n\t\t\tfor l in self.legalLabels:\n\t\t\t\tvectors[l] = self.weights[l] * 
datum\n\t\t\tguesses.append(vectors.argMax())\n\t\treturn guesses", "def get_mistakes(X, yt, cls, documents):\n yp = cls.predict(X)\n yp_probs = cls.predict_proba(X)\n mistakes = []\n \n for i in range(len(yp)):\n if yp[i] != yt[i]:\n mistakes.append([documents[i], yp_probs[i], yt[i]])\n return mistakes", "def knn_classification(x_test, df_training, attrib_column, k):\n return majority_vote(k_nearest_neighbors(x_test, df_training,k),df,attrib_column)", "def trainClassifiers(features, labels):\n # trains each classifier on given training set\n classArr = VotingClassifier(estimators = [('NB', naiveBayesModel), ('linSVC', linearSVCModel), ('LR', logRegModel)], \\\n voting = 'hard', weights = [1, 5, 3])\n \n classArr = classArr.fit(features, labels)\n return classArr", "def __add_dummy_inputs(training_set):\n return [(Perceptron.__add_dummy_input(input_vector), is_connected)\n for input_vector, is_connected in training_set]", "def _validateClassification(self, trainingSet):\n wrongCount = 0.\n\n pv = []\n tv = []\n\n if self.K == 1:\n for example in trainingSet:\n Y = self.test(example)\n \n givenClass = example.label[0]\n if Y[0] < 0.5:\n chosenClass = 0\n else:\n chosenClass = 1\n \n pv.append(chosenClass)\n tv.append(givenClass)\n \n if chosenClass != givenClass:\n wrongCount += 1.\n else:\n for example in trainingSet:\n Y = self.test(example)\n \n posterior, chosenClass = max((x, i) for i, x in enumerate(Y))\n max_val, givenClass = max((x, i) for i, x in enumerate(example.label))\n \n pv.append(chosenClass)\n tv.append(givenClass)\n \t\t\t\n if chosenClass != givenClass:\n wrongCount += 1.\n \n return wrongCount/len(trainingSet), pv, tv", "def get_possible_labels(Y):\n \n return list(set(itertools.chain(*Y)))", "def gen_all_holds(hand):\n all_holds_set = [()]\n for entry in hand:\n for subset in all_holds_set:\n # create subsets of hand set\n all_holds_set = all_holds_set + [tuple(subset) + (entry,)]\n return set(sorted(all_holds_set))", "def calc_openset_classification(data_outlier_probs, num_classes, num_outlier_threshs=50):\n\n dataset_outliers = []\n threshs = []\n\n # loop through each rejection prior value and evaluate the percentage of the dataset being considered as\n # statistical outliers, i.e. 
each data point's outlier probability > rejection prior.\n for i in range(num_outlier_threshs - 1):\n outlier_threshold = (i + 1) * (1.0 / num_outlier_threshs)\n threshs.append(outlier_threshold)\n\n dataset_outliers.append(0)\n total_dataset = 0\n\n for j in range(num_classes):\n total_dataset += len(data_outlier_probs[j])\n\n for k in range(len(data_outlier_probs[j])):\n if data_outlier_probs[j][k] > outlier_threshold:\n dataset_outliers[i] += 1\n\n dataset_outliers[i] = dataset_outliers[i] / float(total_dataset)\n\n return {\"thresholds\": threshs, \"outlier_percentage\": dataset_outliers}", "def getLabeledXYonly(self, trainingData):\r\n labeledData, unlabeledData = trainingData\r\n return labeledData", "def test_qsvm_multiclass_all_pairs(self):\n training_input = {'A': np.asarray([[0.6560706, 0.17605998], [0.25776033, 0.47628296],\n [0.8690704, 0.70847635]]),\n 'B': np.asarray([[0.38857596, -0.33775802], [0.49946978, -0.48727951],\n [0.49156185, -0.3660534]]),\n 'C': np.asarray([[-0.68088231, 0.46824423], [-0.56167659, 0.65270294],\n [-0.82139073, 0.29941512]])}\n\n test_input = {'A': np.asarray([[0.57483139, 0.47120732], [0.48372348, 0.25438544],\n [0.48142649, 0.15931707]]),\n 'B': np.asarray([[-0.06048935, -0.48345293], [-0.01065613, -0.33910828],\n [0.06183066, -0.53376975]]),\n 'C': np.asarray([[-0.74561108, 0.27047295], [-0.69942965, 0.11885162],\n [-0.66489165, 0.1181712]])}\n\n total_array = np.concatenate((test_input['A'], test_input['B'], test_input['C']))\n\n aqua_globals.random_seed = self.random_seed\n feature_map = SecondOrderExpansion(feature_dimension=get_feature_dimension(training_input),\n depth=2,\n entangler_map=[[0, 1]])\n try:\n svm = QSVM(feature_map, training_input, test_input, total_array,\n multiclass_extension=AllPairs())\n\n quantum_instance = QuantumInstance(BasicAer.get_backend('qasm_simulator'),\n shots=self.shots,\n seed_simulator=aqua_globals.random_seed,\n seed_transpiler=aqua_globals.random_seed)\n result = svm.run(quantum_instance)\n self.assertAlmostEqual(result['testing_accuracy'], 0.444444444, places=4)\n self.assertEqual(result['predicted_classes'], ['A', 'A', 'C', 'A',\n 'A', 'A', 'A', 'C', 'C'])\n except NameError as ex:\n self.skipTest(str(ex))", "def get_valid_sets(self, inverse):\n class_names = [0] * self._counter\n for element in inverse:\n class_names[self._class_names[element]] += 1\n return [i for i, value in enumerate(class_names)\n if value != 0 and value != len(self.part[i])]", "def generate_Lk_by_Ck(data_set, Ck, min_support, support_data):\r\n Lk = set()\r\n item_count = {}\r\n for t in data_set:\r\n for item in Ck:\r\n if item.issubset(t):\r\n if item not in item_count:\r\n item_count[item] = 1\r\n else:\r\n item_count[item] += 1\r\n t_num = float(len(data_set))\r\n for item in item_count:\r\n if (item_count[item] ) >= min_support:\r\n Lk.add(item)\r\n support_data[item] = item_count[item] / t_num\r\n return Lk", "def factorset(self,x):\r\n return set(self.factors(x))", "def _classifier(self, test_set):\r\n return self._mahalanobis_classifier(test_set.features, self.targets)", "def classifier_set(tuning=False):\n\tif tuning==False:\n\t\tclassifiers = [\n\t\t\tKNeighborsClassifier(50),\n\t\t\tSVC(kernel=\"linear\", C=0.025, probability=True),\n\t\t\tSVC(gamma=1, C=1, probability=True),\n\t\t\tGaussianProcessClassifier(1.0 * RBF(1.0)),\n\t\t\tDecisionTreeClassifier(criterion= 'entropy', min_samples_leaf= 30, min_samples_split= 10, splitter= 'random'),\n\t\t\tRandomForestClassifier(n_estimators=50, min_samples_leaf=30, 
min_samples_split=2),\n\t\t\tMLPClassifier(early_stopping=True, hidden_layer_sizes=100,learning_rate_init=0.1),\n\t\t\tAdaBoostClassifier(n_estimators= 50),\n\t\t\tGaussianNB(),\n\t\t\tLogisticRegression()\n\t\t\t]\n\t\tnames = [\"KNN\",\n\t\t\t \t\"L SVM\",\n\t\t\t \t\"RBF SVM\", \n\t\t\t \t\"GP\",\n\t\t\t\t\"DT\",\n\t\t\t\t\"RF\",\n\t\t\t\t\"NN\", \n\t\t\t\t\"AB\",\n\t\t\t\t\"NB\",\n\t\t\t\t\"LR\"\n\t\t\t\t]\n\treturn classifiers, names", "def svm_train_classifier(self):\n\n # needed because a SVM needs more than 1 class\n if len(self.saved_gestures.keys()) <= 1:\n print(\"Not enough gestures!\")\n else:\n training_data = []\n categories = []\n id = 0\n\n for gesture, value in self.saved_gestures.items():\n id += 1\n # needed to map the id returned from the SVM to a name of a gesture\n self.category_to_gesture[id] = gesture\n categories.append(id)\n\n x = []\n y = []\n z = []\n for elem in value:\n x.append(elem[0][0])\n y.append(elem[1][0])\n z.append(elem[2][0])\n\n training_data.append(self.get_fft(x, y, z))\n\n # normalized length of fft\n self.cutoff_length = min([len(l) for l in training_data])\n\n normalized_fft = []\n for l in training_data:\n normalized_fft.append(l[:self.cutoff_length])\n\n training_data = normalized_fft\n\n self.classifier.fit(training_data, categories)", "def _split_k_best_hypotheses(self, hts):\n hts_info = [(-1 * ht.conf, idx) for idx, ht in enumerate(hts)]\n sorted_hts_info = sorted(hts_info)\n best_hts_idx = set([\n i for _, i in sorted_hts_info[:self.max_hypotheses]])\n best_k_hts = [ht for idx, ht in enumerate(hts)\n if idx in best_hts_idx]\n other_hts = [ht for idx, ht in enumerate(hts)\n if idx not in best_hts_idx]\n return best_k_hts, other_hts", "def delete(feature_set, population):\n features = [x for x in list(feature_set)]\n pop = [x for y in population for x in y]\n min = float(\"+inf\")\n rem = features[0]\n for i in range(0, len(features)):\n x = pop.count(features[i])\n if x < min:\n min = x\n rem = features[i]\n features.remove(rem)\n return set(features)", "def non_max_suppression_all_classes(boxes, scores, labels, iou_threshold=0.5):\n excluded_indices = []\n for i in range(0,len(boxes)):\n obj1_box, _, obj1_label = boxes[i], scores[i], labels[i]\n for j in range(i+1,len(boxes)):\n obj2_box, _, obj2_label = boxes[j], scores[j], labels[j]\n if (get_iou(obj1_box, obj2_box) > iou_threshold):\n #print('excluding idx={}, class={}, score={}, bbox={}'.format(j, obj2_label, obj2_score, obj2_box))\n excluded_indices.append(j)\n \n excluded_indices = list(set(excluded_indices)) #Elimina indices repetidos\n included_indices = [idx for idx in range(len(boxes)) if idx not in excluded_indices]\n #print(included_indices)\n return included_indices", "def crossValidate(cls, trinary_data, num_holdout=5, num_iter=10,\n clf_desc=ClassifierDescriptorSVM(), **kwargs):\n def dropIndices(df, indices):\n \"\"\"\n Drops the indices from the dataframe or series.\n \"\"\"\n df_result = df.copy()\n sorted_indices = list(indices)\n sorted_indices.sort()\n sorted_indices.reverse()\n for idx in sorted_indices:\n df_result = df_result.drop(idx, axis=0)\n return df_result\n #\n def getClasses(indices=None):\n \"\"\"\n Returns the list of classes for the indices.\n \"\"\"\n if indices is None:\n indices = list(trinary_data.ser_y.index)\n return list(set(trinary_data.ser_y.loc[indices]))\n #\n svm_ensemble = cls(clf_desc=clf_desc, **kwargs)\n all_classes = getClasses()\n total_correct = 0\n for _ in range(num_iter):\n # Select holdouts for each class\n holdout_idxs = []\n 
for cls in all_classes:\n cls_ser = trinary_data.ser_y[trinary_data.ser_y == cls]\n cls_idxs = list(cls_ser.index)\n if num_holdout >= len(cls_idxs):\n raise ValueError(\"Not enough samples in class %s for %d holdouts!\"\n % (cls, num_holdout))\n # Choose holdouts\n random_positions = np.random.permutation(range(len(cls_idxs)))\n [holdout_idxs.append(cls_idxs[n])\n for n in random_positions[:num_holdout]]\n # Fit\n df_X = dropIndices(trinary_data.df_X, holdout_idxs)\n ser_y = dropIndices(trinary_data.ser_y, holdout_idxs)\n svm_ensemble.fit(df_X, ser_y)\n # Evaluate\n df = pd.DataFrame(trinary_data.df_X.loc[holdout_idxs, :])\n df_pred = svm_ensemble.predict(df)\n for idx in holdout_idxs:\n true_cls = trinary_data.ser_y.loc[idx]\n total_correct += df_pred.loc[idx, true_cls]\n accuracy = total_correct/(num_holdout*num_iter*len(all_classes))\n return accuracy", "def candidate_regressions(self):\n failclass = ('not classified', 'fixed by commit')\n candidate_regressions = set()\n for label, summary in self.label_summaries.items():\n if summary.status == Status.PASS:\n continue\n\n if all(c not in failclass for c in summary.classifications):\n continue\n\n candidate_regressions.add(label)\n return candidate_regressions", "def make_train_val_test_split_inchikey_lists(train_inchikey_list,\n train_inchikey_dict,\n train_val_test_split_fractions,\n holdout_inchikey_list=None,\n splitting_type='random'):\n if not np.isclose([sum(train_val_test_split_fractions)], [1.0]):\n raise ValueError('Must specify train_val_test_split that sums to 1.0')\n\n if holdout_inchikey_list:\n # filter out those inchikeys that are in the holdout set.\n train_inchikey_list = [\n ikey for ikey in train_inchikey_list\n if ikey not in holdout_inchikey_list\n ]\n\n if splitting_type == 'random':\n return get_random_inchikeys(train_inchikey_list,\n train_val_test_split_fractions)\n else:\n # Assume that splitting_type is the name of a structure family.\n # get_inchikeys_by_family will throw an error if this is not supported.\n return get_inchikeys_by_family(\n train_inchikey_list,\n train_inchikey_dict,\n train_val_test_split_fractions,\n family_name=splitting_type,\n exclude_from_train=True)", "def evaluate_classifier(self, h, x):\n a = np.matmul(self.W, h) + np.matmul(self.U, x) + self.b\n h = np.tanh(a)\n o = np.matmul(self.V, h) + self.c\n p = self.compute_softmax(o)\n\n return a, h, o, p", "def _classifier(self, classes):\n # Initialize key variables\n pseudo = np.linalg.pinv(self.data)\n result = np.dot(pseudo, classes)\n return result", "def classify(self):\n\n if self.classifier is None:\n raise ValueError('self.classifier is None')\n if self.df is None:\n raise ValueError('self.df is None')\n if self.features is None:\n raise ValueError('self.features is None')\n\n train_set = self.df[self.df[self.label_col] != CLASSIFIER_NAN]\n test_set = self.df[self.df[self.label_col] == CLASSIFIER_NAN]\n\n test_set_timestamps = list(test_set.index.strftime('%Y-%m-%d %H:%M:%S.%f'))\n\n self.classifier.fit(\n train_set[self.features],\n train_set[self.label_col]\n )\n\n preds = self.classifier.predict(test_set[self.features])\n probs = self.classifier.predict_proba(test_set[self.features])\n\n res = []\n\n for i in range(0, len(preds)):\n probability = max(probs[i])\n res.append([test_set_timestamps[i], preds[i], probability])\n\n return res", "def housing_labels_(strat_train_set):\n logging.info(\"copy of dataset\")\n housing_labels = strat_train_set[\"median_house_value\"].copy()\n return housing_labels", "def 
select_candidates(self, X, y, candidate_mask, train_mask):\n\n return self.best_heuristic(X=X, y=y, candidate_mask=candidate_mask,\n train_mask=train_mask, classifier=self.classifier,\n n_candidates=self.n_candidates, random_state=self.pool_rng.randint(1000),\n **self.h_kwargs)", "def set_kbest_features_list(data_dict, features_list):\n \n print \" --- \"\n\n k_best = get_k_best(data_dict,features_list,len(features_list)-1)\n \n ### Sort the k_best features in descending order\n arr_features = []\n print \"sorted k_best:\"\n for key, value in sorted(k_best.iteritems(), key=lambda (k,v): (v,k), reverse=True):\n print \"%s: %s\" % (key, value)\n arr_features.append(key)\n\n print \"---\"\n poi = [\"poi\"]\n features_list = poi\n features_list += arr_features\n\n print \"---\"\n print \"features List: \"\n print features_list\n print(\"Number of features in k_best: \",len(features_list))\n print \" \"\n\n return features_list", "def get_coords_naked_sets(ns, candidates, dicts, row_or_col=0, setlength=2):\n c = candidates\n rm, cm, sm = dicts\n group = []\n out = {}\n ns_sorted = {el[0]:el[1] for el in sorted(ns.items(), key=lambda x: x[0])}\n for k, g in groupby(ns_sorted, lambda x: x[row_or_col]):\n coords = list(g)\n key = tuple(ns[coords[0]])\n if len(coords) > 1: #if list has only one element, there are no naked sets for that key\n if len(cm[k] if row_or_col == 1 else rm[k]) > setlength: #check missing row or col dict to see if more than given setlength is missing\n out[key] = [coord for coord in c.keys() if coord[row_or_col] == k and coord not in coords]\n return out", "def vulnerability_ids(self):\n ids = set()\n for clf in self.values():\n for vuln in (clf.vulnerability_id_cve(), clf.vulnerability_id_microsoft()):\n if vuln:\n ids.add(vuln)\n return ids", "def _computedivergentset(repo):\n divergent = set()\n obsstore = repo.obsstore\n newermap = {}\n for ctx in repo.set('(not public()) - obsolete()'):\n mark = obsstore.precursors.get(ctx.node(), ())\n toprocess = set(mark)\n while toprocess:\n prec = toprocess.pop()[0]\n if prec not in newermap:\n successorssets(repo, prec, newermap)\n newer = [n for n in newermap[prec] if n]\n if len(newer) > 1:\n divergent.add(ctx.rev())\n break\n toprocess.update(obsstore.precursors.get(prec, ()))\n return divergent", "def most_informative_features(self, n=100):\n\t# The set of (fname, fval) pairs used by this classifier.\n\tfeatures = set()\n\t# The max & min probability associated w/ each (fname, fval)\n\t# pair. 
Maps (fname,fval) -> float.\n\tmaxprob = defaultdict(lambda: 0.0)\n\tminprob = defaultdict(lambda: 1.0)\n\n\tfor (label, fname), probdist in self._feature_probdist.items():\n\t\tfor fval in probdist.samples():\n\t\t\tfeature = (fname, fval)\n\t\t\tfeatures.add( feature )\n\t\t\tp = probdist.prob(fval)\n\t\t\tprint p\n\t\t\tmaxprob[feature] = max(p, maxprob[feature])\n\t\t\tminprob[feature] = min(p, minprob[feature])\n\t\t\tif minprob[feature] == 0:\n\t\t\t\tfeatures.discard(feature)\n\t\t\t# print maxprob\n\t\t\t# print minprob\n\n\n\t# Convert features to a list, & sort it by how informative\n\t# features are.\n\tfeatures = sorted(features,\n\t key=lambda feature_: minprob[feature_]/maxprob[feature_])\n\treturn features[:n]", "def get_classification_training_data() -> Iterable[Tuple[str, Dict[str, Any]]]:\n return (_create_training_entry(*pair) for pair in TRAINING_DATA) # type: ignore", "def _plot_good_pred_whitout_reject(self, test: Set, title=None, fig_size=None):\r\n if fig_size is not None:\r\n fig = plt.figure(figsize=fig_size)\r\n else:\r\n fig = plt.figure()\r\n ax = fig.add_subplot(111)\r\n goodclassified_index = []\r\n for idx_preds in range(self.preds.shape[1]):\r\n new_good_index = []\r\n for idx in range(self.preds.shape[0]):\r\n if test.labels[idx] == self.preds[idx, idx_preds]:\r\n new_good_index.append(idx)\r\n if new_good_index:\r\n ax.scatter(test.features[new_good_index, 0], self.features[new_good_index, 1],\r\n label='Good classified top{0}'.format(int(idx_preds + 1)))\r\n goodclassified_index += new_good_index\r\n misclassified = [idx for idx in range(self.preds.shape[0]) if idx not in goodclassified_index]\r\n if misclassified:\r\n ax.scatter(test.features[misclassified, 0], test.features[misclassified, 1],\r\n label='Misclassified', marker='x', c='red')\r\n box = ax.get_position()\r\n ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])\r\n # Put a legend to the right of the current axis\r\n ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))\r\n if title is not None:\r\n ax.set_title(title)\r\n plt.show()", "def evaluate_features(trainFeatures, testFeatures):\n classifier = NaiveBayesClassifier.train(trainFeatures)\n\n #initiates referenceSets and testSets\n referenceSets = collections.defaultdict(set)\n testSets = collections.defaultdict(set)\n\n for i, (features, label) in enumerate(testFeatures):\n referenceSets[label].add(i)\n predicted = classifier.classify(features)\n testSets[predicted].add(i)\n\n print 'train on %d instances, test on %d instances' % (len(trainFeatures), len(testFeatures))\n print 'accuracy:', nltk.classify.util.accuracy(classifier, testFeatures)\n print 'pos precision:', precision(referenceSets['pos'], testSets['pos'])\n print 'pos recall:', recall(referenceSets['pos'], testSets['pos'])\n print 'neg precision:',precision(referenceSets['neg'], testSets['neg'])\n print 'neg recall:', recall(referenceSets['neg'], testSets['neg'])\n classifier.show_most_informative_features(50)", "def single_supervise_evaluation(clf, x_train, y_train, x_test, y_test, r1_norm_step=0.05, r2_norm_step=0.05):\n # fit model\n clf.fit(x_train, y_train)\n\n # calc accuracy\n y_train_pred = clf.predict(x_train)\n accuracy_train = balanced_accuracy_score(y_true=y_train, y_pred=y_train_pred)\n print(f\"Balanced accuracy on the training set: {accuracy_train:.3f}\")\n y_test_pred = clf.predict(x_test)\n accuracy_test = balanced_accuracy_score(y_true=y_test, y_pred=y_test_pred)\n print(f\"Balanced accuracy on the hold-out set: {accuracy_test:.3f}\")\n\n # get 
confusion matrix\n y_pred = clf.predict(x_test)\n cmat = confusion_matrix(y_test, y_pred)\n\n # normalize confusion matrix\n r1_cmat = _r1_normalize(cmat)\n r2_cmat = _r2_normalize(cmat)\n m1 = np.max(r1_cmat)\n if np.isnan(m1):\n m1 = 1.0\n m2 = np.max(r2_cmat)\n\n cluster_map = {}\n while (len(cluster_map) == 0) and (m1 > 0) and (m2 > 0):\n m1 -= r1_norm_step\n m2 -= r2_norm_step\n\n # final binary matrix to calculate which clusters need to be merged\n judge = np.maximum.reduce([(r1_cmat > m1), (r2_cmat > m2)])\n if judge.sum() > 0:\n rows, cols = np.where(judge)\n edges = zip(rows.tolist(), cols.tolist())\n g = nx.Graph()\n g.add_edges_from(edges)\n for comp in nx.connected_components(g):\n to_label = comp.pop()\n for remain in comp:\n cluster_map[remain] = to_label\n return clf, accuracy_test, cluster_map, cmat, r1_cmat, r2_cmat", "def addedMetabolismGeneDuplicatedEnzymePairs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> Set[Tuple[Enzyme, Enzyme]]: \n # get added metabolism\n addedMetabolismEnzymes = self.addedMetabolismEnzymes(majorityPercentageCoreMetabolism).getEnzymes()\n \n # get gene-duplicated enzyme pairs\n geneDuplicated = self.childClade.geneDuplicatedEnzymePairs(majorityPercentageCoreMetabolism)\n \n # filter gene-duplicated enzyme pairs for the ones with both enzymes in the added metabolism\n geneDuplicatedAdded = set()\n \n for enzymeTuple in geneDuplicated:\n if enzymeTuple[0] in addedMetabolismEnzymes and enzymeTuple[1] in addedMetabolismEnzymes:\n geneDuplicatedAdded.add(enzymeTuple)\n \n return geneDuplicatedAdded", "def __call__(self, uind: Set[Ind]) -> FrozenSet[Ind]:\n ks = len(uind)\n try:\n start_arity, G = self.generate_graph(uind)\n except StopIteration:\n return frozenset()\n\n _logger.info('Looking for hypercliques')\n H = find_hypercliques(G)\n _logger.info('Validating %d hypercliques', len(H))\n I = self._validate_all(H)\n\n result = set(filter(lambda i: len(i) == 1, I))\n for m in range(start_arity + 1, ks):\n _logger.info('Iteration %d (%d candidates)', m, len(I))\n _logger.info('Iteration %d (%d positives)', m, len(result))\n C = set()\n for c in I:\n if c.valid and len(c) >= m - 1:\n result.add(c)\n if not c.valid and len(c) >= m:\n C.add(c)\n k_ary = gen_k_ary_ind_from_cliques(m, C)\n _logger.info('%d %d-ary generated from %d', len(k_ary), m, len(C))\n Gm = Graph()\n Gm.E = self._validate_all(k_ary)\n Gm.E = set(filter(lambda e: e.valid, Gm.E))\n if Gm.empty() or True:\n return frozenset(map(Edge.to_ind, result))\n result.update(gen_sub_inds(m, Gm, result))\n Gm.V = frozenset(reduce(frozenset.union, map(lambda e: e.set, Gm.E), frozenset()))\n H = find_hypercliques(Gm)\n I = self._validate_all(H)\n\n # Convert candidates back to Ind\n return frozenset(map(Edge.to_ind, result))", "def prune_hypergrid(hypergrid: np.ndarray, tested_points: np.ndarray) -> np.ndarray:\n if len(tested_points) == 0:\n return hypergrid\n\n mask = [not_in_array(potential_point, tested_points) for potential_point in hypergrid]\n return hypergrid[mask]", "def preceptron(X,Y,g,epochs=1000):\n w = g\n for epoch in range(epochs):\n H = np.sign(X.dot(w))\n missclassified = np.where(Y != H)[0] #obtain a list of missclassified point\n if len(missclassified) == 0:\n break\n mc_sample = np.random.choice(missclassified) #pick one missclassified point\n w = w + (Y[mc_sample]*X[mc_sample])\n return w, epoch + 1", "def find_naked_sets(candidates, dicts, setlength=2):\n c = candidates\n ns = build_possible_naked_sets(c, setlength=setlength)\n cpns = 
build_coords_per_naked_set(ns)\n ns = update_naked_set(ns, cpns)\n rows = get_coords_naked_sets(ns, candidates, dicts, row_or_col=0, setlength=setlength)\n cols = get_coords_naked_sets(ns, candidates, dicts, row_or_col=1, setlength=setlength)\n return rows, cols", "def _classifier(self, test_set):\r\n return self._euclidian_classifier(test_set.features, test_set.targets)", "def one_of_k_encoding_unk(self, x, allowable_set):\n if x not in allowable_set:\n x = allowable_set[-1]\n return list(map(lambda s: x == s, allowable_set))", "def classify( self, data):\n\n\t\t\"*** YOUR CODE HERE ***\"\n\t\tguesses = np.zeros(len(data))\n\n\t\tfor k in range(len(self.classifiers)):\n\t\t\tclassifier = self.classifiers[k]\n\t\t\tguesses += np.dot(classifier.classify(data),self.alphas[k])\n\t\t\n\t\tguesses = np.sign(guesses)\n\t\tguesses[np.where(guesses == 0)[0]] = np.repeat(np.expand_dims(np.random.choice([-1,1]),axis=0),len(np.where(guesses == 0)[0]),axis=0)\n\t\treturn guesses\n\t\t# util.raiseNotDefined()", "def neighbors(node, test_set):\r\n result = set()\r\n for neighbor in node.neighbors:\r\n if neighbor in test_set:\r\n result.add(neighbor)\r\n return result", "def analyze_similarities():\r\n print('Total number of candidate pairs:', len(pairs))\r\n print(f'\\nNumber of actual item pairs in the train set: {pairs[\"true_match\"].sum()}\\n')\r\n\r\n for feature in ['text_score', 'image_score', 'txt_img_score', 'words_ratio', 'txt_img_words']:\r\n\r\n # Check distribution of True and False predictions for various similarity scores\r\n print('-' * 50)\r\n print(f'\\nDistribution of True/False predictions for {feature}')\r\n for thr in (0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95):\r\n print('-' * 50)\r\n print(f'Similarity score over {thr}')\r\n pairs_sample = pairs[pairs[feature] >= thr]\r\n print(f'Number of similar item pairs: {len(pairs_sample)}')\r\n print(pairs_sample['true_match'].value_counts(normalize=True))\r\n\r\n # Check if identical phash can be used to improve the accuracy\r\n same_phash = pairs[pairs['phash_match'] == 1]\r\n different_phash = pairs[pairs['phash_match'] == 0]\r\n\r\n print('\\nFor item pairs with the same phash:')\r\n print(same_phash['true_match'].value_counts(normalize=True))\r\n print('Number of item pairs in this subset:', len(same_phash))\r\n\r\n print('\\nFor item pairs with different phash:')\r\n print(different_phash['true_match'].value_counts(normalize=True))\r\n print('Number of item pairs in this subset:', len(different_phash))\r\n\r\n # Check if numbers in titles can be used to improve the accuracy\r\n same_numbers = pairs[pairs['nums_match'] == 1]\r\n different_numbers = pairs[pairs['nums_match'] == 0]\r\n\r\n print('\\nFor item pairs with the same numbers:')\r\n print(same_numbers['true_match'].value_counts(normalize=True))\r\n print('Number of item pairs in this subset:', len(same_numbers))\r\n\r\n print('\\nFor item pairs with different numbers:')\r\n print(different_numbers['true_match'].value_counts(normalize=True))\r\n print('Number of item pairs in this subset:', len(different_numbers))", "def featureLikelihood():\r\n\r\n\t# Lists\r\n\twords = []\r\n\tfinalWords = []\r\n\tposWords = []\r\n\tnegWords = []\r\n\tfeatureListPos = []\r\n\tfeatureListNeg = []\r\n\r\n\t# Counters\r\n\tposCount = 0.0\r\n\tnegCount = 0.0\r\n\r\n\t# Temporary Lists for formating\r\n\tfeatureListPosFormat = []\r\n\tfeatureListNegFormat = []\r\n\r\n\t# Strings\r\n\ts = \" \"\r\n\tposString = \"\"\r\n\tnegString = \"\"\r\n\r\n\tseen = set()\r\n\r\n\t# Add all words to words 
list and count positive & negative occurences\r\n\tfor item in trainingData:\r\n\t\tfor word in item[2]:\r\n\t\t\twords.append(word)\r\n\t\tif item[1] == '0':\r\n\t\t\tfor word in item[2]:\r\n\t\t\t\tposWords.append(word)\r\n\t\t\t\tposCount += 1\r\n\t\tif item[1] == '1':\r\n\t\t\tfor word in item[2]:\r\n\t\t\t\tnegWords.append(word)\r\n\t\t\t\tnegCount +=1\r\n\r\n\t# Adds all values into finalWords, skipping duplicates\r\n\tfor values in words:\r\n\t\tif values not in seen:\r\n\t\t\tfinalWords.append(values)\r\n\t\t\tseen.add(values)\r\n\t\t\t\r\n\t\t\t\r\n\t\t\t\r\n\t# Add positive and negative counts to feature list and dictionaries\r\n\tfor word in finalWords:\r\n\t\ts += '{:12s}'.format(word)\r\n\t\t\r\n\t\tpCount = 0\r\n\t\tnCount = 0\r\n\t\t\r\n\t\tfor row in trainingData:\r\n\t\t\tif row[1] == '0':\r\n\t\t\t\tif word in row[2]: pCount += 1\r\n\t\t\tif row[1] == '1':\r\n\t\t\t\tif word in row[2]: nCount += 1\r\n\t\t\t\t\r\n\t\tfeatureListPos.append((pCount + 1) / (posCount + 9))\r\n\t\tclass0Dict[word] = ((pCount + 1) / (posCount + 9))\r\n\t\t\r\n\t\tfeatureListNeg.append((nCount + 1) / (negCount + 9))\r\n\t\tclass1Dict[word] = ((nCount + 1) / (negCount + 9))\r\n\r\n\t\t\r\n\t\t\r\n\t# Formatting for the positive feature list\r\n\tfor item in featureListPos:\r\n\t\tfeatureListPosFormat.append('{0:.5f}'.format(item))\r\n\t\t\r\n\tfor item in featureListPosFormat:\r\n\t\tposString += '{:12s}'.format(item)\r\n\r\n\t# Formatting for the negative feature list\r\n\tfor item in featureListNeg:\r\n\t\tfeatureListNegFormat.append('{0:.5f}'.format(item))\r\n\t\t\r\n\tfor item in featureListNegFormat:\r\n\t\tnegString += '{:12s}'.format(item)\r\n\r\n\r\n\t\t\r\n\treturn(s, posString, negString)", "def geneDuplicatedEnzymePairs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> Set[Tuple[Enzyme, Enzyme]]:\n \n \n enzymes = self.coreMetabolismEnzymes(majorityPercentageCoreMetabolism).getEnzymes()\n geneDuplicationModel = SimpleGeneDuplication\n \n geneIdToEnzyme = dict()\n for enzyme in enzymes:\n geneIdToEnzyme[enzyme.geneID] = enzyme\n \n enzymePairs = geneDuplicationModel.getEnzymePairs(enzymes, ignoreDuplicatesOutsideSet = True, geneIdToEnzyme = geneIdToEnzyme, preCalculatedEnzymes = None)\n \n return enzymePairs", "def fit(self, data, X):\r\n self.centroids = {}\r\n\r\n for i in range(self.k):\r\n self.centroids[i] = data[i]\r\n\r\n for i in range(self.max_iter):\r\n self.classifications = {}\r\n\r\n for i in range(self.k):\r\n self.classifications[i] = []\r\n\r\n for featureset in X:\r\n distances = [np.linalg.norm(featureset-self.centroids[centroid]) for centroid in self.centroids]\r\n classification = distances.index(min(distances))\r\n self.classifications[classification].append(featureset)\r\n\r\n prev_centroids = dict(self.centroids)\r\n\r\n for classification in self.classifications:\r\n self.centroids[classification] = np.average(self.classifications[classification], axis=0)\r\n\r\n optimized = True\r\n\r\n for c in self.centroids:\r\n original_centroid = prev_centroids[c]\r\n current_centroid = self.centroids[c]\r\n if np.sum((current_centroid-original_centroid)/original_centroid*100.0) > self.tol:\r\n print(np.sum((current_centroid-original_centroid)/original_centroid*100.0))\r\n optimized = False\r\n\r\n if optimized:\r\n break", "def power_set(sett):\n\n powerset_so_far = {frozenset()}\n\n for element in sett:\n set.update(powerset_so_far,\\\n extend_all(element, powerset_so_far))\n \n return powerset_so_far", "def 
represent_underepresented_classes(x_data, y_data, represent):\r\n\r\n\r\n\r\n # Sort the x and y data using merge sort\r\n ind = np.argsort(y_data, kind=\"mergesort\")\r\n tmpx = []\r\n tmpy = []\r\n for i in range(len(ind)):\r\n tmpx.append(x_data[ind[i]])\r\n tmpy.append(y_data[ind[i]])\r\n x_data = np.asarray(tmpx)\r\n y_data = np.asarray(tmpy)\r\n del tmpx\r\n del tmpy\r\n\r\n # extract the classes and sort the data under specific classes\r\n classes = []\r\n data = [[]]\r\n for i in range(len(y_data)):\r\n if y_data[i] not in classes:\r\n classes.append(y_data[i])\r\n if data != [[]]:\r\n data.append([x_data[i]])\r\n else:\r\n data[0] = [x_data[i]]\r\n else:\r\n ind = np.where(classes==y_data[i])\r\n data[ind[0][0]].append(x_data[i])\r\n\r\n # find the most occurring class\r\n max = 0\r\n for i in range(len(classes)):\r\n if max < len(data[i]):\r\n max = len(data[i])\r\n\r\n # duplicate underrepresented classes\r\n for da in range(len(data)):\r\n if len(data[da]) != max:\r\n for i in range(int(max*represent) - len(data[da])):\r\n data[da].append(data[da][i])\r\n\r\n # create x and y data with of the same size as each other\r\n x_data = []\r\n y_data = []\r\n for c in range(len(classes)):\r\n for d in range(len(data[c])):\r\n x_data.append(data[c][d])\r\n y_data.append(classes[c])\r\n\r\n return np.asarray(x_data), np.asarray(y_data)", "def get_numerical_features(self, categorical_feats):\n common_feats = self.get_common_features()\n return list(set(common_feats).difference(categorical_feats))", "def _candidate_closure(self, p_map, v_note):\n policies = self.v_policy_map[v_note]\n candidates = OrderedSet()\n for p in policies:\n candidates = candidates.union(p_map.unassigned_actors(p))\n return candidates" ]
[ "0.7052224", "0.6119313", "0.54236513", "0.5231023", "0.5223641", "0.51209354", "0.510333", "0.5085148", "0.5023613", "0.5006669", "0.49687526", "0.4961643", "0.49522075", "0.49459502", "0.49397606", "0.49372053", "0.49112162", "0.48705956", "0.48250222", "0.48217845", "0.4814562", "0.47884578", "0.4761955", "0.47604293", "0.47553012", "0.47456226", "0.4738816", "0.47274488", "0.471881", "0.47129014", "0.47006363", "0.46991393", "0.46923232", "0.46685797", "0.46681798", "0.46679106", "0.46529192", "0.46529192", "0.46512812", "0.4651079", "0.46412164", "0.4640399", "0.46362895", "0.46297657", "0.46159172", "0.46146265", "0.46146265", "0.4604444", "0.4603854", "0.46008995", "0.45946887", "0.45925802", "0.4586674", "0.45826092", "0.4582602", "0.45813298", "0.4580852", "0.4576477", "0.4568396", "0.45660943", "0.45652348", "0.45651713", "0.4562815", "0.45592728", "0.4559217", "0.45568442", "0.45517537", "0.45511213", "0.45475376", "0.4534778", "0.4533024", "0.45305932", "0.4528701", "0.45279285", "0.45207623", "0.4516502", "0.45157045", "0.4511768", "0.4507974", "0.45046547", "0.45006913", "0.45004487", "0.44977948", "0.4497384", "0.44965088", "0.44953668", "0.44934082", "0.44921374", "0.44890794", "0.44833073", "0.44812936", "0.4480047", "0.44788024", "0.44784057", "0.4476296", "0.44751674", "0.44745648", "0.44645393", "0.44616202", "0.4459611" ]
0.85402024
0
Given an overall classifier H, a list of all training points, a dictionary mapping classifiers to the training points they misclassify, and a mistake tolerance (the maximum number of allowed misclassifications), returns False if H misclassifies more points than the tolerance allows, otherwise True. H is represented as a list of (classifier, voting_power) tuples.
def is_good_enough(H, training_points, classifier_to_misclassified, mistake_tolerance=0):
    misclassified = get_overall_misclassifications(H, training_points, classifier_to_misclassified)
    if len(misclassified) > mistake_tolerance:
        return False
    return True
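A minimal usage sketch for the function above, assuming a toy get_overall_misclassifications helper; the stub and sample data below are illustrative assumptions, not part of this dataset row:

# Illustrative stub: a point counts as misclassified when the weighted vote of
# classifiers that get it wrong outweighs (or ties) the vote of those that get it right.
def get_overall_misclassifications(H, training_points, classifier_to_misclassified):
    misclassified = set()
    for point in training_points:
        score = 0
        for classifier, voting_power in H:
            if point in classifier_to_misclassified[classifier]:
                score -= voting_power
            else:
                score += voting_power
        if score <= 0:
            misclassified.add(point)
    return misclassified

# Toy ensemble: two weak classifiers with their voting powers.
H = [("h1", 1.0), ("h2", 0.5)]
training_points = ["A", "B", "C"]
classifier_to_misclassified = {"h1": ["B"], "h2": ["A", "B"]}

# Only "B" loses the weighted vote, so one point is misclassified overall:
print(is_good_enough(H, training_points, classifier_to_misclassified))                       # False (1 > 0)
print(is_good_enough(H, training_points, classifier_to_misclassified, mistake_tolerance=1))  # True  (1 <= 1)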
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_overall_misclassifications(H, training_points, classifier_to_misclassified):\n misclassified = []\n\n for p in training_points:\n score = 0\n for tup in H:\n c = tup[0]\n voting_power = tup[1]\n if p in classifier_to_misclassified[c]:\n score -= voting_power\n else:\n score += voting_power\n if score <= 0:\n misclassified.append(p)\n return set(misclassified)", "def adaboost(training_points, classifier_to_misclassified,\n use_smallest_error=True, mistake_tolerance=0, max_rounds=INF):\n point_to_weight = initialize_weights(training_points)\n H = [] # (classifier, voting_power)\n\n while True:\n # exit conditions\n if is_good_enough(H, training_points, classifier_to_misclassified, mistake_tolerance):\n break\n if max_rounds == 0:\n break\n classifier_to_error_rate = calculate_error_rates(point_to_weight, classifier_to_misclassified)\n best_classifier = None\n try:\n best_classifier = pick_best_classifier(classifier_to_error_rate, use_smallest_error)\n except NoGoodClassifiersError:\n break\n\n max_rounds -= 1\n error_rate = classifier_to_error_rate[best_classifier]\n\n H.append((best_classifier, calculate_voting_power(error_rate)))\n\n point_to_weight = update_weights(point_to_weight, classifier_to_misclassified[best_classifier], error_rate)\n return H", "def misclassified_training_points(svm):\n wrong = []\n for point in svm.training_points:\n if point.classification is not classify(svm, point):\n wrong.append(point)\n return set(wrong)", "def crossValidate(cls, trinary_data, num_holdout=5, num_iter=10,\n clf_desc=ClassifierDescriptorSVM(), **kwargs):\n def dropIndices(df, indices):\n \"\"\"\n Drops the indices from the dataframe or series.\n \"\"\"\n df_result = df.copy()\n sorted_indices = list(indices)\n sorted_indices.sort()\n sorted_indices.reverse()\n for idx in sorted_indices:\n df_result = df_result.drop(idx, axis=0)\n return df_result\n #\n def getClasses(indices=None):\n \"\"\"\n Returns the list of classes for the indices.\n \"\"\"\n if indices is None:\n indices = list(trinary_data.ser_y.index)\n return list(set(trinary_data.ser_y.loc[indices]))\n #\n svm_ensemble = cls(clf_desc=clf_desc, **kwargs)\n all_classes = getClasses()\n total_correct = 0\n for _ in range(num_iter):\n # Select holdouts for each class\n holdout_idxs = []\n for cls in all_classes:\n cls_ser = trinary_data.ser_y[trinary_data.ser_y == cls]\n cls_idxs = list(cls_ser.index)\n if num_holdout >= len(cls_idxs):\n raise ValueError(\"Not enough samples in class %s for %d holdouts!\"\n % (cls, num_holdout))\n # Choose holdouts\n random_positions = np.random.permutation(range(len(cls_idxs)))\n [holdout_idxs.append(cls_idxs[n])\n for n in random_positions[:num_holdout]]\n # Fit\n df_X = dropIndices(trinary_data.df_X, holdout_idxs)\n ser_y = dropIndices(trinary_data.ser_y, holdout_idxs)\n svm_ensemble.fit(df_X, ser_y)\n # Evaluate\n df = pd.DataFrame(trinary_data.df_X.loc[holdout_idxs, :])\n df_pred = svm_ensemble.predict(df)\n for idx in holdout_idxs:\n true_cls = trinary_data.ser_y.loc[idx]\n total_correct += df_pred.loc[idx, true_cls]\n accuracy = total_correct/(num_holdout*num_iter*len(all_classes))\n return accuracy", "def validate_holdout(Xs, ys, X_holdout, y_holdout, non_bleeding_features, features_dir,\n scorer_type, feat_indices, result_string, learning_rate_string, features):\n # define folder to save the classifier and create it if not existing\n parent_folder = \"%s/data/fnc-1/mlp_models/\" % (path.dirname(path.dirname(path.abspath(__file__))))\n\n # create the new save folder\n save_folder = 
get_save_folder(parent_folder, scorer_type+\"_new\")\n\n # only pass a save folder if the classifier should be saved\n best_clf = esitmator_definitions.get_estimator(scorer_type, save_folder=save_folder)\n\n # stack all the feature vectors of all the folds\n X_train = np.vstack(tuple([Xs[i] for i in range(10)]))\n y_train = np.hstack(tuple([ys[i] for i in range(10)]))\n\n # concat non-bleeding features\n X_train, X_holdout, feat_indices_holdout = concat_non_bleeding_features(\n X_train, X_holdout,\n non_bleeding_features, features_dir, 'holdout')\n\n # test for oversampling: fits the current classifier, oversampled with a given\n # method and checks the score on the holdout set\n use_over_sampling = False\n if use_over_sampling == True:\n from imblearn.over_sampling import SMOTE\n kind = ['regular', 'borderline1', 'borderline2', 'svm']\n for m in kind:\n sm = SMOTE(kind=m)\n X_res, y_res = sm.fit_sample(X_train, y_train)\n best_clf.fit(X_res, y_res)\n y_predicted = best_clf.predict(X_holdout)\n predicted = [LABELS[int(a)] for a in y_predicted]\n actual = [LABELS[int(a)] for a in y_holdout]\n fold_score, _ = score_submission(actual, predicted)\n max_fold_score, _ = score_submission(actual, actual)\n score = fold_score / max_fold_score\n print(\"Score \" + m + \":\" + str(score))\n\n\n #Taken from Benjamins LSTM\n loss_monitor_file_dir = \"%s/data/fnc-1/model_results/loss_results/\" % (\n path.dirname(path.dirname(path.abspath(__file__))))\n loss_filename = loss_monitor_file_dir + str(datetime.datetime.now().strftime(\"%Y-%m-%d_%H-%M\")) + \".txt\"\n # fit the classifier\n if 'f_ext' in scorer_type:\n append_to_loss_monitor_file(\"\\n\\nFOLD holdout and classifier: \" + scorer_type + \"\\n\", loss_filename)\n append_to_loss_monitor_file(str(datetime.datetime.now()).split('.')[0], loss_filename)\n best_clf.fit(X_train, y_train, X_holdout, np.array(y_holdout), 'holdout', loss_filename)\n else:\n best_clf.fit(X_train, y_train)\n\n # predict labels\n y_predicted = best_clf.predict(X_holdout)\n predicted = [LABELS[int(a)] for a in y_predicted]\n actual = [LABELS[int(a)] for a in y_holdout]\n\n # calc FNC score\n fold_score, cm = score_submission(actual, predicted)\n max_fold_score, _ = score_submission(actual, actual)\n score = fold_score / max_fold_score\n\n # calc accuracy for related/unrelated and stances\n accuracy_stance = score_calculation.get_accuracy(y_predicted, y_holdout, stance=True)\n accuracy_related = score_calculation.get_accuracy(y_predicted, y_holdout, stance=False)\n f1_stance = score_calculation.get_f1score(y_predicted, y_holdout, stance=True)\n f1_related = score_calculation.get_f1score(y_predicted, y_holdout, stance=False)\n\n # prepare printout for final results of holdout set\n printout = printout_manager.get_holdout_printout(save_folder, accuracy_related, accuracy_stance, f1_related, f1_stance, score)\n printout += printout_manager.calculate_confusion_matrix(cm)\n print(printout) # print holdout results\n result_string += printout + \"\\n\"# add results to string that is going to be saved into a file\n\n result_file_folder = \"%s\" % (path.dirname(path.dirname(path.abspath(__file__))))\n printout_manager.save_file(result_string, result_file_folder + \"/fnc_results_holdout.txt\", \"a+\")\n\n #aligned printout for ablation:\n summary = printout_manager.get_holdout_ablation_printout(features, score,f1_stance,save_folder)\n printout_manager.save_file(summary, result_file_folder + \"/fnc_results_holdout_summary.txt\", \"a+\")\n\n # test saving and restoring model\n #filename = 
scorer_type + \".sav\"\n #save_model(best_clf, save_folder,filename)\n #load_clf = load_model(parent_folder + scorer_type + \"_new_0/\", filename) # the 0th folder should always exist\n #print_score_from_restored_model(load_clf, X_holdout, y_holdout)\n\n # add to special file that shows learning rate and loss of optimizer\n if isinstance(best_clf, MultiThreadingFeedForwardMLP):\n learning_rate_string += best_clf.get_learning_rates('holdout') + \"\\n\"\n\n # print feature importances\n if scorer_type == 'randomforest':\n result_file_folder = \"%s\" % (path.dirname(path.dirname(path.abspath(__file__))))\n importances = best_clf.feature_importances_\n std = np.std([tree.feature_importances_ for tree in best_clf.estimators_],\n axis=0)\n indices = np.argsort(importances)[::-1]\n feat_indices.append(feat_indices_holdout)\n\n feat_importance_string = str(feat_indices) + \"\\n\"\n for i in indices:\n feat_importance_string += str(i) + \";\" + str(importances[i]) + \";\" + str(std[i]) + \"\\n\"\n\n # save feature importances as file\n printout_manager.save_file(feat_importance_string, result_file_folder + \"/feat_importance_rf.txt\", \"a+\")\n\n return result_string, learning_rate_string", "def evaluate_SURF(x,y,NN,feature,inst,data,multiclass_map,maxInst):\r\n diff = 0\r\n if not data.discretePhenotype: #if continuous phenotype\r\n same_class_bound=data.phenSD #boundary to determine similarity between classes for continuous attributes\r\n \r\n if data.attributeInfo[feature][0]: #Continuous Attribute\r\n #determining boundaries for continuous attributes\r\n min_bound=data.attributeInfo[feature][1][0]\r\n max_bound=data.attributeInfo[feature][1][1]\r\n \r\n diff_hit=0 #initializing the score to 0\r\n diff_miss=0\r\n \r\n count_hit=0\r\n count_miss=0\r\n \r\n if data.discretePhenotype:\r\n if len(data.phenotypeList) > 2: #multiclass endpoint\r\n class_Store = {}\r\n missClassPSum = 0\r\n for each in multiclass_map:\r\n if each != y[inst]: #Store all miss classes\r\n class_Store[each] = [0,0] #stores cout_miss and diff_miss\r\n missClassPSum += multiclass_map[each]\r\n \r\n for i in range(len(NN)): #for all nearest neighbors\r\n if x[inst][feature]!=data.labelMissingData and x[NN[i]][feature]!=data.labelMissingData: # add appropriate normalization.\r\n if y[inst]==y[NN[i]]: #HIT\r\n count_hit+=1\r\n if x[inst][feature]!=x[NN[i]][feature]:\r\n if data.attributeInfo[feature][0]: #Continuous Attribute\r\n diff_hit-=abs(x[inst][feature]-x[NN[i]][feature])/(max_bound-min_bound)\r\n else:#Discrete\r\n diff_hit-=1\r\n else: #MISS\r\n for missClass in class_Store:\r\n if y[NN[i]] == missClass:\r\n class_Store[missClass][0] += 1\r\n if x[inst][feature]!=x[NN[i]][feature]:\r\n if data.attributeInfo[feature][0]: #Continuous Attribute\r\n class_Store[missClass][1]+=abs(x[inst][feature]-x[NN[i]][feature])/(max_bound-min_bound)\r\n else:#Discrete\r\n class_Store[missClass][1]+=1\r\n \r\n #Corrects for both multiple classes, as well as missing data.\r\n missSum = 0 \r\n for each in class_Store:\r\n missSum += class_Store[each][0]\r\n missAverage = missSum/float(len(class_Store))\r\n \r\n hit_proportion=count_hit/float(len(NN)) #Correcting for Missing Data.\r\n for each in class_Store:\r\n diff_miss += (multiclass_map[each]/float(missClassPSum))*class_Store[each][1]\r\n \r\n diff = diff_miss*hit_proportion\r\n miss_proportion=missAverage/float(len(NN))\r\n diff += diff_hit*miss_proportion\r\n \r\n else: #Binary Class Problem\r\n for i in range(len(NN)): #for all nearest neighbors\r\n if 
x[inst][feature]!=data.labelMissingData and x[NN[i]][feature]!=data.labelMissingData: # add appropriate normalization.\r\n \r\n if y[inst]==y[NN[i]]: #HIT\r\n count_hit+=1\r\n if x[inst][feature]!=x[NN[i]][feature]:\r\n if data.attributeInfo[feature][0]: #Continuous Attribute\r\n diff_hit-=abs(x[inst][feature]-x[NN[i]][feature])/(max_bound-min_bound)\r\n else:#Discrete\r\n diff_hit-=1\r\n else: #MISS\r\n count_miss+=1\r\n if x[inst][feature]!=x[NN[i]][feature]:\r\n if data.attributeInfo[feature][0]: #Continuous Attribute\r\n diff_miss+=abs(x[inst][feature]-x[NN[i]][feature])/(max_bound-min_bound)\r\n else:#Discrete\r\n diff_miss+=1 \r\n\r\n #Take hit/miss inbalance into account (coming from missing data)\r\n hit_proportion=count_hit/float(len(NN))\r\n miss_proportion=count_miss/float(len(NN))\r\n \r\n diff=diff_hit*miss_proportion + diff_miss*hit_proportion #applying weighting scheme to balance the scores \r\n \r\n else: #continuous endpoint\r\n for i in range(len(NN)): #for all nearest neighbors\r\n if x[inst][feature]!=data.labelMissingData and x[NN[i]][feature]!=data.labelMissingData: # add appropriate normalization.\r\n \r\n if abs(y[inst]-y[NN[i]])<same_class_bound: #HIT\r\n count_hit+=1 \r\n if x[inst][feature]!=x[NN[i]][feature]:\r\n if data.attributeInfo[feature][0]: #Continuous Attribute\r\n diff_hit-=abs(x[inst][feature]-x[NN[i]][feature])/(max_bound-min_bound)\r\n else:#Discrete\r\n diff_hit-=1\r\n else: #MISS\r\n count_miss+=1\r\n if x[inst][feature]!=x[NN[i]][feature]:\r\n if data.attributeInfo[feature][0]: #Continuous Attribute\r\n diff_miss+=abs(x[inst][feature]-x[NN[i]][feature])/(max_bound-min_bound)\r\n else:#Discrete\r\n diff_miss+=1\r\n\r\n #Take hit/miss inbalance into account (coming from missing data, or inability to find enough continuous neighbors)\r\n hit_proportion=count_hit/float(len(NN))\r\n miss_proportion=count_miss/float(len(NN))\r\n \r\n diff=diff_hit*miss_proportion + diff_miss*hit_proportion #applying weighting scheme to balance the scores \r\n \r\n return diff", "def __test_digits(self, X, y, clf):\n self.assertEqual(len(X), len(y))\n correct = 0\n for i in xrange(len(y)):\n expected = y[i]\n prediction = clf.classify([X[i]])[0]\n if expected == prediction:\n correct += 1\n\n self.assertGreaterEqual(correct, self.TRHESH * len(y))\n return correct", "def thresholdClassify(sim_vec_dict, sim_thres):\n\n assert sim_thres >= 0.0 and sim_thres <= 1.0, sim_thres\n\n print('Similarity threshold based classification of %d record pairs' % \\\n (len(sim_vec_dict)))\n print(' Classification similarity threshold: %.3f' % (sim_thres))\n\n class_match_set = set()\n class_nonmatch_set = set()\n\n # Iterate over all record pairs\n #\n for (rec_id_tuple, sim_vec) in sim_vec_dict.items():\n # ********* Implement threshold based classification **********************\n\n pass # Add your code here\n\n # ************ End of your code *******************************************\n\n print(' Classified %d record pairs as matches and %d as non-matches' % \\\n (len(class_match_set), len(class_nonmatch_set)))\n print('')\n\n return class_match_set, class_nonmatch_set", "def cross_validate(data, k, distance_metric):\n fraction_correct = 0.00\n correctly_classified = 0\n for i, test_data in enumerate(data):\n training_data = []\n for j in range(len(data)):\n if j!=i:\n training_data.append(data[j])\n observed_classification = knn_classify_point(test_data, training_data, k, distance_metric)\n actual_classification = test_data.classification\n if observed_classification == 
actual_classification:\n correctly_classified += 1\n fraction_correct = float(correctly_classified/len(data))\n return fraction_correct", "def _validateClassification(self, trainingSet):\n wrongCount = 0.\n\n pv = []\n tv = []\n\n if self.K == 1:\n for example in trainingSet:\n Y = self.test(example)\n \n givenClass = example.label[0]\n if Y[0] < 0.5:\n chosenClass = 0\n else:\n chosenClass = 1\n \n pv.append(chosenClass)\n tv.append(givenClass)\n \n if chosenClass != givenClass:\n wrongCount += 1.\n else:\n for example in trainingSet:\n Y = self.test(example)\n \n posterior, chosenClass = max((x, i) for i, x in enumerate(Y))\n max_val, givenClass = max((x, i) for i, x in enumerate(example.label))\n \n pv.append(chosenClass)\n tv.append(givenClass)\n \t\t\t\n if chosenClass != givenClass:\n wrongCount += 1.\n \n return wrongCount/len(trainingSet), pv, tv", "def evaluate_features(X, y, clfKey):\n\n clf = {\n 'LogReg': LogisticRegression(),\n 'SDG_Mh': SGDClassifier(loss=\"modified_huber\", penalty=\"l2\", max_iter=5),\n 'SDG_Log': SGDClassifier(loss=\"log\", penalty=\"l2\", max_iter=5),\n 'Tree': RandomForestClassifier(n_estimators=1000, max_depth=5, verbose=1)\n }\n\n probabilities = cross_val_predict(clf[clfKey], X, y, cv=StratifiedKFold(n_splits=2, random_state=8),\n n_jobs=-1, method='predict_proba', verbose=2)\n predicted_indices = np.argmax(probabilities, axis=1)\n classes = np.unique(y)\n predicted = classes[predicted_indices]\n print('Log loss: {}'.format(log_loss(y, probabilities)))\n print('Accuracy: {}'.format(accuracy_score(y, predicted)))\n skplt.metrics.plot_confusion_matrix(y, predicted, normalize=True)\n plt.show()", "def evaluate_features(X, y, clf=None):\n if clf is None:\n clf = LogisticRegression()\n \n probas = cross_val_predict(clf, X, y, cv=StratifiedKFold(random_state=8), \n n_jobs=-1, method='predict_proba', verbose=2)\n pred_indices = np.argmax(probas, axis=1)\n classes = np.unique(y)\n preds = classes[pred_indices]\n print('Log loss: {}'.format(log_loss(y, probas)))\n print('Accuracy: {}'.format(accuracy_score(y, preds)))\n skplt.plot_confusion_matrix(y, preds)", "def misclassification_criterion_acc_check(instance, adv_imgs, labels, imgs):\n pred, det = instance.predict(imgs)\n acc_bool = torch.argmax(pred, dim=1).eq(labels.to(instance.device))\n acc_det_bool = acc_bool & det\n return ~acc_det_bool | misclassification_criterion(instance, adv_imgs, labels)", "def evaluate_classifier(self, clf):\n\n clf = clf.fit(self.training_data_train_x, self.training_data_train_y)\n predicted = clf.predict(self.training_data_opt_x)\n\n correct = 0\n for i in range(len(self.training_data_opt_y)):\n if predicted[i] == self.training_data_opt_y[i]:\n correct += 1\n\n accuracy = correct / len(self.training_data_opt_y)\n\n return clf, accuracy", "def validate_one_class(normal,outliers):\n print \"running a validation\"\n \n #classifier\n \n train_data = genfromtxt(normal, delimiter='\\t',skip_header=0)\n test_data = genfromtxt(outliers, delimiter='\\t',skip_header=0)\n\n outliers_proportion = float(test_data.shape[0])/(float(train_data.shape[0])+float(test_data.shape[0]))\n outliers_proportion=0.01\n\n clf = OneClassSVMClassifier(normal,outliers_proportion,0.95,0.05)\n #processing data without targets\n X_train = clf.scale(train_data)\n X_test = clf.scale(test_data)\n\n y_pred_train = clf.predict(X_train)\n y_pred_test = clf.predict(X_test)\n n_error_train = y_pred_train[y_pred_train == 1].size\n n_error_test = y_pred_test[y_pred_test == -1].size\n \n print \"training: \", 
100.0*(float(n_error_train)/float(X_train.shape[0])), \"testing (test/outliers): \",100.0*(float(n_error_test)/float(X_test.shape[0]))\n\n sys.exit(0)\n\n #for further information about parameters, please google sklearn docs\n parameters = {'kernel':('sigmoid', 'rbf'), 'C':[.1,.2,1.0],'cache_size':[500]}\n svr = svm.SVC()\n clf = grid_search.GridSearchCV(svr, parameters,n_jobs=3)\n sys.stdout.write(\"%s:validating... \"%(strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())))\n output = clf.fit(X,Y)\n print output\n print \"(%s) DONE.\" % (strftime(\"%Y-%m-%d %H:%M:%S\", gmtime()))\n sys.exit(0)", "def important_features_(self):\n return self.scores_ > self.score_cutoff_", "def check_if_stopping_criterion_is_met(original_training_data_values):\n if len(original_training_data_values)<23:\n return True\n else:\n target_column = original_training_data_values[:, -1]\n recipe_type, cupcake_muffin_count = np.unique(target_column, return_counts=True)\n cupcake_ratio = cupcake_muffin_count[0] / (cupcake_muffin_count.sum())\n muffin_ratio = cupcake_muffin_count[1] / (cupcake_muffin_count.sum())\n\n if cupcake_ratio >= 0.9 or muffin_ratio >= 0.9:\n return True\n else:\n return False", "def supervisedMLClassify(sim_vec_dict, true_match_set):\n\n num_folds = 3 # Number of classifiers to create\n\n class_match_set = set()\n class_nonmatch_set = set()\n\n try:\n import numpy\n import sklearn.tree\n except:\n print('Either the \"numpy\" or \"sklearn\" modules is not installed! Aborting.')\n print('')\n\n return set(), set() # Return two empty sets so program continues\n\n import random\n\n print('Supervised decision tree classification of %d record pairs' % \\\n (len(sim_vec_dict)))\n\n # Generate the training data sets (similarity vectors plus class labels\n # (match or non-match)\n #\n num_train_rec = len(sim_vec_dict)\n num_features = len(list(sim_vec_dict.values())[0])\n\n print(' Number of training records and features: %d / %d' % \\\n (num_train_rec, num_features))\n\n all_train_data = numpy.zeros([num_train_rec, num_features])\n all_train_class = numpy.zeros(num_train_rec)\n\n rec_pair_id_list = []\n\n num_pos = 0\n num_neg = 0\n\n i = 0\n for (rec_id1, rec_id2) in sim_vec_dict:\n rec_pair_id_list.append((rec_id1, rec_id2))\n sim_vec = sim_vec_dict[(rec_id1, rec_id2)]\n\n all_train_data[:][i] = sim_vec\n\n if (rec_id1, rec_id2) in true_match_set:\n all_train_class[i] = 1.0\n num_pos += 1\n else:\n all_train_class[i] = 0.0\n num_neg += 1\n i += 1\n\n num_all = num_pos + num_neg # All training examples\n\n num_train_select = int(2. 
/ 3 * num_all) # Select 2/3 for training\n num_test_select = num_all - num_train_select\n\n print(' Number of positive and negative training records: %d / %d' % \\\n (num_pos, num_neg))\n print('')\n\n class_list = [] # List of the generated classifiers\n\n for c in range(num_folds):\n\n train_index_list = random.sample(xrange(num_all), num_train_select)\n\n train_data = numpy.zeros([num_train_select, num_features])\n train_class = numpy.zeros(num_train_select)\n test_data = numpy.zeros([num_test_select, num_features])\n test_class = numpy.zeros(num_test_select)\n\n # Copy similarities and class labels\n #\n train_ind = 0\n test_ind = 0\n\n for i in range(num_all):\n\n if (i in train_index_list):\n train_data[:][train_ind] = all_train_data[:][i]\n train_class[train_ind] = all_train_class[i]\n train_ind += 1\n else:\n test_data[:][test_ind] = all_train_data[:][i]\n test_class[test_ind] = all_train_class[i]\n test_ind += 1\n\n # Now build and train the classifier\n #\n decision_tree = sklearn.tree.DecisionTreeClassifier()\n decision_tree.fit(train_data, train_class)\n\n # Now use the trained classifier on the testing data to see how accurate\n # it is\n #\n class_predict = decision_tree.predict(test_data)\n\n num_corr = 0\n num_wrong = 0\n\n for i in range(len(class_predict)):\n if (class_predict[i] == test_class[i]):\n num_corr += 1\n else:\n num_wrong += 1\n\n print(' Classifier %d gets %d correct and %d wrong' % \\\n (c, num_corr, num_wrong))\n\n class_list.append(decision_tree)\n\n # Now use the trained classifiers to classify all record pairs\n #\n num_match_class_list = [0] * num_all # Count how often a record pair is\n # classified as a match\n\n for decision_tree in class_list:\n\n class_predict = decision_tree.predict(all_train_data) # Classify all pairs\n\n for i in range(num_all):\n num_match_class_list[i] += class_predict[i]\n\n assert num_match_class_list[i] <= num_folds, num_match_class_list[i]\n\n for i in range(num_all):\n rec_id_pair = rec_pair_id_list[i]\n\n # More '1' (match) classifications than '0' (non-match ones)\n #\n if (float(num_match_class_list[i]) / num_folds > 0.5):\n class_match_set.add(rec_id_pair)\n else:\n class_nonmatch_set.add(rec_id_pair)\n\n print('')\n\n print(' Classified %d record pairs as matches and %d as non-matches' % \\\n (len(class_match_set), len(class_nonmatch_set)))\n print('')\n\n return class_match_set, class_nonmatch_set", "def digits_make_classifiers_to_misclassified(X,Y,classifiers,ids_to_points):\n\toutput = {key: [] for key in classifiers}\n\tN = len(X)\n\tfor cf in classifiers:\n\t\tfor i in range(N):\n\t\t\tcf_classification = cf[2](X[i])\n\t\t\tif cf_classification != Y[i]:\n\t\t\t\t# output[cf].append(X[i])\n\t\t\t\toutput[cf].append(adaboost.key_from_value(ids_to_points,X[i]))\n\n\treturn output", "def classify(self, nn=1):\n\t\t#err=0\n\t\tpossibilities=[]\n\t\tfor i in range(len(self.X_test)):\n\t\t\tfor lines in range(len((self.X_train))):\n\t\t\t\tdist=np.linalg.norm(self.X_test[i]-self.X_train[lines])\n\t\t\t\tpossibilities.append([dist,self.Y_train[lines]])\n\t\t\tpossibilities.sort()\n\t\t\tfinal=[]\n\t\t\tfor c in range(0,15):\n\t\t\t\tfinal.append(possibilities[c][1])\n\t\t\t\tprint possibilities[c][1]\n\t\t\tcount=np.zeros(10)\n\t\t\tfor m in final:\n\t\t\t\tcount[m]+=1\n\t\t\t\n\t\t\tans=np.any(count==count.max())\n\t\t\t\n\t\t\tprint \"actual=\",self.Y_test[i]\n\t\t\tif(ans!=self.Y_test[i]):\n\t\t\t\tglobal err\n\t\t\t\terr=err+1", "def crossValidate(D, hyper_guess, weight_dict, optList,\n F=10, seed=None, 
verbose=True):\n\n train_dats, test_dats = split_data(D, F=F, seed=seed)\n\n xval_logli = 0\n all_gw = []\n for f in range(F):\n if verbose:\n print(\"\\rRunning xval fold \" + str(f+1) + \" of \" + str(F), end=\"\")\n _, _, wMode, _ = hyperOpt(train_dats[f], hyper_guess, weight_dict,\n optList, hess_calc=None)\n logli, gw = xval_loglike(test_dats[f], wMode,\n train_dats[f]['missing_trials'], weight_dict)\n xval_logli += np.sum(logli)\n all_gw += [gw]\n \n xval_gw = np.array(all_gw).flatten()\n test_inds = np.array([i['test_inds'] for i in test_dats]).flatten()\n inds = np.argsort(test_inds)\n xval_gw = xval_gw[inds]\n xval_pL = 1 / (1 + np.exp(xval_gw))\n \n return xval_logli, xval_pL", "def minThresholdClassify(sim_vec_dict, sim_thres):\n\n assert sim_thres >= 0.0 and sim_thres <= 1.0, sim_thres\n\n print('Minimum similarity threshold based classification of ' + \\\n '%d record pairs' % (len(sim_vec_dict)))\n print(' Classification similarity threshold: %.3f' % (sim_thres))\n\n class_match_set = set()\n class_nonmatch_set = set()\n\n # Iterate over all record pairs\n #\n for (rec_id_tuple, sim_vec) in sim_vec_dict.items():\n # ********* Implement minimum threshold classification ********************\n\n pass # Add your code here\n\n # ************ End of your code *******************************************\n\n print(' Classified %d record pairs as matches and %d as non-matches' % \\\n (len(class_match_set), len(class_nonmatch_set)))\n print('')\n\n return class_match_set, class_nonmatch_set", "def test_classify(self):\n classifiers, estimates =\\\n ada_boost.train_dataset(self.larger_matrix,\n self.larger_class_labels,\n 9)\n data_to_classify = [1, 0.5]\n classifications = ada_boost.classify(data_to_classify, classifiers)\n expected = np.mat([-1.])\n self.assertEqual(classifications, expected)", "def enoughForLeader(self, votes):\n entry = self.getConfig()\n if entry['config'] == 'single':\n validVotes = len(set(entry['data'].keys()) & set(votes))\n return validVotes > len(entry['data']) / 2\n validVotesOld = len(set(entry['data'][0].keys()) & set(votes))\n validVotesNew = len(set(entry['data'][1].keys()) & set(votes))\n return validVotesOld > len(entry['data'][0]) / 2 and \\\n validVotesNew > len(entry['data'][1]) / 2", "def evaluation_detections(thresholds, bboxes_gt, bboxes_detected, num_instances):\r\n TP = np.zeros(len(thresholds), dtype=int)\r\n FP = np.zeros(len(thresholds), dtype=int)\r\n\r\n scores_detections = [[] for i in range(len(thresholds))]\r\n # scores_detections is pair of values [result, confidence] where result is true if the example is correctly\r\n # classified and confidence is the confidence of the prediction. It's used to compute the precision-recall\r\n # curve. 
Confidence score is random if the predicted scores do not belong to a detector.\r\n\r\n for key in bboxes_detected.keys():\r\n for bbox_noisy in bboxes_detected[key]:\r\n if key in bboxes_gt: # if we have detected stuff and it is in the gt\r\n scores = [bbox_iou(bbox_noisy[1:5], bbox[1:5]) for bbox in bboxes_gt[key]]\r\n max_score = max(scores)\r\n for i, threshold in enumerate(thresholds):\r\n if max_score > threshold:\r\n TP[i] += 1\r\n # we give correct boxes a slightly higher confidence score\r\n scores_detections[i].append([1, bbox_noisy[5]])\r\n else:\r\n FP[i] += 1\r\n scores_detections[i].append([0, bbox_noisy[5]])\r\n else: # if we have detected stuff and it is not in the gt\r\n for i, threshold in enumerate(thresholds):\r\n FP[i] += 1\r\n\r\n FN = num_instances - TP # number of instances not detected\r\n return TP, FP, FN, np.array(scores_detections)", "def accuracy(yHat, yTrue):\n # TODO calculate the accuracy\n acc = 0\n for i in range(len(yHat)): # count the number of correct classifications\n if yHat[i] == yTrue[i]:\n acc += 1\n return acc / len(yTrue) # return the num correct / total test samples", "def cross_validate(k, original_x_train, original_y_train, label, features: List[str],\n features_to_encode: List[str], balance_ratio: float,\n classifiers: List[ClassifierMixin]) \\\n -> Tuple[List[Tuple[Type[ClassifierMixin], Dict]], Type[ClassifierMixin]]:\n\n X, y = balance_train(original_x_train, original_y_train, label, balance_ratio)\n skf = StratifiedKFold(n_splits=k)\n val_scores = []\n for classifier in classifiers:\n print('Doing ', classifier.__class__)\n clf = make_pipeline(FeatureEncoder(features_to_encode, features), classifier)\n val_scores.append((classifier.__class__,\n model_selection.cross_validate(clf, X, y, scoring=('f1_weighted'),\n cv=skf, n_jobs=-1)))\n best_classifier_class = max([(mod, median(res['test_score'])) for mod, res in val_scores],\n key=lambda x: x[1])[0]\n return val_scores, best_classifier_class", "def check_homogeneity(impurities, hull, used_pivots):\n for instance in impurities:\n if check_inside_hull(hull, instance):\n return False\n return True", "def hard_judge(self, infer_result=None):\r\n if infer_result is None:\r\n logger.warning(f'infer result is invalid, value: {infer_result}!')\r\n return False\r\n elif len(infer_result) == 0:\r\n return False\r\n else:\r\n log_sum = 0.0\r\n data_check_list = [class_probability for class_probability\r\n in infer_result\r\n if data_check(class_probability)]\r\n if len(data_check_list) == len(infer_result):\r\n for class_data in data_check_list:\r\n log_sum += class_data * math.log(class_data)\r\n confidence_score = 1 + 1.0 * log_sum / math.log(\r\n len(infer_result))\r\n return confidence_score >= self.threshold_cross_entropy\r\n else:\r\n logger.warning(\"every value of infer_result should be in \"\r\n f\"[0,1], your data is {infer_result}\")\r\n return False", "def prob6(n_neighbors, filename=\"mnist_subset.npz\"):\n data = np.load(filename)\n X_train = data['X_train'].astype(np.float)\n y_train = data['y_train']\n X_test = data['X_test'].astype(np.float)\n y_test = data['y_test']\n \n model = KNeighborsClassifier(n_neighbors)\n model.fit(X_train, y_train)\n my_pred = model.predict(X_test)\n accuracy = (my_pred) == y_test #check to see if my predictions are equal to the y_truth values. 
a matrix of 1's (correct) and 0's (incorrect) is made\n return(np.mean(accuracy)*100) #takes the average of the matrix to calculate an accuracy\n \n raise NotImplementedError(\"Problem 6 Incomplete\")", "def _find_threshold(self, feature, y_train, num_class):\n assert len(num_class) == 2, \"This function only assumes work with binary classification.\"\n best_threshold = 0.0\n max_exact_classification = 0.0\n is_positive_negative = False\n sorted_feature = sorted(np.unique(feature))\n for i in range(len(sorted_feature)-1):\n # assume the value less than threshold is negative (0), greater than threshold is positive (1)\n threshold = (sorted_feature[i] + sorted_feature[i+1]) / 2\n left_partition = y_train[feature < threshold]\n right_partition = y_train[feature > threshold]\n negative_positive = ((len(left_partition[left_partition == 0]) + len(right_partition[right_partition == 1]))\n / len(feature))\n # assume the value less than threshold is positive (1), greater than threshold is negative. (0)\n positive_negative = ((len(left_partition[left_partition == 1]) + len(right_partition[right_partition == 0]))\n / len(feature))\n # make decision here\n is_positive_negative = positive_negative > negative_positive\n choose = positive_negative if is_positive_negative else negative_positive\n if max_exact_classification < choose:\n max_exact_classification = choose\n best_threshold = threshold\n return best_threshold, is_positive_negative", "def _checkValues(set_):\n if len(set_)<3: return False\n x = set_[2]\n # TODO: OPT: need optimization\n if (x is None) or len(x) == 0: return False # undefined\n for v in x:\n try:\n if Nlabels <= 2 and N.isscalar(v):\n continue\n if (isinstance(v, dict) or # not dict for pairs\n ((Nlabels>=2) and len(v)!=Nlabels) # 1 per each label for multiclass\n ): return False\n except Exception, e:\n # Something else which is not supported, like\n # in shogun interface we don't yet extract values per each label or\n # in pairs in the case of built-in multiclass\n if __debug__:\n debug('ROC', \"Exception %s while checking \"\n \"either %s are valid labels\" % (str(e), x))\n return False\n return True", "def check_classifier():\n content = []\n labels = []\n file = 'COMP3074-CW1-Dataset.csv'\n content, labels = get_tag(file, \"question_book\", content, labels)\n file = 'name.csv'\n content, labels = get_tag(file, \"question_book\", content, labels)\n file = 'Small_talk.csv'\n content, labels = get_tag(file, \"small_talk\", content, labels, )\n x_train, x_test, y_train, y_test = train_test_split(content, # Sample feature set to be divided\n labels, # The sample result to be divided (label)\n stratify=labels, # Keep the category proportions\n # the same in training and testing\n test_size=0.25, # Refers to the proportion of\n # samples reserved for testing\n random_state=22) # Random seed\n count_vect = CountVectorizer(stop_words=stopwords.words('english'))\n x_train_counts = count_vect.fit_transform(x_train)\n tfidf_transformer = TfidfTransformer(use_idf=True, # Tf_idf\n sublinear_tf=True).fit(x_train_counts)\n x_train_tf = tfidf_transformer.transform(x_train_counts) # Standardize the inherent attributes of the training set,\n # reduce dimensionality and normalize\n classify = LogisticRegression(random_state=0).fit(x_train_tf, y_train) # Logistic regression\n return classify, tfidf_transformer, count_vect", "def test_classifier(utterances, dialogue_index, sentence_index, classifier):\n keyword_classification = classifier(utterances[dialogue_index][1][sentence_index])\n 
ground_truths = [ground_truth.split('(')[0] for ground_truth in\n utterances[dialogue_index][0][sentence_index].split('|')]\n\n ground_truths.sort()\n keyword_classification.sort()\n if ground_truths != keyword_classification:\n return False, ground_truths, keyword_classification\n else:\n return True, ground_truths, keyword_classification", "def test_episodic_overfit(self,\n learner_class,\n learner_config,\n threshold=1.,\n attempts=1):\n gin_config = '\\n'.join((self.BASE_GIN_CONFIG, learner_config))\n gin.parse_config(gin_config)\n\n episode_config = config.EpisodeDescriptionConfig(\n num_ways=self.NUM_EXAMPLES, num_support=1, num_query=1)\n\n trainer_instance = trainer.Trainer(\n train_learner_class=learner_class,\n eval_learner_class=learner_class,\n is_training=True,\n train_dataset_list=['fake'],\n eval_dataset_list=['fake'],\n records_root_dir=self.temp_dir,\n checkpoint_dir=os.path.join(self.temp_dir, 'checkpoints'),\n train_episode_config=episode_config,\n eval_episode_config=episode_config,\n data_config=config.DataConfig(),\n )\n # Train 1 update at a time for the last `attempts - 1` steps.\n trainer_instance.num_updates -= (attempts - 1)\n trainer_instance.train()\n valid_accs = [trainer_instance.valid_acc]\n for _ in range(attempts - 1):\n trainer_instance.num_updates += 1\n trainer_instance.train()\n valid_accs.append(trainer_instance.valid_acc)\n self.assertGreaterEqual(max(valid_accs), threshold)", "def classify(self, testInstance):\n return self.fire(testInstance) > 0.5", "def CrossValidate(net, testExamples, tolerance, appendExamples=0):\n nTest = len(testExamples)\n nBad = 0\n badExamples = []\n for i in range(nTest):\n testEx = testExamples[i]\n trueRes = testExamples[i][-1]\n res = net.ClassifyExample(testEx)\n if math.fabs(trueRes - res) > tolerance:\n badExamples.append(testEx)\n nBad = nBad + 1\n\n return float(nBad) / nTest, badExamples", "def test(self):\r\n error_count = 0\r\n N_TESTING = len(self.TESTING_DATA)\r\n for i in range(N_TESTING):\r\n x_vec = self.TESTING_DATA[i][:-1]\r\n y = self.TESTING_DATA[i][-1]\r\n\r\n result = self.bp.classify(x_vec)\r\n if result != y: error_count += 1\r\n print(error_count, \" errors on the test data, out of \", N_TESTING, \"items.\")", "def non_max_suppression_kneron(prediction, conf_thres=0.1, iou_thres=0.6, top_k_num=3000, merge=False, classes=None, agnostic=False):\n if prediction.dtype is torch.float16:\n prediction = prediction.float() # to FP32\n\n nc = prediction[0].shape[1] - 5 # number of classes\n xc = prediction[..., 4] > conf_thres # candidates\n\n # Settings\n min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height\n max_det = 300 # maximum number of detections per image\n time_limit = 10.0 # seconds to quit after\n multi_label = nc > 1 # multiple labels per box (adds 0.5ms/img)\n\n t = time.time()\n output = [None] * prediction.shape[0]\n for xi, x in enumerate(prediction): # image index, image inference\n # Apply constraints\n # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height\n x = x[xc[xi]] # confidence\n\n # If none remain process next image\n if not x.shape[0]:\n continue\n\n # Compute conf\n x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf\n\n # Box (center x, center y, width, height) to (x1, y1, x2, y2)\n box = xywh2xyxy(x[:, :4])\n\n # Detections matrix nx6 (xyxy, conf, cls)\n if multi_label:\n i, j = (x[:, 5:] > conf_thres).nonzero().t()\n x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)\n else: # best class only\n conf, j = x[:, 
5:].max(1, keepdim=True)\n x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]\n\n\n\n # If none remain process next image\n n = x.shape[0] # number of boxes\n if not n:\n continue\n\n # Sort by confidence\n # x = x[x[:, 4].argsort(descending=True)]\n\n # Batched NMS\n c = x[:, 5:6] * (0 if agnostic else max_wh) # classes\n boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores\n # Sort by confidence\n ind_Sort_by_confidence = x[:, 4].argsort(descending=True)\n boxes = boxes[ind_Sort_by_confidence][:top_k_num] #\n scores = scores[ind_Sort_by_confidence][:top_k_num] #\n x = x[ind_Sort_by_confidence][:top_k_num] #\n # cross classes nms\n i = torchvision.ops.boxes.nms(boxes, scores, iou_thres)\n if i.shape[0] > max_det: # limit detections\n i = i[:max_det]\n\n\n output[xi] = x[i]\n if (time.time() - t) > time_limit:\n break # time limit exceeded\n\n return output", "def non_max_suppression(prediction, conf_thres=0.1, iou_thres=0.6, merge=False, classes=None, agnostic=False):\r\n if prediction.dtype is torch.float16:\r\n prediction = prediction.float() # to FP32\r\n\r\n nc = prediction[0].shape[1] - 5 # number of classes\r\n xc = prediction[..., 4] > conf_thres # candidates\r\n\r\n # Settings\r\n min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height\r\n max_det = 300 # maximum number of detections per image\r\n time_limit = 10.0 # seconds to quit after\r\n redundant = True # require redundant detections\r\n multi_label = nc > 1 # multiple labels per box (adds 0.5ms/img)\r\n\r\n t = time.time()\r\n output = [None] * prediction.shape[0]\r\n for xi, x in enumerate(prediction): # image index, image inference\r\n # Apply constraints\r\n # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height\r\n x = x[xc[xi]] # confidence\r\n\r\n # If none remain process next image\r\n if not x.shape[0]:\r\n continue\r\n\r\n # Compute conf\r\n x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf\r\n\r\n # Box (center x, center y, width, height) to (x1, y1, x2, y2)\r\n box = xywh2xyxy(x[:, :4])\r\n\r\n # Detections matrix nx6 (xyxy, conf, cls)\r\n if multi_label:\r\n i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T\r\n x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)\r\n else: # best class only\r\n conf, j = x[:, 5:].max(1, keepdim=True)\r\n x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]\r\n\r\n # Filter by class\r\n if classes:\r\n x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]\r\n\r\n # Apply finite constraint\r\n # if not torch.isfinite(x).all():\r\n # x = x[torch.isfinite(x).all(1)]\r\n\r\n # If none remain process next image\r\n n = x.shape[0] # number of boxes\r\n if not n:\r\n continue\r\n\r\n # Sort by confidence\r\n # x = x[x[:, 4].argsort(descending=True)]\r\n\r\n # Batched NMS\r\n c = x[:, 5:6] * (0 if agnostic else max_wh) # classes\r\n boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores\r\n i = torchvision.ops.boxes.nms(boxes, scores, iou_thres)\r\n if i.shape[0] > max_det: # limit detections\r\n i = i[:max_det]\r\n if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean)\r\n try: # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)\r\n iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix\r\n weights = iou * scores[None] # box weights\r\n x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes\r\n if redundant:\r\n i = i[iou.sum(1) > 1] # require redundancy\r\n except: # 
possible CUDA error https://github.com/ultralytics/yolov3/issues/1139\r\n print(x, i, x.shape, i.shape)\r\n pass\r\n\r\n output[xi] = x[i]\r\n if (time.time() - t) > time_limit:\r\n break # time limit exceeded\r\n\r\n return output", "def partial_match_score(\n truth: List[Rationale], pred: List[Rationale], thresholds: List[float]\n) -> List[PartialMatchScore]:\n\n ann_to_rat = _keyed_rationale_from_list(truth)\n pred_to_rat = _keyed_rationale_from_list(pred)\n\n num_classifications = {k: len(v) for k, v in pred_to_rat.items()}\n num_truth = {k: len(v) for k, v in ann_to_rat.items()}\n ious: Dict[str, Dict[str, float]] = defaultdict(dict)\n for k in set(ann_to_rat.keys()) | set(pred_to_rat.keys()):\n for p in pred_to_rat.get(k, []):\n best_iou = 0.0\n for t in ann_to_rat.get(k, []):\n num = len(\n set(range(p.start_token, p.end_token))\n & set(range(t.start_token, t.end_token))\n )\n denom = len(\n set(range(p.start_token, p.end_token))\n | set(range(t.start_token, t.end_token))\n )\n iou = 0 if denom == 0 else num / denom\n if iou > best_iou:\n best_iou = iou\n ious[k][p] = best_iou\n\n scores: List[PartialMatchScore] = []\n for threshold in thresholds:\n threshold_tps: Dict[str, float] = {}\n for k, vs in ious.items():\n threshold_tps[k] = sum(int(x >= threshold) for x in vs.values())\n micro_r = (\n sum(threshold_tps.values()) / sum(num_truth.values())\n if sum(num_truth.values()) > 0\n else 0\n )\n micro_p = (\n sum(threshold_tps.values()) / sum(num_classifications.values())\n if sum(num_classifications.values()) > 0\n else 0\n )\n micro_f1 = _f1(micro_r, micro_p)\n macro_rs = list(\n threshold_tps.get(k, 0.0) / n if n > 0 else 0 for k, n in num_truth.items()\n )\n macro_ps = list(\n threshold_tps.get(k, 0.0) / n if n > 0 else 0\n for k, n in num_classifications.items()\n )\n macro_r = sum(macro_rs) / len(macro_rs) if len(macro_rs) > 0 else 0\n macro_p = sum(macro_ps) / len(macro_ps) if len(macro_ps) > 0 else 0\n macro_f1 = _f1(macro_r, macro_p)\n\n scores.append(\n PartialMatchScore(\n threshold=threshold,\n micro=InstanceScore(p=micro_p, r=micro_r, f1=micro_f1),\n macro=InstanceScore(p=macro_p, r=macro_r, f1=macro_f1),\n )\n )\n\n return scores", "def exactClassify(sim_vec_dict):\n\n print('Exact classification of %d record pairs' % (len(sim_vec_dict)))\n\n class_match_set = set()\n class_nonmatch_set = set()\n\n # Iterate over all record pairs\n #\n for (rec_id_tuple, sim_vec) in sim_vec_dict.items():\n\n sim_sum = sum(sim_vec) # Sum all attribute similarities\n\n if sim_sum == len(sim_vec): # All similarities were 1.0\n class_match_set.add(rec_id_tuple)\n else:\n class_nonmatch_set.add(rec_id_tuple)\n\n print(' Classified %d record pairs as matches and %d as non-matches' % \\\n (len(class_match_set), len(class_nonmatch_set)))\n print('')\n\n return class_match_set, class_nonmatch_set", "def check_probability(self, x, k_neighbours, expected_class, classifier, view = 0):\n match_number = 0\n distances, indexes = classifier.kneighbors(x.reshape(1, -1), k_neighbours)\n for idx in indexes:\n for element in idx:\n predict = classifier.predict(self.data[view][element].reshape(1, -1))\n if predict[0] and predict[0] == expected_class:\n match_number += 1\n return float(match_number)/k_neighbours", "def get_true_false_classifications(examples, attribute=None, k=None):\n p, n, pk, nk = 0, 0, 0, 0\n\n for example_dict in examples:\n if example_dict['class'] == TRUTH_VALUE:\n p += 1\n if (attribute is not None and k is not None) and (example_dict['object'][attribute] == k):\n pk += 1\n else:\n 
n += 1\n if (attribute is not None and k is not None) and (example_dict['object'][attribute] == k):\n nk += 1\n\n return p, n, pk, nk", "def check_convexity(hull, used_pivots):\n for instance in used_pivots:\n if not check_inside_hull(hull, instance):\n return False\n return True", "def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, agnostic=False, multi_label=False,\n labels=(), max_det=300):\n\n nc = prediction.shape[2] - 5 # number of classes\n xc = prediction[..., 4] > conf_thres # candidates\n # Checks\n assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0'\n assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0'\n\n # Settings\n min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height\n max_nms = 30000 # maximum number of boxes into torchvision.ops.nms()\n #redundant = True # require redundant detections\n multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img)\n #merge = False # use merge-NMS\n\n output = [np.zeros((0, 6))] * prediction.shape[0]\n for xi, x in enumerate(prediction): # image index, image inference\n # Apply constraints\n # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height\n x = x[xc[xi]] # confidence\n # If none remain process next image\n if not x.shape[0]:\n continue\n\n # Compute conf\n x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf\n # Box (center x, center y, width, height) to (x1, y1, x2, y2)\n box = xywh2xyxy(x[:, :4])\n # Detections matrix nx6 (xyxy, conf, cls)\n # best class only\n conf = x[:, 5:].max(1, keepdims=True)\n j = np.argmax(x[:, 5:], axis=1)\n j = j.reshape(j.shape[0],1)\n #x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]\n x = np.concatenate((box, conf, j.astype(np.float32)),axis=1)\n # Check shape\n n = x.shape[0] # number of boxes\n if not n: # no boxes\n continue\n elif n > max_nms: # excess boxes\n x = x[x[:, 4].argsort()[:max_nms]] # sort by confidence\n\n # Batched NMS\n c = x[:, 5:6] * (0 if agnostic else max_wh) # classes\n boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores\n #i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS\n i = nms(boxes, scores, iou_thres) # NMS\n \n output[xi] = x[i]\n\n return output", "def eval_all(cls_prob, dtrain):\n #determine the top k predictions\n labels = dtrain.get_label()\n top_k = cls_prob.argsort(axis = 1)[:,::-1][:,:5]\n# top_k = cls_prob.argsort(axis = 1)[:,:k:-1]\n #convert true values and compared with predictions to check for equality\n labels = labels[:, None]\n return 'error', 1-ndcg(top_k, labels)/len(labels)", "def check_correctness_raw(classifier_out, test_data):\n labels = test_data.labels\n num_correct = 0\n total = len(classifier_out)\n for index, label in classifier_out:\n if labels[index] == label:\n num_correct += 1\n print(f'Got {num_correct} out of {total} correct: {(num_correct / total) * 100}%')", "def validate_under_over_sampling_input(class_populations, minority_labels, majority_labels,\n base_minority=None, base_majority=None):\n union_of_labels = set(minority_labels).union(set(majority_labels))\n if union_of_labels != set(class_populations):\n # union of minority and majority classes must contain all classes.\n raise ValueError(\n \"\"\"\n One or more class labels are not present in either of the dictionaries, \n `minority_labels` or `majority_labels`!\n \"\"\"\n )\n intersection_of_labels = set(minority_labels).intersection(set(majority_labels))\n if 
len(intersection_of_labels) > 0:\n # no intersection of labels allowed.\n raise ValueError(\n \"\"\"\n The dictionaries, `minority_labels` and `majority_labels`, MUST be mutually exclusive!\n \"\"\"\n )\n\n if base_majority:\n if base_majority not in set(majority_labels):\n # base_minority should be a minority\n raise ValueError(\n \"\"\"\n The (majority) base label MUST be one of the labels in `majority_labels`! '{}' is\n not!\n \"\"\".format(base_majority)\n )\n if base_minority:\n if base_minority not in set(minority_labels):\n # base majority must be a majority\n raise ValueError(\n \"\"\"\n he (minority) base lanel MUST be one of the labels in `minority_labels`! '{}' is \n not!\n \"\"\".format(base_minority)\n )\n\n return True", "def _check_knn_score(knn_model, X, Y):\n assert knn_model.fit(X, Y).score(X, Y) > 0.9", "def validate_ballot_polling(T, alpha):\n for winner in T:\n for loser in T[winner]:\n if T[winner][loser] < 1 / alpha:\n return False\n\n return True", "def classify(train=None, test=None, data=None, res_dir=\"res/\", disp=True, outfilename=None):\n utils.print_success(\"Comparison of differents classifiers\")\n if data is not None:\n train_features = data[\"train_features\"]\n train_groundtruths = data[\"train_groundtruths\"]\n test_features = data[\"test_features\"]\n test_groundtruths = data[\"test_groundtruths\"]\n else:\n train = utils.abs_path_file(train)\n test = utils.abs_path_file(test)\n train_features, train_groundtruths = read_file(train)\n test_features, test_groundtruths = read_file(test)\n if not utils.create_dir(res_dir):\n res_dir = utils.abs_path_dir(res_dir)\n classifiers = {\n \"RandomForest\": RandomForestClassifier()\n # \"RandomForest\": RandomForestClassifier(n_estimators=5),\n # \"KNeighbors\":KNeighborsClassifier(3),\n # \"GaussianProcess\":GaussianProcessClassifier(1.0 * RBF(1.0), warm_start=True),\n # \"DecisionTree\":DecisionTreeClassifier(max_depth=5),\n # \"MLP\":MLPClassifier(),\n # \"AdaBoost\":AdaBoostClassifier(),\n # \"GaussianNB\":GaussianNB(),\n # \"QDA\":QuadraticDiscriminantAnalysis(),\n # \"SVM\":SVC(kernel=\"linear\", C=0.025),\n # \"GradientBoosting\":GradientBoostingClassifier(),\n # \"ExtraTrees\":ExtraTreesClassifier(),\n # \"LogisticRegression\":LogisticRegression(),\n # \"LinearDiscriminantAnalysis\":LinearDiscriminantAnalysis()\n }\n for key in classifiers:\n utils.print_success(key)\n clf = classifiers[key]\n utils.print_info(\"\\tFit\")\n clf.fit(train_features, train_groundtruths)\n utils.print_info(\"\\tPredict\")\n predictions = clf.predict(test_features)\n\n if outfilename is not None:\n with open(outfilename, \"w\") as filep:\n for gt, pred in zip(test_groundtruths, predictions):\n filep.write(gt + \",\" + pred + \"\\n\")\n\n # Global\n data = [key]\n data.append(str(precision_score(test_groundtruths, predictions, average='weighted')))\n data.append(str(recall_score(test_groundtruths, predictions, average='weighted')))\n data.append(str(f1_score(test_groundtruths, predictions, average='weighted')))\n data = \",\".join(data)\n if disp:\n print(data)\n else:\n with open(res_dir + \"global.csv\", \"a\") as filep:\n filep.write(data + \",\\n\")\n # Local\n for index, tag in enumerate(list(set(train_groundtruths))):\n precision = precision_score(test_groundtruths, predictions, average=None)\n recall = recall_score(test_groundtruths, predictions, average=None)\n f1 = f1_score(test_groundtruths, predictions, average=None)\n line = key + \",\" + str(precision[index]) + \",\" + str(recall[index]) + \",\" + str(f1[index])\n 
if disp:\n print(line)\n else:\n with open(res_dir + \"tag_\" + tag + \".csv\", \"a\") as filep:\n filep.write(line + \",\\n\")\n return predictions", "def non_max_suppression(prediction, conf_thres=0.1, iou_thres=0.6, top_k_num=3000, merge=False, classes=None, agnostic=False):\n # print('conf_thres',conf_thres)\n if prediction.dtype is torch.float16:\n prediction = prediction.float() # to FP32\n\n nc = prediction[0].shape[1] - 5 # number of classes\n xc = prediction[..., 4] > conf_thres # candidates\n\n # Settings\n min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height\n max_det = 300 # maximum number of detections per image\n time_limit = 10.0 # seconds to quit after\n multi_label = nc > 1 # multiple labels per box (adds 0.5ms/img)\n\n t = time.time()\n output = [None] * prediction.shape[0]\n for xi, x in enumerate(prediction): # image index, image inference\n # Apply constraints\n # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height\n x = x[xc[xi]] # confidence\n\n # If none remain process next image\n if not x.shape[0]:\n continue\n\n # Compute conf\n x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf\n\n # Box (center x, center y, width, height) to (x1, y1, x2, y2)\n box = xywh2xyxy(x[:, :4])\n\n # Detections matrix nx6 (xyxy, conf, cls)\n if multi_label:\n i, j = (x[:, 5:] > conf_thres).nonzero().t()\n x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)\n else: # best class only\n conf, j = x[:, 5:].max(1, keepdim=True)\n x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]\n\n\n\n # If none remain process next image\n n = x.shape[0] # number of boxes\n if not n:\n continue\n\n # Sort by confidence\n # x = x[x[:, 4].argsort(descending=True)]\n\n # Batched NMS\n c = x[:, 5:6] * (0 if agnostic else max_wh) # classes\n boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores\n # # Sort by confidence\n ind_Sort_by_confidence = x[:, 4].argsort(descending=True)\n boxes = boxes[ind_Sort_by_confidence][:top_k_num] #\n scores = scores[ind_Sort_by_confidence][:top_k_num] #\n x = x[ind_Sort_by_confidence][:top_k_num] #\n # cross classes nms\n i = torchvision.ops.boxes.nms(boxes, scores, iou_thres)\n if i.shape[0] > max_det: # limit detections\n i = i[:max_det]\n # if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean)\n # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)\n # iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix\n # weights = iou * scores[None] # box weights\n # x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes\n # if redundant:\n # i = i[iou.sum(1) > 1] # require redundancy\n\n output[xi] = x[i]\n if (time.time() - t) > time_limit:\n break # time limit exceeded\n\n return output", "def evaluate_features(trainFeatures, testFeatures):\n classifier = NaiveBayesClassifier.train(trainFeatures)\n\n #initiates referenceSets and testSets\n referenceSets = collections.defaultdict(set)\n testSets = collections.defaultdict(set)\n\n for i, (features, label) in enumerate(testFeatures):\n referenceSets[label].add(i)\n predicted = classifier.classify(features)\n testSets[predicted].add(i)\n\n print 'train on %d instances, test on %d instances' % (len(trainFeatures), len(testFeatures))\n print 'accuracy:', nltk.classify.util.accuracy(classifier, testFeatures)\n print 'pos precision:', precision(referenceSets['pos'], testSets['pos'])\n print 'pos recall:', recall(referenceSets['pos'], testSets['pos'])\n print 'neg 
precision:',precision(referenceSets['neg'], testSets['neg'])\n print 'neg recall:', recall(referenceSets['neg'], testSets['neg'])\n classifier.show_most_informative_features(50)", "def trainAndTune(self, trainingData, trainingLabels, validationData, validationLabels, kgrid):\n no_of_examples = len(trainingLabels)\n prior_prob = dict(Counter(trainingLabels))\n for key in prior_prob.keys():\n prior_prob[key] = prior_prob[key] / float(no_of_examples)\n\n self.prior_prob = prior_prob\n\n likelihoods = dict()\n for cls, prob in prior_prob.items():\n # initializing the dictionary\n likelihoods[cls] = defaultdict(list)\n\n for cls, prob in prior_prob.items():\n # taking samples of only 1 class at a time\n row_indices = list()\n for index, value in enumerate(trainingLabels):\n if value == cls:\n row_indices.append(index)\n\n subset = list()\n for index in row_indices:\n subset.append(trainingData[index])\n\n for r in range(len(subset)):\n for key, value in subset[r].items():\n likelihoods[cls][key].append(value)\n\n classes = [key for key in prior_prob]\n self.classes = classes\n _like = likelihoods\n for cls in classes:\n for key, value in likelihoods[cls].items():\n likelihoods[cls][key] = self._occurrences(likelihoods[cls][key])\n\n self.likelihoods = likelihoods\n\n # results = {}\n # correct = 0\n # for itr in range(len(validationData)):\n # for cls in classes:\n # class_probability = prior_prob[cls]\n # for key, value in validationData[itr].items():\n # relative_feature_values = likelihoods[cls][key]\n # class_probability *= relative_feature_values.get(validationData[itr][key], 0.01)\n #\n # results[cls] = class_probability\n #\n # norm_factor = 0.0\n #\n # for key, value in results.items():\n # norm_factor += value\n #\n # for key in results:\n # try:\n # results[key] = results[key]/norm_factor\n # except ZeroDivisionError:\n # pass\n #\n # if (list(results.keys())[list(results.values()).index(max([value for key, value in results.items()]))]) == validationLabels[itr]:\n # correct += 1\n #\n # print \"validation accuracy: {}%\".format((correct/float(len(validationLabels))) * 100)", "def eval_det(pred_all, gt_all, ovthresh=0.25, use_07_metric=False):\n pred = {} # map {classname: pred}\n gt = {} # map {classname: gt}\n for img_id in pred_all.keys():\n for classname, bbox, score in pred_all[img_id]:\n if classname not in pred: pred[classname] = {}\n if img_id not in pred[classname]:\n pred[classname][img_id] = []\n if classname not in gt: gt[classname] = {}\n if img_id not in gt[classname]:\n gt[classname][img_id] = []\n pred[classname][img_id].append((bbox,score))\n for img_id in gt_all.keys():\n for classname, bbox in gt_all[img_id]:\n if classname not in gt: gt[classname] = {}\n if img_id not in gt[classname]:\n gt[classname][img_id] = []\n gt[classname][img_id].append(bbox)\n\n rec = {}\n prec = {}\n ap = {}\n for classname in gt.keys():\n print('Computing AP for class: ', classname)\n rec[classname], prec[classname], ap[classname] = eval_det_cls(pred[classname], gt[classname], ovthresh, use_07_metric)\n print(classname, ap[classname])\n \n return rec, prec, ap", "def cross_validation(T, y):\r\n from sklearn.model_selection import LeaveOneOut\r\n y = np.array(y)\r\n judge = list()\r\n for train_index, valid_index in LeaveOneOut().split(T):\r\n T_train = T[train_index]\r\n T_valid = T[valid_index]\r\n y_train = y[train_index]\r\n y_valid = y[valid_index]\r\n\r\n T_train, mean, std = feature.normalize(T_train)\r\n T_principle, T_principle_index, dist, AUC = feature_select(T_train,\r\n 
y_train, k=3)\r\n ts = threshold(dist, y_train)\r\n C = gen_center(T_principle, y_train)\r\n T_valid = (T_valid - mean) / std\r\n dist_valid = util.distance(T_valid.T[T_principle_index].T, C)\r\n if y_valid[0] == 1:\r\n if dist_valid[0] < ts:\r\n judge.append(1)\r\n else:\r\n judge.append(0)\r\n else:\r\n if dist_valid[0] < ts:\r\n judge.append(0)\r\n else:\r\n judge.append(1)\r\n accuracy = sum(judge) / len(judge)\r\n return accuracy", "def analyze_similarities():\r\n print('Total number of candidate pairs:', len(pairs))\r\n print(f'\\nNumber of actual item pairs in the train set: {pairs[\"true_match\"].sum()}\\n')\r\n\r\n for feature in ['text_score', 'image_score', 'txt_img_score', 'words_ratio', 'txt_img_words']:\r\n\r\n # Check distribution of True and False predictions for various similarity scores\r\n print('-' * 50)\r\n print(f'\\nDistribution of True/False predictions for {feature}')\r\n for thr in (0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95):\r\n print('-' * 50)\r\n print(f'Similarity score over {thr}')\r\n pairs_sample = pairs[pairs[feature] >= thr]\r\n print(f'Number of similar item pairs: {len(pairs_sample)}')\r\n print(pairs_sample['true_match'].value_counts(normalize=True))\r\n\r\n # Check if identical phash can be used to improve the accuracy\r\n same_phash = pairs[pairs['phash_match'] == 1]\r\n different_phash = pairs[pairs['phash_match'] == 0]\r\n\r\n print('\\nFor item pairs with the same phash:')\r\n print(same_phash['true_match'].value_counts(normalize=True))\r\n print('Number of item pairs in this subset:', len(same_phash))\r\n\r\n print('\\nFor item pairs with different phash:')\r\n print(different_phash['true_match'].value_counts(normalize=True))\r\n print('Number of item pairs in this subset:', len(different_phash))\r\n\r\n # Check if numbers in titles can be used to improve the accuracy\r\n same_numbers = pairs[pairs['nums_match'] == 1]\r\n different_numbers = pairs[pairs['nums_match'] == 0]\r\n\r\n print('\\nFor item pairs with the same numbers:')\r\n print(same_numbers['true_match'].value_counts(normalize=True))\r\n print('Number of item pairs in this subset:', len(same_numbers))\r\n\r\n print('\\nFor item pairs with different numbers:')\r\n print(different_numbers['true_match'].value_counts(normalize=True))\r\n print('Number of item pairs in this subset:', len(different_numbers))", "def cross_validation2(T, y):\r\n from sklearn.model_selection import LeaveOneOut\r\n y = np.array(y)\r\n judge = list()\r\n T_principle_index = np.array([0, 18, 43])\r\n\r\n for train_index, valid_index in LeaveOneOut().split(T):\r\n T_train = T[train_index]\r\n T_valid = T[valid_index]\r\n y_train = y[train_index]\r\n\r\n T_train, mean, std = feature.normalize(T_train)\r\n\r\n T_principle = T_train.T[T_principle_index].T\r\n C = gen_center(T_principle, y_train)\r\n dist = util.distance(T_principle, C)\r\n ts = threshold(dist, y_train)\r\n\r\n T_valid = (T_valid - mean) / std\r\n dist_valid = util.distance(T_valid.T[T_principle_index].T, C)\r\n\r\n if dist_valid[0] < ts:\r\n judge.append(1)\r\n else:\r\n judge.append(0)\r\n return np.array(judge)", "def single_supervise_evaluation(clf, x_train, y_train, x_test, y_test, r1_norm_step=0.05, r2_norm_step=0.05):\n # fit model\n clf.fit(x_train, y_train)\n\n # calc accuracy\n y_train_pred = clf.predict(x_train)\n accuracy_train = balanced_accuracy_score(y_true=y_train, y_pred=y_train_pred)\n print(f\"Balanced accuracy on the training set: {accuracy_train:.3f}\")\n y_test_pred = clf.predict(x_test)\n accuracy_test = 
balanced_accuracy_score(y_true=y_test, y_pred=y_test_pred)\n print(f\"Balanced accuracy on the hold-out set: {accuracy_test:.3f}\")\n\n # get confusion matrix\n y_pred = clf.predict(x_test)\n cmat = confusion_matrix(y_test, y_pred)\n\n # normalize confusion matrix\n r1_cmat = _r1_normalize(cmat)\n r2_cmat = _r2_normalize(cmat)\n m1 = np.max(r1_cmat)\n if np.isnan(m1):\n m1 = 1.0\n m2 = np.max(r2_cmat)\n\n cluster_map = {}\n while (len(cluster_map) == 0) and (m1 > 0) and (m2 > 0):\n m1 -= r1_norm_step\n m2 -= r2_norm_step\n\n # final binary matrix to calculate which clusters need to be merged\n judge = np.maximum.reduce([(r1_cmat > m1), (r2_cmat > m2)])\n if judge.sum() > 0:\n rows, cols = np.where(judge)\n edges = zip(rows.tolist(), cols.tolist())\n g = nx.Graph()\n g.add_edges_from(edges)\n for comp in nx.connected_components(g):\n to_label = comp.pop()\n for remain in comp:\n cluster_map[remain] = to_label\n return clf, accuracy_test, cluster_map, cmat, r1_cmat, r2_cmat", "def eval_det_cls(pred, gt, ovthresh=0.25, use_07_metric=False):\n\n # construct gt objects\n class_recs = {} # {img_id: {'bbox': bbox list, 'det': matched list}}\n npos = 0\n for img_id in gt.keys():\n bbox = np.array(gt[img_id])\n det = [False] * len(bbox)\n npos += len(bbox)\n class_recs[img_id] = {'bbox': bbox, 'det': det}\n # pad empty list to all other imgids\n for img_id in pred.keys():\n if img_id not in gt:\n class_recs[img_id] = {'bbox': np.array([]), 'det': []}\n\n # construct dets\n image_ids = []\n confidence = []\n BB = []\n for img_id in pred.keys():\n for box,score in pred[img_id]:\n image_ids.append(img_id)\n confidence.append(score)\n BB.append(box)\n confidence = np.array(confidence)\n BB = np.array(BB) # (nd,4 or 8,3)\n\n # sort by confidence\n sorted_ind = np.argsort(-confidence)\n sorted_scores = np.sort(-confidence)\n BB = BB[sorted_ind, ...]\n image_ids = [image_ids[x] for x in sorted_ind]\n\n # go down dets and mark TPs and FPs\n nd = len(image_ids)\n tp = np.zeros(nd)\n fp = np.zeros(nd)\n for d in range(nd):\n if d%100==0: \n print(d)\n R = class_recs[image_ids[d]]\n bb = BB[d,:].astype(float)\n ovmax = -np.inf\n BBGT = R['bbox'].astype(float)\n\n if BBGT.size > 0:\n # compute overlaps\n for j in range(BBGT.shape[0]):\n iou = get_iou(bb, BBGT[j,...]) \n if iou > ovmax:\n ovmax = iou\n jmax = j\n\n #print d, ovmax\n if ovmax > ovthresh:\n if not R['det'][jmax]:\n tp[d] = 1.\n R['det'][jmax] = 1\n else:\n fp[d] = 1.\n else:\n fp[d] = 1.\n\n # compute precision recall\n fp = np.cumsum(fp)\n tp = np.cumsum(tp)\n rec = tp / float(npos)\n print('NPOS: ', npos)\n print('ND:', nd)\n # avoid divide by zero in case the first detection matches a difficult\n # ground truth\n prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)\n ap = voc_ap(rec, prec, use_07_metric)\n\n return rec, prec, ap", "def best_threshold_from_folds(y_tuples, scoring=f1_score, step_size=0.01, maximize=True):\n thresholds, scores = [], []\n for _, y_true, y_pred in y_tuples:\n t, s = find_best_threshold(y_true, y_pred, step_size, scoring, maximize=maximize)\n thresholds.append(t)\n scores.append(s)\n\n mean_threshold = np.mean(thresholds)\n mean_score = np.mean([score_for_threshold(y, y_hat, scoring, mean_threshold) for _, y, y_hat in y_tuples])\n return mean_threshold, mean_score", "def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False,\n labels=(), max_det=300):\n\n nc = prediction.shape[2] - 5 # number of classes\n xc = prediction[..., 4] > conf_thres # 
candidates\n\n # Checks\n assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0'\n assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0'\n\n # Settings\n min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height\n max_nms = 30000 # maximum number of boxes into torchvision.ops.nms()\n time_limit = 10.0 # seconds to quit after\n redundant = True # require redundant detections\n multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img)\n merge = False # use merge-NMS\n\n t = time.time()\n output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]\n for xi, x in enumerate(prediction): # image index, image inference\n # Apply constraints\n # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height\n x = x[xc[xi]] # confidence\n\n # Cat apriori labels if autolabelling\n if labels and len(labels[xi]):\n l = labels[xi]\n v = torch.zeros((len(l), nc + 5), device=x.device)\n v[:, :4] = l[:, 1:5] # box\n v[:, 4] = 1.0 # conf\n v[range(len(l)), l[:, 0].long() + 5] = 1.0 # cls\n x = torch.cat((x, v), 0)\n\n # If none remain process next image\n if not x.shape[0]:\n continue\n\n # Compute conf\n x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf\n\n # Box (center x, center y, width, height) to (x1, y1, x2, y2)\n box = xywh2xyxy(x[:, :4])\n\n # Detections matrix nx6 (xyxy, conf, cls)\n if multi_label:\n i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T\n x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)\n else: # best class only\n conf, j = x[:, 5:].max(1, keepdim=True)\n x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]\n\n # Filter by class\n if classes is not None:\n x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]\n\n # Apply finite constraint\n # if not torch.isfinite(x).all():\n # x = x[torch.isfinite(x).all(1)]\n\n # Check shape\n n = x.shape[0] # number of boxes\n if not n: # no boxes\n continue\n elif n > max_nms: # excess boxes\n x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence\n\n # Batched NMS\n c = x[:, 5:6] * (0 if agnostic else max_wh) # classes\n boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores\n i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS\n if i.shape[0] > max_det: # limit detections\n i = i[:max_det]\n if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean)\n # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)\n iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix\n weights = iou * scores[None] # box weights\n x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes\n if redundant:\n i = i[iou.sum(1) > 1] # require redundancy\n\n output[xi] = x[i]\n if (time.time() - t) > time_limit:\n print(f'WARNING: NMS time limit {time_limit}s exceeded')\n break # time limit exceeded\n\n return output", "def eval_feature_verify(query_features, gallery_features, query_lst, gallery_lst, metric=\"cosine\"):\n if metric not in [\"euclidean\", \"cosine\"]:\n raise ValueError(\"Invalid metric! 
\")\n\n num_query = len(query_lst)\n num_gallery = len(gallery_lst)\n\n if metric == \"cosine\":\n gallery_features = normalize(gallery_features, axis=1)\n query_features = normalize(query_features, axis=1)\n\n if metric == \"euclidean\":\n dist = euclidean_distances(gallery_features, query_features).squeeze()\n else:\n dist = np.dot(gallery_features, query_features.T)\n print(dist[:].max())\n print(dist[:].min())\n\n gallery_cam_lst = np.array([x.cam_id for x in gallery_lst], dtype=np.int32)\n gallery_id_lst = np.array([x.class_id for x in gallery_lst], dtype=np.int32)\n query_cam_lst = np.array([x.cam_id for x in query_lst], dtype=np.int32)\n query_id_lst = np.array([x.class_id for x in query_lst], dtype=np.int32)\n\n # same id\n gallery_id_lst_tile = np.tile(gallery_id_lst, (num_query, 1))\n query_id_lst_tile = np.tile(query_id_lst, (num_gallery, 1))\n label_mat = query_id_lst_tile == gallery_id_lst_tile.T # gallery num * query num\n label_mat = label_mat.astype(np.int)\n\n # same camera and id\n gallery_cam_lst_tile = np.tile(gallery_cam_lst, (num_query, 1))\n query_cam_lst_tile = np.tile(query_cam_lst, (num_gallery, 1))\n # same camera, gallery num * query num\n label_cam_mat = query_cam_lst_tile == gallery_cam_lst_tile.T\n # filter by same id\n label_cam_mat = label_cam_mat * label_mat\n # same id but under same camera are labeled as -1,\n label_mat -= 2 * label_cam_mat.astype(np.int)\n\n ind = np.where(label_mat == 0)\n neg_dist = dist[ind]\n ind = np.where(label_mat == 1)\n pos_dist = dist[ind]\n\n new_dist = np.concatenate([pos_dist, neg_dist])\n new_label = np.concatenate([np.ones_like(pos_dist), np.zeros_like(neg_dist)])\n dist_label = zip(new_dist, new_label)\n dist_label_new = np.array(sorted(dist_label, key=lambda d: -d[0]))\n\n # ap = []\n n_good = np.sum(dist_label_new[:, 1])\n n_bad = np.sum(1 - dist_label_new[:, 1])\n print(n_good)\n for thresh in np.arange(0.90, 1, 0.01):\n # print(thresh)\n p = dist_label_new[:, 0] >= thresh\n if np.sum(p) == 0:\n recall, precision, f1 = 0, 0, 0\n else:\n tp = np.logical_and(dist_label_new[:, 1], p)\n recall = np.sum(tp) / n_good\n precision = np.float(np.sum(tp)) / np.sum(p)\n\n f1 = 2*recall*precision/(recall+precision)\n\n print('%.4f\\t%.4f\\t%.4f' % (recall, precision, f1))\n\n # ap.append(compute_ap(dist_label_new, thresh))\n # print(ap)", "def classify(data, labels, (train_idx, test_idx), classifier=None):\r\n\r\n assert classifier is not None, \"Why would you pass not classifier?\"\r\n\r\n # Data scaling based on training set\r\n scaler = SupervisedStdScaler() #SupervisedRobustScaler() # # \r\n scaler.fit(data[train_idx,:], labels[train_idx], label=-1)\r\n #scaler.fit(data[train_idx,:], labels[train_idx])\r\n data_train = scaler.transform(data[train_idx,:])\r\n data_test = scaler.transform(data[test_idx,:])\r\n try:\r\n classifier.fit(data_train, labels[train_idx])\r\n \r\n \r\n confMat = confusion_matrix(labels[test_idx],\r\n classifier.predict(data_test))\r\n if confMat.shape == (1,1):\r\n if all(labels[test_idx] == -1):\r\n confMat = np.array([[confMat[0], 0], [0, 0]], dtype=confMat.dtype)\r\n else:\r\n confMat = np.array([[0, 0], [0, confMat[0]]], dtype=confMat.dtype)\r\n confMatRate = confMat / np.tile(np.sum(confMat, axis=1).astype('float'), (2,1)).transpose()\r\n totalErr = (confMat[0, 1] + confMat[1, 0]) / float(confMat.sum())\r\n #if type(classifier) not in [type(None), DummyClassifier]:\r\n if hasattr(classifier,'param_grid'): \r\n #isinstance(classifier, GridSearchCV) or \\\r\n # isinstance(classifier, 
RandomizedSearchCV):\r\n fitted_model = classifier.best_estimator_\r\n else:\r\n fitted_model = copy.copy(classifier) \r\n return confMatRate, totalErr, fitted_model\r\n except np.linalg.linalg.LinAlgError as e:\r\n # sahil added statement to raise the error instead of returning nun values\r\n print e.message\r\n raise e\r\n # return np.array([[np.nan, np.nan], [np.nan, np.nan]]), np.nan, None\r", "def test_call_alt_threshold(self):\r\n # adapted from test_app.test_cd_hit.test_cdhit_clusters_from_seqs\r\n\r\n exp = {0: ['cdhit_test_seqs_0'],\r\n 1: ['cdhit_test_seqs_1'],\r\n 2: ['cdhit_test_seqs_2'],\r\n 3: ['cdhit_test_seqs_3'],\r\n 4: ['cdhit_test_seqs_4'],\r\n 5: ['cdhit_test_seqs_5'],\r\n 6: ['cdhit_test_seqs_6', 'cdhit_test_seqs_8'],\r\n 7: ['cdhit_test_seqs_7'],\r\n 8: ['cdhit_test_seqs_9']}\r\n\r\n app = CdHitOtuPicker(params={'Similarity': 0.90})\r\n obs = app(self.tmp_seq_filepath1)\r\n self.assertEqual(obs, exp)", "def __pair_maximizer(alpha_pairs, pair):\n for alt in alpha_pairs:\n if pair != alt and pair[0].issubset(alt[0]) and pair[1].issubset(alt[1]):\n return False\n return True", "def valid_classification(cls, classification_dict):\n if not isinstance(classification_dict, dict):\n raise ValueError(\"classification needs to be a dictionary\")\n invalid_keys = [k for k in classification_dict if k not in [\"l1\", \"l2\", \"l3\"]]\n if invalid_keys:\n raise ValueError(f\"invalid key(s) in classification: {', '.join(invalid_keys)}\")\n\n if classification_dict and \"l1\" not in classification_dict:\n raise ValueError(\"L1 classification missing\")\n\n return classification_dict", "def test_recommendation_evaluation_18(model, interactions_ds):\n assert recommendation_evaluation(model, interactions_ds[1], cn_test_users=None, k=2, n_pos_interactions=None,\n novelty=False, ignore_low_predictions_threshold=2) == \\\n {'HitRatio@2': 0.0167, 'NDCG@2': 0.0189, 'Precision@2': 0.02, 'Recall@2': 0.0167}", "def check_model(X, w, y, thr = 0.9):\n assert np.mean((y > 0) == (X @ w > 0)) > thr, \"model accuracy\"", "def test_qsvm_multiclass_all_pairs(self):\n training_input = {'A': np.asarray([[0.6560706, 0.17605998], [0.25776033, 0.47628296],\n [0.8690704, 0.70847635]]),\n 'B': np.asarray([[0.38857596, -0.33775802], [0.49946978, -0.48727951],\n [0.49156185, -0.3660534]]),\n 'C': np.asarray([[-0.68088231, 0.46824423], [-0.56167659, 0.65270294],\n [-0.82139073, 0.29941512]])}\n\n test_input = {'A': np.asarray([[0.57483139, 0.47120732], [0.48372348, 0.25438544],\n [0.48142649, 0.15931707]]),\n 'B': np.asarray([[-0.06048935, -0.48345293], [-0.01065613, -0.33910828],\n [0.06183066, -0.53376975]]),\n 'C': np.asarray([[-0.74561108, 0.27047295], [-0.69942965, 0.11885162],\n [-0.66489165, 0.1181712]])}\n\n total_array = np.concatenate((test_input['A'], test_input['B'], test_input['C']))\n\n aqua_globals.random_seed = self.random_seed\n feature_map = SecondOrderExpansion(feature_dimension=get_feature_dimension(training_input),\n depth=2,\n entangler_map=[[0, 1]])\n try:\n svm = QSVM(feature_map, training_input, test_input, total_array,\n multiclass_extension=AllPairs())\n\n quantum_instance = QuantumInstance(BasicAer.get_backend('qasm_simulator'),\n shots=self.shots,\n seed_simulator=aqua_globals.random_seed,\n seed_transpiler=aqua_globals.random_seed)\n result = svm.run(quantum_instance)\n self.assertAlmostEqual(result['testing_accuracy'], 0.444444444, places=4)\n self.assertEqual(result['predicted_classes'], ['A', 'A', 'C', 'A',\n 'A', 'A', 'A', 'C', 'C'])\n except NameError as ex:\n 
self.skipTest(str(ex))", "def check_threshold(self, cleaned_data):\n\n computed_threshold = 3\n politician_data = cleaned_data['politician_set']\n total_threshold = int(cleaned_data['threshold'])\n\n for datum in politician_data:\n\n #compute politician threshold\n threshold_delta = self.compute_threshold_delta(datum)\n computed_threshold = computed_threshold + threshold_delta\n\n if computed_threshold != total_threshold:\n raise exceptions.ValidationError(\"La soglia indicata non corrisponde a quella ricavata dai politici scelti\")", "def test_ada_boost_stump_classify_partitions_lt(self):\n i = 1\n range_min = self.data_matrix[:, i].min()\n threshold = (range_min * 2)\n inequal = 'lt'\n returned = ada_boost.stump_classify(self.data_matrix,\n i,\n threshold,\n inequal)\n expected = np.mat([1.0, -1.0, -1.0, -1.0])\n\n delta_between_elements = returned - expected.T\n self.assertFalse(delta_between_elements.any())", "def test_lots_of_probability_thresholds(self):\n data = np.array(\n [\n [[2.9, 2.9, 2.9], [2.9, 2.9, 2.9], [2.9, 2.9, 2.9]],\n [[14.5, 14.5, 14.5], [14.5, 14.5, 14.5], [14.5, 14.5, 14.5]],\n [\n [26.099998, 26.099998, 26.099998],\n [26.099998, 26.099998, 26.099998],\n [26.099998, 26.099998, 26.099998],\n ],\n ],\n dtype=np.float32,\n )\n\n input_probs = np.tile(np.linspace(1, 0, 30), (3, 3, 1)).T\n cube = set_up_probability_cube(\n input_probs.astype(np.float32),\n np.arange(30).astype(np.float32),\n threshold_units=\"degC\",\n )\n\n result = Plugin()._probabilities_to_percentiles(cube, self.percentiles)\n\n self.assertArrayAlmostEqual(result.data, data)", "def test_naive_bayes_soy(test_set, classes, class_probabilities, class_feature_probs):\n\n print('[ INFO ]: Testing soy data with Naive Bayes Classifier...')\n\n class_results = {}\n scores = {}\n\n for soy_class in classes:\n\n # Create new column for class predictions\n feature_set = test_set.drop(classes, axis=1)\n feature_set['pred_class'] = 0\n true_class = test_set[soy_class]\n\n for row in range(len(feature_set)):\n\n # Initialize probability sums for each class\n true_probs_sum = 1\n false_probs_sum = 1\n true_conditional_prob_sum = 1\n false_conditional_prob_sum = 1\n\n for col in feature_set.columns:\n\n if col != 'pred_class':\n\n # Calculate probabilities assuming the class is present or 1\n if feature_set[col].iloc[row] == 1:\n\n # Compute conditional feature probabilities based on\n # wether or not the feature is present (1 or 0)\n true_prob = class_feature_probs[soy_class][0].get(col)\n false_prob = 1 - class_feature_probs[soy_class][1].get(col)\n\n else:\n\n # Calculate probabilities assuming the class is not present or 0\n true_prob = 1 - class_feature_probs[soy_class][0].get(col)\n false_prob = class_feature_probs[soy_class][1].get(col)\n\n # Multiply all feature probabilities together for each record\n true_probs_sum = true_probs_sum * true_prob\n false_probs_sum = false_probs_sum * false_prob\n\n # Multiply class conditional probabilities by conditional feature probabilities\n true_conditional_prob_sum = class_probabilities[soy_class] * true_probs_sum\n false_conditional_prob_sum = (1 - class_probabilities[soy_class]) * false_probs_sum\n\n # Determine which probability is highest - highest one is selected as the prediction value\n if true_conditional_prob_sum > false_conditional_prob_sum:\n feature_set['pred_class'].iloc[row] = 1\n\n # Place the results into a data frame for comparison\n results = pd.concat([feature_set['pred_class'], true_class], axis=1)\n results.columns = ['pred_class', 'true_class']\n 
class_results[soy_class] = results\n\n # Calculate the number of TP, TN, FP, FN\n true_positives = len(results.loc[(results['true_class'] == 1) & (results['pred_class'] == 1)])\n true_negatives = len(results.loc[(results['true_class'] == 0) & (results['pred_class'] == 0)])\n false_positives = len(results.loc[(results['true_class'] == 0) & (results['pred_class'] == 1)])\n false_negatives = len(results.loc[(results['true_class'] == 1) & (results['pred_class'] == 0)])\n\n scores[soy_class] = {\n 'TP' : true_positives,\n 'TN' : true_negatives,\n 'FP' : false_positives,\n 'FN' : false_negatives\n }\n\n return class_results, scores", "def _plot_good_pred_whit_reject(self, test: Set, title=None, fig_size=None):\r\n if fig_size is not None:\r\n fig = plt.figure(figsize=fig_size)\r\n else:\r\n fig = plt.figure()\r\n ax = fig.add_subplot(111)\r\n goodclassified_index = []\r\n for idx_preds in range(self.preds.shape[1] - 1):\r\n new_good_index = []\r\n for idx in range(self.preds.shape[0]):\r\n if self.preds[idx][0][idx_preds] == test.labels[idx] and \\\r\n self.preds[idx][1][idx_preds] != self.preds[idx][1][idx_preds + 1]:\r\n new_good_index.append(idx)\r\n if new_good_index:\r\n ax.scatter(test.features[new_good_index, 0], self.features[new_good_index, 1],\r\n label='Good classified top{0}'.format(int(idx_preds + 1)))\r\n goodclassified_index += new_good_index\r\n new_good_index = []\r\n for idx in range(self.preds.shape[0]):\r\n if self.preds[idx][0][-1] == test.labels[idx]:\r\n new_good_index.append(idx)\r\n if new_good_index:\r\n ax.scatter(test.features[new_good_index, 0], self.features[new_good_index, 1],\r\n label='Good classified top{0}'.format(int(self.preds.shape[1])))\r\n goodclassified_index += new_good_index\r\n reject_idx, misclassified_idx = ([], [])\r\n for idx in range(self.preds.shape[0]):\r\n if idx not in goodclassified_index:\r\n reject = False\r\n for idx_preds in range(self.preds.shape[1] - 1):\r\n if self.preds[idx][1][idx_preds] == self.preds[idx][1][idx_preds + 1]:\r\n reject_idx.append(idx)\r\n reject = True\r\n break\r\n if not reject:\r\n misclassified_idx.append(idx)\r\n if reject_idx:\r\n ax.scatter(test.features[reject_idx, 0], self.features[reject_idx, 1],\r\n label='Reject', c='orange', marker='^')\r\n if misclassified_idx:\r\n ax.scatter(test.features[misclassified_idx, 0], self.features[misclassified_idx, 1],\r\n label='Misclassified', marker='x', c='red')\r\n box = ax.get_position()\r\n ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])\r\n # Put a legend to the right of the current axis\r\n ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))\r\n if title is not None:\r\n ax.set_title(title)\r\n plt.show()", "def calc_openset_classification(data_outlier_probs, num_classes, num_outlier_threshs=50):\n\n dataset_outliers = []\n threshs = []\n\n # loop through each rejection prior value and evaluate the percentage of the dataset being considered as\n # statistical outliers, i.e. 
each data point's outlier probability > rejection prior.\n for i in range(num_outlier_threshs - 1):\n outlier_threshold = (i + 1) * (1.0 / num_outlier_threshs)\n threshs.append(outlier_threshold)\n\n dataset_outliers.append(0)\n total_dataset = 0\n\n for j in range(num_classes):\n total_dataset += len(data_outlier_probs[j])\n\n for k in range(len(data_outlier_probs[j])):\n if data_outlier_probs[j][k] > outlier_threshold:\n dataset_outliers[i] += 1\n\n dataset_outliers[i] = dataset_outliers[i] / float(total_dataset)\n\n return {\"thresholds\": threshs, \"outlier_percentage\": dataset_outliers}", "def isConvexApproximate(data, boundaryPointsDict, triangleDict, approximation, tolerance):\n outliersAllowed = int(np.floor(tolerance * len(list(boundaryPointsDict.keys()))))\n\n outliersCount = 0\n # This loop the boundary points:\n for bdrPntIdx in list(boundaryPointsDict.keys()):\n point = np.array(data[bdrPntIdx]['Coordinate'])\n #The flag showing whether a point is on at least of the triangles after looping all the triangles:\n PntonConvexFlag = False\n # print('PntonConvexFlag ghable tri ha', PntonConvexFlag)\n for tri in list(origIdxtriangleDict.keys()):\n # print('triidx', tri)\n triangle = np.zeros([3,3])\n for corner in range(0,3):\n triangle[corner, :] = data[origIdxtriangleDict[tri][corner]]['Coordinate']\n dis, ptp = distFromPtToTri(point, triangle)\n # isIn = ptInTriangle(ptp, triangle, approximation_treshold)\n\n # if we find a triangle for the selected point such that their distance is zero, we dont need to check the\n # distance of that particular point to the rest of triangles, so we continue by selecting the next point\n if dis <= approximation:\n PntonConvexFlag = True\n break\n # If at the end of the loop still the flag is Flag is false means that the particular point is not on none of the\n # triangles, so we can immediately decide the shape is non convex, and there is no need to check other points\n\n if not PntonConvexFlag:\n outliersCount += 1\n if outliersCount > outliersAllowed:\n return False\n\n # at the end of checking all the points, if there is no false as return we conclude that all the points are on the\n # convex hall and the shape is convex\n plotDemoIfConvex()\n return True", "def test_k_models(param_dict, features, classes, cross_val=4):\r\n assert type(param_dict) == dict\r\n model = GridSearchCV(KNeighborsClassifier(), param_dict, cv=cross_val)\r\n model.fit(features, classes)\r\n return list(model.best_params_.values())[0]", "def score_hard_rationale_predictions(\n truth: List[Rationale], pred: List[Rationale]\n) -> InstanceScores:\n\n truth = set(truth)\n pred = set(pred)\n micro_prec = len(truth & pred) / len(pred)\n micro_rec = len(truth & pred) / len(truth)\n micro_f1 = _f1(micro_prec, micro_rec)\n instance_micro = InstanceScore(p=micro_prec, r=micro_rec, f1=micro_f1)\n\n ann_to_rat = _keyed_rationale_from_list(truth)\n pred_to_rat = _keyed_rationale_from_list(pred)\n instances_to_scores: Dict[str, InstanceScore] = {}\n for k in set(ann_to_rat.keys()) | (pred_to_rat.keys()):\n if len(pred_to_rat.get(k, set())) > 0:\n instance_prec = len(\n ann_to_rat.get(k, set()) & pred_to_rat.get(k, set())\n ) / len(pred_to_rat[k])\n else:\n instance_prec = 0\n if len(ann_to_rat.get(k, set())) > 0:\n instance_rec = len(\n ann_to_rat.get(k, set()) & pred_to_rat.get(k, set())\n ) / len(ann_to_rat[k])\n else:\n instance_rec = 0\n\n instance_f1 = _f1(instance_prec, instance_rec)\n instances_to_scores[k] = InstanceScore(\n p=instance_prec, r=instance_rec, f1=instance_f1\n 
)\n\n # these are calculated as sklearn would\n macro_prec = sum(instance.p for instance in instances_to_scores.values()) / len(\n instances_to_scores\n )\n macro_rec = sum(instance.r for instance in instances_to_scores.values()) / len(\n instances_to_scores\n )\n macro_f1 = sum(instance.f1 for instance in instances_to_scores.values()) / len(\n instances_to_scores\n )\n instance_macro = InstanceScore(p=macro_prec, r=macro_rec, f1=macro_f1)\n\n return InstanceScores(instance_micro=instance_micro, instance_macro=instance_macro)", "def thresh_vote(lst, f):\n\n if len(lst) == 0: # guess 0 by default (appropriate for our dataset)\n q = 0\n else:\n q = float(sum(lst)) / len(lst)\n\n return q >= f", "def test_multiclass_hamming_distance_differentiability(self, inputs):\n preds, target = inputs\n self.run_differentiability_test(\n preds=preds,\n target=target,\n metric_module=MulticlassHammingDistance,\n metric_functional=multiclass_hamming_distance,\n metric_args={\"num_classes\": NUM_CLASSES},\n )", "def classify(X, Y, skf, clf, round_threshold=0.5, average=\"macro\"):\n X = X.values\n if isinstance(Y, pd.Series):\n labels = [\"{}_0\".format(Y.name), \"{}_1\".format(Y.name)]\n Y = np.ravel(Y)\n else:\n Y, labels = Y.values, list(Y.columns)\n\n fold_results = []\n for train, test in skf.split(X, Y):\n current_clf = clone(clf)\n X_train, X_test, Y_train, Y_test = X[train], X[test], Y[train], Y[test]\n\n current_clf.fit(X_train, Y_train)\n Y_prob = current_clf.predict_proba(X_test)\n Y_pred = current_clf.predict(X_test)\n\n (p, r, f1, auc, jac, hl, p_c,\n r_c, f1_c, s_c) = calculate_metrics(Y_test, Y_pred, Y_prob, average)\n\n # calculate overall scores for current fold\n fold_scores = {\n \"precision\": p,\n \"recall\": r,\n \"f1\": f1,\n \"auc\": auc,\n \"jaccard\": jac,\n \"hamming_loss\": hl\n }\n\n for i in range(len(labels)):\n fold_scores[\"precision_{0}\".format(labels[i])] = p_c[i]\n fold_scores[\"recall_{0}\".format(labels[i])] = r_c[i]\n fold_scores[\"f1_{0}\".format(labels[i])] = f1_c[i]\n fold_scores[\"support_{0}\".format(labels[i])] = s_c[i]\n\n fold_results.append({\n \"scores\": fold_scores,\n \"y_pred\": Y_pred,\n \"y_prob\": Y_prob,\n \"y_test\": Y_test\n })\n\n scores = {}\n for score in fold_results[0][\"scores\"].keys():\n values = [s[\"scores\"][score] for s in fold_results]\n scores[score] = (np.sum(values) if score.startswith(\"support_\")\n else np.mean(values))\n\n return scores, fold_results", "def terminate(fitness, tolerance):\n for i in fitness:\n if abs((2**(1.0 / 2)) - i) < tolerance:\n return True\n return False", "def classify(dataset,classifier,feat_mask=None):\r\n \r\n train = dataset.get_data('train',True)\r\n X_train = train['x']\r\n if feat_mask is not None:\r\n X_train = X_train[:,feat_mask]\r\n y_train = train['y']\r\n \r\n classifier.fit(X_train,y_train)\r\n \r\n test = dataset.get_data('test',True)\r\n X_test = test['x']\r\n if feat_mask is not None:\r\n X_test = X_test[:,feat_mask]\r\n y_test = test['y']\r\n \r\n pred = classifier.predict(X_test)\r\n \r\n acc = np.count_nonzero(pred==y_test) / len(y_test)\r\n return acc,y_test,pred", "def make_nonparametric_ab_test(dataframe, iteration_column,\n target_column, not_normal_ids_list):\n rejected_pairs = []\n not_rejected_pairs = []\n category_list = list(itertools.combinations(not_normal_ids_list, 2))\n for i in category_list:\n ttest, p_value = mannwhitneyu(dataframe.\n loc[dataframe[iteration_column] == i[0],\n target_column],\n dataframe.\n loc[dataframe[iteration_column] == i[1],\n target_column])\n if 
p_value >= 0.05:\n not_rejected_pairs.append(i)\n else:\n rejected_pairs.append(i)\n return rejected_pairs, not_rejected_pairs", "def evaluate_classifications(self):\n test_labels = open('./digitdata/testlabels', 'r')\n self.init_confusion_matrix()\n i = 0\n class_stats = {0:[0,0], 1:[0,0], 2:[0,0], 3:[0,0], 4:[0,0], 5:[0,0], 6:[0,0], 7:[0,0], 8:[0,0], 9:[0,0]}\n total_correct = 0\n num_labels = 1000\n for label in test_labels:\n int_label = int(label)\n if int_label == self.solutions[i]:\n class_stats[int_label][0] += 1\n self.confusion_matrix[int_label][self.solutions[i]] += 1\n else:\n self.confusion_matrix[int_label][self.solutions[i]] += 1\n class_stats[int_label][1] += 1\n i += 1\n for k in class_stats:\n print \"Class \" + str(k) + \": \" + str(float(class_stats[k][0])/class_stats[k][1])\n total_correct += float(class_stats[k][0])\n print \"Overall Accuracy: \" + str(total_correct/num_labels) \n for l in range(0,10):\n for w in range(0,10):\n self.confusion_matrix[l][w] = float(self.confusion_matrix[l][w]) / class_stats[l][1]\n \n s = [[str(e) for e in row] for row in self.confusion_matrix]\n lens = [len(max(col, key=len)) for col in zip(*s)]\n fmt = '\\t'.join('{{:{}}}'.format(x) for x in lens)\n table = [fmt.format(*row) for row in s]\n print '\\n'.join(table)\n #self.print_confusion_matrix() ", "def test_ada_boost_stump_classify_partitions_gt(self):\n i = 1\n range_min = self.data_matrix[:, i].min()\n threshold = (range_min * 2)\n inequal = 'gt'\n returned = ada_boost.stump_classify(self.data_matrix,\n i,\n threshold,\n inequal)\n expected = np.mat([-1.0, 1.0, 1.0, 1.0])\n\n delta_between_elements = returned - expected.T\n self.assertFalse(delta_between_elements.any())", "def classifier(x):\n return x[0] - x[1] + 4 < 0", "def test_recommendation_evaluation_6(model):\n assert recommendation_evaluation(model, cn_test_users=None, k=2, n_pos_interactions=None, novelty=True) == \\\n {'HitRatio@2': 0.0, 'NDCG@2': 0.0, 'Precision@2': 0.0, 'Recall@2': 0.0}", "def __check_features(f_list, stopwords):\n ok = True\n for f in f_list:\n if not(__check_feature(f,stopwords)):\n return False\n return True", "def check_accuracy(validation_iterator, model, criterion):\n val_losses = []\n val_accuracies = []\n with torch.no_grad():\n for val_batch_idx, val_batch in enumerate(validation_iterator):\n val_hyp, val_hyp_length = val_batch.hypothesis\n val_prem, val_prem_length = val_batch.premise\n val_target = val_batch.label - 1\n scores = model(val_prem, val_hyp, val_prem_length, val_hyp_length)\n loss = criterion(scores, val_target)\n # return the indices of each prediction\n _, predictions = scores.max(1)\n num_correct = float((predictions == val_target).sum())\n num_sample = float(predictions.size(0))\n val_losses.append(loss.item())\n val_accuracies.append(num_correct / num_sample)\n return val_losses, val_accuracies", "def multiclass_accuracy(prediction, ground_truth):\n correct = sum(a == b for a, b in zip(prediction, ground_truth))\n\n accuracy = correct / len(ground_truth)\n\n return accuracy", "def evalute_subset(X_train, X_test, y_train, y_test):\r\n clf = KNeighborsClassifier(n_neighbors=3)\r\n clf.fit(X_train, y_train) \r\n y_pred = clf.predict(X_test)\r\n return accuracy_score(y_test, y_pred)", "def test_recommendation_evaluation_7(model, interactions_ds):\n assert recommendation_evaluation(model, interactions_ds[1], cn_test_users=None, k=2, n_pos_interactions=None,\n novelty=False, interaction_threshold=2) == \\\n {'HitRatio@2': 0.0069, 'NDCG@2': 0.0116, 'Precision@2': 0.0104, 
'Recall@2': 0.0069}", "def evaluate_detections(ground_truth, predictions, class_name, overlap_threshold=0.5,\n allow_multiple_matches_per_ignored=True,\n verbose=True):\n\n Detection = namedtuple('Detection', ['image', 'bbox', 'score', 'gt_match'])\n GT = namedtuple('GroundTruth', ['bbox', 'is_matched', 'is_ignored'])\n detections = [Detection(image=img_pred.image_path,\n bbox=np.array(obj_pred[\"bbox\"]),\n score=obj_pred.get(\"score\", 0.0),\n gt_match=-1)\n for img_pred in predictions\n for obj_pred in img_pred\n if obj_pred[\"type\"] == class_name]\n\n scores = np.array([detection.score for detection in detections])\n sorted_ind = np.argsort(-scores)\n detections = [detections[i] for i in sorted_ind]\n\n gts = {}\n for img_gt in ground_truth:\n gts[img_gt.image_path] = GT(\n bbox=np.vstack([np.array(obj_gt[\"bbox\"]) for obj_gt in img_gt]) if img_gt else np.empty(\n (0, 4)),\n is_matched=np.zeros(len(img_gt), dtype=bool),\n is_ignored=np.array([obj_gt.get(\"is_ignored\", False) for obj_gt in img_gt], dtype=bool))\n\n detections_num = len(detections)\n true_pos = np.zeros(detections_num)\n false_pos = np.zeros(detections_num)\n\n for i, detection in tqdm(enumerate(detections), desc=\"Processing detections\",\n disable=not verbose):\n image_path = detection.image\n bboxes_gt = gts[image_path].bbox\n bbox = detection.bbox\n max_overlap = -np.inf\n\n if bboxes_gt is not None and bboxes_gt.shape[0] > 0:\n intersection_xmin = np.maximum(bboxes_gt[:, 0], bbox[0])\n intersection_ymin = np.maximum(bboxes_gt[:, 1], bbox[1])\n intersection_xmax = np.minimum(bboxes_gt[:, 0] + bboxes_gt[:, 2], bbox[0] + bbox[2])\n intersection_ymax = np.minimum(bboxes_gt[:, 1] + bboxes_gt[:, 3], bbox[1] + bbox[3])\n intersection_width = np.maximum(intersection_xmax - intersection_xmin, 0.)\n intersection_height = np.maximum(intersection_ymax - intersection_ymin, 0.)\n intersection = intersection_width * intersection_height\n\n det_area = bbox[2] * bbox[3]\n gt_area = bboxes_gt[:, 2] * bboxes_gt[:, 3]\n union = (det_area + gt_area - intersection)\n ignored_mask = gts[image_path].is_ignored\n if allow_multiple_matches_per_ignored:\n if np.any(ignored_mask):\n union[ignored_mask] = det_area\n\n overlaps = intersection / union\n # Match not ignored ground truths first.\n if np.any(~ignored_mask):\n overlaps_filtered = np.copy(overlaps)\n overlaps_filtered[ignored_mask] = 0.0\n max_overlap = np.max(overlaps_filtered)\n argmax_overlap = np.argmax(overlaps_filtered)\n # If match with non-ignored ground truth is not good enough,\n # try to match with ignored ones.\n if max_overlap < overlap_threshold and np.any(ignored_mask):\n overlaps_filtered = np.copy(overlaps)\n overlaps_filtered[~ignored_mask] = 0.0\n max_overlap = np.max(overlaps_filtered)\n argmax_overlap = np.argmax(overlaps_filtered)\n detections[i] = detection._replace(gt_match=argmax_overlap)\n\n if max_overlap >= overlap_threshold:\n if not gts[image_path].is_ignored[argmax_overlap]:\n if not gts[image_path].is_matched[argmax_overlap]:\n true_pos[i] = 1.\n gts[image_path].is_matched[argmax_overlap] = True\n else:\n false_pos[i] = 1.\n elif not allow_multiple_matches_per_ignored:\n gts[image_path].is_matched[argmax_overlap] = True\n else:\n false_pos[i] = 1.\n\n false_pos = np.cumsum(false_pos)\n true_pos = np.cumsum(true_pos)\n\n debug_visualization = False\n if debug_visualization:\n for image_path, bboxes_gt in gts.items():\n\n print(image_path)\n image = cv2.imread(image_path)\n image_gt = np.copy(image)\n for bbox in bboxes_gt.bbox:\n 
cv2.rectangle(image_gt, tuple(bbox[:2]), tuple(bbox[2:] + bbox[:2]),\n color=(255, 255, 0), thickness=2)\n cv2.imshow(\"gt\", image_gt)\n for detection in detections:\n if detection.image != image_path:\n continue\n bbox = detection.bbox\n cv2.rectangle(image, tuple(bbox[:2]), tuple(bbox[2:] + bbox[:2]), color=(0, 255, 0),\n thickness=2)\n if detection.gt_match is not None:\n bbox = bboxes_gt.bbox[detection.gt_match]\n cv2.rectangle(image, tuple(bbox[:2]), tuple(bbox[2:] + bbox[:2]),\n color=(0, 0, 255), thickness=1)\n cv2.imshow(\"image\", image)\n cv2.waitKey(0)\n\n # Handle equal-score detections.\n # Get index of the last occurrence of a score.\n ind = len(scores) - np.unique(scores[sorted_ind[::-1]], return_index=True)[1] - 1\n ind = ind[::-1]\n # Though away redundant points.\n false_pos = false_pos[ind]\n true_pos = true_pos[ind]\n\n total_positives_num = np.sum([np.count_nonzero(~gt.is_ignored) for gt in gts.values()])\n recall = true_pos / float(total_positives_num)\n # Avoid divide by zero in case the first detection matches an ignored ground truth.\n precision = true_pos / np.maximum(true_pos + false_pos, np.finfo(np.float64).eps)\n miss_rate = 1.0 - recall\n fppi = false_pos / float(len(gts))\n\n return recall, precision, miss_rate, fppi", "def classify(self, testData):\n guesses = []\n self.posteriors = [] # Log posteriors are stored for later data analysis (autograder).\n for datum in testData:\n posterior = self.calculateLogJointProbabilities(datum)\n guesses.append(posterior.argMax())\n self.posteriors.append(posterior)\n return guesses", "def classify(self, testData):\n guesses = []\n self.posteriors = [] # Log posteriors are stored for later data analysis (autograder).\n for datum in testData:\n posterior = self.calculateLogJointProbabilities(datum)\n guesses.append(posterior.argMax())\n self.posteriors.append(posterior)\n return guesses", "def test_true_detections(self):\n expected_accuracy = dict(num_recall=10, uniq_recall=10, num_precision=10, uniq_precision=10)\n self._run_and_validate(self.true_dets, self.ground_truths, expected_accuracy)", "def accuracy(targets: List[int], preds: Union[List[float], List[List[float]]], \n threshold: float = 0.5) -> float:\n if type(preds[0]) == list: # multiclass\n hard_preds = [p.index(max(p)) for p in preds]\n else:\n hard_preds = [1 if p > threshold else 0 for p in preds] # binary prediction\n\n return accuracy_score(targets, hard_preds)" ]
[ "0.68727624", "0.59430206", "0.58152044", "0.5540413", "0.55316037", "0.5448521", "0.5405066", "0.5398594", "0.5388817", "0.5313519", "0.53004795", "0.5284414", "0.5269668", "0.5265305", "0.5240204", "0.5234198", "0.520831", "0.5201402", "0.5200734", "0.51976657", "0.5176463", "0.5150115", "0.5148363", "0.5142511", "0.51412207", "0.51372844", "0.513423", "0.5127056", "0.5117695", "0.50936884", "0.5075625", "0.5053135", "0.5045613", "0.50324523", "0.50249934", "0.50213784", "0.50211537", "0.5020892", "0.49843004", "0.49763316", "0.49641344", "0.49597162", "0.49492797", "0.4947234", "0.4937743", "0.4933934", "0.49317765", "0.49293977", "0.49155936", "0.49095097", "0.49094766", "0.4906703", "0.48947906", "0.4892107", "0.48913765", "0.489102", "0.4890895", "0.4888126", "0.48876202", "0.48863643", "0.4880006", "0.48718172", "0.48707432", "0.486931", "0.48671743", "0.4865583", "0.48635033", "0.48384437", "0.48354274", "0.48352796", "0.4831691", "0.48276317", "0.482076", "0.48164684", "0.48152566", "0.48134828", "0.48127258", "0.48108453", "0.4809481", "0.48072484", "0.48056626", "0.48046225", "0.47995716", "0.47938493", "0.47930476", "0.4792763", "0.47839576", "0.47814745", "0.4768613", "0.47650298", "0.47601613", "0.47598526", "0.4758613", "0.47583443", "0.4757217", "0.4753151", "0.47517306", "0.47517306", "0.4751513", "0.4751095" ]
0.7659174
0
Given a dictionary mapping training points to their old weights, a list of training points misclassified by the current weak classifier, and the error rate of the current weak classifier, returns a dictionary mapping training points to their new weights. This function is allowed (but not required) to modify the input dictionary point_to_weight.
def update_weights(point_to_weight, misclassified_points, error_rate):
    for p in point_to_weight:
        if p in misclassified_points:
            point_to_weight[p] *= make_fraction(1,2)*make_fraction(1, error_rate)
        else:
            point_to_weight[p] *= make_fraction(1,2)*make_fraction(1, 1-error_rate)
    return point_to_weight
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate_error_rates(point_to_weight, classifier_to_misclassified):\n ans = {}\n for c in classifier_to_misclassified:\n misclassified = classifier_to_misclassified[c]\n ans[c] = 0\n for p in misclassified:\n ans[c] += point_to_weight[p]\n return ans", "def keypoint_loss(self, pred_keypoints_2d, gt_keypoints_2d, openpose_weight, gt_weight):\n conf = gt_keypoints_2d[:, :, -1].unsqueeze(-1).clone()\n conf[:, :25] *= openpose_weight\n conf[:, 25:] *= gt_weight\n loss = (conf * self.criterion_keypoints(pred_keypoints_2d, gt_keypoints_2d[:, :, :-1])).mean()\n return loss", "def update_weights(x_train, y_train, weights, learning_rate):\r\n predictions = compute_prediction(x_train, weights)\r\n weights_delta = np.dot(x_train.T, y_train - predictions)\r\n m = y_train.shape[0]\r\n weights += learning_rate / float(m) * weights_delta\r\n return weights", "def prepare_loss_weights(training_endpoints, loss_weights=None):\n if loss_weights is None:\n for e in training_endpoints:\n e.loss_weight = 1.\n elif isinstance(loss_weights, collections.abc.Mapping):\n generic_utils.check_for_unexpected_keys(\n 'loss_weights', loss_weights,\n [e.output_name for e in training_endpoints])\n for e in training_endpoints:\n e.loss_weight = loss_weights.get(e.output_name, 1.)\n elif isinstance(loss_weights, list):\n if len(loss_weights) != len(training_endpoints):\n raise ValueError('When passing a list as loss_weights, '\n 'it should have one entry per model output. '\n 'The model has ' + str(len(training_endpoints)) +\n ' outputs, but you passed loss_weights=' +\n str(loss_weights))\n for w, e in zip(loss_weights, training_endpoints):\n e.loss_weight = w\n else:\n raise TypeError('Could not interpret loss_weights argument: ' +\n str(loss_weights) + ' - expected a list of dicts.')", "def trainWeakClassifier(trainingSamples, weights, feature):\n #compute feature values\n featureValues = []\n positiveOrNegative = []\n for sample in trainingSamples:\n featureValues.append(feature.computeScore(sample[0], 0, 0))\n positiveOrNegative.append(sample[1])\n \n #zip with weights and sort by feature value\n featureValues = zip(featureValues, weights, positiveOrNegative)\n featureValues = sorted(featureValues, key=lambda tup: tup[0])\n \n #sum all weights of the positive and negative samples\n negativeWeightsTotal = 0\n positiveWeightsTotal = 0\n for value in featureValues:\n if value[2] == 1:\n positiveWeightsTotal += value[1]\n else:\n negativeWeightsTotal += value[1]\n \n #find the feature with the smallest error\n bestFeatureIndex = 0\n bestFeatureError = 1e10\n negativeWeightsSoFar = 0\n positiveWeightsSoFar = 0\n positiveOnTheLeft = 0\n positivesTotal = 0\n for i in range(0, len(featureValues)):\n error1 = positiveWeightsSoFar-negativeWeightsSoFar+negativeWeightsTotal\n error2 = negativeWeightsSoFar-positiveWeightsSoFar+positiveWeightsTotal\n error = min([error1, error2])\n \n if bestFeatureError > error:\n bestFeatureError = error\n bestFeatureIndex = i\n positiveOnTheLeft = positivesTotal\n \n if featureValues[i][2] == 1:\n positiveWeightsSoFar += featureValues[i][1]\n positivesTotal += 1\n else:\n negativeWeightsSoFar += featureValues[i][1]\n \n #count how much samples are there on the right\n positiveOnTheRight = positivesTotal - positiveOnTheLeft\n \n #determine the polarity and threshold\n polarity = -1\n threshold = featureValues[bestFeatureIndex][0]\n if positiveOnTheLeft > positiveOnTheRight:\n polarity = 1\n else:\n polarity = -1\n \n #build and return a weak classifier\n return WeakClassifier(feature, 
threshold, polarity)", "def _compute_keypoint_estimation_losses(self, task_name, input_height,\n input_width, prediction_dict,\n per_pixel_weights):\n kp_params = self._kp_params_dict[task_name]\n heatmap_key = get_keypoint_name(task_name, KEYPOINT_HEATMAP)\n offset_key = get_keypoint_name(task_name, KEYPOINT_OFFSET)\n regression_key = get_keypoint_name(task_name, KEYPOINT_REGRESSION)\n depth_key = get_keypoint_name(task_name, KEYPOINT_DEPTH)\n heatmap_loss = self._compute_kp_heatmap_loss(\n input_height=input_height,\n input_width=input_width,\n task_name=task_name,\n heatmap_predictions=prediction_dict[heatmap_key],\n classification_loss_fn=kp_params.classification_loss,\n per_pixel_weights=per_pixel_weights)\n offset_loss = self._compute_kp_offset_loss(\n input_height=input_height,\n input_width=input_width,\n task_name=task_name,\n offset_predictions=prediction_dict[offset_key],\n localization_loss_fn=kp_params.localization_loss)\n reg_loss = self._compute_kp_regression_loss(\n input_height=input_height,\n input_width=input_width,\n task_name=task_name,\n regression_predictions=prediction_dict[regression_key],\n localization_loss_fn=kp_params.localization_loss)\n\n loss_dict = {}\n loss_dict[heatmap_key] = (\n kp_params.keypoint_heatmap_loss_weight * heatmap_loss)\n loss_dict[offset_key] = (\n kp_params.keypoint_offset_loss_weight * offset_loss)\n loss_dict[regression_key] = (\n kp_params.keypoint_regression_loss_weight * reg_loss)\n if kp_params.predict_depth:\n depth_loss = self._compute_kp_depth_loss(\n input_height=input_height,\n input_width=input_width,\n task_name=task_name,\n depth_predictions=prediction_dict[depth_key],\n localization_loss_fn=kp_params.localization_loss)\n loss_dict[depth_key] = kp_params.keypoint_depth_loss_weight * depth_loss\n return loss_dict", "def update_weights(weights, alpha, y_true, y_pred):\n def change_labels(arr):\n for i,a in enumerate(arr):\n if a == 0:\n arr[i] = -1\n return arr \n \n y_true, y_pred = change_labels(y_true), change_labels(y_pred)\n w_hat = weights * np.exp(-alpha * y_true * y_pred)\n return w_hat / sum(w_hat)", "def initialize_weights(training_points):\n N = len(training_points)\n ans = {}\n for p in training_points:\n ans[p] = make_fraction(1, N)\n return ans", "def weightKmers(self, weightDict):\n for k, w in weightDict.iteritems():\n assert k in self.kmers\n self.G.edge[k + \"_L\"][k + \"_R\"]['weight'] = w", "def update_weight():\n\twts = request.json['featureWeights']\n\n\t# Intialize new model with the latest weights\n\tglobal model\n\tmodel = tscore.ScoreModel(wts)\n\treturn jsonify( { 'updated': \"True\", 'featureWeights': wts } ), 201", "def adjust_weights(weights, target, learn_rate):\r\n\r\n for w in range(0, len(target)):\r\n weights[w] += learn_rate * (target[w] - weights[w])", "def upgrade_state_dict_with_infoxlm_weights(\n state_dict: Dict[str, Any], pretrained_infoxlm_checkpoint: str, num_layers: int, shared_cross_attn: bool=False\n) -> Dict[str, Any]:\n if not os.path.exists(pretrained_infoxlm_checkpoint):\n raise IOError(\"Model file not found: {}\".format(pretrained_infoxlm_checkpoint))\n\n # state = checkpoint_utils.load_checkpoint_to_cpu(pretrained_infoxlm_checkpoint)\n with open(PathManager.get_local_path(pretrained_infoxlm_checkpoint), \"rb\") as f:\n state = torch.load(f, map_location=torch.device(\"cpu\"))\n infoxlm_state_dict = state[\"model\"]\n # print(state_dict.keys())\n \n for key in infoxlm_state_dict.keys():\n if 'layers' in key and int(key.split('.')[3]) > num_layers-1:\n continue\n if not 
key.startswith('decoder.'):\n continue\n if 'lm_head' not in key:\n if 'in_proj_weight' in key:\n q, k ,v = infoxlm_state_dict[key].chunk(3, dim=0)\n state_dict[key.replace('decoder.sentence_encoder.', '').replace('in_proj_weight', 'q_proj.weight')] = q\n state_dict[key.replace('decoder.sentence_encoder.', '').replace('in_proj_weight', 'k_proj.weight')] = k\n state_dict[key.replace('decoder.sentence_encoder.', '').replace('in_proj_weight', 'v_proj.weight')] = v\n if shared_cross_attn:\n state_dict[key.replace('decoder.sentence_encoder.', '').replace('in_proj_weight', 'q_proj.weight').replace('self_attn', 'encoder_attn')] = q\n state_dict[key.replace('decoder.sentence_encoder.', '').replace('in_proj_weight', 'k_proj.weight').replace('self_attn', 'encoder_attn')] = k\n state_dict[key.replace('decoder.sentence_encoder.', '').replace('in_proj_weight', 'v_proj.weight').replace('self_attn', 'encoder_attn')] = v\n elif 'in_proj_bias' in key:\n q, k ,v = infoxlm_state_dict[key].chunk(3, dim=0)\n state_dict[key.replace('decoder.sentence_encoder.', '').replace('in_proj_bias', 'q_proj.bias')] = q\n state_dict[key.replace('decoder.sentence_encoder.', '').replace('in_proj_bias', 'k_proj.bias')] = k\n state_dict[key.replace('decoder.sentence_encoder.', '').replace('in_proj_bias', 'v_proj.bias')] = v\n if shared_cross_attn:\n state_dict[key.replace('decoder.sentence_encoder.', '').replace('in_proj_bias', 'q_proj.bias').replace('self_attn', 'encoder_attn')] = q\n state_dict[key.replace('decoder.sentence_encoder.', '').replace('in_proj_bias', 'k_proj.bias').replace('self_attn', 'encoder_attn')] = k\n state_dict[key.replace('decoder.sentence_encoder.', '').replace('in_proj_bias', 'v_proj.bias').replace('self_attn', 'encoder_attn')] = v\n elif 'emb_layer_norm' in key:\n state_dict[key.replace('decoder.sentence_encoder.emb_layer_norm', 'layernorm_embedding')] = infoxlm_state_dict[key]\n elif 'embed_positions' in key:\n state_dict[key.replace('decoder.sentence_encoder.', '')] = infoxlm_state_dict[key][:state_dict[key.replace('decoder.sentence_encoder.', '')].size(0)]\n elif 'embed_tokens' in key:\n state_dict[key.replace('decoder.sentence_encoder.', '')][:infoxlm_state_dict[key].size(0)] = infoxlm_state_dict[key]\n else:\n state_dict[key.replace('decoder.sentence_encoder.', '')] = infoxlm_state_dict[key]\n\n return state_dict", "def upgrade_state_dict_with_xlm_weights(\r\n state_dict: Dict[str, Any], pretrained_xlm_checkpoint: str\r\n) -> Dict[str, Any]:\r\n if not os.path.exists(pretrained_xlm_checkpoint):\r\n raise IOError(\"Model file not found: {}\".format(pretrained_xlm_checkpoint))\r\n\r\n state = checkpoint_utils.load_checkpoint_to_cpu(pretrained_xlm_checkpoint)\r\n xlm_state_dict = state[\"model\"]\r\n for key in xlm_state_dict.keys():\r\n\r\n for search_key in [\"embed_tokens\", \"embed_positions\", \"layers\"]:\r\n if search_key in key:\r\n subkey = key[key.find(search_key) :]\r\n assert subkey in state_dict, (\r\n \"{} Transformer encoder / decoder \"\r\n \"state_dict does not contain {}. 
Cannot \"\r\n \"load {} from pretrained XLM checkpoint \"\r\n \"{} into Transformer.\".format(\r\n str(state_dict.keys()), subkey, key, pretrained_xlm_checkpoint\r\n )\r\n )\r\n\r\n state_dict[subkey] = xlm_state_dict[key]\r\n return state_dict", "def getGradientWeights(y_train):\n cls_indices, event_count = np.unique(np.array(y_train), return_counts=True)\n min_class = min(event_count)\n return {cls_index: float(min_class) / cls_count\n for cls_index, cls_count in zip(cls_indices, event_count)}", "def update(self, values, train, eta=.1):\n\t\tfor X, y_true in zip(values, train):\n\t\t\tprediction = self.activate(X)\n\t\t\terror = y_true - prediction\n\t\t\tweight_update = error * eta * X\n\t\t\tself.weights += weight_update", "def ApplyWeights(frame):\n if \"Wpol\" not in frame and \"Wunpol\" not in frame:\n return\n\n if frame[\"T\"].weighted:\n return frame\n ValidateMaps(frame)\n\n tmap = frame.pop(\"T\")\n\n if \"Wpol\" in frame:\n wmap = frame[\"Wpol\"]\n qmap = frame.pop(\"Q\")\n umap = frame.pop(\"U\")\n maps.apply_weights(tmap, qmap, umap, wmap)\n else:\n wmap = frame[\"Wunpol\"]\n maps.apply_weights_t(tmap, wmap)\n\n frame[\"T\"] = tmap\n if \"Wpol\" in frame:\n frame[\"Q\"] = qmap\n frame[\"U\"] = umap\n\n return frame", "def _compute_kp_heatmap_loss(self, input_height, input_width, task_name,\n heatmap_predictions, classification_loss_fn,\n per_pixel_weights):\n gt_keypoints_list = self.groundtruth_lists(fields.BoxListFields.keypoints)\n gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes)\n gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)\n gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes)\n\n assigner = self._target_assigner_dict[task_name]\n (keypoint_heatmap, num_instances_per_kp_type,\n valid_mask_batch) = assigner.assign_keypoint_heatmap_targets(\n height=input_height,\n width=input_width,\n gt_keypoints_list=gt_keypoints_list,\n gt_weights_list=gt_weights_list,\n gt_classes_list=gt_classes_list,\n gt_boxes_list=gt_boxes_list)\n flattened_valid_mask = _flatten_spatial_dimensions(valid_mask_batch)\n flattened_heapmap_targets = _flatten_spatial_dimensions(keypoint_heatmap)\n # Sum over the number of instances per keypoint types to get the total\n # number of keypoints. 
Note that this is used to normalized the loss and we\n # keep the minimum value to be 1 to avoid generating weird loss value when\n # no keypoint is in the image batch.\n num_instances = tf.maximum(\n tf.cast(tf.reduce_sum(num_instances_per_kp_type), dtype=tf.float32),\n 1.0)\n loss = 0.0\n # Loop through each feature output head.\n for pred in heatmap_predictions:\n pred = _flatten_spatial_dimensions(pred)\n unweighted_loss = classification_loss_fn(\n pred,\n flattened_heapmap_targets,\n weights=tf.ones_like(per_pixel_weights))\n # Apply the weights after the loss function to have full control over it.\n loss += unweighted_loss * per_pixel_weights * flattened_valid_mask\n loss = tf.reduce_sum(loss) / (\n float(len(heatmap_predictions)) * num_instances)\n return loss", "def _update_module_kl_coeff(\n self, module_id: ModuleID, hps: AppoLearnerHyperparameters, sampled_kl: float\n ) -> Mapping[str, Any]:", "def boosting(train_data, dim, t):\n w = []\n w.append([float(1) / float(len(train_data))] * len(train_data))\n\n # Store models in m, models are stored as a tuple with the w_vector as well\n # as the t_vector\n\n m = []\n\n for i in range(t):\n print(\"Iteration \" + str(i + 1) + str(\":\"))\n t_vec, w_vec, error = binary_classifier(train_data, dim, w[i])\n alpha = 0.5 * math.log(float(1 - error) / float(error))\n print(\"Error = \" + str(error))\n print(\"Alpha = \" + str(alpha))\n if error >= 0.5:\n break\n # Add model only if it has error rate less than 0.5\n m.append((t_vec, w_vec, alpha))\n\n is_increase_weights_printed = False\n is_decrease_weights_printed = False\n factor_to_increase = 0\n factor_to_decrease = 0\n # Update weights by figuring out which points that are misclassified\n w.append([0] * len(train_data))\n for j in range(len(train_data)):\n if np.dot(train_data[j][0:dim], w_vec) > t_vec:\n if train_data[j][dim] == -1:\n # misclassified\n w[i + 1][j] = float(w[i][j]) / float(2 * error)\n if not is_increase_weights_printed:\n factor_to_increase = float(1) / float(2 * error)\n is_increase_weights_printed = True\n else:\n # correctly classified\n w[i + 1][j] = float(w[i][j]) / float(2 * (1 - error))\n if not is_decrease_weights_printed:\n factor_to_decrease = float(1) / float(2 * (1 - error))\n is_decrease_weights_printed = True\n else:\n if train_data[j][dim] == 1:\n # misclassified\n w[i + 1][j] = float(w[i][j]) / float(2 * error)\n if not is_increase_weights_printed:\n factor_to_increase = float(1) / float(2 * error)\n is_increase_weights_printed = True\n else:\n # correctly classified\n w[i + 1][j] = float(w[i][j]) / float(2 * (1 - error))\n if not is_decrease_weights_printed:\n factor_to_decrease = float(1) / float(2 * (1 - error))\n is_decrease_weights_printed = True\n\n print(\"Factor to increase weights = \" + str(factor_to_increase))\n print(\"Factor to decrease weights = \" + str(factor_to_decrease))\n\n return m", "def loss(self,\n inputs: Tuple[Tensor],\n batch_data_samples: OptSampleList,\n train_cfg: ConfigType = {}) -> dict:\n\n pred_coords, pred_heatmaps = self.forward(inputs)\n keypoint_labels = torch.cat(\n [d.gt_instance_labels.keypoint_labels for d in batch_data_samples])\n keypoint_weights = torch.cat([\n d.gt_instance_labels.keypoint_weights for d in batch_data_samples\n ])\n gt_heatmaps = torch.stack(\n [d.gt_fields.heatmaps for d in batch_data_samples])\n\n input_list = [pred_coords, pred_heatmaps]\n target_list = [keypoint_labels, gt_heatmaps]\n # calculate losses\n losses = dict()\n\n loss_list = self.loss_module(input_list, target_list, 
keypoint_weights)\n\n loss = loss_list[0] + loss_list[1]\n\n if self.lambda_t > 0:\n mh = MessageHub.get_current_instance()\n cur_epoch = mh.get_info('epoch')\n if cur_epoch >= self.lambda_t:\n loss = loss_list[0]\n\n losses.update(loss_kpt=loss)\n\n # calculate accuracy\n _, avg_acc, _ = keypoint_pck_accuracy(\n pred=to_numpy(pred_coords),\n gt=to_numpy(keypoint_labels),\n mask=to_numpy(keypoint_weights) > 0,\n thr=0.05,\n norm_factor=np.ones((pred_coords.size(0), 2), dtype=np.float32))\n\n acc_pose = torch.tensor(avg_acc, device=keypoint_labels.device)\n losses.update(acc_pose=acc_pose)\n\n return losses", "def __update_weights_grad_desc(self, x_train, y_train):\n\n predictions = self.__compute_prediction(x_train)\n weights_delta = np.dot(x_train.T, y_train - predictions)\n\n m = y_train.shape[0]\n self.__weights += self.__learning_rate / float(m) * weights_delta", "def get_loss_weights(self, speakers, type='fair'):\r\n mean_losses = np.mean(self.history, axis=1)\r\n \"\"\"Sort lowest to highest\"\"\"\r\n order_indices = np.argsort(mean_losses)\r\n \"\"\"Create weights as in Dr. Hasegawa-Johnson's slides (weight is number of classes performing better)\r\n We add one to each so that every class has some weight in the loss\"\"\"\r\n weights = np.linspace(1, mean_losses.shape[0], mean_losses.shape[0])\r\n \"\"\"Assign the weights according to the proper order\"\"\"\r\n class_weights = {}\r\n for index, i in enumerate(order_indices):\r\n class_weights[i] = weights[index]\r\n \"\"\"Now grab the correct weight for each speaker\"\"\"\r\n loss_weights = []\r\n for speaker in speakers:\r\n loss_weights.append(class_weights[self.s2i[speaker]])\r\n if type == 'fair':\r\n \"\"\"Add in the lambda weighting for fair and unfair training\"\"\"\r\n unfair_weights = np.ones(shape=(len(loss_weights, )))\r\n loss_weights = np.asarray(loss_weights)\r\n\r\n \"\"\"Lambda part\"\"\"\r\n loss_weights = config.train.fairness_lambda * loss_weights + (1-config.train.fairness_lambda) * unfair_weights\r\n\r\n elif type == 'unfair':\r\n \"\"\"All class losses are weighted evenly, unfair\"\"\"\r\n loss_weights = np.ones(shape=(len(loss_weights,)))\r\n\r\n loss_weights = torch.from_numpy(loss_weights)\r\n loss_weights = self.fix_tensor(loss_weights)\r\n return loss_weights", "def _lr_weight():\n feature_weights = dict(zip(lr['labels'], [dict(zip(lr['features'], r.coef_.squeeze().tolist())) for r in lr['regressions']]))\n return feature_weights['IT']", "def get_tuned_weights(pmap, W, EC, alpha=0.5, grow_synapses=False):\n norm = numpy.sqrt((W[0]**2).sum(axis=0))\n W0 = W / norm\n W1 = numpy.empty((pmap.num_maps, EC.num_maps), 'd')\n for i in xrange(pmap.num_maps):\n W1[i] = numpy.tanh(3*(pmap.maxima[i,2]-0.5)) * \\\n EC.map_value(pmap.maxima[i,0], pmap.maxima[i,1])\n if not grow_synapses:\n W1[i] *= W0[i] > 0.0\n W1[i] /= numpy.sqrt((W1[i]**2).sum(axis=0)) # normalize\n W2 = (1.0-alpha)*W0 + alpha*W1 # mixed old and tuned matrices\n for i in xrange(pmap.num_maps):\n W2[i] *= norm / numpy.sqrt((W2[i]**2).sum(axis=0)) # hetersynaptic LTD\n return W2", "def _get_weights(dist, weights):\n if weights in (None, \"uniform\"):\n return None\n\n if weights == \"distance\":\n # if user attempts to classify a point that was zero distance from one\n # or more training points, those training points are weighted as 1.0\n # and the other points as 0.0\n if dist.dtype is np.dtype(object):\n for point_dist_i, point_dist in enumerate(dist):\n # check if point_dist is iterable\n # (ex: RadiusNeighborClassifier.predict may set an element 
of\n # dist to 1e-6 to represent an 'outlier')\n if hasattr(point_dist, \"__contains__\") and 0.0 in point_dist:\n dist[point_dist_i] = point_dist == 0.0\n else:\n dist[point_dist_i] = 1.0 / point_dist\n else:\n with np.errstate(divide=\"ignore\"):\n dist = 1.0 / dist\n inf_mask = np.isinf(dist)\n inf_row = np.any(inf_mask, axis=1)\n dist[inf_row] = inf_mask[inf_row]\n return dist\n\n if callable(weights):\n return weights(dist)", "def update(self, x_dict, y_dict, weight):\n assert len(x_dict) == len(y_dict), \"invalid # of qids\"\n \n qids = self.__get_shuffled_qids(x_dict, y_dict, weight.epoch)\n w = weight.get_dense_weight()\n for qid in tqdm(qids):\n w = approx_ap(x_dict[qid].toarray(), y_dict[qid], w, self.eta, self.alpha, self.beta)\n weight.set_weight(sp.csr_matrix(w.reshape((1, weight.dims))))\n weight.epoch += 1", "def update_weights(a_plus, a_minus, tau_plus, tau_minus, X, Y, pre_post_trace, post_pre_trace, trace, tau_e): \n # pre trace without spikes - for coincident spikes \n conv_pre_old, _ = convolution2(pre_post_trace, tau_plus, a_plus, 0) \n # post trace without spikes - for coincident spikes \n conv_post_old, _ = convolution2(post_pre_trace, tau_minus, a_minus, 0)\n \n # presynaptic neuron trace \n conv_pre_scaled, pre_post_trace = convolution2(pre_post_trace, tau_plus, a_plus, X)\n # postynaptic neuron trace \n conv_post_scaled, post_pre_trace = convolution2(post_pre_trace, tau_minus, a_minus, Y)\n \n # total synaptic change due to STDP \n W = (conv_pre_scaled*Y + conv_post_scaled*X)* ~(X&Y) + \\\n ((conv_pre_old*Y + conv_post_old*X)+(a_plus + a_minus)/2)*(X&Y)\n \n ## weight change is convoluted with eligibility trace \n eligibility_trace, trace = convolution2(trace, tau_e, 1, W)\n \n return pre_post_trace, post_pre_trace, eligibility_trace, trace, W", "def _update_weights(self, good_da, bad_da, good_tree, bad_tree, good_feats, bad_feats):\n # import ipdb; ipdb.set_trace()\n if self.diffing_trees:\n good_sts, bad_sts = good_tree.diffing_trees(bad_tree, symmetric=True)\n for good_st, bad_st in zip(good_sts, bad_sts):\n good_feats = self._extract_feats(good_st, good_da)\n bad_feats = self._extract_feats(bad_st, bad_da)\n subtree_w = 1\n if self.diffing_trees.endswith('weighted'):\n subtree_w = (len(good_st) + len(bad_st)) / float(len(good_tree) + len(bad_tree))\n self._update_nn(bad_feats, good_feats, subtree_w * self.alpha)\n else:\n self._update_nn(bad_feats, good_feats, self.alpha)", "def update_weights(self):\n\t\tpass", "def fit(self,X,y, sample_weight):\n classes=sorted(y.unique())\n differences={}\n #set the differences between signal points\n differences={classes[i]:(classes[i+1]-classes[i-1])/2 for i in range(1,len(classes)-1) if classes[i]>0}\n differences[classes[0]]=classes[1]-classes[0]\n differences[classes[-1]]=classes[-1]-classes[-2]\n diffsum=sum(differences.values())\n #print differences, \"->\", diffsum\n self.scale_={}\n for classlabel in classes:\n sumweight=sample_weight[y==classlabel].sum()\n self.scale_[classlabel]=differences[classlabel]/(sumweight*diffsum)\n return", "def model_loss(\n model, true_vals, logit_pred_vals, epoch_num, avg_class_loss,\n att_prior_loss_weight, att_prior_loss_weight_anneal_type,\n att_prior_loss_weight_anneal_speed, att_prior_grad_smooth_sigma,\n fourier_att_prior_freq_limit, fourier_att_prior_freq_limit_softness,\n att_prior_loss_only, l2_reg_loss_weight, input_grads=None, status=None\n):\n corr_loss = model.correctness_loss(\n true_vals, logit_pred_vals, avg_class_loss\n )\n final_loss = corr_loss\n \n if 
att_prior_loss_weight > 0:\n att_prior_loss = model.fourier_att_prior_loss(\n status, input_grads, fourier_att_prior_freq_limit,\n fourier_att_prior_freq_limit_softness, att_prior_grad_smooth_sigma\n )\n\n # att_prior_loss = model.smoothness_att_prior_loss(status, input_grads)\n # att_prior_loss = model.sparsity_att_prior_loss(status, input_grads)\n \n if att_prior_loss_weight_anneal_type is None:\n weight = att_prior_loss_weight\n elif att_prior_loss_weight_anneal_type == \"inflate\":\n exp = np.exp(-att_prior_loss_weight_anneal_speed * epoch_num)\n weight = att_prior_loss_weight * ((2 / (1 + exp)) - 1)\n elif att_prior_loss_weight_anneal_type == \"deflate\":\n exp = np.exp(-att_prior_loss_weight_anneal_speed * epoch_num)\n weight = att_prior_loss_weight * exp\n\n if att_prior_loss_only:\n final_loss = att_prior_loss\n else:\n final_loss = final_loss + (weight * att_prior_loss)\n else:\n att_prior_loss = torch.zeros(1)\n\n # If necessary, add the L2 penalty\n if l2_reg_loss_weight > 0:\n l2_loss = util.place_tensor(torch.tensor(0).float())\n for param in model.parameters():\n if param.requires_grad_:\n l2_loss = l2_loss + torch.sum(param * param)\n final_loss = final_loss + (l2_reg_loss_weight * l2_loss)\n\n return final_loss, (corr_loss, att_prior_loss)", "def update_weights(self, X, Y, learning_rate):\n grads = self.calculate_gradients(X, Y)\n #update weights and biases\n self.weights[0] = self.weights[0] - learning_rate * grads[\"dW1\"]\n self.weights[1] = self.weights[1] - learning_rate * grads[\"dW2\"]\n self.biases[0] = self.biases[0] - learning_rate * grads[\"db1\"]\n self.biases[1] = self.biases[1] - learning_rate * grads[\"db2\"]", "def _update_weights(self, xi, target):\n output = self.net_input(xi)\n error = (target - output)\n self.w_[1:] += self.eta * xi.dot(error)\n self.w_[0] += self.eta * error\n cost = 0.5 * error**2\n return cost", "def _update_weights(self, xi, target):\n output = self.net_input(xi)\n error = (target - output)\n self.w_[1:] += self.eta * xi.dot(error)\n self.w_[0] += self.eta * error\n cost = 0.5 * error**2\n return cost", "def update_weight(self, error_matrix, i, j):\n #weight i,j gets error of j\n error = error_matrix[i]\n input = log_deriv(self.input_matrix[i-1][j])\n self.bias[i-1][j] += ALPHA * input * error_matrix[i-1][j]\n self.weights[i][j] = [weight + ALPHA * input * error[i] for i, weight in enumerate(self.weights[i][j])]", "def keypoints_from_heatmaps(heatmaps, center, scale, unbiased=False, post_process='default', kernel=11, valid_radius_factor=0.0546875, use_udp=False, target_type='GaussianHeatmap'):\n heatmaps = heatmaps.copy()\n if unbiased:\n assert post_process not in [False, None, 'megvii']\n if post_process in ['megvii', 'unbiased']:\n assert kernel > 0\n if use_udp:\n assert not post_process == 'megvii'\n if post_process is False:\n warnings.warn('post_process=False is deprecated, please use post_process=None instead', DeprecationWarning)\n post_process = None\n elif post_process is True:\n if unbiased is True:\n warnings.warn(\"post_process=True, unbiased=True is deprecated, please use post_process='unbiased' instead\", DeprecationWarning)\n post_process = 'unbiased'\n else:\n warnings.warn(\"post_process=True, unbiased=False is deprecated, please use post_process='default' instead\", DeprecationWarning)\n post_process = 'default'\n elif post_process == 'default':\n if unbiased is True:\n warnings.warn(\"unbiased=True is deprecated, please use post_process='unbiased' instead\", DeprecationWarning)\n post_process = 'unbiased'\n if 
post_process == 'megvii':\n heatmaps = _gaussian_blur(heatmaps, kernel=kernel)\n N, K, H, W = heatmaps.shape\n if use_udp:\n if target_type.lower() == 'GaussianHeatMap'.lower():\n preds, maxvals = _get_max_preds(heatmaps)\n preds = post_dark_udp(preds, heatmaps, kernel=kernel)\n elif target_type.lower() == 'CombinedTarget'.lower():\n for person_heatmaps in heatmaps:\n for i, heatmap in enumerate(person_heatmaps):\n kt = 2 * kernel + 1 if i % 3 == 0 else kernel\n cv2.GaussianBlur(heatmap, (kt, kt), 0, heatmap)\n valid_radius = valid_radius_factor * H\n offset_x = heatmaps[:, 1::3, :].flatten() * valid_radius\n offset_y = heatmaps[:, 2::3, :].flatten() * valid_radius\n heatmaps = heatmaps[:, ::3, :]\n preds, maxvals = _get_max_preds(heatmaps)\n index = (preds[..., 0] + preds[..., 1] * W).flatten()\n index += W * H * np.arange(0, N * K / 3)\n index = index.astype(int).reshape(N, K // 3, 1)\n preds += np.concatenate((offset_x[index], offset_y[index]), axis=2)\n else:\n raise ValueError(\"target_type should be either 'GaussianHeatmap' or 'CombinedTarget'\")\n else:\n preds, maxvals = _get_max_preds(heatmaps)\n if post_process == 'unbiased':\n heatmaps = np.log(np.maximum(_gaussian_blur(heatmaps, kernel), 1e-10))\n for n in range(N):\n for k in range(K):\n preds[n][k] = _taylor(heatmaps[n][k], preds[n][k])\n elif post_process is not None:\n for n in range(N):\n for k in range(K):\n heatmap = heatmaps[n][k]\n px = int(preds[n][k][0])\n py = int(preds[n][k][1])\n if 1 < px < W - 1 and 1 < py < H - 1:\n diff = np.array([heatmap[py][px + 1] - heatmap[py][px - 1], heatmap[py + 1][px] - heatmap[py - 1][px]])\n preds[n][k] += np.sign(diff) * 0.25\n if post_process == 'megvii':\n preds[n][k] += 0.5\n for i in range(N):\n preds[i] = transform_preds(preds[i], center[i], scale[i], [W, H], use_udp=use_udp)\n if post_process == 'megvii':\n maxvals = maxvals / 255.0 + 0.5\n return preds, maxvals", "def get_new_weights(self):\n\n # Initializes new_weights with max value weights\n new_weights = []\n new_key_to_old_key = {}\n\n for i in range(2 ** (self.width - 1)):\n new_key_to_old_key[i] = None\n new_weights.append(sys.maxsize)\n\n for key in self.trellis_keys:\n key_int = reduce(lambda acc, x: acc * 2 + x, key)\n # If the next msg bit is a 0\n parity_0 = self.convolve(key+(0,))\n msg_0_int = reduce(lambda acc, x: acc * 2 + x, key[1:]+(0,))\n # Takes parity bits tuple and turns it from tuple with base 2 digits to a base 10 int\n # This is so that it can be used as an index\n parity_0_int = reduce(lambda acc, x: acc * 2 + x, parity_0)\n\n weight_0 = self.get_hamming_distance(parity_0) + self.prev_weights[key_int]\n\n if(weight_0 < new_weights[msg_0_int]):\n new_weights[msg_0_int] = weight_0\n new_key_to_old_key[msg_0_int] = key_int\n\n # If the next msg bit is a 1\n parity_1 = self.convolve(key+(1,))\n msg_1_int = reduce(lambda acc, x: acc * 2 + x, key[1:]+(1,))\n # Takes parity bits tuple and turns it from tuple with base 2 digits to a base 10 int\n # This is so that it can be used as an index for new_weights\n parity_1_int = int(str(reduce(lambda acc, x: acc * 10 + x, parity_1)),2)\n\n weight_1 = self.get_hamming_distance(parity_1) + self.prev_weights[key_int]\n\n\n if(weight_0 < new_weights[msg_1_int]):\n new_weights[msg_1_int] = weight_1\n new_key_to_old_key[msg_1_int] = key_int\n\n self.backtrack_dict = new_key_to_old_key\n \n return tuple(new_weights)", "def _reweight_and_discard_irrelevant(self, weighted_sample_pool, t):\n tmp = []\n ret = []\n wc = self.classifiers[t]\n theta_a = wc.theta_a\n theta_b = 
wc.theta_b\n\n norm_factor = 0\n discarded = 0\n for patch, w in weighted_sample_pool:\n response = self.h_t(patch, t)\n # if t > 3:\n # if response < theta_a or response > theta_b: # throw it away\n # discarded += 1\n # continue\n r = self.classify(patch)\n label = patch.label\n new_weight = w * np.exp(-label * r)\n\n tmp.append([patch, new_weight])\n norm_factor += new_weight\n for patch, w in tmp: # normalize weights\n normalized_weight = w / norm_factor\n ret.append([patch, normalized_weight])\n print \"Discarded %d training samples\" % discarded\n return ret", "def _lowess_wt_standardize(weights, new_entries, x_copy_i, width):\n weights[:] = new_entries\n weights -= x_copy_i\n weights /= width", "def getLossWeights(weights_dict, n):\n\n w = torch.ones(2*n+6,)\n w[0] *= weights_dict['r1']\n w[1] *= weights_dict['r2']\n w[2] *= weights_dict['e1_r']\n w[3] *= weights_dict['e1_i']\n w[4] *= weights_dict['e3_r']\n w[5] *= weights_dict['e3_i']\n w[6:6+n] *= weights_dict['e2_r']\n w[6+n:] *= weights_dict['e2_i']\n return w", "def weight_decay_loss_wrapper(\n loss_fn = gin.REQUIRED,\n factor = gin.REQUIRED,\n exclude = (),\n):\n traversal = traverse_util.ModelParamTraversal(\n lambda path, _: all([e not in path for e in exclude]))\n\n def wrapped_loss(outputs, *args, params, **kwargs):\n losses = loss_fn(outputs, *args, **kwargs)\n weight_decay_params = list(traversal.iterate(params))\n weight_l2 = sum([jnp.sum(x**2) for x in weight_decay_params])\n weight_penalty = factor * 0.5 * weight_l2\n\n if isinstance(losses, dict):\n if 'model_loss' not in losses:\n raise ValueError(\n 'Losses must contain `model_loss` key as total model loss.')\n losses['pre_weight_penalty_model_loss'] = losses['model_loss']\n losses['model_loss'] += weight_penalty\n losses['l2_regularization_loss'] = weight_penalty\n elif isinstance(losses, jnp.ndarray):\n losses += weight_penalty\n else:\n raise ValueError('Encountered invalid loss type: ', type(losses))\n\n return losses\n\n return wrapped_loss", "def loss_and_metrics_one_pair(self, params, rng):\n key_pq, key_samples = jax.random.split(rng)\n p_logits, q_logits = self.logit_pair_distribution_fn(key_pq)\n\n def sample_loss(key_sample):\n \"\"\"Computes loss for a single sample of a relaxed pair of outcomes.\"\"\"\n q_kwargs = dict(transpose=True) if self.use_transpose else {}\n soft_p = self.model.apply(\n params, p_logits, key_sample, method=self.model.sample_relaxed)\n soft_q = self.model.apply(\n params,\n q_logits,\n key_sample,\n method=self.model.sample_relaxed,\n **q_kwargs)\n coupling_loss_matrix = self.coupling_loss_matrix_fn(p_logits, q_logits)\n coupling_loss = jnp.sum(soft_p[:, None] * soft_q[None, :] *\n coupling_loss_matrix)\n\n return coupling_loss\n\n loss_samples = jax.vmap(sample_loss)(\n jax.random.split(key_samples, self.inner_num_samples))\n loss = jnp.mean(loss_samples)\n return loss, {\"loss\": loss}", "def match_ckpt_weights_to_model(\n model: tf.keras.Model,\n names_to_keys: Mapping[str, str],\n keys_to_weights: Mapping[str, Any]) -> List[Any]:\n init_weight_list = []\n\n for weight in model.weights:\n # Look up weight name in checkpoint weight names.\n weight_name = weight.name.replace(':0', '')\n ckpt_key = names_to_keys.get(weight_name, None)\n\n if ckpt_key:\n init_weight = keys_to_weights[ckpt_key]\n else:\n logging.info(\n '\"%s\" not found in checkpoint. 
'\n 'Using randomly initialized values.', weight_name)\n init_weight = weight.numpy()\n\n init_weight_list.append(init_weight)\n\n return init_weight_list", "def classweighted_mortality_loss(class_weights):\n\n def tmp_mortality_loss(y_true, y_pred):\n sample_weights = (1 - y_true) * class_weights[0] + y_true * class_weights[1]\n bce = K.binary_crossentropy(y_true, y_pred)\n return K.mean(sample_weights * bce, axis=-1)\n\n return tmp_mortality_loss", "def keep_lesser_x0_y0_zbt0_pair_in_dict(p, p1, p2):\n const_dict = p[3]\n cd1, cd2 = p1[3], p2[3]\n if 'x0' in cd1 and 'x0' in cd2:\n if cd2['x0'] < cd1['x0']:\n const_dict['x0'] = cd2['x0']\n const_dict['y0'] = cd2['y0'] if 'y0' in cd2 else None\n const_dict['zbt0'] = cd2['zbt0'] if 'zbt0' in cd2 else None\n else:\n const_dict['x0'] = cd1['x0']\n const_dict['y0'] = cd1['y0'] if 'y0' in cd1 else None\n const_dict['zbt0'] = cd1['zbt0'] if 'zbt0' in cd1 else None\n p = p[0:3] + (const_dict,)\n return p", "def my_assign_weights(context, data):\n pass", "def _update_weights(self, xi, target):\n output = self.net_input(xi)\n error= (target - output)\n self.w_[1:] += self.eta * xi.dot(error)\n self.w_[0] += self.eta * error\n cost =0.5* error**2\n return cost", "def soft_update(self, other, tau):\n new_weights = {}\n\n own_weights = self.get_weight_copies()\n other_weights = other.get_weight_copies()\n\n for k in own_weights:\n #print(own_weights[k].shape, other_weights[k].shape)\n new_weights[k] = (1 - tau) * own_weights[k] + tau * other_weights[k]\n self.set_weights(new_weights)", "def feed_dict(training_data=True):\n if training_data:\n xs, ys = mnist.train.next_batch(batch_size)\n kp = keep_prob\n else:\n xs, ys = mnist.validation.images, mnist.validation.labels\n kp = 1.0\n return {x: xs, t: ys, k: kp}", "def weight_update(u_ff, u_wc, alpha, beta, w, fan_all):\r\n mult_wc = np.matmul(np.reshape(hard_sigmoid_array(u_wc), (fan_all, 1)),\r\n np.reshape(hard_sigmoid_array(u_wc), (1, fan_all)))\r\n mult_ff = np.matmul(np.reshape(hard_sigmoid_array(u_ff), (fan_all, 1)),\r\n np.reshape(hard_sigmoid_array(u_ff), (1, fan_all)))\r\n delta_w = alpha * (1 / beta) * (mult_wc - mult_ff)\r\n delta_w[np.diag_indices(fan_all)] = 0\r\n w = w + delta_w\r\n return w", "def increase_dict_values_for_low_enough_keys(threshold, dictionary):\n\n dictionary = dictionary.update((x, y+1) for x, y in dictionary.items() if threshold >= x)\n return dictionary", "def mapk(y_true, y_pred, k, sample_weight=None):\n def apk(actual, predict, weight, k):\n if len(predict)>k:\n predict = predict[:k]\n score = 0.0\n nums = 0.0\n for i,p in enumerate(predict):\n if p in actual and p not in predict[:i]:\n nums += 1.0\n score += nums / (i+1.0)\n return score / min(len(actual), k)*weight if actual else 0.0\n sample_weight = _sample_weight(y_true, sample_weight)\n return pd.DataFrame({'label1':y_true, 'label2':y_pred, 'weight':sample_weight}).apply(lambda x:apk(x[0], x[1], x[2], k=k), axis=1).mean()", "def adjust_params(self, inputs, target_costs, original_costs):\n costs = self.measure_costs(inputs)\n weights = [(target_costs[i] - costs[i]) / (target_costs[i] - original_costs[i]) for i in range(target_costs)]\n totalweights = sum(weights)\n for i in range(weights):\n self.morphnet_target_cost_thresholds[i] = weights[i] / totalweights", "def update_weights(self, example):\n pred = self.predict(example)\n if pred != example.label:\n self.weights[example.label] = self.weights[example.label] + example.fvector\n self.weights[pred] = self.weights[pred] - example.fvector", "def 
update_weights(self, alpha, ind):\n inside = -alpha * self.labels * self.predictions[ind, :]\n new_weights = self.weights * np.exp(inside)\n self.weights = new_weights / np.sum(new_weights)", "def generate_loss_weight_criteria(num_traits, num_classes=num_classes, weighted_loss=True, top_only=None):\n\tif (weighted_loss):\n\t\tprint(\"use different weight for each class\")\n\t\tprint(\"put more focus on locus with more significant DM p-values\")\n\t\tif (top_only):\n\t\t\tprint(\"weighted loss for the top class only...\")\n\t\t\tclass_weight_each_output_dict = dict([(i, 1) for i in range(0, num_classes-1)])\n\t\t\tclass_weight_each_output_dict[num_classes-1] = num_classes\n\t\t\tclass_weight_dict = dict([(\"out%i\"%i, class_weight_each_output_dict) for i in range(1, num_traits+1)])\n\t\telse:\n\t\t\tclass_weight_each_output_dict = dict([(i, i+1) for i in range(0, num_classes)])\n\t\t\tclass_weight_dict = dict([(\"out%i\"%i, class_weight_each_output_dict) for i in range(1, num_traits+1)])\n\telse:\n\t\tprint(\"use balanced weight for each class\")\n\t\tclass_weight_each_output_dict = dict([(i, 1) for i in range(0, num_classes)])\n\t\tclass_weight_dict = dict([(\"out%i\"%i, class_weight_each_output_dict) for i in range(1, num_traits+1)])\n\treturn class_weight_dict", "def generate_loss_weight_criteria(num_traits, num_classes=num_classes, weighted_loss=True, top_only=None):\n\tif (weighted_loss):\n\t\tprint(\"use different weight for each class\")\n\t\tprint(\"put more focus on locus with more significant DM p-values\")\n\t\tif (top_only):\n\t\t\tprint(\"weighted loss for the top class only...\")\n\t\t\tclass_weight_each_output_dict = dict([(i, 1) for i in range(0, num_classes-1)])\n\t\t\tclass_weight_each_output_dict[num_classes-1] = num_classes\n\t\t\tclass_weight_dict = dict([(\"out%i\"%i, class_weight_each_output_dict) for i in range(1, num_traits+1)])\n\t\telse:\n\t\t\tclass_weight_each_output_dict = dict([(i, i+1) for i in range(0, num_classes)])\n\t\t\tclass_weight_dict = dict([(\"out%i\"%i, class_weight_each_output_dict) for i in range(1, num_traits+1)])\n\telse:\n\t\tprint(\"use balanced weight for each class\")\n\t\tclass_weight_each_output_dict = dict([(i, 1) for i in range(0, num_classes)])\n\t\tclass_weight_dict = dict([(\"out%i\"%i, class_weight_each_output_dict) for i in range(1, num_traits+1)])\n\treturn class_weight_dict", "def updateWeights(inputs, outputs, learning_rate, y, weights):\n for i in range(len(weights)):\n weights[i] = weights[i] + learning_rate * (outputs - y) * inputs[i]\n return weights", "def weighted_sensitivity(y_true, y_pred):\n\n tpw, _, fnw, _ = get_weighted_confusion_matrix(y_true, y_pred)\n\n return tpw / (tpw + fnw)", "def getWeights(self, gameState, actton):\n\t\treturn {'successorScore': 1.0}", "def node_loss_dict():\n\n from .losses import node_kinematics, node_primary, node_type\n\n losses = {\n \"kinematics\" : node_kinematics.NodeKinematicsLoss,\n \"kinematics_edl\" : node_kinematics.NodeEvidentialKinematicsLoss,\n \"kinematics_attn\": node_kinematics.NodeTransformerLoss,\n \"primary\" : node_primary.NodePrimaryLoss,\n \"type\" : node_type.NodeTypeLoss\n }\n\n return losses", "def update_training_dict(self, feed_dict):\n self._update_with_dual(feed_dict, 'hebb_init_pl') # re-initialize hebbian at the start of episode\n self._update_with_dual(feed_dict, 'init_pl') # re-initialize rnn state at the start of episode\n self._update_with_dual(feed_dict, 'mask') # mask", "def transfer_weights(\n self, new_model, new_optimizer=None, optimizer=None, 
ignore_weights=None\n ):\n if type(self) is not type(new_model):\n raise ValueError(\n \"Transferring weights to another model type is not supported\"\n )\n if ignore_weights is None:\n ignore_weights = set()\n ignore_weights_ref = set(weight.ref() for weight in ignore_weights)\n weights = self.weights\n new_weights = new_model.weights\n for weight, new_weight in zip(weights, new_weights):\n if new_weight.ref() not in ignore_weights_ref:\n new_weight.assign(weight)\n if new_optimizer is not None and optimizer is not None:\n for slot_name in new_optimizer.get_slot_names():\n if slot_name not in optimizer.get_slot_names():\n continue\n new_slot = new_optimizer.get_slot(new_weight, slot_name)\n slot = optimizer.get_slot(weight, slot_name)\n new_slot.assign(slot)", "def weighted_average(x, y, x_new, fwhm=300):\n bin_avg = np.zeros(len(x_new))\n bin_std = np.zeros(len(x_new))\n bin_se = np.zeros(len(x_new))\n\n # Gaussian function as weights\n sigma = fwhm / (2 * np.sqrt(2 * np.log(2)))\n\n for index in range(0, len(x_new)):\n xn = x_new[index]\n weights = gauss(x, 1, xn, sigma)\n weights /= sum(weights)\n # weighted mean\n bin_avg[index] = np.average(y, weights=weights)\n # weighted standard deviation\n bin_std[index] = np.sqrt(np.average((y - bin_avg[index])**2, weights=weights))\n # weighted standard error (mean / sqrt(n_points_in_gaussian))\n bin_se[index] = np.sqrt(np.average((y - bin_avg[index])**2, weights=weights)) / \\\n np.sqrt(sum((x > xn - 2 * sigma) & (x < xn + 2 * sigma)))\n\n return {'mean': bin_avg,\n 'std': bin_std,\n 'stderr': bin_se}", "def gradient_penalty_loss(y_true, y_pred, averaged_samples, gradient_penalty_weight):\n gradients = K.gradients(K.sum(y_pred), averaged_samples)\n gradient_l2_norm = K.sqrt(K.sum(K.square(gradients)))\n gradient_penalty = gradient_penalty_weight * K.square(1 - gradient_l2_norm)\n return gradient_penalty", "def _equalize_weights_lsq_pact(self, bn_dict={}, verbose=False, eps=None):\n\n if not bn_dict:\n bn_dict = get_bn_dict_from_supernodes(self)\n\n module_dict = {}\n for n,m in self.named_modules():\n if (m.__class__.__name__ == \"PACT_Conv2d\" or \\\n m.__class__.__name__ == \"PACT_Conv1d\" or \\\n m.__class__.__name__ == \"PACT_Linear\" or \\\n m.__class__.__name__ == \"BatchNorm2d\" or \\\n m.__class__.__name__ == \"BatchNorm1d\" ):\n module_dict[n] = m\n for n_before in bn_dict.keys():\n n_after = bn_dict[n_before]\n m_before = module_dict[n_before]\n m_after = module_dict[n_after]\n if eps is None:\n eps = m_after.eps\n min_before = weight_min(m_before, 0).cpu().detach().numpy()\n max_before = weight_max(m_before, 0).cpu().detach().numpy()\n if verbose:\n logging.info(\"[Equalization by Least Squares] %s: wrange_min=%.5f wrange_max=%.5f\" % (n_before, weight_range(m_before, 0).min().item(), weight_range(m_before, 0).max().item()))\n X = np.vstack((min_before, max_before))\n y = np.asarray((-1,1))\n coeff = torch.zeros(len(min_before), device=m_before.weight.device)\n regr = linear_model.LinearRegression(fit_intercept=False)\n for i in range(len(min_before)):\n regr.fit(X[:,i].reshape((-1,1)), y)\n coeff[i] = torch.as_tensor(regr.coef_[0], device=m_before.weight.device)\n coeff = 1./coeff\n m_before.weight.data[:] = m_before.weight.data[:] / reshape_before(m_before, coeff)\n try:\n m_before.bias.data[:] = m_before.bias.data[:] / coeff\n except AttributeError:\n pass\n m_after.running_mean.data[:] = m_after.running_mean.data[:] / coeff\n m_after.weight.data[:] = m_after.weight.data[:] * reshape_after(m_after, coeff)\n if verbose:\n 
logging.info(\"[Equalization by Least Squares] %s: wrange_min=%.5f wrange_max=%.5f\" % (n_before, weight_range(m_before, 0).min().item(), weight_range(m_before, 0).max().item()))", "def weighted_loss(y_true, y_pred):\r\n # initialize loss to zero\r\n loss = 0.0\r\n for i in range(len(freq_pos)):\r\n # for each class, add average weighted loss for that class \r\n loss += -K.mean(freq_neg[i]*y_true[:,i]*K.log(y_pred[:,i] + epsilon) + freq_pos[i]*(1-y_true[:,i])*K.log(1-y_pred[:,i] + epsilon)) #complete this line\r\n return loss", "def propose_patch(self, weight_bounds, learn_rate=1.0):\n in_dims, mid_dims, _, _ = weight_bounds.shape\n\n best_index = (None, None)\n best_constraints = -1\n best_delta = 0.0\n indices = itertools.product(range(in_dims), range(mid_dims))\n for in_dim, mid_dim in tqdm(indices, total=(in_dims * mid_dims),\n desc=\"Computing Patch\"):\n bounds = weight_bounds[in_dim, mid_dim, :, :]\n # We focus on the bounds that are non-NaN\n non_nan_bounds = bounds[~np.isnan(bounds[:, 0])]\n if len(non_nan_bounds) < best_constraints:\n continue\n lower, upper, n_met = self.interval_MAX_SMT(non_nan_bounds)\n\n if n_met <= best_constraints:\n continue\n best_constraints = n_met\n best_index = (in_dim, mid_dim)\n\n if lower <= 0.0 <= upper:\n best_delta = 0.0\n else:\n # True if the interval suggests to increase the weight.\n is_increase = lower > 0.0\n # If the interval suggests to increase the weight, suggest a\n # delta slightly above lower. Otherwise, suggest one slightly\n # below upper. Either way, we're trying to stay as close to 0\n # as possible.\n ratio = 0.1 if is_increase else 0.9\n best_delta = lower + (ratio * (upper - lower))\n if not np.isfinite(best_delta):\n eps = 0.1\n if is_increase: # => upper == np.Infinity\n assert np.isfinite(lower + eps)\n best_delta = lower + eps\n elif upper < 0.0: # => lower == -np.Infinity\n assert np.isfinite(upper - eps)\n best_delta = upper - eps\n else:\n assert False\n assert np.isfinite(best_delta)\n print(\"Would be satisfying\", best_constraints, \"constraints.\")\n print(\"Updating weight\", best_index)\n best_delta *= learn_rate\n return best_index, best_delta, best_constraints", "def get_loss(self, output, target, target_weight):\n losses = dict()\n assert not isinstance(self.loss, nn.Sequential)\n assert target.dim() == 4 and target_weight.dim() == 3\n losses['heatmap_loss'] = self.loss(output, target, target_weight)\n return losses", "def get_loss(self, output, target, target_weight):\n losses = dict()\n assert not isinstance(self.loss, nn.Sequential)\n assert target.dim() == 4 and target_weight.dim() == 3\n losses['heatmap_loss'] = self.loss(output, target, target_weight)\n return losses", "def pointnet_loss(preds, targets, chamfer_lambda, physical_lambda, joint_lambda, has_anno=None, joint_idxs=None):\n\n gen_pc = preds[0]\n out_idxs = preds[1]\n coords_pred = preds[2]\n seg_out = preds[8]\n seg_out = seg_out.argmin(-1)\n bone_weights = preds[9]\n target_pc = targets[1]\n\n if joint_idxs is not None:\n bone_weights = bone_weights[joint_idxs]\n if has_anno is not None:\n coords_pred = coords_pred[has_anno]\n if joint_idxs is not None:\n coords_pred = coords_pred[:, joint_idxs]\n\n # Chamfer loss\n N = gen_pc.shape[1]\n M = target_pc.shape[1]\n\n pc1_expand = gen_pc.unsqueeze(2).repeat(1, 1, M, 1)\n pc2_expand = target_pc.unsqueeze(1).repeat(1, N, 1, 1)\n pc_diff = pc1_expand - pc2_expand\n pc_dist = (pc_diff ** 2).sum(-1)\n pc_dist = torch.sqrt(pc_dist)\n dist1, idx1 = pc_dist.min(2)\n dist2, idx2 = pc_dist.min(1)\n 
chamfer_loss = (dist1 + dist2).mean()\n loss = chamfer_lambda * chamfer_loss\n\n # Joint loss\n joint_loss = []\n for i in range(bone_weights.shape[0]):\n # Gen mask\n joint_weight = bone_weights[i]\n joint_weight = joint_weight.unsqueeze(0).expand(coords_pred.shape[0], -1)\n joint_weight = torch.gather(joint_weight, -1, out_idxs)\n joint_weight[joint_weight > 0.15] = 1 # threshold\n joint_weight[joint_weight < 1] = 0\n joint_weight_exp = joint_weight.unsqueeze(-1).expand(joint_weight.shape[0], joint_weight.shape[1], joint_weight.shape[1]).byte()\n\n # Target mask TODO: clean up masking\n part_mask = seg_out.clone()\n part_mask[part_mask != i] = -1\n part_mask[part_mask == i] = 1\n part_mask[part_mask == -1] = 0\n part_mask_exp = part_mask.byte().unsqueeze(1).expand(part_mask.shape[0], part_mask.shape[1], part_mask.shape[1])\n mask = joint_weight_exp & part_mask_exp\n part_dist = pc_dist.clone()\n part_dist[mask == 0] = 10\n\n d1, _ = part_dist.min(2)\n d2, _ = part_dist.min(1)\n\n # This is very hacky and I suspect it will cause all points to acquire\n # a gradient value.\n d1[d1 == 10] = 0\n d2[d2 == 10] = 0\n joint_loss.append((d1 + d2).mean())\n\n joint_loss = torch.tensor(joint_loss, device=target_pc.device)\n loss += (joint_lambda * joint_loss.mean())\n\n # Coord Loss Only\n coord_loss = F.mse_loss(preds[2], targets[0])\n loss += coord_loss\n\n # with torch.no_grad():\n # coord_loss = F.mse_loss(coords_pred, targets[0])\n\n # Physical loss\n p_loss = physical_loss(preds[6])\n loss += (physical_lambda * p_loss)\n\n return loss, coord_loss, joint_loss, chamfer_loss, p_loss", "def update_recurrent_weights_step(self):\n \n # update weights: hebbian term\n self.delta_Wee=self.learn_rate*(self.rr[0:self.N_e]-self.input_mean)*\\\n (self.rr[0:self.N_e].T-self.input_mean)\n \n self.W_ee+=self.dt*self.delta_Wee\n\n # update weights: normalize to fixed mean of incoming and outgoing weights\n self.W_ee-=(self.W_ee.mean(axis=1)-self.W_av_star)[:,np.newaxis]\n self.W_ee-=(self.W_ee.mean(axis=0)-self.W_av_star)[np.newaxis,:]\n \n # clip weights \n self.W_ee=np.clip(self.W_ee,0,self.W_max_ee)\n \n # update excitatory weights in the big weight matrix\n self.W[:self.N_e,:self.N_e]=self.W_ee", "def get_weighted_loss(pos_weights, neg_weights, epsilon=1e-7):\n def weighted_loss(y_true, y_pred):\n \"\"\"\n Return weighted loss value. 
\n\n Args:\n y_true (Tensor): Tensor of true labels, size is (num_examples, num_classes)\n y_pred (Tensor): Tensor of predicted labels, size is (num_examples, num_classes)\n Returns:\n loss (Tensor): overall scalar loss summed across all classes\n \"\"\"\n # initialize loss to zero\n loss = 0.0\n \n ### START CODE HERE (REPLACE INSTANCES OF 'None' with your code) ###\n\n for i in range(len(pos_weights)):\n \n # for each class, add average weighted loss for that class \n pos_avg = -1*(pos_weights[i]*y_true[:,i]*tf.keras.backend.log(y_pred[:,i]+epsilon))\n neg_avg = -1*(neg_weights[i]*(1-y_true[:,i])*tf.keras.backend.log(1-y_pred[:,i]+epsilon))\n loss =loss + tf.keras.backend.mean(pos_avg + neg_avg) \n #complete this line\n return loss\n \n ### END CODE HERE ###\n return weighted_loss", "def load_weights(self, state_dict):\n own_state = self.state_dict()\n # Copy the convloutional layers\n for name, param in state_dict.iteritems():\n if 'base_conv' in name:\n own_state[name].copy_(param)\n # Convert the FC layers to convolutional layers\n own_state['base_fc.0.weight'].copy_(state_dict['base_fc.0.weight'].view(500, 160, 3, 3))\n own_state['base_fc.0.bias'].copy_(state_dict['base_fc.0.bias'])\n own_state['base_fc.2.weight'].copy_(state_dict['base_fc.2.weight'].view(500, 500, 1, 1))\n own_state['base_fc.2.bias'].copy_(state_dict['base_fc.2.bias'])\n \n # Freeze the weights for the initial Conv and FC layers\n for i in range(len(self.base_conv)):\n if hasattr(self.base_conv[i], 'weight'):\n self.base_conv[i].weight.requires_grad = False\n if hasattr(self.base_conv[i], 'bias'):\n self.base_conv[i].bias.requires_grad = False\n \n for i in range(len(self.base_fc)):\n if hasattr(self.base_fc[i], 'weight'):\n self.base_fc[i].weight.requires_grad = False\n if hasattr(self.base_fc[i], 'bias'):\n self.base_fc[i].bias.requires_grad = False", "def _determine_new_weight(self, weight, input, currentNeuron, bmu):\n return weight \\\n + (self.neighborhood.fn(currentNeuron, bmu) \\\n * self.learning_rate * (input - weight))", "def get_loss(self, output, target, target_weight):\n losses = dict()\n assert not isinstance(self.keypoint_loss, nn.Sequential)\n out, tar, tar_weight = output[0], target[0], target_weight[0]\n assert tar.dim() == 5 and tar_weight.dim() == 3\n losses['hand_loss'] = self.keypoint_loss(out, tar, tar_weight)\n assert not isinstance(self.root_depth_loss, nn.Sequential)\n out, tar, tar_weight = output[1], target[1], target_weight[1]\n assert tar.dim() == 2 and tar_weight.dim() == 2\n losses['rel_root_loss'] = self.root_depth_loss(out, tar, tar_weight)\n assert not isinstance(self.hand_type_loss, nn.Sequential)\n out, tar, tar_weight = output[2], target[2], target_weight[2]\n assert tar.dim() == 2 and tar_weight.dim() in [1, 2]\n losses['hand_type_loss'] = self.hand_type_loss(out, tar, tar_weight)\n return losses", "def load_bert_weight_from_ckpt(\n bert_model: tf.keras.Model,\n bert_ckpt_dir: str,\n repl_patterns: Optional[Dict[str, str]] = None\n) -> Tuple[tf.keras.Model, Mapping[str, str], Mapping[str, Any]]:\n # Load a dict mapping of weight names to their corresponding checkpoint keys.\n names_to_keys = object_graph_key_mapping(bert_ckpt_dir)\n if repl_patterns:\n # Update weight names so they match those in bert_model\n names_to_keys = {\n update_weight_name(repl_patterns, weight_name): weight_key\n for weight_name, weight_key in names_to_keys.items()\n }\n\n # Load a dict mapping of checkpoint keys to weight values.\n logging.info('Loading weights from checkpoint: %s', bert_ckpt_dir)\n 
keys_to_weights = load_ckpt_keys_to_weight_mapping(bert_ckpt_dir)\n\n # Arranges the pre-trained weights in the order of model weights.\n init_weight_list = match_ckpt_weights_to_model(bert_model, names_to_keys,\n keys_to_weights)\n\n # Load weights into model.\n bert_model.set_weights(init_weight_list)\n\n return bert_model, names_to_keys, keys_to_weights", "def _boost_real(self, iboost, X, y, sample_weight, random_state):\r\n estimator = self._make_estimator(random_state=random_state)\r\n estimator.fit(X, y, sample_weight=sample_weight)\r\n\r\n y_predict_proba = estimator.predict_proba(X)\r\n\r\n if iboost == 0:\r\n self.classes_ = getattr(estimator, 'classes_', None)\r\n self.n_classes_ = len(self.classes_)\r\n\r\n y_predict = self.classes_.take(np.argmax(y_predict_proba, axis=1),\r\n axis=0)\r\n\r\n incorrect = y_predict != y\r\n\r\n estimator_error = np.mean(\r\n np.average(incorrect, weights=sample_weight, axis=0))\r\n\r\n if estimator_error <= 0:\r\n return sample_weight, 1., 0.\r\n\r\n n_classes = self.n_classes_\r\n classes = self.classes_\r\n y_codes = np.array([-1. / (n_classes - 1), 1.])\r\n y_coding = y_codes.take(classes == y[:, np.newaxis])\r\n proba = y_predict_proba # alias for readability\r\n proba[proba < np.finfo(proba.dtype).eps] = np.finfo(proba.dtype).eps\r\n\r\n estimator_weight = (-1. * self.learning_rate\r\n * (((n_classes - 1.) / n_classes) *\r\n inner1d(y_coding, np.log(y_predict_proba))))\r\n\r\n # 样本更新的公式,只需要改写这里\r\n if not iboost == self.n_estimators - 1:\r\n sample_weight *= np.exp(estimator_weight *\r\n ((sample_weight > 0) |\r\n (estimator_weight < 0)) *\r\n self._beta(y, y_predict)) # 在原来的基础上乘以self._beta(y, y_predict),即代价调整函数\r\n return sample_weight, 1., estimator_error\r\n\r\n\r\n # 新定义的代价调整函数\r", "def _drop_float_precision(dict_obj, point):\n for keyword in dict_obj.keys():\n for lang_id, prob in enumerate(dict_obj[keyword]):\n dict_obj[keyword][lang_id] = round(prob, point)", "def update_weights(self):\n activation, activation_p = self.feedforward()\n # initialize delta_weights\n delta_w = np.zeros(2)\n # simultaneous calculate delta_weights\n for i, element in enumerate(self.y):\n delta_w += (activation[i]-element)*(activation_p[i])*self.X[i]\n # update weight\n self.weights -= self.alfa*delta_w", "def train(image, labels, weights, learning_rate=0.01):\n label = np.argmax(labels)\n y_hat = predict(image, weights)\n active_img = image > np.mean(image)\n weights[label, active_img ] += learning_rate\n if y_hat != label:\n weights[y_hat, active_img ] -= learning_rate\n return y_hat, weights", "def loss(self,\n inputs: Union[Tensor, Tuple[Tensor]],\n batch_data_samples: OptSampleList,\n train_cfg: ConfigType = {}) -> dict:\n\n pred_outputs = self.forward(inputs)\n\n lifting_target_label = torch.cat([\n d.gt_instance_labels.lifting_target_label\n for d in batch_data_samples\n ])\n trajectory_weights = torch.cat([\n d.gt_instance_labels.trajectory_weights for d in batch_data_samples\n ])\n\n # calculate losses\n losses = dict()\n loss = self.loss_module(pred_outputs, lifting_target_label,\n trajectory_weights.unsqueeze(-1))\n\n losses.update(loss_traj=loss)\n\n # calculate accuracy\n _, avg_acc, _ = keypoint_pck_accuracy(\n pred=to_numpy(pred_outputs),\n gt=to_numpy(lifting_target_label),\n mask=to_numpy(trajectory_weights) > 0,\n thr=0.05,\n norm_factor=np.ones((pred_outputs.size(0), 3), dtype=np.float32))\n\n mpjpe_traj = torch.tensor(avg_acc, device=lifting_target_label.device)\n losses.update(mpjpe_traj=mpjpe_traj)\n\n return losses", "def 
update_weights(self, weight_priors, latent_mat, output_mat):\n new_weights = {'mu': np.zeros(weight_priors['alpha'].shape),\n 'sigma': np.zeros((self.task_count,\n self.R + 1, self.R + 1))}\n\n h_sum = np.sum(latent_mat['mu'], axis=1)\n hht_mat = (latent_mat['mu'] @ latent_mat['mu'].transpose()\n + latent_mat['sigma'] * self.sample_count)\n\n for i in range(self.task_count):\n new_weights['sigma'][i, 0, 0] = (\n weight_priors['alpha'][0, i] / weight_priors['beta'][0, i]\n + self.sample_count\n )\n new_weights['sigma'][i, 1:, 0] = h_sum\n new_weights['sigma'][i, 0, 1:] = h_sum\n\n new_weights['sigma'][i, 1:, 1:] = (\n hht_mat + np.diag(weight_priors['alpha'][1:, i]\n / weight_priors['beta'][1:, i])\n )\n\n new_weights['sigma'][i, :, :] = np.linalg.inv(\n new_weights['sigma'][i, :, :])\n new_weights['mu'][:, i] = np.dot(\n new_weights['sigma'][i, :, :],\n np.dot(np.vstack([np.ones(self.sample_count),\n latent_mat['mu']]),\n output_mat['mu'][i, :])\n )\n\n return new_weights", "def standardize_weights(y,\n sample_weight=None,\n class_weight=None,\n sample_weight_mode=None):\n # Iterator may return sample_weight as 1-tuple\n if isinstance(sample_weight, tuple):\n sample_weight = sample_weight[0]\n if sample_weight_mode is not None and sample_weight_mode != 'samplewise':\n if sample_weight_mode != 'temporal':\n raise ValueError('\"sample_weight_mode '\n 'should be None or \"temporal\". '\n 'Found: ' + str(sample_weight_mode))\n if len(y.shape) < 3:\n raise ValueError('Found a sample_weight array for '\n 'an input with shape ' + str(y.shape) + '. '\n 'Timestep-wise sample weighting (use of '\n 'sample_weight_mode=\"temporal\") is restricted to '\n 'outputs that are at least 3D, i.e. that have '\n 'a time dimension.')\n if sample_weight is not None and len(sample_weight.shape) != 2:\n raise ValueError('Found a sample_weight array with shape ' +\n str(sample_weight.shape) + '. '\n 'In order to use timestep-wise sample weighting, '\n 'you should pass a 2D sample_weight array.')\n else:\n if sample_weight is not None and len(sample_weight.shape) != 1:\n raise ValueError(\n 'Found a sample_weight array with shape {}. In order to '\n 'use timestep-wise sample weights, you should specify '\n 'sample_weight_mode=\"temporal\" in compile(); founssd \"{}\" '\n 'instead. If you just mean to use sample-wise weights, '\n 'make sure your sample_weight array is 1D.'.format(\n sample_weight.shape, sample_weight_mode))\n\n if sample_weight is not None:\n if len(sample_weight.shape) > len(y.shape):\n raise ValueError('Found a sample_weight with shape' +\n str(sample_weight.shape) + '.'\n 'Expected sample_weight with rank '\n 'less than or equal to ' + str(len(y.shape)))\n\n if (not tensor_util.is_tf_type(sample_weight) and\n y.shape[:sample_weight.ndim] != sample_weight.shape):\n raise ValueError('Found a sample_weight array with shape ' +\n str(sample_weight.shape) + ' for an input with shape ' +\n str(y.shape) + '. 
'\n 'sample_weight cannot be broadcast.')\n\n # Class weights applied per-sample.\n class_sample_weight = None\n if isinstance(class_weight, dict):\n if len(y.shape) > 2:\n raise ValueError('`class_weight` not supported for '\n '3+ dimensional targets.')\n\n if tensor_util.is_tf_type(y):\n # Few classes are expected, so densifying is reasonable.\n keys = np.array(sorted(class_weight.keys()))\n values = np.array([class_weight[i] for i in keys])\n weight_vector = np.zeros(np.max(keys) + 1)\n weight_vector[:] = np.nan\n weight_vector[keys] = values\n\n y_classes = smart_cond.smart_cond(\n len(y.shape.as_list()) == 2 and backend.shape(y)[1] > 1,\n lambda: backend.argmax(y, axis=1),\n lambda: math_ops.cast(backend.reshape(y, (-1,)), dtypes.int64))\n class_sample_weight = array_ops.gather(weight_vector, y_classes)\n gen_array_ops.check_numerics(\n class_sample_weight,\n 'Invalid classes or class weights detected. NaN values indicate that '\n 'an appropriate class weight could not be determined.')\n class_sample_weight = math_ops.cast(class_sample_weight, backend.floatx())\n if sample_weight is not None:\n sample_weight = math_ops.cast(\n tensor_conversion.convert_to_tensor_v2_with_dispatch(sample_weight),\n backend.floatx(),\n )\n else:\n y_classes = y\n if len(y.shape) == 2:\n if y.shape[1] > 1:\n y_classes = np.argmax(y, axis=1)\n elif y.shape[1] == 1:\n y_classes = np.reshape(y, y.shape[0])\n\n class_sample_weight = np.asarray(\n [class_weight[cls] for cls in y_classes if cls in class_weight])\n\n if len(class_sample_weight) != len(y_classes):\n # subtract the sets to pick all missing classes\n existing_classes = set(y_classes)\n existing_class_weight = set(class_weight.keys())\n raise ValueError(\n '`class_weight` must contain all classes in the data.'\n ' The classes %s exist in the data but not in '\n '`class_weight`.' 
% (existing_classes - existing_class_weight))\n\n if class_sample_weight is not None and sample_weight is not None:\n # Multiply weights if both are provided.\n return class_sample_weight * sample_weight\n if sample_weight is not None:\n return sample_weight\n if class_sample_weight is not None:\n return class_sample_weight\n return None", "def update_bias(pruned_model, pinned_in, pinned_out):\n state = deepcopy(pruned_model.state_dict())\n zeros = []\n non_zeros = []\n biases = None\n was_conv = False\n propagate_bias = False\n prev_module = None\n\n # Iterate through all the element of the state_dict\n for k in state:\n # print(k)\n # If the element is a weight we use it to eventually update the corresponding biases\n if \"weight\" in k:\n if k.replace(\".weight\", \"\") in pinned_out:\n continue\n\n # Find in the pruned model the layer that corresponds to the current state_dict element\n current_module, next_module = find_module(pruned_model, k)\n\n if current_module is None:\n raise RuntimeError('The supplied model does not contain a module corresponding to key {}'.format(k))\n\n # Select the activation function\n if isinstance(prev_module, (nn.ReLU, nn.ReLU6, nn.LeakyReLU)):\n activation_function = prev_module\n else:\n activation_function = nn.Identity()\n\n bias_key = k.replace(\"weight\", \"bias\")\n\n # The current element is a convolutional layer\n if isinstance(current_module, nn.Conv2d):\n # Sum the convolutional values for dimensions: input, h, w\n conv_sum = torch.sum(torch.abs(state[k]), dim=(1, 2, 3))\n\n # If the current layer has a bias or the previous layer had a bias we may have to update it\n if bias_key in state or propagate_bias:\n propagate_bias = False\n # Memorize that we encountered a conv layer\n was_conv = True\n # Define the bias update tensor initialized at zero\n update = torch.zeros(state[k].shape[0])\n # Check if we have biases coming from the previous layer\n if biases is not None and torch.sum(biases) != 0:\n # Apply the activation function to the biases of the previous layer\n biases = activation_function(biases)\n # Take the biases to be propagated one by one\n for i in range(biases.shape[0]):\n bias = biases[i]\n if bias != 0:\n # Manually execute the forward propagation of the bias in order to obtain it's weighted value\n # Add each weighted sum to the update value\n bias_prop = state[k][:, i, :, :].mul(bias)\n update += torch.sum(torch.sum(bias_prop, dim=2), dim=1)\n\n if torch.sum(torch.abs(update)) != 0:\n if bias_key in state:\n state[bias_key].add_(update)\n if bias_key in state:\n # If the next layer is a convolutional with padding we do not propagate the bias\n # Get current layer biases\n biases = (state[bias_key]).clone().detach()\n # Set to 0 the biases corresponding to non-zero filters, this are not propagated to the next layer\n biases[torch.where(conv_sum != 0)[0]] = 0\n # This layer has no natural biases but we have value propagated from the previous layer\n # The computed update are the \"ghost biases\" of this layer that will be propagated to the next layer with bises\n biases = update\n\n # Signal that we have biases to propagate to the next layer\n if biases is not None and torch.sum(biases) != 0:\n propagate_bias = True\n\n # Memorize the number of output channel of the current conv layer\n out_ch_num = state[k].shape[0]\n\n # Identify the zeroed and the non-zero filters index\n zeros = (conv_sum == 0).nonzero()\n non_zeros = (conv_sum != 0).nonzero()\n\n # The current element is a linear layer\n if isinstance(current_module, 
nn.Linear):\n # If the current layer has a bias or the previous layer had a bias we may have to update it\n if bias_key in state or propagate_bias:\n propagate_bias = False\n if biases is not None and torch.sum(biases) != 0:\n # Apply the activation function to the bieses of the previous layer\n biases = activation_function(biases)\n\n # The previous layer was a convolutional\n if was_conv:\n was_conv = False\n # Evaluate how many FC neurons correspond to the previous conv out channel\n # current_layer_neurons_number / previous_layer_output_channels\n neurons_per_channel = int(state[k].shape[1] / out_ch_num)\n # Define the bias update tensor initialized at zero\n update = torch.zeros(state[k].shape[0])\n\n # Take the zeroed filters index one by one\n for z in zeros:\n # Compute the starting and end index of the neurons that correspond to the filter\n from_idx = z * neurons_per_channel\n to_idx = (z + 1) * neurons_per_channel\n # Get the bias corresponding to the filter\n bias = biases[z]\n # Manually compute the weighted sum between the bias and the weights of the neurons in the previously defined range\n # Add each sum to the update value\n update += torch.sum(state[k][:, from_idx:to_idx].mul(bias), dim=1)\n\n # The previous layer was a linear\n else:\n # Multiply the previous layer biases by the current element weights\n biases = biases * state[k]\n # Define the update value for the biases, summing the values related to the same neuron\n update = torch.sum(biases, dim=1)\n\n # Update the biases of the current layer\n if torch.sum(torch.abs(update)) != 0:\n if bias_key in state:\n state[bias_key].add_(update)\n\n if bias_key in state:\n # Get current layer biases\n biases = (state[bias_key]).clone().detach()\n\n # Set to 0 the biases corresponding to non-zero neurons, this are not propagated to the next layer\n if torch.sum(torch.abs(biases)) != 0:\n for col in range(state[k].shape[0]):\n if torch.sum(torch.abs(state[k][col])) != 0:\n biases[col] = 0\n # This layer has no natural biases but we have value propagated from the previous layer\n # The computed update are the \"ghost biases\" of this layer that will be propagated to the next layer with bises\n if bias_key not in state:\n biases = update\n\n # Signal that we have biases to propagate to the next layer\n if torch.sum(torch.abs(biases)) != 0:\n propagate_bias = True\n\n prev_module = next_module\n\n return state", "def process_datapoints(datapoints):\n point_dict = {}\n\n ddata = [p for p in datapoints]\n for point in ddata:\n point_dict[hash_datapoint(point)] = {'results': [],\n 'time': [],\n 'features': point['features']}\n\n for point in ddata:\n point_dict[hash_datapoint(point)]['results'].append(point['result'])\n point_dict[hash_datapoint(point)]['time'].append(point['time'])\n\n for e in point_dict:\n result_array = np.array(point_dict[e]['results'])\n point_dict[e]['n'] = len(point_dict[e]['results'])\n point_dict[e]['mu'] = np.mean(result_array)\n point_dict[e]['sigma'] = np.std(result_array)\n del point_dict[e]['results']\n\n return point_dict", "def update_weights(net, input_values, desired_output, neuron_outputs, r=1):\n raise NotImplementedError", "def get_weights(train_generator):\n classes = list(train_generator.class_indices.values())\n cw = class_weight.compute_class_weight('balanced',\n np.unique(classes),\n train_generator.classes)\n m = min(cw)\n cw = [(el / m) for el in cw]\n\n return dict(zip(classes, cw))", "def predictRating(toPredict, candidateList):\n\n ratingRelevantCandidates = []\n\n #Remove candidates 
with no rating specified\n for candidate in candidateList:\n currentCandidate = candidate[1]\n\n if float(currentCandidate['vote_avg']) > 0:\n ratingRelevantCandidates.append((float(currentCandidate['vote_avg']), candidate))\n\n #print(\"ratings::::::::\",currentCandidate['vote_avg'])\n\n #Remove outlier candidates based on rating\n ratingMean = np.mean([x[0] for x in ratingRelevantCandidates])\n print(\"ratingMean\", ratingMean)\n ratingSD = np.std([x[0] for x in ratingRelevantCandidates])\n print(\"ratingSD\", ratingSD)\n\n finalRatings = [x for x in ratingRelevantCandidates if (float(x[0]) < ratingMean + ratingSD)]#1.5 *\n finalRatings = [x for x in finalRatings if (float(x[0]) > ratingMean - ratingSD)]#.75 *\n\n finalRatingCandidatesWithWeight = []\n\n #Weight each candidate based on vote count, direct and actor popularity and matching score from part 1\n for candidate in finalRatings:\n directorPoints = compareDirectorPoints(toPredict['director'], candidate[1][1]['director'])\n actorPoints = compareActorPoints(toPredict['cast'], candidate[1][1]['cast'])\n voteCountPoints = int(candidate[1][1]['vote_count'])\n matchPoints = candidate[1][0] / np.max([float(x[1][0]) for x in finalRatings]) * 100\n candidateWeight = PREDICTION_MATCHPOINTS_WEIGHT * matchPoints \\\n + PREDICTION_ACTOR_WEIGHT * actorPoints \\\n + PREDICTION_DIRECTOR_WEIGHT * directorPoints \\\n + PREDICTION_VOTECOUNT_WEIGHT * voteCountPoints\n\n finalRatingCandidatesWithWeight.append((candidateWeight, candidate[0]))\n\n #Calculate the prediction\n sumRatingCandidateWeights = np.sum([float(x[0]) for x in finalRatingCandidatesWithWeight])\n sumRatingTimesCandidateWeight = np.sum([float(x[0]) * float(x[1]) for x in finalRatingCandidatesWithWeight])\n\n ratingPrediction = float(sumRatingTimesCandidateWeight / sumRatingCandidateWeights)\n\n return ratingPrediction", "def _weights_for_target(self, target):\n\n self._update_global_transform(target)\n projected_target = self.global_transform.pseudoinverse().apply(target)\n # now we have the target in model space, project it to recover the\n # weights\n new_weights = self.model.project(projected_target)\n # TODO investigate the impact of this, could be problematic\n # the model can't perfectly reproduce the target we asked for -\n # reset the global_transform.target to what it CAN produce\n #refined_target = self._target_for_weights(new_weights)\n #self.global_transform.target = refined_target\n return new_weights", "def adjusted_grade(clicker_points, midterm_grade):\n if not clicker_points:\n return midterm_grade\n avg_clicker_points = sum(clicker_points.values())/len(clicker_points)\n joint_set = set(clicker_points).union(set(midterm_grade))\n clicker_extra_points = {student: 1 if clicker_points.get(student)\n >= avg_clicker_points else 0 for\n student in clicker_points}\n return {name: midterm_grade.get(name, 0) +\n clicker_extra_points.get(name, 0)\n for name in joint_set}", "def lfads_training_loss(params, lfads_hps, key, x_bxt, kl_scale, keep_rate):\n losses = lfads_losses(params, lfads_hps, key, x_bxt, kl_scale, keep_rate)\n return losses['total']", "def checkpoint_filter_fn(state_dict, model):\n if 'patch_embed.proj.weight' in state_dict:\n # Remap FB ResMlp models -> timm\n out_dict = {}\n for k, v in state_dict.items():\n k = k.replace('patch_embed.', 'stem.')\n k = k.replace('attn.', 'linear_tokens.')\n k = k.replace('mlp.', 'mlp_channels.')\n k = k.replace('gamma_', 'ls')\n if k.endswith('.alpha') or k.endswith('.beta'):\n v = v.reshape(1, 1, -1)\n out_dict[k] = v\n 
return out_dict\n return state_dict", "def learn_MAP(self,knowledge,k=10):\n \n gradients = {}\n for i in range(k):\n\n #create TILDE(R) tree object\n tree_i = TILDE(typ=\"regression\",score=\"WV\",max_depth=self.max_depth)\n\n #subsample negatives if too many for each tree\n sampled_neg = deepcopy(self.neg)\n if len(self.neg) > 2*len(self.pos):\n sampled_neg = sample(self.neg,2*len(self.pos))\n\n #compute gradients as I-P\n for ex in self.examples:\n parameter = knowledge.calculate_parameter(self.data,\n ex,\n self.examples[ex])\n p = sigmoid(self.examples[ex])\n if ex in self.pos:\n gradients[ex] = 1-p - parameter\n elif ex in sampled_neg:\n gradients[ex] = 0-p - parameter\n\n #fit tree on gradients\n tree_i.learn(self.data,self.bk,self.target,examples=gradients)\n \n #recompute example values as previous example value + tree_i value\n for ex in self.examples:\n tree_i_value = tree_i.infer(self.data,ex)\n self.examples[ex] += tree_i_value\n\n #add tree to boosted_trees\n self.boosted_trees.append(tree_i)", "def get_hard_target_model_updates(target, source):\n target.set_weights(source.get_weights())\n\n return target", "def fit_mweight(data, C=1., threshold=1e-4, learning_rate=None):\n dataPM = toPM(data).T;\n m,n = dataPM.shape;\n if learning_rate is None: learning_rate = 1-np.sqrt(np.log(n)/m);\n\n L,h = np.zeros((n,n)), np.zeros((n,)) # initialize parameters (dense) & weights\n eye = list(range(n));\n Wp = np.ones((n,n))/(2*(n-1)); Wp[eye,eye] = 0; Wm = np.copy(Wp);\n Hp = np.ones((n,))/2; Hm = np.copy(Hp);\n\n for i,xpm in enumerate(dataPM):\n phat = 1./(1.+np.exp(2.*L.dot(xpm) + h))\n ell_H = (phat - (1.-xpm)/2.);\n ell_W = np.outer( ell_H , xpm);\n Wp *= learning_rate**(-ell_W); Wm *= learning_rate**(ell_W);\n Hp *= learning_rate**(-ell_H); Hm *= learning_rate**(ell_H);\n L = C/(Wp.sum(1)+Wm.sum(1))[:,np.newaxis]*(Wp-Wm)\n h = C/(Hp+Hm)*(Hp-Hm)\n \n L = .5*(L+L.T);\n L[np.abs(L)<threshold] = 0;\n L[eye,eye] = h;\n return Ising(L);", "def test_weights_update(self):\n # Reset models.\n self.model.load_state_dict(self.initial_model_dict)\n self.actor_model.load_state_dict(self.initial_actor_model_dict)\n\n polybeast.learn(*self.learn_args)\n\n model_state_dict = self.model.state_dict(keep_vars=True)\n actor_model_state_dict = self.actor_model.state_dict(keep_vars=True)\n for key, initial_tensor in self.initial_model_dict.items():\n model_tensor = model_state_dict[key]\n actor_model_tensor = actor_model_state_dict[key]\n # Assert that the gradient is not zero for the learner.\n self.assertGreater(torch.norm(model_tensor.grad), 0.0)\n # Assert actor has no gradient.\n # Note that even though actor model tensors have no gradient,\n # they have requires_grad == True. 
No gradients are ever calculated\n # for these tensors because the inference function in polybeast.py\n # (that performs forward passes with the actor_model) uses torch.no_grad\n # context manager.\n self.assertIsNone(actor_model_tensor.grad)\n # Assert that the weights are updated in the expected way.\n # We manually perform a gradient descent step,\n # and check that they are the same as the calculated ones\n # (ignoring floating point errors).\n expected_tensor = (\n initial_tensor.detach().numpy() - self.lr * model_tensor.grad.numpy()\n )\n np.testing.assert_almost_equal(\n model_tensor.detach().numpy(), expected_tensor\n )\n np.testing.assert_almost_equal(\n actor_model_tensor.detach().numpy(), expected_tensor\n )", "def mutate_link_weights(self, perturb_prob=.9, cold_prob=.1):\n # genetics.cpp:737 - Looks like they either just add a random value\n # in (-1,1) or they make the weight a value (-1,1). This seems a bit\n # odd. Also, not sure why they say \"GAUSSIAN\" since I think they are\n # using a uniform value. This is complicated somewhat by the power and\n # powermod, but randposneg()*randfloat() just yields a random number in\n # (-1,1). These functions are defined in networks.h\n\n # Their code for this section contains much more than was described in\n # the paper. For now, I'm implementing it as it sounds from the paper\n # \"There was an 80% chance of a genome having its connection weights\n # mutated, in which case each weight had a 90% chance of being\n # uniformly perturbed and a 10% chance of being assigned a new random\n # value.\n\n if perturb_prob + cold_prob > 1:\n raise ValueError('perturb_prob + cold_prob cannot be greater than 1')\n for g in self.link_genes:\n r = random.random()\n weight_change = random.uniform(-1,1)\n if r < perturb_prob:\n g.weight += weight_change\n elif r < perturb_prob+cold_prob:\n g.weight = weight_change\n # Else do nothing to that weight", "def make_guard_fp_to_bw(safe_fp_to_bw, malicious_fp_to_bw):\r\n\r\n guard_fp_to_bw = {}\r\n\r\n # add innocent guards\r\n for guard_fp, bw in safe_fp_to_bw.items():\r\n guard_fp_to_bw[guard_fp] = bw\r\n\r\n # add malicious guards\r\n for guard_fp, bw in malicious_fp_to_bw.items():\r\n guard_fp_to_bw[guard_fp] = bw\r\n\r\n return guard_fp_to_bw", "def _update_weight(self, time):\r\n # Until the relative time window, return original weights.\r\n if time < self.window - 1:\r\n return self.weights\r\n\r\n # Set the current predicted relatives value.\r\n current_prediction = self._calculate_predicted_relatives(time)\r\n\r\n # Set the deviation from the mean of current prediction.\r\n predicted_deviation = current_prediction - np.ones(self.number_of_assets) * np.mean(\r\n current_prediction)\r\n\r\n # Calculate alpha, the lagrangian multiplier.\r\n norm2 = np.linalg.norm(predicted_deviation, ord=1) ** 2\r\n\r\n # If norm2 is zero, return previous weights.\r\n if norm2 == 0:\r\n return self.weights\r\n alpha = np.minimum(0, (current_prediction * self.weights - self.epsilon) / norm2)\r\n\r\n # Update new weights.\r\n new_weights = self.weights - alpha * predicted_deviation\r\n\r\n # Project to simplex domain.\r\n new_weights = self._simplex_projection(new_weights)\r\n\r\n return new_weights" ]
[ "0.67305875", "0.58836514", "0.5748413", "0.5478308", "0.54563385", "0.53294194", "0.5201591", "0.51845413", "0.5084596", "0.50814384", "0.5060465", "0.50252044", "0.5015587", "0.50075173", "0.49619642", "0.49579272", "0.49395669", "0.4937159", "0.49219316", "0.490734", "0.4895948", "0.48815513", "0.48718518", "0.48683572", "0.4858004", "0.48577145", "0.48546764", "0.4853565", "0.48513058", "0.48468053", "0.48375124", "0.4830606", "0.48306015", "0.48306015", "0.48122045", "0.47950864", "0.47912475", "0.47876573", "0.47862765", "0.47665283", "0.47647104", "0.47601807", "0.4748075", "0.47413445", "0.4740971", "0.47405353", "0.47404122", "0.47349194", "0.47331467", "0.47307375", "0.47131866", "0.47130272", "0.47129512", "0.47111768", "0.47064653", "0.47010368", "0.47010368", "0.46960095", "0.46928254", "0.4687646", "0.46828154", "0.46772918", "0.4675599", "0.46740788", "0.467143", "0.46686912", "0.4656021", "0.46507895", "0.4647852", "0.4647852", "0.46418607", "0.46272177", "0.46217248", "0.46183142", "0.46058935", "0.45986685", "0.45889297", "0.45860803", "0.45832992", "0.45829138", "0.45812497", "0.45735875", "0.4572078", "0.45667595", "0.45630488", "0.45535776", "0.45519054", "0.45497826", "0.4549346", "0.45486242", "0.4539701", "0.45396984", "0.45392993", "0.45358697", "0.45339853", "0.45336485", "0.4531873", "0.4528623", "0.45284376", "0.45250374" ]
0.78441054
0
Performs the Adaboost algorithm for up to max_rounds rounds. Returns the resulting overall classifier H, represented as a list of (classifier, voting_power) tuples.
def adaboost(training_points, classifier_to_misclassified, use_smallest_error=True, mistake_tolerance=0, max_rounds=INF): point_to_weight = initialize_weights(training_points) H = [] # (classifier, voting_power) while True: # exit conditions if is_good_enough(H, training_points, classifier_to_misclassified, mistake_tolerance): break if max_rounds == 0: break classifier_to_error_rate = calculate_error_rates(point_to_weight, classifier_to_misclassified) best_classifier = None try: best_classifier = pick_best_classifier(classifier_to_error_rate, use_smallest_error) except NoGoodClassifiersError: break max_rounds -= 1 error_rate = classifier_to_error_rate[best_classifier] H.append((best_classifier, calculate_voting_power(error_rate))) point_to_weight = update_weights(point_to_weight, classifier_to_misclassified[best_classifier], error_rate) return H
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def modelAdaBoost():\n num_estimators = [1,5,10,50,100,150]\n learning_rate = 0.1\n max_depth = 3\n base_estimate = DecisionTreeClassifier(max_depth=max_depth)\n random_state = 20 # Do not change this random_state\n \n obj_boost = []\n \n \"\"\" \n Create a list of objects for the classifier \n for each of combination of above num_estimators and learning_rate\n \"\"\"\n for n_est in num_estimators:\n boost = AdaBoostClassifier(n_estimators=n_est, \n learning_rate=learning_rate, \n base_estimator = base_estimate, \n random_state =random_state)\n obj_boost.append(boost)\n\n return obj_boost", "def give_balanced_classes(reviews, votes, votes_threshold):\n if votes_threshold <= 0:\n print \"Needs positive threshold\"\n return\n\n negative_reviews_indices = []\n\n # Find all the funny reviews we can\n final_reviews = []\n final_labels = []\n for i, review in enumerate(reviews):\n if votes[i] >= votes_threshold:\n final_reviews.append(review)\n final_labels.append(1)\n elif votes[i] == 0:\n negative_reviews_indices.append(i)\n\n # We want balanced classes so take same number\n np.random.shuffle(negative_reviews_indices)\n num_positive_reviews = len(final_reviews)\n for i in range(num_positive_reviews):\n final_reviews.append(reviews[negative_reviews_indices[i]])\n final_labels.append(0)\n\n # Shuffle final reviews and labels\n combined_lists = zip(final_reviews, final_labels)\n np.random.shuffle(combined_lists)\n final_reviews[:], final_labels[:] = zip(*combined_lists)\n\n print \"Returning %d positive reviews and a total of %d reviews\" % (num_positive_reviews, len(final_reviews))\n\n return (final_reviews, final_labels)", "def predict_boosting_example(x, h_ens):\r\n\r\n arr = []\r\n sum_alpha = 0\r\n\r\n for y in h_ens:\r\n # splitting hypothesis, weight pairs\r\n alpha, tree = h_ens[y]\r\n tst_pred = predict_example(x, tree)\r\n # appending prediction\r\n arr.append(tst_pred*alpha)\r\n sum_alpha += alpha\r\n predict_egz = np.sum(arr) / sum_alpha\r\n # weak learner\r\n if predict_egz >= 0.5:\r\n return 1\r\n else:\r\n return 0", "def predict_bagging_example(x, h_ens):\r\n arr = []\r\n for y in h_ens:\r\n \t# calls predict example repeatedly and stores them in an array\r\n tst_pred = predict_example(x, h_ens[y][1])\r\n arr.append(tst_pred)\r\n # returning the maximum voted\r\n predict_egz = max(set(arr), key=arr.count)\r\n return predict_egz", "def from_adaboost(cls, X_train, y_train, data_normalizer_class, n_classifiers):\n\n normalizer = data_normalizer_class().fit(X_train.values)\n X_train = pd.DataFrame(data=normalizer.transform(X_train.values), index=X_train.index, columns=X_train.columns)\n\n rf = AdaBoostClassifier(n_estimators=n_classifiers, algorithm='SAMME') # type: AdaBoostClassifier\n rf = rf.fit(X_train, y_train) # type: AdaBoostClassifier\n\n n_classes = rf.n_classes_\n\n voting_weights = np.empty((n_classifiers, n_classes), dtype=np.float32)\n voting_weights[:] = rf.estimator_weights_[:, np.newaxis]\n\n ensemble = Ensemble(\n X_train=X_train, y_train=y_train,\n data_normalizer_class=normalizer,\n classifiers=rf.estimators_,\n features=np.ones(\n (n_classifiers, X_train.shape[1]), dtype=np.int32\n ),\n activated=np.ones(n_classifiers, dtype=np.int32),\n voting_weights=voting_weights,\n )\n return ensemble", "def adaBoost(trainingSamples, weights, featuresSet, classifierCount, threadCount=12, verbose=True):\n #result\n classifiers = []\n classifierWeights = []\n \n for i in range(0, classifierCount):\n if verbose:\n print(\"Training classifier \" + str(i+1) + \"/\" + 
str(classifierCount) + \"....\")\n\n #normalize weights\n weights = normWeights(weights)\n \n ########## train all classifiers ##########\n #build workers\n workers = []\n for chunk in chunks(featuresSet, threadCount):\n workers.append(Trainer(trainingSamples, weights, chunk))\n\n #run them in a thread pool\n trainedClassifiers = []\n with Pool(processes=threadCount) as pool:\n trainedClassifiers = pool.map(workerRunner, workers)\n trainedClassifiers = getResults(trainedClassifiers)\n\n ########## compute error for each classifier ##########\n #build workers\n workers = []\n for chunk in chunks(trainedClassifiers, threadCount):\n workers.append(ErrorWorker(trainingSamples, weights, chunk))\n \n #run them in a thread pool\n errors = []\n with Pool(processes=threadCount) as pool:\n errors = pool.map(workerRunner, workers)\n errors = getResults(errors)\n\n #choose the classifier with the lowest error\n errors = np.array(errors) \n bestError = errors.min()\n bestClassifierIndex = np.where(errors == errors.min())[0][0]\n bestClassifier = trainedClassifiers[bestClassifierIndex]\n b = bestError/(1-bestError)\n classifiers.append(bestClassifier)\n if verbose:\n print(\"Found feature with error: \" + str(bestError))\n \n #remove used feature from the dataset\n del featuresSet[bestClassifierIndex]\n \n #compute classifier weight\n classifierWeight = np.log(1/b)\n classifierWeights.append(classifierWeight)\n if verbose:\n print(\"Classifier has weight: \" + str(classifierWeight))\n \n #update weights\n for i in range(0, len(weights)):\n weights[i] = weights[i]*np.power(b, 1-bestError)\n\n return classifiers, classifierWeights", "def evaluation(self):\n rows_list = []\n for name in self.single_classifier_best.keys():\n row = {}\n row['algorithm'] = name \n row[self.scoring_metric] = self.single_classifier_best[name].best_score_\n rows_list.append(row)\n \n scoring_df = pd.DataFrame(rows_list)\n scoring_sorted = scoring_df.sort_values(self.scoring_metric, ascending=False)\n print()\n print('*'*shutil.get_terminal_size().columns)\n print(scoring_sorted)\n print('*'*shutil.get_terminal_size().columns)\n self.evaluation_scores = scoring_sorted", "def lazy_greedy_max(self, budget):\r\n\r\n classes, no_elements = torch.unique(self.y_trn, return_counts=True)\r\n len_unique_elements = no_elements.shape[0]\r\n per_class_bud = int(budget / len(classes))\r\n final_per_class_bud = []\r\n _, sorted_indices = torch.sort(no_elements, descending = True)\r\n\r\n if self.selection_type == 'PerClass':\r\n \r\n total_idxs = 0\r\n for n_element in no_elements:\r\n final_per_class_bud.append(min(per_class_bud, torch.IntTensor.item(n_element)))\r\n total_idxs += min(per_class_bud, torch.IntTensor.item(n_element))\r\n \r\n if total_idxs < budget:\r\n bud_difference = budget - total_idxs\r\n for i in range(len_unique_elements):\r\n available_idxs = torch.IntTensor.item(no_elements[sorted_indices[i]])-per_class_bud \r\n final_per_class_bud[sorted_indices[i]] += min(bud_difference, available_idxs)\r\n total_idxs += min(bud_difference, available_idxs)\r\n bud_difference = budget - total_idxs\r\n if bud_difference == 0:\r\n break\r\n\r\n total_greedy_list = []\r\n for i in range(len_unique_elements):\r\n idxs = torch.where(self.y_trn == classes[i])[0]\r\n \r\n if self.submod == 'facility_location':\r\n self.compute_score(idxs)\r\n fl = apricot.functions.facilityLocation.FacilityLocationSelection(random_state=0, metric='precomputed',\r\n n_samples=final_per_class_bud[i])\r\n elif self.submod == 'graph_cut':\r\n 
self.compute_score(idxs)\r\n fl = apricot.functions.graphCut.GraphCutSelection(random_state=0, metric='precomputed',\r\n n_samples=final_per_class_bud[i])\r\n elif self.submod == 'saturated_coverage':\r\n self.compute_score(idxs)\r\n fl = apricot.functions.saturatedCoverage.SaturatedCoverageSelection(random_state=0, metric='precomputed',\r\n n_samples=final_per_class_bud[i])\r\n elif self.submod == 'sum_redundancy':\r\n self.compute_score(idxs)\r\n fl = apricot.functions.sumRedundancy.SumRedundancySelection(random_state=0, metric='precomputed',\r\n n_samples=final_per_class_bud[i])\r\n elif self.submod == 'feature_based':\r\n fl = apricot.functions.featureBased.FeatureBasedSelection(random_state=0, n_samples=final_per_class_bud[i])\r\n\r\n if self.submod == 'feature_based':\r\n\r\n x_sub = fl.fit_transform(self.x_trn[idxs].numpy())\r\n greedyList = self.get_index(self.x_trn[idxs].numpy(), x_sub)\r\n total_greedy_list.extend(idxs[greedyList])\r\n\r\n else: \r\n\r\n sim_sub = fl.fit_transform(self.dist_mat.cpu().numpy())\r\n greedyList = list(np.argmax(sim_sub, axis=1))\r\n total_greedy_list.extend(idxs[greedyList])\r\n\r\n elif self.selection_type == 'Supervised':\r\n \r\n \r\n if self.submod == 'feature_based':\r\n \r\n class_map = {}\r\n for i in range(len_unique_elements):\r\n class_map[torch.IntTensor.item(classes[i])] = i #Mapping classes from 0 to n\r\n \r\n sparse_data = torch.zeros([self.x_trn.shape[0], self.x_trn.shape[1]*len_unique_elements])\r\n for i in range(self.x_trn.shape[0]):\r\n \r\n start_col = class_map[torch.IntTensor.item(self.y_trn[i])]*self.x_trn.shape[1]\r\n end_col = start_col+self.x_trn.shape[1]\r\n sparse_data[i, start_col:end_col] = self.x_trn[i, :]\r\n\r\n fl = apricot.functions.featureBased.FeatureBasedSelection(random_state=0, n_samples=budget)\r\n x_sub = fl.fit_transform(sparse_data.numpy())\r\n total_greedy_list = self.get_index(sparse_data.numpy(), x_sub)\r\n\r\n else:\r\n for i in range(len(classes)):\r\n \r\n if i == 0:\r\n idxs = torch.where(self.y_trn == classes[i])[0]\r\n N = len(idxs)\r\n self.compute_score(idxs)\r\n row = idxs.repeat_interleave(N)\r\n col = idxs.repeat(N)\r\n data = self.dist_mat.cpu().numpy().flatten()\r\n else:\r\n idxs = torch.where(self.y_trn == classes[i])[0]\r\n N = len(idxs)\r\n self.compute_score(idxs)\r\n row = torch.cat((row, idxs.repeat_interleave(N)), dim=0)\r\n col = torch.cat((col, idxs.repeat(N)), dim=0)\r\n data = np.concatenate([data, self.dist_mat.cpu().numpy().flatten()], axis=0)\r\n \r\n \r\n sparse_simmat = csr_matrix((data, (row.numpy(), col.numpy())), shape=(self.N_trn, self.N_trn))\r\n #self.dist_mat = sparse_simmat\r\n\r\n if self.submod == 'facility_location':\r\n fl = apricot.functions.facilityLocation.FacilityLocationSelection(random_state=0, metric='precomputed',\r\n n_samples=budget)\r\n elif self.submod == 'graph_cut':\r\n fl = apricot.functions.graphCut.GraphCutSelection(random_state=0, metric='precomputed',\r\n n_samples=budget)\r\n elif self.submod == 'saturated_coverage':\r\n fl = apricot.functions.saturatedCoverage.SaturatedCoverageSelection(random_state=0, metric='precomputed',\r\n n_samples=budget)\r\n elif self.submod == 'sum_redundancy':\r\n fl = apricot.functions.sumRedundancy.SumRedundancySelection(random_state=0, metric='precomputed',\r\n n_samples=budget)\r\n sim_sub = fl.fit_transform(sparse_simmat)\r\n total_greedy_list = list(np.array(np.argmax(sim_sub, axis=1)).reshape(-1))\r\n\r\n\r\n if self.selection_type == 'Full':\r\n \r\n\r\n total_greedy_list = []\r\n idx_end = 
self.x_trn.shape[0] - 1\r\n idxs = torch.linspace(0, idx_end, self.x_trn.shape[0]).long()\r\n\r\n if self.submod == 'facility_location':\r\n self.compute_score(idxs)\r\n fl = apricot.functions.facilityLocation.FacilityLocationSelection(random_state=0, metric='precomputed',\r\n n_samples=budget)\r\n elif self.submod == 'graph_cut':\r\n self.compute_score(idxs)\r\n fl = apricot.functions.graphCut.GraphCutSelection(random_state=0, metric='precomputed',\r\n n_samples=budget)\r\n elif self.submod == 'saturated_coverage':\r\n self.compute_score(idxs)\r\n fl = apricot.functions.saturatedCoverage.SaturatedCoverageSelection(random_state=0, metric='precomputed',\r\n n_samples=budget)\r\n elif self.submod == 'sum_redundancy':\r\n self.compute_score(idxs)\r\n fl = apricot.functions.sumRedundancy.SumRedundancySelection(random_state=0, metric='precomputed',\r\n n_samples=budget)\r\n elif self.submod == 'feature_based':\r\n fl = apricot.functions.featureBased.FeatureBasedSelection(random_state=0, n_samples=budget)\r\n\r\n if self.submod == 'feature_based':\r\n\r\n x_sub = fl.fit_transform(self.x_trn.numpy())\r\n total_greedy_list = self.get_index(self.x_trn.numpy(), x_sub)\r\n\r\n else: \r\n\r\n sim_sub = fl.fit_transform(self.dist_mat.cpu().numpy())\r\n total_greedy_list = list(np.argmax(sim_sub, axis=1))\r\n\r\n return total_greedy_list", "def collect_best_features(self):\n bincsp = self.binary_csp # just to make code shorter\n n_folds = len(self.binary_csp.folds)\n n_class_pairs = len(self.binary_csp.class_pairs)\n result_shape = (n_folds, n_class_pairs)\n self.train_feature = np.empty(result_shape, dtype=object)\n self.train_feature_full_fold = np.empty(result_shape, dtype=object)\n self.test_feature = np.empty(result_shape, dtype=object)\n self.test_feature_full_fold = np.empty(result_shape, dtype=object)\n self.selected_filters_per_filterband = np.empty(result_shape, dtype=object)\n for fold_i in range(n_folds):\n for class_pair_i in range(n_class_pairs):\n bin_csp_train_features = deepcopy(bincsp.train_feature[\n self.selected_filter_inds, fold_i, class_pair_i])\n bin_csp_train_features_full_fold = deepcopy(\n bincsp.train_feature_full_fold[\n self.selected_filter_inds,\n fold_i, class_pair_i])\n bin_csp_test_features = deepcopy(bincsp.test_feature[\n self.selected_filter_inds, fold_i, class_pair_i])\n bin_csp_test_features_full_fold = deepcopy(\n bincsp.test_feature_full_fold[\n self.selected_filter_inds,fold_i, class_pair_i])\n selected_filters_per_filt = self.select_best_filters_best_filterbands(\n bin_csp_train_features, max_features=self.n_features,\n forward_steps=self.forward_steps, \n backward_steps=self.backward_steps,\n stop_when_no_improvement=self.stop_when_no_improvement)\n self.train_feature[fold_i, class_pair_i] = \\\n self.collect_features_for_filter_selection(\n bin_csp_train_features, selected_filters_per_filt)\n self.train_feature_full_fold[fold_i, class_pair_i] = \\\n self.collect_features_for_filter_selection(\n bin_csp_train_features_full_fold, selected_filters_per_filt)\n \n self.test_feature[fold_i, class_pair_i] = \\\n self.collect_features_for_filter_selection(\n bin_csp_test_features, selected_filters_per_filt)\n self.test_feature_full_fold[fold_i, class_pair_i] = \\\n self.collect_features_for_filter_selection(\n bin_csp_test_features_full_fold, selected_filters_per_filt)\n \n self.selected_filters_per_filterband[fold_i, class_pair_i] = \\\n selected_filters_per_filt", "def get_optimal_rounds(dtrain, param):\n num_round = 1000\n bst = xgb.cv(param, dtrain, num_round, 
nfold=10,\n metrics={'logloss', 'auc'}, seed=0,\n callbacks=[xgb.callback.print_evaluation(show_stdv=True),\n xgb.callback.early_stop(10)])\n return len(bst)-1", "def ensemble_accuracy(n_classifiers, accuracy):\n k_start = int(math.ceil(n_classifiers / 2.0))\n probs = [comb(n_classifiers, k) *\n accuracy**k *\n (1 - accuracy)**(n_classifiers - k)\n for k in range(k_start, n_classifiers + 1)]\n return sum(probs)", "def try_ada_boost_decision_tree():\n\n print(\"AdaBoost to Decision Tree\")\n from sklearn.tree import DecisionTreeClassifier\n from sklearn.ensemble import AdaBoostClassifier\n from sklearn.grid_search import GridSearchCV\n\n param_grid = {\"base_estimator__criterion\" : [\"gini\", \"entropy\"],\n \"base_estimator__splitter\" : [\"best\", \"random\"],\n \"n_estimators\": [10, 30]\n }\n\n DTC = DecisionTreeClassifier(random_state = 11, max_features = \"auto\", class_weight = \"balanced\",max_depth = None)\n\n ABC = AdaBoostClassifier(base_estimator = DTC)\n\n grid_search_ABC = GridSearchCV(ABC, param_grid=param_grid, scoring = 'roc_auc')\n\n grid_search_ABC.fit(features_train,labels_train)\n\n pred = grid_search_ABC.predict(features_test)\n accuracy = accuracy_score(labels_test, pred)\n precision = precision_score(labels_test, pred)\n recall = recall_score(labels_test, pred)\n\n print(\"DecisionTree after applying AdaBoost and GridSearchCV:\")\n print(\"accuracy AdaBoost: \", accuracy)\n print(\"precision: \", precision)\n print(\"recall: \", recall)\n print_separator_line()\n dict_results = { \"classifier\": \"AdaBoost decision tree\", \"accuracy\": accuracy, \"precision\": precision, \"recall\": recall }\n return dict_results, grid_search_ABC", "def classify(self, data):\n guesses = []\n for datum in data:\n vectors = util.Counter()\n for l in self.legalLabels:\n vectors[l] = self.weights[l] * datum + self.bias[l]\n guesses.append(vectors.argMax())\n return guesses", "def ensembleVote(x, classes, ensemble):\n votes = np.array([0 for kk in range(len(classes))])\n for i in ensemble:\n votes = votes + classProbs(x, ensemble[i][\"tree\"], classes)\n maxVote = 0\n loc = None\n for ind, vote in enumerate(votes):\n if vote > maxVote:\n maxVote = vote\n loc = ind\n prediction = classes[loc]\n return prediction", "def classify(self, data ):\n guesses = []\n for datum in data:\n vectors = util.Counter()\n for l in self.legalLabels:\n vectors[l] = self.weights[l] * datum\n guesses.append(vectors.argMax())\n return guesses", "def main(keep_best_count, mutation_factor, rounds, target, stagnate):\n ways = [range(len(DISTANCES))]\n result = {'round':0,'cost':None}\n for i in range(rounds):\n ways = mutate(ways,mutation_factor)\n best = []\n for way in ways:\n best.append((rate(way),way))\n best.sort()\n if VERBOSITY:\n for way in best:\n print way\n print \"Round %d best way is %s\" % (i+1, best[0][0])\n # break if we hit the target\n if best[0][0] <= target:\n print \"Hit Target\"\n break\n # break if we stagnate to long\n if result['cost'] is None or best[0][0] <result['cost']:\n result['cost'] = best[0][0]\n result['round'] = i+1\n elif result['round'] + stagnate <= i+1:\n print \"Stagnate to long\"\n break\n ways = list(b[1] for b in best[0:keep_best_count])\n print \"\"\n print \"best found order with cost=%d\" % best[0][0]\n print ' '.join(list(NAMES[i] for i in best[0][1]))\n print \"\"", "def __call__(self, clfs, dataset):\n if len(clfs)==0:\n return [] # to don't even bother\n\n all_label_counts = None\n for clf in clfs:\n # Lets check first if necessary conditional attribute is 
enabled\n if not clf.ca.is_enabled(\"predictions\"):\n raise ValueError, \"MaximalVote needs classifiers (such as \" + \\\n \"%s) with state 'predictions' enabled\" % clf\n predictions = clf.ca.predictions\n if all_label_counts is None:\n all_label_counts = [ {} for i in xrange(len(predictions)) ]\n\n # for every sample\n for i in xrange(len(predictions)):\n prediction = predictions[i]\n # XXX fishy location due to literal labels,\n # TODO simplify assumptions and logic\n if isinstance(prediction, basestring) or \\\n not is_sequence_type(prediction):\n prediction = (prediction,)\n for label in prediction: # for every label\n # XXX we might have multiple labels assigned\n # but might not -- don't remember now\n if not label in all_label_counts[i]:\n all_label_counts[i][label] = 0\n all_label_counts[i][label] += 1\n\n predictions = []\n # select maximal vote now for each sample\n for i in xrange(len(all_label_counts)):\n label_counts = all_label_counts[i]\n # lets do explicit search for max so we know\n # if it is unique\n maxk = [] # labels of elements with max vote\n maxv = -1\n for k, v in label_counts.iteritems():\n if v > maxv:\n maxk = [k]\n maxv = v\n elif v == maxv:\n maxk.append(k)\n\n assert len(maxk) >= 1, \\\n \"We should have obtained at least a single key of max label\"\n\n if len(maxk) > 1:\n warning(\"We got multiple labels %s which have the \" % maxk +\n \"same maximal vote %d. XXX disambiguate. \" % maxv +\n \"Meanwhile selecting the first in sorted order\")\n predictions.append(sorted(maxk)[0])\n\n ca = self.ca\n ca.estimates = all_label_counts\n ca.predictions = predictions\n return predictions", "def classify(X, Y, skf, clf, round_threshold=0.5, average=\"macro\"):\n X = X.values\n if isinstance(Y, pd.Series):\n labels = [\"{}_0\".format(Y.name), \"{}_1\".format(Y.name)]\n Y = np.ravel(Y)\n else:\n Y, labels = Y.values, list(Y.columns)\n\n fold_results = []\n for train, test in skf.split(X, Y):\n current_clf = clone(clf)\n X_train, X_test, Y_train, Y_test = X[train], X[test], Y[train], Y[test]\n\n current_clf.fit(X_train, Y_train)\n Y_prob = current_clf.predict_proba(X_test)\n Y_pred = current_clf.predict(X_test)\n\n (p, r, f1, auc, jac, hl, p_c,\n r_c, f1_c, s_c) = calculate_metrics(Y_test, Y_pred, Y_prob, average)\n\n # calculate overall scores for current fold\n fold_scores = {\n \"precision\": p,\n \"recall\": r,\n \"f1\": f1,\n \"auc\": auc,\n \"jaccard\": jac,\n \"hamming_loss\": hl\n }\n\n for i in range(len(labels)):\n fold_scores[\"precision_{0}\".format(labels[i])] = p_c[i]\n fold_scores[\"recall_{0}\".format(labels[i])] = r_c[i]\n fold_scores[\"f1_{0}\".format(labels[i])] = f1_c[i]\n fold_scores[\"support_{0}\".format(labels[i])] = s_c[i]\n\n fold_results.append({\n \"scores\": fold_scores,\n \"y_pred\": Y_pred,\n \"y_prob\": Y_prob,\n \"y_test\": Y_test\n })\n\n scores = {}\n for score in fold_results[0][\"scores\"].keys():\n values = [s[\"scores\"][score] for s in fold_results]\n scores[score] = (np.sum(values) if score.startswith(\"support_\")\n else np.mean(values))\n\n return scores, fold_results", "def classify(self, data):\n guesses = []\n for datum in data:\n vectors = util.Counter()\n for l in self.legalLabels:\n vectors[l] = self.weights[l] * datum\n guesses.append(vectors.argMax())\n return guesses", "def classify(self, testData):\n\tguesses = []\n\tself.posteriors = [] # Log posteriors are stored for later data analysis (autograder).\n\tfor datum in testData:\n\t posterior = self.calculateLogJointProbabilities(datum)\n\t 
guesses.append(posterior.argMax())\n\t self.posteriors.append(posterior)\n\treturn guesses", "def fit(self, X, y):\n X_array=np.asarray(X) \n X_data_int=X_array[:,0:X_array.shape[1]-1]\n X_data= X_data_int/1.0\n \n p=1\n number_of_p=int(X_data.shape[0]*p)\n l = range(0,X_data.shape[0])\n randlist = random.sample(l, number_of_p) \n \n for i in randlist:\n y[i]=y[i]*(-1)\n\n sample_weight2 = np.empty(X_data.shape[0], dtype=np.float)\n sample_weight2[:] = 1. / X_data.shape[0]\n self.estimators_ = []\n\n\n self.estimator_errors_ = np.ones(self.iters, dtype=np.float)\n \n #classfier=CLASSIFIERS[self.algorithm]()\n \n for iboost in range(self.iters):\n # Boosting step\n classfier=CLASSIFIERS[self.algorithm]() \n sample_weight3=[]\n sample_weight3=sample_weight2\n classfier.fit(X_data, y,sample_weight3)\n y_predict = classfier.predict(X_data)\n \n # Instances incorrectly classified\n incorrect = y_predict != y\n\n # Error fraction\n \n estimator_error = np.mean(np.average(incorrect, weights=sample_weight2, axis=0))\n #estimator_error = np.sum(np.average(incorrect, weights=sample_weight2, axis=0))\n \n # Stop if classification is perfect\n if estimator_error <= 0:\n estimator_weight = 1.\n estimator_error = 0.\n \n # Stop if the error is at least as bad as random guessing\n if estimator_error >= 1. - (1. / 2):\n raise ValueError('BaseClassifier in AdaBoostClassifier '\n 'ensemble is worse than random, ensemble '\n 'can not be fit.')\n break\n\n #estimator_weight = 1./2*np.log((1. - estimator_error) / estimator_error)\n estimator_weight = np.log((1. - estimator_error) / estimator_error)\n # Only boost the weights if I will fit again\n if not iboost == self.iters - 1:\n # Only boost positive weights\n sample_weight2 *= np.exp(estimator_weight * incorrect * ((sample_weight2 > 0) | (estimator_weight < 0)))\n\n # Early termination\n if sample_weight2 is None:\n break\n\n self.estimator_weights_[iboost] = estimator_weight\n self.estimator_errors_[iboost] = estimator_error\n\n # Stop if error is zero\n if estimator_error == 0:\n break\n\n sample_weight_sum = np.sum(sample_weight2)\n \n\n # Stop if the sum of sample weights has become non-positive\n if sample_weight_sum <= 0:\n break\n\n if iboost < self.iters - 1:\n # Normalize\n sample_weight2 /= sample_weight_sum\n \n self.estimators_.append(classfier)\n\n return self", "def naive_bayes_classify(df: pd.DataFrame, vect, names):\n features = vect\n target = df.success_lvl\n\n X_train, X_test, y_train, y_test = \\\n train_test_split(features, target, test_size=0.2, random_state=42)\n\n nb_clf = MultinomialNB()\n nb_clf.fit(X_train, y_train)\n nb_predictions = nb_clf.predict(X_test)\n print('Accuracy score for Naive Bayes:', accuracy_score(y_test, nb_predictions))\n\n\n # Find Top/Bottom num of terms used to describe the classes.\n num = 10\n low_class_prob_sorted = nb_clf.feature_log_prob_[0, :].argsort()[::-1]\n hi_class_prob_sorted = nb_clf.feature_log_prob_[1, :].argsort()[::-1]\n print('\\n', f'Low score Top{num} phrases:', np.take(names, low_class_prob_sorted[:num]))\n print('\\n', f'Low score Bot{num} phrases:', np.take(names, low_class_prob_sorted[-num:]))\n print('\\n', f'High score Top{num} phrases:', np.take(names, hi_class_prob_sorted[:num]))\n print('\\n', f'High score Bot{num} phrases:', np.take(names, hi_class_prob_sorted[-num:]))", "def borda(election, tiebreaker=None):\n election = np.asarray(election)\n\n ncands = election.shape[1]\n total_tally = np.zeros(ncands, dtype=int)\n\n # Tally candidates in each column, multiply by points for 
each rank level\n for n, column in enumerate(election.T):\n tally = np.bincount(column, minlength=ncands)\n total_tally += (ncands - n)*tally\n\n # Python lists are faster than NumPy here\n total_tally = total_tally.tolist()\n\n # Find the set of candidates who have the highest score (usually only one)\n highest = max(total_tally)\n winners = _all_indices(total_tally, highest)\n\n # Break any ties using specified method\n tiebreak = _get_tiebreak(tiebreaker, _tiebreak_map)\n return tiebreak(winners)[0]", "def tune_learning_algo(data_X, data_y, params_set, algo_factory, folds=5):\n accuracy_for_params = {}\n best_accuracy = 0.0\n best_params = None\n\n # For all the available parameters, do a k-fold evaluation to get average accuracy.\n # Report the params with best accuracy.\n for params in params_set:\n # Do a k fold training and evaluation for this params and get average accuracy.\n accuracy = train_and_evaluate_k_fold(data_X, data_y, params, algo_factory, folds)\n # Save\n accuracy_for_params[params] = accuracy\n # Update the best parameters till now.\n if accuracy > best_accuracy:\n best_params = params\n best_accuracy = accuracy\n # Return.\n return accuracy_for_params, best_params", "def classify(self, data ):\n\t\tguesses = []\n\t\tfor datum in data:\n\t\t\tvectors = util.Counter()\n\t\t\tfor l in self.legalLabels:\n\t\t\t\tvectors[l] = self.weights[l] * datum\n\t\t\tguesses.append(vectors.argMax())\n\t\treturn guesses", "def classify(self, data ):\n\t\tguesses = []\n\t\tfor datum in data:\n\t\t\tvectors = util.Counter()\n\t\t\tfor l in self.legalLabels:\n\t\t\t\tvectors[l] = self.weights[l] * datum\n\t\t\tguesses.append(vectors.argMax())\n\t\treturn guesses", "def classify(self, testData):\n guesses = []\n self.posteriors = [] # Log posteriors are stored for later data analysis (autograder).\n for datum in testData:\n posterior = self.calculateLogJointProbabilities(datum)\n guesses.append(posterior.argMax())\n self.posteriors.append(posterior)\n return guesses", "def classify(self, testData):\n guesses = []\n self.posteriors = [] # Log posteriors are stored for later data analysis (autograder).\n for datum in testData:\n posterior = self.calculateLogJointProbabilities(datum)\n guesses.append(posterior.argMax())\n self.posteriors.append(posterior)\n return guesses", "def get_bests(self):\n set_names = [\"training\", \"hp_selection\", \"validation\"]\n run_tec_conf_set = recursivedict()\n validation = self._campaign_configuration['General']['validation']\n hp_selection = self._campaign_configuration['General']['hp_selection']\n if (validation, hp_selection) in {(\"All\", \"All\"), (\"Extrapolation\", \"All\"), (\"All\", \"HoldOut\"), (\"HoldOut\", \"All\"), (\"HoldOut\", \"HoldOut\"), (\"Extrapolation\", \"HoldOut\")}:\n # For each run, for each technique the best configuration\n run_tec_best_conf = recursivedict()\n\n # Hyperparameter search\n for conf in self._exp_confs:\n run = int(conf.get_signature()[0].replace(\"run_\", \"\"))\n technique = conf.technique\n run_tec_conf_set[run][technique][str(conf.get_signature()[4:])] = conf.mapes\n # First experiment for this technique or better than the current best\n if technique not in run_tec_best_conf[run] or conf.mapes[\"hp_selection\"] < run_tec_best_conf[run][technique].mapes[\"hp_selection\"]:\n run_tec_best_conf[run][technique] = conf\n\n # Print results for each run\n for run in range(0, self._campaign_configuration['General']['run_num']):\n self._logger.info(\"-->Printing results for run %s\", str(run))\n overall_run_best = 
None\n # Print data of single techniques\n for technique in run_tec_best_conf[run]:\n temp = run_tec_best_conf[run][technique]\n self._logger.info(\"---Best result for %s - Configuration is %s - (Training MAPE is %f - HP Selection MAPE is %f) - Validation MAPE is %f\", technique, temp.get_signature()[4:], temp.mapes[\"training\"], temp.mapes[\"hp_selection\"], temp.mapes[\"validation\"])\n\n # Compute which is the best technique\n if not overall_run_best or temp.mapes[\"hp_selection\"] < overall_run_best.mapes[\"hp_selection\"]:\n overall_run_best = temp\n best_model_description = overall_run_best.print_model()\n self._logger.info(\"<--Overall best result is %s %s - (Training MAPE is %f - HP Selection MAPE is %f) - Validation MAPE is %f\", overall_run_best.get_signature()[3:], \"(\" + best_model_description + \")\" if best_model_description else \"\", overall_run_best.mapes[\"training\"], overall_run_best.mapes[\"hp_selection\"], overall_run_best.mapes[\"validation\"])\n\n elif (validation, hp_selection) in {(\"KFold\", \"All\"), (\"KFold\", \"HoldOut\")}:\n folds = float(self._campaign_configuration['General']['folds'])\n # For each run, for each fold, for each technique, the best configuration\n run_fold_tec_best_conf = recursivedict()\n\n # Hyperparameter search inside each fold\n for conf in self._exp_confs:\n run = int(conf.get_signature()[0].replace(\"run_\", \"\"))\n fold = int(conf.get_signature()[1].replace(\"f\", \"\"))\n technique = conf.technique\n if \"hp_selection\" not in run_tec_conf_set[run][technique][str(conf.get_signature_string()[4:])]:\n for set_name in set_names:\n run_tec_conf_set[run][technique][str(conf.get_signature_string()[4:])][set_name] = 0\n for set_name in set_names:\n run_tec_conf_set[run][technique][str(conf.get_signature_string()[4:])][set_name] = run_tec_conf_set[run][technique][str(conf.get_signature_string()[4:])][set_name] + conf.mapes[set_name] / folds\n # First experiment for this fold+technique or better than the current best\n if technique not in run_fold_tec_best_conf[run][fold] or conf.mapes[\"hp_selection\"] < run_fold_tec_best_conf[run][fold][technique].mapes[\"hp_selection\"]:\n run_fold_tec_best_conf[run][fold][technique] = conf\n\n # Aggregate different folds (only the value of the mapes)\n run_tec_set = recursivedict()\n for run in run_fold_tec_best_conf:\n for fold in run_fold_tec_best_conf[run]:\n for tec in run_fold_tec_best_conf[run][fold]:\n if \"hp_selection\" not in run_tec_set[run][technique]:\n for set_name in set_names:\n run_tec_set[run][tec][set_name] = 0\n for set_name in set_names:\n run_tec_set[run][tec][set_name] = run_fold_tec_best_conf[run][fold][tec].mapes[set_name]\n # Print results for each run\n for run in range(0, self._campaign_configuration['General']['run_num']):\n self._logger.info(\"Printing results for run %s\", str(run))\n overall_run_best = ()\n # Print data of single techniques\n for technique in run_tec_set[run]:\n self._logger.info(\"---Best result for %s - (Training MAPE is %f - HP Selection MAPE is %f) - Validation MAPE is %f\", technique, run_tec_set[run][technique][\"training\"], run_tec_set[run][technique][\"hp_selection\"], run_tec_set[run][technique][\"validation\"])\n\n # Compute which is the best technique\n if not overall_run_best or run_tec_set[run][technique][\"hp_selection\"] < overall_run_best[1][\"hp_selection\"]:\n overall_run_best = (technique, run_tec_set[run][technique])\n\n self._logger.info(\"---Overall best result is %s - (Training MAPE is %f - HP Selection MAPE is %f) - Validation 
MAPE is %f\", overall_run_best[0], overall_run_best[1][\"training\"], overall_run_best[1][\"hp_selection\"], overall_run_best[1][\"validation\"])\n\n # Overall best will contain as first argument the technique with the best (across runs) average (across folds) mape on validation; now we consider on all the runs and on all the folds the configuraiton of this technique with best validation mape\n\n elif (validation, hp_selection) in {(\"All\", \"KFold\"), (\"HoldOut\", \"KFold\"), (\"Extrapolation\", \"KFold\")}:\n folds = float(self._campaign_configuration['General']['folds'])\n # For each run, for each technique, for each configuration, the aggregated mape\n run_tec_conf_set = recursivedict()\n\n # Hyperparameter search aggregating over folders\n for conf in self._exp_confs:\n run = int(conf.get_signature()[0].replace(\"run_\", \"\"))\n fold = int(conf.get_signature()[2].replace(\"f\", \"\"))\n technique = conf.technique\n configuration = str(conf.get_signature()[4:])\n if \"hp_selection\" not in run_tec_conf_set[run][technique][configuration]:\n for set_name in set_names:\n run_tec_conf_set[run][technique][configuration][set_name] = 0\n for set_name in set_names:\n run_tec_conf_set[run][technique][configuration][set_name] = run_tec_conf_set[run][technique][configuration][set_name] + conf.mapes[set_name] / folds\n\n # Select the best configuration for each technique across different folders\n run_tec_best_conf = recursivedict()\n for run in run_tec_conf_set:\n for tec in run_tec_conf_set[run]:\n for conf in run_tec_conf_set[run][tec]:\n if tec not in run_tec_best_conf[run] or run_tec_conf_set[run][tec][conf][\"hp_selection\"] < run_tec_best_conf[run][tec][1][\"hp_selection\"]:\n run_tec_best_conf[run][tec] = (conf, run_tec_conf_set[run][tec][conf])\n\n # Print results for each run\n for run in range(0, self._campaign_configuration['General']['run_num']):\n self._logger.info(\"Printing results for run %s\", run)\n overall_run_best = () # (technique, configuration, mapes)\n # Print data of single techniques\n for technique in run_tec_best_conf[run]:\n temp = run_tec_best_conf[run][technique]\n self._logger.info(\"---Best result for %s - Configuration is %s - (Training MAPE is %f - HP Selection MAPE is %f) - Validation MAPE is %f\", technique, temp[0], temp[1][\"training\"], temp[1][\"hp_selection\"], temp[1][\"validation\"])\n\n # Compute which is the best technique\n if not overall_run_best or temp[1][\"hp_selection\"] < overall_run_best[2][\"hp_selection\"]:\n overall_run_best = (technique, temp[0], temp[1])\n\n self._logger.info(\"---Overall best result is %s %s - (Training MAPE is %f - HP Selection MAPE is %f) - Validation MAPE is %f\", overall_run_best[0], overall_run_best[1], overall_run_best[2][\"training\"], overall_run_best[2][\"hp_selection\"], overall_run_best[2][\"validation\"])\n\n elif (validation, hp_selection) in {(\"KFold\", \"KFold\")}:\n folds = float(self._campaign_configuration['General']['folds'])\n # For each run, for each external fold, for each technique, the aggregated mape\n run_efold_tec_conf_set = recursivedict()\n\n # Hyperparameter search aggregating over internal folders\n for conf in self._exp_confs:\n run = int(conf.get_signature()[0].replace(\"run_\", \"\"))\n ext_fold = int(conf.get_signature()[2].replace(\"f\", \"\"))\n technique = conf.technique\n configuration = str(conf.get_signature()[4:])\n if \"hp_selection\" not in run_tec_conf_set[run][technique][configuration]:\n for set_name in set_names:\n 
run_tec_conf_set[run][technique][configuration][set_name] = 0\n for set_name in set_names:\n run_tec_conf_set[run][technique][configuration][set_name] = run_tec_conf_set[run][technique][configuration][set_name] + (conf.mapes[set_name] / (folds * folds))\n if configuration not in run_efold_tec_conf_set[run][ext_fold][technique]:\n for set_name in set_names:\n run_efold_tec_conf_set[run][ext_fold][technique][configuration][set_name] = 0\n for set_name in set_names:\n run_efold_tec_conf_set[run][ext_fold][technique][configuration][set_name] = run_efold_tec_conf_set[run][ext_fold][technique][configuration][set_name] + (conf.mapes[set_name] / (folds * folds))\n\n # Select the best configuration for each technique in each external fold across different internal folders\n run_efold_tec_best_conf = recursivedict()\n for run in run_efold_tec_conf_set:\n for efold in run_efold_tec_conf_set[run]:\n for tec in run_efold_tec_conf_set[run][efold]:\n for conf in run_efold_tec_conf_set[run][efold][tec]:\n if conf not in run_efold_tec_best_conf[run][efold][tec] or run_efold_tec_conf_set[run][efold][tec][conf][\"hp_selection\"] < run_efold_tec_best_conf[run][efold][tec][1][\"hp_selection\"]:\n run_efold_tec_best_conf[run][efold][tec] = (conf, run_efold_tec_conf_set[run][efold][tec][conf], run_efold_tec_conf_set[run][efold][tec][conf])\n\n # Aggregate on external folds\n run_tec_set = recursivedict()\n for run in run_efold_tec_best_conf:\n for efold in run_efold_tec_best_conf[run]:\n for tec in run_efold_tec_best_conf[run][efold]:\n if \"hp_selection\" not in run_tec_set[run][tec]:\n for set_name in set_names:\n run_tec_set[run][tec][set_name] = 0\n for set_name in set_names:\n run_tec_set[run][tec][set_name] = run_tec_set[run][tec][set_name] + run_efold_tec_best_conf[run][efold][tec][1][set_name]\n\n # Print results for each run\n for run in range(0, self._campaign_configuration['General']['run_num']):\n self._logger.info(\"Printing results for run %s\", run)\n overall_run_best = ()\n # Print data of single techniques\n for technique in run_tec_set[run]:\n self._logger.info(\"---Best result for %s - (Training MAPE is %f - HP Selection MAPE is %f) - Validation MAPE is %f\", technique, run_tec_set[run][technique][\"training\"], run_tec_set[run][technique][\"hp_selection\"], run_tec_set[run][technique][\"validation\"])\n\n # Compute which is the best technique\n if not overall_run_best or run_tec_set[run][technique][\"hp_selection\"] < overall_run_best[1][\"hp_selection\"]:\n overall_run_best = (technique, run_tec_set[run][technique])\n\n self._logger.info(\"---Overall best result is %s - (Training MAPE is %f - HP Selection MAPE is %f) - Validation MAPE is %f\", overall_run_best[0], overall_run_best[1][\"training\"], overall_run_best[1][\"hp_selection\"], overall_run_best[1][\"validation\"])\n\n else:\n self._logger.error(\"Unexpected combination: %s\", str((validation, hp_selection)))\n sys.exit(1)\n best_confs = {}\n best_technique = None\n for conf in self._exp_confs:\n technique = conf.technique\n if technique not in best_confs or conf.mapes[\"validation\"] < best_confs[technique].mapes[\"validation\"]:\n best_confs[technique] = conf\n for technique in best_confs:\n if not best_technique or best_confs[technique].mapes[\"validation\"] < best_confs[best_technique].mapes[\"validation\"]:\n best_technique = technique\n if bool(self._campaign_configuration['General']['details']):\n for run in run_tec_conf_set:\n for tec in run_tec_conf_set[run]:\n for conf in run_tec_conf_set[run][tec]:\n assert 
\"hp_selection\" in run_tec_conf_set[run][tec][conf]\n assert \"validation\" in run_tec_conf_set[run][tec][conf], \"training MAPE not found for \" + str(run) + str(tec) + str(conf)\n self._logger.info(\"Run %s - Technique %s - Conf %s - Training MAPE %f - Test MAPE %f\", str(run), ec.enum_to_configuration_label[tec], str(conf), run_tec_conf_set[run][tec][conf][\"hp_selection\"], run_tec_conf_set[run][tec][conf][\"validation\"])\n return best_confs, best_technique", "def postprocess2(scores, classes, bboxes, iou_threshold=0.2, score_threshold=0.5):\n n = len(scores)\n \n count_per_class = {cls:0 for cls in classes}\n bbox_per_class = {cls:[] for cls in classes}\n score_per_class = {cls:[] for cls in classes}\n\n for i in range(n):\n count_per_class[classes[i]] += 1\n bbox_per_class[classes[i]] += [bboxes[i]]\n score_per_class[classes[i]] += [scores[i]]\n \n det_num = 0\n det_classes = [] \n det_scores = []\n det_bboxes = []\n\n for cls in count_per_class:\n current_count = count_per_class[cls]\n current_scores = np.array(score_per_class[cls], np.float32)\n current_bboxes = np.array(bbox_per_class[cls], np.int32)\n\n idx = np.argsort(current_scores)[::-1]\n sorted_scores = current_scores[idx]\n sorted_bboxes = current_bboxes[idx]\n\n top_k_ids = []\n size = 0\n i = 0\n\n while i < current_count:\n if sorted_scores[i] < score_threshold:\n break\n top_k_ids.append(i)\n det_num += 1\n det_classes.append(cls)\n det_scores.append(sorted_scores[i])\n det_bboxes.append(sorted_bboxes[i])\n size += 1\n i += 1\n\n while i < current_count:\n tiled_bbox_i = np.tile(sorted_bboxes[i], (size, 1))\n ious, iofs, ioss = iou_bbox(tiled_bbox_i, sorted_bboxes[top_k_ids])\n max_iou = np.max(ious)\n # max_iof = np.max(iofs)\n # max_ios = np.max(ioss)\n # temp = np.max((max_iof, max_ios))\n if max_iou > iou_threshold:\n i += 1\n else:\n break\n\n return det_num, np.array(det_scores, np.float32), np.array(det_classes, np.int32), np.array(det_bboxes, np.int32)", "def __calculate_agg_shap_scores(self):\n self.agg_stats_timer = SimbaTimer(start=True)\n for clf_state, clf_state_name in zip(range(2), [\"ABSENT\", \"PRESENT\"]):\n self.results = {}\n self.df_save_path = os.path.join(\n self.shap_logs_path,\n \"SHAP_summary_{}_{}_{}.csv\".format(\n self.classifier_name, clf_state_name, self.datetime\n ),\n )\n shap_clf_sliced = self.shap_df[\n self.shap_df[self.classifier_name] == clf_state\n ]\n for feature_category, feature_time_bin in itertools.product(\n self.unique_feature_category_names, self.unique_time_bin_names\n ):\n if feature_category not in self.results.keys():\n self.results[feature_category] = {}\n feature_names_sliced = list(\n self.feature_categories_df.loc[\n :, (feature_category, feature_time_bin)\n ]\n )\n feature_names_sliced = [\n x\n for x in feature_names_sliced\n if str(x) != \"nan\" and x in shap_clf_sliced\n ]\n self.results[feature_category][feature_time_bin] = round(\n shap_clf_sliced[feature_names_sliced].sum(axis=1).mean() * 100, 6\n )\n self.__save_aggregate_scores()\n self.agg_stats_timer.stop_timer()\n self.visualization_timer = SimbaTimer(start=True)\n\n stdout_success(\n msg=f\"Aggregate SHAP statistics saved in {self.shap_logs_path} directory\",\n elapsed_time=self.agg_stats_timer.elapsed_time_str,\n )", "def classify(self, testData):\n guesses = []\n self.posteriors = [] # Log posteriors are stored for later data analysis (autograder).\n for datum in testData:\n posterior = self.calculateLogJointProbabilities(datum)\n guesses.append(posterior.argMax())\n 
self.posteriors.append(posterior)\n return guesses", "def train(self):\n\n # Step 1 - Obtain optimized weights for final model ------------------------------------------------------------\n\n t0 = time()\n\n # Check the training data for potential hazardous problems\n self.check_training_samples()\n\n opt_results = pd.DataFrame()\n kf_opt = StratifiedKFold(n_splits=self.kfold_cv, shuffle=True)\n rep_str, opt_str = '', ''\n\n if self.verbose:\n print('\\n\\n__ TRAINING STEP 1/2 \\_______________________________')\n print(' \\ Train with reverse %d-fold CV - %d time(s) /\\n' % (self.kfold_cv, self.n_repeat))\n\n for i_rep in range(self.n_repeat):\n\n if self.verbose:\n rep_str = '\\n_/--- Rep %d/%d' % (i_rep + 1, self.n_repeat)\n\n # Sample clf-net parameters to test\n param = [\n np.random.normal(loc=self.n_estimators,\n scale=self.n_estimators*self.param_tune_scale,\n size=self.kfold_cv),\n np.random.normal(loc=self.min_impurity_decrease,\n scale=self.min_impurity_decrease*self.param_tune_scale,\n size=self.kfold_cv),\n np.random.normal(loc=self.min_sample_leaf,\n scale=np.ceil(self.min_sample_leaf*self.param_tune_scale),\n size=self.kfold_cv),\n ]\n scores = list()\n\n for j_fold, (opt_idxs, cv_train_idxs) in enumerate(kf_opt.split(\n X=self.datas[self.train_idx].nidx_train,\n y=self.datas[self.train_idx].gen_labels(condense_labels=True))):\n\n if self.verbose:\n print(rep_str + ' - CV %d/%d ---\\_____\\n' % (j_fold + 1, self.kfold_cv))\n\n # set clf-net parameters\n self.n_estimators = param[0][j_fold]\n self.min_impurity_decrease = param[1][j_fold]\n self.min_sample_leaf = param[2][j_fold]\n self.clf_net = self.gen_rfc()\n\n # Split data\n opt_nidxs = np.array([self.datas[self.train_idx].nidx_train[i] for i in opt_idxs])\n cv_train_nidxs = np.array([self.datas[self.train_idx].nidx_train[i] for i in cv_train_idxs])\n\n # Partition train/eval nidx for reverse k-fold CV training\n _, _, opt_eval_nidxs, opt_train_nidxs = train_test_split(\n np.zeros(len(opt_nidxs)),\n opt_nidxs,\n test_size=1/(self.kfold_cv - 1),\n shuffle=True,\n stratify=self.datas[self.train_idx].gen_labels(nidxs=opt_nidxs, condense_labels=True))\n\n # Train clfs\n if self.verbose:\n print('\\n> Training base classifiers ...')\n self._train_clfs(train_nidxs=cv_train_nidxs)\n\n # Evaluate train with cv_train data\n if self.verbose:\n print('\\n> Evaluating base classifiers with cv_train partition ...')\n self.clfs_predict(nidxs_target=cv_train_nidxs, data=self.datas[self.train_idx], to_eval=True,\n eval_idx=self.train_idx)\n\n # Evaluate pre-optimization with opt_train data\n if self.verbose:\n print('\\n> Evaluating base classifiers with cv_eval partition ...')\n cv_res = self.clfs_predict(nidxs_target=opt_train_nidxs, data=self.datas[self.train_idx], to_eval=True,\n nidxs_train=cv_train_nidxs, eval_idx=self.train_idx)\n\n # Train clf-opt with opt_train partition results\n if self.verbose:\n print('\\n> Training clf-opt ...')\n self._train_clf_opt(predictions=cv_res)\n\n # Evaluate clf-opt with opt_eval partition\n if self.verbose:\n print('\\n> Evaluating optimized classifier with opt_test partition ...')\n opt_res = self.clfs_predict(nidxs_target=opt_eval_nidxs, data=self.datas[self.train_idx], to_eval=True,\n nidxs_train=cv_train_nidxs, eval_idx=self.train_idx)\n opt_results = opt_results.append(opt_res, ignore_index=True)\n\n # Append score to optimize clf-net parameter\n r = self.scores(opt_res['ytruth'], opt_res['ynet'])\n if not self.aim:\n scores.append(r['aucroc'])\n else:\n aim = self.aim.replace('hard', '')\n 
scores.append(r[aim])\n\n # reset link2featidx\n self.datas[self.train_idx].link2featidx = {}\n\n # Aggregate results from clf-net parameter search\n self._set_clf_net_param(param, scores)\n\n # STEP 2 - Train final model -----------------------------------------------------------------------------------\n # .clf_opt is already trained through previous iterations by using warm_start\n\n if self.verbose:\n print('\\n__ TRAINING STEP 2/2 \\_______________________________')\n print(' \\ Train final model with all train data /\\n')\n\n # Train clfs with all the data\n self._train_clfs()\n\n # Evaluate final clf-opt with all data\n print('\\n> Evaluating final classifier ...')\n self.clfs_predict(nidxs_target=self.datas[self.train_idx].nidx_train, to_eval=True, eval_idx=self.train_idx)\n print('** Because this is evaluating with the training data, classifier performances should be very high.')\n\n # Assign model ID - this is here so that if retrained, it would be known that it is not the same model anymore\n self.id = 'm_%s' % gen_id()\n\n if self.verbose:\n te = (time() - t0) / 60\n print('\\n Training took %.1f minutes on %d processors' % (te, os.cpu_count()))\n print('\\n__ __________')\n print(' \\ Training complete! /\\n')\n\n return opt_results", "def get_optimal_postprocess(loaders=None, runner=None, logdir: str = \"\"):\n loaders[\"infer\"] = loaders[\"valid\"]\n\n runner.infer(\n model=runner.model,\n loaders=loaders,\n callbacks=[\n CheckpointCallback(resume=f\"{logdir}/checkpoints/best.pth\"),\n InferCallback(),\n ],\n )\n valid_masks = []\n probabilities = np.zeros((2220, 350, 525))\n for i, (batch, output) in enumerate(\n zip(loaders[\"infer\"].dataset, runner.callbacks[0].predictions[\"logits\"])\n ):\n image, mask = batch\n for m in mask:\n if m.shape != (350, 525):\n m = cv2.resize(m, dsize=(525, 350), interpolation=cv2.INTER_LINEAR)\n valid_masks.append(m)\n\n for j, probability in enumerate(output):\n if probability.shape != (350, 525):\n probability = cv2.resize(\n probability, dsize=(525, 350), interpolation=cv2.INTER_LINEAR\n )\n probabilities[i * 4 + j, :, :] = probability\n\n class_params = {}\n for class_id in range(4):\n print(class_id)\n attempts = []\n for t in range(0, 100, 10):\n t /= 100\n for ms in [\n 0,\n 100,\n 1000,\n 5000,\n 10000,\n 11000,\n 14000,\n 15000,\n 16000,\n 18000,\n 19000,\n 20000,\n 21000,\n 23000,\n 25000,\n 27000,\n 30000,\n 50000,\n ]:\n masks = []\n for i in range(class_id, len(probabilities), 4):\n probability = probabilities[i]\n predict, num_predict = post_process(sigmoid(probability), t, ms)\n masks.append(predict)\n\n d = []\n for i, j in zip(masks, valid_masks[class_id::4]):\n if (i.sum() == 0) & (j.sum() == 0):\n d.append(1)\n else:\n d.append(dice(i, j))\n\n attempts.append((t, ms, np.mean(d)))\n\n attempts_df = pd.DataFrame(attempts, columns=[\"threshold\", \"size\", \"dice\"])\n\n attempts_df = attempts_df.sort_values(\"dice\", ascending=False)\n print(attempts_df.head())\n best_threshold = attempts_df[\"threshold\"].values[0]\n best_size = attempts_df[\"size\"].values[0]\n\n class_params[class_id] = (best_threshold, int(best_size))\n\n print(class_params)\n return class_params", "def getAllContributingAlgorithmsToBest(algnamelist, target_lb=1e-8, \n target_ub=1e2):\n \n print \"Generating best algorithm data from given algorithm list...\\n\", \n customgenerate(algnamelist)\n \n bestalgfilepath = 'bestCustomAlg'\n picklefilename = os.path.join(bestalgfilepath, 'bestalg.pickle')\n fid = open(picklefilename, 'r')\n bestalgentries = 
pickle.load(fid)\n fid.close()\n print 'loading of best algorithm data done.'\n \n countsperalgorithm = {}\n for (d, f) in bestalgentries:\n print 'dimension:', d, ', function:', f\n print f\n setofalgs = set(bestalgentries[d,f].algs)\n # pre-processing data to only look at targets >= target_lb:\n correctedbestalgentries = []\n for i in range(0,len(bestalgentries[d,f].target)):\n if ((bestalgentries[d,f].target[i] >= target_lb) and\n (bestalgentries[d,f].target[i] <= target_ub)):\n \n correctedbestalgentries.append(bestalgentries[d,f].algs[i])\n print len(correctedbestalgentries)\n # now count how often algorithm a is best for the extracted targets\n for a in setofalgs:\n # use setdefault to initialize with zero if a entry not existant:\n countsperalgorithm.setdefault((d, a), 0) \n countsperalgorithm[(d,a)] += correctedbestalgentries.count(a)\n \n selectedalgsperdimension = {}\n for (d,a) in sorted(countsperalgorithm):\n if not selectedalgsperdimension.has_key(d):\n selectedalgsperdimension[d] = []\n selectedalgsperdimension[d].append((countsperalgorithm[(d,a)], a))\n \n for d in sorted(selectedalgsperdimension):\n print d, 'D:'\n for (count, alg) in sorted(selectedalgsperdimension[d], reverse=True):\n print count, alg\n print '\\n'\n \n \n print \" done.\"", "def train_all_categories(features, outputs, model, params_grid=None):\n results = dict() \n for output_class in outputs.keys():\n output_class=\"ASSAULT\"\n output = outputs[output_class]\n\n \tdefault_model = model()\n default_score = cross_validate(features, output, default_model)\n\n\tprint(\"Fine Tuning for class %s\"%(output_class))\n\t\n\tfine_tune_model = fine_tune(features, output, model(), verbose=1, params_grid=params_grid,)\n best_model, fine_tune_score = fine_tune_model.best_estimator_, fine_tune_model.best_score_\n results[output_class] = [(default_model, default_score), (best_model, fine_tune_score)]\n\tbreak\n return results", "def Bayes_prediction(X, y, fold_number=10):\n D = X.shape[1]\n fold = KFold(n_splits=fold_number)\n cross_tab_all = []\n lamb_hat_all = []\n \n for train_index, test_index in fold.split(X, y):\n X_train, X_test = X.iloc[train_index], X.iloc[test_index]\n y_train, y_test = y.iloc[train_index], y.iloc[test_index]\n length = X_train.shape[0]\n pi_hat = y_train.mean()\n lamb_hat = np.zeros((2, D))\n \n for flag in range(2):\n for d in range(D):\n lamb_hat[flag][d] = (sum(X_train.iloc[i][d] * (y_train.iloc[i]==flag) for i in range(length))) / (sum(y_train.iloc[i]==flag for i in range(length)))\n\n y_pred = np.zeros(len(X_test))\n for i in range(len(X_test)):\n y_pred[i] = Bayes_classifier(pi_hat, X_test.iloc[i], lamb_hat)\n \n cross_tab = np.zeros((2, 2))\n for m in [0, 1]:\n for n in [0, 1]:\n cross_tab[m][n] = sum([(y_test.values[i]==m) & (y_pred[i]==n) for i in range(len(y_pred))]) \n \n cross_tab_all.append(cross_tab)\n lamb_hat_all.append(lamb_hat)\n \n cross_tab_all = sum(cross_tab_all)\n lamb_hat_all\n\n return lamb_hat_all, cross_tab_all", "def compute_accuracy(data, num_labels = 4): \n \n # Declarating list to store results\n accuracies = []\n \n for instance in data:\n \n # Declarating list to store individual results\n instance_accuracies = []\n \n for i in np.arange(num_labels):\n \n # Computing and storing accuracy for each class\n instance_accuracies.append(accuracy_score(instance[:, 2 + i], instance[:, 2 + i + 4]))\n \n # Storing mean results of the instance\n accuracies.append(np.mean(instance_accuracies))\n \n # Returning mean of all results\n return np.mean(accuracies)", "def 
random_objective(params, iteration, n_folds = N_FOLDS):\n\n start = timer()\n \n # Perform n_folds cross validation\n cv_results = lgb.cv(params, train_set, num_boost_round = 10000, nfold = n_folds, \n early_stopping_rounds = 100, metrics = 'l2', seed = 50, stratified=False)\n end = timer()\n best_score = np.max(cv_results['l2-mean'])\n \n # Loss must be minimized\n loss = 1 - best_score\n \n # Boosting rounds that returned the highest cv score\n n_estimators = int(np.argmax(cv_results['l2-mean']) + 1)\n \n # Return list of results\n return [loss, params, iteration, n_estimators, end - start]", "def test_205_boosted_goal_difference_for_home_models_with_thresholds(self):\n\n def create_model_fn(fn_team: str):\n team_stat = Stats.n_sample_stats_for_team(cursor=db_in_cursor,\n team=fn_team,\n last_sample_date=self.model_date,\n n_samples=self.num_samples,\n normalize_by_matches=True)\n\n\n return FeatureModel(\n input_data=[self.home_boost + team_stat.goal_diff, team_stat.goal_diff],\n id=team_stat.team_name\n )\n\n\n for match_date in played_home_OR_away_before_dates:\n ####\n #  Build model up to the day before the match\n ####\n self.home_boost = 0.72\n self.model_date = match_date - timedelta(days=1)\n self.num_samples = num_matches_in_season\n\n models: {str: FeatureModel} = FeatureModel.create_models_for_all_teams(\n model_making_fn=create_model_fn, entities=teams)\n\n self.persist_models(model_gen_date=self.model_date, model_description=self.shortDescription(), models=models)\n\n self.make_and_store_predictions_for_date(match_date=match_date, models=models, draw_range=(-0.792, 1.945))", "def train_and_evaluate_k_fold(data_X, data_y, params, algo_factory, folds=5):\n kf = KFold(n_splits=folds)\n\n accuracies = []\n for train_index, test_index in kf.split(data_X):\n learning_algo = algo_factory(params)\n train_data_X, train_data_y = data_X[train_index], data_y[train_index]\n test_data_X, test_data_y = data_X[test_index], data_y[test_index]\n # Train\n learning_algo.fit(train_data_X, train_data_y)\n # Evaluate\n accuracy = learning_algo.score(test_data_X, test_data_y)\n # Save\n accuracies.append(accuracy)\n # return average accuracy\n return np.mean(accuracies)", "def get_balancing_weight_factors(instances):\n sum_continue = sum(w for _, classification, w in instances\n if classification == Action.CONTINUE)\n sum_turn_peak = sum(w for _, classification, w in instances\n if classification == Action.TURN_PEAK)\n sum_backtrack = sum(w for _, classification, w in instances\n if classification == Action.BACKTRACK)\n\n min_sum = min(sum_continue, sum_turn_peak, sum_backtrack)\n factors = { Action.CONTINUE : multiplier_continue *\n float(min_sum) / sum_continue,\n Action.TURN_PEAK : multiplier_turn_peak *\n float(min_sum) / sum_turn_peak,\n Action.BACKTRACK : multiplier_backtrack *\n float(min_sum) / sum_backtrack }\n return factors", "def get_metrics(cfg, model, X_anchor, y_anchor, X_gal, y_gal, annoy_index, vec_dim):\n rank10_acc = 0\n rank5_acc = 0\n rank1_acc = 0\n avg_acc = 0\n vote_res = 0\n\n l2 = []\n for anchor in range(0, len(X_anchor)):\n res = get_result(get_image_features(cfg, model, X_anchor[anchor]), annoy_index)\n vote = defaultdict(int)\n # Accuracy\n correct = 0\n for i in res[:10]:\n vote[y_gal[i]] += 1\n\n max_key = max(vote, key=vote.get)\n if max_key == y_anchor[anchor]:\n vote_res += 1\n \n\n for recomm in res[:10]:\n if y_gal[recomm] == y_anchor[anchor]:\n correct += 1 \n\n avg_acc += correct/len(res)\n\n # Mean Average Precision\n l1 = []\n for recomm in res[:10]:\n if 
y_gal[recomm] == y_anchor[anchor]:\n correct += 1\n l1.append(1)\n else:\n l1.append(0)\n l2.append(l1) \n\n # Rank10 Accuracy\n for each_val in res[:10]:\n if y_gal[each_val] == y_anchor[anchor]:\n rank10_acc += 1\n break\n \n # Rank5 Accuracy\n for each_val in res[:5]:\n if y_gal[each_val] == y_anchor[anchor]:\n rank5_acc += 1\n break\n\n # Rank1 Accuracy\n for each_val in res[:1]:\n if y_gal[each_val] == y_anchor[anchor]:\n rank1_acc += 1\n break\n\n print(\"Avg acc is :: {avg_acc}\".format(avg_acc = avg_acc/len(X_anchor)))\n print(\"Rank 10 acc is :: {rank10_acc}\".format(rank10_acc = rank10_acc/len(X_anchor)))\n print(\"Rank 5 acc is :: {rank5_acc}\".format(rank5_acc = rank5_acc/len(X_anchor)))\n print(\"Rank 1 acc is :: {rank1_acc}\".format(rank1_acc = rank1_acc/len(X_anchor)))\n print(\"Mean Avg Precision is :: {mAP}\".format(mAP=mean_average_precision(l2)))\n print(\"Vote res :: \", vote_res/len(X_anchor))\n\n return rank1_acc/len(X_anchor), mean_average_precision(l2)", "def strategy(hand, num_die_sides):\n\n possible_holds = gen_all_holds(hand)\n best_val = 0\n best_score = 0\n dice_to_hold = []\n\n for hold in possible_holds:\n hold_val = expected_value(hold, NUM_DIE_SIDES, NUM_DICE - len(hold))\n\n hand_score = score(hold) + score(hand)\n if hand_score > best_val:\n # best_val = hold_val\n best_score = hand_score\n dice_to_hold = hold\n hand_copy = list(hand)\n sugg_hand = hand_copy.append(dice_to_hold)\n return (hand_score, sugg_hand)", "def get_num_boosting_rounds(self):\n return self.n_estimators", "def balence_classes(df, btol):\r\n #Find the least supported class and muliply by the tolerance coefficient to get max_count:\r\n ccounts = df['classification'].value_counts()\r\n max_count = np.min(ccounts.values) * btol\r\n #Create a new dataframe with balenced support:\r\n newdf = pd.DataFrame(columns=df.columns.values)\r\n for x in df.groupby('classification'):\r\n if x[1].shape[0] > max_count:\r\n newdf = newdf.append(x[1].sample(max_count).reset_index(drop=True))\r\n else:\r\n newdf = newdf.append(x[1].reset_index(drop=True))\r\n return newdf.reset_index(drop=True)", "def findBestScore():\n resultList = []\n BestScore = 0\n # iterate through different max_depths from 1 to 19\n for max_depth in range(1,20):\n dtree = tree.DecisionTreeClassifier(max_depth=max_depth)\n trainng_score = []\n testing_score = []\n # run 10 different cross-validation\n for index in range(10):\n # split into cross-validation sets.\n cv_data_train, cv_data_test, cv_target_train, cv_target_test = \\\n cross_validation.train_test_split(X_train, y_train, test_size=0.1)\n # fit the model using the cross-validation data\n # and tune parameter, such as max_depth here\n dtree = dtree.fit(cv_data_train, cv_target_train)\n dtree.feature_importances_\n trainng_score += [dtree.score(cv_data_train,cv_target_train)]\n testing_score += [dtree.score(cv_data_test,cv_target_test)]\n\n # Compute the average score for both traning and testing data\n trainng_avgScore = 1.0 * sum(trainng_score)/len(trainng_score)\n testing_avgScore = 1.0 * sum(testing_score)/len(testing_score)\n\n # find the best score\n if testing_avgScore > BestScore:\n BestScore = testing_avgScore\n best_depth = max_depth\n resultList += [[best_depth, trainng_avgScore, testing_avgScore]]\n print ('The best average score and the corresponding max_depth is: ')\n return BestScore, best_depth", "def train_and_score_bagging(network):\n\n train_predictions = pd.read_pickle('data/train_predictions.pkl.gz', compression='gzip')\n test_predictions = 
pd.read_pickle('data/test_predictions.pkl.gz', compression='gzip')\n\n train_actuals = pd.read_pickle('data/train_actuals.pkl.gz', compression='gzip')\n test_actuals = pd.read_pickle('data/test_actuals.pkl.gz', compression='gzip')\n\n\n train_x = np.array(train_predictions.values)\n train_y = train_actuals[0].values\n train_log_y = safe_log(train_y)\n test_x = np.array(test_predictions.values)\n test_y = test_actuals[0].values\n test_log_y = safe_log(test_y)\n\n model = compile_model(network)\n\n print('\\rNetwork')\n\n for property in network:\n print(property, ':', network[property])\n logging.info('%s: %s' % (property, network[property]))\n\n test = xgb.DMatrix(test_x)\n train = xgb.DMatrix(train_x, label=train_log_y)\n\n\n\n eval_set = [(test_x, test_log_y)]\n model.fit(train_x, train_log_y, early_stopping_rounds=20, eval_metric='mae', eval_set=eval_set,\n verbose=False)\n\n # eval_set = [(test, test_log_y)]\n # xgb.train(network, train, num_boost_round=5000, evals=eval_set, early_stopping_rounds=5)\n\n predictions = model.predict(test_x)\n # predictions = xgb.predict(test_x)\n inverse_predictions = safe_exp(predictions)\n score = mean_absolute_error(test_y, inverse_predictions)\n mape = safe_mape(test_y, inverse_predictions)\n\n print('\\rResults')\n\n best_round = xgb.best_iteration\n\n if np.isnan(score):\n score = 9999\n\n print('best round:', best_round)\n print('loss:', score)\n print('mape:', mape)\n print('-' * 20)\n\n logging.info('best round: %d' % best_round)\n logging.info('loss: %.4f' % score)\n logging.info('mape: %.4f' % mape)\n logging.info('-' * 20)\n\n eval_results({'xgb_predictions': {\n 'actual_y': test_y,\n 'y_predict': inverse_predictions\n }\n })\n\n range_results({\n 'xgb_predictions': inverse_predictions,\n }, test_y)", "def _mean_match_multiclass_accurate(\n mean_match_candidates,\n bachelor_preds,\n candidate_preds,\n candidate_values,\n random_state,\n hashed_seeds,\n):\n if mean_match_candidates == 0:\n imp_values = np.argmax(bachelor_preds, axis=1)\n\n else:\n _to_2d(bachelor_preds)\n _to_2d(candidate_preds)\n\n num_bachelors = bachelor_preds.shape[0]\n kd_tree = KDTree(candidate_preds, leafsize=16, balanced_tree=False)\n _, knn_indices = kd_tree.query(\n bachelor_preds, k=mean_match_candidates, workers=-1\n )\n\n # We can skip the random selection process if mean_match_candidates == 1\n if mean_match_candidates == 1:\n index_choice = knn_indices\n\n else:\n # Come up with random numbers 0-mean_match_candidates, with priority given to hashed_seeds\n if hashed_seeds is None:\n ind = random_state.randint(mean_match_candidates, size=(num_bachelors))\n else:\n ind = hashed_seeds % mean_match_candidates\n\n index_choice = knn_indices[np.arange(knn_indices.shape[0]), ind]\n\n imp_values = np.array(candidate_values)[index_choice]\n\n return imp_values", "def test_gbc(x, y, tune):\n # Perform classification without tuning. 
It was determined through trial-and-error\n # that log2 features produced the highest accuracy\n gbt = GradientBoostingClassifier(max_features=\"log2\")\n pipeline = create_pipeline(gbt)\n return accuracy(pipeline, x, y)", "def classify( self, data):\n\n\t\t\"*** YOUR CODE HERE ***\"\n\t\tguesses = np.zeros(len(data))\n\n\t\tfor k in range(len(self.classifiers)):\n\t\t\tclassifier = self.classifiers[k]\n\t\t\tguesses += np.dot(classifier.classify(data),self.alphas[k])\n\t\t\n\t\tguesses = np.sign(guesses)\n\t\tguesses[np.where(guesses == 0)[0]] = np.repeat(np.expand_dims(np.random.choice([-1,1]),axis=0),len(np.where(guesses == 0)[0]),axis=0)\n\t\treturn guesses\n\t\t# util.raiseNotDefined()", "def optimal_instances_per_class(df, factor=1.0, draw=False):\n # `bincount` returns the number of instances we have for each website\n counts = np.bincount(df.class_label.tolist())\n hist, bin_edges = np.histogram(counts)\n if draw:\n inst_counts = get_num_instances(df)\n inst_counts.hist(cumulative=-1, bins=100)\n plt.xlabel('Num of instances')\n plt.ylabel('Num of classes with x or more insts')\n plt.show()\n\n # scale the y-axis\n dx = bin_edges[1] - bin_edges[0]\n cum_hist = np.cumsum(hist) * dx\n\n # get the inverse cumulative sum\n inv_cum_hist = max(cum_hist) - cum_hist\n\n # compute the harmonic mean of tuples (y=f(x), x)\n hms = [harmonic_mean(x, y, factor) if y > 0 and x > 0 else 0\n for x, y in zip(bin_edges[1:], inv_cum_hist)]\n\n print(hms)\n\n # find index for max harmonic mean\n i = np.argmax(hms)\n\n # this is the optimal number of instances:\n opt_num_insts = int(bin_edges[i])\n\n # which leaves us with this number of classes:\n opt_num_classes = len(counts[counts >= opt_num_insts])\n\n if draw:\n print(\"Optimal number of instances:\", opt_num_insts)\n print(\"Optimal number of classes:\", opt_num_classes)\n\n return opt_num_insts, opt_num_classes", "def bin_class_grid_search(model_class, data_loader, hp_file,\n loader_args=None, model_args=None,\n folds=4, random_oversample=False):\n loader = data_loader(**loader_args)\n\n # load index values from main table\n data_ix = loader.get_index()\n\n # load hyperparameters from file\n with open(hp_file, 'r') as f:\n hyperparameters = ast.literal_eval(f.read())\n\n # create a list of dicts with hyperparameters for each experiment to run\n keys, values = zip(*hyperparameters.items())\n experiments = [dict(zip(keys, v)) for v in itertools.product(*values)]\n exp_df = pd.DataFrame(experiments)\n\n # prepare for storing confusion matrix and accuracy results to file\n cm = np.zeros((len(experiments), 2, 2), dtype=int)\n cm_df_cols = ['CM True Neg', 'CM False Pos', 'CM False Neg', 'CM True Pos']\n results_path = 'data/results/gridsearch_results_{:%Y%m%d_%H%M%S}.csv'.format(datetime.now())\n\n # fit model using k-fold verification\n kf = KFold(n_splits=folds, shuffle=True)\n\n for j, fold_indexes in enumerate(kf.split(data_ix)):\n # load train and validation data\n data_train, target_train, data_val, target_val = loader.load_train_val(fold_indexes[0], fold_indexes[1])\n\n # determine shape of input arrays\n input_shape = loader.get_input_shape()\n\n # oversample to correct for class imbalance\n if random_oversample:\n ros = RandomOverSampler()\n os_index, target_train = ros.fit_sample(np.arange(len(fold_indexes[0])).reshape(-1, 1), target_train)\n if isinstance(data_train, list):\n data_train = [data_train_part[os_index.squeeze()] for data_train_part in data_train]\n else:\n data_train = data_train[os_index.squeeze()]\n\n logging.debug('Fold {} of 
{}'.format(j + 1, folds))\n\n for i, experiment in enumerate(experiments):\n logging.debug(experiment)\n model = model_class(input_shape, **model_args, **experiment)\n # TODO: track oos accuracy per epoch\n history = model.fit(data_train, target_train, validation_data=(data_val, target_val))\n predict_val = model.predict(data_val)\n\n cm[i, :, :] = cm[i, :, :] + confusion_matrix(target_val, predict_val.round())\n cm_df = pd.DataFrame(cm.reshape((cm.shape[0], 4)), columns=cm_df_cols)\n results_df = exp_df.join(cm_df)\n # TODO: also store run time\n results_df.to_csv(results_path)", "def train_epoch(self):\n # We can't validate a winner for submissions generated by the learner,\n # so we will use a winner-less match when getting rewards for such states\n blank_match = {\"winner\":None}\n\n learner_submitted_actions = 0\n null_actions = 0\n\n # Shuffle match presentation order\n if(self.N_TEMP_TRAIN_MATCHES):\n path_to_db = \"../data/competitiveMatchData.db\"\n sources = {\"patches\":self.TEMP_TRAIN_PATCHES, \"tournaments\":[]}\n print(\"Adding {} matches to training pool from {}.\".format(self.N_TEMP_TRAIN_MATCHES, path_to_db))\n temp_matches = pool.match_pool(self.N_TEMP_TRAIN_MATCHES, path_to_db, randomize=True, match_sources=sources)[\"matches\"]\n else:\n temp_matches = []\n data = self.training_data + temp_matches\n\n shuffled_matches = random.sample(data, len(data))\n for match in shuffled_matches:\n for team in self.teams:\n # Process match into individual experiences\n experiences = mp.process_match(match, team)\n for pick_id, experience in enumerate(experiences):\n # Some experiences include NULL submissions (usually missing bans)\n # The learner isn't allowed to submit NULL picks so skip adding these\n # to the buffer.\n state,actual,_,_ = experience\n (cid,pos) = actual\n if cid is None:\n null_actions += 1\n continue\n # Store original experience\n self.replay.store([experience])\n self.step_count += 1\n\n # Give model feedback on current estimations\n if(self.step_count > self.observations):\n # Let the network predict the next action\n feed_dict = {self.ddq_net.online_ops[\"input\"]:[state.format_state()],\n self.ddq_net.online_ops[\"valid_actions\"]:[state.get_valid_actions()]}\n q_vals = self.ddq_net.sess.run(self.ddq_net.online_ops[\"valid_outQ\"], feed_dict=feed_dict)\n sorted_actions = q_vals[0,:].argsort()[::-1]\n top_actions = sorted_actions[0:4]\n\n if(random.random() < self.epsilon):\n pred_act = random.sample(list(top_actions), 1)\n else:\n # Use model's top prediction\n pred_act = [sorted_actions[0]]\n\n for action in pred_act:\n (cid,pos) = state.format_action(action)\n if((cid,pos)!=actual):\n pred_state = deepcopy(state)\n pred_state.update(cid,pos)\n r = get_reward(pred_state, blank_match, (cid,pos), actual)\n new_experience = (state, (cid,pos), r, pred_state)\n\n self.replay.store([new_experience])\n learner_submitted_actions += 1\n\n if(self.epsilon > 0.1):\n # Reduce epsilon over time\n self.epsilon -= self.eps_decay_rate\n\n # Use minibatch sample to update online network\n if(self.step_count > self.pre_training_steps):\n self.train_step()\n\n if(self.step_count % self.target_update_frequency == 0):\n # After the online network has been updated, update target network\n _ = self.ddq_net.sess.run(self.ddq_net.target_ops[\"target_update\"])\n\n # Get training loss, training_acc, and val_acc to return\n loss, train_acc = self.validate_model(self.training_data)\n _, val_acc = self.validate_model(self.validation_data)\n return (loss, train_acc, val_acc)", "def 
algo_CVmetrics(classifier_object, X_train, Y_train):\r\n \r\n cv = RepeatedStratifiedKFold(n_splits = 5, n_repeats = 3, random_state = seed_custom)\r\n \r\n metricslist = {'f2': make_scorer(metrics.fbeta_score, beta = 2), \r\n 'balacc': make_scorer(metrics.balanced_accuracy_score),\r\n 'precision': make_scorer(metrics.precision_score),\r\n 'recall': make_scorer(metrics.recall_score)}\r\n \r\n cv_results = cross_validate(classifier_object, X_train, Y_train, cv = cv, scoring = metricslist, return_estimator = True)\r\n \r\n f2_mean = np.mean(cv_results['test_f2'])\r\n f2_std = np.std(cv_results['test_f2'])\r\n \r\n balacc_mean = np.mean(cv_results['test_balacc'])\r\n balacc_std = np.std(cv_results['test_balacc'])\r\n\r\n precision_mean = np.mean(cv_results['test_precision'])\r\n precision_std = np.std(cv_results['test_precision'])\r\n \r\n recall_mean = np.mean(cv_results['test_recall'])\r\n recall_std = np.std(cv_results['test_recall'])\r\n \r\n scorebox = pd.DataFrame(np.zeros((1,8)), columns = list(['F2-Score Mean', 'F2-Score STD', 'Balanced Accuracy Mean', 'Balanced Accuracy STD',\r\n 'Precision Mean', 'Precision STD', 'Recall Mean', 'Recall STD']))\r\n \r\n scorebox.iloc[0,0] = f2_mean\r\n scorebox.iloc[0,1] = f2_std\r\n scorebox.iloc[0,2] = balacc_mean\r\n scorebox.iloc[0,3] = balacc_std\r\n scorebox.iloc[0,4] = precision_mean\r\n scorebox.iloc[0,5] = precision_std\r\n scorebox.iloc[0,6] = recall_mean\r\n scorebox.iloc[0,7] = recall_std \r\n \r\n scorebox = np.round(scorebox, 3)\r\n \r\n print(\"Model has a mean CV balanced accuracy of {0}, (Std: {1})\".format(round(balacc_mean,3), round(balacc_std,3)))\r\n print(\"Model has a mean CV F2_Score of {0}, (Std: {1})\".format(round(f2_mean,3), round(f2_std,3)))\r\n print(\"Model has a mean CV Precision of {0}, (Std: {1})\".format(round(precision_mean,3), round(precision_std,3)))\r\n print(\"Model has a mean CV Recall of {0}, (Std: {1})\".format(round(recall_mean,3), round(recall_std,3)))\r\n \r\n return scorebox", "def calculate_average_run_accuracy(self):\n overall_true_rate, true_positive_rate, true_negative_rate, false_positive_rate, false_negative_rate, true_positive_rate_cutoff, true_negative_rate_cutoff, \\\n false_positive_rate_cutoff, false_negative_rate_cutoff, unclassified_cutoff, matthews_correlation_coefficient, brier_score, auc_score, fit_time, hmeasure = [0] * 15\n balanced_accuracy_arr = []\n auc_arr = []\n hmeasure_arr = []\n brier_score_arr = []\n fit_time_arr = []\n mcc_arr = []\n true_positive_arr = []\n true_negative_arr = []\n false_positive_arr = []\n false_negative_arr = []\n\n count = 0\n for result_dictionary in self.errors:\n for z in range(len(result_dictionary[\"balanced_accuracy_arr\"])):\n overall_true_rate += result_dictionary[\"balanced_accuracy_arr\"][z]\n true_positive_rate += result_dictionary[\"true_positive_rate_arr\"][z]\n true_negative_rate += result_dictionary[\"true_negative_rate_arr\"][z]\n false_positive_rate += result_dictionary[\"false_positive_rate_arr\"][z]\n false_negative_rate += result_dictionary[\"false_negative_rate_arr\"][z]\n matthews_correlation_coefficient += result_dictionary[\"mcc_arr\"][z]\n auc_score += result_dictionary[\"auc_arr\"][z]\n brier_score += result_dictionary[\"brier_score_arr\"][z]\n fit_time += result_dictionary[\"fit_time_arr\"][z]\n hmeasure += result_dictionary[\"hmeasure_arr\"][z]\n count += 1\n\n true_positive_rate_cutoff += result_dictionary[\"avg_true_positive_rate_with_prob_cutoff\"]\n true_negative_rate_cutoff += 
result_dictionary[\"avg_true_negative_rate_with_prob_cutoff\"]\n false_positive_rate_cutoff += result_dictionary[\"avg_false_positive_rate_with_prob_cutoff\"]\n false_negative_rate_cutoff += result_dictionary[\"avg_false_negative_rate_with_prob_cutoff\"]\n unclassified_cutoff += result_dictionary[\"avg_false_negative_rate_with_prob_cutoff\"]\n balanced_accuracy_arr += result_dictionary[\"balanced_accuracy_arr\"]\n hmeasure_arr += result_dictionary[\"hmeasure_arr\"]\n auc_arr += result_dictionary[\"auc_arr\"]\n brier_score_arr += result_dictionary[\"brier_score_arr\"]\n fit_time_arr += result_dictionary[\"fit_time_arr\"]\n mcc_arr += result_dictionary[\"mcc_arr\"]\n true_positive_arr += result_dictionary[\"true_positive_rate_arr\"]\n true_negative_arr += result_dictionary[\"true_negative_rate_arr\"]\n false_positive_arr += result_dictionary[\"false_positive_rate_arr\"]\n false_negative_arr += result_dictionary[\"false_negative_rate_arr\"]\n\n avg_run_results = [None] * 31\n avg_run_results[0] = matthews_correlation_coefficient / float(count)\n avg_run_results[1] = brier_score / float(count)\n avg_run_results[2] = overall_true_rate / float(count)\n avg_run_results[3] = true_positive_rate / float(count)\n avg_run_results[4] = true_negative_rate / float(count)\n avg_run_results[5] = false_positive_rate / float(count)\n avg_run_results[6] = false_negative_rate / float(count)\n avg_run_results[7] = true_positive_rate_cutoff / float(len(self.errors))\n avg_run_results[8] = true_negative_rate_cutoff / float(len(self.errors))\n avg_run_results[9] = false_positive_rate_cutoff / float(len(self.errors))\n avg_run_results[10] = false_negative_rate_cutoff / float(len(self.errors))\n avg_run_results[11] = unclassified_cutoff / float(len(self.errors))\n avg_run_results[12] = fit_time / float(count)\n avg_run_results[14] = balanced_accuracy_arr\n avg_run_results[15] = auc_score / float(count)\n avg_run_results[16] = auc_arr\n avg_run_results[17] = brier_score_arr\n avg_run_results[18] = fit_time_arr\n avg_run_results[19] = mcc_arr\n avg_run_results[13] = self.calculate_std_deviation(balanced_accuracy_arr)\n avg_run_results[20] = self.calculate_std_deviation(mcc_arr)\n avg_run_results[21] = self.calculate_std_deviation(brier_score_arr)\n avg_run_results[22] = self.calculate_std_deviation(auc_arr)\n avg_run_results[23] = self.calculate_std_deviation(fit_time_arr)\n avg_run_results[24] = self.calculate_std_deviation(true_positive_arr)\n avg_run_results[25] = self.calculate_std_deviation(true_negative_arr)\n avg_run_results[26] = self.calculate_std_deviation(false_positive_arr)\n avg_run_results[27] = self.calculate_std_deviation(false_negative_arr)\n avg_run_results[28] = hmeasure / float(count)\n avg_run_results[29] = self.calculate_std_deviation(hmeasure_arr)\n avg_run_results[30] = hmeasure_arr\n\n return avg_run_results", "def _mean_match_multiclass_fast(\n mean_match_candidates, bachelor_preds, random_state, hashed_seeds\n):\n if mean_match_candidates == 0:\n imp_values = np.argmax(bachelor_preds, axis=1)\n\n else:\n num_bachelors = bachelor_preds.shape[0]\n\n # Turn bachelor_preds into discrete cdf:\n bachelor_preds = bachelor_preds.cumsum(axis=1)\n\n # Randomly choose uniform numbers 0-1\n if hashed_seeds is None:\n # This is the fastest way to adjust for numeric\n # imprecision of float16 dtype. 
Actually ends up\n # barely taking any time at all.\n bp_dtype = bachelor_preds.dtype\n unif = np.minimum(\n random_state.uniform(0, 1, size=num_bachelors).astype(bp_dtype),\n bachelor_preds[:, -1],\n )\n else:\n unif = []\n for i in range(num_bachelors):\n np.random.seed(seed=hashed_seeds[i])\n unif.append(np.random.uniform(0, 1, size=1)[0])\n unif = np.array(unif)\n\n # Choose classes according to their cdf.\n # Distribution will match probabilities\n imp_values = np.array(\n [\n np.searchsorted(bachelor_preds[i, :], unif[i])\n for i in range(num_bachelors)\n ]\n )\n\n return imp_values", "def evaluate(clf, dataset, feature_list, features, labels, num_iter, params):\n\n features_train, features_test, labels_train, labels_test = \\\n train_test_split(features, labels, test_size=0.3, random_state=42)\n\n\n\n precision_values = []\n recall_values = []\n accuracy_values = []\n print clf\n for i in xrange(0, num_iter):\n #print params\n clf = GridSearchCV(clf, params)\n clf.fit(features_train, labels_train)\n print '*****************************'\n print clf.best_estimator_\n print clf.best_params_\n\n clf = clf.best_estimator_\n #test_classifier(clf, dataset, feature_list)\n pred = clf.predict(features_test)\n precision_values.append(precision_score(labels_test, pred))\n recall_values.append(recall_score(labels_test, pred))\n accuracy_values.append(accuracy_score(labels_test, pred))\n print 'Recall score: ', mean(recall_values)\n print 'Precision score: ', mean(precision_values)\n print 'Accuracy score: ' , mean(accuracy_values)", "def get_balancing_probabilities(instances):\n count_continue = sum(classification == Action.CONTINUE \n for _, classification in instances)\n count_turn_peak = sum(classification == Action.TURN_PEAK\n for _, classification in instances)\n count_backtrack = sum(classification == Action.BACKTRACK\n for _, classification in instances)\n\n min_count = min(count_continue, count_turn_peak, count_backtrack)\n probabilities = { Action.CONTINUE : multiplier_continue * \n float(min_count) / count_continue,\n Action.TURN_PEAK : multiplier_turn_peak * \n float(min_count) / count_turn_peak,\n Action.BACKTRACK : multiplier_backtrack *\n float(min_count) / count_backtrack }\n return probabilities", "def best_threshold_from_folds(y_tuples, scoring=f1_score, step_size=0.01, maximize=True):\n thresholds, scores = [], []\n for _, y_true, y_pred in y_tuples:\n t, s = find_best_threshold(y_true, y_pred, step_size, scoring, maximize=maximize)\n thresholds.append(t)\n scores.append(s)\n\n mean_threshold = np.mean(thresholds)\n mean_score = np.mean([score_for_threshold(y, y_hat, scoring, mean_threshold) for _, y, y_hat in y_tuples])\n return mean_threshold, mean_score", "def findRFBestN():\n resultList = []\n BestScore = 0\n nList = [ n for n in range(1,200) if n%10 == 0]\n for n in nList:\n rforest = ensemble.RandomForestClassifier(max_depth=5, n_estimators=n)\n trainng_score = []\n testing_score = []\n # run 10 different cross-validation\n for index in range(10):\n # split into cross-validation sets.\n cv_data_train, cv_data_test, cv_target_train, cv_target_test = \\\n cross_validation.train_test_split(X_train, y_train, test_size=0.1)\n\n # fit the model using the cross-validation data\n # and tune parameter, such as max_depth here\n rforest = rforest.fit(cv_data_train, cv_target_train)\n trainng_score += [rforest.score(cv_data_train,cv_target_train)]\n testing_score += [rforest.score(cv_data_test,cv_target_test)]\n\n # Compute the average score for both traning and testing data\n 
trainng_avgScore = 1.0 * sum(trainng_score)/len(trainng_score)\n testing_avgScore = 1.0 * sum(testing_score)/len(testing_score)\n\n # find the best score\n if testing_avgScore > BestScore:\n BestScore = testing_avgScore\n best_n = n\n resultList += [[n, trainng_avgScore, testing_avgScore]]\n print ('The best average score and the corresponding n_estimator is: ')\n return BestScore, best_n", "def evaluation_detections(thresholds, bboxes_gt, bboxes_detected, num_instances):\r\n TP = np.zeros(len(thresholds), dtype=int)\r\n FP = np.zeros(len(thresholds), dtype=int)\r\n\r\n scores_detections = [[] for i in range(len(thresholds))]\r\n # scores_detections is pair of values [result, confidence] where result is true if the example is correctly\r\n # classified and confidence is the confidence of the prediction. It's used to compute the precision-recall\r\n # curve. Confidence score is random if the predicted scores do not belong to a detector.\r\n\r\n for key in bboxes_detected.keys():\r\n for bbox_noisy in bboxes_detected[key]:\r\n if key in bboxes_gt: # if we have detected stuff and it is in the gt\r\n scores = [bbox_iou(bbox_noisy[1:5], bbox[1:5]) for bbox in bboxes_gt[key]]\r\n max_score = max(scores)\r\n for i, threshold in enumerate(thresholds):\r\n if max_score > threshold:\r\n TP[i] += 1\r\n # we give correct boxes a slightly higher confidence score\r\n scores_detections[i].append([1, bbox_noisy[5]])\r\n else:\r\n FP[i] += 1\r\n scores_detections[i].append([0, bbox_noisy[5]])\r\n else: # if we have detected stuff and it is not in the gt\r\n for i, threshold in enumerate(thresholds):\r\n FP[i] += 1\r\n\r\n FN = num_instances - TP # number of instances not detected\r\n return TP, FP, FN, np.array(scores_detections)", "def runUCB(self):\n \n #Init vars, N number of user sessions, d=number of ads\n N = self.myDS.shape[0] \n d = self.myDS.shape[1] \n total_reward=0\n self.opt_selected=[]\n \n #Declare vars to count to calculate upper bounds\n numbers_of_selections = [0] * d\n sums_of_rewards = [0] * d\n \n #Calcultate confidance bounds\n for n in range(0,N):\n ad=0\n max_upper_bound=0\n for i in range (0,d):\n if (numbers_of_selections[i]>0):\n average_reward=sums_of_rewards[i]/numbers_of_selections[i]\n delta_i=math.sqrt(3/2 * math.log(n+1) / numbers_of_selections[i])\n upper_bound=average_reward+delta_i\n else:\n upper_bound=1e400\n if upper_bound>max_upper_bound:\n max_upper_bound=upper_bound\n ad = i\n self.opt_selected.append(ad)\n numbers_of_selections[ad]=numbers_of_selections[ad]+1\n reward=self.myDS.values[n,ad]\n sums_of_rewards[ad]=sums_of_rewards[ad]+reward\n total_reward=total_reward+reward\n \n return total_reward", "def BayesPaperStats(maxIters, numRuns):\n\n comm = MPI.COMM_WORLD\n rank = comm.Get_rank()\n\n assert comm.Get_size() == numRuns, \"Please ensure there is one process running per run i.e \" + str(numRuns) + \" processes.\"\n \n problemBounds = {\"Bfield\": choco.uniform(10, 1300), \"T\": choco.uniform(50, 230), \"Btheta\": choco.uniform(0, 90), \"Etheta\": choco.uniform(0, 90)}\n\n # The target for each algorithm. 
This was determined by using the values in the literature, so there is clearly some deviation either due to the detuning or computation.\n globalFoM = 1.033\n\n if rank == 0:\n timeList = []\n iterationList = []\n\n # Set up the database for the chocolate optimiser.\n connection = choco.SQLiteConnection(\"sqlite:///bayes_paper_\" + str(rank) + \"_db.db\")\n\n # Define which solver will be used.\n solver = choco.Bayes(connection, problemBounds, utility_function = \"ei\", n_bootstrap = int(np.ceil(maxIters/10)), clear_db = True)\n\n # Clear the database. TODO: To do this?\n connection.clear()\n\n # Start timing.\n startTime = time.time()\n timeElapsed = None\n iterationSuccess = None\n\n # Start optimisation.\n for iteration in range(maxIters):\n\n # Make one suggestion.\n try:\n token, nextParams = solver.next()\n except:\n print(\"Error suggesting a new point. Here are the last set of parameters sampled:\")\n print(str(nextParams))\n print(\"Iteration number: \" + str(iteration))\n continue\n\n # Check what FoM this gives. Go negative as this is a minimisation routine.\n fEval = abs(FitnessPaper(**nextParams))\n\n # Update best FoM.\n if fEval >= globalFoM:\n # The algorithm has managed to surpass or equal the paper value.\n iterationSuccess = iteration\n timeElapsed = time.time() - startTime\n \n if rank == 0:\n iterationList.append(iterationSuccess)\n timeList.append(timeElapsed)\n\n break\n \n # Tell the optimiser about the result.\n solver.update(token, fEval)\n\n # Run complete. Send results to main process. Tags are unique identifiers.\n if rank != 0:\n comm.send(timeElapsed, dest = 0, tag = 1)\n comm.send(iterationSuccess, dest = 0, tag = 2)\n\n # Wait for all the processes to end.\n comm.Barrier()\n\n if rank == 0:\n # Aggregate the data.\n for process in range(comm.Get_size() - 1):\n # Get the data.\n individualTime = None\n individualTime = comm.recv(individualTime, source = process + 1, tag = 1)\n\n individualIter = None\n individualIter = comm.recv(individualIter, source = process + 1, tag = 2)\n\n if individualIter is not None:\n # Both values must therefore be non-null.\n iterationList.append(individualIter)\n timeList.append(individualTime)\n\n avgRuntime = np.average(timeList)\n avgIters = np.average(iterationList)\n try:\n\n fastestTime = np.min(timeList)\n\n except ValueError:\n \n # List is empty.\n fastestTime = float('NaN')\n\n numSuccess = len(iterationList)\n successRate = numSuccess/numRuns\n\n print(\"Bayesian optimisation paper testing complete! 
Here are the stats:\")\n print(\"Number of successful runs: \" + str(numSuccess) + \" (Success rate of \" + str(successRate) + \")\")\n print(\"Average iterations required for success: \" + str(avgIters))\n print(\"Average time required for success: \" + str(avgRuntime))\n print(\"Fastest convergence time: \" + str(fastestTime))\n print(\"------------------------------------------------------------------------------------------------------------------\")\n \n return", "def get_combinations(classes_folder='./data/CASIA1_classes_by_unbalanced_kmeans/', \n originals='./data/CASIA1_originals', fakes_ela='./data/CASIA1_fakes_ela'):\n classes_ = []\n for i in range(20):\n classes_.append('{}{}' .format(classes_folder, i+1))\n\n medians_ = [0,3,5,7,9,11,13,15,17,19]\n\n iterations_ = []\n for i in range(21):\n iterations_.append(i)\n\n threshold_ = []\n for i in range(40):\n threshold_.append(i)\n\n for i, item in enumerate(classes_):\n fakes_list = os.listdir(item)\n fakes = load_fakes(fakes_list, item, originals)\n\n best = 0\n best_median_filter_size = 0\n best_number_of_iterations = 0\n best_thresh = 0\n for x, median_filter_size in enumerate(medians_):\n for y, number_of_iterations in enumerate(iterations_):\n for t, thresh in enumerate(threshold_):\n whole_score = 0\n for e, elem in enumerate(fakes):\n image = cv2.imread(os.path.join(fakes_ela, elem.path.split('\\\\')[-1]))\n\n if thresh > 0:\n image_ = pywt.threshold(image, thresh, 'soft')\n image = cv2.normalize(image_, image, 0, 1, cv2.NORM_MINMAX)\n image = 255 * image\n image = image.astype(np.uint8)\n\n image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n \n image = cv2.inRange(image, np.array([0,0,0]), np.array([180,255,60]))\n image = cv2.bitwise_not(image)\n\n if median_filter_size > 0:\n image = cv2.medianBlur(image, median_filter_size)\n\n kernel = np.ones((3, 3), np.uint8)\n image = cv2.morphologyEx(image, cv2.MORPH_GRADIENT, kernel, iterations=number_of_iterations)\n\n cnts = cv2.findContours(image.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n cnts = imutils.grab_contours(cnts)\n\n max_idx = 0\n max_pnts = 0\n for u, ulem in enumerate(cnts):\n if cv2.contourArea(ulem) < max_pnts:\n continue\n else:\n max_idx = u\n max_pnts = cv2.contourArea(ulem)\n\n if len(cnts) > 0:\n (x, y, w, h) = cv2.boundingRect(cnts[max_idx])\n pred = {\n \"x\": x,\n \"y\": y,\n \"w\": w,\n \"h\": h\n }\n else:\n pred = None\n\n whole_score += evaluate_augmentation_fit(pred, elem)\n if best < whole_score:\n best = whole_score\n best_median_filter_size = median_filter_size\n best_number_of_iterations = number_of_iterations\n best_thresh = thresh\n print(\"Class: {}; MedianFilterSize: {}; Iterations: {}; Thresh: {}; Score: {}\" .format(item, median_filter_size, number_of_iterations, thresh, round(whole_score, 2)))\n print(\"###########\")\n print(\"Best: {} -> {} % ({}, {}, {})\" .format(round(best, 2), round((best/len(fakes)), 2), best_median_filter_size, best_number_of_iterations, best_thresh))\n print(\"###########\")", "def learn_with_bootstrapping(self, sample_count=10000):\n tic = time.clock()\n training_set_size = 150 # TODO: change to 1000, 500 or something\n sample_pool = self.training_stream.extract_training_patches(sample_count, negative_ratio=1.)\n # initialize weights\n weighted_patches = []\n for patch in sample_pool: # weight all patches: training pool P\n weighted_patches.append([patch, 1. 
/ len(sample_pool)])\n # if patch.label == +1:\n # pos_patch = patch # PRESENTATION, REPORT\n # shuffle training pool\n weighted_patches = random_sample_weighted_patches(weighted_patches, len(weighted_patches))\n\n if self.algorithm == 'adaboost': # Shuffle the training data\n training_data = random_sample_weighted_patches(weighted_patches, len(weighted_patches))\n elif self.algorithm == 'wald': # Sample training_set_size samples\n training_data = random_sample_weighted_patches(weighted_patches, training_set_size)\n\n for t in range(self.layers): # choose the weak classifier with the minimum error\n print \"Learn with bootstrapping using %s, layer #%d\" % (self.algorithm.title(), t+1)\n\n if self.algorithm == 'adaboost':\n h_t = self._fetch_best_weak_classifier(weighted_patches)\n elif self.algorithm == 'wald':\n h_t = self._fetch_best_weak_classifier(training_data)\n # h_t.visualize(pos_patch) # PRESENTATION, REPORT\n self.classifiers.append(copy.deepcopy(h_t)) # add it to the strong classifier\n\n if self.algorithm == 'adaboost':\n self.classifiers[-1].update_alpha(weighted_patches)\n weighted_patches = self._adaboost_reweight(weighted_patches, t)\n elif self.algorithm == 'wald':\n kde_n, kde_p, xs_n, xs_p = self._estimate_ratios(training_data, t)\n # find decision thresholds for the strong classifier\n self._tune_thresholds(kde_n, kde_p, xs_n, xs_p, t)\n # throw away training samples that fall in our thresholds\n weighted_patches = self._reweight_and_discard_irrelevant(weighted_patches, t)\n # sample new training data\n training_data = random_sample_weighted_patches(weighted_patches, training_set_size)\n if len(training_data) == 0:\n print \"no more training data!\"\n break\n toc = time.clock()\n print toc - tic\n print self", "def probs_for_breeds_h5(breed_predictions):\n predictions = []\n for (x), value in np.ndenumerate(breed_predictions[0]):\n predictions.append([x[0], str(dog_names[x[0]]), value])\n sorted_predictions = sorted(predictions, key=itemgetter(2), reverse=True)\n for prediction in sorted_predictions[:5]:\n print dog_names[prediction[0]] + ': ' + str(prediction[2])", "def compute_recall(data, num_labels = 4): \n \n # Declarating list to store results\n recalls = []\n \n for instance in data:\n \n # Declarating list to store individual results\n instance_recalls = []\n \n for i in np.arange(num_labels):\n \n # Computing and storing accuracy for each class\n instance_recalls.append(recall_score(instance[:, 2 + i], instance[:, 2 + i + 4]))\n \n # Storing mean results of the instance\n recalls.append(np.mean(instance_recalls))\n \n # Returning mean of all results\n return np.mean(recalls)", "def test_200_boosted_goal_difference_for_home_models_with_thresholds(self):\n\n def create_model_fn(fn_team: str):\n team_stat = Stats.n_sample_stats_for_team(cursor=db_in_cursor,\n team=fn_team,\n last_sample_date=self.model_date,\n n_samples=self.num_samples,\n normalize_by_matches=True)\n\n\n return FeatureModel(\n input_data=[self.home_boost + team_stat.goal_diff, team_stat.goal_diff],\n id=team_stat.team_name\n )\n\n\n for match_date in played_home_OR_away_before_dates:\n ####\n #  Build model up to the day before the match\n ####\n self.home_boost = 0.72\n self.model_date = match_date - timedelta(days=1)\n self.num_samples = num_matches_in_season\n\n models: {str: FeatureModel} = FeatureModel.create_models_for_all_teams(\n model_making_fn=create_model_fn, entities=teams)\n\n self.persist_models(model_gen_date=self.model_date, model_description=self.shortDescription(), 
models=models)\n\n self.make_and_store_predictions_for_date(match_date=match_date, models=models, draw_range=(0.3, 0.9))", "def xgboost_cv(self, nsplits: int = 5) -> (float, float, float):\r\n x_train, x_test, y_train, y_test = train_test_split(self.x, self.y, test_size=0.2)\r\n params = {\r\n \"max_depth\": [2, 3, 5, 8],\r\n \"eta\": [0.01, 0.05, 0.1, 0.15, 0.2],\r\n \"objective\": ['binary:logistic'],\r\n \"sumsample\": [0.5, 0.7, 1],\r\n \"colsample_bytree\": [0.5, 0.7, 1],\r\n \"n_estimators\": [50, 100, 200, 500],\r\n }\r\n \"\"\"\r\n fit_params = {\r\n \"early_stopping_rounds\": 20,\r\n \"eval_metric\": \"error\",\r\n \"eval_set\": [(x_test, y_test)]\r\n }\r\n \"\"\"\r\n model = xgb.XGBClassifier()\r\n gridcv = GridSearchCV(model, params, cv=nsplits)\r\n gridcv.fit(x_train, y_train) # , **fit_params)\r\n best_params = gridcv.best_params_\r\n cv = KFold(n_splits=nsplits)\r\n acc_result = []\r\n for train, test in cv.split(self.x):\r\n x_train = self.x[train, :]\r\n x_test = self.x[test, :]\r\n y_train = self.y[train]\r\n y_test = self.y[test]\r\n model = xgb.XGBClassifier(**best_params).fit(x_train, y_train)\r\n \"\"\"\r\n x_t, x_v, y_t, y_v = train_test_split(x_train, y_train, test_size=0.2)\r\n model = xgb.XGBClassifier(**best_params).fit(x_t, y_t, eval_metric=\"error\", eval_set=[(x_v, y_v)],\r\n early_stopping_rounds=20)\r\n \"\"\"\r\n y_predict = model.predict(x_test)\r\n acc_result.append(binary_acc(y_test, y_predict))\r\n return np.mean(acc_result), np.std(acc_result), best_params", "def compute_strategies_given_max_winning_bid(max_winning_bid, bidders):\r\n bid_start_points = [[-1.0] * len(bidder.values) for bidder in bidders]\r\n bid_end_points = [[-1.0] * len(bidder.values) for bidder in bidders]\r\n F_jump_points = [[(max_winning_bid, 1.0)] for _ in range(len(bidders))]\r\n\r\n # current state\r\n is_active = [0] * len(bidders)\r\n remaining_prob = [-1.0] * len(bidders)\r\n cur_bid = max_winning_bid\r\n cur_value_idx = [len(bidder.values) - 1 for bidder in bidders]\r\n\r\n def cur_value(bidder_idx):\r\n if cur_value_idx[bidder_idx] >= 0:\r\n return bidders[bidder_idx].values[cur_value_idx[bidder_idx]]\r\n else:\r\n return None\r\n\r\n def next_candidate():\r\n \"\"\"the next bidder to enter the bidding set\"\"\"\r\n candidate_bidder = -1\r\n candidate_value = -1\r\n for n in range(len(bidders)):\r\n if (is_active[n] == 0 and cur_value(n) is not None\r\n and cur_value(n) > max(candidate_value, cur_bid)):\r\n candidate_value = bidders[n].values[cur_value_idx[n]]\r\n candidate_bidder = n\r\n return candidate_value, candidate_bidder\r\n\r\n while True:\r\n # compute bidding set\r\n max_inactive_value, entering_bidder = next_candidate()\r\n while entering_bidder >= 0:\r\n active_values = [cur_value(j) for j in range(len(bidders)) if is_active[j] == 1]\r\n if ((sum(is_active) < 2 or\r\n h(cur_bid, cur_value(entering_bidder), active_values) >= 0) and\r\n not max_inactive_value > cur_bid > max_inactive_value - 1e-8):\r\n is_active[entering_bidder] = 1\r\n bid_start_points[entering_bidder][cur_value_idx[entering_bidder]] = cur_bid\r\n remaining_prob[entering_bidder] = bidders[entering_bidder].prob[cur_value_idx[entering_bidder]]\r\n max_inactive_value, entering_bidder = next_candidate()\r\n else:\r\n break\r\n\r\n # terminates computation\r\n if sum(is_active) < 2:\r\n for i in range(len(bidders)):\r\n if is_active[i] == 1:\r\n bid_end_points[i][cur_value_idx[i]] = cur_bid\r\n break\r\n\r\n # compute next change point\r\n exiting_criteria = H\r\n exp_exiting_criteria = exp_H\r\n 
change_points = [-1e8] * len(bidders)\r\n active_values = [cur_value(j) for j in range(len(bidders)) if is_active[j] == 1 and cur_value(j) is not None]\r\n for i in range(len(bidders)):\r\n if cur_value(i) is None:\r\n continue\r\n if is_active[i] == 0:\r\n try:\r\n change_points[i] = optimize.brentq(\r\n lambda x: h(x, cur_value(i), active_values, poly_form=True),\r\n -1e8,\r\n cur_bid)\r\n except ValueError:\r\n change_points[i] = -1e8\r\n else:\r\n if sum(bidders[i].prob[:cur_value_idx[i]]) == 0:\r\n change_points[i] = -1e8\r\n else:\r\n try:\r\n change_points[i] = optimize.brentq(\r\n lambda x: (exiting_criteria(cur_bid, cur_value(i), active_values) -\r\n exiting_criteria(x, cur_value(i), active_values) -\r\n (np.log(sum(bidders[i].prob[:cur_value_idx[i]]) + remaining_prob[i]) -\r\n np.log(sum(bidders[i].prob[:cur_value_idx[i]])))),\r\n -1e8,\r\n cur_bid)\r\n except ValueError:\r\n change_points[i] = -1e8\r\n\r\n # update state\r\n next_change = max(change_points)\r\n changing_bidder = np.argmax(change_points)\r\n for i in range(len(bidders)):\r\n if i == changing_bidder:\r\n continue\r\n if is_active[i] == 1:\r\n remaining_prob[i] = ((sum(bidders[i].prob[:cur_value_idx[i]]) + remaining_prob[i]) /\r\n exp_exiting_criteria(cur_bid, cur_value(i), active_values) *\r\n exp_exiting_criteria(next_change, cur_value(i), active_values) -\r\n sum(bidders[i].prob[:cur_value_idx[i]]))\r\n if np.abs(remaining_prob[i]) <= 1e-8:\r\n F_jump_points[i].append((next_change, sum(bidders[i].prob[:cur_value_idx[i]])))\r\n is_active[i] = 0\r\n remaining_prob[i] = -1.0\r\n bid_end_points[i][cur_value_idx[i]] = next_change\r\n cur_value_idx[i] -= 1\r\n else:\r\n F_jump_points[i].append((next_change, sum(bidders[i].prob[:cur_value_idx[i]]) + remaining_prob[i]))\r\n else:\r\n F_jump_points[i].append((next_change, sum(bidders[i].prob[:cur_value_idx[i] + 1])))\r\n if is_active[changing_bidder] == 0: # entering the bidding set\r\n is_active[changing_bidder] = 1\r\n remaining_prob[changing_bidder] = bidders[entering_bidder].prob[cur_value_idx[entering_bidder]]\r\n bid_start_points[changing_bidder][cur_value_idx[changing_bidder]] = next_change\r\n F_jump_points[changing_bidder].append(\r\n (next_change, sum(bidders[changing_bidder].prob[:cur_value_idx[changing_bidder] + 1])))\r\n else: # exiting the bidding set\r\n is_active[changing_bidder] = 0\r\n remaining_prob[changing_bidder] = -1.0\r\n bid_end_points[changing_bidder][cur_value_idx[changing_bidder]] = next_change\r\n cur_value_idx[changing_bidder] -= 1\r\n F_jump_points[changing_bidder].append(\r\n (next_change, sum(bidders[changing_bidder].prob[:cur_value_idx[changing_bidder] + 1])))\r\n cur_bid = next_change\r\n if cur_bid <= 0.0:\r\n break\r\n solution_state = State(is_active=is_active,\r\n remaining_prob=remaining_prob,\r\n cur_bid=cur_bid,\r\n cur_value_idx=cur_value_idx)\r\n\r\n return bid_start_points, bid_end_points, F_jump_points, solution_state", "def classify():\n yes_dataset = df[df[\"_class\"] == 1] # 470588\n no_dataset = df[df[\"_class\"] == 0] # 1971\n\n parameter_analysis = list()\n for criterion in np.arange(0.05, 0.91, 0.05):\n print(\"doing experiment at criterion = %s ...\" % criterion)\n rate_list = list()\n for i in range(10):\n # shuffle yes_dataset and no_dataset, so we can randomly choose 90% yes_dataset\n # + 90% no_dataset as train dataset, 10% yes_dataset + 10% no_dataset as test dataset\n yes_index = yes_dataset.index.tolist()\n random.shuffle(yes_index)\n no_index = no_dataset.index.tolist()\n random.shuffle(no_index)\n \n # 
concatenate 90%yes + 90%no, 10%yes + 10%no\n train = pd.concat([\n yes_dataset.loc[yes_index[:1774], :],\n no_dataset.loc[no_index[:423530], :]\n ])\n test = pd.concat([\n yes_dataset.loc[yes_index[1774:], :],\n no_dataset.loc[no_index[423530:], :]\n ]) \n \n # split data and label\n train_data, train_label = (train[[\"Revenue.Code\", \n \"Service.Code\", \n \"Procedure.Code\", \n \"Diagnosis.Code\", \n \"Subscriber.Index\"]], \n train[\"_class\"])\n test_data, test_label = (test[[\"Revenue.Code\", \n \"Service.Code\", \n \"Procedure.Code\", \n \"Diagnosis.Code\", \n \"Subscriber.Index\"]], \n test[\"_class\"])\n \n # apply classifier\n clf = GaussianNB()\n clf.fit(train_data, train_label)\n probability = clf.predict_proba(test_data).T[1]\n \n result = pd.DataFrame()\n result[\"_class\"] = test_label\n result[\"_predict\"] = probability >= criterion\n \n result_yes = result[result[\"_class\"] == 1]\n yes_yes_rate = sum(result_yes[\"_class\"] == result_yes[\"_predict\"])/len(result_yes[\"_predict\"])\n \n result_no = result[result[\"_class\"] == 0]\n no_no_rate = sum(result_no[\"_class\"] == result_no[\"_predict\"])/len(result_no[\"_predict\"])\n \n rate_list.append((yes_yes_rate, no_no_rate))\n\n rate_list = pd.DataFrame(rate_list)\n yes_yes_rate, no_no_rate = rate_list.mean()[0], rate_list.mean()[1]\n parameter_analysis.append((criterion, yes_yes_rate, no_no_rate))\n \n # save data to excel spreadsheet\n parameter_analysis = pd.DataFrame(parameter_analysis, columns=[\"criterion\", \"yes_yes_rate\", \"no_no_rate\"])\n writer = pd.ExcelWriter(\"parameter_analysis.xlsx\")\n parameter_analysis.to_excel(writer, \"parameter_analysis\", index=False)\n writer.save()", "def ensemble(scores):\r\n c = Counter ()\r\n for probs in zip (scores):\r\n idx = int (np.argmax (np.array (probs)))\r\n c.update ([idx])\r\n best = c.most_common (1)[0][0]\r\n return best", "def run_classifier_bayes_statistics(mode, image_type, indices, percentage, smoothing, debug):\n start = time.time()\n dat = train_naive_bayes(image_type, smoothing, percentage)\n end = time.time()\n output = classify_naive_bayes(dat, mode, indices, debug)\n return {'accuracy': check_correctness_statistics(output, mode, image_type), 'runtime': (end-start)}", "def tuneRandomForest(train_set):\n\n auc_score = make_scorer(roc_auc_score)\n acc = make_scorer(accuracy_score)\n\n train_set = pd.read_csv(train_set, sep=\"\\t\", low_memory=False)\n\n train_output = train_set[\"output\"].values\n train_features = train_set[train_set.columns.drop([\"labels\", \"output\"])].values\n\n #X_train, X_test, y_train, y_test = train_test_split(train_features, train_output, test_size=0.20)\n\n # define parameters to be optimized\n parameters = {\n 'n_estimators': [int(x) for x in range(200, 3000, 300)],\n 'max_features': ['log2', 'sqrt', \"auto\"],\n 'criterion': [\"gini\", \"entropy\"],\n }\n #plotGrid(parameters, script_path + \"/results/GridSearchPlot.png\")\n\n scores = ['precision', 'recall', 'f1', auc_score, acc] # compute efficiency based on scores\n for score in scores:\n print(\"# Tuning hyper-parameters for %s\" % score)\n\n tune_search = GridSearchCV(\n RandomForestClassifier(n_jobs=-1),\n parameters,\n scoring=score\n )\n #tune_search.fit(X_train, y_train)\n tune_search.fit(train_features, train_output)\n print(tune_search.best_params_)\n\n means = tune_search.cv_results_['mean_test_score']\n stds = tune_search.cv_results_['std_test_score']\n for mean, std, params in zip(means, stds, tune_search.cv_results_['params']):\n print(\"%0.3f (+/-%0.03f) for 
%r\" % (mean, std * 2, params))\n\n #y_true, y_pred = y_test, tune_search.predict(X_test)\n # print(classification_report(y_true, y_pred))\n #print()", "def postprocess(scores, classes, bboxes, iou_threshold=0.3, score_threshold=0.5):\n n = len(scores)\n \n det_num = 0\n det_classes = [] \n det_scores = []\n det_bboxes = []\n\n idx = np.argsort(scores)[::-1]\n sorted_scores = scores[idx]\n sorted_bboxes = bboxes[idx]\n sorted_classes = classes[idx]\n\n top_k_ids = []\n i = 0\n\n while i < n:\n if sorted_scores[i] < score_threshold:\n break\n\n top_k_ids.append(i)\n det_num += 1\n det_scores.append(sorted_scores[i])\n det_bboxes.append(sorted_bboxes[i])\n det_classes.append(sorted_classes[i])\n i += 1\n\n while i < n:\n tiled_bbox_i = np.tile(sorted_bboxes[i], (det_num, 1)) \n flags = (sorted_classes[top_k_ids]==sorted_classes[i])*1.0 \n ious, iofs, ioss = iou_bbox(tiled_bbox_i, sorted_bboxes[top_k_ids]) \n max_iou = np.max(ious) \n # max_iof = np.max(iofs*flags) \n # max_ios = np.max(ioss*flags) \n # temp = np.max((max_iof, max_ios))\n if max_iou > iou_threshold:\n i += 1\n else:\n break\n\n return det_num, np.array(det_scores, np.float32), np.array(det_classes, np.int32), np.array(det_bboxes, np.int32)", "def _train_clf_opt(self, predictions):\n\n X = self._construct_clf_opt_X(predictions)\n y = predictions['ytruth']\n X, y = self._check_train_labels(X, y)\n\n # self.clf_opt.n_estimators += 10 # This was for warm-start\n self.clf_opt.fit(X, y)\n self.clf_opt_trained = True\n\n if self.aim is not None:\n ypred = self._predict_proba(self.clf_opt, X=X)\n ybl = self.bl_predict(n_samples=len(X))\n x = minimize(self._opt_round_cutoff,\n np.array([self.round_cutoff if self.round_cutoff is not None else 0.5]),\n (y, ypred, ybl),\n method='cobyla',\n options={'rhobeg': 0.1}).x[0]\n self.round_cutoff_history.append(x)\n self.round_cutoff = np.median(self.round_cutoff_history)\n print('Optimal [%s] round_cutoff=%.4f' % (self.aim, self.round_cutoff))\n\n return", "def compute_splits(feature_df, target_col, max_num_splits):\n tree_estimator = DecisionTreeClassifier(max_leaf_nodes=max_num_splits+1,\n class_weight='balanced',\n random_state=1407)\n\n tree_estimator.fit(feature_df, target_col)\n thresholds = tree_estimator.tree_.threshold[tree_estimator.tree_.children_left != _tree.TREE_LEAF]\n return sorted(thresholds)", "def build_classifiers(self):\n classifiers = []\n for index, view in enumerate(BayesianKneighborClassifier.views):\n knn = KNeighborsClassifier(n_neighbors=BayesianKneighborClassifier.best_neighbours[index])\n BayesianKneighborClassifier.update_current_data(self, index)\n X_train, X_test, y_train, y_test = BayesianKneighborClassifier.split_test_and_train_data\\\n (self, test_size=0.3)\n knn.fit(X_train, y_train)\n classifiers.append(knn)\n return classifiers", "def eval_metrics_for_multiclass(self, predicted_answers):\n total_correct_in_all = 0\n total_pred_in_all = len(predicted_answers)\n # initial a dict for total correct in topK counting.\n total_correct_in_topK = dict([(i, 0) for i in self.topK_list])\n total_pred_in_topK = dict([(i, 0) for i in self.topK_list])\n max_topK = max(self.topK_list)\n label_pred = []\n label_true = []\n label_weights = []\n digits = 3\n metrics = {}\n\n for e_id, sample in predicted_answers.iteritems():\n # get all correct ids\n correct_label_indices = sample['correct_labels']\n # current case, we only have a majority lable for the correct label\n label_true.append(correct_label_indices[0])\n # counting all correct for each sample\n 
total_correct_in_all += len(correct_label_indices)\n # select topK\n sorted_probs_max_topK = sorted(sample['pred_probs'], reverse=True, key=lambda x: x['prob'])[:max_topK]\n top1_pred = sorted_probs_max_topK[0]\n label_pred.append(top1_pred['label_index'])\n\n # for all topK predictions\n for i in range(len(sorted_probs_max_topK)):\n pred = sorted_probs_max_topK[i]\n for topK in self.topK_list:\n if i >= topK:\n continue\n else:\n total_pred_in_topK[topK] += 1\n if pred['label_index'] in correct_label_indices:\n total_correct_in_topK[topK] += 1\n\n if total_correct_in_all != 0:\n # recall@K\n recall_at_K = dict([(k, total_correct_in_topK[k] / (total_correct_in_all * 1.0)) for k in self.topK_list])\n # assign recall@K into metrics\n for k, v in recall_at_K.items():\n # Jie\n # 1 means the greater the better.\n # -1 means the smaller the better.\n metrics['R@{}'.format(k)] = (1, v)\n\n self.logger.info('total_correct_in_all = {}, correct_in_topK = {}, recall@K = {}'.format(total_correct_in_all, sorted(total_correct_in_topK.items()), sorted(recall_at_K.items())))\n # here return all the p,r,f for each label, then we compute the micro average later.\n p, r, f1, s = precision_recall_fscore_support(label_true, label_pred, beta=1.0, labels=range(self.num_classes), average=None)\n total_s = np.sum(s)\n p_micro, r_micro, f1_micro, _ = precision_recall_fscore_support(label_true, label_pred, beta=1.0, labels=range(self.num_classes), average='micro')\n last_lines_heading = ['macro / total', 'weighted_mac / total', 'micro / total']\n target_names = self.classes\n name_width = max(len(cn) for cn in target_names)\n width = max(name_width, max([len(x) for x in last_lines_heading]), digits)\n\n headers = [\"precision\", \"recall\", \"f1-score\", \"support\"]\n head_fmt = u'{:>{width}s} ' + u' {:>9}' * len(headers)\n report = head_fmt.format(u'', *headers, width=width)\n report += u'\\n\\n'\n row_fmt = u'{:>{width}s} ' + u' {:>9.{digits}f}' * 3 + u' {:>9}\\n'\n rows = zip(target_names, p, r, f1, s)\n for row in rows:\n label_weights.append(row[4])\n report += row_fmt.format(*row, width=width, digits=digits)\n metrics['P_{}'.format(row[0])] = (1, row[1])\n metrics['R_{}'.format(row[0])] = (1, row[2])\n metrics['F1_{}'.format(row[0])] = (1, row[3])\n report += u'\\n'\n\n # compute macro averages\n p_macro = np.average(p, weights = None)\n r_macro = np.average(r, weights = None)\n f1_macro = np.average(f1, weights = None)\n metrics['P_{}'.format(\"macro\")] = (1, p_macro)\n metrics['R_{}'.format(\"macro\")] = (1, r_macro)\n metrics['F1_{}'.format(\"macro\")] = (1, f1_macro)\n report += row_fmt.format(last_lines_heading[0],\n p_macro,\n r_macro,\n f1_macro,\n total_s,\n width=width, digits=digits)\n\n # compute weighted macro average\n label_weights = map(lambda x : x/(total_s * 1.0), label_weights)\n p_weighted_average = np.average(p, weights = label_weights)\n r_weighted_average = np.average(r, weights = label_weights)\n f1_weighted_average = np.average(f1, weights = label_weights)\n metrics['P_{}'.format(\"weighted_macro\")] = (1, p_weighted_average)\n metrics['R_{}'.format(\"weighted_macro\")] = (1, r_weighted_average)\n metrics['F1_{}'.format(\"weighted_macro\")] = (1, f1_weighted_average)\n report += row_fmt.format(last_lines_heading[1],\n p_weighted_average,\n r_weighted_average,\n f1_weighted_average,\n total_s,\n width=width, digits=digits)\n # micro average\n metrics['P_{}'.format(\"micro\")] = (1, p_micro)\n metrics['R_{}'.format(\"micro\")] = (1, r_micro)\n metrics['F1_{}'.format(\"micro\")] = (1, 
f1_micro)\n report += row_fmt.format(last_lines_heading[2],\n p_micro,\n r_micro,\n f1_micro,\n total_s,\n width=width, digits=digits)\n\n self.logger.info(\"P,R,F1 report as follows:\\n {}\".format(report))\n # only plot it at dev and test time, not during training.\n if self.gen_confusing_matrix:\n\n self.logger.info(\"Generate confusing matrix photo.\")\n # Compute confusion matrix\n conf_matrix = confusion_matrix(label_true, label_pred)\n np.set_printoptions(precision=2)\n\n # Plot non-normalized confusion matrix\n plt.figure()\n self.plot_confusion_matrix(conf_matrix, classes=self.brief_classes, ori_fmt='d',\n title='Confusion matrix, without normalization')\n wo_norm_fig_path = os.path.join(self.result_dir, '{}_wo_norm.png'.format(self.result_prefix))\n plt.savefig(wo_norm_fig_path)\n\n # Plot normalized confusion matrix\n plt.figure()\n self.plot_confusion_matrix(conf_matrix, classes=self.brief_classes, ori_fmt='d', normalize=True,\n title='Normalized confusion matrix')\n\n norm_fig_path = os.path.join(self.result_dir, '{}_w_norm.png'.format(self.result_prefix))\n plt.savefig(norm_fig_path)\n\n else:\n self.logger.warn('invalid total_correct_in_all')\n\n return metrics", "def balance_dataset_weighting(instances):\n factors = get_balancing_weight_factors(instances)\n new_instances = [ (features, classification, w * factors[classification]) \n for features, classification, w in instances ]\n return new_instances", "def get_score(data, labels, fold_pairs, name, model, param, numTopVars,\r\n rank_per_fold=None, parallel=True, rand_iter=-1):\r\n assert isinstance(name, str)\r\n logging.info(\"Classifying %s\" % name)\r\n ksplit = len(fold_pairs)\r\n# if name not in NAMES:\r\n# raise ValueError(\"Classifier %s not supported. \"\r\n# \"Did you enter it properly?\" % name)\r\n\r\n # Redefine the parameters to be used for RBF SVM (dependent on\r\n # training data)\r\n if \"SGD\" in name:\r\n param[\"n_iter\"] = [25] # [np.ceil(10**3 / len(fold_pairs[0][0]))]\r\n classifier = get_classifier(name, model, param, rand_iter=rand_iter)\r\n \r\n if name == \"RBF SVM\": #This doesn't use labels, but looks as ALL data\r\n logging.info(\"RBF SVM requires some preprocessing.\"\r\n \"This may take a while\")\r\n #\r\n is_data_computed_gamma = True\r\n #\r\n if not is_data_computed_gamma:\r\n # Sahil commented the code below that computes the gamma choices from data.\r\n # The computed gamma choices seem too low thereby making SVM very slow. 
Instead, trying out fixed values.\r\n print param\r\n gamma = param['gamma']\r\n gamma = np.array(gamma)\r\n print 'gamma', gamma\r\n else:\r\n #Euclidean distances between samples\r\n # sahil switched from the first call to second one for computing the dist as the first one is giving error.\r\n # dist = pdist(StandardScaler().fit(data), \"euclidean\").ravel()\r\n dist = pdist(RobustScaler().fit_transform(data), \"euclidean\").ravel()\r\n print 'dist', dist\r\n #Estimates for sigma (10th, 50th and 90th percentile)\r\n sigest = np.asarray(np.percentile(dist, [10, 50, 90]))\r\n print 'sigest', sigest\r\n #Estimates for gamma (= -1/(2*sigma^2))\r\n gamma = 1./(2*sigest**2)\r\n print 'gamma', gamma\r\n #\r\n #\r\n #Set SVM parameters with these values\r\n # sahil changed the code a bit to remove a bug\r\n # param = [{\"kernel\": [\"rbf\"],\r\n # \"gamma\": gamma.tolist(),\r\n # \"C\": np.logspace(-2,2,5).tolist()}]\r\n param = {\"kernel\": [\"rbf\"],\r\n \"gamma\": gamma.tolist(),\r\n \"C\": np.logspace(-2, 2, 5).tolist()}\r\n # if name not in [\"Decision Tree\", \"Naive Bayes\"]:\r\n if param:\r\n if hasattr(classifier,'param_grid'): \r\n # isinstance(classifier, GridSearchCV):\r\n print 'param', param\r\n N_p = np.prod([len(l) for l in param.values()])\r\n elif isinstance(classifier, RandomizedSearchCV):\r\n N_p = classifier.n_iter\r\n else:\r\n N_p = 1\r\n# is_cv = isinstance(classifier, GridSearchCV) or \\\r\n# isinstance(classifier, RandomizedSearchCV)\r\n# print('Name: {}, ksplit: {}, N_p: {}'.format(name, ksplit, N_p))\r\n if (not parallel) or ksplit <= N_p or \\\r\n (name == \"Random Forest\") or (\"SGD\" in name):\r\n logging.info(\"Attempting to use grid search...\")\r\n classifier.n_jobs = PROCESSORS\r\n classifier.pre_dispatch = 1 # np.floor(PROCESSORS/24)\r\n allConfMats = []\r\n allTotalErrs = []\r\n allFittedClassifiers = []\r\n for i, fold_pair in enumerate(fold_pairs):\r\n confMats = []\r\n totalErrs = []\r\n fitted_classifiers = []\r\n logging.info(\"Classifying a %s the %d-th out of %d folds...\"\r\n % (name, i+1, len(fold_pairs)))\r\n if rank_per_fold is not None:\r\n rankedVars = rank_per_fold[i]\r\n else:\r\n rankedVars = np.arange(data.shape[1])\r\n #\r\n for numVars in numTopVars:\r\n logging.info('Classifying for top %i variables' % numVars)\r\n #\r\n # print 'rankedVars', rankedVars\r\n #\r\n confMat, totalErr, fitted_classifier = classify(data[:, rankedVars[:numVars]],\r\n labels,\r\n fold_pair,\r\n classifier)\r\n confMats.append(confMat)\r\n totalErrs.append(totalErr)\r\n fitted_classifiers.append(fitted_classifier)\r\n # recheck the structure of area and fScore variables\r\n allConfMats.append(confMats)\r\n allTotalErrs.append(totalErrs)\r\n allFittedClassifiers.append(fitted_classifiers)\r\n else:\r\n print 'parallel computing going on (debug Sahil ...) 
..........................'\r\n #\r\n classifier.n_jobs = PROCESSORS\r\n logging.info(\"Multiprocessing folds for classifier {}.\".format(name))\r\n pool = Pool(processes=min(ksplit, PROCESSORS))\r\n out_list = pool.map(per_split_classifier(data, labels, classifier,\r\n numTopVars),\r\n zip(rank_per_fold, fold_pairs))\r\n pool.close()\r\n pool.join()\r\n #allConfMats = [el[0] for el in out_list]\r\n #allTotalErrs = [el[1] for el in out_list]\r\n #allFittedClassifiers = [el[2] for el in out_list]\r\n allConfMats, allTotalErrs, allFittedClassifiers = tuple(zip(*out_list))\r\n return classifier, allConfMats, allTotalErrs, allFittedClassifiers", "def evaluate(self, Estimator, params):\n assert hasattr(Estimator, 'fit'),\\\n \"Estimator must implement the fit method\"\n assert hasattr(Estimator, 'predict'),\\\n \"Estimator must implement the predict method\"\n # Initialize Estimators\n models = [Estimator(param) for param in params]\n ac = list()\n for idx, (search, hold_out) in enumerate(self.cv):\n if idx >= self.max_outer:\n break\n cv = StratifiedKFold(y=self.b[search], n_folds=self.k_folds-1)\n for jdx, (train, test) in enumerate(cv):\n if jdx >= self.max_inner:\n break\n scores = [self._score(model, train, test) for model in models]\n ac.append(self._score(models[np.argmax(scores)], search, hold_out))\n return np.mean(ac)", "def evaluate_prediction_BoW(vectorizer, classifier, test_data):\n \n data = (test_data[k][0] for k in range(len(test_data))) # generator for the train data\n data_features = vectorizer.transform(data)\n predictions = classifier.predict(data_features)\n target = [test_data[k][1] for k in range(len(test_data))]\n \n return accuracy_score(target, predictions)", "def run(self):\n if self.pb.xvalEN and not self.isXvalMain:\n dt = np.ones(self.pb.total_exam_no,\n dtype=\"float32\") / self.train_exam_no\n dt[self.val_indices] = 0.0\n else:\n dt = np.ones(self.pb.total_exam_no,\n dtype=\"float32\") / self.pb.total_exam_no\n\n val = np.zeros(8,dtype=\"float32\")-1\n boosting = None\n wl = None\n if self.pb.algorithm == 'conf-rated':\n boosting = ConfidenceRated(self)\n wl = ConfidenceRatedWL(self)\n elif self.pb.algorithm == 'adaboost':\n boosting = AdaBoost(self)\n wl = AdaBoostWL(self)\n elif self.pb.algorithm == 'adaboost-fast':\n boosting = AdaBoostFast(self)\n wl = AdaBoostFastWL(self)\n elif self.pb.algorithm == 'rankboost':\n boosting = RankBoost(self)\n wl = ConfidenceRatedWL(self)\n elif self.pb.algorithm == 'rankboost-fast':\n boosting = RankBoost(self)\n wl = AdaBoostFastWL(self)\n else:\n raise Exception(\"Unknown Boosting Algorithm\")\n \n for r in range(self.pb.rounds):\n tree = wl.run(dt)\n dt = boosting.run(dt = dt,\n r = r,\n tree = tree)\n \n if self.isXvalMain:\n boosting.finalize()\n \n \"\"\"Sync the predictions and save them to a file\"\"\"\n if self.pb.isLeader:\n if self.pb.xvalEN and not self.isXvalMain:\n val_predictions = boosting.get_val_predictions()\n hypotheses = boosting.get_hypotheses()\n np.savez(self.out_fp,\n val_predictions = val_predictions,\n hypotheses = hypotheses,\n )\n if self.pb.testEN and self.isXvalMain:\n train_predictions = boosting.get_train_predictions()\n test_predictions = boosting.get_test_predictions()\n hypotheses = boosting.get_hypotheses()\n np.savez(self.out_fp,\n train_predictions = train_predictions,\n test_predictions = test_predictions,\n hypotheses = hypotheses,\n )\n if not self.pb.testEN and self.isXvalMain:\n train_predictions = boosting.get_train_predictions()\n hypotheses = boosting.get_hypotheses()\n 
np.savez(self.out_fp,\n train_predictions = train_predictions,\n hypotheses = hypotheses,\n )", "def _do_grid_search_round(self) -> Dict[str, Dict[str, Any]]:\n\n cfg = self.cfg_\n\n # Get the data to use, vectorizing the sample feature dictionaries\n y_train = list(self._generate_samples(self.grid_search_ids_, 'y'))\n X_train = self._vectorize_and_sparsify_data(self.gs_vec_,\n self.grid_search_ids_)\n\n # Feature selection\n if cfg.feature_selection_percentile != 1.0:\n loginfo('Removing {0}% of the features during grid search round...'\n .format(100 - 100*cfg.feature_selection_percentile))\n X_train = \\\n (SelectPercentile(chi2,\n percentile=100*cfg.feature_selection_percentile)\n .fit_transform(X_train, y_train))\n\n # Make a `StratifiedKFold` object using the list of labels\n # NOTE: This will effectively redistribute the samples in the\n # various grid search folds, but it will maintain the\n # distribution of labels. Furthermore, due to the use of the\n # `RandomState` object, it should always happen in the exact\n # same way.\n prng = np.random.RandomState(12345)\n gs_cv_folds_ = StratifiedKFold(y=y_train,\n n_folds=self.data_.grid_search_folds,\n shuffle=True,\n random_state=prng)\n\n # Iterate over the learners/parameter grids, executing the grid search\n # cross-validation for each\n loginfo('Doing a grid search cross-validation round with {0} folds for'\n ' each learner and each corresponding parameter grid.'\n .format(self.data_.grid_search_folds))\n n_jobs_learners = ['Perceptron', 'SGDClassifier',\n 'PassiveAggressiveClassifier']\n learner_gs_cv_params_ = {}\n for learner, learner_name, param_grids in zip(self.learners_,\n self.learner_names_,\n cfg.param_grids):\n\n loginfo('Grid search cross-validation for {0}...'\n .format(learner_name))\n\n # If the learner is `MiniBatchKMeans`, set the `batch_size`\n # parameter to the number of training samples\n if learner_name == 'MiniBatchKMeans':\n for param_grid in param_grids:\n param_grid['batch_size'] = [len(y_train)]\n\n # If learner is of any of the learner types in\n # `n_jobs_learners`, add in the `n_jobs` parameter specified\n # in the config (but only do so if that `n_jobs` value is\n # greater than 1 since it won't matter because 1 is the\n # default, anyway)\n if cfg.n_jobs > 1:\n if learner_name in n_jobs_learners:\n for param_grid in param_grids:\n param_grid['n_jobs'] = [cfg.n_jobs]\n\n # Make `GridSearchCV` instance\n folds_diff = cfg.grid_search_folds - self.data_.grid_search_folds\n if (self.data_.grid_search_folds < 2\n or folds_diff/cfg.grid_search_folds > 0.25):\n msg = ('Either there weren\\'t enough folds after collecting '\n 'data (via `ExperimentalData`) to do the grid search '\n 'round or the number of folds had to be reduced to such'\n ' a degree that it would mean a +25\\% reduction in the '\n 'total number of folds used during the grid search '\n 'round.')\n logerr(msg)\n raise ValueError(msg)\n gs_cv = GridSearchCV(learner(),\n param_grids,\n cv=gs_cv_folds_,\n scoring=self._resolve_objective_function())\n\n # Do the grid search cross-validation\n gs_cv.fit(X_train, y_train)\n learner_gs_cv_params_[learner_name] = gs_cv.best_params_\n del gs_cv\n\n del X_train\n del y_train\n\n return learner_gs_cv_params_", "def compute_score_fast(verbose=1):\n res = []\n\n batch = math.ceil(len(train) / LINEAR_ASSIGNMENT_SEGMENT_SIZE)\n for start in range(0, len(train), batch):\n end = min(len(train), start + batch)\n train_batch = train[start:end]\n\n features = branch_model.predict_generator(FeatureGen(train_batch, 
verbose=verbose), max_queue_size=12, workers=6, verbose=0)\n score = head_model.predict_generator(ScoreGen(features, verbose=verbose), max_queue_size=12, workers=6, verbose=0)\n score = score_reshape(score, features)\n\n res.append(score)\n\n return res", "def train_w_batch_boost(self, out_tag='my_lstm', save=True, auc_threshold=0.01):\n\n self.create_train_valid_set()\n\n #paramaters that control batch size\n best_auc = 0.5\n #current_batch_size = 1024\n current_batch_size = 64\n #max_batch_size = 50000\n max_batch_size = 50000\n\n #keep track of epochs for plotting loss vs epoch, and for getting best model\n epoch_counter = 0 \n best_epoch = 1 \n\n keep_training = True\n\n while keep_training:\n epoch_counter += 1\n print('beginning training iteration for epoch {}'.format(epoch_counter))\n self.train_network(epochs=1, batch_size=current_batch_size)\n\n self.save_model(epoch_counter, out_tag)\n val_roc = self.compute_roc(batch_size=current_batch_size, valid_set=True) #FIXME: what is the best BS here? final BS from batch boost... initial BS? current BS??\n\n #get average of validation rocs and clear list entries \n improvement = ((1-best_auc) - (1-val_roc)) / (1-best_auc)\n\n #FIXME: if the validation roc does not improve after n bad \"epochs\", then update the batch size accordingly. Rest bad epochs to zero each time the batch size increases, if it does\n\n #do checks to see if batch size needs to change etc\n if improvement > auc_threshold:\n print('Improvement in (1-AUC) of {:.4f} percent. Keeping batch size at {}'.format(improvement*100, current_batch_size))\n best_auc = val_roc\n best_epoch = epoch_counter\n elif current_batch_size*4 < max_batch_size:\n print('Improvement in (1-AUC) of only {:.4f} percent. Increasing batch size to {}'.format(improvement*100, current_batch_size*4))\n current_batch_size *= 4\n if val_roc > best_auc: \n best_auc = val_roc\n best_epoch = epoch_counter\n elif current_batch_size < max_batch_size: \n print('Improvement in (1-AUC) of only {:.4f} percent. Increasing to max batch size of {}'.format(improvement*100, max_batch_size))\n current_batch_size = max_batch_size\n if val_roc > best_auc: \n best_auc = val_roc\n best_epoch = epoch_counter\n elif improvement > 0:\n print('Improvement in (1-AUC) of only {:.4f} percent. Cannot increase batch further'.format(improvement*100))\n best_auc = val_roc\n best_epoch = epoch_counter\n else: \n print('AUC did not improve and batch size cannot be increased further. Stopping training...')\n keep_training = False\n\n if epoch_counter > self.max_epochs:\n print('At the maximum number of training epochs ({}). Stopping training...'.format(self.max_epochs))\n keep_training = False\n best_epoch = self.max_epochs\n \n print 'best epoch was: {}'.format(best_epoch)\n print 'best validation auc was: {}'.format(best_auc)\n self.val_roc = best_auc\n \n\n #delete all models that aren't from the best training. 
Re-load best model for predicting on test set \n for epoch in range(1,epoch_counter+1):\n if epoch is not best_epoch:\n os.system('rm {}/models/{}_model_epoch_{}.hdf5'.format(os.getcwd(), out_tag, epoch))\n os.system('rm {}/models/{}_model_architecture_epoch_{}.json'.format(os.getcwd(), out_tag, epoch))\n os.system('mv {0}/models/{1}_model_epoch_{2}.hdf5 {0}/models/{1}_model.hdf5'.format(os.getcwd(), out_tag, best_epoch))\n os.system('mv {0}/models/{1}_model_architecture_epoch_{2}.json {0}/models/{1}_model_architecture.json'.format(os.getcwd(), out_tag, best_epoch))\n\n #reset model state and load in best weights\n with open('{}/models/{}_model_architecture.json'.format(os.getcwd(), out_tag), 'r') as model_json:\n best_model_architecture = model_json.read()\n self.model = keras.models.model_from_json(best_model_architecture)\n self.model.load_weights('{}/models/{}_model.hdf5'.format(os.getcwd(), out_tag))\n\n if not save:\n os.system('rm {}/models/{}_model_architecture.json'.format(os.getcwd(), out_tag))\n os.system('rm {}/models/{}_model.hdf5'.format(os.getcwd(), out_tag))", "def calculate_accuracy(ground_truth_object_list, pred_object_list):\n\n ground_truth_list = np.zeros((1,10))\n pred_list = np.zeros((1,10))\n \n for gt_index in range(len(ground_truth_object_list)):\n \n intClass_ground_truth = classMappingDict[ground_truth_object_list[gt_index]['name']]\n ground_truth_list[0][intClass_ground_truth] = ground_truth_list[0][intClass_ground_truth] + 1\n\n for pred_index in range(len(pred_object_list)):\n\n intClass_pred = classMappingDict[pred_object_list[pred_index]['name']]\n pred_list[0][intClass_pred] = pred_list[0][intClass_pred] + 1\n return ground_truth_list, pred_list", "def find_best_classifier(classifiers, X_t, y_t, X_v, y_v, params, jobs):\n\n # Initialize result storage\n clfs_return = []\n train_scores = []\n test_scores = []\n\n # Loop through classifiers\n for classifier in classifiers:\n # Grid search, calibrate, and test the classifier\n classifier, train_score, test_score = train_calibrate_predict(\n classifier, X_t, y_t, X_v, y_v, params[classifier], jobs)\n\n # Append the result to storage\n clfs_return.append(classifier)\n train_scores.append(train_score)\n test_scores.append(test_score)\n\n # Return storage\n return clfs_return, train_scores, test_scores", "def model(**params):\n N_frb = 0\n vs = []\n hs = []\n cs = []\n ncands = []\n\n for cand in candlist:\n c_res = calculate_metric_terms(\n cand, cluster_function=cluster_function, debug=False, plot=False, **params\n )\n t, frb_found, h, c, v = c_res\n vs.append(v)\n hs.append(h)\n cs.append(c)\n ncands.append(t)\n\n if frb_found:\n N_frb += 1\n\n vs = np.array(vs)\n hs = np.array(hs)\n cs = np.array(cs)\n c_avg = np.average(cs, axis=0, weights=ncands)\n h_avg = np.average(hs, axis=0, weights=ncands)\n v_avg = np.average(vs, axis=0, weights=ncands)\n recall = N_frb / len(vs)\n score = v_avg * recall\n\n return score", "def update(self, round, npc, cheat_labels=None):\n with torch.no_grad():\n batch_size = 100\n ANs_num = self.get_ANs_num(round)\n logger.debug('Going to choose %d samples as anchors' % ANs_num)\n features = npc.memory\n logger.debug(\"Start to compute each sample's entropy\")\n for start in xrange(0, self.samples_num, batch_size):\n logger.progress(start, self.samples_num, 'processing %d/%d samples...')\n end = start + batch_size\n end = min(end, self.samples_num)\n preds = F.softmax(npc(features[start:end], None), 1)\n self.entropy[start:end] = -(preds * preds.log()).sum(1)\n logger.debug('Compute 
entropy done, max(%.2f), min(%.2f), mean(%.2f)' % (self.entropy.max(), self.entropy.min(), self.entropy.mean()))\n self.anchor_indexes = self.entropy.topk(ANs_num, largest=False)[1]\n self.instance_indexes = torch.ones_like(self.position).scatter_(0, self.anchor_indexes, 0).nonzero().view(-1)\n anchor_entropy = self.entropy.index_select(0, self.anchor_indexes)\n instance_entropy = self.entropy.index_select(0, self.instance_indexes)\n if self.anchor_indexes.size(0) > 0:\n logger.debug('Entropies of anchor samples: max(%.2f), min(%.2f), mean(%.2f)' % (anchor_entropy.max(), anchor_entropy.min(), anchor_entropy.mean()))\n if self.instance_indexes.size(0) > 0:\n logger.debug('Entropies of instance sample: max(%.2f), min(%.2f), mean(%.2f)' % (instance_entropy.max(), instance_entropy.min(), instance_entropy.mean()))\n logger.debug('Start to get the position of both anchor and instance samples')\n instance_cnt = 0\n for i in xrange(self.samples_num):\n logger.progress(i, self.samples_num, 'processing %d/%d samples...')\n if (i == self.anchor_indexes).any():\n self.position[i] = (self.anchor_indexes == i).max(0)[1]\n continue\n instance_cnt -= 1\n self.position[i] = instance_cnt\n logger.debug('Start to find %d neighbours for each anchor sample' % self.ANs_size)\n anchor_features = features.index_select(0, self.anchor_indexes)\n self.neighbours = torch.LongTensor(ANs_num, self.ANs_size)\n for start in xrange(0, ANs_num, batch_size):\n logger.progress(start, ANs_num, 'processing %d/%d samples...')\n end = start + batch_size\n end = min(end, ANs_num)\n sims = torch.mm(anchor_features[start:end], features.t())\n sims.scatter_(1, self.anchor_indexes[start:end].view(-1, 1), -1.0)\n _, self.neighbours[start:end] = sims.topk(self.ANs_size, largest=True, dim=1)\n logger.debug('ANs discovery done')\n if cheat_labels is None:\n return 0.0\n logger.debug('Start to compute ANs consistency')\n anchor_label = cheat_labels.index_select(0, self.anchor_indexes)\n neighbour_label = cheat_labels.index_select(0, self.neighbours.view(-1)).view_as(self.neighbours)\n self.consistency = (anchor_label.view(-1, 1) == neighbour_label).float().mean()\n return self.consistency", "def test_220_boosted_goal_difference_for_home_models_with_various_upper_home_win_threshold(self):\n\n def create_model_fn(fn_team: str):\n team_stat = Stats.n_sample_stats_for_team(cursor=db_in_cursor,\n team=fn_team,\n last_sample_date=self.model_date,\n n_samples=self.num_samples,\n normalize_by_matches=True)\n\n\n return FeatureModel(\n input_data=[self.home_boost + team_stat.goal_diff, team_stat.goal_diff],\n id=team_stat.team_name\n )\n\n default_threshold_lower = 0.3\n default_threshold_upper = 0.9\n\n explore_range = (default_threshold_lower, 5.0)\n num_steps_wanted = 60\n step_size = (explore_range[1] - explore_range[0])/num_steps_wanted\n\n threshold_lower = default_threshold_lower\n for threshold_upper in StatsPredictionPremierLeague.crange(first=explore_range[0], test=lambda x: x <= explore_range[1],\n update=lambda x: x + step_size):\n for match_date in played_home_OR_away_before_dates:\n ####\n #  Build model up to the day before the match\n ####\n self.home_boost = 0.72\n self.model_date = match_date - timedelta(days=1)\n self.num_samples = num_matches_in_season\n\n models: {str: FeatureModel} = FeatureModel.create_models_for_all_teams(\n model_making_fn=create_model_fn, entities=teams)\n\n self.persist_models(model_gen_date=self.model_date, model_description=self.shortDescription(), models=models)\n\n # variant_string = 
'threshold_lower=%f, threshold_upper=%f' % (threshold_lower, threshold_upper)\n self.make_and_store_predictions_for_date(match_date=match_date, models=models, draw_range=(threshold_lower, threshold_upper),\n variants=threshold_upper)", "def cal_average_kill_turns(deck):\n #Results array\n turn_results = np.zeros(NUM_SIMS)\n \n #Simulation loop\n for i in range(NUM_SIMS): \n if VERBOSE:\n print('Running simulation ' + str(i + 1)) \n turn_results[i] = cal_kill_turn(copy.deepcopy(deck))\n #End of Simulations\n \n #DETERMINE ATK\n average_kill_turn = np.average(turn_results)\n min_kill_turn = np.min(turn_results)\n max_kill_turn = np.max(turn_results)\n \n return average_kill_turn, min_kill_turn, max_kill_turn", "def setUpMax(roundX):\n vals = []\n for attribute in roundX[\"attributes\"]:\n vals.append(roundX[\"attributes\"][attribute][\"gain\"])\n branch = []\n for attribute in roundX[\"attributes\"]:\n if max(vals) == roundX[\"attributes\"][attribute][\"gain\"]:\n branch.append(attribute)\n return branch", "def findRFBestDepth():\n resultList = []\n BestScore = 0\n # iterate through different max_depths from 1 to 19\n for max_depth in range(1,20):\n rforest = ensemble.RandomForestClassifier(max_depth=max_depth, n_estimators=100)\n trainng_score = []\n testing_score = []\n # run 10 different cross-validation\n for index in range(10):\n # split into cross-validation sets.\n cv_data_train, cv_data_test, cv_target_train, cv_target_test = \\\n cross_validation.train_test_split(X_train, y_train, test_size=0.1)\n\n # fit the model using the cross-validation data\n # and tune parameter, such as max_depth here\n rforest = rforest.fit(cv_data_train, cv_target_train)\n trainng_score += [rforest.score(cv_data_train,cv_target_train)]\n testing_score += [rforest.score(cv_data_test,cv_target_test)]\n\n # Compute the average score for both traning and testing data\n trainng_avgScore = 1.0 * sum(trainng_score)/len(trainng_score)\n testing_avgScore = 1.0 * sum(testing_score)/len(testing_score)\n\n # find the best score\n if testing_avgScore > BestScore:\n BestScore = testing_avgScore\n best_depth = max_depth\n resultList += [[best_depth, trainng_avgScore, testing_avgScore]]\n print ('The best average score and the corresponding max_depth is: ')\n return BestScore, best_depth", "def get_candidate_objects(output, img_size, classes, anchors, threshold):\n\n #threshold = 0.8\n iou_threshold = 0.4\n\n boxes, probs = parse_yolo_output_v2(output, img_size, len(classes), anchors)\n filter_mat_probs = (probs >= threshold)\n filter_mat_boxes = np.nonzero(filter_mat_probs)[0:3]\n boxes_filtered = boxes[filter_mat_boxes]\n probs_filtered = probs[filter_mat_probs]\n classes_num_filtered = np.argmax(probs, axis=3)[filter_mat_boxes]\n\n idx = np.argsort(probs_filtered)[::-1]\n boxes_filtered = boxes_filtered[idx]\n probs_filtered = probs_filtered[idx]\n classes_num_filtered = classes_num_filtered[idx]\n\n # too many detections - exit\n if len(boxes_filtered) > 1e3:\n print(\"Too many detections, maybe an error? 
: {}\".format(\n len(boxes_filtered)))\n return []\n\n probs_filtered = non_maxima_suppression(boxes_filtered, probs_filtered,\n classes_num_filtered, iou_threshold)\n\n filter_iou = (probs_filtered > 0.0)\n boxes_filtered = boxes_filtered[filter_iou]\n probs_filtered = probs_filtered[filter_iou]\n classes_num_filtered = classes_num_filtered[filter_iou]\n\n result = []\n for class_id, box, prob in zip(classes_num_filtered, boxes_filtered, probs_filtered):\n result.append([classes[class_id], box[0], box[1], box[2], box[3], prob])\n\n return result", "def test_071_various_boosted_goal_difference_for_home_models(self):\n\n def create_model_fn(fn_team: str):\n team_stat = Stats.n_sample_stats_for_team(cursor=db_in_cursor,\n team=fn_team,\n last_sample_date=self.model_date,\n n_samples=self.num_samples,\n normalize_by_matches=True)\n\n\n return FeatureModel(\n input_data=[self.home_boost + team_stat.goal_diff, team_stat.goal_diff],\n id=team_stat.team_name\n )\n\n # TODO: convert this to use crange\n for i in range(0, 201):\n boost = i/100\n\n for match_date in played_home_OR_away_before_dates:\n ####\n #  Build model up to the day before the match\n ####\n self.home_boost = boost\n self.model_date = match_date - timedelta(days=1)\n self.num_samples = num_matches_in_season\n\n models: {str: FeatureModel} = FeatureModel.create_models_for_all_teams(\n model_making_fn=create_model_fn, entities=teams)\n\n model_desc = 'gdn_boost_%s' % boost\n self.persist_models(model_gen_date=self.model_date, model_description=model_desc, models=models)\n\n self.make_and_store_predictions_for_date(match_date=match_date, models=models, variants=model_desc)", "def _evaluate_performance__static_winners(self):\n # | - _evaluate_performance__\n\n # | - class attributes #################################################\n AL = self\n al_gen = self.al_gen\n verbose = self.verbose\n seed_ids = self.seed_ids\n acquisition_bin = self.acquisition_bin\n completed_ids = self.completed_ids\n CandidateSpace = self.CandidateSpace\n RegressionModel = self.RegressionModel\n DuplicateFinder = self.DuplicateFinder\n al_gen_dict = self.al_gen_dict\n\n stop_mode = self.stop_mode\n stop_num_generations = self.stop_num_generations\n\n index_acq_gen_dict = self.index_acq_gen_dict\n #__| #################################################################\n\n # #####################################################################\n mode = \"lowest_N\" # 'lowest_N' or 'lowest_perc'\n\n N_ids = 10\n lowest_perc = 5\n\n # Number of consecutive generations that the Nth best systems must\n # remain static\n M_gens = 3\n # #####################################################################\n\n if mode == \"lowest_perc\":\n num_candidates = CandidateSpace.FingerPrints.df_pre.shape[0]\n N_ids = int(num_candidates * (lowest_perc * 0.01))\n\n gen_keys = list(AL.al_gen_dict.keys())\n\n if len(gen_keys) > M_gens:\n latest_M_keys = gen_keys[-(M_gens + 1):]\n last_gen_key = gen_keys[-1]\n\n al_gen_dict_subset_i = dict(zip(\n latest_M_keys,\n [AL.al_gen_dict.get(i, None) for i in latest_M_keys]))\n\n indices_list = []\n iterator = enumerate(al_gen_dict_subset_i.items())\n for i_cnt, (gen_i, AL_i) in iterator:\n model_i = AL_i.model\n\n model_i = AL.add_main_Y_to_model(\n model_i, plot_dft_instead_of_pred=True)\n model_i = model_i[(model_i[\"duplicate\"] == False)]\n model_i = model_i.sort_values(\"Y_main\")\n\n indices_i = model_i.index.tolist()\n\n indices_list.append(indices_i)\n\n if i_cnt >= M_gens:\n indices_i = indices_list[i_cnt][0:N_ids]\n 
ids_static_list = []\n for j in range(M_gens):\n indices_j = indices_list[i_cnt - (j + 1)][0:N_ids]\n ids_static = indices_j == indices_i\n ids_static_list.append(ids_static)\n\n ids_are_static = all(ids_static_list)\n\n self.performance__static_winners[last_gen_key] = ids_are_static\n #__|", "def majority_vote():\n iris = datasets.load_iris()\n x_vals, y_vals = iris.data[50:, [1, 2]], iris.target[50:]\n labenc = LabelEncoder()\n y_vals = labenc.fit_transform(y_vals)\n x_train, x_test, y_train, y_test = train_test_split(x_vals, y_vals,\n test_size=0.5, random_state=1)\n\n clf1 = LogisticRegression(penalty='l2', C=0.001, random_state=0)\n clf2 = DecisionTreeClassifier(max_depth=1, criterion='entropy', random_state=0)\n clf3 = KNeighborsClassifier(n_neighbors=1, p=2, metric='minkowski')\n pipe1 = Pipeline([['sc', StandardScaler()], ['clf', clf1]])\n pipe3 = Pipeline([['sc', StandardScaler()], ['clf', clf3]])\n clf_labels = ['Logistic Regression', 'Decision Tree', 'KNN']\n\n # Majority Rule (hard) Voting\n mv_clf = MajorityVoteClassifier(classifiers=[pipe1, clf2, pipe3])\n\n clf_labels += ['Majority Voting']\n all_clf = [pipe1, clf2, pipe3, mv_clf]\n print('10-fold cross validation:\\n')\n for clf, label in zip(all_clf, clf_labels):\n scores = cross_val_score(estimator=clf, X=x_train, y=y_train, cv=10, scoring='roc_auc')\n print(\"ROC AUC: %0.2f (+/- %0.2f) [%s]\" % (scores.mean(), scores.std(), label))\n\n colors = ['black', 'orange', 'blue', 'green']\n linestyles = [':', '--', '-.', '-']\n for clf, label, clr, lin_style in zip(all_clf, clf_labels, colors, linestyles):\n # assuming the label of the positive class is 1\n y_pred = clf.fit(x_train, y_train).predict_proba(x_test)[:, 1]\n fpr, tpr, _ = roc_curve(y_true=y_test, y_score=y_pred)\n print(y_pred)\n roc_auc = auc(x=fpr, y=tpr)\n plt.plot(fpr, tpr, color=clr, linestyle=lin_style,\n label='%s (auc = %0.2f)' % (label, roc_auc))\n\n plt.legend(loc='lower right')\n plt.plot([0, 1], [0, 1], linestyle='--', color='gray', linewidth=2)\n\n plt.xlim([-0.1, 1.1])\n plt.ylim([-0.1, 1.1])\n plt.grid()\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.tight_layout()\n plt.savefig(IMG_PATH + 'roc.png', dpi=300)\n plt.close()\n\n stdc = StandardScaler()\n x_train_std = stdc.fit_transform(x_train)\n all_clf = [pipe1, clf2, pipe3, mv_clf]\n x_min = x_train_std[:, 0].min() - 1\n x_max = x_train_std[:, 0].max() + 1\n y_min = x_train_std[:, 1].min() - 1\n y_max = x_train_std[:, 1].max() + 1\n xxx, yyy = np.meshgrid(np.arange(x_min, x_max, 0.1), np.arange(y_min, y_max, 0.1))\n _, axarr = plt.subplots(nrows=2, ncols=2, sharex='col', sharey='row', figsize=(7, 5))\n for idx, clf, ttt in zip(product([0, 1], [0, 1]), all_clf, clf_labels):\n clf.fit(x_train_std, y_train)\n z_vals = clf.predict(np.c_[xxx.ravel(), yyy.ravel()])\n z_vals = z_vals.reshape(xxx.shape)\n axarr[idx[0], idx[1]].contourf(xxx, yyy, z_vals, alpha=0.3)\n axarr[idx[0], idx[1]].scatter(x_train_std[y_train == 0, 0], x_train_std[y_train == 0, 1],\n c='blue', marker='^', s=50)\n axarr[idx[0], idx[1]].scatter(x_train_std[y_train == 1, 0], x_train_std[y_train == 1, 1],\n c='red', marker='o', s=50)\n axarr[idx[0], idx[1]].set_title(ttt)\n plt.text(-3.5, -4.5, s='Sepal width [standardized]', ha='center', va='center', fontsize=12)\n plt.text(-10.5, 4.5, s='Petal length [standardized]', ha='center', va='center',\n fontsize=12, rotation=90)\n plt.tight_layout()\n plt.savefig(IMG_PATH + 'voting_panel.png', bbox_inches='tight', dpi=300)\n # print(mv_clf.get_params())\n params = 
{'decisiontreeclassifier__max_depth': [1, 2],\n 'pipeline-1__clf__C': [0.001, 0.1, 100.0]}\n grid = GridSearchCV(estimator=mv_clf, param_grid=params, cv=10, scoring='roc_auc')\n grid.fit(x_train, y_train)\n\n for params, mean_score, scores in grid.cv_results_:\n print(\"%0.3f+/-%0.2f %r\" % (mean_score, scores.std() / 2, params))\n print('Best parameters: %s' % grid.best_params_)\n print('Accuracy: %.2f' % grid.best_score_)" ]
[ "0.5816989", "0.5509983", "0.541834", "0.53876275", "0.53350043", "0.5324034", "0.5317509", "0.5210553", "0.51253355", "0.51249367", "0.5117827", "0.50774145", "0.50676054", "0.5053001", "0.5049434", "0.50463754", "0.50307447", "0.5024344", "0.50045466", "0.4988225", "0.49871996", "0.49853265", "0.496797", "0.49630922", "0.4962463", "0.4962463", "0.49445808", "0.49445808", "0.49346837", "0.4915061", "0.49147037", "0.49029648", "0.48947603", "0.48902234", "0.4880092", "0.48785284", "0.4872494", "0.48663113", "0.48621702", "0.4836322", "0.48341253", "0.48313582", "0.48304862", "0.48124772", "0.48106804", "0.48093024", "0.480035", "0.4774042", "0.4753646", "0.4751876", "0.47341278", "0.47042534", "0.47012588", "0.46882576", "0.4684615", "0.4669601", "0.46657744", "0.46640527", "0.4653915", "0.46515006", "0.4651438", "0.46501628", "0.46494442", "0.4638865", "0.46360883", "0.4633391", "0.4628563", "0.46262583", "0.46225294", "0.46203148", "0.4619837", "0.46168995", "0.4613162", "0.4611607", "0.4610725", "0.46106502", "0.4610073", "0.4602581", "0.45965496", "0.4595106", "0.45937976", "0.45905387", "0.4581257", "0.45794016", "0.4575004", "0.45725113", "0.45719722", "0.45711634", "0.4568506", "0.45661357", "0.45653266", "0.45635393", "0.45629495", "0.45607963", "0.4555738", "0.4553366", "0.4550541", "0.45473", "0.45426768", "0.45426017" ]
0.6702093
0
Create a plane through a given point with given normal and surface material
def __init__(self, point, normal, material):
    self.point = point
    self.norm = unit(normal)
    self.mat = material
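The document field above is only a constructor: it references a `unit` helper and an enclosing class that the record does not show. Below is a minimal, self-contained Python sketch of how such a plane primitive might be assembled around it; the `Plane` class name, the `unit` helper, and the `signed_distance` method are illustrative assumptions added here, not part of the record.

import numpy as np

def unit(v):
    # Assumed helper (not shown in the record): return v scaled to unit length.
    v = np.asarray(v, dtype=float)
    return v / np.linalg.norm(v)

class Plane:
    # Plane through `point` with normal `normal` and a surface `material`.
    def __init__(self, point, normal, material):
        self.point = np.asarray(point, dtype=float)
        self.norm = unit(normal)   # store the normal as a unit vector
        self.mat = material

    def signed_distance(self, p):
        # Positive on the side the normal points toward, negative behind the plane.
        return float(np.dot(np.asarray(p, dtype=float) - self.point, self.norm))

# Example usage with illustrative values:
ground = Plane(point=(0, 0, 0), normal=(0, 0, 1), material="lightgray")
print(ground.signed_distance((1.0, 2.0, 3.0)))  # -> 3.0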
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def project_point_plane(point, plane):\n base, normal = plane\n normal = normalize_vector(normal)\n vector = subtract_vectors(point, base)\n snormal = scale_vector(normal, dot_vectors(vector, normal))\n return subtract_vectors(point, snormal)", "def plot_plane(unit_normal, x_array, y_array, fore):\n # print'unit normal = ', unit_normal\n z = (((unit_normal[0] * (fore[0] - x_array)) + (unit_normal[1] * (fore[1] - y_array))) / unit_normal[2]) + fore[2]\n # print 'plane numbers\\n', z\n return z", "def plane(self):\r\n from lsst.analysis import utils\r\n return utils.fitplane(self.points, self.z)", "def mirror_point_to_plane(point, plane):\n assert isinstance(plane, cg3d_plane.CGPlane)\n pn, norm = plane.get_point_and_normal()\n norm.normalize()\n return point - 2.0 * ((point - pn) * norm) * norm", "def get_plane_of_points(\n self,\n normal_vector=\"z\",\n planar_coordinate=None,\n ):\n # Get results vectors\n if (normal_vector == \"z\"):\n x_flat = self.floris.grid.x_sorted_inertial_frame[0, 0].flatten()\n y_flat = self.floris.grid.y_sorted_inertial_frame[0, 0].flatten()\n z_flat = self.floris.grid.z_sorted_inertial_frame[0, 0].flatten()\n else:\n x_flat = self.floris.grid.x_sorted[0, 0].flatten()\n y_flat = self.floris.grid.y_sorted[0, 0].flatten()\n z_flat = self.floris.grid.z_sorted[0, 0].flatten()\n u_flat = self.floris.flow_field.u_sorted[0, 0].flatten()\n v_flat = self.floris.flow_field.v_sorted[0, 0].flatten()\n w_flat = self.floris.flow_field.w_sorted[0, 0].flatten()\n\n # Create a df of these\n if normal_vector == \"z\":\n df = pd.DataFrame(\n {\n \"x1\": x_flat,\n \"x2\": y_flat,\n \"x3\": z_flat,\n \"u\": u_flat,\n \"v\": v_flat,\n \"w\": w_flat,\n }\n )\n if normal_vector == \"x\":\n df = pd.DataFrame(\n {\n \"x1\": y_flat,\n \"x2\": z_flat,\n \"x3\": x_flat,\n \"u\": u_flat,\n \"v\": v_flat,\n \"w\": w_flat,\n }\n )\n if normal_vector == \"y\":\n df = pd.DataFrame(\n {\n \"x1\": x_flat,\n \"x2\": z_flat,\n \"x3\": y_flat,\n \"u\": u_flat,\n \"v\": v_flat,\n \"w\": w_flat,\n }\n )\n\n # Subset to plane\n # TODO: Seems sloppy as need more than one plane in the z-direction for GCH\n if planar_coordinate is not None:\n df = df[np.isclose(df.x3, planar_coordinate)] # , atol=0.1, rtol=0.0)]\n\n # Drop duplicates\n # TODO is this still needed now that we setup a grid for just this plane?\n df = df.drop_duplicates()\n\n # Sort values of df to make sure plotting is acceptable\n df = df.sort_values([\"x2\", \"x1\"]).reset_index(drop=True)\n\n return df", "def plane(self):\n return plane(self.N, self.o)", "def plane_equation(point_a, point_b, point_c):\n v1 = np.subtract(point_a, point_c)\n v2 = np.subtract(point_a, point_b)\n normal = np.cross(v1, v2)\n # print 'b4 norm', normal\n unit_normal = norm_vect(normal)\n # print 'unityyy', unit_normal\n return unit_normal", "def get_plane(\n self,\n pos=None,\n norm=None,\n plane=None,\n sx=None,\n sy=None,\n color=\"lightgray\",\n alpha=0.25,\n **kwargs,\n ):\n axes_pairs = dict(sagittal=(0, 1), horizontal=(2, 0), frontal=(2, 1))\n\n if pos is None:\n pos = self.root._mesh.centerOfMass()\n\n try:\n norm = norm or self.space.plane_normals[plane]\n except KeyError: # pragma: no cover\n raise ValueError( # pragma: no cover\n f\"Could not find normals for plane {plane}. 
Atlas space provides these normals: {self.space.plane_normals}\" # pragma: no cover\n )\n\n # Get plane width and height\n idx_pair = (\n axes_pairs[plane]\n if plane is not None\n else axes_pairs[\"horizontal\"]\n )\n\n bounds = self.root.bounds()\n root_bounds = [\n [bounds[0], bounds[1]],\n [bounds[2], bounds[3]],\n [bounds[4], bounds[5]],\n ]\n\n wh = [float(np.diff(root_bounds[i])) for i in idx_pair]\n if sx is None:\n sx = wh[0]\n if sy is None:\n sy = wh[1]\n\n # return plane\n return Actor(\n Plane(pos=pos, normal=norm, sx=sx, sy=sy, c=color, alpha=alpha),\n name=f\"Plane at {pos} norm: {norm}\",\n br_class=\"plane\",\n )", "def plane(self):\n return Plane(Point(0, self.evaluations.exposedWing.edges[2].point1.y, 0), Vector(0, 1, 0),\n hidden=True)", "def create_surface_plane(align_to = None, axis = 'x', width = 0.5, freeze_tm = True):\n axis_dict = {'x': (1, 0, 0), 'y': (0, 1, 0), 'z': (0, 0, 1)}\n res = pm.nurbsPlane(\n axis = axis_dict[axis],\n width = width,\n degree = 1,\n constructionHistory = False\n )[0]\n if align_to is not None:\n transformation.align(res, align_to)\n if freeze_tm:\n transformation.freeze_transform(res)\n return res", "def __init__(self, normal, position, name=None, reflective=False):\n _Surface.__init__(self, name, reflective)\n self._normal = normalize(normal)\n self._position = np.array(position)", "def fit_plane(xyz,z_pos=None):\n mean = np.mean(xyz,axis=0)\n xyz_c = xyz - mean[None,:]\n l,v = np.linalg.eig(xyz_c.T.dot(xyz_c))\n abc = v[:,np.argmin(l)]\n d = -np.sum(abc*mean)\n # unit-norm the plane-normal:\n abcd = np.r_[abc,d]/np.linalg.norm(abc)\n # flip the normal direction:\n if z_pos is not None:\n if np.sum(abcd[:3]*z_pos) < 0.0:\n abcd *= -1\n return abcd", "def add_rectangular_plane(center_loc=(0, 0, 0), point_to=(0, 0, 1), size=(2, 2), name=None):\n center_loc = np.array(center_loc)\n point_to = np.array(point_to)\n size = np.append(np.array(size), 0)\n\n bpy.ops.mesh.primitive_plane_add(location=center_loc)\n\n plane_obj = bpy.context.object\n\n if name is not None:\n plane_obj.name = name\n\n plane_obj.dimensions = size\n\n # Point it to target\n direction = Vector(point_to) - plane_obj.location\n # Find quaternion that rotates plane's 'Z' so that it aligns with 'direction'\n # This rotation is not unique because the rotated plane can still rotate about direction vector\n # Specifying 'Y' gives the rotation quaternion with plane's 'Y' pointing up\n rot_quat = direction.to_track_quat('Z', 'Y')\n plane_obj.rotation_euler = rot_quat.to_euler()\n\n # Scene update necessary, as matrix_world is updated lazily\n bpy.context.scene.update()\n\n return plane_obj", "def _prepare_plane(self):\n verticies = [\n # main plane - note that the mainplane is scaled so the mat_plane\n # matrix will it transform to the correct coordinates\n -self.i_border[0]/self._scaling[0], self.i_border[1]/self._scaling[1],\n -self.i_border[0]/self._scaling[0], -(self.o_wh[1]-self.i_border[1])/self._scaling[1],\n (self.o_wh[0]-self.i_border[0])/self._scaling[0], -(self.o_wh[1]-self.i_border[1])/self._scaling[1],\n (self.o_wh[0]-self.i_border[0])/self._scaling[0], -(self.o_wh[1]-self.i_border[1])/self._scaling[1],\n (self.o_wh[0]-self.i_border[0])/self._scaling[0], self.i_border[1]/self._scaling[1],\n -self.i_border[0]/self._scaling[0], self.i_border[1]/self._scaling[1],\n\n # coord plane\n 0, 0,\n 0, -self.o_wh[1],\n self.o_wh[0], -self.o_wh[1],\n self.o_wh[0], -self.o_wh[1],\n self.o_wh[0], 0,\n 0, 0,\n\n # axes\n 0, -self.o_wh[1], self.o_wh[0], -self.o_wh[1], #x\n 0, 0, 0, 
-self.o_wh[1], #y\n ]\n\n colors = [\n 1.0, 1.0, 1.0, 1.0, # outer box XXX Remove outer box...\n 1.0, 1.0, 1.0, 1.0,\n 1.0, 1.0, 1.0, 1.0,\n 1.0, 1.0, 1.0, 1.0,\n 1.0, 1.0, 1.0, 1.0,\n 1.0, 1.0, 1.0, 1.0,\n .9, .9, .9, 9.0, # plot box\n .9, .9, .9, 9.0,\n .9, .9, .9, 9.0,\n .9, .9, .9, 9.0,\n .9, .9, .9, 9.0,\n .9, .9, .9, 9.0,\n 0.0, 0.0, 0.0, 1.0, #lines\n 0.0, 0.0, 0.0, 1.0,\n 0.0, 0.0, 0.0, 1.0,\n 0.0, 0.0, 0.0, 1.0,\n ]\n\n self._fonts = []\n for u in range(1, self._unit_count[0]+1):\n verticies.append(self._unit_w[0]*u)\n verticies.append(-self.o_wh[1]+0.02)\n verticies.append(self._unit_w[0]*u)\n verticies.append(-self.o_wh[1]-0.02)\n colors += [0.0, 0.0, 0.0, 1.0]\n colors += [0.0, 0.0, 0.0, 1.0]\n self._fonts.append([\n '{:.2f}'.format(u*(self.i_axis[0]/self._unit_count[0])-self.i_origin[0]),\n (self._unit_w[0]*u+self.i_border[0]-0.05)*self._scaling[0],\n (-self.o_wh[1]+(self.i_border[3])*0.5)\n ])\n for u in range(0, self._unit_count[1]):\n verticies.append(0.02)\n verticies.append(-self._unit_w[1]*u)\n verticies.append(-0.02)\n verticies.append(-self._unit_w[1]*u)\n colors += [0.0, 0.0, 0.0, 1.0]\n colors += [0.0, 0.0, 0.0, 1.0]\n self._fonts.append([\n '{:.2f}'.format(self.i_axis[1]-u*self.i_axis[1]/self._unit_count[1]-self.i_origin[1]),\n (0.025)*self._scaling[0],\n (-(self._unit_w[1])*u-self.i_border[1]+0.01)*self._scaling[1]\n ])\n\n self._draw_plane_indicies = (0, 12)\n self._draw_line_indicies = (12, 4+self._unit_count[0]*2+self._unit_count[1]*2)\n\n # convert data into valid data format\n verticies = numpy.array(verticies, dtype=numpy.float32)\n colors = numpy.array(colors, dtype=numpy.float32)\n\n self._plane_vao = util.VAO()\n self._plane_vbo = util.VBO(2)\n\n with self._plane_vao:\n # plane verticies\n with self._plane_vbo.get(0):\n glBufferData(GL_ARRAY_BUFFER, ArrayDatatype.arrayByteCount(verticies), verticies, GL_STATIC_DRAW)\n glVertexAttribPointer(self.plane_shader.attributeLocation('vertex_position'), 2, GL_FLOAT, GL_FALSE, 0, None)\n glEnableVertexAttribArray(0)\n\n # place vertex colors\n with self._plane_vbo.get(1):\n glBufferData(GL_ARRAY_BUFFER, ArrayDatatype.arrayByteCount(colors), colors, GL_STATIC_DRAW)\n glVertexAttribPointer(self.plane_shader.attributeLocation('vertex_color'), 4, GL_FLOAT, GL_FALSE, 0, None)\n glEnableVertexAttribArray(1)", "def plane(*args, length: float=0.0, name: AnyStr=\"\", position: List[float, float, float]=None,\n rotation: List[float, float, float]=None, size: float=0.0, width: float=0.0,\n **kwargs)->AnyStr:\n pass", "def point_and_plane_pose(plane_point, plane_orientation, points=None, xyz=None):\n vector = plane_orientation\n vector = vector / np.linalg.norm(vector)\n a = vector[0]\n b = vector[1]\n c = vector[2]\n\n d = -a * plane_point[0] - b * plane_point[1] - c * plane_point[2]\n\n if xyz is not None:\n xyz = np.asarray(xyz)\n if points.shape[0] != 3:\n logger.error(\n \"Wrong points shape. [3, N] expected, \" + str(points.shape) + \" given.\"\n )\n elif points is not None:\n points = np.asarray(points)\n if points.shape[1] != 3:\n logger.error(\n \"Wrong points shape. 
[N, 3] expected, \" + str(points.shape) + \" given.\"\n )\n xyz = points.T\n else:\n logger.error(\"points or xyz must be declared\")\n\n x, y, z = xyz\n z_out = (a * x + b * y + c * z + d) / (a ** 2 + b ** 2 + c ** 2) ** 0.5\n\n return z_out", "def _fit_plane_to_point_cloud(\n points_xyz: NDArrayFloat,\n) -> Tuple[float, float, float, float]:\n center_xyz: NDArrayFloat = np.mean(points_xyz, axis=0)\n out: Tuple[NDArrayFloat, NDArrayFloat, NDArrayFloat] = np.linalg.svd(\n points_xyz - center_xyz\n )\n vh = out[2]\n\n # Get the unitary normal vector\n a, b, c = float(vh[2, 0]), float(vh[2, 1]), float(vh[2, 2])\n d: float = -np.dot([a, b, c], center_xyz)\n return (a, b, c, d)", "def get_surface_normals_o3d(normals, points, scale=2):\n # total number of points:\n N = points.shape[0]\n\n points = np.vstack(\n (points.to_numpy(), points.to_numpy() + scale * normals)\n )\n lines = [[i, i+N] for i in range(N)]\n colors = np.zeros((N, 3)).tolist()\n\n # build pca line set:\n surface_normals_o3d = o3d.geometry.LineSet(\n points=o3d.utility.Vector3dVector(points),\n lines=o3d.utility.Vector2iVector(lines),\n )\n surface_normals_o3d.colors = o3d.utility.Vector3dVector(colors)\n\n return surface_normals_o3d", "def xyz2plane(x,y,z, new_x=[], plane=[], origin=None):\n # preliminary stuff\n if origin != None: x = x - origin\n a,b,c,d = plane\n bottom = np.sqrt(a*a + b*b + c*c) # normalize\n a,b,c,d = a/bottom, b/bottom, c/bottom, d/bottom\n px, py, pz = new_x\n bot = np.sqrt(px*px + py*py + pz*pz) #normalize\n px, py, pz = px/bot, py/bot, pz/bot\n p0 = [px,py,pz]\n # do rotation\n z_hat = [a,b,c]\n y_hat = cross(z_hat, p0)\n x_hat = cross(y_hat, z_hat)\n if type(x)==type(arr) or type(x)==type([]):\n xp, yp, zp = [], [], []\n for i in range(len(x)):\n xp.append(dot([x[i],y[i],z[i]], x_hat))\n yp.append(dot([x[i],y[i],z[i]], y_hat))\n zp.append(dot([x[i],y[i],z[i]], z_hat))\n else:\n xp = dot([x,y,z], x_hat)\n yp = dot([x,y,z], y_hat)\n zp = dot([x,y,z], z_hat)\n return xp, yp, zp", "def alpha_surface_reconstruction(pcd, alpha=10):\n road_mesh = o3d.geometry.TriangleMesh.create_from_point_cloud_alpha_shape(pcd, alpha)\n vertices = np.asarray(copy.deepcopy(road_mesh.vertices)) #flatten road mesh\n vertices[:,2] = 0\n flattend_mesh = copy.deepcopy(road_mesh)\n flattend_mesh.vertices = o3d.utility.Vector3dVector(vertices)\n return flattend_mesh, road_mesh", "def test_CoordinatePlane(self):\n origin = np.random.randn(3)\n normal = np.random.randn(3)\n up_vector = np.random.randn(3)\n plane = shapes_nd.Plane(origin, normal)\n cplane = shapes_3d.CoordinatePlane(origin, normal, up_vector)\n \n np.testing.assert_almost_equal(cplane.dim, plane.dim)\n np.testing.assert_almost_equal(cplane.origin, plane.origin)\n np.testing.assert_almost_equal(cplane.normal, plane.normal)\n \n p3 = [0, 1, 0]\n c, d = cplane.project_point(p3, ret_dist=True)\n np.testing.assert_almost_equal(p3, cplane.revert_projection(c, d))\n p3 = np.random.randn(5, 3)\n c, d = cplane.project_point(p3, ret_dist=True)\n np.testing.assert_almost_equal(p3, cplane.revert_projection(c, d))", "def __init__(self, t, point=None, normal=None, uv=None, material=None):\n self.t = t\n self.point = point\n self.normal = normal\n self.uv = uv\n self.material = material", "def addPlaneToScene(self, foot, x, y):\r\n #research\r\n profprint()\r\n scene = slicer.mrmlScene\r\n # Create model node\r\n model = slicer.vtkMRMLModelNode()\r\n model.SetScene(scene)\r\n model.SetName(scene.GenerateUniqueName(\".ObturatorPlane\"))\r\n\r\n planeSource = vtk.vtkPlaneSource()\r\n 
foot-=25*(x+y)\r\n #planeSource.SetOrigin(np.array(foot))\r\n planeSource.SetOrigin(list(foot))\r\n planeSource.SetPoint1(np.array(foot)+50*x)\r\n planeSource.SetPoint2(np.array(foot)+50*y)\r\n planeSource.Update()\r\n model.SetAndObservePolyData(planeSource.GetOutput())\r\n\r\n # Create display node\r\n modelDisplay = slicer.vtkMRMLModelDisplayNode()\r\n modelDisplay.SetColor(1,1,0) # yellow\r\n modelDisplay.SetBackfaceCulling(0)\r\n modelDisplay.SetScene(scene)\r\n scene.AddNode(modelDisplay)\r\n model.SetAndObserveDisplayNodeID(modelDisplay.GetID())\r\n\r\n # Add to scene\r\n scene.AddNode(model)\r\n # transform = slicer.vtkMRMLLinearTransformNode()\r\n # scene.AddNode(transform)\r\n # model.SetAndObserveTransformNodeID(transform.GetID())\r\n #\r\n # vTransform = vtk.vtkTransform()\r\n # vTransform.Scale(50,50,50)\r\n # #vTransform.RotateX(30)\r\n # transform.SetAndObserveMatrixTransformToParent(vTransform.GetMatrix())\r", "def hyperplane(self):\n origin = (self.a+self.b+self.c)/3.\n normal = np.cross(self.a-self.b, self.a-self.c)\n return Hyperplane(origin, normal)", "def draw_plane(env, transform, extents=(4,4), texture=None):\n if texture is None:\n texture = np.zeros((100,100,4))\n texture[:,:,1] = 0.2\n texture[:,:,2] = 0.2\n texture[:,:,3] = 0.2\n with env:\n h = env.drawplane(transform, extents=extents, texture=texture)\n return h", "def surface(*args, degreeU: int=0, degreeV: int=0, formU: AnyStr=\"\", formV: AnyStr=\"\", knotU:\n Union[float, List[float]]=0.0, knotV: Union[float, List[float]]=0.0, name:\n AnyStr=\"\", objectSpace: bool=True, point: Union[List[float, float, float],\n List[List[float, float, float]]]=None, pointWeight: Union[List[float, float, float,\n float], List[List[float, float, float, float]]]=None, worldSpace: bool=True,\n **kwargs)->AnyStr:\n pass", "def SetNormal(self, *args):\n return _itkSurfaceSpatialObjectPointPython.itkSurfaceSpatialObjectPoint3_SetNormal(self, *args)", "def SetPlaneMode(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_ConvertSurfaceToBezierBasis_SetPlaneMode(self, *args)", "def _update_surface_normals(self):\n\n # This is the case if there are too few points to\n # compute normals so there can be values to remove\n\n #can be important for parallel\n self.swarm.shadow_particles_fetch()\n\n if self.empty:\n self.director.data[...] 
= 0.0\n else:\n\n particle_coords = self.swarm.particleCoordinates.data\n\n Nx = np.empty(self.swarm.particleLocalCount)\n Ny = np.empty(self.swarm.particleLocalCount)\n Nz = np.empty(self.swarm.particleLocalCount)\n\n for i, xyz in enumerate(particle_coords):\n r, neighbours = self.kdtree.query(particle_coords[i], k=4)\n\n # this point is neighbour[0] and neighbour points are neighbours[(1,2,3)]\n XYZ1 = self.kdtree.data[neighbours[1]]\n XYZ2 = self.kdtree.data[neighbours[2]]\n XYZ3 = self.kdtree.data[neighbours[3]]\n\n dXYZ1 = XYZ2 - XYZ1\n dXYZ2 = XYZ3 - XYZ1\n\n # Cross product of those 2 vectors can be use as the local normal (perhaps)\n\n Nx[i], Ny[i], Nz[i] = np.cross(dXYZ1, dXYZ2)\n #if i == 0:\n # print(Nx, Ny, Nz)\n # print(xyz[0], xyz[1],xyz[2])\n # print((self.insidePt[0] - xyz[0]) * Nx[i] )\n\n if (self.insidePt):\n sign = np.sign( (self.insidePt[0] - xyz[0]) * Nx[i] +\n (self.insidePt[1] - xyz[1]) * Ny[i] +\n (self.insidePt[2] - xyz[2]) * Nz[i] )\n Nx[i] *= sign\n Ny[i] *= sign\n Nz[i] *= sign\n\n\n for i in range(0, self.swarm.particleLocalCount):\n scale = 1.0 / np.sqrt(Nx[i]**2 + Ny[i]**2 + Nz[i]**2)\n Nx[i] *= scale\n Ny[i] *= scale\n Nz[i] *= scale\n\n\n self.director.data[:,0] = Nx[:]\n self.director.data[:,1] = Ny[:]\n self.director.data[:,2] = Nz[:]\n\n print(\"Surf Norms\")\n\n return", "def test_create(self):\n f = azplugins.restrain.plane(group=hoomd.group.all(), point=(0,0,0), normal=(1,0,0), k=2.0)\n\n f.set_params(k=5.0)\n f.set_params(k=8)\n\n f.set_params(point=(0,0,1))\n f.set_params(point=[0,0,1])\n f.set_params(point=np.array([0,0,1]))\n\n f.set_params(normal=(0,0,1))\n f.set_params(normal=[0,0,1])\n f.set_params(normal=np.array([0,0,1]))\n\n f.set_params(point=(0,0,0), normal=(1,0,0), k=10.0)", "def GetPlane(plane):\r\n pass", "def project_onto_plane(vect):\n x, y, z = vect\n \n return (x, y, 0.)", "def closest_point_on_plane(point, plane):\n base, normal = plane\n x, y, z = base\n a, b, c = normalize_vector(normal)\n x1, y1, z1 = point\n d = a * x + b * y + c * z\n k = (a * x1 + b * y1 + c * z1 - d) / (a**2 + b**2 + c**2)\n return [x1 - k * a,\n y1 - k * b,\n z1 - k * c]", "def fit_plane_to_points(points: np.ndarray, eps: float=1.0e-5):\n # Compute plane origin and subract it from the points array.\n plane_origin = np.mean(points, axis=0)\n x = points - plane_origin\n\n # Dot product to yield a 3x3 array.\n moment = np.dot(x.T, x)\n\n # Extract single values from SVD computation to get normal.\n plane_normal = np.linalg.svd(moment)[0][:,-1]\n small = np.where(np.abs(plane_normal) < eps)\n plane_normal[small] = 0.0\n plane_normal /= np.linalg.norm(plane_normal)\n if (plane_normal[-1] < 0.0):\n plane_normal *= -1.0\n\n return (plane_normal, plane_origin)", "def create_plane(self):\n\n # First we calculate our point increment for both the x and y values\n inc_x = (self.xmax - self.xmin)/(self.xlen - 1)\n inc_y = (self.ymax - self.ymin)/(self.ylen - 1)\n\n # This for-loop will add every x-value with every y-value, saving the values column wise\n # i.e. (-10,-10), (-10,-9), (-10.-8),...,(-10,n) for n = our y-values.\n # store these combinations into a list, and add that to our plane. 
\n # The nested loop will then traverse again and will get the combinations for the next x-value.\n # The loop will continue until all x-values and y-value combinations are added to our plane.\n for y in range(0, self.ylen + 1):\n temp_list = []\n for x in range(0, self.xlen + 1):\n temp_list.append(self.f((self.xmin + x*inc_x) + (self.ymin + y*inc_y)*1j))\n self.plane.append(temp_list)", "def invert_point_on_plane(point, plane):\n _, _, proj = project_point_to_plane(point, plane)\n\n u, v = proj[0][1]\n return u, v", "def SetNormal(self, *args):\n return _itkSurfaceSpatialObjectPointPython.itkSurfaceSpatialObjectPoint2_SetNormal(self, *args)", "def test_point_on_plane(self, point, plane):\n _dist = point.dot(plane[:3]) + plane[3]\n if _dist <= epsilon:\n print('OK => point on plane')\n else:\n print('NO => point not on plane')", "def mesh_slicer(self, plane, opt):\n\n # get plane coefficients\n a = plane[0]\n b = plane[1]\n c = plane[2]\n\n # create vtk plane object\n VTKplane = vtk.vtkPlane()\n # for now we choose the center point as the point of rotation\n VTKplane.SetOrigin(self.mesh_poly.GetCenter())\n VTKplane.SetNormal(a, b, c)\n VTKplane.SetOrigin(self.epi_apex_node)\n\n # create cutter\n cutEdges = vtk.vtkCutter()\n cutEdges.SetInputData(self.mesh_poly)\n cutEdges.SetCutFunction(VTKplane)\n cutEdges.GenerateCutScalarsOn()\n cutEdges.SetValue(0, 0.5)\n\n # create renderer\n ren = vtk.vtkRenderer()\n ren.SetBackground(0.0, 0.0, 0.0)\n\n # create mapper\n mapper = vtk.vtkPolyDataMapper()\n mapper.SetInputConnection(cutEdges.GetOutputPort())\n\n # create actor\n actor = vtk.vtkActor()\n actor.SetMapper(mapper)\n actor.GetProperty().SetColor(0.0, 0.0, 1.0)\n actor.GetProperty().SetLineWidth(2)\n\n # display apex point\n apexA = include_points(list(self.epi_apex_node), 1, 15, (0, 0, 1))\n\n if (opt == 'mesh'):\n meshMapper = vtk.vtkPolyDataMapper()\n meshMapper.SetInputData(self.mesh_poly)\n meshActor = vtk.vtkActor()\n meshActor.SetMapper(meshMapper)\n meshActor.GetProperty().SetColor(1.0, 0.0, 0.0)\n\n # generate renderer\n ren.AddActor(self.meshActor)\n ren.AddActor(actor)\n ren.AddActor(apexA)\n\n else:\n ren.AddActor(actor)\n ren.AddActor(apexA)\n\n # display\n vtk_show(ren)", "def proj_to_plane(norm, d, pts):\n a = norm[0]\n b = norm[1]\n c = norm[2]\n\n p = []\n\n for i in range(len(pts)):\n x_p = pts[i][0]\n y_p = pts[i][1]\n z_p = pts[i][2]\n\n if a != 0:\n x_0 = (b * b + c * c) * x_p - a * b * y_p - a * c * z_p - a * d\n y_0 = (b * 1.0 / a) * (x_0 - x_p) + y_p\n z_0 = (c * 1.0 / a) * (x_0 - x_p) + z_p\n\n elif b != 0:\n x_0 = x_p \n y_0 = c * c * y_p - b * (d + c)\n z_0 = (c * 1.0 / b) *(y_0 - y_p) + z_p\n\n else:\n x_0 = x_p\n y_0 = y_p\n z_0 = - d * 1.0 / c\n\n p.append([x_0, y_0, z_0])\n \n return p", "def proj_to_plane(norm, d, pts):\n a = norm[0]\n b = norm[1]\n c = norm[2]\n\n p = []\n\n for i in range(len(pts)):\n x_p = pts[i][0]\n y_p = pts[i][1]\n z_p = pts[i][2]\n\n if a != 0:\n x_0 = (b * b + c * c) * x_p - a * b * y_p - a * c * z_p - a * d\n y_0 = (b * 1.0 / a) * (x_0 - x_p) + y_p\n z_0 = (c * 1.0 / a) * (x_0 - x_p) + z_p\n\n elif b != 0:\n x_0 = x_p \n y_0 = c * c * y_p - b * (d + c)\n z_0 = (c * 1.0 / b) *(y_0 - y_p) + z_p\n\n else:\n x_0 = x_p\n y_0 = y_p\n z_0 = - d * 1.0 / c\n\n p.append([x_0, y_0, z_0])\n \n return p", "def __init__(self, obj, plane):\n self.obj = obj\n self.plane = plane\n self.plane_origin = plane[0]\n self.plane_normal = plane[1]\n self.distance_from_plane = np.dot((self.obj.vectors-self.plane[0]),\n self.plane[1])\n self.slice_points = 
[]\n\n self.calculate_points()\n \n return None", "def _generate_random_points_in_plane(nvect, dparam, npts, eps=0.0):\n np.random.seed(12345)\n a, b, c = nvect / np.linalg.norm(nvect)\n x, y = np.random.rand(npts), np.random.rand(npts)\n z = (dparam - a * x - b * y) / c\n if eps > 0:\n z += np.random.normal(loc=0., scale=eps, size=npts)\n return np.column_stack((x, y, z))", "def project_onto_plane(self,z):\n U=self.U\n Q=self.Q_p\n #print(((z-Q[-2,:,[2]])/P[-2,:,[2]]).T)\n #print(P[-2])\n return ((z-Q[-2,:,[2]])/U[-2,:,[2]]).T*U[-2]+Q[-2]", "def plane_distance(p, plane):\n x, y, z = p\n A, B, C, D = plane\n return A*x + B*y + C*z + D", "def surfcut_points(**kwargs):\n npoints = kwargs.get( 'npoints', 240 )\n origin = kwargs.get( 'origin', vec3(0.,0.,0.)) \n normal = kwargs.get( 'normal', (np.pi/2., 0.) ) \n lims0 = kwargs.get( 'lims0', (-50., 50.) ) \n lims1 = kwargs.get( 'lims1', (-50., 50.) ) \n extents = kwargs.get( 'extents', None) \n \n if extents is not None:\n lims0 = (-extents, extents)\n lims1 = (-extents, extents)\n \n # Make the unit vectors that define the plane\n unit = vec3()\n th = normal[0]\n ph = normal[1]\n unit.set_spherical( 1, th, ph) \n orth0 = vec3( -1.*np.sin(ph), np.cos(ph), 0. )\n orth1 = cross(unit,orth0)\n \n t0 = np.linspace( lims0[0], lims0[1], npoints )\n t1 = np.linspace( lims1[0], lims1[1], npoints ) \n \n # Obtain points on which function will be evaluated\n T0,T1 = np.meshgrid(t0,t1)\n X = origin[0] + T0*orth0[0] + T1*orth1[0] \n Y = origin[1] + T0*orth0[1] + T1*orth1[1]\n Z = origin[2] + T0*orth0[2] + T1*orth1[2] \n \n\n # If given an axes it will plot the reference surface to help visusalize\n # the surface cut\n \n # Note that the axes needs to be created with a 3d projection. \n # For example: \n # fig = plt.figure( figsize=(4.,4.) 
) \n # gs = matplotlib.gridspec.GridSpec( 1,1 ) \n # ax0 = fig.add_subplot( gs[0,0], projection='3d' ) \n \n ax0 = kwargs.get( 'ax0', None ) \n if ax0 is not None: \n\n # Plot the reference surface\n ax0.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3, linewidth=0.)\n ax0.set_xlabel('X')\n ax0.set_ylabel('Y')\n ax0.set_zlabel('Z')\n lmin = min([ ax0.get_xlim()[0], ax0.get_ylim()[0], ax0.get_zlim()[0] ] )\n lmax = max([ ax0.get_xlim()[1], ax0.get_ylim()[1], ax0.get_zlim()[1] ] )\n ax0.set_xlim( lmin, lmax )\n ax0.set_ylim( lmin, lmax )\n ax0.set_zlim( lmin, lmax )\n ax0.set_yticklabels([])\n ax0.set_xticklabels([])\n ax0.set_zticklabels([])\n \n # If given an axes and a potential it will plot the surface cut of the \n # potential \n\n ax1 = kwargs.get( 'ax1', None) \n pot = kwargs.get( 'potential', None) \n\n if (ax1 is not None) and (pot is not None):\n # Evaluate function at points and plot\n EVAL = pot.evalpotential(X,Y,Z)\n\n im =ax1.pcolormesh(T0, T1, EVAL, cmap = plt.get_cmap('jet')) \n # cmaps: rainbow, jet\n\n plt.axes( ax1)\n cbar = plt.colorbar(im)\n cbar.set_label(pot.unitlabel, rotation=0 )#self.unitlabel\n \n return T0, T1, X, Y, Z", "def get_projection_point(self, point, plane, test=False):\n return point_on_plane_projection(point, plane, test=test)", "def rotate_to_xz_plane(self, point):\n if len(point) == 2:\n x, z = point\n else:\n x, y, z = point\n if y != 0.0:\n x = np.sqrt(x**2 + y**2)\n return abs(x), z", "def plane_from_multiple_points(pnts: Iterable[Point]) -> Plane:\n n = len(pnts)\n x = [pnt.x for pnt in pnts]\n y = [pnt.y for pnt in pnts]\n z = [pnt.z for pnt in pnts]\n pntc = Point(sum(x)/n, sum(y)/n, sum(z)/n)\n x = [pnt.x-pntc.x for pnt in pnts]\n y = [pnt.y-pntc.y for pnt in pnts]\n z = [pnt.z-pntc.z for pnt in pnts]\n sxx = sum([x[i]**2 for i in range(n)])\n sxy = sum([x[i]*y[i] for i in range(n)])\n sxz = sum([x[i]*z[i] for i in range(n)])\n syy = sum([y[i]**2 for i in range(n)])\n syz = sum([y[i]*z[i] for i in range(n)])\n d = sxx*syy-sxy**2\n a = (syz*sxy-sxz*syy)/d\n b = (sxy*sxz-sxx*syz)/d\n nrm = Vector(a, b, 1.0)\n return Plane(pntc, nrm)", "def from_3p(cls, a: Vector, b: Vector, c: Vector) -> 'Plane':\n n = (b - a).cross(c - a).normalize()\n return Plane(n, n.dot(a))", "def SetPlaneMode(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_ShapeConvertToBezier_SetPlaneMode(self, *args)", "def pointOnSurface(*args, caching: bool=True, constructionHistory: bool=True, nodeState:\n Union[int, bool]=0, normal: bool=True, normalizedNormal: bool=True,\n normalizedTangentU: bool=True, normalizedTangentV: bool=True, parameterU:\n Union[float, bool]=0.0, parameterV: Union[float, bool]=0.0, position:\n bool=True, tangentU: bool=True, tangentV: bool=True, turnOnPercentage:\n bool=False, q=True, query=True, e=True, edit=True,\n **kwargs)->Union[List[float3], Any]:\n pass", "def PlaneNormalVector(h, k, l):\r\n vec = np.array([h, k, l])\r\n return vec/np.linalg.norm(vec)", "def fit_plane_to_point_cloud(pc: np.ndarray) -> Tuple[Any, Any, Any, Any]:\n center = pc.sum(axis=0) / pc.shape[0]\n u, s, vh = np.linalg.svd(pc - center)\n\n # Get the unitary normal vector\n u_norm = vh[2, :]\n d = -np.dot(u_norm, center)\n a, b, c = u_norm\n return a, b, c, d", "def plot_sag_plane(self, P0=None, sag_pl=None):\n if P0 is None: P0 = np.array([0,0,0])\n if sag_pl is None: sag_pl = self.sp\n norm, d = sag_pl[:3], sag_pl[3]\n import matplotlib.pyplot as plt\n from mpl_toolkits.mplot3d import Axes3D\n plt.ion()\n # create x,y\n xypts = 10\n xrng = 300\n yrng = 130\n xrng_mesh = 
np.linspace(P0[0], P0[0]-xrng, xypts)\n yrng_mesh = np.linspace(P0[1]-yrng/2., P0[1]+yrng, xypts)\n xx, yy = np.meshgrid(xrng_mesh, yrng_mesh)\n # calculate corresponding z\n zz = -1 * (norm[0] * xx + norm[1] * yy + d) / norm[2]\n # plot the surface\n self.fig = plt.figure()\n self.fig_ax = self.fig.add_subplot(111, projection='3d')\n self.fig_ax.plot_wireframe(xx, yy, zz, color='gray')\n #ax.quiver(P0[0], P0[1], norm[0], norm[1])\n self.fig_ax.set_xlabel('X')\n self.fig_ax.set_ylabel('Y')\n self.fig_ax.set_zlabel('Z')\n self.fig_ax.set_zlim(P0[2]-xrng, P0[2]+yrng)\n plt.show()", "def project_point(self, point: array_like) -> Point:\n # Vector from the point in space to the point on the plane.\n vector_to_plane = Vector.from_points(point, self.point)\n\n # Perpendicular vector from the point in space to the plane.\n vector_projected = self.normal.project_vector(vector_to_plane)\n\n return Point(point) + vector_projected", "def polyPlane(*args, axis: Union[List[float, float, float], bool]=None, createUVs: Union[int,\n bool]=1, height: Union[float, bool]=1.0, subdivisionsHeight: Union[int, bool]=0,\n subdivisionsWidth: Union[int, bool]=10, subdivisionsX: Union[int, bool]=5,\n subdivisionsY: Union[int, bool]=5, texture: Union[int, bool]=1, width:\n Union[float, bool]=1.0, caching: bool=True, constructionHistory: bool=True, name:\n AnyStr=\"\", nodeState: Union[int, bool]=0, object: bool=True, q=True, query=True,\n e=True, edit=True, **kwargs)->Union[List[AnyStr], Any]:\n pass", "def normal(self, point):\n return self._normal.dup()", "def distance_point_plane(point, plane):\n base, normal = plane\n vector = subtract_vectors(point, base)\n return fabs(dot_vectors(vector, normal))", "def test_surface_normal(self):\n vertices = np.array([[0, 1, 0], [0, 0, 0], [1, 0, 0]])\n expected = np.array([0, 0, 1])\n np.testing.assert_almost_equal(surface_normal(vertices), expected)\n\n # Test against multiple triangles\n vertices = np.r_[vertices[np.newaxis, :, :], [[[0, 0, 0], [0, 2, 0], [2, 0, 0]]]]\n expected = np.array([[0, 0, 1], [0, 0, -1]])\n np.testing.assert_almost_equal(surface_normal(vertices), expected)\n\n # Some real data\n vertices = np.array([[2.435, -1.82, -0.53], [2.635, -2., -0.58], [2.535, -1.7, -0.58]])\n expected = np.array([0.33424239, 0.11141413, 0.93587869])\n np.testing.assert_almost_equal(surface_normal(vertices), expected)\n\n # Test input validation\n self.assertRaises(ValueError, surface_normal, np.array([[1, 2, 3, 4]]))", "def make_inward_normal(tetrahedron):\n\n convert_to_np_array = lambda v: np.array([v.x, v.y, v.z])\n np_vertices = list(map(convert_to_np_array, [tetrahedron.get_vertex(i) for i in range(4)]))\n # This is the middle point\n # midpoint = np.mean(np_vertices, axis=0)\n\n midpoint = np_vertices[0]\n for i in range(1, 4):\n midpoint += np_vertices[i]\n midpoint = midpoint / 2.0\n\n for i in range(4):\n face = tetrahedron.get_face(i)\n d = distance(face, midpoint)\n if d < 0:\n face.nx *= -1.0\n face.ny *= -1.0\n face.nz *= -1.0\n face.d *= -1.0", "def project_3d_points_to_plane(points, p1, p2 ,p3, numpoints):\n p1 = np.asarray(p1)\n p2 = np.asarray(p2)\n p3 = np.asarray(p3)\n\n # get vectors in plane\n v1 = p3 - p1\n v2 = p2 - p1\n\n # compute cross product\n cp = np.cross(v1, v2)\n a, b, c = cp # normal to plane is ax + by + cz\n\n # evaluate d\n d = np.dot(cp, p3)\n\n # thus, normal is given by\n plane = vtk.vtkPlane()\n origin = p1\n normal = normalize(np.array([a,b,c]))\n plane.SetOrigin(p1)\n plane.SetNormal(normal)\n\n if numpoints == 1:\n proj = [0,0,0]\n 
plane.ProjectPoint(points, origin, normal, proj)\n return proj\n else:\n projected_pts = np.zeros((numpoints, 3), dtype=float)\n\n for i in range(numpoints):\n proj = [0,0,0]\n plane.ProjectPoint(points[i], origin, normal, proj)\n projected_pts[i] = proj\n\n return projected_pts", "def get_normal_vector_of_plane(p1, p2, p3):\n v12 = np.array(p1) - np.array(p2)\n v13 = np.array(p1) - np.array(p3)\n nvec = np.cross(v12, v13)\n ## print 'norm: '+str(np.linalg.norm(nvec))\n return nvec / np.linalg.norm(nvec)", "def get_plane(self, scalar, plane, pval):\n\n if plane == 'yz' or plane == 'zy':\n # z along rows, y along columns\n return scalar[:, pval, :]\n elif plane == 'xz' or plane == 'zx':\n # x along columns, z along rows\n return scalar[:, :, pval]\n elif plane == 'xy' or plane == 'yx':\n # x along rows, y along columns\n return scalar[pval, :, :]", "def plane_sphere(p, s):\n\n p.normalize()\n\n d = dot(s.o-p.o, p.n)\n\n if d > s.r:\n return False\n else:\n return (s.o - d*p.n, sqrt(s.r*s.r - d*d))", "def ProjectToPlane(self):\n\n self.__do_essential_memebers_exist__()\n if self.element_type != \"tri\":\n raise ValueError(\"Project to plane is only applicable to triangles\")\n\n imesh = deepcopy(self)\n coordinates = []\n connectivities = []\n for counter, elem in enumerate(imesh.elements):\n\n elementCoordinates = imesh.points[elem,:]\n\n A = elementCoordinates[0,:]\n B = elementCoordinates[1,:]\n C = elementCoordinates[2,:]\n\n X = (B - A); X /= np.linalg.norm(X)\n Z = np.cross(X, C - A); Z /= np.linalg.norm(Z)\n Y = np.cross(Z, X)\n\n # PROJECT THE TRIANGLE TO THIS BASES\n a = [0., 0.]\n b = [np.linalg.norm((B - A)), 0.]\n c = [(C - A).dot(X), (C - A).dot(Y)]\n\n coordinates.append(a)\n coordinates.append(b)\n coordinates.append(c)\n\n elementConnectivity = [3 * counter, 3 * counter + 1, 3 * counter + 2]\n connectivities.append(elementConnectivity)\n\n coordinates = np.array(coordinates)\n connectivities = np.array(connectivities)\n imesh.points = coordinates\n imesh.elements = connectivities\n imesh.nelem = imesh.elements.shape[0]\n imesh.nnode = imesh.points.shape[0]\n\n return imesh", "def __copy__(self) -> 'Plane':\n return self.__class__(self._normal, self._distance_from_origin)", "def normal(self) -> Vec:\n # The three points are in clockwise order, so compute differences\n # in the clockwise direction, then cross to get the normal.\n point_1 = self.planes[1] - self.planes[0]\n point_2 = self.planes[2] - self.planes[1]\n\n return Vec.cross(point_1, point_2).norm()", "def add_planet(self,x,y,z):\n\t\tself.ob.append(vis.sphere(x=x, y=y, z=z, radius = 0.1))", "def WritePlane(self):\n if not self.__train:\n print('ERROR: Must use Train before WritePlane')\n sys.exit(-1)\n if not self.__openPlaneO:\n print('ERROR: Must use OpenPlaneO before WritePlane')\n sys.exit(-1)\n\n # Defines angular dimensions\n self.__nc_RSoft_O.createDimension('type', self.__n_type)\n\n # Defines variables\n if self.__containsRadial:\n rad_plane_id = self.__nc_RSoft_O.createVariable(\\\n 'radial_plane', 'f4', \\\n ('type','radial_structure_functions'))\n rad_plane_id[:] = self.radial_plane\n if self.__containsAngular:\n ang_plane_id = self.__nc_RSoft_O.createVariable(\\\n 'angular_plane', 'f4', \\\n ('type','angular_structure_functions'))\n ang_plane_id[:] = self.angular_plane\n intercept_id_O = self.__nc_RSoft_O.createVariable(\\\n 'intercept', 'f4', ('type'))\n intercept_id_O[:] = self.intercept", "def normal(self, point):\n point = self._center - np.array(point)\n # if abs(point.dot(point) - self._radius**2) 
> 1e-15:\n # raise RayTraceError(\n # 'Cannot compute normal. Point is too far from surface ({}).'.format(\n # (abs(point.dot(point) - self._radius**2))))\n return normalize(point / self._radius)", "def surface_norm(self, pt):\n\n return self.normal.normalize()", "def normal(axis_direction, axis_origin, point):\n # transform input into numpy arrays\n axis_direction = np.array(axis_direction, float)\n axis_origin = np.array(axis_origin, float)\n point = np.array(point, float)\n\n # vector from axis normal_origin to point\n vector = point - axis_origin\n\n # projection of vector on axis\n projection = np.dot(vector, axis_direction)*axis_direction\n\n # the normal vector from normal_origin to point\n normal_direction = vector - projection\n\n # normalized normal_direction\n normal_direction = normal_direction/np.linalg.norm(normal_direction)\n\n # opposite of the projection of vector on normal\n projection2 = - np.dot(normal_direction, vector)*normal_direction\n\n normal_origin = point + projection2\n\n return normal_direction, normal_origin", "def __init__(self,\n r = 1.0,\n normal = Vector(0.0,1.0,0.0),\n origin = Vector(0.0,0.0,0.0),\n orientation = Vector(1.0,0.0,0.0),\n c1 = Color(0.01,0.01,0.01),\n c2 = Color(0.99,0.99,0.99)):\n \n CheckPlane.__init__(self, normal, origin, orientation, c1, c2)\n self.origin = origin\n self.set_orientation(orientation)\n self.r = r\n self.R = r ** 2.0", "def plane_from_points(a, b, c):\n ab = subtract_vectors(b, a)\n ac = subtract_vectors(c, a)\n n = normalize_vector(cross_vectors(ab, ac))\n return a, n", "def sample(self, z_position):\n if self.direction[2] != 0:\n z_plane = PlaneSurface([0, 0, 1], [0, 0, z_position])\n if (z_plane.position() - self.position).dot(self.direction) > 0:\n ray = self\n else:\n ray = Ray(-self.direction, self.position)\n return ray.propagate(z_plane.time_to_bound(ray)).position", "def xzplane(draw, r, y, shift = np.array([1000, 1000, 0, 0]), scale = 300):\n extent = 2.8\n pln = np.array(\n [\n [-extent,y,0],\n [extent,y,0],\n [extent,y,extent*2],\n [-extent,y,extent*2]\n ]\n )\n pln = np.dot(pln, np.transpose(r))\n pln = pln * scale + shift[:3]\n draw.polygon([(pln[0][0],pln[0][1]),(pln[1][0],pln[1][1]),(pln[2][0],pln[2][1]),(pln[3][0],pln[3][1])], (0,102,255,70))", "def invert_plane_stress(normal, slip, weights=None):\n nx, ny, nz = normal\n sx, sy, sz = slip\n\n Gx = np.zeros((nx.size, 3))\n Gx[:,0] = nx - nx**3 + nx*nz**2 \n Gx[:,1] = ny - 2*ny*nx**2\n Gx[:,2] = -nx*ny**2 + nx*nz**2\n \n Gy = np.zeros((nx.size, 3))\n Gy[:,0] = -ny*nx**2 + ny*nz**2\n Gy[:,1] = nx - 2*nx*ny**2\n Gy[:,2] = ny - ny**3 + ny*nz**2\n \n Gz = np.zeros((nx.size, 3))\n Gz[:,0] = -nz*nx**2 - nz + nz**3\n Gz[:,1] = -2*nx*ny*nz\n Gz[:,2] = -ny**2*nz - nz + nz**3\n\n\n G = np.vstack((Gx, Gy, Gz)).T\n d = np.hstack([sx,sy,sz])\n\n if weights is not None:\n weights = np.tile(weights, 3)\n G *= weights\n d *= weights\n\n m, residual, rank, sing_vals = np.linalg.lstsq(G.T,d.T)\n\n s11, s12, s22 = m\n s33 = -(s11 + s22)\n\n sigma = np.array([[s11, s12, 0.0],\n [s12, s22, 0.0],\n [0.0, 0.0, s33]])\n return sigma", "def normal_at(self, world_point: Point) -> Vector:\n # Convert the world point and normal to shape space\n local_point = self.world_to_object(world_point)\n # Calculate the normal in shape space\n local_normal = self.local_normal_at(local_point)\n # Convert the local normal vector back to world space\n return self.normal_to_world(local_normal)", "def plane_equation(p1, p2, p3):\n a1 = p2[0] - p1[0]\n b1 = p2[1] - p1[1]\n c1 = p2[2] - p1[2]\n a2 = 
p3[0] - p1[0]\n b2 = p3[1] - p1[1]\n c2 = p3[2] - p1[2]\n a = b1 * c2 - b2 * c1\n b = a2 * c1 - a1 * c2\n c = a1 * b2 - b1 * a2\n # Points are collinear\n if (abs(a) < 1e-6) and (abs(b) < 1e-6) and (abs(c) < 1e-6):\n return None\n # All clear\n d = (- a * p1[0] - b * p1[1] - c * p1[2])\n return a, b, c, d", "def from_vector(cls, vector) -> 'Plane':\n v = Vector(vector)\n return Plane(v.normalize(), v.magnitude)", "def planes_3d(self, quantity, xplane, yplane):\n xplane = int(xplane)\n yplane = int(yplane)\n # Get the scalar values\n # Get the data on the plane with a fixed x value. These means we'll\n # have changing (y, z) points\n xdata = self.get_plane(quantity, 'yz', xplane)\n # z first cuz we want y to be changing before z to correspond with the\n # way numpy flattens arrays. Note this means y points will be in the\n # 2nd column\n xplanepoints = np.array(list(itertools.product(self.Z, self.Y)))\n xdata = xdata.flatten()\n xplanexval = np.array(list(itertools.repeat(x[xplane], len(xdata))))\n xplanedata = np.zeros((xplanepoints.shape[0], 4))\n xplanedata[:, 0] = xplanexval\n xplanedata[:, 1] = xplanepoints[:, 1]\n xplanedata[:, 2] = xplanepoints[:, 0]\n xplanedata[:, 3] = xdata\n # Same procedure for fixed y plane\n ydata = self.get_plane(quantity, 'xz', yplane)\n yplanepoints = np.array(list(itertools.product(z, x)))\n ydata = ydata.flatten()\n yplaneyval = np.array(list(itertools.repeat(y[yplane], len(ydata))))\n yplanedata = np.zeros((yplanepoints.shape[0], 4))\n yplanedata[:, 0] = yplanepoints[:, 1]\n yplanedata[:, 1] = yplaneyval\n yplanedata[:, 2] = yplanepoints[:, 0]\n yplanedata[:, 3] = ydata\n labels = ('X [um]', 'Y [um]', 'Z [um]', quantity)\n # Now stack them vertically and plot!\n all_data = np.vstack((xplanedata, yplanedata))\n self.scatter3d(all_data[:, 0], all_data[:, 1], all_data[:, 2],\n all_data[:, 3], labels, 'planes_3d')", "def build_coord(norm, d, pts):\n # Compute the origin as the mean point of the points, and this point has to be on the plane\n \n n = len(pts) \n x_total = 0\n y_total = 0\n z_total = 0\n \n for i in range(n):\n x_total += pts[i][0]\n y_total += pts[i][1]\n z_total += pts[i][2]\n\n x_o = x_total * 1.0 / n\n y_o = y_total * 1.0 / n\n z_o = z_total * 1.0 / n\n p_o = [x_o, y_o, z_o]\n \n # Choose p be the projection of a vector in the z-axis to the plane\n # If the plane is not perpendicular to the z-axis\n if ((norm[2] != 1) and (norm[2] != -1)): \n # Choose a point\n o_z = [x_o, y_o, z_o + 1]\n \n [[x_p, y_p, z_p]] = proj_to_plane(norm, d, [o_z])\n \n dist = np.linalg.norm([x_p - x_o, y_p - y_o, z_p - z_o])\n\n x_c = (x_p - x_o) * 1.0 / dist \n y_c = (y_p - y_o) * 1.0 / dist\n z_c = (z_p - z_o) * 1.0 / dist\n # Thus we have unit vector in x direction\n e_y = [x_c, y_c, z_c]\n #Compute the unit vector in y direction\n e_x = np.cross(e_y, norm).tolist()\n else:\n e_x = [1, 0, 0]\n e_y = [0, 1, 0]\n \n return [e_x, e_y, norm] , p_o", "def planarize(self):\r\n from lsst.analysis import utils\r\n assert numpy.isfinite(self.z).all()\r\n self.z -= utils.evalplane(self.plane(), self.points)", "def plot_surface(self):\n X, Y = np.meshgrid(self.x, self.y)\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.plot_surface(X=X, Y=Y, Z=self.z)\n plt.show()", "def build_coord(norm, d, pts):\n # Compute the origin as the mean point of the points, and this point has to be on the plane\n \n n = len(pts)\n x_total = 0\n y_total = 0\n z_total = 0\n \n for i in range(n):\n x_total += pts[i][0]\n y_total += pts[i][1]\n z_total += pts[i][2]\n\n x_o = 
x_total * 1.0 / n\n y_o = y_total * 1.0 / n\n z_o = z_total * 1.0 / n\n p_o = [x_o, y_o, z_o]\n \n # Choose p be the projection of a vector in the z-axis to the plane\n # If the plane is not perpendicular to the z-axis\n if ((norm[2] != 1) and (norm[2] != -1)): \n # Choose a point\n o_z = [x_o, y_o, z_o + 1]\n \n [[x_p, y_p, z_p]] = proj_to_plane(norm, d, [o_z])\n \n dist = np.linalg.norm([x_p - x_o, y_p - y_o, z_p - z_o])\n\n x_c = (x_p - x_o) * 1.0 / dist \n y_c = (y_p - y_o) * 1.0 / dist\n z_c = (z_p - z_o) * 1.0 / dist\n # Thus we have unit vector in x direction\n e_y = [x_c, y_c, z_c]\n #Compute the unit vector in y direction\n e_x = np.cross(e_y, norm).tolist()\n else:\n e_x = [1, 0, 0]\n e_y = [0, 1, 0]\n \n return [e_x, e_y, norm] , p_o", "def project_points_plane(points, plane):\n return [project_point_plane(point, plane) for point in points]", "def plot_projection(self, normal=(1, 1, 1), index_row=0, index_col=0, show=True, texture=None, cmap=\"jet\", f=None,\n plotter=None, title='', font_size=10, font_color='black'):\n if not plotter:\n plotter = pv.Plotter()\n plotter.subplot(index_column=index_col, index_row=index_row)\n\n plotter.add_text(title, position=\"upper_edge\", font_size=font_size, color=font_color)\n tex = None\n if texture:\n\n if isinstance(texture, np.ndarray):\n tex = pv.numpy_to_texture(texture)\n else:\n tex = pv.read_texture(texture)\n self.pv_mesh.texture_map_to_plane(inplace=True)\n # plotter.add_mesh(pv_mesh, texture=tex)\n\n og = self.pv_mesh.center\n projected = self.pv_mesh.project_points_to_plane(origin=og, normal=normal)\n projected.texture_map_to_plane()\n plotter.add_mesh(projected, texture=tex)\n if show:\n plotter.show()\n return plotter", "def drawVector3D(x0,y0,z0,x1,y1,z1, vtype='normal'):\n dislin.vectr3(x0,y0,z0,x1,y1,z1, vectordict[vtype])", "def surfaceRender(nodal_mesh, focus, ax=None):\n\t# If no axes were passed, generate new set of axes\n\tif not ax:\n\t\tfig = mplt.figure()\n\t\tax = fig.add_subplot(111, projection='3d')\n\n\t# Sort the mesh by first 3 columns\n\tnodal_mesh = nodal_mesh[nodal_mesh[:, 0].argsort()]\n\tnodal_mesh = nodal_mesh[nodal_mesh[:, 1].argsort(kind='mergesort')]\n\tnodal_mesh = nodal_mesh[nodal_mesh[:, 2].argsort(kind='mergesort')]\n\t\n\t# Set up number of divisions and calculate e for each division (as a ratio)\n\tnum_div = 20\n\te = [i/num_div for i in range(num_div + 1)]\n\t# Convert angular values from degrees to radians\n\trads = math.pi/180\n\tnodal_mesh[:, 1:3] *= rads\n\t# Store the shapes and sizes of the mesh values\n\tm = nodal_mesh.shape[0]\n\tsize_nodal_nu = np.where(nodal_mesh[:, 2] == 0)[0].size\n\tsize_nodal_phi = m/size_nodal_nu\n\t# Get the mu and theta values from the mesh\n\tnodal_nu = nodal_mesh[:size_nodal_nu, 1]\n\tnodal_phi = nodal_mesh[::size_nodal_nu, 2]\n\t# Convert apex node from prolate to cartesian, then plot with scatter\n\tif min(nodal_nu) == 0:\n\t\tx, y, z = mathhelper.prolate2cart(nodal_mesh[0, 0], nodal_mesh[0, 1], nodal_mesh[0, 2], focus)\n\t\tax.scatter(z, y, -x)\n\t\tstart_nu = 1\n\telse:\n\t\tstart_nu = 0\n\t# Plot circumferential element boundaries\n\tfor i in range(start_nu, size_nodal_nu):\n\t\tfor j in range(int(size_nodal_phi)):\n\t\t\t# Define nodal values for interpolation\n\t\t\tif j == size_nodal_phi-1:\n\t\t\t\tind0 = i\n\t\t\t\tp0 = 2*math.pi\n\t\t\telse:\n\t\t\t\tind0 = (j+1)*size_nodal_nu + i\n\t\t\t\tp0 = nodal_phi[j+1]\n\t\t\tind1 = (j)*size_nodal_nu + i\n\t\t\tp1 = nodal_phi[j]\n\t\t\t# Get mu and dM/dm1\n\t\t\tm0 = nodal_mesh[ind0, 0]\n\t\t\tdm0 = 
nodal_mesh[ind0, 3]\n\t\t\tm1 = nodal_mesh[ind1, 0]\n\t\t\tdm1 = nodal_mesh[ind1, 3]\n\t\t\t# Convert to cartesian\n\t\t\tn0x, n0y, n0z = mathhelper.prolate2cart(nodal_mesh[ind0, 0], nodal_mesh[ind0, 1], nodal_mesh[ind0, 2], focus)\n\t\t\t# Plot the node\n\t\t\tax.scatter(n0z, n0y, -n0x)\n\t\t\t# Plot the arc segments\n\t\t\tfor k in range(2, len(e)):\n\t\t\t\t# Determine starting point to use\n\t\t\t\tif k == 2:\n\t\t\t\t\tpt_x, pt_y, pt_z = n0x, n0y, n0z\n\t\t\t\telse:\n\t\t\t\t\tpt_x, pt_y, pt_z = x_here, y_here, z_here\n\t\t\t\t# Get lambda\n\t\t\t\thm0 = 1 - 3*(e[k]**2) + 2*(e[k]**3)\n\t\t\t\thdm0 = e[k]*(e[k] - 1)**2\n\t\t\t\thm1 = (e[k]**2)*(3 - 2*e[k])\n\t\t\t\thdm1 = (e[k]**2)*(e[k] - 1)\n\t\t\t\tm = hm0 * m0 + hdm0 * dm0 + hm1 * m1 + hdm1 * dm1\n\t\t\t\t# Get theta\n\t\t\t\tp_here = p0 - e[k]*(p0 - p1)\n\t\t\t\t# Convert to cartesian\n\t\t\t\tx_here, y_here, z_here = mathhelper.prolate2cart(m, nodal_nu[i], p_here, focus)\n\t\t\t\t# Create vectors\n\t\t\t\tx = np.append(pt_x, x_here)\n\t\t\t\ty = np.append(pt_y, y_here)\n\t\t\t\tz = np.append(pt_z, z_here)\n\t\t\t\t# Plot segments\n\t\t\t\tax.plot(z, y, -x, 'k-.')\n\t# Plot longitudinal element boundaries\n\tfor i in range(int(size_nodal_phi)):\n\t\tfor j in range(size_nodal_nu-1):\n\t\t\t# Define nodal values needeed for interpolation\n\t\t\tind0 = i*size_nodal_nu + j\n\t\t\tind1 = ind0 + 1\n\t\t\tn0 = nodal_nu[j]\n\t\t\tn1 = nodal_nu[j+1]\n\t\t\t# Get lambda and dL/de2\n\t\t\tm0 = nodal_mesh[ind0, 0]\n\t\t\tdm0 = nodal_mesh[ind0, 4]\n\t\t\tm1 = nodal_mesh[ind1, 0]\n\t\t\tdm1 = nodal_mesh[ind1, 4]\n\t\t\t# Convert nodal points to cartesian\n\t\t\tn0x, n0y, n0z = mathhelper.prolate2cart(nodal_mesh[ind0, 0], nodal_mesh[ind0, 1], nodal_mesh[ind0, 2], focus)\n\t\t\t# Plot arc\n\t\t\tfor k in range(2, len(e)):\n\t\t\t\t# Determine point to use\n\t\t\t\tif k == 2:\n\t\t\t\t\tpt_x, pt_y, pt_z = n0x, n0y, n0z\n\t\t\t\telse:\n\t\t\t\t\tpt_x, pt_y, pt_z = x_here, y_here, z_here\n\t\t\t\t# Get lambda\n\t\t\t\thm0 = 1 - 3*(e[k]**2) + 2*(e[k]**3)\n\t\t\t\thdm0 = e[k]*(e[k] - 1)**2\n\t\t\t\thm1 = (e[k]**2)*(3 - 2*e[k])\n\t\t\t\thdm1 = (e[k]**2)*(e[k] - 1)\n\t\t\t\tm = hm0 * m0 + hdm0 * dm0 + hm1 * m1 + hdm1 * dm1\n\t\t\t\t# Get nu\n\t\t\t\tn_here = n0 + e[k]*(n1-n0)\n\t\t\t\t# Convert to cartesian\n\t\t\t\tx_here, y_here, z_here = mathhelper.prolate2cart(m, n_here, nodal_phi[i], focus)\n\t\t\t\t# Append the vectors for plotting\n\t\t\t\tx = np.append(pt_x, x_here)\n\t\t\t\ty = np.append(pt_y, y_here)\n\t\t\t\tz = np.append(pt_z, z_here)\n\t\t\t\t# Plot the segment\n\t\t\t\tax.plot(z, y, -x, 'k-.')\n\t\t\t\t\n\treturn(ax)", "def random_plane_points(num_points, bounds):\n\n # Infer dimension of data from bounds\n (bounds, dimension) = infer_dimension(bounds)\n\n # Generate points and rescale to fit bounds\n points = np.random.rand(num_points, dimension)\n unit_mean = [0.5] * dimension\n shifted_points = points - unit_mean + bounds.mean(axis=1)\n scale = bounds[:, 1] - bounds[:, 0]\n rescaled_points = np.dot(shifted_points, np.diag(scale))\n\n return rescaled_points", "def plane_2d(self, quantity, plane, pval, draw=False, fixed=None):\n self.log.info('Plotting plane')\n pval = int(pval)\n # x = np.arange(0, self.period, self.dx)\n # y = np.arange(0, self.period, self.dy)\n # z = np.arange(0, self.height + self.dz, self.dz)\n x = self.X\n y = self.Y\n z = self.Z\n # Get the scalar values\n freq = self.conf['Simulation']['params']['frequency']\n wvlgth = (consts.c / freq) * 1E9\n title = 'Frequency = {:.4E} Hz, Wavelength = {:.2f} nm'.format(\n 
freq, wvlgth)\n # Get the plane we wish to plot\n cs = self.get_plane(quantity, plane, pval)\n self.log.info('DATA SHAPE: %s' % str(cs.shape))\n show = self.conf['General']['show_plots']\n p = False\n sim_dir = os.path.expandvars(self.conf['General']['sim_dir'])\n if plane == 'yz' or plane == 'zy':\n labels = ('y [um]', 'z [um]', quantity, title)\n if self.conf['General']['save_plots']:\n p = os.path.join(sim_dir,\n '%s_plane_2d_yz_pval%s.png' % (quantity,\n str(pval)))\n self.heatmap2d(y, z, cs, labels, plane, pval,\n save_path=p, show=show, draw=draw, fixed=fixed)\n elif plane == 'xz' or plane == 'zx':\n labels = ('x [um]', 'z [um]', quantity, title)\n if self.conf['General']['save_plots']:\n p = os.path.join(sim_dir,\n '%s_plane_2d_xz_pval%s.png' % (quantity,\n str(pval)))\n self.heatmap2d(x, z, cs, labels, plane, pval,\n save_path=p, show=show, draw=draw, fixed=fixed)\n elif plane == 'xy' or plane == 'yx':\n labels = ('y [um]', 'x [um]', quantity, title)\n if self.conf['General']['save_plots']:\n p = os.path.join(sim_dir,\n '%s_plane_2d_xy_pval%s.png' % (quantity,\n str(pval)))\n self.heatmap2d(x, y, cs, labels, plane, pval,\n save_path=p, show=show, draw=draw, fixed=fixed)", "def get_transformable_plane(self, x_range = None, y_range = None):\n plane_config = dict(self.plane_config)\n shift_val = ORIGIN\n if x_range is not None:\n x_min, x_max = x_range\n plane_config[\"x_radius\"] = x_max - x_min\n shift_val += (x_max+x_min)*RIGHT/2.\n if y_range is not None:\n y_min, y_max = y_range\n plane_config[\"y_radius\"] = y_max - y_min\n shift_val += (y_max+y_min)*UP/2.\n plane = ComplexPlane(**plane_config)\n plane.shift(shift_val)\n if self.use_multicolored_plane:\n self.paint_plane(plane)\n return plane", "def from_vectors(cls, point: array_like, vector_a: array_like, vector_b: array_like, **kwargs) -> Plane:\n vector_a = Vector(vector_a)\n\n if vector_a.is_parallel(vector_b, **kwargs):\n raise ValueError(\"The vectors must not be parallel.\")\n\n # The cross product returns a 3D vector.\n vector_normal = vector_a.cross(vector_b)\n\n # Convert the point to 3D so that it matches the vector dimension.\n point = Point(point).set_dimension(3)\n\n return cls(point, vector_normal)", "def boom_plane(self):\n return Plane(reference=self.tail_shaft_circle[0].center,\n normal=Vector(0, 1, 0),\n binormal=Vector(0, 0, 1))", "def normal(self, t=0):\n n = Line3d()\n n.p = self.lerp(t)\n n.v = self.cross\n return n", "def xyplane(draw, r, x, shift = np.array([1000, 1000, 0, 0]), scale = 300):\n extent = 2.8\n pln = np.array(\n [\n [x,-extent,0],\n [x,extent,0],\n [x,extent,extent*2],\n [x,-extent,extent*2]\n ]\n )\n pln = np.dot(pln,np.transpose(r))\n pln = pln * scale + shift[:3]\n draw.polygon([(pln[0][0],pln[0][1]),(pln[1][0],pln[1][1]),(pln[2][0],pln[2][1]),(pln[3][0],pln[3][1])], (0,102,255,70))", "def plane_point_side_v3(p: np.ndarray, v: np.ndarray) -> Any:\n return p[:3].dot(v) + p[3]", "def intersect_plane(self, other: Plane, **kwargs) -> Line:\n if self.normal.is_parallel(other.normal, **kwargs):\n raise ValueError(\"The planes must not be parallel.\")\n\n array_normals_stacked = np.vstack((self.normal, other.normal))\n\n # Construct a matrix for a linear system.\n array_00 = 2 * np.eye(3)\n array_01 = array_normals_stacked.T\n array_10 = array_normals_stacked\n array_11 = np.zeros((2, 2))\n matrix = np.block([[array_00, array_01], [array_10, array_11]])\n\n dot_a = np.dot(self.point, self.normal)\n dot_b = np.dot(other.point, other.normal)\n array_y = np.array([0, 0, 0, dot_a, dot_b])\n\n # 
Solve the linear system.\n solution = np.linalg.solve(matrix, array_y)\n\n point_line = Point(solution[:3])\n direction_line = self.normal.cross(other.normal)\n\n return Line(point_line, direction_line)", "def get_plane(self, quantity, plane, pval):\n\n self.log.info('Retrieving plane for %s', quantity)\n scalar = self.get_scalar_quantity(quantity)\n if plane == 'yz' or plane == 'zy':\n # z along rows, y along columns\n return scalar[:, pval, :]\n elif plane == 'xz' or plane == 'zx':\n # x along columns, z along rows\n return scalar[:, :, pval]\n elif plane == 'xy' or plane == 'yx':\n # x along rows, y along columns\n return scalar[pval, :, :]" ]
[ "0.6926132", "0.67918247", "0.66280645", "0.6622853", "0.6570255", "0.6499382", "0.64442146", "0.6332076", "0.62797856", "0.62646264", "0.6246257", "0.6151091", "0.6120761", "0.6096576", "0.60693115", "0.59628874", "0.5912617", "0.58920056", "0.58715844", "0.585937", "0.58575803", "0.58491397", "0.5846112", "0.5843997", "0.58173543", "0.5813309", "0.57830566", "0.57630634", "0.5756629", "0.57460064", "0.5741681", "0.57187223", "0.57041", "0.5703605", "0.56917787", "0.56906664", "0.5686903", "0.567886", "0.5677912", "0.5674802", "0.5674802", "0.5671491", "0.56708795", "0.56696886", "0.56674445", "0.56576335", "0.5630148", "0.562735", "0.5625279", "0.5614196", "0.56068265", "0.55967784", "0.55874074", "0.55793387", "0.55628", "0.55516875", "0.5541118", "0.55336416", "0.5523495", "0.55062515", "0.5500171", "0.54979044", "0.54795945", "0.54688686", "0.5467951", "0.5466867", "0.5458666", "0.54546154", "0.5454049", "0.54499763", "0.5441335", "0.5437811", "0.5437491", "0.5434407", "0.54279983", "0.53908265", "0.53825057", "0.5376314", "0.53753334", "0.5372291", "0.5364766", "0.5351786", "0.53362215", "0.53331584", "0.53256255", "0.53145134", "0.5308774", "0.5302543", "0.5300385", "0.5298667", "0.52967113", "0.52870405", "0.527753", "0.52772963", "0.5269985", "0.52658033", "0.5262871", "0.5256123", "0.5251584", "0.52461326" ]
0.683124
1
Returns a hit, or None if the ray is parallel to the plane
def intersect(self, ray):
    t = None
    hit = None
    angle = ray.dir.dot(self.norm)
    if angle != 0:
        t = (self.point - ray.start).dot(self.norm) / angle
        if angle < 0:
            hit = Hit(self, ray, t, float('inf'), self.norm, self.mat)
        else:
            hit = Hit(self, ray, float('-inf'), t, self.norm, self.mat)
    else:
        vector = unit(ray.start - self.point)
        if vector.dot(self.norm) < 0:
            hit = Hit(self, ray, float('-inf'), float('inf'), self.norm, self.mat)
        else:
            return None
    if (self.mat.texture is not None and not isninf(hit.entry)) > 0:
        hit.texCords = self.texCords(ray.pos(t))
    return hit
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rayIntersection(self, ray):\n #t = \"what we are trying to find\"\n l = -ray.mDirection\n l0 = ray.mOrigin\n n = self.mNormal\n p0 = self.mDistance * n\n #p = l0 + l * t\n\n if l.dot(n) > 0:\n v = p0 - l0\n t = -(v.dot(n) / l.dot(n))\n return t\n\n else:\n return None", "def intersect(self, ray):\n # TODO A5 (Step1) implement this function\n # Copy your implementation from A4\n # Then calculate uv coordinates, to be passed into the Hit initializer\n vs = self.vs\n\n a = vs[0][0] - vs[1][0]\n b = vs[0][1] - vs[1][1]\n c = vs[0][2] - vs[1][2]\n d = vs[0][0] - vs[2][0]\n e = vs[0][1] - vs[2][1]\n f = vs[0][2] - vs[2][2]\n\n ray_dir = ray.direction\n ray_orig = ray.origin\n\n g = ray_dir[0]\n h = ray_dir[1]\n i = ray_dir[2]\n j = vs[0][0] - ray_orig[0]\n k = vs[0][1] - ray_orig[1]\n l = vs[0][2] - ray_orig[2]\n\n M = a * (e * i - h * f) + b * (g * f - d * i) + c * (d * h - e * g)\n\n t = -(f * (a * k - j * b) + e * (j * c - a * l) + d *\n (b * l - k * c)) / M\n\n if (t < ray.start or t > ray.end):\n return no_hit\n\n gamma = (i * (a * k - j * b) + h * (j * c - a * l) + g *\n (b * l - k * c)) / M\n\n if (gamma < 0 or gamma > 1):\n return no_hit\n\n beta = (j * (e * i - h * f) + k * (g * f - d * i) +\n l * (d * h - e * g)) / M\n\n if (beta < 0 or beta > 1 - gamma):\n return no_hit\n\n P = ray_orig + t * ray_dir\n\n unit_normal = normalize(np.cross(vs[0] - vs[2], vs[1] - vs[2]))\n\n A = np.linalg.norm(np.cross(vs[1] - vs[0], vs[2] - vs[0])) / 2\n areaA = np.linalg.norm(np.cross(vs[1] - P, vs[2] - P)) / 2\n areaB = np.linalg.norm(np.cross(vs[0] - P, vs[2] - P)) / 2\n areaC = np.linalg.norm(np.cross(vs[0] - P, vs[1] - P)) / 2\n u = areaB / A\n v = areaC / A\n return Hit(t, P, unit_normal, vec([u, v]), self.material)", "def next_hit(self, ray):\n hit_candidates = [(i.time_to_bound(ray), i) for i in self._bounds]\n try:\n # WARNING - A hard cut on 'times' smaller than 10^-9 is made to exclude\n # a beam reinteracting with the same barrier. This cuts out any legitimate\n # interactions closer than 1nm of the beam position.\n return (sorted([(time, surface) for time, surface in hit_candidates\n if time is not None and time > 1e-9 and all(\n [b.contains(ray.propagate(time).position) for b in self._bounds\n if b is not surface])])[0])\n except IndexError:\n return None", "def intersect(self, ray):\n # TODO A5 (Step1) implement this function\n # Copy your implementation from A4\n # Then calculate uv coordinates, to be passed into the Hit initializer\n D = ray.direction\n E = ray.origin\n C = self.center\n R = self.radius\n B = 2*np.dot(D, E-C)\n A = np.dot(D, D)\n min_t = ray.start\n max_t = ray.end\n\n discriminant = B ** 2 - 4 * A * (np.dot(E-C, E-C)-R**2)\n\n if discriminant < 0:\n return no_hit\n\n t0 = (-1*B - np.sqrt(discriminant)) / (2*A)\n t1 = (-1*B + np.sqrt(discriminant)) / (2*A)\n\n if (t0 >= min_t and t0 <= max_t and t0 <= t1):\n t = t0\n elif (t1 >= min_t and t1 <= max_t):\n t = t1\n else:\n return no_hit\n\n P = E + t * D\n unit_normal = (P - C) / R\n d_hat = normalize(P - C)\n u = 0.5 + (np.arctan2(d_hat[0], d_hat[2])) / (2 * np.pi)\n v = 0.5 + (np.arcsin(d_hat[1])) / np.pi\n\n return Hit(t, P, unit_normal, vec([u, v]), self.material)", "def intersectRay(self, ray):\n # Ray Tracing from the Ground Up, pg. 
367\n a, b, c, d = self.a[0] - self.b[0], self.a[0] - self.c[0], ray.d[0], self.a[0] - ray.o[0]\n e, f, g, h = self.a[1] - self.b[1], self.a[1] - self.c[1], ray.d[1], self.a[1] - ray.o[1]\n i, j, k, L = self.a[2] - self.b[2], self.a[2] - self.c[2], ray.d[2], self.a[2] - ray.o[2]\n\n m, n, p = f * k - g * j, h * k - g * L, f * L - h * j\n q, s = g * i - e * k, e * j - f * i\n\n denom = a * m + b * q + c * s\n if denom < self.kEpsilon:\n return None\n\n inv_denom = 1.0 / denom\n\n e1 = d * m - b * n - c * p\n beta = e1 * inv_denom\n\n if 1.0 < beta or beta < 0.0:\n return None\n\n r = e * L - h * i\n e2 = a * n + d * q + c * r\n gamma = e2 * inv_denom\n\n if 1.0 < gamma or gamma < 0.0:\n return None\n\n e3 = a * p - b * r + d * s\n t = e3 * inv_denom\n\n if t < self.kEpsilon:\n return None\n\n return t", "def intersects(self, ray):\n theta = 45\n H = 512\n W = 512\n A = self.origin\n B = Point(W, A.y, A.z)\n C = Point(B.x, (int)(H * math.sin(theta * math.pi / 180)), (int)(H * math.cos(math.pi * theta / 180)))\n D = Point(A.x, (int)(H * math.sin(theta * math.pi / 180)), (int)(H * math.cos(math.pi * theta / 180)))\n vec3 = ray.direction * self.normal\n if vec3 != 0:\n vec1 = self.origin - ray.origin\n vec2 = vec1 * self.normal\n dist = vec2 / vec3\n if dist > 0:\n point_on_plane = ray.origin + dist * ray.direction\n if A.x <= point_on_plane.x <= B.x and A.y <= point_on_plane.y <= D.y and B.z <= point_on_plane.z <= C.z:\n #print A, B, C, D, point_on_plane\n return dist", "def intersectsAB(self, ray):\n v1 = ray.origin - self.pointA\n v2 = self.pointB - self.pointA\n v3 = Point(-ray.direction.y, ray.direction.x)\n dot = v2.dot(v3)\n if (abs(dot) < 0.000001):\n return None\n t1 = v2.cross(v1) / dot\n t2 = v1.dot(v3) / dot\n if (t1 >= 0.0 and (t2 >= 0.0 and t2 <= 1.0)):\n return t1\n return None", "def raySegmentIntersectAB(self, ray):\n v1 = ray.origin - self.pointA\n v2 = self.pointB - self.pointA\n v3 = Point(-ray.direction.y, ray.direction.x)\n dot = v2.dot(v3)\n if (abs(dot) < 0.000001):\n return None\n t1 = v2.cross(v1) / dot\n t2 = v1.dot(v3) / dot\n if (t1 >= 0.0 and (t2 >= 0.0 and t2 <= 1.0)):\n return t1\n return None", "def intersect(self, ray):\n # TODO A5 (Step3 and Step4) implement this function\n # For step 4, check if uvs and normals are not None (respectively)\n # If so, then interpolate them\n\n # batch_intersect returns t, beta, gamma, i\n posns = self.posns\n uvs = self.uvs\n inds = self.inds\n normals = self.normals\n t, beta, gamma, i = batch_intersect(posns[inds[:, :]], ray)\n if (t == np.inf):\n return no_hit\n vs = posns[inds[i, :]]\n P = ray.origin + t * ray.direction\n\n if (t == np.inf):\n return no_hit\n else:\n\n alpha = 1 - beta - gamma\n\n if uvs is not None:\n\n uv0 = uvs[inds[i][0]]\n uv1 = uvs[inds[i][1]]\n uv2 = uvs[inds[i][2]]\n\n uv = alpha * uv0 + beta * uv1 + gamma * uv2\n\n else:\n\n A = np.linalg.norm(np.cross(vs[1] - vs[0], vs[2] - vs[0])) / 2\n areaA = np.linalg.norm(np.cross(vs[1] - P, vs[2] - P)) / 2\n areaB = np.linalg.norm(np.cross(vs[0] - P, vs[2] - P)) / 2\n areaC = np.linalg.norm(np.cross(vs[0] - P, vs[1] - P)) / 2\n u = areaB / A\n v = areaC / A\n uv = vec([u, v])\n\n if normals is not None:\n\n n0 = normals[inds[i][0]]\n n1 = normals[inds[i][1]]\n n2 = normals[inds[i][2]]\n\n unit_normal = normalize(alpha * n0 + beta * n1 + gamma * n2)\n\n else:\n unit_normal = normalize(np.cross(vs[0] - vs[2], vs[1] - vs[2]))\n\n return Hit(t, P, unit_normal, uv, self.material)", "def intersection(self, ray):\n d_proj = self._normal.dot(ray.d)\n if abs(d_proj) 
< bounds.too_small:\n return -1.0\n s_proj = (self._origin - ray.o).dot(self._normal)\n if d_proj * s_proj < 0.0:\n # ray going away from plane\n return -1.0\n else:\n return s_proj / d_proj", "def rayIntersection(self, ray):\n\n rotVect = ray.mDirection #math3d.VectorN(math.cos(num), - math.sin(num), 0)\n\n # this give all the lines (red green and blue at the moment)\n tankPos = math3d.VectorN(ray.mOrigin[0], ray.mOrigin[1], 0)\n linkPos = math3d.VectorN(200,200,0)\n v = linkPos - tankPos\n added = (tankPos + getPara(v, rotVect) + getPerp(v, rotVect))\n added2 = tankPos + getPara(v, rotVect) #If the magnitude of this is minus the sphere origin is less than the radius you're in the sphere\n added3 = tankPos + getPerp(v, rotVect)\n added4 = tankPos + rotVect.normalized() * 200 #this is get point only change 200 to dist\n\n\n test = added2 - self.mCenter #checks if in center\n\n\n if test.magnitude() <= self.mRadius:\n green = added2 - ray.mOrigin #this is Qpara\n thing = (self.mSRadius - test.magnitude()**2) ** 0.5\n t = (green.magnitude() - thing)\n print(green.magnitude() - thing)\n return t\n else:\n return None\n\n #print(test.magnitude(), self.mRadius)\n #print(green.magnitude(), \"green\")", "def _get_intersection(self, ray):\n\n intersection = None\n for obj in self.objects:\n dist = obj.intersects(ray)\n if dist is not None and \\\n (intersection is None or dist < intersection[1]):\n intersection = obj, dist\n\n return intersection", "def time_to_bound(self, ray):\n incidence_cosine = dot(self._normal, ray.direction)\n aplanarity = (self._position - ray.position).dot(self._normal)\n if incidence_cosine == 0:\n # Ray parallel to plane. If aplanarity also 0, ray is in the plane.\n return None\n time = aplanarity / incidence_cosine\n return time if time > 0 else None", "def getIntersection(self, ray):\n pass", "def intersect(self, plane, epsilon=0.00001):\r\n den = np.dot(self.direction, plane.normal)\r\n if math.fabs(den) < epsilon:\r\n return None\r\n\r\n result = (-plane.distance - np.dot(plane.normal, self.origin)) / den\r\n\r\n if result < 0.0:\r\n if result < -epsilon:\r\n return None\r\n result = 0.0\r\n return result", "def intersect(self, ray):\n # TODO A5 copy your implementation from A4\n surfaces = self.surfs\n\n min_t = np.inf\n i = no_hit\n\n for s in surfaces:\n intersect = s.intersect(ray)\n if (intersect.t < min_t):\n min_t = intersect.t\n i = intersect\n return i", "def intersects(self, ray):\n sphere_to_ray = ray.origin - self.center\n a = 1\n b = 2 * ray.direction.dot_product(sphere_to_ray)\n c = sphere_to_ray.dot_product(sphere_to_ray) - self.radius * self.radius\n discriminant = b * b - 4 * a * c\n\n if discriminant >= 0:\n dist = (-b - sqrt(discriminant)) / 2\n if dist > 0:\n return dist\n\n return None", "def HitTest(self, point, flags=0):\r\n \r\n w, h = self.GetSize()\r\n flags = 0\r\n \r\n if point.x < 0:\r\n flags |= TREE_HITTEST_TOLEFT\r\n if point.x > w:\r\n flags |= TREE_HITTEST_TORIGHT\r\n if point.y < 0:\r\n flags |= TREE_HITTEST_ABOVE\r\n if point.y > h:\r\n flags |= TREE_HITTEST_BELOW\r\n\r\n if flags:\r\n return None, flags\r\n \r\n if self._anchor == None:\r\n flags = TREE_HITTEST_NOWHERE\r\n return None, flags\r\n \r\n hit, flags = self._anchor.HitTest(self.CalcUnscrolledPosition(point), self, flags, 0)\r\n\r\n if hit == None: \r\n flags = TREE_HITTEST_NOWHERE\r\n return None, flags\r\n\r\n if not self.IsItemEnabled(hit):\r\n return None, flags\r\n\r\n return hit, flags", "def is_intersecting(self, ray):\n\n intersecting_point = 
self._sympy_plane.intersection(ray.sympy_line)[0]\n\n if 'x' in self._name:\n\n if self._within_y_bounds(intersecting_point.y) and self._within_z_bounds(intersecting_point.z):\n return True, np.array(map(float, [intersecting_point.x, intersecting_point.y, intersecting_point.z]))\n\n\n\n elif 'y' in self._name:\n\n if self._within_x_bounds(intersecting_point.x) and self._within_z_bounds(intersecting_point.z):\n return True, np.array(map(float, [intersecting_point.x, intersecting_point.y, intersecting_point.z]))\n\n\n\n elif 'z' in self._name:\n\n if self._within_y_bounds(intersecting_point.y) and self._within_x_bounds(intersecting_point.x):\n return True, np.array(map(float, [intersecting_point.x, intersecting_point.y, intersecting_point.z]))\n\n return False, None", "def shoot_ray(self, row, col):\n # Uses validate method to check if row,col are legal for ray entrance location\n if not self.valid_ray(row, col):\n return False\n # creates ray object from row, col integers\n ray = Ray(row, col)\n # checks if atom is in front of entrance position\n if not ray.can_continue(self.get_a_locations()):\n self.mark_portal(ray.get_start())\n if self.get_score() <= 0:\n self.change_state(\"LOST\")\n return None\n # while there is no atom in front of ray and ray will not exit board --\n while ray.can_continue(self.get_a_locations()):\n ray.check_diags(self.get_a_locations())\n # moves ray forward one space\n ray.advance()\n # if ray will exit board by advancing --\n if not ray.on_board():\n # adjusts score if entrance/exit do not match prior entrances/exits\n self.mark_portal(ray.get_start(), ray.get_pos())\n # changes state to lose if score is now <= 0\n if self.get_score() <= 0:\n self.change_state(\"LOST\")\n # returns tuple of exit location\n return tuple(ray.get_pos())\n # if ray is blocked by atom --\n if not ray.no_atom(self.get_a_locations()):\n # changes state to lost if score is now <= 0\n self.mark_portal(ray.get_start())\n if self.get_score() <= 0:\n self.change_state(\"LOST\")\n return None", "def ray_segment_intersect(p_ray, d_ray, seg):\n d_seg = seg[1] - seg[0]\n\n t_max = np.linalg.norm(d_seg)\n\n d_seg = d_seg / t_max\n d_ray = d_ray / np.linalg.norm(d_ray)\n\n D = np.stack([d_ray, -d_seg], axis=1)\n b = seg[0] - p_ray\n\n try:\n T = np.linalg.solve(D, b)\n except np.linalg.LinAlgError as e:\n # D is a singular matrix, lines are parallel\n return None\n\n # 0 <= T[1] < t_max because if the ray intersects perfectly with vertices then they will\n # T[0] > 0 ray shoots only in one direction\n # be included twice because they are the end and the beginning of a two segments\n if 0 <= T[1] < t_max and T[0] > 0 and np.allclose(np.dot(D, T), b):\n return seg[0] + d_seg * T[1]\n else:\n return None", "def intersection(self, line: AbstractLine) -> Optional[AbstractPoint]:\n plane = Plane(self.__point_a,\n self.__point_b - self.__point_a,\n self.__point_c - self.__point_a)\n\n point = plane.intersection(line)\n if point is not None:\n if self.has_point(point):\n return point\n return None", "def intersects(self, ray):\n def raySegmentIntersectAB(self, ray):\n \"\"\"\n recibes a ray. checks if it intersects the segment\n dot: denominator. 
if dot = 0 they're paralel\n t1: distance from origin to intersection\n t2: intersection IN the segment\n \"\"\"\n v1 = ray.origin - self.pointA\n v2 = self.pointB - self.pointA\n v3 = Point(-ray.direction.y, ray.direction.x)\n dot = v2.dot(v3)\n if (abs(dot) < 0.000001):\n return None\n t1 = v2.cross(v1) / dot\n t2 = v1.dot(v3) / dot\n if (t1 >= 0.0 and (t2 >= 0.0 and t2 <= 1.0)):\n return t1\n return None\n \n def raySegmentIntersectCD(self, ray):\n v1 = ray.origin - self.pointC\n v2 = self.pointD - self.pointC\n v3 = Point(-ray.direction.y, ray.direction.x)\n dot = v2.dot(v3)\n if (abs(dot) < 0.000001):\n return None\n t1 = v2.cross(v1) / dot\n t2 = v1.dot(v3) / dot\n if (t1 >= 0.0 and (t2 >= 0.0 and t2 <= 1.0)):\n return t1\n return None\n \n def raySegmentIntersectAC(self, ray):\n v1 = ray.origin - self.pointA\n v2 = self.pointC - self.pointA\n v3 = Point(-ray.direction.y, ray.direction.x)\n dot = v2.dot(v3)\n if (abs(dot) < 0.000001):\n return None\n t1 = v2.cross(v1) / dot\n t2 = v1.dot(v3) / dot\n if (t1 >= 0.0 and (t2 >= 0.0 and t2 <= 1.0)):\n return t1\n return None\n\n def raySegmentIntersectBD(self, ray):\n v1 = ray.origin - self.pointB\n v2 = self.pointD - self.pointB\n v3 = Point(-ray.direction.y, ray.direction.x)\n dot = v2.dot(v3)\n if (abs(dot) < 0.000001):\n return None\n t1 = v2.cross(v1) / dot\n t2 = v1.dot(v3) / dot\n if (t1 >= 0.0 and (t2 >= 0.0 and t2 <= 1.0)):\n return t1\n return None\n \n \n minD = 9999\n distance_AB = raySegmentIntersectAB(self, ray)\n distance_CD = raySegmentIntersectCD(self, ray)\n distance_AC = raySegmentIntersectAC(self, ray)\n distance_BD = raySegmentIntersectBD(self, ray)\n \n if distance_AB is not None:\n minD = distance_AB\n \n if distance_CD is not None:\n if distance_CD < minD:\n minD = distance_CD\n \n if distance_AC is not None:\n if distance_AC < minD:\n minD = distance_AC\n \n if distance_BD is not None:\n if distance_BD < minD:\n minD = distance_BD\n\n if minD is not None:\n if minD != 9999:\n return minD\n return None\n \"\"\"\n minD = raySegmentIntersectBD(self, ray)\n #print (minD)\n return minD\n \"\"\"", "def getRay(self, points, normed=False): # pragma: no cover\n # to be overloaded by the child class.\n return None", "def intersection(self, segment):\n intersection = self.hyperplane.intersection(segment)\n if intersection is not None and np.linalg.norm(intersection - self.closest_point_to(intersection)) < epsilon:\n return intersection\n\n return None", "def hit(self):\n return self._hit", "def intersection(self, ray):\n \n points = []\n intersection_objects = []\n for obj in self.objects:\n intersection = obj.shape.intersection(ray)\n if intersection != None:\n for pt in intersection:\n points.append(pt)\n intersection_objects.append(obj)\n \n if len(points) == 0:\n return None, None\n return points, intersection_objects", "def hit(bx, by, r, px, py,h):\n if bx >= px:\n distance = bx - px\n else:\n distance = px - bx\n if py<=by and by<=py+h and distance <= r:\n return True\n else:\n return False", "def intersect(self,ray:Ray):\n o = ray.o #ray origin\n d = ray.d #ray destination\n oc = o - self.center #vector from ray origin to center\n b = 2*(oc*d)\n c = oc*oc - self.r**2\n disc = b**2-4*c\n if disc<0:\n return False,-1\n else:\n disc **=0.5\n t0 = -b-disc\n t1 = -b+disc\n return True,max(t0,t1)", "def intersect(l: Line, p: Plane) -> Point:\n if math.isclose((l.d * p.normal()), 0):\n # If the line direction is perpendicular to the plane normal,\n # the line and plane must be parallel.\n return None\n else:\n # There 
exists a parameter t, which makes\n # p.isInPlane(l.point(t)) == 0\n # Let's find it.\n # Initial guess\n t1 = 1\n p1 = l.point(t1)\n d1 = distancePointPlane(p1, p)\n t2 = 2\n p2 = l.point(t2)\n d2 = distancePointPlane(p2, p)\n\n # Calculate line through the two points (t,d)\n a = (d2 - d1) / (t2 - t1)\n b = d1 - a * t1\n\n # Find the t-value where d is zero\n # 0 = at+b <=> t = -b/a\n t = -b / a\n print(\"parameter: {}\".format(t))\n return l.point(t)", "def intersection_plane_plane_plane(plane1, plane2, plane3, epsilon=1e-6):\n line = intersection_plane_plane(plane1, plane2, epsilon)\n if not line:\n return None\n pt = intersection_line_plane(line, plane3, epsilon)\n if pt:\n return pt\n return None", "def intersects(self, ray):\n\n sphere_to_ray = ray.origin - self.origin\n b = 2 * ray.direction * sphere_to_ray\n c = sphere_to_ray ** 2 - self.radius ** 2\n discriminant = b ** 2 - 4 * c\n\n if discriminant >= 0:\n dist = (-b - math.sqrt(discriminant)) / 2\n if dist > 0:\n return dist", "def intersectsRay(self, ray):\n pass", "def hit(self, ray_, t_min, t_max):\n raise NotImplemented(\"Override in subclass\")", "def get_closest_node(self, point, plane=None):\n node = -1\n best_dist = 1.e100\n\n if len(point) == 3:\n # Seaching in 3d mesh\n meshz = self.get_data_value('ELEVATION Z', 0)\n for i in range(self.npoin3):\n dist = (self.meshx[i]- point[0])**2 + \\\n (self.meshy[i]- point[1])**2 + \\\n (meshz[i]- point[2])**2\n\n if dist < best_dist:\n best_dist = dist\n node = i\n\n elif len(point) == 2:\n if plane is None:\n # Searching in a 2d mesh\n for i in range(self.npoin2):\n dist = (self.meshx[i]- point[0])**2 + \\\n (self.meshy[i]- point[1])**2\n\n if dist < best_dist:\n best_dist = dist\n node = i\n else:\n # Searching in a given plane for the closest node\n for i in range(plane*self.npoin2, (plane+1)*self.npoin2):\n dist = (self.meshx[i]- point[0])**2 + \\\n (self.meshy[i]- point[1])**2\n\n if dist < best_dist:\n best_dist = dist\n node = i\n\n else:\n raise TelemacException(\\\n \"Point should be 2d or 3d: {}\".format(point))\n\n return node", "def LinePlaneCollision(planeNormal, planePoint, rayDirection, rayPoint, epsilon=1e-12):\n\n ndotu = planeNormal.dot(rayDirection)\n if abs(ndotu) < epsilon:\n raise RuntimeError(\"no intersection or line is within plane\")\n\n w = rayPoint - planePoint\n si = -planeNormal.dot(w) / ndotu\n Psi = w + si * rayDirection + planePoint\n return Psi", "def intersection_ray_ray_3d(ray1: Tuple[Vector, Vector],\n ray2: Tuple[Vector, Vector], abs_tol=1e-10) -> \\\nSequence[\n Vector]:\n # source: http://www.realtimerendering.com/intersections.html#I304\n o1, p1 = ray1\n d1 = (p1 - o1).normalize()\n o2, p2 = ray2\n d2 = (p2 - o2).normalize()\n d1xd2 = d1.cross(d2)\n denominator = d1xd2.magnitude_square\n if math.isclose(denominator, 0., abs_tol=abs_tol):\n # ray1 is parallel to ray2\n return tuple()\n else:\n o2_o1 = o2 - o1\n det1 = _determinant(o2_o1, d2, d1xd2)\n det2 = _determinant(o2_o1, d1, d1xd2)\n p1 = o1 + d1 * (det1 / denominator)\n p2 = o2 + d2 * (det2 / denominator)\n if p1.isclose(p2, abs_tol=abs_tol):\n # ray1 and ray2 have an intersection point\n return p1,\n else:\n # ray1 and ray2 do not have an intersection point,\n # p1 and p2 are the points of closest approach on each ray\n return p1, p2", "def current_navmesh(self):\n source = self.transform.world_position.copy()\n source.z += 1\n\n target = source.copy()\n target.z -= 2\n\n result = self.physics.ray_test(target=target, source=source, mask=CollisionGroups.navmesh)\n\n if result is 
None:\n return None\n\n return result.entity", "def ray_cast(self, max_distance):\n\t\tray_angle = radians(self.rotation)\n\n\t\t# Sense objects\n\t\tx, y = self.position\n\t\tclosest = None\n\t\tclosest_distance = float('inf')\n\t\tfor obj in self.get_scene_objects():\n\t\t\tif not hasattr(obj, 'position') or obj is self:\n\t\t\t\tcontinue\n\t\t\tobj_x, obj_y = obj.position\n\n\t\t\t# Figure out what the angle of this object is\n\t\t\t# relative to self\n\t\t\tdelta_x = obj_x - x\n\t\t\tdelta_y = y - obj_y\n\t\t\tangle = atan2(delta_y, delta_x) - pi/2\n\n\t\t\t# If the angle is acceptably in front of self\n\t\t\tif abs(angle - ray_angle) >= RAY_ANGLE_THRESHOLD:\n\t\t\t\tcontinue\n\n\t\t\tdist = self.distance_to(obj)\n\t\t\tif dist < closest_distance:\n\t\t\t\tclosest = obj\n\t\t\t\tclosest_distance = dist\n\n\t\tif closest is not None:\n\t\t\tif closest_distance <= max_distance:\n\t\t\t\treturn closest\n\t\t\t# Object is there, but not close enough\n\t\t\treturn None\n\n\t\t# Sense boundary\n\n\t\t# Divide the max_distance into components\n\t\t# based on the ray angle\n\t\tmax_x = max_distance * sin(ray_angle)\n\t\tmax_y = max_distance * cos(ray_angle)\n\n\t\t# Add the max_x and max_y to the current position\n\t\tmax_x = x - max_x\n\t\tmax_y = y - max_y\n\n\t\t# Check if either component is out of a boundary\n\t\tif max_x < 0:\n\t\t\treturn LEFT_BOUNDARY\n\t\telif max_x > self.get_screen_width():\n\t\t\treturn RIGHT_BOUNDARY\n\t\telif max_y < 0:\n\t\t\treturn TOP_BOUNDARY\n\t\telif max_y > self.get_screen_height():\n\t\t\treturn BOTTOM_BOUNDARY\n\n\t\t# Nothing found\n\t\treturn None", "def hitTest( a, b ):\n r = a.radius + b.radius\n x = abs( a.x - b.x )\n y = abs( a.y - b.y )\n if x <= r and y <= r and x*x + y*y <= r*r:\n return 1\n return 0", "def pick(self, start, direction, mat):\n new_mat = np.dot(\n np.dot(mat, self.translation_matrix),\n np.linalg.inv(self.scaling_matrix)\n )\n\n results = self.aabb.ray_hit(start, direction, mat)\n return results", "def ray_status(ray, points, nodes):\n container = find_container(ray, nodes)\n \n # Handle special case of last step where ray is hitting the world node\n root = nodes[0].root\n if container == root and len(nodes) == 1:\n status = root, None, root\n return status\n\n if nodes[0] == container:\n surface_node = nodes[0]\n to_node = nodes[1]\n else:\n surface_node = nodes[0]\n to_node = nodes[0]\n status = container, to_node, surface_node\n return status", "def HitTest(self, point, flags=0):\r\n\r\n w, h = self.GetSize()\r\n column = -1\r\n\r\n if not isinstance(point, wx.Point):\r\n point = wx.Point(*point)\r\n\r\n if point.x < 0:\r\n flags |= wx.TREE_HITTEST_TOLEFT\r\n if point.x > w:\r\n flags |= wx.TREE_HITTEST_TORIGHT\r\n if point.y < 0:\r\n flags |= wx.TREE_HITTEST_ABOVE\r\n if point.y > h:\r\n flags |= wx.TREE_HITTEST_BELOW\r\n if flags:\r\n return None, flags, column\r\n\r\n if not self._anchor:\r\n flags = wx.TREE_HITTEST_NOWHERE\r\n column = -1\r\n return None, flags, column\r\n \r\n hit, flags, column = self._anchor.HitTest(self.CalcUnscrolledPosition(point), self, flags, column, 0)\r\n if not hit:\r\n flags = wx.TREE_HITTEST_NOWHERE\r\n column = -1\r\n return None, flags, column\r\n \r\n return hit, flags, column", "def trace(self, ray): # type: (Ray) -> Vector\n hit_object = None\n t = numpy.inf\n\n for scene_object in self.scene.shapes:\n t0 = scene_object.intersect(ray)\n if t0 < t:\n t = t0\n hit_object = scene_object\n\n # if there were no intersections, then return the background colour\n if t == numpy.inf:\n return 
self.scene.camera.background\n\n hit_point = ray.origin + ray.direction * t\n normal = hit_object.normal(hit_point)\n luminance = 0.0\n\n # perform shading calculations\n for light in self.scene.lights:\n hit_point_to_light = (light.centre - hit_point).normal\n\n #check whether this light contributes to the shading\n in_shadow = False\n for shadower in self.scene.shapes:\n # we don't want to test against itself\n if shadower == hit_object:\n continue\n shadow_ray = Ray(hit_point + normal * 0.0001, hit_point_to_light)\n if shadower.intersect(shadow_ray) < numpy.inf:\n in_shadow = True\n break\n if in_shadow:\n continue\n\n # super simple lambertian lighting model\n luminance += hit_point_to_light.dot(normal) * light.power\n\n # calculate shaded colour - luminance may be over one if there are multiple light sources\n # normally this would be dealt with by HDR and tone mapping but is just clipped\n # in demo ray tracers\n object_colour = hit_object.material.colour * min(luminance, 1.0)\n\n # calculate reflection colour if material has reflectance\n if hit_object.material.reflectance != 0.0 and ray.depth != self.scene.camera.depth:\n reflected_direction = (ray.direction - normal * 2 * (ray.direction.dot(normal))).normal\n # we need to 'translate' the reflection vector away from the hitpoint otherwise\n # we risk intersecting the original hit point again which causes artifacts in the reflection\n reflected_ray = Ray(hit_point + reflected_direction * 0.0001, reflected_direction, ray.depth + 1)\n reflection_colour = self.trace(reflected_ray)\n\n # interpolate shaded colour and reflected colour based on reflectance\n return Vector(*[lerp(object_colour.data[i], reflection_colour.data[i], hit_object.material.reflectance) for i in range(3)])\n\n return object_colour", "def ray(self):\n return self._ray", "def LinePlaneIntersection(line, plane):\n plane = rhutil.coerceplane(plane, True)\n line_points = rhutil.coerce3dpointlist(line, True)\n line = Rhino.Geometry.Line(line_points[0], line_points[1])\n rc, t = Rhino.Geometry.Intersect.Intersection.LinePlane(line, plane) \n if not rc: return scriptcontext.errorhandler()\n return line.PointAt(t)", "def time_to_bound(self, ray):\n qr_a = ray.direction.dot(ray.direction)\n qr_b = 2. * (ray.position - self._center).dot(ray.direction)\n qr_c = np.linalg.norm(ray.position - self._center)**2 - self._radius**2\n radical_arg = qr_b**2 - (4. * qr_a * qr_c)\n try:\n if qr_b < 0:\n quadratic = (-qr_b - sqrt(radical_arg)) / 2.\n else:\n quadratic = (-qr_b + sqrt(radical_arg)) / 2.\n if quadratic != 0.0:\n return min((i for i in (quadratic / qr_a, qr_c / quadratic) if i > 0))\n except ValueError:\n # Root is imaginary. 
Pass this to return None.\n pass\n return None", "def get_atom_hit(self):\r\n return Marker((0, 128, 0), self._screen)", "def intersect_plane(L, plane):\n \n # Line U, V\n # Plane N n\n # (VxN-nU:U.N)\n # Note that this is in homogeneous coordinates.\n # intersection of plane (n,p) with the line (v,p)\n # returns point and line parameter\n \n \n den = np.dot(L.w, plane.n)\n \n if abs(den) > (100*_eps):\n P = -(np.cross(L.v, plane.n) + plane.p * L.w) / den\n p = (np.cross(L.v, plane.n) - plane.p * L.w) / den\n \n P = L.pp\n t = np.dot( P-p, N)\n return namedtuple('intersect_plane', 'p t')(P, t)\n else:\n return None", "def _next_hit(self, elements):\n hits = ((item, item.next_hit(self._ray)) for item in elements)\n try:\n return sorted(\n [(hit[0], hit[1], item) for item, hit in hits if hit is not None])[0]\n except IndexError:\n return None", "def point_of_intersection(l, pz=distance):\r\n # Must fix the error here. Right now, any vector can have a point in the plane.\r\n # Must make it so that only vectors pointing in the planes direction has a point there\r\n # Can be done by checking whether d is positive or not.\r\n # This is to prevent vectors that point away from the detector to be counted\r\n # The definitions below assume that the detector is centred in the origin and its length is oriented along the z-axis.\r\n p0 = np.array([0,0,pz]) # Point on the plane\r\n l0 = np.array([0,0,0]) # Point on the line\r\n n = np.array([0,0,1]) # Normal vector of the plane\r\n d = np.dot(p0-l0, n)/np.dot(l, n)\r\n point = [i*d for i in l]\r\n return point", "def shoot_ray(self, entry_x, entry_y):\r\n\r\n # check to make sure entry_x and entry_y are valid\r\n if (entry_x in [0, 9] or entry_y in [0, 9]) and \\\r\n self._board.get_board_item(entry_x, entry_y) != \"o\":\r\n\r\n exit_tup = self._board.find_exit(entry_x, entry_y)\r\n # returned 0 if hit\r\n if exit_tup == 0:\r\n # decrement entry only if not visited\r\n marker = self.get_hit_marker()\r\n circle_tuple = self.calculate_entry_exit(entry_y, entry_x)\r\n marker.update_center(circle_tuple)\r\n points = self._player.add_entry_exit((entry_x, entry_y), marker,\r\n (entry_x, entry_y))\r\n self._stats.dec_player_score(points)\r\n return \"Hit\"\r\n elif exit_tup == 1:\r\n # decrement entry only if not visited\r\n marker = self.get_reflect_marker()\r\n circle_tuple = self.calculate_entry_exit(entry_y, entry_x)\r\n marker.update_center(circle_tuple)\r\n points = self._player.add_entry_exit((entry_x, entry_y), marker,\r\n (entry_x, entry_y))\r\n\r\n self._stats.dec_player_score(points)\r\n\r\n return \"reflect\"\r\n else:\r\n # decrement both entry and exit if not already visited\r\n marker = self.get_color_marker()\r\n exit_x, exit_y = exit_tup\r\n circle_entry = self.calculate_entry_exit(entry_y, entry_x)\r\n circle_exit = self.calculate_entry_exit(exit_y, exit_x)\r\n marker.update_center(circle_entry, circle_exit)\r\n points = self._player.add_entry_exit((entry_x, entry_y),\r\n marker, exit_tup)\r\n\r\n self._stats.dec_player_score(points)\r\n return exit_tup\r\n else:\r\n # returns false if the shoot_ray point is invalid\r\n return \"Bad shot\"", "def getClosestPointFromLine(origin, ray, point):\n # calculate the difference vector\n delta = point-origin\n # norm the ray\n ray /= np.linalg.norm(ray, axis=-1)[..., None]\n # calculate the scale product\n factor = np.sum(ray*delta, axis=-1)\n try:\n return origin + factor[:, None] * ray\n except IndexError:\n return origin + factor * ray", "def reflect(self, ray):\n normal = 
self.normal(ray.position)\n if normal.dot(ray.direction) > 0:\n normal = -normal\n return Ray(\n ray.direction - 2 * dot(ray.direction, normal) * normal, ray.position)", "def ray(self, pixel):\n # Ensure pixel is in homogenous coordinates\n if len(pixel) == 2:\n pixel = np.vstack((pixel, [1]))\n\n ray = project(self._camera.P_pinv, pixel.astype(np.float32))\n assert ray.shape == (4, 1)\n\n return self._camera.center, ray", "def intersection_line_plane(line, plane, epsilon=1e-6):\n pt1 = line[0]\n pt2 = line[1]\n p_cent = plane[0]\n p_norm = plane[1]\n\n v1 = subtract_vectors(pt2, pt1)\n dot = dot_vectors(p_norm, v1)\n\n if abs(dot) > epsilon:\n v2 = subtract_vectors(pt1, p_cent)\n fac = -dot_vectors(p_norm, v2) / dot\n vec = scale_vector(v1, fac)\n return add_vectors(pt1, vec)\n else:\n return None", "def ray_at(self, O, t):\n point = self.float_mul(t).plus(O)\n return point", "def _hit_span_get(self):\n try:\n return self.hit_end - self.hit_start\n except TypeError: # triggered if any of the coordinates are None\n return None", "def ray_intersect_triangle(origin, direction, triangle, use_planes=False):\n origin = np.array(origin)\n direction = np.array(direction)\n if len(direction.shape) == 1:\n direction = direction.reshape(1, *direction.shape)\n return_single = True\n else:\n return_single = False\n triangle = np.array(triangle)\n if len(triangle.shape) == 2:\n triangle = triangle.reshape(1, *triangle.shape)\n\n v0 = triangle[..., 0, :]\n v1 = triangle[..., 1, :]\n v2 = triangle[..., 2, :]\n u = v1 - v0\n v = v2 - v0\n normal = np.cross(u, v)\n b = np.inner(normal, direction)\n a = my_inner(normal[..., None, :], v0[..., None, :] - origin[None, ..., :])\n\n rI = a / b\n # ray is parallel to the plane\n rI[(b == 0.0)*(a != 0.0)] = np.nan\n # ray is parallel and lies in the plane\n rI[(b == 0.0)*(a == 0.0)] = 0\n\n # check whether the intersection is behind the origin of the ray\n rI[rI < 0.0] = np.nan\n\n if not use_planes:\n w = origin + rI[..., None] * direction - v0[..., None, :]\n denom = my_inner(u, v) * my_inner(u, v) - my_inner(u, u) * my_inner(v, v)\n\n si = (my_inner(u, v)[..., None] * my_inner(w, v[..., None, :]) - my_inner(v, v)[..., None] * my_inner(w, u[..., None, :])) / denom[:, None]\n rI[((si < 0)+(si > 1.0)).astype(bool)] = np.nan\n\n ti = (my_inner(u, v)[..., None] * my_inner(w, u[..., None, :]) - my_inner(u, u)[..., None] * my_inner(w, v[..., None, :])) / denom[:, None]\n rI[((ti < 0.0) + (si + ti > 1.0)).astype(bool)] = np.nan\n\n def nanargmin(a, axis):\n from numpy.lib.nanfunctions import _replace_nan\n a, mask = _replace_nan(a, np.inf)\n res = np.argmin(a, axis=axis)\n return res\n\n index = nanargmin(rI, axis=0)\n rI = rI[index, np.arange(len(index))]\n point = origin + rI[..., None] * direction\n\n if return_single:\n return point[0]\n return point", "def intersection(self, segment):\n p0, p1 = segment.p0, segment.p1\n\n # x = t*(p1 - p0) + p0\n # n'*(x - origin) = 0\n # combine to get\n # n'*(t*(p1-p0) + p0 - origin) = 0\n # solve for t\n\n v = p1 - p0\n w = p0 - self.origin\n t = -np.dot(self.normal, w)/np.dot(self.normal, v)\n\n if 0-epsilon <= t <= 1+epsilon:\n return t*(p1-p0) + p0\n else:\n return None", "def simpleObjPickRoad(obj, roads):\n # in here the obj (either union or terrace) consists of one building\n fittestRid = -1\n accessPoint = Point(0, 0)\n\n findRoad = False\n\n for road in roads:\n reference = road.geom.project(obj.centroid)\n tempAccessPoint = road.geom.interpolate(reference)\n PointC = (obj.centroid.x, obj.centroid.y)\n PointD = 
(tempAccessPoint.x, tempAccessPoint.y)\n lineCD = LineString((PointC, PointD))\n\n if type(obj) == pg_read.Union:\n # now we are using a union\n uid = obj.id\n cur.execute(\"select * from unions \\\n where st_intersects(geom, st_geomfromtext('%s', 27700)) \\\n and uid != %d\" % (lineCD.wkt, uid)) # NOQA\n results = cur.fetchall()\n if not results:\n # which means no other unions intersects lineCD\n findRoad = True\n fittestRid = road.id\n accessPoint = tempAccessPoint\n break\n else:\n # which means we are using a terrace\n tid = obj.id\n cur.execute(\"select * from terraces \\\n where st_intersects(geom, st_geomfromtext('%s', 27700)) \\\n and tid != %d\" % (lineCD.wkt, tid)) # NOQA\n results = cur.fetchall()\n if not results:\n # which means no other terraces intersects lineCD\n findRoad = True\n fittestRid = road.id\n accessPoint = tempAccessPoint\n break\n\n if findRoad:\n # if findRoad == True:\n return fittestRid, accessPoint\n else:\n # which means findRoad == False\n # I know it's sad, but we need to have default option here\n # we use the roads[0] as accessRoad anyway\n road = roads[0]\n reference = road.geom.project(obj.centroid)\n fittestRid = road.id\n accessPoint = road.geom.interpolate(reference)\n return fittestRid, accessPoint", "def trace(self):\n\n \n assert self.scene != None, \"The photon's scene variable is not set.\"\n \n intersection_points, intersection_objects = self.scene.intersection(self.ray)\n\n \"\"\"\n #DIAGNOSTICS\n print \"\\nnew\\n\"\n print self.position, self.direction, \"\\n\"\n print intersection_points, \"\\n\"\n for i in range(0, len(intersection_objects)):\n print \"Object: \", intersection_objects[i].name, \" - Intersection: \", intersection_points[i]\n \"\"\"\n \n assert intersection_points != None, \"The ray must intersect with something in the scene to be traced.\"\n \n if self.container is None:\n self.container = self.scene.container(self)\n assert self.container != None, \"Container of ray cannot be found.\"\n \n #import pdb; pdb.set_trace()\n #import pudb; pudb.set_trace()\n intersection_points, intersection_objects = Scene.sort(intersection_points, intersection_objects, self, container=self.container, show_log=self.show_log)\n \n # find current intersection point and object -- should be zero if the list is sorted!\n intersection = closest_point(self.position, intersection_points)\n for i in range(0,len(intersection_points)):\n if list(intersection_points[i]) == list(intersection):\n index = i\n break\n \n #import pdb; pdb.set_trace()\n intersection_object = intersection_objects[index]\n assert intersection_object != None, \"No intersection points can be found with the scene.\"\n \n \n \"\"\"\n #DIAGNOSTICS\n print \"\\n\", intersection, \"\\n\"\n print intersection_object.name \n \"\"\" \n \n \n # Reached scene boundaries?\n if intersection_object is self.scene.bounds:\n self.active = False\n self.previous_container = self.container\n self.container = self.scene.bounds\n return self\n\n\n # Reached a RayBin (kind of perfect absorber)?\n if isinstance(intersection_object, RayBin):\n self.active = False\n self.previous_container = self.container\n self.container = self.scene.bounds\n return self\n \n \n # Here we trace the ray through a Coating\n if isinstance(self.container, Coating):\n normal = intersection_object.shape.surface_normal(self.ray)\n self = self.container.material.trace(self, normal, separation(self.position, intersection))\n self.exit_device = self.container\n self.previous_container = self.container\n self.container = 
self.scene.container(self)\n return self\n \n \n # Here we determine if the Coating has been hit\n if isinstance(intersection_object, Coating) and intersection_object.shape.on_surface(self.position):\n self.previous_container = self.container\n self.container = intersection_object\n self.exit_device = intersection_object\n assert self.exit_device != self.scene.bounds, \"The object the ray hit before hitting the bounds is the bounds, this can't be right.\"\n return self\n \n \n # Here we trace the ray through a Material\n self = self.container.material.trace(self, separation(self.position, intersection))\n \n \n # Lost in material?\n # Photon has been re-absorbed but NOT re-emitted, i.e. is inactive\n if not self.active:\n #01/04/10: Unification --> Next two lines came from older Trace version\n self.exit_device = self.container\n self.exit_material = self.container.material\n return self \n \n # Reaches interface\n # Photon has been re-absorbed AND re-emitted, i.e. is still active\n ray_on_surface = intersection_object.shape.on_surface(self.position)\n if not ray_on_surface: \n self.exit_device = self.container\n return self\n \n # Ray has reached a surface of some description, increment the intersection counter\n self.intersection_counter += 1\n \n # If we reach an reflective material then we don't need to follow \n # this logic we can just return\n if ray_on_surface and isinstance(intersection_object, Coating):\n self.previous_container = self.container\n self.container = intersection_object\n self.exit_device = intersection_object\n return self\n \n # KARLG NEW CODE HERE\n #import pudb; pudb.set_trace()\n if isinstance(intersection_object, Face):\n self.exit_device = intersection_object\n \n # Now change the properties of the photon accoring to what your surface does\n random_number = np.random.random_sample()\n if random_number < intersection_object.reflectivity:\n # Reflected\n self.direction = reflect_vector(intersection_object.shape.surface_normal(self.ray), self.direction)\n elif random_number < intersection_object.reflectivity + intersection_object.transmittance:\n # Transmitted\n pass\n else:\n # Loss\n self.active = False\n return self\n \n # Fresnel details\n normal = intersection_object.shape.surface_normal(self.ray)\n rads = angle(normal, self.direction)\n \n # material-air or material-material interface\n # Are there duplicates intersection_points that are equal to the ray position?\n same_pt_indices = []\n for i in range(0,len(intersection_points)):\n if cmp_points(self.position, intersection_points[i]):\n same_pt_indices.append(i)\n assert len(same_pt_indices) < 3, \"An interface can only have 2 or 0 common intersection points.\"\n \n initialised_internally = None\n \n if len(same_pt_indices) == 2:\n intersection_object = self.container\n \n if self.container == intersection_object:\n \n # hitting internal interface -- for the case we are at an material-material interface (i.e. 
not travelling through air)\n initialised_internally = True\n \n if len(same_pt_indices) == 2:\n \n for obj in intersection_objects:\n if obj.shape.on_surface(intersection) and obj != self.container:\n #if obj != self.container:\n next_containing_object = obj\n \n \n else:\n # hitting internal interface -- for the case we are not at an interface\n next_containing_object = self.scene.container(self)\n \n assert self.container != next_containing_object, \"The current container cannot also be the next containing object after the ray is propagated.\"\n \n # Fresnel details\n normal = intersection_object.shape.surface_normal(self.ray)\n rads = angle(normal, self.direction)\n if self.polarisation == None:\n reflection = fresnel_reflection(rads, self.container.material.refractive_index, next_containing_object.material.refractive_index)\n else:\n reflection = fresnel_reflection_with_polarisation(normal, self.direction, self.polarisation, self.container.material.refractive_index, next_containing_object.material.refractive_index)\n \n else:\n # hitting external interface\n initialised_internally = False \n \n \n if len(same_pt_indices) == 2:\n for obj in intersection_objects:\n if obj != self.container:\n intersection_object = obj\n next_containing_object = obj\n else:\n next_containing_object = intersection_object\n \n #import pdb; pdb.set_trace()\n normal = intersection_object.shape.surface_normal(self.ray)\n rads = angle(normal, self.direction)\n if self.polarisation == None:\n reflection = fresnel_reflection(rads, self.container.material.refractive_index, next_containing_object.material.refractive_index)\n else:\n reflection = fresnel_reflection_with_polarisation(normal, self.direction, self.polarisation, self.container.material.refractive_index, next_containing_object.material.refractive_index)\n \n if isinstance(next_containing_object, Collector):\n # If the photon hits an interface with e.g. a cell index-matched to it, then no reflection is to occur at this interface.\n reflection = 0.\n \n if np.random.random_sample() < reflection:\n # photon is reflected\n before = copy(self.direction)\n self.direction = reflect_vector(normal, self.direction)\n ang = angle(before, self.direction)\n \n if self.polarisation != None:\n \n #import pdb; pdb.set_trace()\n if cmp_floats(ang, np.pi):\n # Anti-parallel\n self.polarisation = self.polarisation\n else:\n # apply the rotation transformation the photon polarisation which aligns the before and after directions\n R = rotation_matrix_from_vector_alignment(before, self.direction)\n self.polarisation = transform_direction(self.polarisation, R)\n \n assert cmp_floats(angle(self.direction, self.polarisation), np.pi/2), \"Exit Pt. #1: Angle between photon direction and polarisation must be 90 degrees: theta=%s\" % str(np.degrees(angle(self.direction, self.polarisation)))\n \n self.propagate = False\n self.exit_device = self.container\n \n # invert polaristaion if n1 < n2\n if self.container.material.refractive_index < next_containing_object.material.refractive_index:\n \n if self.polarisation != None:\n \n if cmp_floats(ang, np.pi):\n # Anti-parallel\n self.polarisation = self.polarisation * -1.\n else:\n # apply the rotation transformation the photon polarisation which aligns the before and after directions\n R = rotation_matrix_from_vector_alignment(before, self.direction)\n self.polarisation = transform_direction(self.polarisation, R)\n \n assert cmp_floats(angle(self.direction, self.polarisation), np.pi/2), \"Exit Pt. 
#2: Angle between photon direction and polarisation must be 90 degrees: theta=%s\" % str(angle(self.direction, self.polarisation))\n \n if self.exit_device == self.scene.bounds or self.exit_device == None:\n self.exit_device = intersection_object\n assert self.exit_device != self.scene.bounds, \"The object the ray hit before hitting the bounds is the bounds, this can't be right\"\n return self\n else:\n # photon is refracted through interface\n self.propagate = True\n before = copy(self.direction)\n ang = angle(before, self.direction)\n if initialised_internally:\n if not isinstance(next_containing_object, Collector):\n self.direction = fresnel_refraction(normal, self.direction, self.container.material.refractive_index, next_containing_object.material.refractive_index )\n \n if self.polarisation != None:\n if cmp_floats(ang, np.pi):\n # Anti-parallel\n self.polarisation = self.polarisation\n else:\n # apply the rotation transformation the photon polarisation which aligns the before and after directions\n R = rotation_matrix_from_vector_alignment(before, self.direction)\n self.polarisation = transform_direction(self.polarisation, R)\n assert cmp_floats(angle(self.direction, self.polarisation), np.pi/2), \"Exit Pt. #3: Angle between photon direction and polarisation must be 90 degrees: theta=%s\" % str(angle(self.direction, self.polarisation))\n \n self.exit_device = self.container #LSC is the exit_device\n self.previous_container = self.container\n self.container = next_containing_object #Bounds is the container\n return self\n else:\n if not isinstance(next_containing_object, Collector):\n self.direction = fresnel_refraction(normal, self.direction, self.container.material.refractive_index, intersection_object.material.refractive_index )\n \n if self.polarisation != None:\n \n if cmp_floats(ang, np.pi):\n # Anti-parallel\n self.polarisation = self.polarisation\n else:\n # apply the rotation transformation the photon polarisation which aligns the before and after directions\n R = rotation_matrix_from_vector_alignment(before, self.direction)\n self.polarisation = transform_direction(self.polarisation, R)\n # apply the rotation transformation the photon polarisation which aligns the before and after directions\n\n assert cmp_floats(angle(self.direction, self.polarisation), np.pi/2), \"Exit Pt. 
#4: Angle between photon direction and polarisation must be 90 degrees: theta=%s\" % str(angle(self.direction, self.polarisation))\n \n # DJF 13.5.2010: This was crashing the statisical collection because it meant that an incident ray, hitting and transmitted, then lost would have bounds as the exit_device.\n #self.exit_device = self.container\n self.exit_device = intersection_object\n self.previous_container = self.container\n self.container = intersection_object\n return self", "def obj_ray_cast(obj, matrix):\r\n \r\n # get the ray relative to the object\r\n matrix_inv = matrix.inverted()\r\n ray_origin_obj = matrix_inv * ray_origin\r\n ray_target_obj = matrix_inv * ray_target\r\n ray_direction_obj = ray_target_obj - ray_origin_obj\r\n \r\n # cast the ray\r\n success, location, normal, face_index = obj.ray_cast(ray_origin_obj, ray_direction_obj)\r\n \r\n if success:\r\n return location, normal, face_index\r\n else:\r\n return None, None, None", "def is_hit(ball, r_ball, v, target, r_target):\n v_norm = norm_2d(v)\n dr = (target[0] - ball[0], target[1] - ball[1])\n dr_norm = norm_2d(dr)\n\n p = project(dr, v)\n p_norm = norm_2d(p)\n\n if p_norm > v_norm:\n c = (v_norm ** 2 + dr_norm ** 2 - 2 * sc_mul(v, dr)) ** 0.5\n return c <= r_ball + r_target\n\n h = get_point_line_distance(target, ball, (-v[1], v[0]))\n return abs(h) <= r_ball + r_target", "def obj_ray_cast(obj, matrix):\n\n # get the ray relative to the object\n matrix_inv = matrix.inverted()\n ray_origin_obj = matrix_inv * ray_origin\n ray_target_obj = matrix_inv * ray_target\n ray_direction_obj = ray_target_obj - ray_origin_obj\n \n # cast the ray\n success, location, normal, face_index = obj.ray_cast(ray_origin_obj, ray_direction_obj)\n\n if success:\n return location, normal, face_index, ray_target\n else:\n return None, None, None, ray_target", "def obj_ray_cast(obj, matrix):\n\n # get the ray relative to the object\n matrix_inv = matrix.inverted()\n ray_origin_obj = matrix_inv * ray_origin\n ray_target_obj = matrix_inv * ray_target\n ray_direction_obj = ray_target_obj - ray_origin_obj\n\n # cast the ray\n success, location, normal, face_index = obj.ray_cast(ray_origin_obj, ray_direction_obj)\n\n if success:\n return location, normal, face_index\n else:\n return None, None, None", "def closest_point_on_plane(point, plane):\n base, normal = plane\n x, y, z = base\n a, b, c = normalize_vector(normal)\n x1, y1, z1 = point\n d = a * x + b * y + c * z\n k = (a * x1 + b * y1 + c * z1 - d) / (a**2 + b**2 + c**2)\n return [x1 - k * a,\n y1 - k * b,\n z1 - k * c]", "def on_road(self):\n for obj in self.model.grid[self.pos[0]][self.pos[1]]:\n if obj.agent_type == \"road\":\n return obj", "def obj_ray_cast(obj, matrix):\n\n # get the ray relative to the object\n matrix_inv = matrix.inverted()\n ray_origin_obj = matrix_inv @ ray_origin\n ray_target_obj = matrix_inv @ ray_target\n ray_direction_obj = ray_target_obj - ray_origin_obj\n\n # cast the ray\n success, location, normal, face_index = obj.ray_cast(ray_origin_obj, ray_direction_obj)\n\n if success:\n return location, normal, face_index\n else:\n return None, None, None", "def follow(ray: Ray, scene: Scene, max_iters=1000, renderer=None) -> [Tuple[Ray, Decision]]:\n path = [(ray, Decision.EMIT)]\n idx = 0\n last_ray = ray\n while ray.is_alive:\n intersections = scene.intersections(ray.position, ray.direction)\n points, nodes = zip(*[(x.point, x.hit) for x in intersections])\n for ray, decision in step(ray, points, nodes, renderer=renderer):\n path.append((ray, decision))\n if 
points_equal(ray.position, last_ray.position) and np.allclose(ray.direction, last_ray.direction):\n raise TraceError(\"Ray did not move.\")\n last_ray = ray\n if idx > max_iters:\n raise TraceError(\"Ray got stuck.\")\n return path", "def HitTest(self, x, y):\r\n\r\n return 0", "def get_point(k, refpt):\n i = 0\n while i < k:\n rho, theta = np.random.uniform(r, 2*r), np.random.uniform(0, 2*np.pi)\n pt = refpt[0] + rho*np.cos(theta), refpt[1] + rho*np.sin(theta), 0\n if not (0 <= pt[0] < width and 0 <= pt[1] < height):\n # This point falls outside the domain of the grid, so try again.\n i += 1\n continue\n if point_valid(pt) and is_on_face(pt, v1, v2, v3):\n return pt\n i += 1\n # We failed to find a suitable point in the vicinity of refpt.\n return False", "def hit(self, origin, sightVector, hitError):\n distance = [0, 0, 0]\n for i in range(0, 3):\n distance[i] = self.translation[i] - origin[i]\n v1 = normalize(distance)\n v2 = normalize(sightVector)\n return abs(v1[0] - v2[0]) < hitError and \\\n abs(v1[1] - v2[1]) < hitError and abs(v1[2] - v2[2]) < hitError", "def hit(self, otherball):\r\n dx = (self.unif[0] + self.vx) - (otherball.unif[0] + otherball.vx)\r\n dy = (self.unif[1] + self.vy) - (otherball.unif[1] + otherball.vy)\r\n rd = self.radius + otherball.radius\r\n return dot(dx, dy) < (rd * rd)", "def step(ray, points, nodes, renderer=None):\n container, to_node, surface_node = ray_status(ray, points, nodes)\n min_point = ray.position\n max_point = points[0]\n \n dist = distance_between(min_point, max_point)\n _ray = ray\n for (ray, decision) in trace_path(ray, container, dist):\n if renderer:\n renderer.add_ray_path([_ray, ray])\n _ray = ray\n yield ray, decision\n\n if to_node is None and container.parent is None:\n # Case: Hit world node; kill ray here.\n ray = replace(ray, is_alive=False)\n yield ray, Decision.KILL\n elif points_equal(ray.position, max_point):\n # Case: Hit surface\n # NB The ray argument of `trace_surface` *must* be a ray on the surface of the \n # node and the returned ray must *not* be on the node!\n before_ray = ray\n _ray = ray\n for ray, decision in trace_surface(ray, container, to_node, surface_node):\n if renderer:\n renderer.add_ray_path([_ray, ray])\n _ray = ray\n yield ray, decision\n # Avoid error checks in production\n if __debug__:\n local_ray = ray.representation(surface_node.root, surface_node)\n if surface_node.geometry.is_on_surface(local_ray.position):\n logger.warning(\"(before) pos: {}\".format(before_ray.position))\n logger.warning(\"(after) pos: {}\".format(ray.position))\n raise TraceError(\"After tracing a surface the ray cannot still be on the surface.\")", "def point_sur_segment(self, pt):\n dp = pt - self.c\n d = dp.length - self.r\n a = atan2(dp.y, dp.x)\n t = (a - self.a0) / self.da\n return t > 0 and t < 1, d, t", "def sample(self, z_position):\n if self.direction[2] != 0:\n z_plane = PlaneSurface([0, 0, 1], [0, 0, z_position])\n if (z_plane.position() - self.position).dot(self.direction) > 0:\n ray = self\n else:\n ray = Ray(-self.direction, self.position)\n return ray.propagate(z_plane.time_to_bound(ray)).position", "def illuminate(self, ray, hit, scene):\n # TODO A5 copy implementation from A4 and modify\n # material parameters need to be looked up by the uv's at the intersection point\n l = self.position - hit.point\n epsilon = 0.000001\n point = hit.point + l*epsilon\n shadow_ray = Ray(point, l, epsilon, 1)\n\n if (scene.intersect(shadow_ray).t > 1):\n\n # diffuse shading\n intensity = self.intensity\n position = 
self.position\n normal = hit.normal\n dist_to_source = np.linalg.norm(hit.point - position)\n diffuse_coeff = hit.material.lookup(hit.material.k_d, hit)\n v = (-1) * normalize(ray.direction)\n light_ray = normalize(position - hit.point)\n specular_coeff = hit.material.lookup(hit.material.k_s, hit)\n p = hit.material.lookup(hit.material.p, hit)\n\n # diffuse shading\n # diffuse_output = diffuse_coeff * (np.maximum(0, np.dot(normal, light_ray)) / (dist_to_source ** 2)) * intensity\n # specular shading\n shade_ray = Ray(hit.point, light_ray, epsilon)\n if (scene.intersect(shade_ray).t == np.inf):\n h = (v + light_ray) / np.linalg.norm(v + light_ray)\n specular_output = (diffuse_coeff + specular_coeff * ((np.dot(normal, h)) ** p)) * (\n np.maximum(0, np.dot(normal, light_ray)) / (dist_to_source ** 2)) * intensity\n return specular_output\n\n return vec([0, 0, 0])", "def hitDetect(self, direction, solid):\n # Hit edge code not finished - A-Small-Being\n modX = self.x\n modY = self.y\n if direction == Direction.LEFT:\n modX -= 1\n elif direction == Direction.RIGHT:\n modX += 1\n elif direction == Direction.UP:\n modY -= 1\n elif direction == Direction.DOWN:\n modY += 1\n\n if (modX, modY) in solid:\n return True\n\n if modX == 0:\n #too far left\n pass\n elif modX == self.gameWindow.TILEWIDTH + 1:\n # Too far right\n pass\n elif modY == 0:\n # Too far up\n pass\n elif modY == self.gameWindow.TILEHEIGHT + 1:\n # Too far down\n pass\n else:\n # Fine\n return False\n return True", "def collision_detection(p, poly):\r\n _eps = 0.00001\r\n _huge = sys.float_info.max\r\n _tiny = sys.float_info.min\r\n \r\n def rayintersectseg(p, edge):\r\n ''' takes a point p=Pt() and an edge of two endpoints a,b=Pt() of a line segment returns boolean\r\n '''\r\n a,b = edge\r\n if a.y > b.y:\r\n a,b = b,a\r\n if p.y == a.y or p.y == b.y:\r\n p = Pt(p.x, p.y + _eps)\r\n \r\n intersect = False\r\n \r\n if (p.y > b.y or p.y < a.y) or (\r\n p.x > max(a.x, b.x)):\r\n return False\r\n \r\n if p.x < min(a.x, b.x):\r\n intersect = True\r\n else:\r\n if abs(a.x - b.x) > _tiny:\r\n m_red = (b.y - a.y) / float(b.x - a.x)\r\n else:\r\n m_red = _huge\r\n if abs(a.x - p.x) > _tiny:\r\n m_blue = (p.y - a.y) / float(p.x - a.x)\r\n else:\r\n m_blue = _huge\r\n intersect = m_blue >= m_red\r\n return intersect\r\n \r\n def _odd(x): return x%2 == 1\r\n \r\n def ispointinside(p, poly):\r\n\r\n return _odd(sum(rayintersectseg(p, edge)\r\n for edge in poly.edges ))\r\n \r\n detection = ispointinside(p,poly)\r\n return detection", "def ray_trace(x, y, poly):\n\n @vectorize([bool_(float64, float64)])\n def ray(x, y):\n # where xy is a coordinate\n n = len(poly)\n inside = False\n p2x = 0.0\n p2y = 0.0\n xints = 0.0\n p1x, p1y = poly[0]\n for i in range(n + 1):\n p2x, p2y = poly[i % n]\n if y > min(p1y, p2y):\n if y <= max(p1y, p2y):\n if x <= max(p1x, p2x):\n if p1y != p2y:\n xints = (y - p1y) * (p2x - p1x) / (p2y - p1y) + p1x\n if p1x == p2x or x <= xints:\n inside = not inside\n p1x, p1y = p2x, p2y\n return inside\n\n return ray(x, y)", "def _hitTest(self, point):\n point = self.CalcUnscrolledPosition(point)\n for displayKey in self._displayKeys.values():\n if displayKey.scaled.Contains(point):\n return displayKey\n return None", "def closest_on_screen_point_optim(trajectory, viewpoint, yaw, gaze_on_screen):\n \n traj_angles = dp.world_to_angles_through_screen(trajectory, viewpoint, yaw) \n \n #pprint(traj_angles)\n\n dist, idx = closest_node_tree(traj_angles, gaze_on_screen)\n ml_screen_ref = traj_angles[idx] \n\n return(idx, 
ml_screen_ref)", "def get_projection_point(self, point, plane, test=False):\n return point_on_plane_projection(point, plane, test=test)", "def mesh_ray_collision(mesh: jnp.ndarray, origin: jnp.ndarray, direction: jnp.ndarray):\n collides, positions, distances = jax.vmap(\n lambda t: _triangle_ray_collision(t, origin, direction)\n )(mesh)\n idx = jnp.argmin(jnp.where(collides, distances, jnp.inf))\n return (\n jnp.any(collides),\n positions[idx],\n _triangle_normal(mesh[idx]),\n )", "def get_object_at_location(cls, x, y):\n object_map_at_target_location = cls.query\\\n .filter_by(x=x, y=y).one_or_none()\n if not object_map_at_target_location:\n return None\n return object_map_at_target_location.get_real_object()", "def line_intersection(p0_x, p0_y, p1_x, p1_y, p2_x, p2_y, p3_x, p3_y):\n s10_x = p1_x - p0_x\n s10_y = p1_y - p0_y\n s32_x = p3_x - p2_x\n s32_y = p3_y - p2_y\n\n denom = s10_x * s32_y - s32_x * s10_y\n if denom == 0.0:\n return None # Collinear\n denomPositive = denom > 0\n\n s02_x = p0_x - p2_x\n s02_y = p0_y - p2_y\n s_numer = s10_x * s02_y - s10_y * s02_x\n if (s_numer < 0) == denomPositive:\n return None # No collision\n\n t_numer = s32_x * s02_y - s32_y * s02_x\n if (t_numer < 0) == denomPositive:\n return None # No collision\n\n if (s_numer > denom) == denomPositive or (t_numer > denom) == denomPositive:\n return 0 # No collision\n \n # Collision detected\n t = t_numer / denom\n i_x = p0_x + (t * s10_x)\n i_y = p0_y + (t * s10_y)\n\n return i_x, i_y", "def findminpath(tab, gxtab, gytab, pixtab):\n\n pathdist = 2 # the number of points each points on a ray can related to on the previous ray\n pathdist_penalty = 0.3 # penalty of the difference of the pathdist\n pathpix_penalty = 2 # penalty of the difference of pixel values between the point and the previous point\n nray = tab.shape[1]\n\n #tab = np.hstack((tab,tab[:, 0].reshape(tab.shape[0], 1)))\n #pixtab = np.hstack((pixtab,pixtab[:, 0].reshape(pixtab.shape[0], 1)))\n #gxtab = np.hstack((gxtab,gxtab[:, 0].reshape(gxtab.shape[0], 1)))\n #gytab = np.hstack((gytab,gytab[:, 0].reshape(gytab.shape[0], 1)))\n\n tab = np.hstack((tab,tab,tab)) # horizontally stack the tab matrix to prepare for the filtering on the result\n pixtab = np.hstack((pixtab,pixtab,pixtab))\n gxtab = np.hstack((gxtab,gxtab,gxtab))\n gytab = np.hstack((gytab,gytab,gytab))\n\n tab = (tab - tab.min()) / (tab.max() - tab.min()) # noralize the tab matrix\n pixtab = (pixtab - pixtab.min()) / (pixtab.max() - pixtab.min()) * -1 # for we want to find the white contour of the cell so we multipy -1 on the pixtab\n # tab = tab / np.median(tab)\n # pixtab = pixtab / np.median(pixtab)\n path = np.zeros(tab.shape)\n path[:, 0] = np.array(range(0, tab.shape[0]))\n score = np.zeros(tab.shape)\n score[:, 1] = tab[:, 1]\n\n for i in range(1, tab.shape[1]):\n for j in range(tab.shape[0]):\n mins = np.Inf # record the min value of the ray\n minat = 0\n for k in range(-pathdist, pathdist+1):\n if(0 <= (j+k) and (j+k) < tab.shape[0]):\n s = pixtab[j, i]\n pixdiff = abs(pixtab[j, i] - pixtab[j+k, i-1])\n s += pixdiff * pathpix_penalty # two kinds of penalty\n s += abs(k) * pathdist_penalty\n s += score[j+k, i-1]\n\n if(s < mins):\n mins = s\n minat = j + k\n path[j, i] = minat\n score[j, i]= mins\n\n start = int(np.argmin(score[:, -1]))\n path = path.astype(np.int32)\n minpath = [start]\n for i in range(tab.shape[1]-1, 0, -1):\n minpath.append(path[minpath[-1], i])\n minpath = minpath[::-1]\n # print(len(minpath))\n minpath = savgol_filter(minpath, 15, 3) # apply a 
Savitzky-Golay filter to the raw minpath signal\n minpath = minpath[nray:nray*2] # cut the middle part of minpath whose length is nray\n return np.array(minpath).astype(np.int32)", "def test_point_on_plane(self, point, plane):\n _dist = point.dot(plane[:3]) + plane[3]\n if _dist <= epsilon:\n print('OK => point on plane')\n else:\n print('NO => point not on plane')", "def hit(self):", "def _pickFull(self, context):\n rayObject = context.getPickingSegment(frame=self._getScenePrimitive())\n if rayObject is None:\n return None\n\n points = utils.segmentPlaneIntersect(\n rayObject[0, :3],\n rayObject[1, :3],\n planeNorm=self.getNormal(),\n planePt=self.getPoint())\n\n if len(points) == 1: # Single intersection\n if numpy.any(points[0] < 0.):\n return None # Outside volume\n z, y, x = int(points[0][2]), int(points[0][1]), int(points[0][0])\n\n data = self.getData(copy=False)\n if data is None:\n return None # No dataset\n\n depth, height, width = data.shape\n if z < depth and y < height and x < width:\n return PickingResult(self,\n positions=[points[0]],\n indices=([z], [y], [x]))\n else:\n return None # Outside image\n else: # Either no intersection or segment and image are coplanar\n return None", "def get_hit_marker(self):\r\n return Marker((0, 0, 0), self._screen)", "def GetPlane(plane):\r\n pass", "def hit(self, other=None, push=False):\n laser = pygame.mixer.Sound('resources/Laser.wav')\n laser.set_volume(0.5)\n if not other:\n front_pos = \\\n [p + d for p, d in zip(self.pos, DIRECTIONS[self.rotation])]\n other = self.map.get(front_pos)\n assert other is not None, \"No robot in front!\"\n if push:\n other.pos = [p + d for p, d in\n zip(other.pos, DIRECTIONS[self.rotation])]\n # get the hit direction\n look_other = DIRECTIONS[other.rotation]\n look_self = DIRECTIONS[self.rotation]\n if look_other == look_self: # von hinten getroffen\n damage = DAMAGE[FROM_BEHIND]\n elif all(abs(x) != abs(y)\n for x, y in zip(look_other, look_self)): # seitlich\n damage = DAMAGE[FROM_SIDE]\n else: # frontal\n damage = DAMAGE[FROM_FRONT]\n\n other.health -= damage if not push else damage * 0.25\n\n if hasattr(other, 'take_damage_anim'):\n other.animator.play_animation(other.take_damage_anim)\n if self.speakers:\n self.speakers.play(laser)\n new_turn = \"{0}!{1};{2}\".format(self.pos, other.pos, damage)\n self._call_gamelog_callbacks(new_turn)", "def hit(self, rect):\n \n # Find the hits at the current level\n hits = set(item for item in self.items if rect.contains(item.location))\n \n # Recursively check the lower quadrants\n if self.nw and rect.left < self.cx and rect.top < self.cy:\n hits |= self.nw.hit(rect)\n if self.sw and rect.left < self.cx and rect.bottom >= self.cy:\n hits |= self.sw.hit(rect)\n if self.ne and rect.right >= self.cx and rect.top < self.cy:\n hits |= self.ne.hit(rect)\n if self.se and rect.right >= self.cx and rect.bottom >= self.cy:\n hits |= self.se.hit(rect)\n \n return hits", "def get_mouse_ray(self, context, event):\n region, rv3d = context.region, context.region_data\n coord = event.mouse_region_x, event.mouse_region_y\n ray_direction = view3d_utils.region_2d_to_vector_3d(region, rv3d, coord)\n ray_origin = view3d_utils.region_2d_to_origin_3d(region, rv3d, coord)\n return ray_origin, ray_direction", "def intersection_segment_plane(segment, plane, epsilon=1e-6):\n pt1 = segment[0]\n pt2 = segment[1]\n p_cent = plane[0]\n p_norm = plane[1]\n\n v1 = subtract_vectors(pt2, pt1)\n dot = dot_vectors(p_norm, v1)\n\n if abs(dot) > epsilon:\n v2 = subtract_vectors(pt1, p_cent)\n fac = 
-dot_vectors(p_norm, v2) / dot\n if fac > 0. and fac < 1.:\n vec = scale_vector(v1, fac)\n return add_vectors(pt1, vec)\n return None\n else:\n return None", "def goto_pt(self, pt):\n curr_xy = [self.state.x, self.state.y]\n target_xy = (pt[0], pt[1])\n dist = math.sqrt((curr_xy[0] - target_xy[0])**2\n + (curr_xy[1] - target_xy[1])**2)\n\n if dist > self.goto_thresh:\n self.controller.target_velocity = self.goto_vel\n steering, velocity = \\\n self.controller.compute_control(self.state, target_xy)\n self.data_handler.update_target(self.controller.target)\n return steering, velocity\n else:\n self.controller.target_velocity = 0.0\n steering = 0.0\n velocity = 0.0\n return steering, velocity", "def pred_car_other_lane(self):\n other_lane = self.other_lane()\n\n for i in range(self.road.N):\n if self.road.cells[other_lane][(self.x + i + 1) % (self.road.N-1)] != -1:\n return self.road.cars[self.road.cells[other_lane][(self.x + i + 1) % (self.road.N-1)]]", "def linesegment_plane_intersection(self, p0,p1,point,normal): # only returns lines...intersections through the segment end points are ignored\n\t\tp0dot=numpy.dot(p0-point,normal)\n\t\tp1dot=numpy.dot(p1-point,normal)\n\t\tif (p0dot>0 and p1dot<0) or (p0dot<0 and p1dot>0): \n\t\t\t# if the dot products have opposing signs, then the line intersects the plane\n\t\t\treturn True,p0+(p1-p0)*abs(p0dot)/(abs(p0dot)+abs(p1dot))\n\t\telse:\n\t\t\treturn False" ]
[ "0.724688", "0.7169482", "0.7158089", "0.71482855", "0.67915195", "0.67516196", "0.67201257", "0.66962445", "0.6689992", "0.6683316", "0.65686566", "0.6530607", "0.64233273", "0.6218107", "0.6107053", "0.60904384", "0.60822034", "0.60539466", "0.60471374", "0.5973455", "0.59581316", "0.5944168", "0.5883482", "0.58700913", "0.5852339", "0.5827479", "0.5778097", "0.5777266", "0.57022357", "0.56945235", "0.5648417", "0.5611707", "0.5563744", "0.553851", "0.5529523", "0.55243814", "0.5508424", "0.5503527", "0.5488689", "0.5473283", "0.5473044", "0.54589635", "0.5453586", "0.54513234", "0.54334146", "0.54228675", "0.5375146", "0.5361326", "0.5349615", "0.53245085", "0.5291477", "0.5287521", "0.52861947", "0.5284544", "0.52671903", "0.526048", "0.5222193", "0.5215975", "0.52079946", "0.5206312", "0.52010375", "0.5195408", "0.5185223", "0.5182445", "0.5162324", "0.5141238", "0.5134397", "0.51294863", "0.510286", "0.5082481", "0.5075469", "0.5067873", "0.50580376", "0.5036729", "0.50314003", "0.5014529", "0.5009622", "0.49973068", "0.49764273", "0.4969208", "0.49673837", "0.49643046", "0.49544537", "0.49539417", "0.49386123", "0.49278378", "0.49271864", "0.49136552", "0.49094453", "0.48939368", "0.4880124", "0.48724905", "0.4871798", "0.48485523", "0.4848538", "0.4840595", "0.4840551", "0.48385227", "0.48379028", "0.4835317" ]
0.7633641
0
return the number of scalar components
def getNumberOfScalarComponents(self): return self.numberOfComponents
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def n_components(self):\n return 1", "def n_components(self):\n return self._components.shape[0]", "def n_cs(self):\n return np.size(self._cs, 0)", "def components(self):\n return self.num_components", "def __len__(self):\n num_x, num_y = self.conv_dims()\n return num_x * num_y", "def GetNumberOfComponents():\n return _itkRGBAPixelPython.itkRGBAPixelF_GetNumberOfComponents()", "def __len__(self):\n # Product function that can handle iterables (np.product can't).\n product = partial(reduce, operator.mul)\n modstr = '%s__' % self.modality\n return sum(product(len(v) for k, v in p.items() if modstr in k) if p else 1\n for p in self.param_grid)", "def n_complex_components(self):\n return self.n_components // 2 + (self.n_components % 2)", "def GetNumberOfComponents():\n return _itkRGBAPixelPython.itkRGBAPixelUS_GetNumberOfComponents()", "def GetNumberOfComponents():\n return _itkRGBAPixelPython.itkRGBAPixelUC_GetNumberOfComponents()", "def numel(self) -> int:\n return sum(p.numel() for p in self.parameters)", "def numel(self):\n return self.t.size", "def countComponents26(cube):\n n,l = labelComponents26(cube);\n return n;", "def __len__(self):\n return np.size(self.A,0)", "def n_cf(self):\n return np.size(self._ref_ii, 0)", "def voxel_count(self):\n return self.cols * self.rows * self.sections", "def __len__(self):\n # Product function that can handle iterables (np.product can't).\n product = partial(reduce, operator.mul)\n return sum(product(len(v) for v in p.values()) if p else 1\n for p in self.param_grid)", "def count(self):\n return len(self._components)", "def size(self)->int:\n\n return np.prod([axes if is_integer(axes) else len(axes) for axes in self._dim_axes])", "def num_vars(self):\n return len(self.bounds.lb)", "def itkRGBAPixelUS_GetNumberOfComponents():\n return _itkRGBAPixelPython.itkRGBAPixelUS_GetNumberOfComponents()", "def itkRGBAPixelUC_GetNumberOfComponents():\n return _itkRGBAPixelPython.itkRGBAPixelUC_GetNumberOfComponents()", "def itkRGBAPixelF_GetNumberOfComponents():\n return _itkRGBAPixelPython.itkRGBAPixelF_GetNumberOfComponents()", "def __len__(self):\n n = 1\n for valTuple in self._valListOfLists:\n n *= len(valTuple)\n return n", "def __len__(self):\n return len(self.components)", "def __len__(self):\n return len(self.components)", "def __len__(self):\n return len(self.kernels)", "def __len__(self):\n\n try:\n return len(self.counts)\n except SpectrumError:\n return len(self.cps)", "def n_elements(self) -> int:\n n_elem = np.prod(self.shape)\n if self.n_timesteps > 1:\n n_elem = int(n_elem / self.n_timesteps)\n return n_elem", "def n_thres(self):\n return np.size(self.thres)", "def _get_vector_size(self):\n if len(self):\n return len(self.values()[0])\n else:\n return 0", "def ndim(self) -> int:\r\n return len(self.plates)", "def nfactors(self):\n return self.L.nnz", "def dim(self) -> int:", "def nvar(self):\n return len(self.v)", "def ndim(self):\n\n self._check_assigned()\n\n if (\n self.lazy\n and self.transformer is not None\n and hasattr(self.transformer, \"get_transformed_shape\")\n ):\n return len(self.transformer.get_transformed_shape(self.values))\n else:\n return self.__array__().ndim", "def getNumElements(self):\n return 1", "def _n_features_out(self):\n return self.components_.shape[0]", "def len(self):\n return math.sqrt(self.v[0] * self.v[0] + self.v[1] * self.v[1])", "def __len__(self):\n return len(self.__squares) * len(self.__squares[0])", "def getDimension(self):\n return len(self.components)", "def __len__(self):\n return self.xyz.shape[0]", 
"def __len__(self):\n return self.xyz.shape[0]", "def nvar(self):\n return self.h.shape[0]", "def _N(self):\n return len(self._array)", "def getNumElements(self):\n return 1", "def size(self) -> int:\n return int(np.multiply(*self.shape))", "def Num_Elem_Pila(self):\n return len(self.pila)", "def num_elements(shape):\n return 1 if shape is None else int(np.prod(shape))", "def nspatials(self):\n return int(len(self)/2)", "def is_scalar(self):\n return len(self.coeffs.shape[self.sdim:]) == 0", "def n_features(self):\n return self.components.shape[-1]", "def number_of_basis(self):\n return self._pre_kernel.shape[0]", "def size(self):\n return self._N", "def ndim(self):\n return len(self.nvars)", "def num_cuboids(self):\n return self._shape_count(_sff.cuboid)", "def ndim(self):\n return len(self.point)", "def __len__(self) -> int:\n return len(self.variables)", "def length(self):\n return int(np.sum([x.length for x in self.parameters]))", "def dimension(self):\r\n a = 0\r\n for x in self.faces():\r\n if (len(x) > a):\r\n a = len(x) \r\n return a-1", "def get_number_of_components(df):\n n_components = 6 # since there a 6 numeric features\n pipe = _build_model(df, use_pca=True, n_components=n_components, use_kmeans=False, n_clusters=99)\n explained_variances = pipe.named_steps['pca'].explained_variance_ratio_\n plt.figure(7, figsize=(12, 6))\n plt.plot(range(1, 7), np.cumsum(explained_variances), 'o')\n plt.plot(range(1, 7), np.cumsum(explained_variances), '-', alpha=0.5)\n plt.xlabel('number of components')\n plt.ylabel('cumulative explained variance')\n plt.show()", "def nt(self):\n if self.nClumps() > 0:\n \n return len(self[0])\n \n else:\n \n return 0", "def getNumElements(self):\n return 0", "def count_dims(da):\n return len(da.dims)", "def nVariables(self):\n return len(self.variables)", "def component_size(self) -> int:\n return self._component_size", "def __len__(self):\n return self.N.shape[0]", "def GetNumberOfVariables(self):\n\n # nvar = 0\n # for i in self.variables_order:\n # # DO NOT COUNT VARIABLES THAT GET CONDENSED OUT\n # if i!=0:\n # if mesh.element_type == \"tri\":\n # nvar += (i+1)*(i+2) // 2\n # elif mesh.element_type == \"tet\":\n # nvar += (i+1)*(i+2)*(i+3) // 6\n # elif mesh.element_type == \"quad\":\n # nvar += (i+1)**2\n # elif mesh.element_type == \"hex\":\n # nvar += (i+1)**3\n\n # nvar = sum(self.variables_order)\n if self.nvar == None:\n self.nvar = self.ndim\n return self.nvar", "def size(self):\n return len(self.array_form)", "def numpoints(self):\n return len(self.pars) + 1 # so dof is 1", "def size(self):\n return self.N", "def calculate_num_params(self):\n num_params = 0\n for p in self.parameters():\n num_params += p.data.view(-1).size(0)\n return num_params", "def size(self):\n return int(misc.intprod(self.shape))", "def nClumps(self):\n \n return len(self)", "def size(self):\n return reduce(mul, self.shape, 1)", "def _get_parameter_count(self):\n parameters_d = 5;\n size_h = self.model.size_h\n return (size_h - 1) + size_h * (\n (size_h - 1) + parameters_d + (self.model.size_aa - 1) + \n (self.model.size_ss - 1) + (self.model.size_cis - 1)\n )", "def __len__(self):\n # type: () -> int\n return self.shape[0]", "def __len__(self):\n return self._vector.degree()", "def _numel(x):\n return tf.cast(tf.reduce_prod(x.shape), x.dtype)", "def length(self):\n return len(self.x)", "def num_parameters(self) -> int:\n return len(self.w) + prod(self.v.shape) - len(self.v)", "def __len__(self) -> int:\n\n return self.shape.shape[1]", "def specht(mu):\n return 
StandardTableaux(mu).cardinality().n()", "def get_num_variables(self):\n return len(self.variables)", "def width(self) -> int:\n return self._obj[self.x_dim].size", "def num_nodes(self):\n return ((len(self.tensor_u)+1) * (len(self.tensor_v)+1) *\n (len(self.tensor_w)+1))", "def getNumDimensions(self):\n return len(self.di.keys())", "def __len__(self) -> int:\n return self.disp_size ** 2", "def ndim(self):\n return self.initial_value.ndim", "def ndims(x):\n return len(x.get_shape())", "def __len__(self):\n return len(self._representation_vector)", "def count(self):\r\n return self.data_array.size", "def __len__(self):\n\n value_length = []\n for v in chain(self.values(), self.metainfo_values()):\n if isinstance(v, LabelData):\n value_length.append(v.label.shape[0])\n elif is_splitable_var(v):\n value_length.append(len(v))\n else:\n continue\n\n # NOTE: If length of values are not same or the current data sample\n # is empty, return length as 1\n if len(list(set(value_length))) != 1:\n return 1\n\n length = value_length[0]\n return length", "def nNx(self):\n return self.nCx + 1", "def __len__(self):\n return self._vertices.shape[0]", "def ndim(self):\n return self.__value.ndim", "def __len__(self):\n a = 1\n for size in self.sizes:\n a *= size\n return a", "def size(self):\n\t\treturn self.dims", "def count(self):\n \n return len(self.img_lst)", "def dim(self):\n return len(self._n)" ]
[ "0.74888134", "0.7376706", "0.7100258", "0.70853853", "0.70716816", "0.6938483", "0.69184744", "0.68926495", "0.68802017", "0.68690693", "0.6788687", "0.67648643", "0.6762822", "0.6739765", "0.67380327", "0.67363364", "0.671714", "0.6700786", "0.66510797", "0.6639145", "0.66346884", "0.66238576", "0.66186726", "0.6610711", "0.6609566", "0.6609566", "0.66025543", "0.65967375", "0.6596694", "0.65787077", "0.6545257", "0.6523638", "0.6521125", "0.65192986", "0.65190476", "0.6503485", "0.650108", "0.6496861", "0.6484584", "0.64841115", "0.6470093", "0.6465665", "0.6465665", "0.64639896", "0.6460598", "0.64572453", "0.6453727", "0.64311445", "0.6423383", "0.64188284", "0.6392824", "0.63914937", "0.63841105", "0.63783085", "0.6370214", "0.6368872", "0.6364317", "0.6360981", "0.63577", "0.63483214", "0.63479656", "0.63461", "0.6344622", "0.6333371", "0.63309187", "0.6324675", "0.63163584", "0.6308178", "0.6305977", "0.62927955", "0.62901586", "0.6288476", "0.6282235", "0.6279312", "0.62754345", "0.6270198", "0.626587", "0.6259863", "0.62534106", "0.625024", "0.6245406", "0.6244992", "0.6237537", "0.6234222", "0.62241125", "0.62240297", "0.622308", "0.6222", "0.62177086", "0.6217456", "0.621111", "0.62095934", "0.6205513", "0.6205438", "0.62023634", "0.61885196", "0.61864626", "0.6179317", "0.6174346", "0.61708283" ]
0.8905581
0
Set the color transfer function
def setColorTransferFunction(self, ctf): self.ctf = ctf
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_color(self):\r\n \r\n \r\n colorset = self.colorset\r\n \r\n self.grfx[0].colorset = colorset\r\n pass", "def setColorDiffuse(*args):", "def getColorTransferFunction(self):\n\t\treturn self.ctf", "def set_color(self):\n self.image[self.x, self.y] = self.color\n if self.diffusion:\n r = g = b = 0\n for i in range(self.convolution_matrix.shape[0]):\n for j in range(self.convolution_matrix.shape[1]):\n r = g = b = 0\n for k in range(self.convolution_matrix.shape[0]):\n for l in range(self.convolution_matrix.shape[1]):\n m = (self.x + i + k - 2 + self.image.shape[0]) % self.image.shape[0]\n n = (self.y + j + l - 2 + self.image.shape[1]) % self.image.shape[1]\n r += self.convolution_matrix[k][l] * self.image[m, n][2]\n g += self.convolution_matrix[k][l] * self.image[m, n][1]\n b += self.convolution_matrix[k][l] * self.image[m, n][0]\n self.image[self.x, self.y] = (b, g, r)", "def changeColor( self ):\n\t\t\n\t\tx, y = self.position.xy\n\t\tself.color = ( int((x / WINDOW_X) * 128), int((x / WINDOW_X) * 128) + int((y / WINDOW_Y) * 128 ), int((y / WINDOW_Y) * 128))", "def changeColor(self):\n self.layer.new_colormap()", "def set_color(self, color):\n\t\tpass", "def set_color():\n function = LegacyFunctionSpecification()\n function.addParameter('index_of_the_particle', dtype='int32', direction=function.IN)\n for par in [\"red\", \"green\", \"blue\"]:\n function.addParameter(par, dtype='float64', direction=function.IN, \n description = \"The new RGB color vector of the particle\")\n function.addParameter('npoints', dtype='int32', direction=function.LENGTH)\n function.result_type = 'int32'\n function.must_handle_array = True\n return function", "def getTweenColor(self, factor):\n\n pass", "def UseColor(self, use_color):\n self.use_color = use_color", "def set_color(self, color):\n pass", "def set_color(color):\r\n global _current_color\r\n _current_color = color", "def set_param(self, name, val):\n # name will be 'colorR', 'colorG', 'colorB'\n rgb255 = int(val * 255)\n if name == 'colorR':\n self.color.r = rgb255\n elif name == 'colorG':\n self.color.g = rgb255\n elif name == 'colorB':\n self.color.b = rgb255", "def get_transfer_functioin(self, color_file=None, volume_opacity=0.25):\n if color_file: # Color file is given\n\n import csv\n fid = open(color_file, \"r\")\n reader_color = csv.reader(fid)\n\n dict_RGB = {}\n for line in reader_color:\n dict_RGB[int(line[0])] = [float(line[2]) / 255.0,\n float(line[3]) / 255.0,\n float(line[4]) / 255.0]\n fid.close()\n\n # Define colour transfer function\n color_transfor = vtk.vtkColorTransferFunction()\n\n for idx in dict_RGB.keys():\n color_transfor.AddRGBPoint(idx,\n dict_RGB[idx][0],\n dict_RGB[idx][1],\n dict_RGB[idx][2])\n\n # Opacity transfer function\n opacity_scalar = vtk.vtkPiecewiseFunction()\n\n for idx in dict_RGB.keys():\n opacity_scalar.AddPoint(\n idx, volume_opacity if idx != 0 else 0.0)\n\n # Opacity Gradient Transfer function\n opacity_gradient = vtk.vtkPiecewiseFunction()\n opacity_gradient.AddPoint(1, 0.0)\n opacity_gradient.AddPoint(5, 0.1)\n opacity_gradient.AddPoint(100, 1.0)\n\n return color_transfor, opacity_scalar, opacity_gradient\n\n else: # Default color transfer functions\n\n # min, max = self.get_value_range()\n color_transfor = vtk.vtkColorTransferFunction()\n color_transfor.AddRGBPoint(0, 0.0, 0.0, 0.0)\n color_transfor.AddRGBPoint(500, 1.0, 0.5, 0.3)\n color_transfor.AddRGBPoint(1000, 1.0, 0.5, 0.3)\n color_transfor.AddRGBPoint(1150, 1.0, 1.0, 0.9)\n\n # The opacity transfer function is used to control the 
opacity\n # of different tissue types.\n opacity_scalar = vtk.vtkPiecewiseFunction()\n opacity_scalar.AddPoint(0, 0.00)\n opacity_scalar.AddPoint(500, 0.15)\n opacity_scalar.AddPoint(1000, 0.15)\n opacity_scalar.AddPoint(1150, 0.85)\n\n # The gradient opacity function is used to decrease the opacity\n # in the \"flat\" regions of the volume while maintaining the opacity\n # at the boundaries between tissue types. The gradient is measured\n # as the amount by which the intensity changes over unit distance.\n # For most medical data, the unit distance is 1mm.\n opacity_gradient = vtk.vtkPiecewiseFunction()\n opacity_gradient.AddPoint(0, 0.0)\n opacity_gradient.AddPoint(90, 0.5)\n opacity_gradient.AddPoint(100, 1.0)\n\n return color_transfor, opacity_scalar, opacity_gradient", "def set_color(self, new_color):\n self.color = new_color", "def set_color(self, color):\n self.light_color = color\n for f in self.color_change_cb:\n f(self)", "def refresh_color(self):\n self.color = max(0, int(math.sqrt(self.vx ** 2\n + self.vy ** 2)) + 100)", "def _color(self, args):", "def _update_color(self, color):\n self.color = color", "def set_color(self, value):\n _lib.caca_set_dither_color.argtypes = [_Dither, ctypes.c_char_p]\n _lib.caca_set_dither_color.restype = ctypes.c_int\n\n return _lib.caca_set_dither_color(self, value)", "def _update_color(self, *args):\n\n if self._variable and 'w' in self._mode and not self._dnd_started:\n self._internal_color_change = True\n self.color_var.set(self._variable)", "def _color(self, x, factor):\r\n factor = (factor/MAX_LEVEL) * 1.8 + .1\r\n degenerate = tf.image.grayscale_to_rgb(tf.image.rgb_to_grayscale(x))\r\n x = tfa.image.blend(degenerate, tf.cast(x, tf.float32), factor)\r\n return tf.saturate_cast(x, tf.uint8)", "def _set_hsvF(self, color):\n\n self.qcolor.setHsvF(color[0], color[1], color[2], 255)", "def setColorMode(mode='full'):\n mdict = {'low':'NONE','full':'FULL'}\n dislin.clrmod(mdict[mode])", "def test_color(self):\n self._calibration_test(\"color_full\")", "def setPixelColor(self, n, color):\n self._logger.debug(\"setPixelColor\")", "def change_color(self, color):\n self.color = color", "def _set_color_mode(self, mode):\n self._write(ST7789_COLMOD, bytes([mode & 0x77]))", "def __init__(self, *args, **kwargs):\n _gdi_.DCTextColourChanger_swiginit(self,_gdi_.new_DCTextColourChanger(*args, **kwargs))", "def setColorRGB(r,g,b):\n r, g, b = r/255., g/255., b/255.\n dislin.setrgb(r,g,b)", "def _set_color(self, r):\n c = COLORS[self.color]\n r.setLineColor(c[0], c[1], c[2])\n r.setColor(c[0], c[1], c[2])", "def set_color_rgb(r, g, b):\r\n global _current_color\r\n _current_color = (r, g, b)", "def color(self, color):\n #self._color = color\n new_color = \"{0}{1}{2}\".format(hex(int(color[0]))[2:].zfill(2),\n hex(int(color[1]))[2:].zfill(2),\n hex(int(color[2]))[2:].zfill(2))\n #self.log.info(\"RASPLes.color(%s : %s -> %s)\" % (self.number, color, new_color))\n #print(\"color(%s -> %s)\" % (self.number, new_color))\n try:\n self.current_color = new_color\n #self.strip.setPixelColor(int(self.number), self.current_color)\n self.strip.setPixelColorRGB(int(self.number), color[0], color[1], color[2])\n\n self.strip.updated = True\n except Exception as e:\n self.log.error(\"led update error\" + str(e))", "def driftColor(baseColor, factor=110):\n if baseColor.lightness() > 128:\n return baseColor.darker(factor)\n else:\n return baseColor.lighter(factor+10)", "def setColor(self):\n\n sel = cmds.ls(selection=True, type=['shape', 'transform'])\n if len(sel) > 0:\n for 
obj in sel:\n if cmds.nodeType(obj) == 'transform':\n shapes = cmds.listRelatives(obj, type='shape')\n if len(shapes) > 0 and self.shapeTypeCbx.isChecked():\n for shape in shapes:\n if cmds.attributeQuery('overrideEnabled', node=shape, exists=True):\n cmds.setAttr(shape + '.overrideEnabled', True)\n if self.colorsTab.currentIndex() == 0:\n if cmds.attributeQuery('overrideRGBColors', node=shape, exists=True):\n cmds.setAttr(shape + '.overrideRGBColors', False)\n if cmds.attributeQuery('overrideColor', node=shape, exists=True):\n cmds.setAttr(shape + '.overrideColor', self.colorSlider.value())\n else:\n if cmds.attributeQuery('overrideRGBColors', node=shape, exists=True):\n cmds.setAttr(shape + '.overrideRGBColors', True)\n if cmds.attributeQuery('overrideColorRGB', node=shape, exists=True):\n color = self.rgbColorDlg.currentColor()\n cmds.setAttr(shape + '.overrideColorRGB', color.red()/255.0, color.green()/255.0, color.blue()/255.0)\n\n if self.transformTypeCbx.isChecked():\n if cmds.attributeQuery('overrideEnabled', node=obj, exists=True):\n cmds.setAttr(obj + '.overrideEnabled', True)\n if self.colorsTab.currentIndex() == 0:\n if cmds.attributeQuery('overrideRGBColors', node=obj, exists=True):\n cmds.setAttr(obj + '.overrideRGBColors', False)\n if cmds.attributeQuery('overrideColor', node=obj, exists=True):\n cmds.setAttr(obj + '.overrideColor', self.colorSlider.value())\n else:\n if cmds.attributeQuery('overrideRGBColors', node=obj, exists=True):\n cmds.setAttr(obj + '.overrideRGBColors', True)\n if cmds.attributeQuery('overrideColorRGB', node=obj, exists=True):\n color = self.rgbColorDlg.currentColor()\n cmds.setAttr(obj + '.overrideColorRGB', color.red() / 255.0,\n color.green() / 255.0, color.blue() / 255.0)", "def set_color(self, color):\n self.color = color", "def assign_color(self,tank):\n tank.color = self.color_queue.popleft()", "def Set(*args):\n return _XCAFDoc.XCAFDoc_ColorTool_Set(*args)", "def fadeToRGB(self, color: tuple):\n r, g, b = color\n self._sendi2c('c', [r, g, b])", "def Set(self, *args):\n return _itkRGBAPixelPython.itkRGBAPixelF_Set(self, *args)", "def rgb(self, value):\n\n self._variable = value\n self._update()", "def XCAFDoc_ColorTool_Set(*args):\n return _XCAFDoc.XCAFDoc_ColorTool_Set(*args)", "def setColor(self,value):\n\t\tself.politics = value if(type(value) is int)else int(value[1:],16)\n\t\tself.canvas.itemconfig('node_'+self.identifier,fill=self.toRGB())", "def setColourLevels(self):\n minsg = np.min(self.sg)\n maxsg = np.max(self.sg)\n brightness = self.brightnessSlider.value()\n contrast = self.contrastSlider.value()\n colourStart = (brightness / 100.0 * contrast / 100.0) * (maxsg - minsg) + minsg\n colourEnd = (maxsg - minsg) * (1.0 - contrast / 100.0) + colourStart\n for btn in self.picbuttons:\n btn.stopPlayback()\n btn.setImage(self.lut, colourStart, colourEnd, False)\n btn.update()", "def set_color(self, r=0, g=0, b=0):\n r = clamp(r)\n g = clamp(g)\n b = clamp(b)\n self._state.color = (r, g, b)\n self.send_command(Command.SET_COLOR, [int(r), int(g), int(b)])", "def setColorIndex(idx):\n dislin.setclr(idx)", "def plot_color_changed(self):\n self.plot_color = self.plot_color_button.color()", "def change_color(mutated_genome):\n index = random.randint(0,max(0,len(mutated_genome)-1))\n if color_mode == 'RGB':\n color_red = random.randint(-25,25)\n color_green = random.randint(-25,25)\n color_blue = random.randint(-25,25)\n color = mutated_genome[index][0]\n newcolor = (color[0]+color_red,color[1]+color_green,color[2]+color_blue)\n else: #color_mode 
== 'L':\n color_diff = random.randint(-25,25)\n color = mutated_genome[index][0]\n newcolor = color+color_diff\n mutated_genome[index][0] = newcolor", "def slider_action(self, sender):\n self.r = self.rslider.value\n self.g = self.gslider.value\n self.b = self.bslider.value\n self.preview.background_color = self.rgb\n self.colorlabel.text = self.hexcode", "def setColors(self):\n #productive\n profprint()\n self.color= [[0,0,0] for i in range(205)]\n self.color255= self.setColors255()\n for i in range(205):\n for j in range(3):\n self.color[i][j] = self.color255[i][j]/float(255)\n\n return self.color", "def setColor(self, value):\n _res = self.mAPIContext.SDGraphObjectFrame_setColor(self.mHandle, ctypes.byref(value))\n if _res != SDApiError.NoError.value:\n if _res == SDApiError.NoErrorOutputParamNotSet.value:\n return None\n raise APIException(SDApiError(_res))\n return None", "def color(self, value: tuple) -> None:\n if value in Color.PALETTE:\n self._color = value", "def setColourMap(self):\n cmap = self.config['cmap']\n\n pos, colour, mode = colourMaps.colourMaps(cmap)\n\n cmap = pg.ColorMap(pos, colour,mode)\n self.lut = cmap.getLookupTable(0.0, 1.0, 256)\n minsg = np.min(self.sg)\n maxsg = np.max(self.sg)\n self.colourStart = (self.config['brightness'] / 100.0 * self.config['contrast'] / 100.0) * (maxsg - minsg) + minsg\n self.colourEnd = (maxsg - minsg) * (1.0 - self.config['contrast'] / 100.0) + self.colourStart", "def set_red(x, y, value, slot = 0):\r\n __g[slot].pixels_rgb[__g[slot].width * 3 * y + x * 3] = value", "def set_rgb(self, value):\n act = RGBAction(self, value)\n return act.invoke()", "def rgb_slider_moved(self, event):\n slider_red = int(self.slider_r.get_value())\n slider_green = int(self.slider_g.get_value())\n slider_blue = int(self.slider_b.get_value())\n\n self.change_color((slider_red, slider_green, slider_blue))", "def update_color(self):\n self.plot(update_traces=False, update_waveforms=True)", "def color_callback(self, data):\n cv_image = self.bridge.imgmsg_to_cv2(data, desired_encoding=\"passthrough\")\n self.color_mutex.acquire()\n self.color_image = cv_image\n self.color_mutex.release()", "def main():\n # color = rb.Color.BLUE.value\n # move_to_color(color)\n infared_sensor()\n\n # WHITE/RED does not work same with the BLUE/GREEN going down", "def rgb_transition(self,r=0,g=0,b=0,duration=1,force=False):\n r=self.assert_int_255(r)\n g=self.assert_int_255(g)\n b=self.assert_int_255(b)\n #make this a no-op if we already are showing this color\n if not force and (r==self.r and g==self.g and b==self.b):\n return None\n \n tms = int(duration*1000)\n assert tms>=0 and tms<=0xffffffff\n t=tms.to_bytes(4,'little')\n\n logger.info(\n 'Transitioning device at %03d:%03d to RGB:%02x%02x%02x over %d ms',\n self.usbdev.bus,self.usbdev.address,r,g,b,tms)\n ret=self.usbdev.ctrl_transfer(bmRequestType=0x21, bRequest=0x09, wValue=0x03a2, wIndex=0, data_or_wLength=[0xa2,0x00,r,g,b,t[0],t[1],t[2],t[3]])\n self.r=r\n self.g=g\n self.b=b\n return ret", "def collimator(self):\n self.spectrum = self.spectrum", "def setColor(self, color):\n self.__color = color", "def setChan(\n self,\n u,\n chan,\n fval,\n ):\n\n self.DMX[u].set_chan_float(chan, fval)", "def setColourMap(self):\n cmap = self.config['cmap']\n\n pos, colour, mode = colourMaps.colourMaps(cmap)\n\n cmap = pg.ColorMap(pos, colour, mode)\n self.lut = cmap.getLookupTable(0.0, 1.0, 256)\n minsg = np.min(self.sg)\n maxsg = np.max(self.sg)\n self.colourStart = (self.config['brightness'] / 100.0 * self.config['contrast'] / 
100.0) * (\n maxsg - minsg) + minsg\n self.colourEnd = (maxsg - minsg) * (1.0 - self.config['contrast'] / 100.0) + self.colourStart", "def setColor(clr):\n if type(clr) == types.StringType:\n setColorString(clr)\n return \n if type(clr) == types.IntType:\n setColorIndex(clr)\n return\n if type(clr) == types.TupleType:\n setColorRGB(*clr)", "def map_channels(self, map_function):\n return ScreenColor(map_function(self.red), map_function(self.green), map_function(self.blue))", "def change_color():\n return random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)", "def setSurfaceColorScale(low,high):\n dislin.zscale(low,high)", "def set_color(objname, rgb):\r\n return f'\\ncmd.set_color(\"{objname}\", {(rgb[0], rgb[1], rgb[2])})'", "def scale_to_01(color: C3I) -> C3F:\n r, g, b = color\n return r / 255, g / 255, b / 255", "def gradfactor(self, f):\r\n raise NotImplementedError", "def Set(*args, **kwargs):\n return _gdi_.Colour_Set(*args, **kwargs)", "def update_r(color, new_r):\n\n color.update_r(new_r)", "def SetRed(self, *args):\n return _itkRGBAPixelPython.itkRGBAPixelUS_SetRed(self, *args)", "def set_color_mode(self, mode):\n mode=self._color_modes.get(mode,mode)\n res=lib.is_SetColorMode(self.hcam,mode)\n errcheck()(res,lib.is_SetColorMode,None)\n return self.get_color_mode()", "def late_gradient_fusion():\n pass", "def set_color(self):\n new_color = QColorDialog.getColor(QColor(self.config['color']))\n if not new_color.isValid():\n return\n self.config['color'] = new_color.rgb()\n self.paint()", "def setColour(self, col):\n\t\tself.colour = col", "def set_color(self, color):\n self._color = color", "def set_palette(self, red, green, blue, alpha):\n raise DitherError(\"Not implemented\")", "def change_lights_color(self, entity, attribute, oldUrl, newUrl, kwargs):\n if newUrl != oldUrl and newUrl is not None and self.can_change_colors():\n rgb_colors = self.get_colors(self.format_ha_url(newUrl))\n for i in range(len(self.lights)):\n threading.Thread(target=self.set_light_rgb, args=(self.lights[i], rgb_colors[i])).start()", "def set_red_light(self, value):\n self.diffuse_light[0] = value\n self.redraw()", "def setRandomColor():\n setColor(getRandomColor())", "def set_contrast(value):\n command([0x21, 0x14, value, 0x20, 0x0c])", "def brighter_switch(turtle, color):\n turtle.fillcolor(color + \"1\")", "def on_rgb_slide(self,r,g,b):\n if not self.active:\n return\n red = int(round(r / 100.0))\n green = int(round(g / 100.0))\n blue = int(round(b / 100.0))\n self.rgb = colormodel.RGB(red, green, blue)\n self.hsv = a3.rgb_to_hsv(self.rgb)\n assert (self.hsv == None or type(self.hsv) == colormodel.HSV), 'rgb_to_hsv does not return a HSV object'\n self.cmyk = a3.rgb_to_cmyk(self.rgb)\n assert (self.cmyk == None or type(self.cmyk) == colormodel.CMYK), 'rgb_to_cmyk does not return a CMYK object'\n self.update()", "def _color_change_mode(self):\r\n self.dlg.exec_()\r\n self.color = self.dlg.currentColor().name()\r\n self.colorPlate.setStyleSheet(\"background-color: %s;\" % self.color)\r\n self.input_scene.get_stk_color(self.color)\r\n return", "def color_transfert():\n\n\n\tn_target = input(\"Tell me which picture wants a new make up.\\n\\n\")\n\tn_source = input(\"And now tell me which one she wanna look like \\n\\n\")\n\n\ttarget = cv.imread(n_target, 1)\n\tsource = cv.imread(n_source, 1)\n\n\t### So basically, target will get new colors from source\n\n\t## First let's convert them into the l alpha beta color space\n\n\tt_alpha = rgb2alpha(target)\n\ts_alpha = 
rgb2alpha(source)\n\n\n\t## Now let's make up our target thanks to some statistical operations\n\n\tm_target = make_up(t_alpha, s_alpha)\n\n\n\t## Finally we gonna convert target back to rgb space\n\n\tm_target = alpha2rgb(m_target)\n\n\t## And save it, so let's name it, you don't have to give the format, we'll add it here\n\n\tname = input(\"What's the name of the new picture ? \\n\")\n\n\tname += \".png\"\n\n\tcv.imwrite(name, m_target)\t\t# You can now post your new picture to instagramm and let\n\t\t\t\t\t\t# your followers believe that you are a skilled photograph.\t\n\t\t\t\t\t\t# I personally don't use this shit so fuck it.\n\n\tprint(\"{} saved.\".format(name))", "def set_color(color='black', index=-1): # (8)\n if index == -1:\n global color_buffer\n color_buffer = deque([color]*NUM_LEDS, maxlen=NUM_LEDS)\n else:\n color_buffer[index] = color", "def setPixelColorRGB(self, n, red, green, blue, white=0):\n self._logger.debug(\"setPixelColorRGB\")", "def set_led_color(color):\n requests.post('http://192.168.4.1/pixel', data=json.dumps(color))", "def fl_color(colr):\n _fl_color = library.cfuncproto(\n library.load_so_libforms(), \"fl_color\",\\\n None, [xfdata.FL_COLOR],\\\n \"\"\"void fl_color(FL_COLOR col)\"\"\")\n library.check_if_flinitialized()\n #library.checknonfatal_allowed_value_in_list(colr, xfdata.COLOR_list)\n ul_colr = library.convert_to_FL_COLOR(colr)\n library.keep_elem_refs(colr, ul_colr)\n _fl_color(ul_colr)", "def change( p ):\n red = p[0]\n green = p[1]\n blue = p[2]\n return [ 255-red, 255-green, 255-blue ]", "def log_forward_color(self, forward_func, *args, color=None, **kwargs):\n\t\tif color is None:\n\t\t\treturn forward_func(*args, **kwargs)\n\t\telse:\n\t\t\tif \"extra\" not in kwargs:\n\t\t\t\treturn forward_func(*args, extra=dict(color=color), **kwargs)\n\t\t\telse:\n\t\t\t\treturn forward_func(*args, **{**kwargs, \"extra\": {**kwargs[\"extra\"], \"color\": color}})", "def switch(self, _color = 16):\n\t\tself.pointer.flip()\n\n\t\tif self.pointer.get():\n\t\t\tself.content[0][1] = 3\n\t\t\tself.content[1][1] = 16\n\t\telse:\n\t\t\tself.content[0][1] = 16\n\t\t\tself.content[1][1] = 3", "def __init__(self, target_colour: Tuple[int, int, int]) -> None:\r\n self.colour = target_colour", "def __init__(self, target_colour: Tuple[int, int, int]) -> None:\r\n self.colour = target_colour", "def __init__(self, target_colour: Tuple[int, int, int]) -> None:\r\n self.colour = target_colour", "def color(self, color):\n\n self.container['color'] = color", "def on_autocolor_switch(self, action, value):\n\t\taction.set_state(value)\n\t\tautocolor = value.get_boolean()\n\t\tself.config[\"color\"][\"auto\"] = autocolor\n\n\t\tcolor = self.config[\"color\"][\"autofg\"] if autocolor else self.config[\"color\"][\"fg\"]\n\t\tself.gui[\"fg-colorbutton\"].set_rgba(color)\n\t\tself._mainapp.draw.color_update()" ]
[ "0.67156917", "0.6633878", "0.6617592", "0.63400304", "0.6332681", "0.6234133", "0.62047946", "0.6202683", "0.61825573", "0.60994345", "0.6095942", "0.601829", "0.5967596", "0.5921991", "0.5918888", "0.5858037", "0.5846267", "0.5846243", "0.5836248", "0.5822841", "0.5788105", "0.5754373", "0.5750509", "0.5732265", "0.57248926", "0.5702094", "0.5686844", "0.5683639", "0.56805426", "0.5672658", "0.5662928", "0.5655595", "0.5650803", "0.56216735", "0.56119525", "0.5610205", "0.55951834", "0.559341", "0.55822414", "0.5568921", "0.5552745", "0.55457085", "0.5544992", "0.5528635", "0.55119944", "0.55037135", "0.54824704", "0.5469458", "0.54691", "0.5445954", "0.5444603", "0.54315346", "0.5429521", "0.5422716", "0.54215074", "0.54187334", "0.5414043", "0.54113674", "0.5395554", "0.53947955", "0.53919935", "0.5378827", "0.5372842", "0.5359309", "0.53461105", "0.5344184", "0.534257", "0.53368145", "0.53322595", "0.5327841", "0.532369", "0.5323402", "0.53208464", "0.53148866", "0.5314142", "0.530756", "0.53048307", "0.53042674", "0.5301364", "0.5296395", "0.52884746", "0.52848405", "0.527793", "0.5273372", "0.5272855", "0.52700037", "0.5264449", "0.5263763", "0.5257225", "0.5253555", "0.52534086", "0.52517337", "0.52511203", "0.52495605", "0.5240861", "0.52405316", "0.52405316", "0.52405316", "0.52370375", "0.5236274" ]
0.81864583
0
Return a flag indicating whether the images are 2D or 3D images
def is3DImage(self): return self.is3D
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_depth_image(self):\n return False", "def is_2d(self) -> bool:\n return self.layers == 1 and self.times == 1", "def check_niimg_3d(niimg, dtype=None):\n return check_niimg(niimg, ensure_ndim=3, dtype=dtype)", "def is3_d(self):\n return self.container['is3_d']", "def are_compatible_imgs(one_img, another_img):\n return have_same_shapes(one_img, another_img)", "def is_rgb(im):\n if(im.ndim == 3):\n return True\n else:\n return False", "def test_complex(self):\n image = self.design.layout.layers[0].images[2]\n assert len(image.shape_instances) == 3", "def check_img(img):\n\n if isinstance(img, (str, os.PathLike)) and os.path.exists(img):\n img = nib.load(img)\n elif not isinstance(img, nib.spatialimages.SpatialImage):\n raise TypeError('Provided image must be an existing filepath or a '\n 'pre-loaded niimg-like object')\n\n # ensure 3D or squeezable to 3D\n img = nib.funcs.squeeze_image(img)\n if len(img.shape) != 3:\n raise ValueError('Provided image must be 3D')\n\n # check if atlas data is int or castable to int\n # if image is arrayproxy convert it to an array for speed-up\n data = np.asarray(img.dataobj)\n cast = nib.is_proxy(img.dataobj)\n if img.header.get_data_dtype().kind not in ['i', 'u']:\n idata = data.astype('int32')\n cast = np.allclose(idata, data)\n data = idata\n if not cast:\n raise ValueError('Provided image should have integer values or '\n 'be safely castable to int without data loss')\n if cast:\n img = img.__class__(data, img.affine, header=img.header)\n img.header.set_data_dtype(np.int32)\n\n return img", "def rgb(self) -> bool:\n return self.image_shape[2] == 3", "def has_images(self):\n return len(self.images) > 0", "def assertIsNifti3D(*args):\n for f in args:\n assertIsNifti(f)\n d = ensure.ensureIsImage(f)\n assert len(d.shape) == 3, \\\n 'incorrect shape for 3D nifti: {}:{}'.format(d.shape, f)", "def _isGrayscale(self, img: ndarray) -> bool:\n if len(np.squeeze(img).shape) == 2:\n return True\n else:\n return False", "def is_rgb(img: np.ndarray) -> bool:\n\n return len(img.shape) >= 1 and img.shape[-1] == 3", "def is_lpi(image: ANTsImage) -> bool:\n return ants.get_orientation(image) == \"LPI\"", "def is_RGB(self,img_path):\n image=Image.open(img_path)\n image=np.asarray(image)\n if(len(image.shape)<3):\n return False\n return True", "def check_type(filename):\n try:\n im = Image.read(filename)\n except SanperaError:\n return False\n else:\n return im.original_format in [b'JPEG', b'PNG', b'GIF']", "def is_grayscale(img):\n return len(img.shape) == GS", "def check_image_size(img_name, img_path):\n \n try:\n \n # Open image\n img = Image.open(img_name)\n \n # Determine size of image\n width, height = img.size\n \n # Check if image is square\n if (width==height):\n is_square = True\n else:\n is_square = False\n \n # Check for channels in image\n img_list = list(img.getdata())\n img_max = max(img_list)\n if (type(img_max)==int):\n is_single_channel = True\n else:\n is_single_channel = False\n \n return is_square, is_single_channel\n \n finally:\n \n # Close image\n img.close()", "def hasImages(self):\n return len(self.getImages()) > 0", "def hasImages(self):\n return len(self.getImages()) > 0", "def is_image(content_type):\n return content_type == \"image/jpeg\" or content_type == \"image/png\"", "def images_exist(self):\n pass", "def check_shape(self):\r\n if np.array(self.img).shape != (1536, 2048, 3):\r\n raise BadShape", "def is_gray(img: np.ndarray):\n return len(img.shape) == 2 and img.shape[0] > 1 and img.shape[1] > 1", "def 
number_of_images_valid():\r\n if number_of_images_a_valid() and number_of_images_b_valid():\r\n return True\r\n else:\r\n return False", "def has_2D(self):\n\t\tif self.have_fastas is False:\n\t\t\tself._extract_fastas_from_fast5()\n\t\t\tself.have_fastas = True\n\n\t\tif self.fastas.get('twodirections') is not None:\n\t\t\treturn True\n\t\treturn False", "def image_comparison(self):\n for result in self.cards:\n if result.image_status:\n return True\n return False", "def is_image(self):\r\n # we can only get this if we have headers\r\n LOG.debug('content type')\r\n LOG.debug(self.content_type)\r\n if (self.content_type is not None and\r\n self.content_type.lower() in IMAGE_TYPES.values()):\r\n return True\r\n else:\r\n return False", "def hasImages(self):\n\n if len(self._images) > 0:\n return 1\n for s in self._subdirs:\n if s.hasImages():\n return 1\n return 0", "def image(self):\n return self.any_image(-1)", "def isImage(imgref):\n if (imgref.endswith(\"JPG\")):\n return True\n if (imgref.endswith(\"jpg\")):\n return True\n if (imgref.endswith(\"gif\")):\n return True\n if (imgref.endswith(\"png\")):\n return True\n return False", "def test_05_01_mask_of3D(self):\n x=cpi.Image()\n x.image = np.ones((10,10,3))\n self.assertTrue(x.mask.ndim==2)", "def _image_is_large_enough(im):\n return (im.shape[0] >= MIN_DIM) and (im.shape[1] >= MIN_DIM)", "def IsImage(self, filename):\n mimetype = mimetypes.guess_type(filename)[0]\n if not mimetype:\n return False\n return mimetype.startswith(\"image/\")", "def is_valid(box, img):\n valid_width = box['top_left_x'] > 0 and box['bottom_right_x'] < img.shape[1]\n valid_height = box['top_left_y'] > 0 and box['bottom_right_y'] < img.shape[0]\n return valid_width and valid_height", "def texture_mode_enabled():\n for area in bpy.context.screen.areas:\n if area.type == \"VIEW_3D\":\n for space in area.spaces:\n if space.type == \"VIEW_3D\":\n if space.viewport_shade == \"TEXTURED\":\n return True\n elif (space.viewport_shade == \"SOLID\" and\n space.show_textured_solid):\n return True\n return False", "def IsImage(self, filename):\r\n mimetype = mimetypes.guess_type(filename)[0]\r\n if not mimetype:\r\n return False\r\n return mimetype.startswith(\"image/\")", "def is_grayscale(self):\n return self.r == self.g == self.b", "def is_grayscale(self):\n return self.r == self.g == self.b", "def is3D(data):\n return data.find(\"x3\") != -1 and data.find(\"y3\") != -1 and data.find(\"z3\") != -1", "def checkImageDimensions(self, filenames):\n\t\ts = None\n\t\thashStr = filenames[:]\n\t\thashStr.sort()\n\t\thashStr = str(hashStr)\n\t\t# check to see if there's already a result of the check for these filenames in the cache\n\t\tif hashStr in self.dimensionCheck:\n\t\t\tLogging.info(\"Using cached result for dimensions check: %s\"%(str(self.dimensionCheck[hashStr])))\n\t\t\treturn self.dimensionCheck[hashStr]\n\t\t\t\n\t\tfor file in filenames:\n\t\t\tif file not in self.imageDims:\n\t\t\t\tprint \"Trying to open\",type(file)\n\t\t\t\ttry:\n\t\t\t\t\tself.ext = file.split(\".\")[-1].upper()\n\t\t\t\t\tif self.ext == \"TIF\":\n\t\t\t\t\t\tself.ext = \"TIFF\"\n\t\t\t\t\tif self.ext == \"JPG\":\n\t\t\t\t\t\tself.ext = \"JPEG\"\n\n\t\t\t\t\tif self.ext == \"VTI\":\n\t\t\t\t\t\treader = vtk.vtkXMLImageReader()\n\t\t\t\t\telse:\n\t\t\t\t\t\treader = eval(\"vtk.vtk%sReader()\"%self.ext)\n\t\t\t\t\treader.SetFileName(file)\n\t\t\t\t\treader.UpdateInformation()\n\t\t\t\texcept IOError, ex:\n\t\t\t\t\ttraceback.print_exc()\n\t\t\t\t\traise Logging.GUIError(\"Cannot open image 
file\", \"Cannot open image file %s\" % file)\n\n\t\t\t\textent = reader.GetDataExtent()\n\t\t\t\tfSize = (extent[1],extent[3])\n\t\t\t\tself.imageDims[file] = fSize\n\t\t\telse:\n\t\t\t\tfSize = self.imageDims[file]\n\t\t\tif s and fSize != s:\n\t\t\t\tx0, y0 = s\n\t\t\t\tx1, y1 = fSize\n\t\t\t\tself.dimensionCheck[hashStr] = False\n\t\t\t\treturn 0\n\t\t\ts = fSize \n\t\t\tfn = file\n\t\tself.dimensionCheck[hashStr] = True\n\t\treturn 1", "def number_of_images_a_valid():\r\n counter = 0\r\n with os.scandir(os.path.join(dir_path, \"inputs\", \"type_a\")) as filepaths:\r\n for path in filepaths:\r\n extension = os.path.splitext(path)[1].lower()\r\n if extension == \".png\" or extension == \".jpg\":\r\n counter += 1\r\n if counter >= int(number_of_images_a.get()):\r\n return True\r\n else:\r\n messagebox.showwarning(\"Invalid Image Inputs\", (\r\n \"Not enough images of type a to create \"\r\n \"requested grid.\"))\r\n return False", "def check_image(image, depth):\n cols, rows = image.size\n divisor = 2**depth\n n_rows = round(rows/divisor) * divisor\n n_cols = round(cols/divisor) * divisor\n # d = min(n_rows, n_cols)\n image = image.resize((n_cols, n_rows))\n image_array = np.asarray(image)\n return image_array, Fraction(n_rows, n_cols)", "def test_has_alpha(self):\n image_3d = np.array([[ # One image with shape (1, 2, 3)\n [1, 2, 3],\n [4, 5, 6]\n ]])\n image_4d = np.array([[ # One image with shape (1, 3, 4)\n [1, 2, 3, 4],\n [4, 5, 6, 7],\n [8, 9, 10, 11]\n ]])\n image_5d = np.array([[ # One image with shape (1, 1, 5)\n [1, 2, 3, 4, 5]\n ]])\n self.assertEqual(localHDR.has_alpha(image_3d), False)\n self.assertEqual(localHDR.has_alpha(image_4d), True)\n self.assertEqual(localHDR.has_alpha(image_5d), False)", "def is_fit(self):\n if not hasattr(self, '_icc_imgs'):\n return False\n else:\n return self._icc_imgs is not None", "def is_image_file(filename):\n img_types = [\".jpg\", \".jpeg\", \".png\", \".gif\"]\n ext = os.path.splitext(filename)[1].lower()\n return ext in img_types", "def test_is_image(self):\n os.chdir(\"testimages/\")\n self.assertTrue(fileactions.is_image(\"arch_001.jpg\"))\n self.assertFalse(fileactions.is_image(\"not_an_image.jpg\"))", "def check_dataset(dataset):\n loader = torch.utils.data.DataLoader(dataset, batch_size=16)\n dataiter = iter(loader)\n images, labels = dataiter.next()\n imgs_grid = make_grid(images, padding=0)\n np_grid = imgs_grid.numpy()\n plt.figure(figsize=(10, 7))\n plt.imshow(np.transpose(np_grid, (1, 2, 0)))\n for i in labels:\n print(dataset.classes[i.item()])\n plt.show()", "def is_cv3():\n (major, minor, _) = cv2.__version__.split('.')\n return int(major) == 3", "def _get_image_type_from_array(arr):\n if len(arr.shape) == 3 and arr.shape[2] == 3:\n # 8-bit x 3 colors\n return 'RGB'\n elif len(arr.shape) == 2:\n # 8-bit, gray-scale\n return 'L'\n else:\n raise ValueError(\n 'Input array must have either 2 dimensions or 3 dimensions where the '\n 'third dimension has 3 channels. i.e. arr.shape is (x,y) or (x,y,3). 
'\n 'Found shape {}.'.format(arr.shape))", "def is_new_red_camera():\r\n ids = range(15)\r\n for id in ids:\r\n name = 'red{:04d}.fits'.format(id)\r\n if os.path.exists(name):\r\n hdr = pyfits.getheader(name)\r\n if hdr['NAXIS1'] == 4141 or hdr['NAXIS1'] == 4114:\r\n return True\r\n elif hdr['NAXIS1'] == 1024 or hdr['NAXIS1'] == 1124:\r\n return False\r\n else:\r\n raise ValueError('Unexpected image size')\r\n else:\r\n continue\r\n\r\n # raise ValueError('Could not locate red side files')\r\n print 'Could not locate red side files--defaulting to new camera'\r\n return True", "def is_image_file(filename):\n return any([filename.endswith(img_type) for img_type in [\".jpg\", \".png\", \".gif\"]])", "def is_image_file(filename):\r\n filename_lower = filename.lower()\r\n return any(filename_lower.endswith(ext) for ext in IMG_EXTENSIONS)", "def countless3d(data):\n modshape = np.array(data.shape) % 2\n assert sum(\n modshape\n ) == 0, \"COUNTLESS 3D currently only supports even sided images.\" # someone has to write even_to_odd3d\n\n return countless(data, (2, 2, 2))", "def images_are_present(file_info):\n currentdir = os.path.join(WORKDIR, file_info['folder'])\n if not os.path.exists(currentdir):\n return False\n count = len([x for x in os.listdir(currentdir) if x.endswith('.png')])\n if count != file_info['size']:\n print([x for x in os.listdir(currentdir) if x.endswith('.png')])\n print('Count does not match')\n print(count)\n print(file_info['size'])\n return False\n return True", "def __is_image_id( self, image_id ):\n images_ids = self.__get_multi_images_ids()\n for id in images_ids:\n if image_id == id:\n return True\n return False", "def load_from_images(self):\n logging.debug(\"load_from_images called\")\n return True", "def create_octree_image() -> bool:\n return async_octree and (create_image_type != CREATE_IMAGE_NORMAL)", "def check_image_before_load(self,image_dims):\n\n if image_dims[0]*image_dims[1]*image_dims[2]*4 < self.check_available_memory():\n return True\n else:\n return False", "def check_niimg_4d(niimg, return_iterator=False, dtype=None):\n return check_niimg(\n niimg, ensure_ndim=4, return_iterator=return_iterator, dtype=dtype\n )", "def is_image_landscape(asset):\r\n \r\n logging.debug('is_image_landscape({})'.format(asset))\r\n\r\n myDim = get_image_size(asset)\r\n # Calculate Width:Height; > 0 == landscape; < 0 == portrait\r\n if myDim[0]/myDim[1] > 1:\r\n logging.debug('is_image_landscape - True')\r\n return True\r\n else:\r\n logging.debug('is_image_landscape - False')\r\n return False", "def no_classes(mask):\n extrema = ImageStat.Stat(mask).extrema\n r = extrema[0][1]\n g = extrema[1][1]\n b = extrema[2][1]\n\n if r == 0 and g == 0 and b == 0:\n return True\n\n return False", "def __contains__(self, image: Any) -> bool:\n return isinstance(image, self.native_image_type)", "def image_shape(self):\n return tuple(self._img_shape)", "def is_image_size_64(image):\n return image['height'] == 64 and image['width'] == 64", "def _check_images_and_labels(self, image_dir, label_dir):\n return len(os.listdir(image_dir))==len(os.listdir(label_dir))", "def has_legacy_image(self):\n pass", "def has_legacy_image(self):\n pass", "def hasUdim(self):\n\t\treturn '<udim>' in self.path.lower()", "def validate_image_type(filename: str) -> bool:\n supported_extensions = (\"png\", \"jpg\", \"jpeg\")\n return (filename not in (None, \"\")) and (get_extension(filename) in supported_extensions)", "def check_layers_count(context, count):\n history = 
DOCKER_CLIENT.history(context.config.userdata['IMAGE'])\n if len(history) == int(count):\n return True\n\n raise Exception(\"Image does not contain %s layers, current number of layers: %s\" % (count, len(history)), history)", "def isInsideImage(x, y, nx, ny, imageNx, imageNy):\r\n return ( ((x+nx) < imageNx) and ((y+ny) < imageNy) )", "def image_check(kwargs) -> bool:\n\n # Kwarg argument check\n return kwarg_check(\n kwargs=kwargs,\n options=[\n \"min_captured_at\",\n \"max_captured_at\",\n \"radius\",\n \"image_type\",\n \"organization_id\",\n \"fields\",\n ],\n callback=\"image_check\",\n )", "def is_image_file(filename):\n return has_file_allowed_extension(filename, IMG_EXTENSIONS)", "def three_different_np_images():\n rgb1 = np.zeros((32, 32, 3), dtype=np.uint8)\n rgb1[..., 0] = 192\n rgb1[..., 1] = 0\n rgb1[..., 2] = 0\n # img1 = Image.fromarray(rgb1)\n\n rgb2 = np.zeros((32, 32, 3), dtype=np.uint8)\n rgb2[..., 0] = 0\n rgb2[..., 1] = 192\n rgb2[..., 2] = 0\n # img2 = Image.fromarray(rgb2)\n\n rgb3 = np.zeros((32, 32, 3), dtype=np.uint8)\n rgb3[..., 0] = 0\n rgb3[..., 1] = 0\n rgb3[..., 2] = 192\n # img3 = Image.fromarray(rgb3)\n\n return (rgb1, rgb2, rgb3)", "def _get_consistent_shape(images: Iterable):\n dim0s = []\n dim1s = []\n\n for img in images:\n dim0s.append(img.shape[0])\n dim1s.append(img.shape[1])\n\n assert len(set(dim0s)) == 1 and len(set(dim1s)) == 1, 'Inconsistent shapes.'\n\n return dim0s[0], dim1s[0]", "def compare_images(img1_path, img2_path):\n img1 = Image.open(img1_path)\n img2 = Image.open(img2_path)\n try:\n diff = ImageChops.difference(img1, img2)\n except ValueError:\n return False\n return diff.getbbox() is None", "def reversible(self) -> bool:\n xy_row = np.column_stack(\n (\n np.linspace(\n -self.imgsz[0] / (2 * self.f[0]),\n self.imgsz[0] / (2 * self.f[0]),\n int(self.imgsz[0]),\n ),\n np.zeros(int(self.imgsz[0])),\n )\n )\n dxy = self._distort(xy_row)\n continuous_row = np.all(dxy[1:, 0] >= dxy[:-1, 0])\n xy_col = np.column_stack(\n (\n np.zeros(int(self.imgsz[1])),\n np.linspace(\n -self.imgsz[1] / (2 * self.f[1]),\n self.imgsz[1] / (2 * self.f[1]),\n int(self.imgsz[1]),\n ),\n )\n )\n dxy = self._distort(xy_col)\n continuous_col = np.all(dxy[1:, 1] >= dxy[:-1, 1])\n return continuous_row and continuous_col", "def is_image(mine=None, file=None):\n if file:\n mine = get_file_mine(file)\n print(mine)\n if mine:\n return mine.find('image') != -1\n\n return False", "def is_filetype(img_path, formats=[\"jpg\", \"png\", \"gif\", \"pgm\", \"tif\", \"ppm\"]):\n # formats = [\"jpg\", \"png\", \"gif\", \"pgm\"]\n end = img_path[-3:]\n return os.path.isfile(img_path) and (end in formats)", "def isPng(self):\n\t\treturn self.extension == '.png'", "def test_on_dm3_vs_dm4_image(self):\n im3 = diffread(TEST_DM3)\n im4 = diffread(TEST_DM4)\n\n with self.subTest(\"DM3\"):\n self.assertEqual(im3.shape, (2048, 2048))\n self.assertEqual(im3.dtype, np.dtype(\"int8\"))\n\n with self.subTest(\"DM4\"):\n self.assertEqual(im4.shape, (2048, 2048))\n self.assertEqual(im4.dtype, np.dtype(\"int8\"))\n\n with self.subTest(\"DM3 vs. 
DM4\"):\n self.assertTrue(np.allclose(im3, im4))", "def get_img_shape(img):\n if K.image_dim_ordering() == 'th':\n return K.int_shape(img)\n else:\n samples, w, h, c = K.int_shape(img)\n return samples, c, w, h", "def check_availability(img_path):\n # loading gray image\n gray_image = cv2.imread(img_path, 0)\n\n # check whether img give empty list or not\n flag = face_recognition.face_locations(gray_image)\n if flag:\n return True\n return False", "def Has3d(self, *args):\n return _Adaptor3d.Adaptor3d_TopolTool_Has3d(self, *args)", "def _check_consistency_between_imaging_extractors(self):\n return True", "def IsRenderLayersOn(self):\n\n renderLayers = pm.ls(exactType=\"renderLayer\")\n referenceLayers = pm.ls(exactType=\"renderLayer\", rn=1)\n return ((len(renderLayers) - len(referenceLayers)) > 1)", "def check_if_original(article):\n num_img = len(article.find_all(\"img\"))\n return num_img < 2", "def hasImage(self):\n if self.getImage():\n return True\n return False", "def has_image(self):\n return hasattr(self, \"_image\") and self._image is not None", "def hasImg(img_name):\n try:\n Image.objects.raw({\"_id\": img_name}).first()\n return True\n except pymodm_errors.DoesNotExist:\n return False", "def assertIsNifti4D(*args):\n for f in args:\n assertIsNifti(f)\n d = ensure.ensureIsImage(f)\n assert len(d.shape) == 4, \\\n 'incorrect shape for 4D nifti: {}:{}'.format(d.shape, f)", "def is_image_local(self, image):\n result = self.execute_module(\"docker_image_facts\", {\"name\": image})\n return bool(result.get(\"images\")) and not result.get(\"failed\")", "def has_image_data (ff_hdus_list, which_hdu=0):\n if (which_hdu == 0): # heuristic for Primary HDU\n if (ff_hdus_list[which_hdu].header.get('NAXIS') == 2):\n return True\n else:\n return False\n else: # it's an extension and so marked\n return ( (len(ff_hdus_list) > which_hdu) and\n (ff_hdus_list[which_hdu].header.get('XTENSION') == 'IMAGE') )", "def checkImages(self):\r\n\r\n self.leftImage, self.rightImage, res = self.receiver.getImageData()\r\n\r\n return res", "def check_sub_image(self, ndvi_filename, input_path):\n rgb_filename = re.sub(\"BWNDVI\",\"RGB\",ndvi_filename)\n rgb_img = Image.open(self.get_file(os.path.join(input_path, rgb_filename),\n self.input_location_type))\n img_ok = check_image_ok(rgb_img, 0.05)\n return img_ok", "def imageType(self):\n return self.__imageType", "def is_valid_image(image):\n if image not in AVAILABLE_IMAGES.keys():\n return False\n\n return True", "def _check_fov(img, affine, shape):\n img = check_niimg(img)\n return img.shape[:3] == shape and np.allclose(img.affine, affine)", "def is_jpg(filename):\n return '.jpg' in filename" ]
[ "0.68862396", "0.65322673", "0.6502295", "0.64808524", "0.64616615", "0.64155763", "0.63741845", "0.6256185", "0.62417966", "0.6234979", "0.6150245", "0.6141367", "0.60545796", "0.6043418", "0.603095", "0.60253567", "0.5992711", "0.5932484", "0.5897953", "0.5897953", "0.58671457", "0.5839329", "0.5819505", "0.58014476", "0.57349354", "0.5710495", "0.57062507", "0.5704931", "0.56985337", "0.5688867", "0.5686172", "0.56808287", "0.5652669", "0.56464857", "0.56321865", "0.5617805", "0.5608982", "0.56055516", "0.56055516", "0.55936724", "0.5585338", "0.5585123", "0.5580401", "0.55687064", "0.556685", "0.5566292", "0.55646724", "0.55621666", "0.55304664", "0.5489376", "0.5484313", "0.5481332", "0.54579526", "0.5454903", "0.54479474", "0.5445142", "0.5444277", "0.5430202", "0.5430017", "0.54300165", "0.54266536", "0.54209954", "0.54193103", "0.54146785", "0.5403165", "0.5400727", "0.5399202", "0.5399202", "0.53987736", "0.53981996", "0.53857315", "0.5368322", "0.5359632", "0.5357035", "0.53549504", "0.5348926", "0.53478795", "0.5325552", "0.53208274", "0.5312523", "0.5301913", "0.5300684", "0.5295916", "0.52953595", "0.5290958", "0.5287248", "0.5286436", "0.5285352", "0.5284839", "0.52806103", "0.52729", "0.52720153", "0.52689904", "0.5268807", "0.5262002", "0.5261837", "0.5261312", "0.52610034", "0.5260233", "0.5257304" ]
0.7844598
0
set the filenames that will be read
def setFilenames(self, filenames): self.filenames = filenames if len(filenames) == 0: return if not self.dimensions: self.retrieveImageInfo(filenames[0]) if not self.checkImageDimensions(filenames): raise Logging.GUIError("Image dimensions do not match", \ "Some of the selected files have differing dimensions, \ and cannot be imported into the same dataset.") self.getReadersFromFilenames() self.numberOfImages = len(filenames) if self.is3D: if self.readers: self.numberOfImages = 0 for rdr in self.readers: self.numberOfImages += rdr.GetNumberOfSubFiles()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_filename(self, file_name):", "def setfiles(self, filelist):\r\n self._filelist=filelist\r\n self._energy=self.readenergy(filelist)", "def filenames(self):\n pass", "def fileset(self):\n pass", "def __init__(self, files):\n self.files = files and [NamedFile(data=d, filename=fn) for (d, fn) in files]", "def setFile(self, filename):\n self.prepare() #new call on each new file to process\n self.filename = \"%s\" % filename", "def set_in_files():\r\n\tindatadir = '/nobackup/ejblom/reddit'\r\n\tcom_dir = '/comments'\r\n\tsubm_dir = '/submissions'\r\n\tglob_end = '/filtered*'\r\n\tcom_glob_str = indatadir + com_dir + glob_end\r\n\tsubm_glob_str = indatadir + subm_dir + glob_end\r\n\tinfilenames = sorted(glob.glob(com_glob_str)) + sorted(glob.glob(subm_glob_str))\r\n\treturn infilenames", "def load_files(self, filenames):\n self.filenames = filenames\n self.slider.setRange(0, len(self.filenames) - 1)\n self.slider.setSliderPosition(0)\n self.update_image()", "def set_IDs_filenames(self):\n self._trn_IDs_file = f\"{self._ds_root}/train_{self.opts['val_split']}split.txt\"\n self._val_IDs_file = f\"{self._ds_root}/val_{self.opts['val_split']}split.txt\"\n self._tst_IDs_file = f\"{self._ds_root}/test.txt\"", "def set_stations_filenames(directories):\n STATIONS.set_filenames(directories)", "def _load_filenames():\n filenames = session.get('filenames')\n\n if filenames is None:\n g.filenames = None\n else:\n g.filenames = filenames", "def set_filenames(self, folder_name=None):\n\n self.set_folder_name(folder_name)\n\n self.params['input_folder'] = \"%sInputSpikeTrains/\" % self.params['folder_name']# folder containing the input spike trains for the network generated from a certain stimulus\n self.params['spiketimes_folder'] = \"%sOutputSpikes/\" % self.params['folder_name']\n self.params['volt_folder'] = \"%sVoltageTraces/\" % self.params['folder_name']\n self.params['parameters_folder'] = \"%sParameters/\" % self.params['folder_name']\n self.params['connections_folder'] = \"%sConnections/\" % self.params['folder_name']\n self.params['activity_folder'] = \"%sANNActivity/\" % self.params['folder_name']\n self.params['bcpnntrace_folder'] = \"%sBcpnnTraces/\" % self.params['folder_name']\n self.params['figures_folder'] = \"%sFigures/\" % self.params['folder_name']\n self.params['movie_folder'] = \"%sMovies/\" % self.params['folder_name']\n self.params['tmp_folder'] = \"%stmp/\" % self.params['folder_name']\n self.params['data_folder'] = '%sData/' % (self.params['folder_name']) # for storage of analysis results etc\n self.params['training_input_folder'] = \"%sTrainingInput/\" % self.params['folder_name'] # folder containing the parameters used for training the network\n self.params['folder_names'] = [self.params['folder_name'], \\\n self.params['spiketimes_folder'], \\\n self.params['volt_folder'], \\\n self.params['parameters_folder'], \\\n self.params['connections_folder'], \\\n self.params['activity_folder'], \\\n self.params['bcpnntrace_folder'], \\\n self.params['figures_folder'], \\\n self.params['movie_folder'], \\\n self.params['tmp_folder'], \\\n self.params['data_folder'], \\\n self.params['training_input_folder'], \\\n self.params['input_folder']] # to be created if not yet existing\n\n self.params['params_fn_json'] = '%ssimulation_parameters.json' % (self.params['parameters_folder'])", "def read_files(self):\n for f in self.filenames:\n self.games.extend(pgn.loads(open(f).read()))", "def handleFileNames(self):\n \n # expand the wild cards - but do not create the full directory path\n 
# as the work sub directories have yet to be created.\n if not os.path.exists(self.shareArea):\n m = 'Cannot set self.auxfiles due to non-existent share directory: %s' % self.shareArea\n self.logger.fatal(m)\n raise RTTCodingError(m)\n\n # resolve auxFile patterns to file names\n auxFiles = []\n for pattern in self.auxFilePatterns:\n base, fnpattern = os.path.split(pattern)\n srcDir = os.path.normpath(os.path.join(self.shareArea, base))\n filesInShare = os.listdir(srcDir)\n auxFiles.extend([os.path.join(base,file) for file in filesInShare if fnmatch.fnmatch(file, fnpattern)])\n\n self.auxFiles = unique(auxFiles)", "def select_files(self):\n pass", "def __init__(self):\n self.file_list = os.listdir(self.PATH)", "def _read_directory(self):\n self._filenames = glob.glob(self._directory + \"/*.project\")", "def _setup_data_filenames(self):\n\n # read in filenames of training data(poses, images, labels)\n logging.info('Reading filenames')\n all_filenames = os.listdir(self.data_dir)\n if self.image_mode== ImageMode.BINARY:\n self.im_filenames = [f for f in all_filenames if f.find(ImageFileTemplates.binary_im_tensor_template) > -1]\n elif self.image_mode== ImageMode.DEPTH:\n self.im_filenames = [f for f in all_filenames if f.find(ImageFileTemplates.depth_im_tensor_template) > -1]\n elif self.image_mode== ImageMode.BINARY_TF:\n self.im_filenames = [f for f in all_filenames if f.find(ImageFileTemplates.binary_im_tf_tensor_template) > -1]\n elif self.image_mode== ImageMode.COLOR_TF:\n self.im_filenames = [f for f in all_filenames if f.find(ImageFileTemplates.color_im_tf_tensor_template) > -1]\n elif self.image_mode== ImageMode.GRAY_TF:\n self.im_filenames = [f for f in all_filenames if f.find(ImageFileTemplates.gray_im_tf_tensor_template) > -1]\n elif self.image_mode== ImageMode.DEPTH_TF:\n self.im_filenames = [f for f in all_filenames if f.find(ImageFileTemplates.depth_im_tf_tensor_template) > -1]\n elif self.image_mode== ImageMode.DEPTH_TF_TABLE:\n self.im_filenames = [f for f in all_filenames if f.find(ImageFileTemplates.depth_im_tf_table_tensor_template) > -1]\n else:\n raise ValueError('Image mode %s not supported.' %(self.image_mode))\n\n self.pose_filenames = [f for f in all_filenames if f.find(ImageFileTemplates.hand_poses_template) > -1]\n self.label_filenames = [f for f in all_filenames if f.find(self.target_metric_name) > -1]\n\n self.im_filenames.sort(key = lambda x: int(x[-9:-4]))\n self.pose_filenames.sort(key = lambda x: int(x[-9:-4]))\n self.label_filenames.sort(key = lambda x: int(x[-9:-4]))\n\n # check that all file categories were found\n if len(self.im_filenames) == 0 or len(self.label_filenames) == 0 or len(self.label_filenames) == 0:\n raise ValueError('1 or more required training files could not be found')", "def setFileName(self, filename):\r\n #We need to extract the last position of the path. 
This element is the filename\r\n Splifilename = filename.split(\"\\\\\")\r\n Isofilname = Splifilename[-1]\r\n self.actualFile = Isofilname", "def add_files(self, filenames):\n for filename in filenames:\n self.add_file(filename)", "def change_files(self, files: list = None):\n if not is_empty_arr(files):\n self._files = files", "def initialize_files(file_name, ran, file_extension):\r\n \"\"\"Specifiy the exact file name and the number of files --> file_name_(range) e.g file_name=chickens ,ran=16\"\"\"\r\n answer_file_rep = [file_name + str(number) for number in range(1, ran)]\r\n answer_files = [file + \"{}\".format(file_extension) for file in answer_file_rep]\r\n answers = [\"answer\" + str(number) for number in range(1, ran)]\r\n return answer_files, ran, answers", "def get_files(self):\n def _get_files_by_names(files, name_set, postfix):\n ret = []\n for f in files: \n name = osp.basename(f).split(\"_%s\" % postfix)[0]\n if name in name_set:\n ret.append(f)\n return ret\n\n frame1_files = sorted(glob.glob(osp.join(self.root, 'images', \"*_pre_disaster*\")))\n frame2_files = sorted(glob.glob(osp.join(self.root, \"images\", \"*_post_disaster*\")))\n label_files = sorted(glob.glob(osp.join(self.root, \"masks\", \"*_change*\")))\n assert len(frame1_files) == len(frame2_files) == len(label_files), \\\n \"%d, %d, %d\" % (len(frame1_files), len(frame2_files), len(label_files))\n\n file_names = [osp.basename(f).split(\"_pre\")[0] for f in frame1_files]\n file_names = sorted(list(set(file_names)))\n if self.isTrain:\n name_set = train_test_split(file_names, train_size=0.8, random_state=0)[0]\n else: \n name_set = train_test_split(file_names, train_size=0.8, random_state=0)[1]\n self.frame1_files = _get_files_by_names(frame1_files, name_set, 'pre')\n self.frame2_files = _get_files_by_names(frame2_files, name_set, 'post')\n self.label_files = _get_files_by_names(label_files, name_set, 'change')", "def set_filenames(self, directories):\n station_table_filename = self.station_table_filename\n noaa_filename = self.noaa_filename\n if directories is None:\n self.enabled = False\n self.known_stations = {}\n self.station_table_filename = ''\n self.noaa_filename = ''\n else:\n if not isinstance(directories, list):\n directories = [directories]\n self.enabled = True\n for directory in directories:\n station_table_path = os.path.join(directory, 'station.table')\n noaa_path = os.path.join(directory, 'nsd_bbsss.txt')\n if os.path.exists(station_table_path) or \\\n os.path.exists(noaa_path):\n station_table_filename = station_table_path\n noaa_filename = noaa_path\n break\n\n if station_table_filename != self.station_table_filename or \\\n noaa_filename != self.noaa_filename:\n self.station_table_filename = station_table_filename\n self.noaa_filename = noaa_filename\n self.reload()", "def filenames(self) -> dict[str, str]:\r\n ...", "def _load_files(self):\n for filedoc in self._docset.get_files():\n path = filedoc.get_path()\n if not path:\n # In case of only partially loaded file information,\n # the path information is not set for unloaded files.\n continue\n if not os.path.isabs(path):\n path = os.path.join(self._source_root, path)\n extension = os.path.splitext(path)[1]\n # We don't care about Markdown files that only produce pages\n # (and fail the directory check below).\n if extension == '.md':\n continue\n dirdoc = filedoc.get_directory()\n if not dirdoc:\n self._reporter.xml_assert(filedoc.get_xml_path(),\n \"file is not in any directory in Doxygen\")\n continue\n relpath = 
self._get_rel_path(path)\n fileobj = self._files.get(relpath)\n if not fileobj:\n fileobj = File(path, relpath, self._docmap[dirdoc])\n self._files[relpath] = fileobj\n fileobj.set_doc_xml(filedoc, self)\n self._docmap[filedoc] = fileobj", "def set_files(self, items):\n if not isinstance(items, dict):\n raise ValueError('Invalid file description')\n for tid, files in items.iteritems():\n if not isinstance(files, dict):\n continue\n wanted = []\n unwanted = []\n priority_high = []\n priority_normal = []\n priority_low = []\n for fid, file in files.iteritems():\n if not isinstance(file, dict):\n continue\n if 'selected' in file and file['selected']:\n wanted.append(fid)\n else:\n unwanted.append(fid)\n if 'priority' in file:\n if file['priority'] == 'high':\n priority_high.append(fid)\n elif file['priority'] == 'normal':\n priority_normal.append(fid)\n elif file['priority'] == 'low':\n priority_low.append(fid)\n self.change([tid], files_wanted = wanted\n , files_unwanted = unwanted\n , priority_high = priority_high\n , priority_normal = priority_normal\n , priority_low = priority_low)", "def get_filenames(self):\n return self.filenames", "def setFile(self, filename): #$NON-NLS-1$\r", "def set_files(self, file_list):\n\tif file_list==None: return []\n\timport types\n\tisString = isinstance(file_list, types.StringTypes) \n\tisList = isinstance(file_list, list) \n\tassert isString or isList, \"You should provide a list of files as list or as CVS string!\"\n\tif isList: return file_list\n\tif isString :\n\t import re\n\t file_list_converted = re.sub(r'\\s', '', file_list).split(',') #remove all whitespaces\n\t return file_list_converted", "def setup(self, files):\n if not isinstance(files, (list, tuple)):\n raise RuntimeError(\"Argument must be list of files.\")\n\n self.files = files", "def initFileList(self,extension):\r\n self.listExec.Clear()\r\n for fname in os.listdir(\"data\"):\r\n #print 'testing file ' , fname\r\n \r\n if extension in fname :\r\n #print fname\r\n self.listExec.Append(fname)\r\n self.Refresh()", "def filepaths(self):\n pass", "def set_fname_encoder(self):\n\n fp = open(self.meta_path, 'r')\n wav_names = []\n next(fp)\n for i, line in tqdm(enumerate(fp)):\n audio_name, _, _, _ = line.split()\n wav_name = os.path.basename(audio_name)\n wav_names.append(wav_name)\n self.fname_encoder.fit(wav_names)", "def ingest(self, files):\n for file in files:\n self.files.add(file)", "def setSourceFile(filename):", "def __setupPaths(self):\n self.csv_file_names = filter(\n (lambda x: not re.match(\".*\\\\.csv$\", x) is None),\n os.listdir(self.path))", "def initialize_file_readers():\n savefile_path = os.path.join(os.getcwd()+ \"/../data/\", SAVE_FILE)\n file_reader_list = []\n for file in os.listdir(savefile_path):\n file_reader = open(os.path.join(savefile_path,file), \"r\")\n file_reader_list.append({\"file_reader\": file_reader, \"last_read\": { \"word\": \"\", \"doc_score_list\": []}})\n return file_reader_list", "def populate(self, dr=None):\n if dr is not None: self.directory = dr\n \n for k in OM.glob_strings:\n string =self.directory+\"/\"+OM.glob_strings[k]\n print(\"OM::populate -- Checking \",k,\" (\",string,\")\", end=\"\")\n fnames = glob.glob(string)\n print(\"... 
found\",len(fnames), \"files\")\n setattr(self, k, fnames)\n #print(k, lst)", "def set_train_data(self):\n files_per_worker = len(self.train_list) // self.num_workers\n files_for_this_worker = self.train_list[ \n self.worker_id*files_per_worker : (self.worker_id+1)*files_per_worker ]\n # The worker takes an extra file if needed\n if self.worker_id < len(self.train_list) % self.num_workers:\n files_for_this_worker.append(self.train_list[ self.num_workers*files_per_worker + self.worker_id ])\n print \"Files for worker %d:\" % self.comm_block.Get_rank()\n for f in files_for_this_worker:\n print \" %s\" % f\n self.data.file_names = files_for_this_worker", "def filelist_generator(self):\n for filename in self.filenames:\n yield filename", "def files(self, ending='.sif'):\n for f in sorted(os.listdir(self.path)):\n if f.endswith(ending):\n self.file_name = f\n yield f", "def __init__(self, data_dir, transform):\n self.filenames = os.listdir(data_dir)\n self.filenames = [os.path.join(data_dir, f) for f in self.filenames]\n\n self.transform = transform", "def do_files(self, args):\n file_names = self.regexprutils.get_file_names()\n print 'File names:'\n for name in file_names:\n print ' %s' % (name, )", "def load_files(self):\n # Needs to be implemented by child class\n raise NotImplementedError", "def set_imagefilename(self,imagefilename):\n self.imagefile = open(imagefilename,'r+')", "def doReadFiles(self, logicalFileName=None, realFileName=None):\n #type: (Text)->List(Text)\n assert logicalFileName or realFileName\n if logicalFileName is not None:\n self.fileName=logicalFileName\n self.sourceLines=readFileLines(\n file=self.fileName,\n issueOrigin=self,\n message='Cannot read source file %s')\n if realFileName is not None:\n self.realFileName=realFileName\n if logicalFileName==realFileName:\n self.realSourceLines=self.sourceLines\n else:\n self.realFileName = realFileName\n self.realSourceLines = readFileLines(\n file=self.realFileName,\n issueOrigin=self,\n message='Cannot read generated file %s')", "def setUp(self):\n dirname = os.path.dirname(__file__)\n self.files = [\n os.path.join(dirname, 'data',\n 'goes13_IR_107_testwcm_201604291015.tif'),\n os.path.join(dirname, 'data',\n 'goes15_IR_107_testwcm_201604291015.tif'),\n os.path.join(dirname, 'data',\n 'himawari8_IR1_testwcm_201604291015.tif'),\n os.path.join(dirname, 'data',\n 'meteosat7_IR_115_testwcm_201604291015.tif'),\n os.path.join(dirname, 'data',\n 'meteosat10_IR_108_testwcm_201604291015.tif')\n ]", "def set_attributes(self):\n\n self.input_file = None # the InputFile object\n self.namelist = None # the currently selected namelist\n self.file_loaded = False # is an input file loaded or not", "def set_filename(self, filename):\n self._filename = filename", "def getReadersFromFilenames(self):\n\t\tfor i in self.readers:\n\t\t\tdel i\n\t\tself.readers = []\n\n\t\tif not self.filenames:\n\t\t\traise Logging.GUIError(\"No files could be found\", \\\n\t\t\t\t\t\t\t\t\t\"For some reason, no files were listed to be imported.\")\t\t \n\t\t\t\t\t\n\t\tfiles = self.filenames\n\t\tprint \"Determining readers from \", self.filenames\n\n\t\tisRGB = 1\n\t\tself.ext = files[0].split(\".\")[-1].lower()\n\t\tdim = self.dimMapping[self.ext]\n\t\t# Initially flip the image if it's tiff, png or jpg.\n\t\t# In setVerticalFlip we negate the setting to have it set correctly.\n\t\tif self.ext.lower() in [\"png\", \"jpg\", \"jpeg\"]:\n\t\t\tself.flipVertically = True\n\t\tif self.ext in [\"tif\", \"tiff\"]:\n\t\t\treader = 
vtkbxd.vtkExtTIFFReader()\n\t\t\treader.SetFileName(files[0])\n\t\t\treader.UpdateInformation()\n\t\t\tif reader.GetNumberOfScalarComponents() >= 3:\n\t\t\t\tprint \"MODE IS RGB, IS AN RGB IMAGE\"\n\t\t\telse:\n\t\t\t\tprint \"MODE ISN'T RGB, THEREFORE NOT RGB\"\n\t\t\t\tisRGB = 0\n\t\t\trdr = self.getReaderByExtension(self.ext, isRGB)\n\t\t\trdr.SetFileName(files[0])\n\t\t\tif rdr.GetNumberOfSubFiles() > 1:\n\t\t\t\tdim = 3\n\t\t\t\t\n\t\tself.isRGB = isRGB\n\t\tself.is3D = (dim == 3)\n\t\t\n\t\tdirName = os.path.dirname(files[0])\n\t\tprint \"THERE ARE\", self.slicesPerTimepoint, \"SLICES PER TIMEPOINT\"\n\t\tself.ext = files[0].split(\".\")[-1].lower()\n\t\t\n\t\tif dim == 3:\n\t\t\ttotalFiles = len(files)\n\t\t\tfor i, file in enumerate(files):\n\t\t\t\trdr = self.getReaderByExtension(self.ext, isRGB)\n\t\t\t\trdr.SetFileName(file)\n\t\t\t\tself.readers.append(rdr)\n\t\t\treturn\n\t\t\t\n\t\ttotalFiles = len(files) / self.slicesPerTimepoint\n\n\t\timgAmnt = len(files)\n\t\tif totalFiles == 1:\n\t\t\trdr = self.getReaderByExtension(self.ext, isRGB)\n\t\t\tarr = vtk.vtkStringArray()\n\t\t\tfor fileName in files:\n\t\t\t\tarr.InsertNextValue(os.path.join(dirName, fileName))\n\t\t\trdr.SetFileNames(arr)\n\t\t\tself.readers.append(rdr)\n\t\t\treturn\n\t\t\t\n\t\tif imgAmnt > 1:\n\t\t\t# If the pattern doesn't have %, then we just use\n\t\t\t# the given filenames and allocate them to timepoints\n\t\t\t# using slicesPerTimepoint slices per timepoint\n\t\t\tntps = len(files) / self.slicesPerTimepoint\n\t\t\tfilelst = files[:]\n\t\t\t# dirn #TODO: what was this?\n\t\t\tfor tp in range(0, ntps):\n\t\t\t\trdr = self.getReaderByExtension(self.ext, isRGB)\n\t\t\t\tarr = vtk.vtkStringArray()\n\t\t\t\tfor i in range(0, self.slicesPerTimepoint):\n\t\t\t\t\tarr.InsertNextValue(filelst[0])\n\t\t\t\t\tfilelst = filelst[1:]\n\t\t\t\trdr.SetFileNames(arr)\n\t\t\t\trdr.SetDataExtent(0, self.x - 1, 0, self.y - 1, 0, self.slicesPerTimepoint - 1)\n\t\t\t\trdr.SetDataSpacing(self.spacing)\n\t\t\t\trdr.SetDataOrigin(0, 0, 0)\n\t\t\t\tself.readers.append(rdr)\n\t\t\treturn\n\t\t\n\t\telif imgAmnt == 1:\n\t\t\t# If only one file\n\t\t\trdr = self.getReaderByExtension(self.ext, isRGB)\n\t\t\trdr.SetDataExtent(0, self.x - 1, 0, self.y - 1, 0, self.slicesPerTimepoint - 1)\n\t\t\trdr.SetDataSpacing(self.spacing)\n\t\t\trdr.SetDataOrigin(0, 0, 0)\n\t\t\trdr.SetFileName(files[0])\n\n\t\t\tLogging.info(\"Reader = \", rdr, kw = \"io\")\n\t\t\tself.readers.append(rdr)", "def __init__(self, root_dir):\n self.paths = glob.glob(root_dir + \"/*.csv\")\n self.target = 'Default'\n # Grouping variable names", "def readdata(self, filepaths):\n pass", "def GetFileNames(self):\n return self.files", "def load_all_files(self):\n\t\tself.get_rankings()\n\t\tself.get_partition()\n\t\tself.__load_factors()\n\t\tself.get_document_associations()\n\t\tself.get_term_associations()", "def set_filename(self, name):\n self.ds_filename = name", "def addFileNames(self, fileNames):\n with Tracer(traceLogger):\n infos = []\n\n oldNumFiles = len(self.topLevelOperator.Dataset)\n # HACK: If the filePath isn't valid, replace it\n # This is to work around the scenario where two independent data selection applets are coupled, causing mutual resizes.\n # This will be fixed when a multi-file data selection applet gui replaces this gui. 
\n for i in reversed( range( oldNumFiles ) ):\n if not self.topLevelOperator.Dataset[i].ready():\n oldNumFiles -= 1\n else:\n break\n \n \n # Assign values to the new inputs we just allocated.\n # The GUI will be updated by callbacks that are listening to slot changes\n for i, filePath in enumerate(fileNames):\n datasetInfo = DatasetInfo()\n cwd = self.topLevelOperator.WorkingDirectory.value\n \n if not areOnSameDrive(filePath,cwd):\n QMessageBox.critical(self, \"Drive Error\",\"Data must be on same drive as working directory.\")\n return\n \n absPath, relPath = getPathVariants(filePath, cwd)\n \n # Relative by default, unless the file is in a totally different tree from the working directory.\n if len(os.path.commonprefix([cwd, absPath])) > 1:\n datasetInfo.filePath = relPath\n else:\n datasetInfo.filePath = absPath\n\n h5Exts = ['.ilp', '.h5', '.hdf5']\n if os.path.splitext(datasetInfo.filePath)[1] in h5Exts:\n datasetNames = self.getPossibleInternalPaths( absPath )\n if len(datasetNames) > 0:\n datasetInfo.filePath += str(datasetNames[0])\n else:\n raise RuntimeError(\"HDF5 file %s has no image datasets\" % datasetInfo.filePath)\n\n # Allow labels by default if this gui isn't being used for batch data.\n datasetInfo.allowLabels = ( self.guiMode == GuiMode.Normal )\n infos.append(datasetInfo)\n\n #if no exception was thrown, set up the operator now\n self.topLevelOperator.Dataset.resize( oldNumFiles+len(fileNames) )\n for i in range(len(infos)):\n self.topLevelOperator.Dataset[i+oldNumFiles].setValue( infos[i] )", "def set_fnames(self, fnames):\n self.fnames = fnames[:]", "def filenames(self):\n return self._filenames", "def __init__(self):\n self.filelist = list()", "def __set_file_info(self, path_name):\n file_name = os.path.basename(path_name)\n file_path = os.path.dirname(path_name)\n self._file_path = file_path\n self._file_name = file_name", "def setup(self):\n # Call the baseclass setup to resolve any selections\n super().setup()\n\n self.outcont = None\n\n # If we are returning the same file for every iteration,\n # then load that file now.\n if self.only_prefix:\n filename = self.prefix\n\n split_ext = os.path.splitext(filename)\n if split_ext[1] not in [\".h5\", \".hdf5\"]:\n filename = split_ext[0] + \".h5\"\n\n # Load file into outcont attribute\n self.outcont = self._load_file(filename)\n\n else:\n self.prefix = os.path.splitext(self.prefix)[0]", "def SetFileName(self, *args) -> \"void\":\n return _itkVTKPolyDataReaderPython.itkVTKPolyDataReaderMF3_SetFileName(self, *args)", "def setup(self, files):\n if not isinstance(files, (list, tuple)):\n raise RuntimeError(\"Argument must be list of files.\")\n\n self.files = files\n\n # Set up frequency selection.\n if self.freq_physical:\n basefreq = np.linspace(800.0, 400.0, 1024, endpoint=False)\n self.freq_sel = sorted(\n set([np.argmin(np.abs(basefreq - freq)) for freq in self.freq_physical])\n )\n\n elif self.channel_range and (len(self.channel_range) <= 3):\n self.freq_sel = slice(*self.channel_range)\n\n elif self.channel_index:\n self.freq_sel = self.channel_index\n\n else:\n self.freq_sel = slice(None)", "def download_files(self):", "def set_file_path_name(self):\n self.file_path_name = self.get_file_path() + self.get_file_name()", "def processFileNames(self, fileNames):\n inputFiles = [open(f,'r') for f in fileNames]\n try:\n datainput.processFiles(inputFiles)\n finally:\n for fp in inputFiles:\n fp.close()", "def paths(self, paths):\r\n self._paths = paths\r\n self._extract()", "def SetFileName(self, *args) -> \"void\":\n 
return _itkVTKPolyDataReaderPython.itkVTKPolyDataReaderMF2_SetFileName(self, *args)", "def make_files(self):\n return []", "def read(self,filenames):\n\n if isinstance(filenames, basestring):\n filenames = [filenames]\n read_ok = []\n for filename in filenames:\n try:\n fp = open(filename)\n except IOError:\n continue\n self._read(fp)\n fp.close()\n read_ok.append(filename)\n return read_ok", "def associate_files(self):\n self.MatlabFiles = {'defaults': os.path.join(self.ParentDir,'defaults.m'),\n 'avevel': os.path.join(self.OutDir, 'pix2avevel.mat'),\n 'cumdef': os.path.join(self.OutDir, 'pix2cumdef.mat'),\n 'variance': os.path.join(self.OutDir, 'vaiance.mat')}", "def _initNames(self):\n self.outselect = os.path.join(self.workpath, 'FT1_selected'+self.suffix+'.fits')\n self.outmktime = os.path.join(self.workpath, 'FT1_filtered'+self.suffix+'.fits')\n self.outltcube = os.path.join(self.workpath, 'LtCube'+self.suffix+'.fits')\n self.outbincub = os.path.join(self.workpath, 'BinCube'+self.suffix+'.fits')\n self.outbinmap = os.path.join(self.workpath, 'CMAP'+self.suffix+'.fits')\n self.outbinexp = os.path.join(self.workpath, 'BinExpMap'+self.suffix+'.fits')\n self.outexpmap = os.path.join(self.workpath, 'ExpMap'+self.suffix+'.fits')\n self.outsrcmap = os.path.join(self.workpath, 'SrcMaps'+self.suffix+'.fits')\n self.outgtlike = os.path.join(self.workpath, 'Results'+self.suffix+'.dat')\n self.outmodel = os.path.join(self.workpath, 'OutModel'+self.suffix+'.xml')\n self.outapert = os.path.join(self.workpath, 'LC_ApPhoto'+self.suffix+'.fits')\n self.outgtmod = os.path.join(self.workpath, 'GtModel'+self.suffix+'.fits')\n self.outresid = os.path.join(self.workpath, 'Resid'+self.suffix+'.fits')\n self.outresig = os.path.join(self.workpath, 'ResSigma'+self.suffix+'.fits')\n self.outtsmap = os.path.join(self.workpath, 'TSMmap'+self.suffix+'.fits')\n return\n # self.outfind = self.dir + self.src + '_FindSrc'+self.suffix+'.txt'", "def set_paths(self, paths):\n self.paths = paths", "def extract_files(self, *filenames):\n for filename in filenames:\n data = self.read_file(filename)\n f = open(filename, 'wb')\n f.write(data or b'')\n f.close()", "def fixupFileNames(process):\n if not hasattr(process.source, \"fileNames\"):\n process.source.fileNames = cms.untracked.vstring()\n return", "def files_set(self):\n return UploadedFile.get_files_for(self)", "def listFiles(self):\n pass", "def __setFileName(self,\n fileName):\n self.__fileName = fileName\n return self", "def set_documents_names(cls, input_list_names: List[str]) -> None:\n cls.documents_names = input_list_names", "def parse_files(self, filenames=\"\"):\n if self._parsed:\n raise Exception(\"Have been parsed\")\n self._parsed = True\n\n if filenames:\n if not isinstance(filenames, (list, tuple)):\n filenames = self._parse_string(filenames).strip(\", \").split(\",\")\n\n for filename in filenames:\n self._parse_file(filename)\n\n self._check_and_fix()", "def clean_files(self):\n self.filenames.clear()", "def get_tweet_file_names(self) -> None:\n self.list_of_files = os.listdir(self.input_file_path)\n no_of_files = len(self.list_of_files)\n for iterator in range(0, no_of_files):\n self.list_of_files[iterator] = self.input_file_path + \"\\\\\" + self.list_of_files[iterator]\n print(\"no of json files \",no_of_files)", "def set_directories(args):\n global READS_DIR\n READS_DIR = args.reads_dir\n if not os.path.isdir(READS_DIR):\n sys.exit(\"%s not a directory\" % READS_DIR)\n READS_DIR = os.path.abspath(READS_DIR) + \"/\"\n os.chdir(READS_DIR)", "def 
files(self):\n all_files = set()\n for label in self.filesets:\n all_files.update(self.filesets[label])\n return all_files", "def _read_files(self):\n \n for langname in self.langnames:\n filename = f'data/word_lists/{langname}.txt'\n with open(filename) as f:\n index = self.langnames.index(langname)\n lang_list = getattr(self, f'word_list{index}')\n words = f.readlines()\n for word in words:\n fword = ''.join(char for char in word if char is not '\\n')\n lang_list.append(fword)\n f.close()\n return", "def receiveFileFromDialog(self, paths):\n self.filesList.filesStartedLoading.emit(False)\n for p in paths:\n self.filesList.registerFile(None, QtCore.QString(p))\n self.filesList.filesFinishedLoading.emit(True)", "def add_files(self, paths):\n for path in paths:\n self.add_file(path)", "def setup(self, files):\n if self.acqtype not in self._acqtype_reader:\n raise ValueError(f'Specified acqtype \"{self.acqtype}\" is not supported.')\n\n if not isinstance(files, (list, tuple)):\n raise ValueError(\"Argument must be list of files.\")\n\n self.files = files", "def get_files(self):\n # self.folder= +str(int(time.time()))\n if not os.path.exists(self.folder):\n os.mkdir(self.folder)\n while len(self.url_queue): # If we have URLs to crawl - we crawl\n href = self.url_queue.popleft() # We grab a URL from the left of the list\n filename = href.rsplit('/', 1)[-1]\n print(\"Downloading %s to %s...\" % (href, filename))\n fullname = os.path.join(self.folder, filename)\n urlretrieve(href, fullname)\n self.xlfnames.append(filename)", "def load_data(self):\n for set_name in self.image_dir_path:\n if self.verbose:\n print('\\n> Loading data files for the set: ' + set_name)\n\n # image dir\n image_dir = os.path.join(self.data_path, self.image_dir_path[set_name])\n\n # annotation file path\n annot_filepath = os.path.join(self.data_path, self.annotation_path[set_name])\n\n if 'test' in set_name:\n yield load_data_test(set_name, image_dir, annot_filepath, self.verbose)\n else:\n yield self.load_data_trainval(set_name, image_dir, annot_filepath)", "def _find_named_files(self):\n for name, description in self.named_files.iteritems():\n name = name.format(job_name=self.job_name)\n f_path = '{}/{}'.format(self.rism3d_folder, name)\n if os.path.isfile(f_path):\n self.file_path_dic[description] = f_path\n else:\n self._not_found_error(f_path)", "def SetFilename(self, f):\n self._filename = f", "def set_options(self, option_list):\n self._file_name = option_list['output_file'].get_value()", "def __init__(self, files, **kwargs):\n self.files = {name: (None, data, 'application/octet-stream', {}) for name, data in files.iteritems()}\n for name, data in kwargs.iteritems():\n name = name.replace('_', '-')\n try:\n options = self.options_by_name[name]\n except KeyError:\n raise ValueError('Unknown option {}'.format(name))\n self.files[name] = (None, data, options.content_type, options.headers)", "def setStatiFile(self, filename):\n self.statiFile = filename", "def files(self):\r\n all_files = set()\r\n for label in self.filesets:\r\n all_files.update(self.filesets[label])\r\n return all_files", "def files(self):\r\n all_files = set()\r\n for label in self.filesets:\r\n all_files.update(self.filesets[label])\r\n return all_files", "def prepare_filenames(config: Dict[str, Any]) -> Dict[str, Any]:\n for handler_name in config[\"handlers\"].keys():\n handler_config = config[\"handlers\"][handler_name]\n if \"filename\" in handler_config:\n filename = Path(handler_config[\"filename\"]).name\n handler_config[\"filename\"] = 
str(LOGS_DIR.joinpath(filename))\n return config", "def SetFileName(self, *args) -> \"void\":\n return _itkVTKPolyDataReaderPython.itkVTKPolyDataReaderMD3_SetFileName(self, *args)" ]
[ "0.6955265", "0.6887402", "0.6878499", "0.67892617", "0.6675026", "0.6572736", "0.65357256", "0.65231085", "0.64586145", "0.64167434", "0.6371263", "0.6240702", "0.6175739", "0.6168266", "0.61507344", "0.6141928", "0.61352116", "0.6123154", "0.6068659", "0.6062177", "0.60527366", "0.59976", "0.599101", "0.5984772", "0.5980417", "0.5964459", "0.5948665", "0.5947353", "0.59333664", "0.5925125", "0.59190404", "0.5913133", "0.5907783", "0.5898141", "0.588922", "0.5876622", "0.5870405", "0.5864258", "0.58543247", "0.5848954", "0.5827403", "0.58256423", "0.58104616", "0.57917416", "0.57869333", "0.57845265", "0.5759612", "0.5754263", "0.573561", "0.5722492", "0.57139224", "0.57032067", "0.57002354", "0.57000136", "0.5694816", "0.56882036", "0.5686324", "0.5684419", "0.5677718", "0.5675255", "0.5669877", "0.56649894", "0.5663994", "0.56555665", "0.5641285", "0.5625837", "0.5622011", "0.5621636", "0.5617949", "0.5617229", "0.5607212", "0.5606391", "0.55955863", "0.55931586", "0.5583197", "0.5580898", "0.5574398", "0.556907", "0.55620825", "0.5541948", "0.5541639", "0.5541297", "0.55398214", "0.5537948", "0.5507685", "0.5499076", "0.5486449", "0.5480128", "0.54788744", "0.5475625", "0.54715896", "0.54704374", "0.54678786", "0.5461657", "0.5460538", "0.5458359", "0.5449884", "0.5449884", "0.5442288", "0.54414624" ]
0.72363704
0
Set a flag indicating whether the image should be flipped vertically
def setVerticalFlip(self, flag): if self.ext.lower() in ["png", "jpg", "jpeg"]: self.flipVertically = not flag else: self.flipVertically = flag
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def flip_vertical(img):\r\n #reading image\r\n im = Image.open(\"filename\")\r\n\r\n #flipping image vertically\r\n newimg = im.transpose(PIL.Image.FLIP_TOP_BOTTOM)\r\n return img", "def flip_image_vertical(image):\n return cv.flip(image, 1)", "def flip_image(img, vert=True):\n if vert:\n return img.transpose(Image.FLIP_TOP_BOTTOM)\n else:\n return img.transpose(Image.FLIP_LEFT_RIGHT)", "def vflip(img):\n #if not _is_pil_image(img):\n # raise TypeError('img should be PIL Image. Got {}'.format(type(img)))\n\n return img.transpose(Image.FLIP_TOP_BOTTOM)", "def flip(self):\n \n if self.faceup:\n self.faceup = False\n else:\n self.faceup = True", "def setHorizontalFlip(self, flag):\n\t\tself.flipHorizontally = flag", "def flip(img, boolean=True):\n return pg.transform.flip(img, boolean, False)", "def flip_vertical(original_image: Image) -> Image :\r\n \r\n new_image = copy(original_image)\r\n \r\n pixel_width = get_width(original_image)\r\n pixel_height = get_height(original_image) \r\n\r\n \r\n for x in range(pixel_width) :\r\n for y in range(pixel_height) :\r\n original_vertical_pixel = get_color(original_image, x, y)\r\n opposite_vertical_pixel = pixel_height - 1 - y\r\n set_color(new_image, x, opposite_vertical_pixel, original_vertical_pixel)\r\n \r\n return new_image", "def flip(self):", "def flip_vertical(image: Image) -> Image:\r\n flipped_image = copy(image)\r\n middle_pixel = get_width(flipped_image) // 2\r\n width = get_width(flipped_image)\r\n height = get_height(flipped_image)\r\n \r\n for x in range(middle_pixel):\r\n for y in range(height):\r\n r,g,b = get_color(image,x,y)\r\n new_r, new_g, new_b = get_color(image, abs(width-x) - 1, y)\r\n set_color(flipped_image,x,y,create_color(new_r,new_g,new_b))\r\n set_color(flipped_image,width-x-1,y,create_color(r,g,b))\r\n \r\n return flipped_image", "def flip(self, xbool, ybool):\n self._surf = pygame.transform.flip(self._surf, xbool, ybool).convert_alpha()", "def flip(self, mode='h'):\n # TODO: Implement the flip function. 
Remember to record the boolean values is_horizontal_flip and\n # is_vertical_flip.\n if mode == 'h':\n self.is_horizontal_flip = True\n self.x = np.flipud(self.x)\n elif mode == 'v':\n self.is_vertical_flip = True\n self.x = np.fliplr(self.x)\n else:\n self.is_vertical_flip = True\n self.is_horizontal_flip = True\n self.x = np.fliplr(self.x)\n self.x = np.flipud(self.x)\n # raise NotImplementedError\n #######################################################################\n # #\n # #\n # TODO: YOUR CODE HERE #\n # #\n # #\n #######################################################################", "def flip(self, bev_direction: str = 'horizontal') -> None:\n pass", "def flip_faceup(self):\r\n self.faceup = True", "def td_flip(self):\n self.cw_rotate()\n self.cw_rotate()\n self.lr_flip()\n self.find_edges()", "def flip_vertical(image: Image) -> Image:\r\n flipped_image = copy(image)\r\n middle_pixel = get_width(flipped_image) // 2\r\n width = get_width(flipped_image)\r\n height = get_height(flipped_image)\r\n\r\n for x in range(middle_pixel):\r\n for y in range(height):\r\n r, g, b = get_color(image, x, y)\r\n new_r, new_g, new_b = get_color(image, abs(width - x) - 1, y)\r\n set_color(flipped_image, x, y, create_color(new_r, new_g, new_b))\r\n set_color(flipped_image, width - x - 1, y, create_color(r, g, b))\r\n\r\n return flipped_image", "def test_flip_vertical() -> None:\n original = create_image(3, 2)\n set_color(original, 0, 0, create_color(0, 0, 0))\n set_color(original, 1, 0, create_color(90, 90, 90))\n set_color(original, 2, 0, create_color(255, 255, 255))\n set_color(original, 0, 1, create_color(10, 10, 10))\n set_color(original, 1, 1, create_color(0, 0, 0))\n set_color(original, 2, 1, create_color(90, 90, 90))\n \n expected = create_image(3, 2)\n set_color(expected, 0, 0, create_color(10, 10, 10))\n set_color(expected, 1, 0, create_color(0, 0, 0))\n set_color(expected, 2, 0, create_color(90, 90, 90))\n set_color(expected, 0, 1, create_color(0, 0, 0))\n set_color(expected, 1, 1, create_color(90, 90, 90))\n set_color(expected, 2, 1, create_color(255, 255, 255))\n \n flipped_vertical = flip_vertical(original)\n \n for x, y, col in flipped_vertical: # tests each colour of each pixel of the filtered sample image and compares it to the expected image\n check_equal('Checking pixel @(' + str(x) + ', ' + str(y) + ')', col, get_color(expected, x, y))", "def set_flip(self, val):\n self.flip = val", "def __flip(img, flip, flip_type=Image.FLIP_LEFT_RIGHT):\n if flip:\n return img.transpose(flip_type)\n return img", "def set_flipped(self, x, y):\n self.pieces[x + (y * self.width)].set_flipped()", "def flip(self):\n if self.is_face_up:\n arcade.load_texture(self.back_file)\n self.is_face_up = False\n else:\n arcade.load_texture(self.face_file)\n self.is_face_up = True", "def flip(self, horizontally):\n\t\tself.currentPixbuf = self.currentPixbuf.flip(horizontally)\n\t\tself.scaleCache[1] = 0\n\t\tgc.collect()", "def flip_augmentation():\n return lambda image: ImageOps.flip(image)", "def flipNormals(self):\n self.flip = not self.flip", "def mirrorImage(self):\n\n im = Image.open(self.ActivePhoto)\n out = im.transpose(PIL.Image.FLIP_LEFT_RIGHT)\n out.save(self.ActivePhoto)\n self.photo.setPixmap(QtGui.QPixmap(self.ActivePhoto))\n print (\"Flipped image\")", "def flip(self):\n self.width, self.height = self.height, self.width", "def flip(img, code=0):\n\treturn cv2.flip(img, flipCode=code)", "def vflip(img):\n if not _is_numpy(img):\n raise TypeError('img should be Numpy Image. 
Got {}'.format(type(img)))\n\n return np.flipud(img)", "def flip(self, x: bool, y: bool) -> 'BaseImage':\n assert isinstance(x, bool)\n assert isinstance(y, bool)\n assert (x or y), 'at least one axis should be True'\n self._surface = pygame.transform.flip(self._surface, x, y)\n return self", "def flip_h(image, gt):\n result_im = cv2.flip(image, 1)\n result_gt = cv2.flip(gt, 1)\n\n return result_im, result_gt", "def vflip(img, data_format='CHW'):\n _assert_image_tensor(img, data_format)\n\n h_axis = _get_image_h_axis(data_format)\n\n return img.flip(axis=[h_axis])", "def flip_image(image):\n return cv2.flip(image, flipCode=1)", "def img_add_flip(arr, flip_horiz = True, flip_vert = False):\r\n assert len(arr.shape) == 3, \"'arr' input array must be three dimensional\"\r\n arr_copy = arr.copy()\r\n if flip_horiz:\r\n arr_copy = np.fliplr(arr_copy)\r\n if flip_vert:\r\n arr_copy = np.flipud(arr_copy)\r\n return arr_copy", "def flip(self, horizontal):\n try:\n self._is_transformable()\n horizontal = get_int(horizontal)\n except NotTransformable as e:\n self._app[\"statusbar\"].message(str(e) + \" flip\", \"error\")\n return\n except StringConversionError as e:\n self._app[\"statusbar\"].message(str(e), \"error\")\n return\n images = self.get_images(\"Flipped\")\n # Apply changes\n for fil in images:\n if fil not in self._changes:\n self._changes[fil] = [0, 0, 0]\n if horizontal:\n self._changes[fil][1] = \\\n (self._changes[fil][1] + 1) % 2\n else:\n self._changes[fil][2] = \\\n (self._changes[fil][2] + 1) % 2\n # Flip the image shown\n if self._app.get_path() in images:\n self.emit(\"changed\", \"flip\", horizontal)\n # Reload thumbnails of flipped images immediately\n if self._app[\"thumbnail\"].toggled:\n self.apply()", "def collate_fn_flip(self, batch):\n FT = torch.FloatTensor\n img, uvd_gt = zip(*batch)\n flip = random.randint(1, 10000)%2\n # Do flipping\n # 0 = left, 1 = right\n hand_side = 1\n if flip:\n hand_side = 0 \n\n new_img = []\n new_uvd = []\n for i, u in batch:\n if flip:\n i = i.transpose(Image.FLIP_LEFT_RIGHT)\n u[:, 0] = 0.999 - u[:, 0]\n i = np.asarray(i)\n i = i/255.0\n i = IMG.imgshape2torch(i)\n new_img.append(i)\n new_uvd.append(u)\n \n new_img = FT(new_img)\n new_uvd = FT(new_uvd)\n return new_img, new_uvd, hand_side", "def flip(self, row: int, col: int) -> None:\n self.state[row, col] = not self.state[row, col]", "def flip_image(image):\n\n return cv2.flip(image, 1)", "def test_random_vertical_flip(dummy_input):\n # Test the 2D image: H, W, C\n image, label = dummy_input(image_size=(512, 512, 3),\n label_size=(512, 512, 1))\n transform = RandomVerticalFlip(prob=1)\n _image, _label = transform(image, label)\n _image, _label = transform(_image, _label)\n assert (image == _image).all()\n assert (label == _label).all()\n \n # Test the 3D image: H, W, D, C\n image, label = dummy_input(image_size=(512, 512, 20, 3),\n label_size=(512, 512, 20, 1))\n transform = RandomVerticalFlip(prob=1)\n _image, _label = transform(image, label)\n _image, _label = transform(_image, _label)\n assert (image == _image).all()\n assert (label == _label).all()", "def vflip(img):\n if not _is_numpy_image(img):\n raise TypeError('img should be nparray Image. 
Got {}'.format(type(img)))\n\n return cv2.flip(img, 0)", "def flip_image(image, label):\n # Flip the image\n cv2.flip(image, 1, image)\n\n # Flip the label\n label[1] = image.shape[1] - label[1]", "def set_flip(self, flipconv):\n if flipconv is None:\n flipconv = 'astro' # default\n if flipconv == 'astro': self._flip = -1\n elif flipconv == 'geo': self._flip = 1\n else: raise ValueError(\"flipconv must be 'astro', 'geo' or None for default.\")", "def set_flipout(flipout):\n if isinstance(flipout, bool):\n __SETTINGS__._FLIPOUT = flipout\n else:\n raise TypeError('flipout must be True or False')", "def vflip(self):\n for y in range(0, self.height // 2):\n for x in range(0, self.width):\n self._chars[x][y], self._chars[x][self.height - 1 - y] = self._chars[x][self.height - 1 - y], self._chars[x][y]\n self._fginfo[x][y], self._fginfo[x][self.height - 1 - y] = self._fginfo[x][self.height - 1 - y], self._fginfo[x][y]\n self._bginfo[x][y], self._bginfo[x][self.height - 1 - y] = self._bginfo[x][self.height - 1 - y], self._bginfo[x][y]\n self._strDirty = True", "def mirror(image):\n\n return cv2.flip(image, 1)", "def saveflip(image, fname, outpath, axis='x', preserve_name=False):\n if not preserve_name:\n fpath = genSavePath(outpath, fname, modstring=f\"mirror_{axis}\")\n else:\n fpath = genSavePath(outpath, fname)\n im = copy(image)\n if axis == 'x':\n im = im.transpose(Image.FLIP_LEFT_RIGHT)\n elif axis == 'y':\n im = im.transpose(Image.FLIP_TOP_BOTTOM)\n else:\n raise Exception('No valid axis procvided for flipping: {} - received value: {}'.format(fname, axis))\n try:\n im.save(fpath, subsample=\"keep\", qtables=image.quantization, optimize=True)\n\n except IOError as m:\n print( \"Flipped({}) image creation failed for: {}. \\nReason:{}\".format(axis,fname,m))", "def _augment(img):\r\n return flip(img, axis=2)", "def flip_horizontal(img):\r\n #reading image\r\n im = Image.open(\"filename\")\r\n\r\n #flipping image horizontally\r\n newimg = im.transpose(PIL.Image.FLIP_LEFT_RIGHT)\r\n \r\n return img", "def flip_image(image_path, saved_location):\n image_obj = Image.open(image_path)\n rotated_image = image_obj.transpose(Image.FLIP_LEFT_RIGHT)\n rotated_image.save(saved_location)", "def flip_image(image, direction):\n prevShape = image.shape\n image, reshaped = reshape_to_cv_format(image, False)\n image = cv.flip(image, direction)\n if reshaped: \n image = image.reshape(prevShape)\n return image", "def setFlag(self):\n if self.inPlay and not self.shown:\n self.flag = not(self.flag)\n image_index = 11 if self.flag else 10\n self.configure(image = Tile.images[image_index])\n return 1 if self.flag else -1\n return 0", "def flip_image_horizontal(image):\n return cv.flip(image, 0)", "def flip(self, xflip=True, yflip=False):\n self.drawer.flush()\n img = self.img\n if xflip: img = img.transpose(PIL.Image.FLIP_LEFT_RIGHT)\n if yflip: img = img.transpose(PIL.Image.FLIP_TOP_BOTTOM)\n self.img = img\n self.update_drawer_img()\n return self", "def _augment(img):\n return flip(img, axis=2)", "def flip(self):\n self.align = self._left if self.align == self._right else self._right\n self.group.layout_all()", "def flip_rotate(img):\r\n\r\n choice = int(8*np.random.rand())\r\n \r\n if choice == 0:\r\n return img\r\n if choice == 1:\r\n return np.rot90(img, 1)\r\n if choice == 2:\r\n return np.rot90(img, 2)\r\n if choice == 3:\r\n return np.rot90(img, 3)\r\n if choice == 4:\r\n return np.flip(img, 0)\r\n if choice == 5:\r\n return np.flip(img, 1)\r\n if choice == 6:\r\n return np.flip(np.rot90(img, 1), 0)\r\n if 
choice == 7:\r\n return np.flip(np.rot90(img, 1), 1)", "def check_flip(origin_imgs, result_imgs, flip_type):\n n, _, _, _ = np.shape(origin_imgs)\n if flip_type == 'horizontal':\n for i in range(n):\n if np.any(result_imgs[i] != np.fliplr(origin_imgs[i])):\n return False\n else:\n # yapf: disable\n for i in range(n):\n if np.any(result_imgs[i] != np.transpose(np.fliplr(np.transpose(origin_imgs[i], (1, 0, 2))), (1, 0, 2))): # noqa:E501\n return False\n # yapf: enable\n return True", "def reversible(self) -> bool:\n xy_row = np.column_stack(\n (\n np.linspace(\n -self.imgsz[0] / (2 * self.f[0]),\n self.imgsz[0] / (2 * self.f[0]),\n int(self.imgsz[0]),\n ),\n np.zeros(int(self.imgsz[0])),\n )\n )\n dxy = self._distort(xy_row)\n continuous_row = np.all(dxy[1:, 0] >= dxy[:-1, 0])\n xy_col = np.column_stack(\n (\n np.zeros(int(self.imgsz[1])),\n np.linspace(\n -self.imgsz[1] / (2 * self.f[1]),\n self.imgsz[1] / (2 * self.f[1]),\n int(self.imgsz[1]),\n ),\n )\n )\n dxy = self._distort(xy_col)\n continuous_col = np.all(dxy[1:, 1] >= dxy[:-1, 1])\n return continuous_row and continuous_col", "def on_IV_mode_toggled(self, checked):\n # TODO: not implemented yet\n if checked:\n self.vimode = 0\n qmdz_const.VI_MODE = 0\n self.sample_id.setText(\"IV_test\")\n self.VI_MPL.clear_curve()\n self.VI_MPL.set_iv_mode()", "def readout_flipped(self, iamp):\n flipped = ct.c_int()\n self.lib.IsReadoutFlippedByAmplifier(ct.c_int(iamp),\n ct.pointer(flipped))\n return bool(flipped.value)", "def fl_flip_yorigin():\n _fl_flip_yorigin = library.cfuncproto(\n library.load_so_libforms(), \"fl_flip_yorigin\",\\\n None, [],\\\n \"\"\"void fl_flip_yorigin()\"\"\")\n _fl_flip_yorigin()", "def flip_frame(frame, flip_code):\n return cv.flip(frame, flip_code)", "def flip(self) -> int:\n self.flags = ~(self.flags)\n return self.flags", "def set_flip_board(self, new_value: bool) -> None:\n self._flip_board = new_value", "def _flip_y(self, zoom, row):\n\n if row is None or zoom is None:\n raise TypeError(\"zoom and row cannot be null\")\n\n return (2 ** zoom) - 1 - row", "def random_flip(image ):\n if random.randint(0,1):\n image = flipud(image) # vertical flip\n if random.randint(0,1):\n image = fliplr(image) # horizontal flip\n return image", "def fliplr(img):\n inv_idx = torch.arange(img.size(3) - 1, -1, -1).long() # N x C x H x W\n img_flip = img.index_select(3, inv_idx)\n\n return img_flip", "def invert(self, img):\n return self.inverse()(img)", "def flip(h):\n return np.flip(h)", "def flip(h):\n return np.flip(h)", "def collate_fn_no_flip(self, batch):\n FT = torch.FloatTensor\n img, uvd_gt = zip(*batch)\n\n new_img = []\n new_uvd = []\n for i, u in batch:\n if self.pred_img_side == 'left':\n i = i.transpose(Image.FLIP_LEFT_RIGHT)\n u[:, 0] = 0.999 - u[:, 0]\n i = np.asarray(i)\n i = i/255.0\n i = IMG.imgshape2torch(i)\n new_img.append(i)\n new_uvd.append(u)\n \n new_img = FT(new_img)\n new_uvd = FT(new_uvd)\n return new_img, new_uvd", "def invert(self, val):\n self._invert = val\n if val:\n self.write_cmd(self.CMD_SET_DISP_REVERSE)\n else:\n self.write_cmd(self.CMD_SET_DISP_NORMAL)", "def random_vertical_filp(self, img, p = 0.5):\n if self.decision(p):\n img = cv2.flip(img, 0)\n return img", "def __splitOrientation(self, checked):\n if checked:\n self.setSplitOrientation(Qt.Horizontal)\n self.splitViewAct.setIcon(\n UI.PixmapCache.getIcon(\"splitHorizontal.png\"))\n self.splitRemoveAct.setIcon(\n UI.PixmapCache.getIcon(\"remsplitHorizontal.png\"))\n self.newDocumentSplitViewAct.setIcon(\n 
UI.PixmapCache.getIcon(\"splitHorizontal.png\"))\n else:\n self.setSplitOrientation(Qt.Vertical)\n self.splitViewAct.setIcon(\n UI.PixmapCache.getIcon(\"splitVertical.png\"))\n self.splitRemoveAct.setIcon(\n UI.PixmapCache.getIcon(\"remsplitVertical.png\"))\n self.newDocumentSplitViewAct.setIcon(\n UI.PixmapCache.getIcon(\"splitVertical.png\"))\n Preferences.setUI(\"SplitOrientationVertical\", checked)", "def transpose(self, method):\r\n w, h = self.size\r\n if method == FLIP_LEFT_RIGHT:\r\n _im = cv2.flip(self._instance, 1)\r\n elif method == FLIP_TOP_BOTTOM:\r\n _im = cv2.flip(self._instance, 0)\r\n elif method == ROTATE_90:\r\n _im = self.rotate_bound(270)\r\n x = self.size[0]//2-self.size[1]//2\r\n box = (0, x, self.size[0], x+self.size[1])\r\n _im = self.crop(box, _im)\r\n elif method == ROTATE_180:\r\n _im = self.rotate(180, self._instance)\r\n elif method == ROTATE_270:\r\n _im = self.rotate_bound(90)\r\n x = self.size[0]//2-self.size[1]//2\r\n box = (0, x, self.size[0], x+self.size[1])\r\n _im = self.crop(box, _im)\r\n if isinstance(_im, Image):\r\n return _im\r\n elif isinstance(_im, np.ndarray):\r\n return Image(_im)", "def orient(self,Y):\r\n self.orientation[Y]+=1\r\n if self.orientation[Y]>3:\r\n self.orientation[Y]=0\r\n if self.orientation[Y]<0:\r\n self.orientation[Y]=3\r\n self.can.delete(self.image_bateau[Y])\r\n self.image_bateau[Y]=self.create_image(self.img[self.orientation[Y]][Y],0,0)\r\n self.affichage(Y)", "def flip_axes(input_file, flipx=True, flipy=True, flipz=False,\n use_matrix=False, use_header=True):\n import os\n import numpy as np\n import nibabel as nb\n\n # Load image volume\n img = nb.load(input_file)\n dat = img.get_data()\n if use_matrix:\n mat = img.get_affine()\n if use_header:\n hdr = img.get_header()\n lenx, leny, lenz = np.shape(dat)\n dat_new = np.zeros((lenx, leny, lenz))\n\n # Flip x\n if flipx:\n for x in range(lenx):\n dat_new[lenx-1-x,:,:] = dat[x,:,:]\n\n # Flip y\n if flipy:\n for y in range(leny):\n dat_new[:,leny-1-y,:] = dat[:,y,:]\n\n # Flip z\n if flipz:\n for z in range(lenz):\n dat_new[:,:,lenz-1-z] = dat[:,:,z]\n\n # Save output\n out_file = 'reorient_' + os.path.basename(input_file)\n if use_matrix:\n if use_header:\n img = nb.Nifti1Image(dat_new, mat, hdr)\n else:\n img = nb.Nifti1Image(dat_new, mat)\n elif use_header:\n img = nb.Nifti1Image(dat_new, np.eye(4,4), hdr)\n else:\n img = nb.Nifti1Image(dat_new, np.eye(4,4))\n\n img.to_filename(out_file)\n\n return out_file", "def flip(self, bev_direction='horizontal', points=None):\n assert bev_direction in ('horizontal', 'vertical')\n if bev_direction == 'horizontal':\n self.tensor[:, 0::7] = -self.tensor[:, 0::7]\n if self.with_yaw:\n self.tensor[:, 6] = -self.tensor[:, 6] + np.pi\n elif bev_direction == 'vertical':\n self.tensor[:, 2::7] = -self.tensor[:, 2::7]\n if self.with_yaw:\n self.tensor[:, 6] = -self.tensor[:, 6]\n\n if points is not None:\n assert isinstance(points, (torch.Tensor, np.ndarray, BasePoints))\n if isinstance(points, (torch.Tensor, np.ndarray)):\n if bev_direction == 'horizontal':\n points[:, 0] = -points[:, 0]\n elif bev_direction == 'vertical':\n points[:, 2] = -points[:, 2]\n elif isinstance(points, BasePoints):\n points.flip(bev_direction)\n return points", "def horizontal_flip() -> Callable:\n return lambda img: TF.hflip(img)", "def flip_boxes_vertically(boxes):\n # Flip boxes vertically\n ymin, xmin, ymax, xmax = tf.split(value=boxes, num_or_size_splits=4, axis=1)\n flipped_ymin = tf.subtract(1.0, ymax)\n flipped_ymax = tf.subtract(1.0, ymin)\n 
flipped_boxes = tf.concat([flipped_ymin, xmin, flipped_ymax, xmax], axis=1)\n return flipped_boxes", "def rotateYRight(self):\n MV = self.MV\n MV[:3, 0] = 0, 1, 0 # 1st col is right vector, make it point along y axis\n # set middle middle and middle right values to zero:\n MV[1, 1] = 0\n MV[1, 2] = 0\n a = MV[0, 1] # grab top middle value\n b = np.sqrt(1 - a**2) # calc new complementary value to get normalized vectors\n if MV[2, 1] < 0:\n b = -b # keep b -ve, reduce jumping around of axes\n MV[2, 1] = b\n MV[0, 2] = b\n MV[2, 2] = -a # needs to be -ve of MV[0, 1]\n self.MV = MV", "def rotateYRight(self):\n MV = self.MV\n MV[:3, 0] = 0, 1, 0 # 1st col is right vector, make it point along y axis\n # set middle middle and middle right values to zero:\n MV[1, 1] = 0\n MV[1, 2] = 0\n a = MV[0, 1] # grab top middle value\n b = np.sqrt(1 - a**2) # calc new complementary value to get normalized vectors\n if MV[2, 1] < 0:\n b = -b # keep b -ve, reduce jumping around of axes\n MV[2, 1] = b\n MV[0, 2] = b\n MV[2, 2] = -a # needs to be -ve of MV[0, 1]\n self.MV = MV", "def flip(self, axes=None, inplace=False, i=False):\n d = _inplace_enabled_define_and_cleanup(self)\n super(DimensionCoordinate, d).flip(axes=axes, inplace=True)\n\n direction = d._custom.get(\"direction\")\n if direction is not None:\n d._custom[\"direction\"] = not direction\n\n return d", "def translate_image_vertical(image, translationFactor):\n\n # create vertical translation matrix\n tm = np.float32([[1, 0, 0],\n [0, 1, translationFactor]])\n return translate_image(image, tm)", "def setInverted(self, state=True):\n self.__inverted = state", "def horizontal_flip(img, steering_angle):\n flipped = cv2.flip(img, 1) # positive (>0) flip code means flipping about y-axis\n steering_angle = steering_angle * -1 # change sign of steering angle to account for flip\n return flipped, steering_angle", "def _rotate(self):\n \r\n if self.clr == 1: # (default rotation) \r\n # o o o o \r\n # o x x o x o o x\r\n # o o o o\r\n _colOffsets = [[-1,-1, 0, 0], [-1, 0, 0, 1], [ 1, 1, 0, 0], [ 1, 0, 0,-1]] #\r\n _rowOffsets = [[ 1, 0, 0,-1], [-1,-1, 0, 0], [-1, 0, 0, 1], [ 1, 1, 0, 0]] #\r\n elif self.clr == 2:\r\n # o o o o \r\n # o x o x x o x o\r\n # o o o o\r\n _colOffsets = [[-1,-1, 0, 0], [ 1, 0, 0,-1], [ 1, 1, 0, 0], [-1, 0, 0, 1]] #\r\n _rowOffsets = [[-1, 0, 0, 1], [-1,-1, 0, 0], [ 1, 0, 0,-1], [ 1, 1, 0, 0]] #\n \r\n elif self.clr == 3: # \r\n # o o o o \r\n # x o x o x o x o\r\n # o o o o\n \r\n _colOffsets = [[-1, 0, 0, 0], [-1,-1, 0, 1], [ 1, 0, 0, 0], [ 1, 1, 0,-1]] #\r\n _rowOffsets = [[ 1, 1, 0,-1], [-1, 0, 0, 0], [-1,-1, 0, 1], [ 1, 0, 0, 0]] #\n \r\n elif self.clr == 4:\r\n # o o o o \r\n # x o x o x o x o\r\n # o o o o\r\n _colOffsets = [[-1, 0, 0, 0], [1, 1, 0, -1], [1, 0, 0,0], [-1, -1, 0,1]]\n _rowOffsets = [[-1,-1, 0, 1], [-1,0, 0, 0], [1,1, 0,-1], [1,0, 0, 0]]\n \r\n elif self.clr == 5: # o o\r\n # o x \r\n # x o x o o o o o x o\r\n # o o \r\n _colOffsets = [[ 0, 0, 0, 0], [ 2, 1, 0,-1], [ 0, 0, 0, 0], [-2,-1, 0, 1]] #\r\n _rowOffsets = [[-2,-1, 0, 1], [ 0, 0, 0, 0], [ 2, 1, 0,-1], [ 0, 0, 0, 0]] #\r\n elif self.clr == 6: #\r\n # o o o \r\n # o x o x o x o o x o\r\n # o o o \r\n _colOffsets = [[ 0,-1, 0, 0], [-1, 0, 0, 1], [ 0, 1, 0, 0], [ 1, 0, 0,-1]] #\r\n _rowOffsets = [[ 1, 0, 0,-1], [ 0,-1, 0, 0], [-1, 0, 0, 1], [ 0, 1, 0, 0]] #\r\n elif self.clr == 7: # \r\n # o o o o o o o o\r\n # o x o x o x o x\r\n # \r\n _colOffsets = [[-1,-1, 0, 0], [-1,-1, 0, 0], [-1,-1, 0, 0], [-1,-1, 0, 0]] #@@\r\n _rowOffsets = [[ 0,-1, 0,-1], [ 0,-1, 
0,-1], [ 0,-1, 0,-1], [ 0,-1, 0,-1]] #@@\n \r\n self._colOffsets = _colOffsets[self._rot] #@@\r\n self._rowOffsets = _rowOffsets[self._rot] #@@\r\n self._update() #@@\r", "def flip_tile(self, tile, flag=False):\n selected_tile = self.stack[tile]\n\n if flag == True:\n if self.flags_remaining == 0:\n raise ValueError(\"No flags left\")\n else:\n selected_tile['flag']=True\n self.flags_remaining -= 1\n\n else:\n if selected_tile['flag'] == True:\n selected_tile['flag'] == False\n self.flags_remaining += 1\n\n else:\n self.stack[tile]['flip'] = True\n self.tiles_remaining -= 1\n if selected_tile['value'] == 'bomb':\n self.end_game()\n elif selected_tile['value'] == 0:\n self.blank_tile_cascade(tile)\n self.check_win()", "def sym_right_img(img):\n img = img.transpose(Image.FLIP_LEFT_RIGHT)\n return sym_left_img(img)", "def __flip(self, image, landmarks, run_prob=0.5):\n if np.random.rand() < run_prob:\n return image, landmarks\n image = np.fliplr(image)\n landmarks[:, 0] = image.shape[1] - landmarks[:, 0]\n landmarks = LandmarkHelper.flip(landmarks, landmarks.shape[0])\n return image, landmarks", "def random_flip(self, image, label, horizontal=False):\n\n flip = 1\n rand_float = uniform(0, 1)\n\n if horizontal:\n # 1 == vertical flip\n # 0 == horizontal flip\n flip = randint(0, 1)\n\n if rand_float > 0.5:\n image = cv2.flip(image, flip)\n label = cv2.flip(label, flip)\n\n return image, label", "def mirror(img):\n return img[:, ::-1]", "def flip(self, p):\n return -p", "def flip(imgs):\n x = random.choice([-1, 0, 1, 2])\n if x == 2:\n return imgs\n else:\n return [cv2.flip(img, x) for img in imgs]", "def flip(tensor, axis=None):\n raise NotImplementedError", "def __update_col_facedown(self, col):\n all_is_facedown = True\n # Loop all card in column\n for card in self.solitaire[col]:\n # If no card is represented\n if card == 0:\n break\n # If at least 1 card is faceup we return from method\n if not card.is_facedown:\n all_is_facedown = False\n return\n # If all card is facedown, we can flip and reviel a new card\n # so we have one less card facing down in that column\n if all_is_facedown:\n if self.col_facedown[col] > 0:\n print(f\"Vend kort i kolonne {col}\")\n self.col_facedown[col] -= 1", "def flipAndInvertImage2(self, A: List[List[int]]) -> List[List[int]]:\n def invert(x):\n if x:\n return 0\n else:\n return 1\n\n for row in A:\n left, right = 0, len(row)-1\n while left <= right:\n row[left], row[right] = invert(row[right]), invert(row[left])\n left += 1\n right -= 1\n\n return A", "def evert(self):\n for e in self.edges:\n self.invert()\n for f in self.faces:\n f.invert()", "def convert_flip(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n axis = op.attr(\"axis\")\n\n for i, ax in enumerate(axis):\n if i == 0:\n out = _op.reverse(x, ax)\n else:\n out = _op.reverse(out, ax)\n\n g.add_node(op.output(\"Out\")[0], out)", "def lr_flip(self):\n for g in self.grid:\n g.reverse()", "def set_invert_display(enable):\n if enable:\n send_command(0xA7)\n else:\n send_command(0xA6)" ]
[ "0.73209965", "0.70142406", "0.68551445", "0.66779745", "0.66683435", "0.666765", "0.65579903", "0.64983195", "0.64376324", "0.64257395", "0.6424698", "0.6389282", "0.63263226", "0.63111866", "0.63103646", "0.630324", "0.6244989", "0.61426216", "0.6092576", "0.60797507", "0.60258675", "0.59888154", "0.5978035", "0.59627616", "0.59263426", "0.590511", "0.5888896", "0.58764815", "0.5850129", "0.58486706", "0.5841394", "0.5838484", "0.581671", "0.58117527", "0.5808985", "0.5775403", "0.5746161", "0.56744534", "0.5664703", "0.5655347", "0.5641214", "0.56228924", "0.5619947", "0.55938333", "0.5588591", "0.55819005", "0.55508286", "0.5531665", "0.5519797", "0.54935944", "0.54807156", "0.54732925", "0.54693234", "0.54477584", "0.54453427", "0.54450387", "0.541083", "0.540221", "0.5396986", "0.5396422", "0.5393892", "0.5385508", "0.5379279", "0.53634775", "0.53513014", "0.5341227", "0.533776", "0.5329209", "0.5329209", "0.53242207", "0.53175616", "0.5283458", "0.5253648", "0.5227469", "0.52151585", "0.5211796", "0.52039546", "0.51980835", "0.519186", "0.5189389", "0.5189389", "0.51860654", "0.51535076", "0.51487494", "0.5145134", "0.513986", "0.51398367", "0.51343954", "0.51330453", "0.5132112", "0.5131454", "0.5123822", "0.5116976", "0.51101166", "0.51087505", "0.510457", "0.5091377", "0.5087573", "0.50843245", "0.50798744" ]
0.8431753
0
Set a flag indicating whether the image should be flipped horizontally
def setHorizontalFlip(self, flag): self.flipHorizontally = flag
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def flip_horizontal(img):\r\n #reading image\r\n im = Image.open(\"filename\")\r\n\r\n #flipping image horizontally\r\n newimg = im.transpose(PIL.Image.FLIP_LEFT_RIGHT)\r\n \r\n return img", "def flip_image_horizontal(image):\n return cv.flip(image, 0)", "def flip(self, horizontally):\n\t\tself.currentPixbuf = self.currentPixbuf.flip(horizontally)\n\t\tself.scaleCache[1] = 0\n\t\tgc.collect()", "def setVerticalFlip(self, flag):\n\t\tif self.ext.lower() in [\"png\", \"jpg\", \"jpeg\"]:\n\t\t\tself.flipVertically = not flag\n\t\telse:\n\t\t\tself.flipVertically = flag", "def flip(img, boolean=True):\n return pg.transform.flip(img, boolean, False)", "def flip(self, horizontal):\n try:\n self._is_transformable()\n horizontal = get_int(horizontal)\n except NotTransformable as e:\n self._app[\"statusbar\"].message(str(e) + \" flip\", \"error\")\n return\n except StringConversionError as e:\n self._app[\"statusbar\"].message(str(e), \"error\")\n return\n images = self.get_images(\"Flipped\")\n # Apply changes\n for fil in images:\n if fil not in self._changes:\n self._changes[fil] = [0, 0, 0]\n if horizontal:\n self._changes[fil][1] = \\\n (self._changes[fil][1] + 1) % 2\n else:\n self._changes[fil][2] = \\\n (self._changes[fil][2] + 1) % 2\n # Flip the image shown\n if self._app.get_path() in images:\n self.emit(\"changed\", \"flip\", horizontal)\n # Reload thumbnails of flipped images immediately\n if self._app[\"thumbnail\"].toggled:\n self.apply()", "def flip(self):\n \n if self.faceup:\n self.faceup = False\n else:\n self.faceup = True", "def flip(self, xbool, ybool):\n self._surf = pygame.transform.flip(self._surf, xbool, ybool).convert_alpha()", "def flip_image(img, vert=True):\n if vert:\n return img.transpose(Image.FLIP_TOP_BOTTOM)\n else:\n return img.transpose(Image.FLIP_LEFT_RIGHT)", "def flip(self):", "def __flip(img, flip, flip_type=Image.FLIP_LEFT_RIGHT):\n if flip:\n return img.transpose(flip_type)\n return img", "def flip(self, mode='h'):\n # TODO: Implement the flip function. 
Remember to record the boolean values is_horizontal_flip and\n # is_vertical_flip.\n if mode == 'h':\n self.is_horizontal_flip = True\n self.x = np.flipud(self.x)\n elif mode == 'v':\n self.is_vertical_flip = True\n self.x = np.fliplr(self.x)\n else:\n self.is_vertical_flip = True\n self.is_horizontal_flip = True\n self.x = np.fliplr(self.x)\n self.x = np.flipud(self.x)\n # raise NotImplementedError\n #######################################################################\n # #\n # #\n # TODO: YOUR CODE HERE #\n # #\n # #\n #######################################################################", "def mirrorImage(self):\n\n im = Image.open(self.ActivePhoto)\n out = im.transpose(PIL.Image.FLIP_LEFT_RIGHT)\n out.save(self.ActivePhoto)\n self.photo.setPixmap(QtGui.QPixmap(self.ActivePhoto))\n print (\"Flipped image\")", "def flip_faceup(self):\r\n self.faceup = True", "def img_add_flip(arr, flip_horiz = True, flip_vert = False):\r\n assert len(arr.shape) == 3, \"'arr' input array must be three dimensional\"\r\n arr_copy = arr.copy()\r\n if flip_horiz:\r\n arr_copy = np.fliplr(arr_copy)\r\n if flip_vert:\r\n arr_copy = np.flipud(arr_copy)\r\n return arr_copy", "def horizontal_flip(img, steering_angle):\n flipped = cv2.flip(img, 1) # positive (>0) flip code means flipping about y-axis\n steering_angle = steering_angle * -1 # change sign of steering angle to account for flip\n return flipped, steering_angle", "def horizontal_flip() -> Callable:\n return lambda img: TF.hflip(img)", "def flipNormals(self):\n self.flip = not self.flip", "def vflip(img):\n #if not _is_pil_image(img):\n # raise TypeError('img should be PIL Image. Got {}'.format(type(img)))\n\n return img.transpose(Image.FLIP_TOP_BOTTOM)", "def set_flipped(self, x, y):\n self.pieces[x + (y * self.width)].set_flipped()", "def flip_augmentation():\n return lambda image: ImageOps.flip(image)", "def set_flip(self, val):\n self.flip = val", "def flip(self, x: bool, y: bool) -> 'BaseImage':\n assert isinstance(x, bool)\n assert isinstance(y, bool)\n assert (x or y), 'at least one axis should be True'\n self._surface = pygame.transform.flip(self._surface, x, y)\n return self", "def td_flip(self):\n self.cw_rotate()\n self.cw_rotate()\n self.lr_flip()\n self.find_edges()", "def flip(self, bev_direction: str = 'horizontal') -> None:\n pass", "def check_flip(origin_imgs, result_imgs, flip_type):\n n, _, _, _ = np.shape(origin_imgs)\n if flip_type == 'horizontal':\n for i in range(n):\n if np.any(result_imgs[i] != np.fliplr(origin_imgs[i])):\n return False\n else:\n # yapf: disable\n for i in range(n):\n if np.any(result_imgs[i] != np.transpose(np.fliplr(np.transpose(origin_imgs[i], (1, 0, 2))), (1, 0, 2))): # noqa:E501\n return False\n # yapf: enable\n return True", "def flip(self):\n self.width, self.height = self.height, self.width", "def set_flip(self, flipconv):\n if flipconv is None:\n flipconv = 'astro' # default\n if flipconv == 'astro': self._flip = -1\n elif flipconv == 'geo': self._flip = 1\n else: raise ValueError(\"flipconv must be 'astro', 'geo' or None for default.\")", "def flip(img, code=0):\n\treturn cv2.flip(img, flipCode=code)", "def flip_horizontal(picture: Image) -> Image:\r\n \r\n hflip = copy(picture)\r\n width = get_width(picture)\r\n height = get_height(picture)\r\n \r\n for x in range(width):\r\n for y in range(height):\r\n original_hpixel = get_color(picture, x, y)\r\n hflip_pixel = (width - 1) - x\r\n set_color(hflip, hflip_pixel, y, original_hpixel)\r\n \r\n return hflip", "def flip_vertical(img):\r\n #reading 
image\r\n im = Image.open(\"filename\")\r\n\r\n #flipping image vertically\r\n newimg = im.transpose(PIL.Image.FLIP_TOP_BOTTOM)\r\n return img", "def flip_image(image):\n return cv2.flip(image, flipCode=1)", "def flip_horizontally(img, gt_boxes):\n flipped_img = tf.image.flip_left_right(img)\n flipped_gt_boxes = tf.stack([gt_boxes[..., 0],\n 1.0 - gt_boxes[..., 3],\n gt_boxes[..., 2],\n 1.0 - gt_boxes[..., 1]], -1)\n return flipped_img, flipped_gt_boxes", "def flip_image_vertical(image):\n return cv.flip(image, 1)", "def flip_image(image):\n\n return cv2.flip(image, 1)", "def flip(self):\n if self.is_face_up:\n arcade.load_texture(self.back_file)\n self.is_face_up = False\n else:\n arcade.load_texture(self.face_file)\n self.is_face_up = True", "def mirror(image):\n\n return cv2.flip(image, 1)", "def flip(self, xflip=True, yflip=False):\n self.drawer.flush()\n img = self.img\n if xflip: img = img.transpose(PIL.Image.FLIP_LEFT_RIGHT)\n if yflip: img = img.transpose(PIL.Image.FLIP_TOP_BOTTOM)\n self.img = img\n self.update_drawer_img()\n return self", "def setFlag(self):\n if self.inPlay and not self.shown:\n self.flag = not(self.flag)\n image_index = 11 if self.flag else 10\n self.configure(image = Tile.images[image_index])\n return 1 if self.flag else -1\n return 0", "def flip(self, row: int, col: int) -> None:\n self.state[row, col] = not self.state[row, col]", "def flip_image(image, label):\n # Flip the image\n cv2.flip(image, 1, image)\n\n # Flip the label\n label[1] = image.shape[1] - label[1]", "def fliplr(img):\n inv_idx = torch.arange(img.size(3) - 1, -1, -1).long() # N x C x H x W\n img_flip = img.index_select(3, inv_idx)\n\n return img_flip", "def _augment(img):\r\n return flip(img, axis=2)", "def random_flip(self, image, label, horizontal=False):\n\n flip = 1\n rand_float = uniform(0, 1)\n\n if horizontal:\n # 1 == vertical flip\n # 0 == horizontal flip\n flip = randint(0, 1)\n\n if rand_float > 0.5:\n image = cv2.flip(image, flip)\n label = cv2.flip(label, flip)\n\n return image, label", "def flip_image(image, direction):\n prevShape = image.shape\n image, reshaped = reshape_to_cv_format(image, False)\n image = cv.flip(image, direction)\n if reshaped: \n image = image.reshape(prevShape)\n return image", "def collate_fn_flip(self, batch):\n FT = torch.FloatTensor\n img, uvd_gt = zip(*batch)\n flip = random.randint(1, 10000)%2\n # Do flipping\n # 0 = left, 1 = right\n hand_side = 1\n if flip:\n hand_side = 0 \n\n new_img = []\n new_uvd = []\n for i, u in batch:\n if flip:\n i = i.transpose(Image.FLIP_LEFT_RIGHT)\n u[:, 0] = 0.999 - u[:, 0]\n i = np.asarray(i)\n i = i/255.0\n i = IMG.imgshape2torch(i)\n new_img.append(i)\n new_uvd.append(u)\n \n new_img = FT(new_img)\n new_uvd = FT(new_uvd)\n return new_img, new_uvd, hand_side", "def flip(self):\n self.align = self._left if self.align == self._right else self._right\n self.group.layout_all()", "def saveflip(image, fname, outpath, axis='x', preserve_name=False):\n if not preserve_name:\n fpath = genSavePath(outpath, fname, modstring=f\"mirror_{axis}\")\n else:\n fpath = genSavePath(outpath, fname)\n im = copy(image)\n if axis == 'x':\n im = im.transpose(Image.FLIP_LEFT_RIGHT)\n elif axis == 'y':\n im = im.transpose(Image.FLIP_TOP_BOTTOM)\n else:\n raise Exception('No valid axis procvided for flipping: {} - received value: {}'.format(fname, axis))\n try:\n im.save(fpath, subsample=\"keep\", qtables=image.quantization, optimize=True)\n\n except IOError as m:\n print( \"Flipped({}) image creation failed for: {}. 
\\nReason:{}\".format(axis,fname,m))", "def vflip(img, data_format='CHW'):\n _assert_image_tensor(img, data_format)\n\n h_axis = _get_image_h_axis(data_format)\n\n return img.flip(axis=[h_axis])", "def _augment(img):\n return flip(img, axis=2)", "def flip_frame(frame, flip_code):\n return cv.flip(frame, flip_code)", "def flip_image(image_path, saved_location):\n image_obj = Image.open(image_path)\n rotated_image = image_obj.transpose(Image.FLIP_LEFT_RIGHT)\n rotated_image.save(saved_location)", "def flip_axes(input_file, flipx=True, flipy=True, flipz=False,\n use_matrix=False, use_header=True):\n import os\n import numpy as np\n import nibabel as nb\n\n # Load image volume\n img = nb.load(input_file)\n dat = img.get_data()\n if use_matrix:\n mat = img.get_affine()\n if use_header:\n hdr = img.get_header()\n lenx, leny, lenz = np.shape(dat)\n dat_new = np.zeros((lenx, leny, lenz))\n\n # Flip x\n if flipx:\n for x in range(lenx):\n dat_new[lenx-1-x,:,:] = dat[x,:,:]\n\n # Flip y\n if flipy:\n for y in range(leny):\n dat_new[:,leny-1-y,:] = dat[:,y,:]\n\n # Flip z\n if flipz:\n for z in range(lenz):\n dat_new[:,:,lenz-1-z] = dat[:,:,z]\n\n # Save output\n out_file = 'reorient_' + os.path.basename(input_file)\n if use_matrix:\n if use_header:\n img = nb.Nifti1Image(dat_new, mat, hdr)\n else:\n img = nb.Nifti1Image(dat_new, mat)\n elif use_header:\n img = nb.Nifti1Image(dat_new, np.eye(4,4), hdr)\n else:\n img = nb.Nifti1Image(dat_new, np.eye(4,4))\n\n img.to_filename(out_file)\n\n return out_file", "def flip_rotate(img):\r\n\r\n choice = int(8*np.random.rand())\r\n \r\n if choice == 0:\r\n return img\r\n if choice == 1:\r\n return np.rot90(img, 1)\r\n if choice == 2:\r\n return np.rot90(img, 2)\r\n if choice == 3:\r\n return np.rot90(img, 3)\r\n if choice == 4:\r\n return np.flip(img, 0)\r\n if choice == 5:\r\n return np.flip(img, 1)\r\n if choice == 6:\r\n return np.flip(np.rot90(img, 1), 0)\r\n if choice == 7:\r\n return np.flip(np.rot90(img, 1), 1)", "def vflip(img):\n if not _is_numpy(img):\n raise TypeError('img should be Numpy Image. 
Got {}'.format(type(img)))\n\n return np.flipud(img)", "def fotoXO(self):\n\t\ttry:\n\t\t\timageP = self.cam.get_image()\n\t\t\tself.image = pygame.transform.flip(imageP, True, False)\n\t\texcept:\n\t\t\tpass\n\t\treturn self.image", "def flip(self) -> int:\n self.flags = ~(self.flags)\n return self.flags", "def flip_x(self):\n self.x_lim_helper.flip_limits()", "def flip_x(self):\n self.x_lim_helper.flip_limits()", "def flipMat(image):\n retImage = cv.CreateMat(image.rows, image.cols, image.type)\n height = image.rows\n transMatrix = cv.CreateMatHeader(2, 3, cv.CV_32FC1)\n narr = numpy.array([[1, 0, 0], [0, -1, height]], numpy.float32)\n cv.SetData(transMatrix, narr, cv.CV_AUTOSTEP)\n cv.WarpAffine(image, retImage, transMatrix)\n return retImage", "def reversible(self) -> bool:\n xy_row = np.column_stack(\n (\n np.linspace(\n -self.imgsz[0] / (2 * self.f[0]),\n self.imgsz[0] / (2 * self.f[0]),\n int(self.imgsz[0]),\n ),\n np.zeros(int(self.imgsz[0])),\n )\n )\n dxy = self._distort(xy_row)\n continuous_row = np.all(dxy[1:, 0] >= dxy[:-1, 0])\n xy_col = np.column_stack(\n (\n np.zeros(int(self.imgsz[1])),\n np.linspace(\n -self.imgsz[1] / (2 * self.f[1]),\n self.imgsz[1] / (2 * self.f[1]),\n int(self.imgsz[1]),\n ),\n )\n )\n dxy = self._distort(xy_col)\n continuous_col = np.all(dxy[1:, 1] >= dxy[:-1, 1])\n return continuous_row and continuous_col", "def flip(tensor, axis=None):\n raise NotImplementedError", "def flip_h(image, gt):\n result_im = cv2.flip(image, 1)\n result_gt = cv2.flip(gt, 1)\n\n return result_im, result_gt", "def random_flip(image ):\n if random.randint(0,1):\n image = flipud(image) # vertical flip\n if random.randint(0,1):\n image = fliplr(image) # horizontal flip\n return image", "def vflip(img):\n if not _is_numpy_image(img):\n raise TypeError('img should be nparray Image. Got {}'.format(type(img)))\n\n return cv2.flip(img, 0)", "def __flip(self, image, landmarks, run_prob=0.5):\n if np.random.rand() < run_prob:\n return image, landmarks\n image = np.fliplr(image)\n landmarks[:, 0] = image.shape[1] - landmarks[:, 0]\n landmarks = LandmarkHelper.flip(landmarks, landmarks.shape[0])\n return image, landmarks", "def flip(self, axes=None, inplace=False, i=False):\n d = _inplace_enabled_define_and_cleanup(self)\n super(DimensionCoordinate, d).flip(axes=axes, inplace=True)\n\n direction = d._custom.get(\"direction\")\n if direction is not None:\n d._custom[\"direction\"] = not direction\n\n return d", "def mirror_horiz(pixels):\n copy = blank_image(len(pixels), len(pixels[0])) \n for r in range(len(pixels)):\n for c in range(len(pixels[0])):\n copy[r][c] = pixels[r][c]\n a = len(pixels[0])//2\n for r in range(len(pixels)):\n for c in range(a, len(pixels[0])):\n copy[r][c] = copy[r][-c-1]\n return copy", "def _move_left(self):\n self.x -= self.settings.mario_speed\n if self.settings.direction == 1:\n self.image = pygame.transform.flip(self.image, True, False)\n self.settings.direction = -1", "def random_horizontal_flip(image, boxes=None, masks=None, seed=None):\n return preprocessor.random_horizontal_flip(image, boxes, masks, seed=seed)", "def flip(self, flip_x=True, flip_y=True):\n self._version += 1\n self._surf = pygame.transform.flip(self._surf,\n flip_x, flip_y).convert_alpha()\n return self", "def hflip(img):\n #if not _is_pil_image(img):\n # raise TypeError('img should be PIL Image. 
Got {}'.format(type(img)))\n\n return img.transpose(Image.FLIP_LEFT_RIGHT)", "def flip(flip_x=False, flip_y=False): \r\n x, y = 1, 1\r\n if flip_x:\r\n x = np.random.choice((-1,1))\r\n if flip_y:\r\n y = np.random.choice((-1,1))\r\n return np.array(((x, 0, 0),\r\n (0, y, 0),\r\n (0, 0, 1)), dtype=np.float)", "def problem1():\n\n img = load_image(\"data/a1p1.png\")\n display_image(img)\n\n save_as_npy(\"a1p1.npy\", img)\n\n img1 = load_npy(\"a1p1.npy\")\n display_image(img1)\n\n img2 = mirror_horizontal(img1)\n display_image(img2)\n\n display_images(img1, img2)", "def image_augmentation(img):\n return np.fliplr(img)", "def flip(self, x, y):\n self.pieces[x + (y * self.width)].flip()", "def image_flip(image, direction):\n im_size = image.get_shape().as_list()\n if direction == 'left_right':\n flip_function = tf.image.random_flip_left_right\n elif direction == 'up_down':\n flip_function = tf.image.random_flip_up_down\n else:\n raise NotImplementedError\n\n if len(im_size) == 3:\n return flip_function(image)\n elif len(im_size) == 4:\n if im_size[-1] > 1:\n raise NotImplementedError\n trans_image = tf.transpose(tf.squeeze(image), [1, 2, 0])\n flip_image = tf.expand_dims(\n tf.transpose(flip_function(trans_image), [2, 0, 1]), axis=-1)\n return flip_image\n else:\n raise NotImplementedError", "def flipAndInvertImage(self, A: List[List[int]]) -> List[List[int]]:\n return [list(map(lambda x: 0 if x else 1, row[::-1])) for row in A]", "def readout_flipped(self, iamp):\n flipped = ct.c_int()\n self.lib.IsReadoutFlippedByAmplifier(ct.c_int(iamp),\n ct.pointer(flipped))\n return bool(flipped.value)", "def flip(imgs):\n x = random.choice([-1, 0, 1, 2])\n if x == 2:\n return imgs\n else:\n return [cv2.flip(img, x) for img in imgs]", "def flip_vertical(original_image: Image) -> Image :\r\n \r\n new_image = copy(original_image)\r\n \r\n pixel_width = get_width(original_image)\r\n pixel_height = get_height(original_image) \r\n\r\n \r\n for x in range(pixel_width) :\r\n for y in range(pixel_height) :\r\n original_vertical_pixel = get_color(original_image, x, y)\r\n opposite_vertical_pixel = pixel_height - 1 - y\r\n set_color(new_image, x, opposite_vertical_pixel, original_vertical_pixel)\r\n \r\n return new_image", "def flip_icon(icon):\n\n return (\"X\" if icon == \"O\" else \"O\")", "def test_random_horizontal_flip(dummy_input):\n # Test the 2D image: H, W, C\n image, label = dummy_input(image_size=(512, 512, 3),\n label_size=(512, 512, 1))\n transform = RandomHorizontalFlip( prob=1)\n _image, _label = transform(image, label)\n _image, _label = transform(_image, _label)\n assert (image == _image).all()\n assert (label == _label).all()\n \n # Test the 3D image: H, W, D, C\n image, label = dummy_input(image_size=(512, 512, 20, 3),\n label_size=(512, 512, 20, 1))\n transform = RandomHorizontalFlip(prob=1)\n _image, _label = transform(image, label)\n _image, _label = transform(_image, _label)\n assert (image == _image).all()\n assert (label == _label).all()", "def lr_flip(self):\n for g in self.grid:\n g.reverse()", "def flip_ras_lps(vol, affine):\n vol_flipped = np.flip(vol, (0,1))\n affine_flipped = affine.copy()\n affine_flipped[0,-1] = (-1 * affine @ np.array([vol.shape[0]-1,0,0,1]))[0]\n affine_flipped[1,-1] = (-1 * affine @ np.array([0,vol.shape[1]-1,0,1]))[1]\n\n return vol_flipped, affine_flipped", "def _check_flip(origin_imgs, result_imgs):\n h, w, c = origin_imgs.shape\n for i in range(h):\n for j in range(w):\n for k in range(c):\n if result_imgs[i, j, k] != origin_imgs[i, w - 1 - j, k]:\n return 
False\n return True", "def test_flip_vertical() -> None:\n original = create_image(3, 2)\n set_color(original, 0, 0, create_color(0, 0, 0))\n set_color(original, 1, 0, create_color(90, 90, 90))\n set_color(original, 2, 0, create_color(255, 255, 255))\n set_color(original, 0, 1, create_color(10, 10, 10))\n set_color(original, 1, 1, create_color(0, 0, 0))\n set_color(original, 2, 1, create_color(90, 90, 90))\n \n expected = create_image(3, 2)\n set_color(expected, 0, 0, create_color(10, 10, 10))\n set_color(expected, 1, 0, create_color(0, 0, 0))\n set_color(expected, 2, 0, create_color(90, 90, 90))\n set_color(expected, 0, 1, create_color(0, 0, 0))\n set_color(expected, 1, 1, create_color(90, 90, 90))\n set_color(expected, 2, 1, create_color(255, 255, 255))\n \n flipped_vertical = flip_vertical(original)\n \n for x, y, col in flipped_vertical: # tests each colour of each pixel of the filtered sample image and compares it to the expected image\n check_equal('Checking pixel @(' + str(x) + ', ' + str(y) + ')', col, get_color(expected, x, y))", "def hflip(self):\n self.leftimg, self.rightimg = self.rightimg, self.leftimg", "def rotate_right_90(img):\r\n #reading image\r\n im = Image.open(\"filename\")\r\n \r\n #flipping image 90 degrees\r\n newimg = im.transpose(PIL.Image.ROTATE_90)\r\n \r\n return img", "def flipAndInvertImage2(self, A: List[List[int]]) -> List[List[int]]:\n def invert(x):\n if x:\n return 0\n else:\n return 1\n\n for row in A:\n left, right = 0, len(row)-1\n while left <= right:\n row[left], row[right] = invert(row[right]), invert(row[left])\n left += 1\n right -= 1\n\n return A", "def transform(self, mat: TxMatrix) -> None:\n self.rotation = self.rotation - int(0x10000 * mat.angle / math.pi / 2) & 0xFFFF\n if mat.flipped:\n self.flip_y = not self.flip_y\n self.rotation = -self.rotation & 0xFFFF", "def set_invert_display(enable):\n if enable:\n send_command(0xA7)\n else:\n send_command(0xA6)", "def flip(self, p):\n return -p", "def toggle_back_mid(self, checked):\n if self.material_background:\n if checked:\n image = self.parent.data.materials[self.tile.background_material].midimage\n else:\n image = self.parent.data.materials[self.tile.background_material].bgimage\n self.material_background.setPixmap(image)\n if self.mod_background:\n if checked:\n image = self.parent.data.matmods[self.tile.background_mod].midimage\n else:\n image = self.parent.data.matmods[self.tile.background_mod].bgimage\n self.mod_background.setPixmap(image)", "def toggle_wireframe(self):\n self.view['wireframe'] = not self.view['wireframe']\n self.update_flags()", "def sym_left_img(img):\n cropped = img.crop(box=(0, 0, img.width / 2, img.height))\n mirror = cropped.transpose(Image.FLIP_LEFT_RIGHT)\n combine = Image.new('RGB', (cropped.width * 2, cropped.height), 'white')\n combine.paste(cropped, (0, 0, cropped.width, cropped.height))\n combine.paste(mirror, (cropped.width, 0, cropped.width * 2, cropped.height))\n return combine", "def flip_row(rows, i):\n for j in range(order):\n rows[i][j] = -rows[i][j]", "def flip_tile(self, tile, flag=False):\n selected_tile = self.stack[tile]\n\n if flag == True:\n if self.flags_remaining == 0:\n raise ValueError(\"No flags left\")\n else:\n selected_tile['flag']=True\n self.flags_remaining -= 1\n\n else:\n if selected_tile['flag'] == True:\n selected_tile['flag'] == False\n self.flags_remaining += 1\n\n else:\n self.stack[tile]['flip'] = True\n self.tiles_remaining -= 1\n if selected_tile['value'] == 'bomb':\n self.end_game()\n elif selected_tile['value'] == 0:\n 
self.blank_tile_cascade(tile)\n self.check_win()", "def collate_fn_no_flip(self, batch):\n FT = torch.FloatTensor\n img, uvd_gt = zip(*batch)\n\n new_img = []\n new_uvd = []\n for i, u in batch:\n if self.pred_img_side == 'left':\n i = i.transpose(Image.FLIP_LEFT_RIGHT)\n u[:, 0] = 0.999 - u[:, 0]\n i = np.asarray(i)\n i = i/255.0\n i = IMG.imgshape2torch(i)\n new_img.append(i)\n new_uvd.append(u)\n \n new_img = FT(new_img)\n new_uvd = FT(new_uvd)\n return new_img, new_uvd", "def random_horizontal_flip(img, angle, prob=0.5):\n\n if np.random.rand() <= prob:\n img = cv2.flip(img, 1)\n angle = -angle\n\n return img, angle" ]
[ "0.7160271", "0.6985837", "0.6883737", "0.6870575", "0.6787338", "0.67704475", "0.6695351", "0.6677402", "0.6635156", "0.6454874", "0.64387655", "0.64263225", "0.6393341", "0.6339728", "0.63132757", "0.6289396", "0.61795443", "0.61762446", "0.61736774", "0.6157254", "0.6147425", "0.61260164", "0.61075574", "0.61018664", "0.6062592", "0.6054945", "0.60410434", "0.60015607", "0.6001088", "0.5991457", "0.5967564", "0.5948894", "0.59085155", "0.590566", "0.5890086", "0.58447194", "0.58341396", "0.5822114", "0.57834285", "0.57498354", "0.57327807", "0.57313967", "0.5724974", "0.5715353", "0.5708785", "0.56937754", "0.56833625", "0.56514686", "0.56082076", "0.56058115", "0.55757236", "0.5573182", "0.55610204", "0.5543094", "0.5506922", "0.54723847", "0.54723716", "0.5471844", "0.5471844", "0.5471399", "0.54678416", "0.5464779", "0.5457303", "0.5455988", "0.54501957", "0.5448164", "0.54275995", "0.5426481", "0.5411117", "0.5397402", "0.53971636", "0.5388936", "0.5365111", "0.53272873", "0.528007", "0.5275459", "0.52692276", "0.5265063", "0.52649516", "0.5263534", "0.5262168", "0.5259047", "0.5256096", "0.52495414", "0.5247168", "0.5233755", "0.52324945", "0.5231468", "0.52275014", "0.5202341", "0.51982844", "0.5189252", "0.5184262", "0.5180183", "0.51727295", "0.5164444", "0.5163008", "0.5152905", "0.51518524", "0.51453954" ]
0.83549047
0
check that each image in the list has the same dimensions
def checkImageDimensions(self, filenames): s = None hashStr = filenames[:] hashStr.sort() hashStr = str(hashStr) # check to see if there's already a result of the check for these filenames in the cache if hashStr in self.dimensionCheck: Logging.info("Using cached result for dimensions check: %s"%(str(self.dimensionCheck[hashStr]))) return self.dimensionCheck[hashStr] for file in filenames: if file not in self.imageDims: print "Trying to open",type(file) try: self.ext = file.split(".")[-1].upper() if self.ext == "TIF": self.ext = "TIFF" if self.ext == "JPG": self.ext = "JPEG" if self.ext == "VTI": reader = vtk.vtkXMLImageReader() else: reader = eval("vtk.vtk%sReader()"%self.ext) reader.SetFileName(file) reader.UpdateInformation() except IOError, ex: traceback.print_exc() raise Logging.GUIError("Cannot open image file", "Cannot open image file %s" % file) extent = reader.GetDataExtent() fSize = (extent[1],extent[3]) self.imageDims[file] = fSize else: fSize = self.imageDims[file] if s and fSize != s: x0, y0 = s x1, y1 = fSize self.dimensionCheck[hashStr] = False return 0 s = fSize fn = file self.dimensionCheck[hashStr] = True return 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def images_are_present(file_info):\n currentdir = os.path.join(WORKDIR, file_info['folder'])\n if not os.path.exists(currentdir):\n return False\n count = len([x for x in os.listdir(currentdir) if x.endswith('.png')])\n if count != file_info['size']:\n print([x for x in os.listdir(currentdir) if x.endswith('.png')])\n print('Count does not match')\n print(count)\n print(file_info['size'])\n return False\n return True", "def _get_consistent_shape(images: Iterable):\n dim0s = []\n dim1s = []\n\n for img in images:\n dim0s.append(img.shape[0])\n dim1s.append(img.shape[1])\n\n assert len(set(dim0s)) == 1 and len(set(dim1s)) == 1, 'Inconsistent shapes.'\n\n return dim0s[0], dim1s[0]", "def are_compatible_imgs(one_img, another_img):\n return have_same_shapes(one_img, another_img)", "def number_of_images_valid():\r\n if number_of_images_a_valid() and number_of_images_b_valid():\r\n return True\r\n else:\r\n return False", "def image_size_exact(self, msg, img_type, height, width,\n target=None):\n height = int(height)\n width = int(width)\n sizes = self._get_sizes(msg, img_type)\n for img in sizes:\n if (img['width'], img['height']) == (width, height):\n return True\n return False", "def number_of_images_a_valid():\r\n counter = 0\r\n with os.scandir(os.path.join(dir_path, \"inputs\", \"type_a\")) as filepaths:\r\n for path in filepaths:\r\n extension = os.path.splitext(path)[1].lower()\r\n if extension == \".png\" or extension == \".jpg\":\r\n counter += 1\r\n if counter >= int(number_of_images_a.get()):\r\n return True\r\n else:\r\n messagebox.showwarning(\"Invalid Image Inputs\", (\r\n \"Not enough images of type a to create \"\r\n \"requested grid.\"))\r\n return False", "def check_image_size(image_folder_path, height=None, width=None):\n total_img_list = glob.glob(os.path.join(image_folder_path, \"*\"))\n counter = 0\n for image in tqdm(total_img_list, desc=\"Checking in progress\"):\n try:\n img = cv2.imread(image)\n\n # Review Comments:\n #\n # I assume you were trying to initialize width and height\n # if they are not defined by the caller. 
I have rewritten\n # your code to do this successfully - before you were just\n # comparing the height and width of each image with\n # itself.\n if height is None:\n height = img.shape[1]\n\n if width is None:\n width = img.shape[0]\n\n if not (height == img.shape[1] and width == img.shape[0]):\n counter += 1\n # Review Comments: What exception are you trying to catch here?\n # In general, you should not have a bare except block.\n except:\n print(\"this {} is corrupted\".format(image))\n continue\n return counter", "def _autocheck_dimensions(self):\n # W dimensions check list\n assert len(self.W.shape) == 2, f\"W shape should be (N, N) but is {self.W.shape}.\"\n assert self.W.shape[0] == self.W.shape[1], f\"W shape should be (N, N) but is {self.W.shape}.\"\n\n # Win dimensions check list\n assert len(self.Win.shape) == 2, f\"Win shape should be (N, input) but is {self.Win.shape}.\"\n err = f\"Win shape should be ({self.W.shape[1]}, input) but is {self.Win.shape}.\"\n assert self.Win.shape[0] == self.W.shape[0], err\n\n # Wout dimensions check list\n assert len(self.Wout.shape) == 2, f\"Wout shape should be (output, nb_states) but is {self.Wout.shape}.\"\n nb_states = self.Win.shape[1] + self.W.shape[0] + 1 if self.use_raw_inp else self.W.shape[0] + 1\n err = f\"Wout shape should be (output, {nb_states}) but is {self.Wout.shape}.\"\n assert self.Wout.shape[1] == nb_states, err\n\n # Wfb dimensions check list\n if self.Wfb is not None:\n assert len(self.Wfb.shape) == 2, f\"Wfb shape should be (input, output) but is {self.Wfb.shape}.\"\n err = f\"Wfb shape should be ({self.Win.shape[0]}, {self.Wout.shape[0]}) but is {self.Wfb.shape}.\"\n assert (self.Win.shape[0],self.Wout.shape[0]) == self.Wfb.shape, err", "def _filter_imgs(self, min_size=32):\n valid_inds = []\n for i, img_info in enumerate(self.img_infos):\n if min(img_info['width'], img_info['height']) >= min_size:\n valid_inds.append(i)\n return valid_inds", "def _filter_imgs(self, min_size=32):\n valid_inds = []\n for i, img_info in enumerate(self.img_infos):\n if min(img_info['width'], img_info['height']) >= min_size:\n valid_inds.append(i)\n return valid_inds", "def ensure_images_have_the_same_properties(images, properties):\n for prop in properties:\n vals = np.asarray([getattr(image, prop) for image in images])\n if not all(vals == vals[0]):\n raise ValueError(f'To be coadded, images must all have the same {prop}. 
'\n f'These images had: {[(image.id, getattr(image, prop)) for image in images]}.')", "def _check_dimensions(self, workspace_to_check):\n for i in range(self._raw_ws.getNumDims()):\n if self._raw_ws.getDimension(i).getNBins() != workspace_to_check._raw_ws.getDimension(i).getNBins():\n return False\n return True", "def check_images():\n\n print(f'Looking for duplicate images...')\n\n for image in images_in_directory:\n duplicate = check_image_for_duplicates(image)\n\n if (duplicate):\n print(f'Found {duplicate} to be a duplicate image of: {image}')\n remove_image(duplicate)\n pass", "def check_image_dimensions(image_paths, image_height, image_width):\n logging.info('Using image height, width %s', str((image_height, image_width)))\n\n bad_images = []\n\n for path in image_paths:\n logging.info('Trying to read image %s', path)\n image = microscopeimagequality.dataset_creation.read_16_bit_greyscale(path)\n\n if image.shape[0] < image_height or image.shape[1] < image_width:\n bad_images.append(path)\n logging.info('Image %s dimension %s is too small.', path, str(image.shape))\n\n logging.info('Done checking images')\n\n logging.info('Found %d bad images.', len(bad_images))\n\n if bad_images:\n raise ValueError('Found %d bad images! \\n %s' % (len(bad_images), '\\n'.join(bad_images)))", "def __len__(self):\n return len(self.images)", "def __len__(self):\n return len(self.images)", "def check_shapes(arrs):\r\n shps = [i.shape for i in arrs]\r\n eq = np.all(np.array([shps[0] == i for i in shps[1:]]))\r\n err = \"Arrays arr not of the same shape...\"\r\n if not eq:\r\n raise ValueError(\"{}\\n{}\".format(err, shps))", "def _filter_imgs(self, min_size=32):\n valid_inds = []\n for i, img_info in enumerate(self.img_infos):\n # Filter out empty images\n if img_info['ann']['bboxes'].shape[0] > 0:\n if min(img_info['width'], img_info['height']) >= min_size:\n valid_inds.append(i)\n return valid_inds", "def n_compare(im_list, label_list, figsize=(12,8), zoom_box_coord=None,\n zoom_box_color='red'):\n # Error checks.\n # =============\n # 1. Matching lengths of image and lebal lists.\n if len(im_list) != len(label_list):\n raise ValueError(f\"\"\"List length mismatch: {len(im_list)} != {len(label_list)}. \"\"\"\n \"\"\"Length of arguments 'im_list' and 'label_list' must be the same.\"\"\")\n\n # 2. At least 2 images are present in the input list.\n if len(im_list) < 2:\n raise ValueError(f\"Input list to argument 'im_list' must contain 2 or more items.\")\n \n # Infer values for plotting.\n n_rows = 1 if zoom_box_coord is None else 2\n n_cols = len(im_list)\n scaling_factor_x = im_list[1].shape[1] / im_list[0].shape[1]\n scaling_factor_y = im_list[1].shape[0] / im_list[0].shape[0]\n \n # Arrange and display images.\n # ===========================\n _, ax = plt.subplots(nrows=n_rows, ncols=n_cols, figsize=figsize)\n\n for i, im_i in enumerate(im_list):\n if n_rows == 1:\n # 1. If only a single row (no zoom).\n ax[i].imshow(im_i)\n ax[i].set_title(label_list[i])\n ax[i].get_xaxis().set_visible(False)\n ax[i].get_yaxis().set_visible(False)\n else:\n # 2. If 2 rows are to be plotted (zoom is applied).\n ax[0][i].imshow(im_i)\n ax[0][i].set_title(label_list[i])\n ax[0][i].get_xaxis().set_visible(False)\n ax[0][i].get_yaxis().set_visible(False)\n\n # 2.1. 
Handle box coordinates and scaling thereof.\n if i == 0:\n xy = (zoom_box_coord[0], zoom_box_coord[1])\n rect = patches.Rectangle(xy,\n zoom_box_coord[2],\n zoom_box_coord[3],\n linewidth=1,edgecolor='red',facecolor='none')\n new_zoom_coords = (xy[0], xy[1], zoom_box_coord[2], zoom_box_coord[3])\n else:\n xy = (int(zoom_box_coord[0]*scaling_factor_x), int(zoom_box_coord[1]*scaling_factor_y))\n rect = patches.Rectangle(xy,\n int(zoom_box_coord[2]*scaling_factor_x),\n int(zoom_box_coord[3]*scaling_factor_y),\n linewidth=1,edgecolor='red',facecolor='none')\n new_zoom_coords = (xy[0],\n xy[1],\n int(zoom_box_coord[2]*scaling_factor_x),\n int(zoom_box_coord[3]*scaling_factor_y)\n )\n ax[0][i].add_patch(rect)\n \n # 2.2. Draw zoomed images.\n ax[1][i].imshow(subsample_image(im_i, (new_zoom_coords)))\n ax[1][i].get_xaxis().set_visible(False)\n ax[1][i].get_yaxis().set_visible(False)\n \n plt.tight_layout()\n plt.show()", "def get_images_bytesize_match(self, images):\r\n cnt = 0\r\n MAX_BYTES_SIZE = 15728640\r\n good_images = []\r\n for image in images:\r\n if cnt > 30:\r\n return good_images\r\n src = self.parser.getAttribute(image, attr='src')\r\n src = self.build_image_path(src)\r\n local_image = self.get_local_image(src)\r\n if local_image:\r\n bytes = local_image.bytes\r\n if (bytes == 0 or bytes > self.images_min_bytes) \\\r\n and bytes < MAX_BYTES_SIZE:\r\n good_images.append(image)\r\n else:\r\n images.remove(image)\r\n cnt += 1\r\n return good_images if len(good_images) > 0 else None", "def _filter_imgs(self, min_size=32):\n\n valid_inds = []\n for i, img_info in enumerate(self.data_infos):\n if min(img_info[\"width\"], img_info[\"height\"]) < min_size:\n continue\n if self.filter_empty_gt and len(img_info[\"ann\"][\"bboxes\"]) > 0:\n valid_inds.append(i)\n else:\n valid_inds.append(i)\n\n return valid_inds", "def _filter_imgs(self, min_size=32):\r\n valid_inds = []\r\n ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())\r\n for i, img_info in enumerate(self.img_infos):\r\n if self.filter_empty_gt and self.img_ids[i] not in ids_with_ann:\r\n continue\r\n if min(img_info['width'], img_info['height']) >= min_size:\r\n valid_inds.append(i)\r\n return valid_inds", "def assert_equal_shapes(numpy_arrays: list):\n\n if len(numpy_arrays) < 2:\n return\n\n shapes = np.asarray([np.shape(_arr) for _arr in numpy_arrays]).astype(float)\n mean = np.mean(shapes, axis=0)\n for i in range(len(shapes)):\n shapes[i, :] = shapes[i, :] - mean\n\n if not np.sum(np.abs(shapes)) <= 1e-5:\n raise AssertionError(\"The given volumes did not all have the same\"\n \" dimensions. Please double check the simulation\"\n f\" parameters. 
Called from {inspect.stack()[1].function}\")", "def __len__(self):\n\n return len(self.images)", "def _check_images_and_labels(self, image_dir, label_dir):\n return len(os.listdir(image_dir))==len(os.listdir(label_dir))", "def is_valid(box, img):\n valid_width = box['top_left_x'] > 0 and box['bottom_right_x'] < img.shape[1]\n valid_height = box['top_left_y'] > 0 and box['bottom_right_y'] < img.shape[0]\n return valid_width and valid_height", "def _filter_imgs(self, min_size=32):\n valid_inds = []\n ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())\n for i, img_info in enumerate(self.img_infos):\n if self.img_ids[i] not in ids_with_ann:\n continue\n if min(img_info['width'], img_info['height']) >= min_size:\n valid_inds.append(i)\n return valid_inds", "def _filter_imgs(self, min_size=32):\n valid_inds = []\n ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())\n for i, img_info in enumerate(self.img_infos):\n if self.img_ids[i] not in ids_with_ann:\n continue\n if min(img_info['width'], img_info['height']) >= min_size:\n valid_inds.append(i)\n return valid_inds", "def _filter_imgs(self, min_size=32):\n valid_inds = []\n ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())\n for i, img_info in enumerate(self.img_infos):\n if self.img_ids[i] not in ids_with_ann:\n continue\n if min(img_info['width'], img_info['height']) >= min_size:\n valid_inds.append(i)\n return valid_inds", "def check_shape(self):\r\n if np.array(self.img).shape != (1536, 2048, 3):\r\n raise BadShape", "def number_of_images_b_valid():\r\n counter = 0\r\n with os.scandir(os.path.join(dir_path, \"inputs\", \"type_b\")) as filepaths:\r\n for path in filepaths:\r\n extension = os.path.splitext(path)[1].lower()\r\n if extension == \".png\" or extension == \".jpg\":\r\n counter += 1\r\n if ((number_of_images_b.get() == \"\") or\r\n (counter >= int(number_of_images_b.get()))):\r\n return True\r\n else:\r\n messagebox.showwarning(\"Invalid Image Inputs\", (\r\n \"Not enough images of type b to create \"\r\n \"requested grid.\"))\r\n return False", "def test_complex(self):\n image = self.design.layout.layers[0].images[2]\n assert len(image.shape_instances) == 3", "def checksImages(self):\n metadata=[]\n for image in self.meta['sources']:\n with rasterio.open(image) as src:\n metaData=src.meta\n \n assert metaData['driver'] == 'GTiff', \"Driver is not supported: {0}\".format(metaData['driver'])\n assert metaData['count'] == len(self.meta['bandNames']), \"Nbands incorrect, expected: {0}, {1} provided\".format(metaData['count'],len(self.meta['bandNames']))\n \n metadata.append({'dtype': metaData['dtype'], 'driver': metaData['driver'], 'nodata': metaData['nodata'], 'nBands': metaData['count'],'crs': src.crs.to_string()})\n \n assert len(set([item['dtype'] for item in metadata])) == 1, \"Images list dtypes aren't compatibles. Expected: 1, {1} provided\".format(metaData['count'],len(set([item['dtype'] for item in metadata])))\n assert len(set([item['driver'] for item in metadata])) == 1, \"Images list drivers aren't compatibles. Expected: 1, 1 provided\".format(metaData['count'],len(set([item['driver'] for item in metadata])))\n assert len(set([item['nodata'] for item in metadata])) == 1, \"Images list nodata values aren't compatibles. Expected: 1, {1} provided\".format(metaData['count'],len(set([item['nodata'] for item in metadata])))\n assert len(set([item['nBands'] for item in metadata])) == 1, \"Images list nBands number aren't compatibles. 
Expected: 1, {1} provided\".format(metaData['count'],len(set([item['nBands'] for item in metadata])))\n assert len(set([item['crs'] for item in metadata])) == 1, \"Images list crs aren't compatibles. Expected: 1, {1} provided\".format(metaData['count'],len(set([item['crs'] for item in metadata]))) \n return metadata[0]", "def test_correct_image_size(location):\n chunkloc = resave_to_chunks(root=location[\"dir\"],\n n_imgs=10,\n output_stem=location[\"stem\"])\n\n loaded = np.load(chunkloc)\n assert len(loaded.files) > 0\n\n first = loaded[loaded.files[0]]\n assert first.shape != ()\n assert first.shape == (520, 696)", "def check_qim_dim_match(cls, qim, dim):\n return len(qim) == len(dim)", "def __len__(self):\n return self.images.size(0)", "def check_image_size(img_name, img_path):\n \n try:\n \n # Open image\n img = Image.open(img_name)\n \n # Determine size of image\n width, height = img.size\n \n # Check if image is square\n if (width==height):\n is_square = True\n else:\n is_square = False\n \n # Check for channels in image\n img_list = list(img.getdata())\n img_max = max(img_list)\n if (type(img_max)==int):\n is_single_channel = True\n else:\n is_single_channel = False\n \n return is_square, is_single_channel\n \n finally:\n \n # Close image\n img.close()", "def check_duplicate_image_name(image_paths):\n image_names = [os.path.basename(os.path.splitext(p)[0]) for p in image_paths]\n\n num_images = len(image_names)\n\n num_unique = len(set(image_names))\n\n if num_images != num_unique:\n raise ValueError('Found %d duplicate images.' % (num_images - num_unique))\n\n logging.info('Found no duplicates in %d images.', num_images)", "def __len__(self):\r\n return len(self.img_names)", "def test_full_resize(self):\n number_of_pixels = 300\n destination = base_path +'/test_data/rendering_tests/resized_images/'\n source_folder = base_path + '/test_data/rendering_tests/filter_database/'\n\n\n for the_file in os.listdir(destination):\n file_path = os.path.join(destination, the_file)\n if os.path.isfile(file_path):\n os.unlink(file_path)\n\n\n self.assertEqual(0, len(os.listdir(destination)))\n rb.find_all_files(number_of_pixels,source_folder, destination)\n self.assertEqual(6, len(os.listdir(destination)))\n for the_file in os.listdir(destination):\n file_path = os.path.join(destination,the_file)\n with Image.open(file_path) as f:\n self.assertNotEqual(number_of_pixels+5, f.size[0])\n self.assertNotEqual(number_of_pixels+5, f.size[1])\n # the above checks that the size does not vary as needed\n # probably not necessary\n self.assertEqual(number_of_pixels, f.size[0])\n self.assertEqual(number_of_pixels, f.size[1])", "def _check_shape(self, X):\n return all([X.shape[i] == self.train_shape[i] for i in range(2)])", "def is_full(self):\r\n return self.num_checkers == self.width * self.height", "def _check_size(self, img):\n absdiff = num.abs(num.subtract(img.shape, self.expected_size))\n pctdiff = num.true_divide(absdiff, self.expected_size)\n if not num.all(pctdiff <= self.size_tolerance):\n raise StandardError('image size outside form tolerance {} != {}'\n .format(img.shape, self.expected_size))", "def check_dim(gr, DIM):\n l = len(gr)\n if(l != DIM):\n return False\n\n for i in range(0, DIM):\n if(len(gr[i]) != l):\n return False \n return True", "def get_num_of_images(self):", "def _assert_same_size(outputs, output_size):\n nest.assert_same_structure(outputs, output_size)\n flat_output_size = nest.flatten(output_size)\n flat_output = nest.flatten(outputs)\n\n for (output, size) in 
zip(flat_output, flat_output_size):\n if isinstance(size, tf.TensorShape):\n if output.shape == size:\n pass\n elif output[0].shape != tf.TensorShape(size):\n raise ValueError(\n \"The output size does not match the the required output_size\")", "def _assert_same_size(outputs: TensorStruct, output_size: OutputSize):\n flat_output_size = nest.flatten(output_size)\n flat_output = nest.flatten(outputs)\n for output, size in zip(flat_output, flat_output_size):\n if isinstance(size, torch.Size):\n if output[0].size() != size:\n raise ValueError('The output size does not matchthe required output_size')\n elif output[0].size()[-1] != size:\n raise ValueError('The output size does not match the required output_size')", "def _check_tensor_shapes(tensors):\n for tensor in tensors:\n tensor = tf.convert_to_tensor(value=tensor)\n tensor.get_shape().assert_has_rank(2)\n tensor.get_shape().assert_is_compatible_with(\n tf.convert_to_tensor(value=tensors[0]).get_shape())", "def has_images(self):\n return len(self.images) > 0", "def __len__(self):\n return len(self.image_paths)", "def __len__(self):\n return len(self.image_paths)", "def __len__(self):\n return len(self.image_paths)", "def _check_sizes(self, space):\n my_dimension = self.get_total_dimension()\n other_dimension = space.get_total_dimension()\n if my_dimension != other_dimension:\n if isinstance(space, Conv2DSpace):\n if my_dimension * space.shape[0] !=\\\n other_dimension:\n raise ValueError(str(self)+\" with total dimension \" +\n str(my_dimension) +\n \" can't format a batch into \" +\n str(space) + \"because its total dimension\\\n is \" +\n str(other_dimension))", "def _check_image_size(self, size):\n if size % 32 == 0:\n return (0, 0)\n else:\n imageBorder = 32 - (size % 32)\n if (imageBorder % 2) == 0:\n return (int(imageBorder / 2), int(imageBorder / 2))\n else:\n return (int(imageBorder / 2), int((imageBorder / 2) + 1))", "def __len__(self):\n return len(self.image_names)", "def _assert_is_batched(self, *arrays):\n shape_list = []\n for array in arrays:\n if isinstance(array, tf.Tensor):\n shape_list.append(array.shape.as_list())\n else:\n shape_list.append(np.shape(array))\n # All arrays should have at least two dimensions.\n assert all([len(shape) >= 2 for shape in shape_list])\n # All arrays should have the same batch size.\n assert len(set([shape[0] for shape in shape_list])) == 1", "def check_sizes(self, show=True):\n # find pixel with common RA \n comRApix = np.where((self.coords1[0]<=np.max(self.coords2[0]))&\n (self.coords1[0]>=np.min(self.coords2[0]))\n )[0]\n \n # find pixels with common DEC \n comDECpix = np.where((self.coords1[1]<=np.max(self.coords2[1]))&\n (self.coords1[1]>=np.min(self.coords2[1]))\n )[0]\n \n print('Image 1 common pixels size: ({:}, {:})'.format(comRApix.size,\n comDECpix.size))\n \n # Corner coordinates \n minRA = np.min(self.coords1[0][comRApix])\n maxRA = np.max(self.coords1[0][comRApix])\n minDEC = np.min(self.coords1[1][comDECpix])\n maxDEC = np.max(self.coords1[1][comDECpix])\n if show:\n comFrame = plt.Rectangle(xy=(minRA, minDEC), width=maxRA-minRA,\n height=maxDEC-minDEC, hatch='\\\\', fill=True,\n color='g', alpha=.3)\n fig = plt.figure(figsize=(10,10))\n ax = fig.add_subplot(111)\n ax.add_patch(comFrame)\n ax.add_patch(self.image1.plotframe(color='r'))\n ax.add_patch(self.image2.plotframe(color='b'))\n ax.annotate('Image 1', xy=(minRA,maxDEC), color='r')\n ax.plot() \n plt.show()\n \n self.boundRA = np.array([minRA, maxRA])\n self.boundDEC = np.array([minDEC, maxDEC]) \n self.bounds1 = 
np.array([[comRApix[0], comRApix[-1]], \n [comDECpix[0], comDECpix[-1]]])\n \n if self.image1.get_pix_area() < self.image2.get_pix_area():\n print('Image 1 have smaller pixels than 2. \\n')\n self.pix_1_smaller = True \n else:\n print('Image 2 have smaller pixels than 1. \\n')\n self.pix_1_smaller = False", "def count(self):\n \n return len(self.img_lst)", "def test_valid_sizes(self):\n for size in settings.MISAGO_AVATARS_SIZES:\n self.assertEqual(clean_size(size), size)", "def is_image_size_64(image):\n return image['height'] == 64 and image['width'] == 64", "def canvas_size(self):\r\n width = height = 0\r\n for image in self.images:\r\n x = image.x + image.absolute_width\r\n y = image.y + image.absolute_height\r\n if width < x:\r\n width = x\r\n if height < y:\r\n height = y\r\n return round_up(width), round_up(height)", "def get_all_images(self):\n self.roses.save_image()\n all_images = Images.get_all_images()\n self.assertTrue(len(all_images)<1)", "def check_crop_size(size):\n type_check(size, (int, list, tuple), \"size\")\n if isinstance(size, int):\n check_value(size, (1, FLOAT_MAX_INTEGER))\n elif isinstance(size, (tuple, list)) and len(size) == 2:\n for value in size:\n check_value(value, (1, FLOAT_MAX_INTEGER))\n else:\n raise TypeError(\"Size should be a single integer or a list/tuple (h, w) of length 2.\")", "def images_exist(self):\n pass", "def verify_images(root_dir, root_listdir):\n counter = 0\n\n for index, image_dir in enumerate(root_listdir):\n images_listdir = os.listdir(root_dir + \"/\" + image_dir)\n list_of_images_indices = [\n image_index\n for image_index in range(3, len(images_listdir) - 1)\n if image_index % 2 == 0\n ]\n for image_ind in list_of_images_indices:\n filename = root_dir + \"/\" + image_dir + \"/\" + images_listdir[image_ind]\n try:\n im = Image.open(filename)\n im.verify()\n im.close()\n except (OSError, ValueError):\n counter += 1\n\n print(\"%d files caused error due to OSError and ValueError.\" % counter)", "def have_same_shapes(array1, array2):\n return array1.shape == array2.shape", "def test_test_image_dims_content(self):\n iterator = self._dataset.get_test()\n sample = next(iterator)\n image, label = sample['image'], sample['label']\n\n with self.subTest(name='DataShape'):\n self.assertTupleEqual(image.shape, (self._batch_size_test, 32, 32, 3))\n\n with self.subTest(name='DataType'):\n self.assertTrue(np.issubdtype(image.dtype, float))\n\n with self.subTest(name='DataValues'):\n # Normalized by stddev., expect nothing to fall outside 3 stddev.\n self.assertTrue((image >= -3.).all() and (image <= 3.).all())\n\n with self.subTest(name='LabelShape'):\n self.assertLen(label, self._batch_size_test)\n\n with self.subTest(name='LabelType'):\n self.assertTrue(np.issubdtype(label.dtype, int))\n\n with self.subTest(name='LabelValues'):\n self.assertTrue((label >= 0).all() and\n (label <= self._dataset.num_classes).all())", "def assert_same_size(sequences):\n seq_size = len(sequences[0])\n for seq in sequences:\n if len(seq) != seq_size:\n raise SizeError", "def match_size(*arrays):\n target = arrays[0].datashape\n result = []\n\n # check for bad inputs\n for a in arrays:\n ds = a.datashape.copy()\n for i in range(min(a.ndim, target.ndim)):\n if ds.dim_low[i] < target.dim_low[i] or \\\n ds.dim_high[i] > target.dim_high[i]:\n raise ValueError(\"All array domains must be a subset \"\n \"of the first array's domain\")\n\n for a in arrays:\n ds = a.datashape.copy()\n ds.dim_low = list(ds.dim_low)\n ds.dim_high = list(ds.dim_high)\n\n for i in 
range(min(a.ndim, target.ndim)):\n ds.dim_low[i] = target.dim_low[i]\n ds.dim_high[i] = target.dim_high[i]\n if ds != a.datashape:\n a = a.redimension(ds.schema)\n result.append(a)\n\n return tuple(result)", "def _check_ensembles_are_same_size(p, q):\n if p.npdf != q.npdf:\n raise ValueError(\"Input ensembles should have the same number of distributions\")", "def test_separate_ims():\n\n df1, df2 = setup()\n\n # Test 1\n im = separate_ims(df1)\n size = df1['imdims'][0]\n assert im.size == (size[0]*2, size[1])\n\n # Test 2\n im = separate_ims(df2)\n size = df2['imdims'][0]\n assert im.size == (size[0], size[1])", "def __len__(self):\n return len(self.img_paths)", "def getDims(img):\n n,m,k = np.shape(img) \n N,M = 0,0\n for i in range(1,n):\n if np.array_equal(img[i],img[i-1]):\n N += 1\n for j in range(1,m):\n if np.array_equal(img[:,j],img[:,j-1]):\n M += 1\n return N,M,n,m", "def compare_images(img1_path, img2_path):\n img1 = Image.open(img1_path)\n img2 = Image.open(img2_path)\n try:\n diff = ImageChops.difference(img1, img2)\n except ValueError:\n return False\n return diff.getbbox() is None", "def check_image_before_load(self,image_dims):\n\n if image_dims[0]*image_dims[1]*image_dims[2]*4 < self.check_available_memory():\n return True\n else:\n return False", "def _checkSize(X1,X2):\n \n if len(X1) != len(X2):\n raise ValueError, 'Lists are differnt lengths'", "def check_output(runtype):\n for image_name in image_names:\n for i in range(nplanes):\n compare_list_of_outputs(i,\n outputs_to_check,\n get_list_of_test_data(outputs_to_check, test_data_dir, nplanes, nchannels, added_tag, i),\n get_list_of_output_data(outputs_to_check, output_root, i)\n )", "def shape(self):\n return (self.numberOfImages,) + self.imageList[0].shape", "def get_image_sizes():\n widths = []\n heights = []\n\n from settings import folders_location\n for individual_folder_name in listdir(folders_location):\n individual_training_folder_path = folders_location + individual_folder_name + \"/training/\"\n\n image_paths = listdir(individual_training_folder_path)\n for image_path in image_paths:\n img = cv2.imread(individual_training_folder_path + image_path)\n\n height, width, channel = img.shape\n widths.append(width)\n heights.append(height)\n\n print(individual_training_folder_path + image_path)\n\n print(\"Min: %s, Max: %s\" % (np.min(widths), np.max(widths)))\n print(\"Average: %s\" % (np.average(widths)))\n\n return widths", "def hasImages(self):\n return len(self.getImages()) > 0", "def hasImages(self):\n return len(self.getImages()) > 0", "def testSize (self):\r\n \r\n perpixel = bytes_per_pixel [self.bih_vals [bih_BitCount]]\r\n width = self.bih_vals [bih_Width]\r\n height = self.bih_vals [bih_Height]\r\n expected = self.bih_vals [bih_SizeImage]\r\n\r\n # Rows always have multiples of 4 bytes\r\n \r\n padding = 3 - ((perpixel * width + 3) % 4)\r\n size = (width * perpixel + padding) * height\r\n\r\n if not size == expected:\r\n print \"Calculated size = %d (<> %d)\" % (size, expected)\r\n print \"***** File size error *****\"", "def _check_same_fov(*args, **kwargs):\n raise_error = kwargs.pop(\"raise_error\", False)\n for i, arg in enumerate(args):\n kwargs[f\"img_#{i}\"] = arg\n errors = []\n for (a_name, a_img), (b_name, b_img) in itertools.combinations(\n kwargs.items(), 2\n ):\n if not a_img.shape[:3] == b_img.shape[:3]:\n errors.append((a_name, b_name, \"shape\"))\n if not np.allclose(a_img.affine, b_img.affine):\n errors.append((a_name, b_name, \"affine\"))\n if len(errors) > 0 and raise_error:\n raise 
ValueError(\n \"Following field of view errors were detected:\\n\"\n + \"\\n\".join(\n [\n f\"- {e[0]} and {e[1]} do not have the same {e[2]}\"\n for e in errors\n ]\n )\n )\n return len(errors) == 0", "def correct_batch_size_in_files(self):\n print('checking correct file sizes')\n all_ok = True\n for f in self.data_filenames:\n all_ok *= (np.load(f).shape[0] == self.batch_size)\n if not all_ok:\n break\n print(all_ok)\n return all_ok", "def _check_consistency_between_imaging_extractors(self):\n return True", "def check_dimensions(d_real, d_fake, d_real_logits, d_fake_logits):\n def _check_pair(a, b):\n if a != b:\n raise ValueError(\"Shape mismatch: %s vs %s.\" % (a, b))\n if len(a) != 2 or len(b) != 2:\n raise ValueError(\"Rank: expected 2, got %s and %s\" % (len(a), len(b)))\n\n if (d_real is not None) and (d_fake is not None):\n _check_pair(d_real.shape.as_list(), d_fake.shape.as_list())\n if (d_real_logits is not None) and (d_fake_logits is not None):\n _check_pair(d_real_logits.shape.as_list(), d_fake_logits.shape.as_list())\n if (d_real is not None) and (d_real_logits is not None):\n _check_pair(d_real.shape.as_list(), d_real_logits.shape.as_list())", "def is_new_based_on_imgs(soup):\n\n \n \n prev_hashes = get_prev_img_hashes()\n temp_hashes = get_temp_img_hashes(soup)\n\n if len(temp_hashes.difference(prev_hashes))>0:\n print(\"new, based on images\")\n return True\n else:\n return False", "def valid_image(self, image):\n valid = False\n if (isinstance(image, list) and len(image) == 11):\n valid = True\n for row in image:\n if (isinstance(row, list) and len(row) == 11):\n for pixel in row:\n if not self.valid_color(pixel):\n valid = False\n break\n else:\n valid = False\n break\n if not valid:\n _LOGGER.error(\"Invalid image data received\")\n return valid", "def findWidthHeight():\n\n for f in os.listdir(\"%s/train/images/\" % args.dataset):\n if f.endswith(\".jpeg\"):\n imf = \"%s/train/images/%s\" % (args.dataset, f)\n try:\n im = Image.open(imf)\n except:\n print \"Could not open training image %s to read its size.\" %imf\n usage()\n break\n \n width = int(im.size[0])\n height = int(im.size[1])\n \n nwidth = width\n nheight = height\n if args.width:\n nwidth = args.width\n if args.height:\n nheight = args.height\n\n return width, height, nwidth, nheight, not(width == nwidth and height == nheight)", "def __len__(self):\n return len(self.image_info)", "def __len__(self):\n return len(self.image_info)", "def test_mid_sizes(self):\n for size in settings.MISAGO_AVATARS_SIZES:\n self.assertEqual(clean_size(size - 1), size)", "def test_size_check(self):\n [x1, y1, s1, g1] = self.data.diffusion_data.shape\n [x2, y2, s2, g2] = module_05.run_module(self.data).diffusion_data.shape\n self.assertEqual(x1, x2)\n self.assertEqual(y1, y2)\n self.assertEqual(s1, s2)\n self.assertEqual(g1, g2)", "def _check_batch_size(self, data_list):\n if self.api_info is None:\n self.get_info() # sets the image size and other such info from server.\n MAX_BATCH_SIZE = self.api_info['max_batch_size']\n if len(data_list) > MAX_BATCH_SIZE:\n raise ApiError((\"Number of images provided in bach %d is greater than maximum allowed per \"\n \"request %d\") % (len(data_list), MAX_BATCH_SIZE))", "def scan_size(self):\n max_memory = 10e9/4 # because 32-bit floats will be used\n memory = 0\n for f in self.filenames:\n img = cv2.imread(f, int(self.color))\n if img is not None:\n m = 1\n for dim in img.shape:\n m *= dim\n memory += m\n else:\n print('error opening %s' % f)\n print('size is %s bytes' % memory)\n return 
memory <= max_memory", "def scan_size(self):\n max_memory = 10e9/4 # because 32-bit floats will be used\n memory = 0\n for f in self.filenames:\n img = cv2.imread(f, int(self.color))\n if img is not None:\n m = 1\n for dim in img.shape:\n m *= dim\n memory += m\n else:\n print('error opening %s' % f)\n print('size is %s bytes' % memory)\n return memory <= max_memory", "def scan_size(self):\n max_memory = 10e9/4 # because 32-bit floats will be used\n memory = 0\n for f in self.filenames:\n img = cv2.imread(f, int(self.color))\n if img is not None:\n m = 1\n for dim in img.shape:\n m *= dim\n memory += m\n else:\n print('error opening %s' % f)\n print('size is %s bytes' % memory)\n return memory <= max_memory", "def dimensions():", "def check_border(pool, func, images, entries, copy_failed):\n start = time.perf_counter()\n test_results = pool.map(func, images)\n logger.info(\"%i of %i images have white border.\",\n test_results.count(True), len(test_results))\n failed = []\n # Iterate in reverse to avoid shifting\n # the indices of the objects we want to remove\n for i, passed in reversed(list(enumerate(test_results))):\n if not passed:\n del images[i]\n failed.append(entries.pop(i))\n if failed:\n # Log the names in their original order\n failed = list(reversed(failed))\n logger.info(\"Skipping %i images:\", len(failed))\n util.pprint_log([x.name for x in failed], logger.info)\n if copy_failed:\n _copy_failed(failed)\n logger.info(util.elapsed(start))\n logger.info(\"\\n\")", "def check_consistent_length(arrays: Sequence[npt.ArrayLike]) -> None:\n lengths = [_num_samples(X) for X in arrays if X is not None]\n uniques = np.unique(lengths)\n if len(uniques) > 1:\n raise ValueError(\n \"Found input variables with inconsistent numbers of\" \" samples: %r\" % [int(length) for length in lengths]\n )" ]
[ "0.66642034", "0.6607947", "0.65018415", "0.6470456", "0.64090717", "0.63401216", "0.6332631", "0.63291144", "0.6313849", "0.6313849", "0.62913483", "0.6274798", "0.62315774", "0.6190616", "0.6132303", "0.6132303", "0.6086374", "0.60784054", "0.6073663", "0.60491264", "0.6013802", "0.5977754", "0.5968297", "0.59614646", "0.5955326", "0.595298", "0.5947664", "0.5947664", "0.5947664", "0.5934962", "0.5930257", "0.58881897", "0.58743364", "0.5870228", "0.5865093", "0.5850577", "0.58353615", "0.58295894", "0.58292246", "0.5826168", "0.58231425", "0.5808609", "0.5808343", "0.5807351", "0.5803726", "0.58016443", "0.5797772", "0.5792991", "0.57904416", "0.57858837", "0.57858837", "0.57858837", "0.57822603", "0.57755995", "0.5774412", "0.57704073", "0.57688147", "0.57670873", "0.57654274", "0.5764183", "0.5761486", "0.5759647", "0.575408", "0.57523704", "0.57522804", "0.574026", "0.5721965", "0.5720845", "0.5719606", "0.5715905", "0.570361", "0.56932676", "0.569114", "0.56848204", "0.56745785", "0.5660189", "0.56548697", "0.56426907", "0.56418246", "0.5641602", "0.5641602", "0.56375617", "0.5630976", "0.5627128", "0.56271166", "0.56261456", "0.5620279", "0.5617657", "0.56171244", "0.56106", "0.56106", "0.56079566", "0.560687", "0.5606141", "0.56056124", "0.56056124", "0.56056124", "0.5603087", "0.56015515", "0.55969673" ]
0.66137314
1
return a VTK image reader based on file extension
def getReaderByExtension(self, ext, isRGB = 0): assert ext in self.extMapping, "Extension not recognized: %s" % ext mpr = self.extMapping[ext] prefix="vtk" # If it's a tiff file, we use our own, extended TIFF reader if self.extMapping[ext] == "TIFF": mpr = "ExtTIFF" prefix="vtkbxd" self.rdrstr = "%s.vtk%sReader()" % (prefix, mpr) rdr = eval(self.rdrstr) if ext == "bmp": rdr.Allow8BitBMPOn() if ext == "tiff": rdr.SetFileLowerLeft(self.flipVertically) return rdr
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getReadersFromFilenames(self):\n\t\tfor i in self.readers:\n\t\t\tdel i\n\t\tself.readers = []\n\n\t\tif not self.filenames:\n\t\t\traise Logging.GUIError(\"No files could be found\", \\\n\t\t\t\t\t\t\t\t\t\"For some reason, no files were listed to be imported.\")\t\t \n\t\t\t\t\t\n\t\tfiles = self.filenames\n\t\tprint \"Determining readers from \", self.filenames\n\n\t\tisRGB = 1\n\t\tself.ext = files[0].split(\".\")[-1].lower()\n\t\tdim = self.dimMapping[self.ext]\n\t\t# Initially flip the image if it's tiff, png or jpg.\n\t\t# In setVerticalFlip we negate the setting to have it set correctly.\n\t\tif self.ext.lower() in [\"png\", \"jpg\", \"jpeg\"]:\n\t\t\tself.flipVertically = True\n\t\tif self.ext in [\"tif\", \"tiff\"]:\n\t\t\treader = vtkbxd.vtkExtTIFFReader()\n\t\t\treader.SetFileName(files[0])\n\t\t\treader.UpdateInformation()\n\t\t\tif reader.GetNumberOfScalarComponents() >= 3:\n\t\t\t\tprint \"MODE IS RGB, IS AN RGB IMAGE\"\n\t\t\telse:\n\t\t\t\tprint \"MODE ISN'T RGB, THEREFORE NOT RGB\"\n\t\t\t\tisRGB = 0\n\t\t\trdr = self.getReaderByExtension(self.ext, isRGB)\n\t\t\trdr.SetFileName(files[0])\n\t\t\tif rdr.GetNumberOfSubFiles() > 1:\n\t\t\t\tdim = 3\n\t\t\t\t\n\t\tself.isRGB = isRGB\n\t\tself.is3D = (dim == 3)\n\t\t\n\t\tdirName = os.path.dirname(files[0])\n\t\tprint \"THERE ARE\", self.slicesPerTimepoint, \"SLICES PER TIMEPOINT\"\n\t\tself.ext = files[0].split(\".\")[-1].lower()\n\t\t\n\t\tif dim == 3:\n\t\t\ttotalFiles = len(files)\n\t\t\tfor i, file in enumerate(files):\n\t\t\t\trdr = self.getReaderByExtension(self.ext, isRGB)\n\t\t\t\trdr.SetFileName(file)\n\t\t\t\tself.readers.append(rdr)\n\t\t\treturn\n\t\t\t\n\t\ttotalFiles = len(files) / self.slicesPerTimepoint\n\n\t\timgAmnt = len(files)\n\t\tif totalFiles == 1:\n\t\t\trdr = self.getReaderByExtension(self.ext, isRGB)\n\t\t\tarr = vtk.vtkStringArray()\n\t\t\tfor fileName in files:\n\t\t\t\tarr.InsertNextValue(os.path.join(dirName, fileName))\n\t\t\trdr.SetFileNames(arr)\n\t\t\tself.readers.append(rdr)\n\t\t\treturn\n\t\t\t\n\t\tif imgAmnt > 1:\n\t\t\t# If the pattern doesn't have %, then we just use\n\t\t\t# the given filenames and allocate them to timepoints\n\t\t\t# using slicesPerTimepoint slices per timepoint\n\t\t\tntps = len(files) / self.slicesPerTimepoint\n\t\t\tfilelst = files[:]\n\t\t\t# dirn #TODO: what was this?\n\t\t\tfor tp in range(0, ntps):\n\t\t\t\trdr = self.getReaderByExtension(self.ext, isRGB)\n\t\t\t\tarr = vtk.vtkStringArray()\n\t\t\t\tfor i in range(0, self.slicesPerTimepoint):\n\t\t\t\t\tarr.InsertNextValue(filelst[0])\n\t\t\t\t\tfilelst = filelst[1:]\n\t\t\t\trdr.SetFileNames(arr)\n\t\t\t\trdr.SetDataExtent(0, self.x - 1, 0, self.y - 1, 0, self.slicesPerTimepoint - 1)\n\t\t\t\trdr.SetDataSpacing(self.spacing)\n\t\t\t\trdr.SetDataOrigin(0, 0, 0)\n\t\t\t\tself.readers.append(rdr)\n\t\t\treturn\n\t\t\n\t\telif imgAmnt == 1:\n\t\t\t# If only one file\n\t\t\trdr = self.getReaderByExtension(self.ext, isRGB)\n\t\t\trdr.SetDataExtent(0, self.x - 1, 0, self.y - 1, 0, self.slicesPerTimepoint - 1)\n\t\t\trdr.SetDataSpacing(self.spacing)\n\t\t\trdr.SetDataOrigin(0, 0, 0)\n\t\t\trdr.SetFileName(files[0])\n\n\t\t\tLogging.info(\"Reader = \", rdr, kw = \"io\")\n\t\t\tself.readers.append(rdr)", "def _get_reader(self, filepath: str):\n file_extension = os.path.splitext(filepath)[-1]\n\n self._validate_file(filepath)\n\n if file_extension == \".ipynb\":\n return NotebookReader(filepath)\n elif file_extension in [\".py\", \".r\"]:\n return FileReader(filepath)\n else:\n raise ValueError(f\"File type 
{file_extension} is not supported.\")", "def imread_wrapper(file):\n p = Path(file)\n if len(p.suffixes) == 2:\n series = int(Path(p.stem).stem)\n else:\n series = int(p.stem)\n return imread(file, series=series)", "def _get_reader_class(basename: str) -> Type[BaseEEGReader]:\n if basename.endswith(\".h5\"):\n return RamulatorHDF5Reader\n elif basename.endswith((\".bdf\", \".mff\", \".raw\")):\n return ScalpEEGReader\n elif basename.endswith(\".npy\"):\n return NumpyEEGReader\n else:\n return SplitEEGReader", "def imread(filename):\n filename = process(filename)\n ext = os.path.splitext(filename)[1]\n if ext.lower() == '.pfm':\n return load_pfm(filename)\n elif ext.lower() == '.dng':\n return load_dng(filename)\n else:\n loaded = cv2.imread(filename, flags=cv2.IMREAD_ANYDEPTH + cv2.IMREAD_COLOR)\n if loaded is None:\n raise IOError('Could not read {0}'.format(filename))\n else:\n return loaded", "def ReadVTK(self, filename, element_type=None):\n\n try:\n import vtkInterface as vtki\n except IOError:\n raise IOError(\"vtkInterface is not installed. Please install it first using 'pip install vtkInterface'\")\n\n self.__reset__()\n\n vmesh = vtki.UnstructuredGrid(filename)\n flat_elements = np.copy(np.delete(vmesh.cells, vmesh.offset))\n\n if not np.all(vmesh.celltypes == vmesh.celltypes[0]):\n raise IOError(\"Cannot read VTK files with hybrid elements\")\n\n cellflag = vmesh.celltypes[0]\n\n if cellflag == 5:\n self.element_type = \"tri\"\n divider = 3\n elif cellflag == 9:\n self.element_type = \"quad\"\n divider = 4\n elif cellflag == 10:\n self.element_type = \"tet\"\n divider = 4\n elif cellflag == 12:\n self.element_type = \"hex\"\n divider = 8\n elif cellflag == 3:\n self.element_type = \"line\"\n divider = 2\n else:\n raise IOError(\"VTK element type not understood\")\n\n if element_type is not None:\n if self.element_type != element_type:\n raise ValueError(\"VTK file does not contain {} elements\".format(element_type))\n\n\n self.elements = np.ascontiguousarray(flat_elements.reshape(int(flat_elements.shape[0]/divider),divider), dtype=np.uint64)\n self.points = np.ascontiguousarray(vmesh.points, dtype=np.float64)\n self.nelem = self.elements.shape[0]\n self.nnode = self.points.shape[0]\n\n if self.points.shape[1] == 3:\n if np.allclose(self.points[:,2],0.):\n self.points = np.ascontiguousarray(self.points[:,:2])\n\n if self.element_type == \"tri\" or self.element_type == \"quad\":\n self.GetEdges()\n self.GetBoundaryEdges()\n elif self.element_type == \"tet\" or self.element_type == \"hex\":\n self.GetFaces()\n self.GetBoundaryFaces()\n self.GetBoundaryEdges()\n\n return", "def read_image(path, file_format='nii.gz'):\n path = path + '.' 
+ file_format\n if file_format == 'npy':\n image = np.load(path)\n elif file_format == 'npz':\n image = np.load(path)['arr_0']\n elif file_format in ('png', 'jpg'):\n image = np.array(imageio.imread(path))\n elif file_format == 'dcm':\n image = np.array(imageio.volread(path, 'DICOM'))\n elif file_format in ('nii', 'nii.gz'):\n image = nib.load(path).get_data()\n else:\n raise ValueError('invalid --input_type : {}'.format(file_format))\n\n return image", "def get_type(ext):\n if ext.lower() in Asset.SUPPORTED_IMAGE_EXT['in']:\n return 'image'\n return 'file'", "def load_image(fname):\n return load_tiff(fname)", "def reader(self, idx):\n # Get the path of input image and groundtruth mask.\n input_path, gtmask_path = self.imgs[idx]\n input_img, gt_img = self.loader(input_path, gtmask_path)\n return input_img, gt_img", "def readImage(self, path, tt=1):\n return cv2.imread( path, tt)", "def read_image(self, filePath):\n if filePath.endswith(\".dcm\"):\n image = sitk.ReadImage(filePath)\n image = sitk.GetArrayFromImage(image).astype(\"int16\")\n image = np.expand_dims(image[0,:,:], -1)\n elif filePath.endswith(\".png\"):\n image = cv2.imread(filePath)\n image = np.array(image, dtype = \"int16\")\n elif filePath.endswith(\".mha\"):\n image = sitk.ReadImage(filePath)\n image = sitk.GetArrayFromImage(image).astype(\"int16\")\n image = np.transpose(image,(1,2,0))\n return image", "def __getitem__(self, idx):\n image = Image.open(self.filenames[idx]) # PIL image\n image = self.transform(image)\n return image", "def vtk_xml_reader(self):\n reader = vtk.vtkXMLStructuredGridReader()\n reader.SetFileName(self.file)\n reader.Update()\n assert isinstance(reader, vtk.vtkObject)\n return reader", "def load(self, file, lazy=True):\n # individual files for each slice\n # we got one file, nice!\n \n if not lazy:\n\n if file in self.imagedict.keys():\n return self.imagedict[file]\n else:\n self.imagedict[file] = self.load(file, True)\n self.imagedict[file] *= 1\n return self.imagedict[file]\n \n else:\n \n ending = splitext(file)[-1].lower()\n if ending in ['.nii', '.hdr', '.nii.gz', '.gz']:\n if self.correct_orientation:\n vol = ni.open_image(file, verbose=False)\n self.affine = vol.get_aligned_transformation(\"RAS\")\n data = vol.aligned_volume\n else:\n f = nib.load(file)\n self.affine = f.affine\n self.pixdim = np.asarray(f.header['pixdim'][1:])\n data = f.get_data()\n return data\n # elif ending in ['.nrrd', '.nhdr']:\n # if self.correct_orientation:\n # vol = nr.open_image(file, verbose=False)\n # self.affine = vol.get_aligned_transformation(\"RAS\")\n # f = vol.aligned_volume\n # else:\n # try:\n # f, h = nrrd.read(file)\n # except:\n # print('could not read file {}'.format(file))\n # logging.getLogger('data').error('could not read file {}'.format(file))\n # raise Exception('could not read file {}'.format(file))\n # self.affine = np.eye(4)\n # return f\n # elif ending in ['.dcm']:\n # f = pydicom.dcmread(file).pixel_array\n # return f\n # elif ending in ['.mha', '.mhd']:\n # f = skio.imread(file, plugin='simpleitk')\n # self.affine = np.eye(4)\n # return f\n elif ending in ['.png', '.pgm', '.pnm']:\n data = imread(file)\n if len(data.shape) > 2:\n return np.transpose(data, [2, 0, 1])\n else:\n return data\n return imread(file)\n else:\n raise Exception('{} not known'.format(ending))", "def read(infile):\n _, ext = os.path.splitext(infile)\n ext = ext.strip('.')\n return read_funcs[ext](infile)", "def imread(fname):\r\n return skimage.io.imread(fname)", "def _get_file_object(infilename):\n\n _, extension = 
os.path.splitext(infilename)\n if extension.lower() == '.spe':\n return parsers.SpeFile(infilename)\n elif extension.lower() == '.spc':\n return parsers.SpcFile(infilename)\n elif extension.lower() == '.cnf':\n return parsers.CnfFile(infilename)\n else:\n raise NotImplementedError(\n 'File type {} can not be read'.format(extension))", "def viewFileByExt(filename):\n filename_ext = file_func.getFilenameExt(filename)\n if filename_ext:\n filename_ext = filename_ext.lower()\n if filename_ext in PDF_FILENAME_EXT:\n from . import pdf_func\n return pdf_func.viewPDF(pdf_filename=filename)\n\n elif filename_ext in IMG_FILENAME_EXT:\n from . import img_func\n return img_func.viewImage(img_filename=filename)\n\n elif filename_ext in OFFICE_FILENAME_EXT:\n from . import office_func\n return office_func.openInLibreOffice(filename)\n else:\n log_func.warning(u'Not supported view file type <%s>' % filename_ext)\n else:\n log_func.warning(u'Not defined ext of filename <%s>' % filename)\n return False", "def iread(filename, *args, verbose=True, **kwargs):\n\n # determine if file is valid:\n # assert isinstance(filename, str), 'filename must be a string'\n\n\n # TODO read options for image\n # opt = {\n # 'uint8': False,\n # 'single': False,\n # 'double': False,\n # 'grey': False,\n # 'grey_709': False,\n # 'gamma': 'sRGB',\n # 'reduce': 1.0,\n # 'roi': None\n # }\n\n if isinstance(filename, str) and (filename.startswith(\"http://\") or filename.startswith(\"https://\")):\n # reading from a URL\n\n resp = urllib.request.urlopen(filename)\n array = np.asarray(bytearray(resp.read()), dtype=\"uint8\")\n image = cv.imdecode(array, -1)\n print(image.shape)\n return (image, filename)\n\n elif isinstance(filename, (str, Path)):\n # reading from a file\n\n path = Path(filename).expanduser()\n\n if any([c in \"?*\" for c in str(path)]):\n # contains wildcard characters, glob it\n # recurse and return a list\n # https://stackoverflow.com/questions/51108256/how-to-take-a-pathname-string-with-wildcards-and-resolve-the-glob-with-pathlib\n \n parts = path.parts[1:] if path.is_absolute() else path.parts\n p = Path(path.root).glob(str(Path(\"\").joinpath(*parts)))\n pathlist = list(p)\n\n if len(pathlist) == 0 and not path.is_absolute():\n # look in the toolbox image folder\n path = Path(__file__).parent / \"images\" / path\n parts = path.parts[1:] if path.is_absolute() else path.parts\n p = Path(path.root).glob(str(Path(\"\").joinpath(*parts)))\n pathlist = list(p)\n \n if len(pathlist) == 0:\n raise ValueError(\"can't expand wildcard\")\n\n imlist = []\n pathlist.sort()\n for p in pathlist:\n imlist.append(iread(p, **kwargs))\n return imlist\n\n else:\n # read single file\n\n if not path.exists():\n if path.is_absolute():\n raise ValueError(f\"file {filename} does not exist\")\n # file doesn't exist\n # see if it matches the supplied images\n path = Path(__file__).parent / \"images\" / path\n\n if not path.exists():\n raise ValueError(f\"file {filename} does not exist, and not found in supplied images\")\n\n # read the image\n # TODO not sure the following will work on Windows\n im = cv.imread(path.as_posix(), **kwargs) # default read-in as BGR\n\n if im is None:\n # TODO check ValueError\n raise ValueError(f\"Could not read {filename}\")\n\n return (im, str(path))\n\n elif islistof(filename, (str, Path)):\n # list of filenames or URLs\n # assume none of these are wildcards, TODO should check\n out = []\n for file in filename:\n out.append(iread(file, *args))\n return out\n else:\n raise ValueError(filename, 'invalid 
filename')", "def img_in(filename):\n temp_img = Image.open(filename)\n img = np.array(temp_img)\n name = filename.split('.')[-2]\n return name, img", "def _read_file(self):\n extension = self.path.split('.')[-1]\n if extension!='avi':\n raise Exception(\"Invalid Format\")\n\n return cv2.VideoCapture(self.path)", "def MFileReader(fvcom, *args, **kwargs):\n\n if isinstance(fvcom, str):\n FVCOM = FileReader(fvcom, *args, **kwargs)\n else:\n for file in fvcom:\n if file == fvcom[0]:\n FVCOM = FileReader(file, *args, **kwargs)\n else:\n FVCOM += FileReader(file, *args, **kwargs)\n\n return FVCOM", "def read_image_file(file_name):\n return torch.from_numpy(np.asarray(Image.open(file_name).convert('L')))", "def load(cls,filename,format=None,**kwargs):\n\n\t\tif format is None:\n\t\t\t\n\t\t\textension = filename.split(\".\")[-1]\n\t\t\tif extension in [\"fit\",\"fits\"]:\n\t\t\t\tformat=\"fits\"\n\t\t\telif extension in [\"npy\",\"npz\"]:\n\t\t\t\tformat=\"npz\"\n\t\t\telse:\n\t\t\t\traise IOError(\"File format not recognized from extension '{0}', please specify it manually\".format(extension))\n\n\t\tif format==\"fits\":\n\t\t\treturn loadFITS(cls,filename)\n\t\telif format==\"npz\":\n\t\t\treturn loadNPZ(cls,filename)\n\t\telse:\n\t\t\tangle,data = format(filename,**kwargs)\n\t\t\treturn cls(data,angle)", "def get_input(self, idx):\r\n img_filename = self.root / \"images\" / self._image_array[idx]\r\n x = Image.open(img_filename)\r\n return x", "def inferReader(filePath):\n for reg, reader in six.iteritems(REGEXES):\n match = re.match(reg, filePath)\n if match and match.group() == filePath:\n debug('Inferred reader for {}: {}'\n .format(filePath, reader.__name__))\n return reader\n raise SerpentToolsException(\n 'Failed to infer filetype and thus accurate reader from'\n 'file path {}'.format(filePath)\n )", "def open_image(self, filename):\n return np.array(self.ds['test'].load_image(filename))", "def reader(name, version=None, mimetype=None):\n\treturn _data_processor('read', name, version, mimetype)", "def _read_ext(cls, hdulist, extname, **kwargs):\n try:\n if cls == Table:\n # use Table.read method to ensure extra header keywords are loaded\n # as metadata\n obj = Table.read(hdulist, hdu=extname)\n obj = Table(obj, **kwargs)\n else:\n obj = cls(hdulist[extname].data, **kwargs)\n except Exception as e:\n raise IOError('%s: Impossible to open extension %s as a %s\\n%s' % (\n os.path.basename(hdulist.filename()), extname, cls.__name__, e))\n return obj", "def loadVtk(self, fname):\n\n reader = vtk.vtkUnstructuredGridReader()\n reader.SetFileName(filename)\n reader.Update()\n self._vtk = reader.GetOutput()", "def openFile(path_name):\n if os.path.isdir(path_name):\n reader = sitk.ImageSeriesReader()\n dicom_names = reader.GetGDCMSeriesFileNames(path_name)\n reader.SetFileNames(dicom_names)\n image_object = reader.Execute()\n \n elif os.path.isfile(path_name):\n image_object = sitk.ReadImage(path_name)\n\n else:\n print(\"Path name wrong.\")\n return None\n\n return image_object", "def image(fname):\n return cv2.imread(fname)", "def imagefile(self):\n if self.__filetype==\"flatWarp\" :\n ext = \".fw\"\n elif self.__filetype==\"camWarp\" :\n ext = \".camWarp\"\n elif self.__filetype==\"raw\" :\n ext = \".Data.dat\"\n else :\n raise ValueError(f\"requested file type {self.__filetype} not recognized\")\n\n return self.__imagefolder/self.file.replace(\".im3\", ext)", "def get_reader(fn):\n if is_bed(fn):\n return BedReader(fn)\n elif is_vcf(fn):\n return VcfReader(fn)\n else:\n raise 
ValueError(\"Could not get reader for %s\" % fn)", "def load_file(self):\n extensions = DataReader().get_supported_extensions_as_string()\n file_name, _ = QFileDialog.getOpenFileName(self, \"Open data set\", \"\",\n \"Images (\" + extensions + \")\")\n if not file_name:\n return\n\n self.render_widget.load_file(file_name)\n self.switch_to_simple()", "def get_split(split_name, dataset_dir, file_pattern=None, reader=None):\n if split_name not in SPLITS_TO_SIZES:\n raise ValueError('split name %s was not recognized.' % split_name)\n \n if not file_pattern:\n file_pattern = FILE_PATTERN\n file_pattern = os.path.join(dataset_dir, file_pattern % split_name)\n \n # Allowing None in the signature so that dataset_factory can use the default.\n if reader is None:\n reader = tf.TFRecordReader\n# #文件名格式\n# if file_pattern is None:\n# file_pattern = _get_output_filename('tfrecords','voc_2007_train')#need fix your filename\n# print(file_pattern)\n \n # 适配器1:将example反序列化成存储之前的格式。由tf完成\n keys_to_features = {\n 'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),\n 'image/format': tf.FixedLenFeature((), tf.string, default_value='jpg'),\n 'image/height': tf.FixedLenFeature([1], tf.int64),\n 'image/width': tf.FixedLenFeature([1], tf.int64),\n 'image/channels': tf.FixedLenFeature([1], tf.int64),\n 'image/shape': tf.FixedLenFeature([3], tf.int64),\n 'image/object/bbox/xmin': tf.VarLenFeature(dtype=tf.float32),\n 'image/object/bbox/ymin': tf.VarLenFeature(dtype=tf.float32),\n 'image/object/bbox/xmax': tf.VarLenFeature(dtype=tf.float32),\n 'image/object/bbox/ymax': tf.VarLenFeature(dtype=tf.float32),\n 'image/object/bbox/label': tf.VarLenFeature(dtype=tf.int64),\n 'image/object/bbox/difficult': tf.VarLenFeature(dtype=tf.int64),\n 'image/object/bbox/truncated': tf.VarLenFeature(dtype=tf.int64),\n }\n \n #适配器2:将反序列化的数据组装成更高级的格式。由slim完成\n items_to_handlers = {\n 'image': slim.tfexample_decoder.Image('image/encoded', 'image/format'),\n 'shape': slim.tfexample_decoder.Tensor('image/shape'),\n 'object/bbox': slim.tfexample_decoder.BoundingBox(\n ['ymin', 'xmin', 'ymax', 'xmax'], 'image/object/bbox/'),\n 'object/label': slim.tfexample_decoder.Tensor('image/object/bbox/label'),\n 'object/difficult': slim.tfexample_decoder.Tensor('image/object/bbox/difficult'),\n 'object/truncated': slim.tfexample_decoder.Tensor('image/object/bbox/truncated'),\n }\n # 解码器\n decoder = slim.tfexample_decoder.TFExampleDecoder(keys_to_features, items_to_handlers)\n \n # dataset对象定义了数据集的文件位置,解码方式等元信息\n dataset = slim.dataset.Dataset(\n data_sources=file_pattern,\n reader=reader,\n num_samples = SPLITS_TO_SIZES['test'], # 手动生成了三个文件, 每个文件里只包含一个example\n decoder=decoder,\n items_to_descriptions = ITEMS_TO_DESCRIPTIONS,\n num_classes=NUM_CLASSES)\n return dataset", "def load_image(path: str):\n if path.endswith('.npy'):\n return np.load(path)\n if path.endswith(('.nii', '.nii.gz', '.hdr', '.img')):\n import nibabel as nib\n return nib.load(path).get_data()\n if path.endswith('.tif'):\n from PIL import Image\n with Image.open(path) as image:\n return np.asarray(image)\n\n raise ValueError(f\"Couldn't read image from path: {path}.\\n\"\n \"Unknown file extension.\")", "def loaddata(path):\n if path.endswith(\".tiff\") or path.endswith(\".tif\"):\n try:\n from vigra.impex import readVolume\n except ImportError:\n raise ImportError(\"Vigra is needed to read/write TIFF volumes, but could not be imported.\")\n\n volume = readVolume(path)\n return volume\n\n elif path.endswith(\".h5\"):\n try:\n from Antipasti.netdatautils import 
fromh5\n except ImportError:\n raise ImportError(\"h5py is needed to read/write HDF5 volumes, but could not be imported.\")\n\n volume = fromh5(path)\n return volume\n\n else:\n raise NotImplementedError(\"Can't load: unsupported format. Supported formats are .tiff and .h5\")", "def __getitem__(self, idx):\n ImageFile.LOAD_TRUNCATED_IMAGES = True\n image = Image.open(self.filenames[idx]) # PIL image\n width, height = image.size\n if width<IMAGE_SIZE or height<IMAGE_SIZE:\n image = image.resize((IMAGE_SIZE+50, IMAGE_SIZE+50))\n \n image = image.convert('RGB')\n tensor = self.transform(image)\n sample = (tensor, self.labels[idx])\n return sample", "def _load_image(self, index: int) -> Tensor:\n path = self.files[index][\"image\"]\n with rasterio.open(path) as f:\n array = f.read()\n tensor = torch.from_numpy(array).float()\n return tensor", "def parse_extension(filepath):\n extension = os.path.splitext(filepath)[1][1:]\n\n extensions_dict = {\"netcdf\": ['nc'],\n \"mitiff\": ['mitiff'],\n \"geotiff\": ['gtiff', 'tiff', 'tif']}\n\n driver = None\n\n for key in extensions_dict:\n if extension in extensions_dict[key]:\n driver = key \n\n if driver is not None:\n return driver\n else:\n raise Exception(\"Unknown file extension, cannot guess file format\")", "def __getitem__(self, idx):\n label, path = self.pathList[idx]\n file_list = os.listdir(path)\n # parse the images in the folder\n for pt_file in file_list:\n file_path = path + pt_file\n extension = os.path.splitext(file_path)[1]\n\n # choose only the file that has the tensor weights\n if extension == '.pt':\n tensor = torch.load(file_path) # size Nx20x2 because the shape is made of 20 (x,y) tuples\n \n # Change type Int to type Float\n tensor = tensor.type(torch.FloatTensor) # On cpu for now\n\n # normalization of the coordinates\n #tensor[:,:,0] /= 300 # x\n #tensor[:,:,1] /= 150 # y\n\n tensor = tensor.view(tensor.size()[0], 40) # resize to Nx40\n\n return tensor, label", "def __ext_from_image_stream(stream):\n ext_map = {'GIF': '.gif', 'JPEG': '.jpg', 'PNG': '.png',\n 'TIFF': '.tiff', 'WMF': '.wmf'}\n stream.seek(0)\n format = PIL_Image.open(stream).format\n if format not in ext_map:\n tmpl = \"unsupported image format, expected one of: %s, got '%s'\"\n raise ValueError(tmpl % (ext_map.keys(), format))\n return ext_map[format]", "def get_from_file(self, filename):\n print \"loading from file...\"\n return cv2.imread(filename)", "def retrieveImageInfo(self, filename):\t\t \n\t\tassert filename, \"Filename must be defined\"\n\t\tassert os.path.exists(filename), \"File that we're retrieving information \\\n\t\t\t\t\t\t\t\t\t\tfrom (%s) needs to exist, but doesn't.\" % filename\n\t\tself.ext = filename.split(\".\")[-1].lower()\n\t\trdr = self.getReaderByExtension(self.ext)\n\t\t\n\t\tif self.ext == \"bmp\":\n\t\t\trdr.Allow8BitBMPOn()\n\t\trdr.SetFileName(filename)\n\t\tif rdr.IsA(\"vtkExtTIFFReader\"):\n\t\t\trdr.UpdateInformation()\n\t\t\tif rdr.GetNumberOfScalarComponents() == 1:\n\t\t\t\trdr.RawModeOn()\n\n\t\tdata = rdr.GetOutput()\n\t\tdata.Update()\n\t\tself.numberOfComponents = data.GetNumberOfScalarComponents()\n\n\t\tif not self.ctf:\n\t\t\tbd = self.getDataBitDepth(data)\n\t\t\tself.ctf = vtk.vtkColorTransferFunction()\n\t\t\tif bd == 8 or bd == 12:\n\t\t\t\tself.ctf.AddRGBPoint(0, 0, 0, 0)\n\t\t\t\tself.ctf.AddRGBPoint((2 ** bd) - 1, 0, 1, 0)\n\t\t\telse:\n\t\t\t\trange = data.GetScalarRange()\n\t\t\t\tself.ctf.AddRGBPoint(range[0], 0, 0, 0)\n\t\t\t\tself.ctf.AddRGBPoint(range[1], 0, 1, 0)\n\t\t\t\n\t\tself.x, self.y, z = 
data.GetDimensions()\n\t\tself.dimensions = (self.x, self.y, self.slicesPerTimepoint)\n\t\tif z > 1:\n\t\t\tself.slicesPerTimepoint = z\n\t\t\tself.z = z\n\t\t\tself.dimensions = (self.x, self.y, self.slicesPerTimepoint)\n\t\t\tlib.messenger.send(self, \"update_dimensions\")\n\t\tself.originalDimensions = self.dimensions", "def create(path: Union[Path, str]):\n if isinstance(path, str):\n path = Path(path)\n \n ext = path.suffix.lower()\n if ext == \".hdr\":\n reader = RadianceHDRFormat.read\n elif ext == \".exr\":\n reader = OpenEXRFormat.read\n elif ext == \".pfm\":\n reader = PFMFormat.read\n else:\n # assuming the image is LDR\n def pil_reader(path: Union[Path, str]) -> np.ndarray:\n # open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)\n with open(path, 'rb') as f:\n img = Image.open(f)\n img = np.asarray(img.convert(\"RGB\"))\n if img.dtype.startswith(\"uint\") or img.dtype.startswith(\"int\"):\n info = np.iinfo(img.dtype)\n elif img.dtype.startswith(\"float\"):\n info = np.finfo(img.dtype)\n else:\n raise TypeError()\n\n min_ = float(info.min)\n max_ = float(info.max)\n return min_max_normalization(img.astype(np.float64), min_, max_) \n reader = pil_reader\n return reader", "def read_array(self, filename):\n extension = filename.split('.')[-1] # Get file extension\n if extension == 'mat':\n array = sci.loadmat(filename)\n elif extension == 'npy':\n array = np.load(filename)\n else:\n print('Error!!! Unrecognised file type for read_array()')\n array = None\n return array", "def nircam_image_reader(file_name):\n\n hdulist = fits.open(file_name)\n data = Data(label='NIRCam Image')\n data.header = hdulist[0].header\n wcs = WCS(hdulist[0].header)\n\n # drop the last axis since the cube will be split\n data.coords = coordinates_from_wcs(wcs.sub(2))\n data.add_component(hdulist[0].data[0], 'Flux')\n data.add_component(hdulist[0].data[1], 'Uncertainty')\n\n return data", "def load(self,filename):\n basename = os.path.basename(filename)\n self.name, ext = os.path.splitext(basename)\n if ext == '.xml':\n self.load_xml(filename)\n elif ext == '.tsv':\n self.load_tsv_fast(filename)\n elif ext == '.tsvs':\n self.load_tsv(filename)\n else:\n print 'Error: only .xml and .tsv files are supported'", "def vF3d_VTK(field,name,VTKformat): \n if VTKformat == 'vtu':\n vf3d_vtu(field,name)\n elif VTKformat == None:\n print 'Please select a VTK format'\n else:\n print 'The selected format has not been developed yet'\n return #nothing, since functions output the written VTK file", "def get_loader(csv_file, img_dir, image_size, batch_size, mode='val', dataset='vg'):\n\n if dataset == 'VG':\n \n transform = transforms.Compose([\n transforms.Resize(image_size),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n\n dataset = GeneratedVGDataset(csv_file, img_dir, transform)\n else:\n \traise Exception(\"currently only VG generated images dataset is provided \")\n\n shuffle = True if mode == 'train' else False\n \n data_loader = DataLoader(dataset=dataset, batch_size=batch_size, shuffle=shuffle, num_workers=4)\n\n return data_loader", "def load_volume(name, nx, ny, nz):\n\n # load raw volume into memory\n img = np.fromfile(name, dtype=np.float32)\n img = np.reshape(img, (ny, nx, nz))\n\n return img.transpose(0, 2, 1)", "def __getitem__(self, t):\n collection = {}\n\n # read raw data and unpack (if necessary)\n for typ in self.files.keys():\n scan_data = None\n if typ == \"label\":\n scan_data = 
np.fromfile(self.files[typ][t], dtype=np.uint16)\n else:\n scan_data = unpack(np.fromfile(self.files[typ][t], dtype=np.uint8))\n\n # turn in actual voxel grid representation.\n collection[typ] = scan_data.reshape(VOXEL_DIMS)\n\n return self.filenames[t], collection", "def imgRead(filename: str, representation: int) -> np.ndarray:\r\n if representation==LOAD_GRAY_SCALE:\r\n img = cv2.imread(filename,0)\r\n else:\r\n img = cv2.imread(filename)\r\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\r\n return img.astype('uint8')", "def _open_image(self, path):\n return cv.imread(path, 1)\n # .astype(float)", "def get_split(split_name, dataset_dir, file_pattern=None, reader=None):\n\tif split_name not in SPLITS_TO_SIZES and split_name[:-2] not in SPLITS_TO_SIZES:\n\t\traise ValueError('split name %s was not recognized.' % split_name)\n\n\tif not file_pattern:\n\t\tfile_pattern = _FILE_PATTERN\n\tfile_pattern = os.path.join(dataset_dir, file_pattern % split_name)\n\t#Allowing None in the signature so that the dataset_factory can use the default\n\tif reader is None:\n\t\treader = tf.TFRecordReader\n\n\tkeys_to_features = {\n\t\t'image/encoded': tf.FixedLenFeature(\n\t\t\t(), tf.string, default_value=''),\n\t\t'image/format': tf.FixedLenFeature(\n\t\t\t(), tf.string, default_value='jpeg'),\n\t\t'image/class/label': tf.FixedLenFeature(\n\t\t\t[], tf.int64, default_value=tf.zeros([], dtype=tf.int64)),\n\t}\n\titems_to_handlers = {\n\t\t'image': slim.tfexample_decoder.Image(),\n\t\t'label': slim.tfexample_decoder.Tensor('image/class/label'),\n\t}\n\n\tdecoder = slim.tfexample_decoder.TFExampleDecoder(\n\t\tkeys_to_features, items_to_handlers)\n\n\tlabels_to_names = None\n\tif dataset_utils.has_labels(dataset_dir):\n\t\tlabels_to_names=dataset_utils.read_label_file(dataset_dir)\n\t\n\tif split_name in SPLITS_TO_SIZES:\n\t\tnum_samples = SPLITS_TO_SIZES[split_name]\n\telif split_name[:-2] in SPLITS_TO_SIZES:\n\t\tnum_samples = SPLITS_TO_SIZES[split_name[:-2]]\n\n\treturn slim.dataset.Dataset(data_sources=file_pattern,\n\t\treader=reader,\n\t\tdecoder=decoder,\n\t\tnum_samples=num_samples,\n\t\titems_to_descriptions=_ITEMS_TO_DESCRIPTIONS,\n\t\tnum_classes=_NUM_CLASSES,\n\t\tlabels_to_names=labels_to_names)", "def file_reader(image_file, label_file):\n\n image = im.imread(image_file)\n\n with open(label_file, \"r\") as file:\n label = float(file.read())\n\n return image, label", "def get_itk_image(path):\n\n reader = itk.ImageFileReader()\n reader.SetFileName(path)\n\n image = reader.Execute()\n\n return image", "def get_split(split_name, dataset_dir, file_pattern=None, reader=None):\n\tif split_name not in _SPLITS_TO_SIZES:\n\t\traise ValueError(\"split name %s was not recognized.\" % split_name)\n\tif not file_pattern:\n\t\tfile_pattern = _FILE_PATTERN\n\tfile_pattern = os.path.join(dataset_dir, file_pattern % split_name)\n\tif reader is None:\n\t\treader = tf.TFRecordReader\n\tkeys_to_features = {\"image/encoded\": tf.FixedLenFeature((), tf.string, default_value=\"\"), \"image/format\": tf.FixedLenFeature((), tf.string, default_value=\"png\"), \"image/class/label\": tf.FixedLenFeature([1], tf.int64, default_value=tf.zeros([1], dtype=tf.int64))}\n\titems_to_handlers = {\"image\": slim.tfexample_decoder.Image(shape=[32, 32, 3], channels=3), \"label\": slim.tfexample_decoder.Tensor(\"image/class/label\", shape=[])}\n\tdecoder = slim.tfexample_decoder.TFExampleDecoder(keys_to_features, items_to_handlers)\n\tlabels_to_names = None\n\tif dataset_utils.has_labels(dataset_dir):\n\t\tlabels_to_names = 
dataset_utils.read_label_file(dataset_dir)\n\treturn slim.dataset.Dataset(data_sources=file_pattern, reader=reader, decoder=decoder, num_samples=_SPLITS_TO_SIZES[split_name], num_classes=_NUM_CLASSES, items_to_descriptions=_ITEMS_TO_DESCRIPTIONS, labels_to_names=labels_to_names)", "def import_grid(file_name):\n\n return FileReader(file_name=file_name).grid", "def LoadMetadata(filename):\r\n## print filename\r\n globbed=glob.glob(os.path.join(os.path.dirname(filename),'*.zvi'))\r\n if globbed:\r\n return LoadZVIMetaData(globbed[0])\r\n globbed=glob.glob(os.path.join(os.path.dirname(filename),'*.xml'))\r\n if globbed:\r\n return LoadAxioVisionXMLMetaData(globbed[0])\r\n globbed=glob.glob(os.path.join(os.path.dirname(filename),'metadata.txt'))\r\n if globbed:\r\n return LoadMMMetaData(globbed[0])\r\n return None\r\n #no further valid options, crash horribly\r", "def load_faces(path, ext=\".pgm\"):\n \n #\n # You code here\n #\n \n images = []\n img_shape = (0, 0)\n\n for root, dirs, files in os.walk(path):\n for file in files:\n if ext in file: # check if file is of pgm-type\n img_path = os.path.join(root, file)\n img = plt.imread(img_path) # Read the image\n img_shape = img.shape\n img = img.flatten() # Transform 2D image into vector M = height x width\n images.append(img)\n\n img_array = np.asarray(images) \n\n return img_array, img_shape", "def get_input(path):\n img = imread(path)\n return img", "def driver_from_file(input_file: str, quick: bool = True) -> str:\n input_file = MPath.from_inp(input_file)\n\n # mapchete files can immediately be returned:\n if input_file.suffix == \".mapchete\":\n return \"Mapchete\"\n\n # use the most common file extensions to quickly determine input driver for file:\n if quick:\n try:\n return driver_from_extension(input_file.suffix)\n except ValueError:\n pass\n\n # brute force by trying to open file with rasterio and fiona:\n try:\n logger.debug(\"try to open %s with rasterio...\", input_file)\n with rasterio_open(input_file): # pragma: no cover\n return \"raster_file\"\n except Exception as rio_exception:\n try:\n logger.debug(\"try to open %s with fiona...\", input_file)\n with fiona_open(input_file): # pragma: no cover\n return \"vector_file\"\n except Exception as fio_exception:\n if input_file.exists():\n logger.exception(f\"fiona error: {fio_exception}\")\n logger.exception(f\"rasterio error: {rio_exception}\")\n raise MapcheteDriverError(\n \"%s has an unknown file extension and could not be opened by neither \"\n \"rasterio nor fiona.\" % input_file\n )\n else:\n raise FileNotFoundError(\"%s does not exist\" % input_file)", "def read(dataset = \"training\", path = \".\"):\n\n if dataset is \"training\":\n fname_img = os.path.join(path, 'train-images.idx3-ubyte')\n fname_lbl = os.path.join(path, 'train-labels.idx1-ubyte')\n elif dataset is \"testing\":\n fname_img = os.path.join(path, 't10k-images.idx3-ubyte')\n fname_lbl = os.path.join(path, 't10k-labels.idx1-ubyte')\n else:\n raise ValueError(\"dataset must be 'testing' or 'training'\")\n\n # Load everything in some numpy arrays\n with open(fname_lbl, 'rb') as flbl:\n magic, num = struct.unpack(\">II\", flbl.read(8))\n lbl = np.fromfile(flbl, dtype=np.int8)\n\n with open(fname_img, 'rb') as fimg:\n magic, num, rows, cols = struct.unpack(\">IIII\", fimg.read(16))\n img = np.fromfile(fimg, dtype=np.uint8).reshape(len(lbl), rows, cols)\n\n get_img = lambda idx: (lbl[idx], img[idx])\n\n # Create an iterator which returns each image in turn\n for i in range(len(lbl)):\n yield get_img(i)", "def 
read_image(filename, representation):\n img = imread(filename)\n img = int2float(img)\n if representation == GS_REP:\n img = rgb2gray(img)\n return img", "def file_type(filepath):\n imexts = ['.png', '.bmp', '.jpg', 'jpeg']\n textexts = ['.csv', '.txt']\n if filepath.endswith('.hdf5') or filepath.endswith('.h5'):\n return 'hdf5'\n if any([filepath.endswith(ext) for ext in textexts]):\n return 'delim'\n if filepath.endswith('.grm.raw'):\n return 'grm.raw'\n if filepath.endswith('.npy'):\n return 'npy'\n if _is_bed(filepath):\n return 'bed'\n if _is_gen(filepath):\n return 'gen'\n if any([filepath.endswith(ext) for ext in imexts]):\n return 'image'\n return 'unknown'", "def read(filename, ext=None, extver=None, columns=None, rows=None,\n header=False, case_sensitive=False, upper=False, lower=False,\n vstorage='fixed', verbose=False, trim_strings=False, **keys):\n\n if keys:\n import warnings\n warnings.warn(\n \"The keyword arguments '%s' are being ignored! This warning \"\n \"will be an error in a future version of `fitsio`!\" % keys,\n DeprecationWarning, stacklevel=2)\n\n kwargs = {\n 'lower': lower,\n 'upper': upper,\n 'vstorage': vstorage,\n 'case_sensitive': case_sensitive,\n 'verbose': verbose,\n 'trim_strings': trim_strings\n }\n\n read_kwargs = {}\n if columns is not None:\n read_kwargs['columns'] = columns\n if rows is not None:\n read_kwargs['rows'] = rows\n\n with FITS(filename, **kwargs) as fits:\n\n if ext is None:\n for i in xrange(len(fits)):\n if fits[i].has_data():\n ext = i\n break\n if ext is None:\n raise IOError(\"No extensions have data\")\n\n item = _make_item(ext, extver=extver)\n\n data = fits[item].read(**read_kwargs)\n if header:\n h = fits[item].read_header()\n return data, h\n else:\n return data", "def openFile(dir):\n # If path is a directory\n if os.path.isdir(dir):\n # Check file sizes from first file\n Nt = len(glob.glob(dir + \"/1_*.txt\")) # Nt\n tmp = np.loadtxt(dir + \"/1_0.txt\") \n Ny, Nx = tmp.shape\n # Output array\n V = np.zeros((Nt, 2, Ny, Nx))\n for n in range(Nt):\n V1 = np.loadtxt(\"{0}/1_{1}.txt\".format(dir, n))\n V2 = np.loadtxt(\"{0}/2_{1}.txt\".format(dir, n))\n V[n] = V1, V2\n return V\n # If path is a file\n elif os.path.isfile(dir):\n if '.npy' in dir: # Numpy data file\n return np.load(dir)\n elif '.txt' in dir: # Plain text data file\n return np.loadtxt(dir)\n else:\n raise Exception(\"File extension not supported.\")\n else:\n raise Exception(\"Path is not supported.\")", "def _openFlt(self, fname):\n image = np.loadtxt(fname)\n\n if(image !=None):\n M,N=(int(image[0]), int(image[1]))\n image = image[2:image.shape[0]]\n image = image.reshape((M,N))\n else:\n raise IOError, \"Image file can not be opened\"\n\n return image", "def read_image(fname, roi=None, dset_name='default', parallelism=1):\n\n from functools import partial\n from numpy import array, ndarray\n from multiprocessing import Pool, cpu_count\n\n if isinstance(fname, str):\n fmt = fname.split('.')[-1]\n \n if fmt == '.h5' or fmt == '.hdf5':\n reader = partial(readers[fmt], roi=roi, dset_name=dset_name)\n else:\n reader = partial(readers[fmt], roi=roi)\n \n result = reader(fname)\n\n elif isinstance(fname, (tuple, list, ndarray)):\n fmt = fname[0].split('.')[-1]\n if fmt == '.h5' or fmt == '.hdf5':\n reader = partial(readers[fmt], roi=roi, dset_name=dset_name)\n else:\n reader = partial(readers[fmt], roi=roi)\n\n if parallelism == 1:\n result = array([reader(f) for f in fname])\n\n else:\n if parallelism == -1:\n num_cores = cpu_count()\n else:\n num_cores = 
min(parallelism, cpu_count())\n\n with Pool(num_cores) as pool:\n result = array(pool.map(reader, fname))\n else:\n raise TypeError(\n \"First argument must be string for a one file or (tuple, list, ndarray) for many files\"\n )\n\n return result", "def _load(self, pkgpart, part_dict):\n # call parent to do generic aspects of load\n super(Image, self)._load(pkgpart, part_dict)\n # set file extension\n self.__ext = posixpath.splitext(pkgpart.partname)[1]\n # return self-reference to allow generative calling\n return self", "def loadFile(filterExt):\n basicFilter = \"*.\" + filterExt\n filePath = fileDialog2(fileFilter=basicFilter, dialogStyle=2, fm=1)\n if(filePath != None):\n #openfile = open('/Users/camtton/Desktop/drawing.svg', 'r')\n tokens = getSVGpath(filePath[0])\n return tokens\n else:\n print 'Please select a %s file'%(filterExt)", "def __init__(self, filepath: str):\n self.filetype: str = filepath[len(filepath) - 3:].upper()\n self.tags = None\n self.locations: [Location] = None\n self.intermediaryImage = None\n self.outlined = None\n if self.filetype == 'TIF':\n print('found tif')\n with TiffFile(filepath) as tif:\n # fileInfo(tif)\n self.tags = metadataGeoTags(tif)\n self.image = tif.asarray()\n elif self.filetype == 'PNG' or self.filetype == 'JPG':\n print('found png')\n self.image = cv2.imread(filepath, cv2.IMREAD_UNCHANGED)\n else:\n print('invalid file type:', self.filetype)", "def get_infile(filename):\r\n if filename.endswith(\".gz\"):\r\n fin = GzipFile(filename, \"rb\")\r\n else:\r\n fin = open(filename, \"U\")\r\n return fin", "def get_infile(filename):\r\n if filename.endswith(\".gz\"):\r\n fin = GzipFile(filename, \"rb\")\r\n else:\r\n fin = open(filename, \"U\")\r\n return fin", "def img_read(name):\n\n img = cv2.imread(name)\n\n return img", "def get_split(split_name, dataset_dir, file_pattern=None, reader=None):\n if split_name not in SPLITS_TO_SIZES:\n raise ValueError('split name %s was not recognized.' 
% split_name)\n\n if not file_pattern:\n file_pattern = _FILE_PATTERN\n file_pattern = os.path.join(dataset_dir, file_pattern % split_name)\n\n # Allowing None in the signature so that dataset_factory can use the default.\n if not reader:\n reader = tf.TFRecordReader\n\n keys_to_features = {\n 'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),\n 'image/format': tf.FixedLenFeature((), tf.string, default_value='jpg'),\n 'image/class/label': tf.FixedLenFeature(\n [], tf.int64, default_value=tf.zeros([], dtype=tf.int64)),\n 'image/height': tf.FixedLenFeature(\n [], tf.int64, default_value=tf.zeros([], dtype=tf.int64)),\n 'image/width':tf.FixedLenFeature(\n [], tf.int64, default_value=tf.zeros([], dtype=tf.int64)), \n }\n\n items_to_handlers = {\n 'image': slim.tfexample_decoder.Image(),\n 'label': slim.tfexample_decoder.Tensor('image/class/label'),\n 'height': slim.tfexample_decoder.Tensor('image/height'),\n 'width': slim.tfexample_decoder.Tensor('image/width'),\n \n }\n\n decoder = slim.tfexample_decoder.TFExampleDecoder(\n keys_to_features, items_to_handlers)\n\n labels_to_names = None\n if dataset_utils.has_labels(dataset_dir):\n labels_to_names = dataset_utils.read_label_file(dataset_dir)\n\n return slim.dataset.Dataset(\n data_sources=file_pattern,\n reader=reader,\n decoder=decoder,\n num_samples=SPLITS_TO_SIZES[split_name],\n num_classes=2,\n items_to_descriptions=_ITEMS_TO_DESCRIPTIONS,\n labels_to_names=labels_to_names)", "def load_file(path='vgg19.mat'):\n\t\tfile=loadmat(path)\n\t\tfile=file['layers']\n\t\tprint(\"Success load_file\")\n\t\treturn file", "def _load(f, as_gray=False):\n # importing io is quite slow since it scans all the backends\n # we lazy import it here\n from skimage.io import imread\n return imread(os.path.join(data_dir, f), plugin='pil', as_gray=as_gray)", "def im_open(path):\n\n try:\n assert os.path.isdir(path)\n #get file list in directory - glob includes full path\n files = sorted(glob.glob('{}{}*'.format(path,os.sep)), key=sort_key) \n #load the collection\n raw_stack = io.imread_collection(files)\n #turn the collection into a np array and remove extraneous OCT portion from 1025:1083 on x axis. 
(z,y,x)\n #if .bmp files are open (from pv-oct), the slicing will not affect them, the x-axis is only 540 pixels.\n stack = io.collection.concatenate_images(raw_stack)[:,:,0:1024]\n \n return stack\n\n except AssertionError:\n sys.exit(\"A non-directory object was given to the __open__ function\")", "def get_file_type(file_path):\n ext = pathlib.Path(file_path).suffix\n return \"video\" if ext == \".avi\" else \"image\"", "def loadFromFile(cls , filename):\n if FortIO.isFortranFile( filename ):\n return EclGrid( filename )\n else:\n return EclGrid.loadFromGrdecl( filename )", "def render_vtk(file_name):\n import vtk\n\n # Read the source file.\n reader = vtk.vtkUnstructuredGridReader()\n reader.SetFileName(file_name)\n reader.Update() # Needed because of GetScalarRange\n output = reader.GetOutput()\n scalar_range = output.GetScalarRange()\n\n # Create the mapper that corresponds the objects of the vtk.vtk file\n # into graphics elements\n mapper = vtk.vtkDataSetMapper()\n mapper.SetInputData(output)\n mapper.SetScalarRange(scalar_range)\n\n # Create the Actor\n actor = vtk.vtkActor()\n actor.SetMapper(mapper)\n\n # Create the Renderer\n renderer = vtk.vtkRenderer()\n renderer.AddActor(actor)\n renderer.SetBackground(1, 1, 1) # Set background to white\n\n # Create the RendererWindow\n renderer_window = vtk.vtkRenderWindow()\n renderer_window.AddRenderer(renderer)\n\n # Create the RendererWindowInteractor and display the vtk_file\n interactor = vtk.vtkRenderWindowInteractor()\n interactor.SetRenderWindow(renderer_window)\n interactor.Initialize()\n interactor.Start()", "def dispatch_loader(fname, direc, sep=\"\\t\"):\n ext = fname.split(\".\")[-1]\n # print('Loading from: {}/{}'.format(direc, fname))\n if ext in (\"tsv\" or \"txt\"):\n return load_df_from_txt(fname, direc, sep)\n elif ext == \"pkl\":\n return load_df_from_pkl(fname, direc)\n else:\n raise IOError(\"Unexpected file extension {}.\".format(ext))", "def __init__(self, fits_file, ext=0):", "def read(dataset = \"training\", path = \".\"):\n\n if dataset is \"training\":\n fname_img = os.path.join(path, 'train-images-idx3-ubyte')\n fname_lbl = os.path.join(path, 'train-labels-idx1-ubyte')\n elif dataset is \"testing\":\n fname_img = os.path.join(path, 't10k-images-idx3-ubyte')\n fname_lbl = os.path.join(path, 't10k-labels-idx1-ubyte')\n else:\n raise ValueError, \"dataset must be 'testing' or 'training'\"\n\n # Load everything in some numpy arrays\n with open(fname_lbl, 'rb') as flbl:\n magic, num = struct.unpack(\">II\", flbl.read(8))\n lbl = np.fromfile(flbl, dtype=np.int8)\n\n with open(fname_img, 'rb') as fimg:\n magic, num, rows, cols = struct.unpack(\">IIII\", fimg.read(16))\n img = np.fromfile(fimg, dtype=np.uint8).reshape(len(lbl), rows, cols)\n\n get_img = lambda idx: (lbl[idx], img[idx])\n\n # Create an iterator which returns each image in turn\n for i in xrange(len(lbl)):\n yield get_img(i)", "def imread(filename, *args, **kwargs):\r\n try:\r\n netpbm = NetpbmFile(filename)\r\n image = netpbm.asarray()\r\n finally:\r\n netpbm.close()\r\n return image", "def image_loader(fileobj):\n if isinstance(fileobj, six.string_types):\n return cv2.imread(fileobj, cv2.IMREAD_COLOR)[..., ::-1] #bgr->rgb\n elif isinstance(fileobj, bytes):\n byte_arr = bytearray(fileobj)\n else:\n byte_arr = bytearray(fileobj.read())\n \n return cv2.imdecode(np.asarray(byte_arr, dtype=np.uint8), cv2.IMREAD_COLOR)[..., ::-1] #bgr->rgb", "def loader(path):\n img = np.load(path)\n img = img[1:4]\n if np.random.choice((True, False)):\n img = img[:, :, 
::-1]\n img = np.array(img)\n if np.random.choice((True, False)):\n img = img[:, ::-1, :]\n img = np.array(img)\n\n img = img.transpose((1, 2, 0)) # pytorch is going to rotate it back\n return img", "def read_image(self, item):\n assert item['image_dtype'] == 'uint16'\n\n filename = os.path.join(self.home(item['basename']))\n s = open(filename, 'rb').read()\n assert hashlib.md5(s).hexdigest() == item['md5']\n img = np.fromstring(s, dtype=item['image_dtype']).byteswap()\n img = img.reshape(item['image_shape'])\n return img", "def read_file(path: str, flag: bool, size=224):\n img_path = sorted(os.listdir(path))\n img_num = len(img_path)\n img_data = np.zeros((img_num, 224, 224, 3), dtype=np.uint8)\n img_label = np.zeros(img_num, dtype=np.uint8)\n for lps, no in enumerate(img_path):\n img = cv2.imread(os.path.join(path, no))\n img_data[lps, :, :] = cv2.resize(img, (224, 224))\n if flag:\n img_label[lps] = int(no.split(\"_\")[0])\n if flag:\n return img_data, img_label\n else:\n return img_data", "def __getitem__(self, idx):\n im = Image.open(self.data_path + self.sample_df.loc[idx,'filename'])\n # load label\n label = torch.tensor(self.sample_df.loc[idx,'abnormal_XR'])\n # load mask\n if self.load_mask:\n mask = Image.open(self.data_path + self.sample_df.loc[idx,'mask_filename'])\n else:\n mask = None\n\n # load semi-label\n if self.load_semilabels:\n semi_label = torch.tensor(self.sample_df.loc[idx, 'semi_label'])\n else:\n semi_label = None\n\n im, mask = self.transform(im, mask)\n\n return im, label, mask, semi_label, torch.tensor(idx)", "def load_metaimage(path, voxel=True):\n\n meta = SimpleITK.ReadImage(path)\n\n if voxel:\n voxel_data = SimpleITK.GetArrayFromImage(meta)\n meta = [voxel_data, meta]\n\n return meta", "def tiffread(f):\n if type(f) is str:\n # single image\n im = tf.imread(f)\n return im\n\n elif type(f) is list and len(f) == 3:\n # return rgb stack\n f.sort(reverse=True) # so r, g, b\n ims = [tf.imread(x) for x in f]\n return np.dstack(ims)\n else:\n raise ValueError(\"f must be a string or list of 3 strings\")", "def read(path: Union[Path, str]) -> np.ndarray:\n return _reader.imread(str(path))", "def read(fname, fmt=None):\n if not fmt:\n fmt = fname.split(\".\")[-1]\n\n if fmt in ['yml', 'yaml']:\n return _storage_read(fname, yaml.safe_load)\n elif fmt == \"json\":\n return _storage_read(fname, json.load)\n elif fmt == \"pickle\":\n return _storage_read(fname, pickle.load, 'rb')\n else:\n raise Exception()", "def __getitem__(self, idx):\n\n image = Image.open(self.filenames[idx])\n image = self.transform(image)\n\n return image, self.labels[idx]", "def _get_img_tensor(self, fname, internal_transform):\n transforms = list(self.base_transforms)\n if internal_transform:\n transforms.insert(1, internal_transform)\n\n return T.Compose(transforms)(Image.open(self.imgs_root / fname))" ]
[ "0.64241487", "0.5978514", "0.59677935", "0.5905505", "0.5868323", "0.5654292", "0.5641504", "0.5602225", "0.5563264", "0.55544156", "0.55478626", "0.55258757", "0.5498806", "0.54907084", "0.5490219", "0.5475998", "0.5461645", "0.5444304", "0.54409236", "0.541251", "0.5400047", "0.53965837", "0.5396306", "0.5388983", "0.5361113", "0.5355787", "0.53399056", "0.5339533", "0.5339176", "0.5320629", "0.53079545", "0.53033334", "0.529854", "0.52977824", "0.5296988", "0.52954155", "0.526601", "0.5261992", "0.52300787", "0.521139", "0.5209631", "0.5209493", "0.5188725", "0.51776206", "0.51772445", "0.5173592", "0.51719373", "0.515757", "0.515743", "0.51536745", "0.51271236", "0.5125388", "0.51247317", "0.512316", "0.51221216", "0.5111944", "0.5109721", "0.51094437", "0.5104292", "0.51030254", "0.5094585", "0.5091795", "0.50719583", "0.5065389", "0.5054199", "0.50478184", "0.5045613", "0.5020838", "0.5014986", "0.5004078", "0.5004003", "0.4996389", "0.49843624", "0.4983675", "0.49824637", "0.4981559", "0.4981559", "0.4972791", "0.49643743", "0.4948976", "0.49402356", "0.493676", "0.4936308", "0.49346223", "0.49320468", "0.49287787", "0.49278852", "0.4927464", "0.4920612", "0.49205703", "0.49187043", "0.4910923", "0.4907226", "0.49006066", "0.4895799", "0.48946625", "0.48905966", "0.48880735", "0.48825547", "0.48809218" ]
0.75270355
0
create the reader list from a given set of file names and parameters
def getReadersFromFilenames(self): for i in self.readers: del i self.readers = [] if not self.filenames: raise Logging.GUIError("No files could be found", \ "For some reason, no files were listed to be imported.") files = self.filenames print "Determining readers from ", self.filenames isRGB = 1 self.ext = files[0].split(".")[-1].lower() dim = self.dimMapping[self.ext] # Initially flip the image if it's tiff, png or jpg. # In setVerticalFlip we negate the setting to have it set correctly. if self.ext.lower() in ["png", "jpg", "jpeg"]: self.flipVertically = True if self.ext in ["tif", "tiff"]: reader = vtkbxd.vtkExtTIFFReader() reader.SetFileName(files[0]) reader.UpdateInformation() if reader.GetNumberOfScalarComponents() >= 3: print "MODE IS RGB, IS AN RGB IMAGE" else: print "MODE ISN'T RGB, THEREFORE NOT RGB" isRGB = 0 rdr = self.getReaderByExtension(self.ext, isRGB) rdr.SetFileName(files[0]) if rdr.GetNumberOfSubFiles() > 1: dim = 3 self.isRGB = isRGB self.is3D = (dim == 3) dirName = os.path.dirname(files[0]) print "THERE ARE", self.slicesPerTimepoint, "SLICES PER TIMEPOINT" self.ext = files[0].split(".")[-1].lower() if dim == 3: totalFiles = len(files) for i, file in enumerate(files): rdr = self.getReaderByExtension(self.ext, isRGB) rdr.SetFileName(file) self.readers.append(rdr) return totalFiles = len(files) / self.slicesPerTimepoint imgAmnt = len(files) if totalFiles == 1: rdr = self.getReaderByExtension(self.ext, isRGB) arr = vtk.vtkStringArray() for fileName in files: arr.InsertNextValue(os.path.join(dirName, fileName)) rdr.SetFileNames(arr) self.readers.append(rdr) return if imgAmnt > 1: # If the pattern doesn't have %, then we just use # the given filenames and allocate them to timepoints # using slicesPerTimepoint slices per timepoint ntps = len(files) / self.slicesPerTimepoint filelst = files[:] # dirn #TODO: what was this? for tp in range(0, ntps): rdr = self.getReaderByExtension(self.ext, isRGB) arr = vtk.vtkStringArray() for i in range(0, self.slicesPerTimepoint): arr.InsertNextValue(filelst[0]) filelst = filelst[1:] rdr.SetFileNames(arr) rdr.SetDataExtent(0, self.x - 1, 0, self.y - 1, 0, self.slicesPerTimepoint - 1) rdr.SetDataSpacing(self.spacing) rdr.SetDataOrigin(0, 0, 0) self.readers.append(rdr) return elif imgAmnt == 1: # If only one file rdr = self.getReaderByExtension(self.ext, isRGB) rdr.SetDataExtent(0, self.x - 1, 0, self.y - 1, 0, self.slicesPerTimepoint - 1) rdr.SetDataSpacing(self.spacing) rdr.SetDataOrigin(0, 0, 0) rdr.SetFileName(files[0]) Logging.info("Reader = ", rdr, kw = "io") self.readers.append(rdr)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initialize_file_readers():\n savefile_path = os.path.join(os.getcwd()+ \"/../data/\", SAVE_FILE)\n file_reader_list = []\n for file in os.listdir(savefile_path):\n file_reader = open(os.path.join(savefile_path,file), \"r\")\n file_reader_list.append({\"file_reader\": file_reader, \"last_read\": { \"word\": \"\", \"doc_score_list\": []}})\n return file_reader_list", "def get_file_list(params):\n if params['mode'] == 'test':\n create_file_list(params)\n\n with open(params['file_list']) as flist:\n full_lines = [line.strip() for line in flist]\n\n full_lines = shuffle_lines(full_lines, params[\"shuffle_seed\"])\n\n # use only partial data for each trainer in distributed training\n if params['mode'] == 'train':\n real_trainer_num = max(trainers_num, 1)\n img_per_trainer = len(full_lines) // real_trainer_num\n full_lines = full_lines[trainer_id::real_trainer_num][:img_per_trainer]\n\n return full_lines", "def __init__(self, files):\n self.files = files and [NamedFile(data=d, filename=fn) for (d, fn) in files]", "def _load_seqs(file_format, filename, fmt, kw, parser_kw):\n fmt = fmt or file_format\n if not fmt:\n msg = \"could not determined file format, set using the format argument\"\n raise ValueError(msg)\n parser_kw = parser_kw or {}\n for other_kw in (\"constructor_kw\", \"kw\"):\n other_kw = kw.pop(other_kw, None) or {}\n kw.update(other_kw)\n return list(FromFilenameParser(filename, fmt, **parser_kw))", "def _read(self, file_paths: List[str], **read_opts) -> List[DataFrame]:\n return [read_file(file_path, **read_opts) for file_path in file_paths]", "def MFileReader(fvcom, *args, **kwargs):\n\n if isinstance(fvcom, str):\n FVCOM = FileReader(fvcom, *args, **kwargs)\n else:\n for file in fvcom:\n if file == fvcom[0]:\n FVCOM = FileReader(file, *args, **kwargs)\n else:\n FVCOM += FileReader(file, *args, **kwargs)\n\n return FVCOM", "def read_data_files(filenames, datapath, ids=None):\n filenames = np.array(filenames) # make sure it's array\n if ids is None:\n ids = range(0, len(filenames))\n\n for i in [filenames[k] for k in ids]:\n yield str(open(datapath+i, 'r').read())", "def files_constructor(cmd_args, file_type):\n\n if file_type == 'learning':\n learning_files_list = [LearningFile(address, cmd_args.primary_selex_sequence) for address in cmd_args.learning_file_list]\n [learning_files_list[i].cycle_matrix(i, len(learning_files_list)) for i in range(len(learning_files_list))]\n return learning_files_list\n\n elif file_type == 'prediction':\n if cmd_args.prediction_file:\n return PredictionFile(cmd_args.prediction_file)\n else:\n return None\n\n else:\n 'the user can insert here some code for suppeltementary files'", "def cluster_files_reader(\n files_pattern, trainer_count, trainer_id, loader=pickle.load\n):\n\n def reader():\n if not callable(loader):\n raise TypeError(\"loader should be callable.\")\n file_list = glob.glob(files_pattern)\n file_list.sort()\n my_file_list = []\n for idx, fn in enumerate(file_list):\n if idx % trainer_count == trainer_id:\n print(\"append file: %s\" % fn)\n my_file_list.append(fn)\n for fn in my_file_list:\n with open(fn, \"r\") as f:\n lines = loader(f)\n for line in lines:\n yield line\n\n return reader", "def parse_parameters(filename):\n\n # read in the parameters\n mainInput = ParserClass.Parser(filename)\n if 'LogFile' in mainInput['Inputs']:\n if mainInput['Inputs']['LogFileUsePID']:\n logger = Logging.Logger(mainInput['Inputs']['LogFile']+'_{}'.format(os.getpid()))\n else:\n logger = Logging.Logger(mainInput['Inputs']['LogFile'])\n \n 
else:\n logger = print\n\n # Generate a filelist to loop over\n filelist = np.loadtxt(mainInput['Inputs']['filelist'],dtype=str,ndmin=1)\n if isinstance(mainInput['Inputs']['data_dir'], type(None)):\n filelist = [filename for filename in filelist]\n else:\n filelist = ['{}/{}'.format(mainInput['Inputs']['data_dir'],\n filename.split('/')[-1]) for filename in filelist]\n \n # Some items should always be a list\n if not isinstance(mainInput['Inputs']['pipeline'], list):\n mainInput['Inputs']['pipeline'] = [mainInput['Inputs']['pipeline']]\n # Get the class names (modulename, classname)\n jobnames = [c for c in mainInput['Inputs']['pipeline']]\n\n logger('Running: '+' '.join(mainInput['Inputs']['pipeline']))\n\n\n prejobnames = [c for c in mainInput['Inputs']['preamble']]\n\n\n # Read the class parameter file\n classInput = ParserClass.Parser(mainInput['Inputs']['classParameters'])\n\n # Initalise the classes : classInput are the kwargs to initiate classes\n jobs = []\n for job in jobnames:\n jobs += [getClass(job)(logger=logger,**classInput[job])]\n\n # Initalise the classes : classInput are the kwargs to initiate classes\n prejobs = []\n for prejob in prejobnames:\n prejobs += [getClass(prejob)(logger=logger,**classInput[prejob])]\n\n\n return jobs,prejobs, filelist, mainInput, classInput, logger", "def __init__(self, dataset_dir, listfile=None):\n Reader.__init__(self, dataset_dir, listfile)\n self._data = [line.split(',') for line in self._data]\n\n def process_ihm(x):\n return list(map(int, x.split(';')))\n\n def process_los(x):\n x = x.split(';')\n if x[0] == '':\n return ([], [])\n return (list(map(int, x[:len(x)//2])), list(map(float, x[len(x)//2:])))\n\n def process_ph(x):\n return list(map(int, x.split(';')))\n\n def process_decomp(x):\n x = x.split(';')\n if x[0] == '':\n return ([], [])\n return (list(map(int, x[:len(x)//2])), list(map(int, x[len(x)//2:])))\n\n self._data = [(fname, float(t), process_ihm(ihm), process_los(los),\n process_ph(pheno), process_decomp(decomp))\n for fname, t, ihm, los, pheno, decomp in self._data]", "def read_files(filenames, gram_size=1):\n assert isinstance(filenames, list), \"filenames argument must be a list\"\n parser = MorParser()\n for fn in filenames:\n for uid, speaker, ngram in generate_chunks(parser.parse(fn), gram_size):\n yield fn, uid, speaker, ngram", "def construct_bibfile_data(*paths):\n bibs = [reffile_factory(path) for path in paths]\n return bibs", "def parseInputFileList (self) :\n filelist = []\n try :\n with open (self.cfgName) as fIn:\n for line in fIn:\n line = (line.split(\"#\")[0]).strip()\n if line:\n self.lines.append(line)\n except IOError:\n print \"*** WARNING: cfg file \" , self.cfgName , \" not found\"\n return\n\n #return filelist", "def make_input_list(self):\n input_entries = [i for i in self.params.input if i != None]\n input_list = []\n\n # run through the list of multiple input entries (or just the one) and\n # concatenate the input list (right now GUI only supplies folder, but\n # this will change in future)\n for input_entry in input_entries:\n if os.path.isfile(input_entry):\n if input_entry.endswith('.lst'): # read from file list\n with open(input_entry, 'r') as listfile:\n listfile_contents = listfile.read()\n input_list.extend(listfile_contents.splitlines())\n elif input_entry.endswith(('pickle', 'mccd', 'cbf', 'img')):\n input_list.append(input_entry) # read in image directly\n\n elif os.path.isdir(input_entry):\n abs_inp_path = os.path.abspath(input_entry)\n for root, dirs, files in 
os.walk(abs_inp_path):\n for filename in files:\n found_file = os.path.join(root, filename)\n if found_file.endswith(('pickle', 'mccd', 'cbf', 'img')):\n input_list.append(found_file)\n\n # Pick a randomized subset of images\n if self.params.advanced.random_sample.flag_on and \\\n self.params.advanced.random_sample.number < len(input_list):\n inp_list = self.select_random_subset(input_list)\n else:\n inp_list = input_list\n\n return inp_list", "def read_file(self, file_name_list):\n\n # Iterating over the file name list\n for file_name in file_name_list:\n\n # Opening MTF file\n #try: \n mtf_file = open(file_name,\"r\")\n #except Exception: pass # TODO\n\n # Reading file\n for line in mtf_file:\n # Processing line\n line_list = line.strip().split(\"\\t\")\n tf_id=line_list[0]\n name=line_list[1]\n database=line_list[2]\n tf_class=int(line_list[3])\n genes=line_list[4].split(\";\")\n genes_suffix=line_list[5].split(\";\")\n\n self.add(Motif(tf_id, name, database, tf_class, genes, genes_suffix))\n\n\n # Termination\n mtf_file.close()", "def read_dir():\n file_list=[]\n title_list = []\n for filename in os.listdir(\"alignments/\"):\n if filename.endswith(\".aln\"): #Retrieve only alignment files.\n file_list.append(filename)\n with open (\"genID.txt\",'r') as x: #The genID.txt file contains relevant gene names.\n while True:\n rule = x.readline()\n if len(rule) > 0: #If the rule is empty, the program does not use it.\n if rule[0] == \"B\": #Only fetch gen names.\n title_list.append(rule) #The title_list is used to create the variant files in a later stadium\n else:\n break\n return file_list,title_list", "def read(self, filenames, encoding=None):\n if isinstance(filenames, str):\n filenames = [filenames]\n read_ok = []\n for filename in filenames:\n try:\n with open(filename, encoding=encoding) as f:\n self.read_file(f)\n except OSError:\n continue\n read_ok.append(filename)\n return read_ok", "def load(cls, filenames, config_factory=Config):\n return cls([config_factory(f, label=label) for label, f in filenames])", "def create_read_list(samfile):\n read_sampler = ReadSampler()\n for line in samfile:\n line = sam_utils.SamAlignment(line)\n vals = line.get_aligned_blocks()\n if len(vals) > 1:\n logging.info(\"Skipping gapped read %s %s\"%(line.QNAME, str(vals))) \n read_sampler.add_read(vals[0])\n return read_sampler", "def __init__(self, _format, name=\"Main_list.txt\"):\n self.format = _format\n self.name = name\n self.path = self.format + os.sep + self.name\n self.list = []\n self.length = 0", "def files_to_reduce(parameters, evaluate_files):\n files_to_reduce = []\n sample = []\n shear = []\n\n if len(evaluate_files) == 0:\n files_to_reduce.extend(parameters)\n else:\n # call function for retrieve the IDs list\n evaluate_files_l = evaluate_files_list(evaluate_files)\n for parameter in parameters:\n if int(parameter['index']) in evaluate_files_l:\n files_to_reduce.append(parameter['filename'])\n sample.append(parameter['sample'])\n shear.append(parameter['shear'])\n\n return files_to_reduce, sample, shear", "def _read_files(self) -> MMD:\n\t\theaders = []\n\t\tbodies = []\n\t\tif self.config.file_type == FileType.CSV:\n\t\t\tif self.config.source_uris.endswith('.zip'):\n\t\t\t\twith ZipFile(self.config.source_uris) as zf:\n\t\t\t\t\tfor item in zf.namelist():\n\t\t\t\t\t\tif item.endswith('.csv'):\n\t\t\t\t\t\t\t# with zf.open(item, 'r') as infile:\n\t\t\t\t\t\t\tcsv_reader = csv.reader(TextIOWrapper(zf.open(item, 'r'), 
'utf-8'))\n\t\t\t\t\t\t\theaders.append(next(csv_reader))\n\t\t\t\t\t\t\t# need to find a more efficient way, the csv reader is a generator that can only be used once\n\t\t\t\t\t\t\tbodies.append(list(csv_reader))\n\t\t\telif self.config.source_uris.endswith('.csv'):\n\t\t\t\tfor uri in self.config.source_uris:\n\t\t\t\t\tif uri.endswith('.csv'):\n\t\t\t\t\t\tcsv_reader = csv.reader(open(uri, newline='', encoding='utf-8'))\n\t\t\t\t\t\theaders.append(next(csv_reader))\n\t\t\t\t\t\tbodies.append(list(csv_reader))\n\t\telif self.config.file_type == FileType.CNSCHEMA:\n\t\t\theader = ['@id', 'label_@language', 'label_@value']\n\t\t\tbody = []\n\t\t\twith open(self.config.source_uris, 'r') as load_f:\n\t\t\t\tload_dict = json.load(load_f)\n\t\t\t\theader.extend(load_dict['@context'].keys())\n\t\t\t\theader = [h for h in header if h not in ['label', 'range', 'domain', 'subClassOf']]\n\t\t\t\ttmp_h = [h for h in header if h not in ['@id', '@language', '@value']]\n\t\t\t\tfor item in load_dict['@graph']:\n\t\t\t\t\tif item['@id'].split('/')[-2] == 'resource':\n\t\t\t\t\t\trow = [item['@id'], item['label']['@language'], item['label']['@value']]\n\t\t\t\t\t\tfor h in tmp_h:\n\t\t\t\t\t\t\tif h in item:\n\t\t\t\t\t\t\t\trow.append(item[h])\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\trow.append(None)\n\t\t\t\t\t\tbody.append(tuple(row))\n\t\t\theaders.append(tuple(header))\n\t\t\tbodies.append(body)\n\t\telif self.config.file_type == FileType.OPENBASE:\n\t\t\theader = []\n\t\t\tbody = []\n\t\t\twith open(self.config.source_uris, 'r') as load_f:\n\t\t\t\tfor line in load_f:\n\t\t\t\t\trow = []\n\t\t\t\t\tflat_line = flatten_json(json.loads(line))\n\t\t\t\t\tfor key in flat_line:\n\t\t\t\t\t\tif key not in header:\n\t\t\t\t\t\t\theader.append(key)\n\t\t\t\t\tfor h in header:\n\t\t\t\t\t\tif h in flat_line:\n\t\t\t\t\t\t\trow.append(flat_line[h])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\trow.append(None)\n\t\t\t\t\tbody.append(row)\n\t\t\tfor item in body:\n\t\t\t\tif len(item) < len(header):\n\t\t\t\t\titem.extend([None for i in range(len(header) - len(item))])\n\t\t\theaders.append(tuple(header))\n\t\t\tbodies.append(tuple([tuple(item) for item in body]))\n\t\telif self.config.file_type == FileType.OPENKS:\n\t\t\t# knowledge graph dataset loading \n\t\t\tif os.path.exists(self.config.source_uris + '/entities') and os.path.exists(self.config.source_uris + '/triples'):\n\t\t\t\theaders = [['entities'], ['triples']]\n\t\t\t\tfor file in ['entities', 'triples']:\n\t\t\t\t\ttmp = []\n\t\t\t\t\twith open(self.config.source_uris + '/' + file, 'r') as load_f:\n\t\t\t\t\t\tfor line in load_f:\n\t\t\t\t\t\t\ttmp.append(tuple([item.strip() for item in line.split('\\t')]))\n\t\t\t\t\t\tbodies.append(tuple(tmp))\n\t\t\t# general text dataset loading\n\t\t\telif os.path.exists(self.config.source_uris + '/train') and os.path.exists(self.config.source_uris + '/valid'):\n\t\t\t\theaders = [['train'], ['valid']]\n\t\t\t\tfor file in ['train', 'valid']:\n\t\t\t\t\ttmp = []\n\t\t\t\t\twith open(self.config.source_uris + '/' + file, 'r') as load_f:\n\t\t\t\t\t\tfor line in load_f:\n\t\t\t\t\t\t\ttmp.append(tuple([item.strip() for item in line.split('@@')]))\n\t\t\t\t\t\tbodies.append(tuple(tmp))\n\t\t\telse:\n\t\t\t\tlogger.warn('Only allows loading with entities and triples for now!')\n\t\t\t\traise IOError\n\t\telif self.config.file_type == FileType.NERO:\n\t\t\theaders = [['unlabeled_data'], ['predict'], ['pattern']]\n\t\t\tfor file in ['unlabeled_data', 'predict', 'pattern']:\n\t\t\t\ttmp = []\n\t\t\t\twith 
open(self.config.source_uris + '/' + file + '.json', 'r') as load_f:\n\t\t\t\t\tfor line in load_f:\n\t\t\t\t\t\ttmp.append(line.strip())\n\t\t\t\t\tbodies.append(tuple(tmp))\n\n\t\tmmd.name = self.config.data_name\n\t\tmmd.headers = headers\n\t\tmmd.bodies = bodies\n\t\treturn mmd", "def read_input_files(self):\r\n\r\n for input_file in self.list_of_input_files:\r\n input_file.read_header_of_file()\r\n self.list_of_header_objects.extend(input_file.list_of_header_objects)\r\n self.list_of_header_objects_without_ID.extend(input_file.list_of_header_objects_without_ID)\r\n self.list_of_contigs.extend(input_file.list_of_contigs)\r\n\r\n self.list_of_header_objects = list(toolz.unique(self.list_of_header_objects, key=lambda x: x.tag_and_ID))\r\n self.list_of_header_objects_without_ID = list(\r\n toolz.unique(self.list_of_header_objects_without_ID, key=lambda x: x.line))\r\n self.list_of_contigs = list(toolz.unique(self.list_of_contigs, key=lambda x: x.line))\r\n self.list_of_header_objects.extend(self.list_of_header_objects_without_ID)\r\n self.list_of_header_objects.sort(key=lambda x: x.line)\r\n self.list_of_header_objects.extend(self.list_of_contigs)\r\n self.list_of_header_objects.sort(key=lambda x: x.tag, reverse=False)\r\n self.create_body_header_line_for_output()\r\n self.write_header_in_output_file()\r\n\r\n list_of_chrom = list(self.indices.keys())\r\n list_of_chrom.sort(key=lambda x: self.alphanum_key(x))\r\n for chrom in list_of_chrom:\r\n self.list_of_body_objects.clear()\r\n for input_file in self.list_of_input_files:\r\n input_file.read_specific_chrom_body_of_file(chrom)\r\n self.list_of_body_objects.extend(input_file.list_of_body_objects)\r\n\r\n self.adjust_body_records_to_samples()\r\n self.list_of_body_objects = list(toolz.unique(self.list_of_body_objects, key=lambda x: x.line))\r\n self.list_of_body_objects.sort(key=lambda x: self.alphanum_key(x.line))\r\n self.verify_and_merge_body_records()\r\n self.write_specific_chrom_in_output_file()", "def initialize_files(file_name, ran, file_extension):\r\n \"\"\"Specifiy the exact file name and the number of files --> file_name_(range) e.g file_name=chickens ,ran=16\"\"\"\r\n answer_file_rep = [file_name + str(number) for number in range(1, ran)]\r\n answer_files = [file + \"{}\".format(file_extension) for file in answer_file_rep]\r\n answers = [\"answer\" + str(number) for number in range(1, ran)]\r\n return answer_files, ran, answers", "def from_filenames(\n filenames,\n verbose=False,\n unhandled=None,\n compare_beam=None,\n compare_detector=None,\n compare_goniometer=None,\n scan_tolerance=None,\n format_kwargs=None,\n load_models=True,\n ):\n experiments = ExperimentList()\n for db in DataBlockFactory.from_filenames(\n filenames,\n verbose=verbose,\n unhandled=unhandled,\n compare_beam=compare_beam,\n compare_detector=compare_detector,\n compare_goniometer=compare_goniometer,\n scan_tolerance=scan_tolerance,\n format_kwargs=format_kwargs,\n ):\n experiments.extend(\n ExperimentListFactory.from_datablock_and_crystal(db, None, load_models)\n )\n return experiments", "def loadFromFile(self, filename):\n\t\treturn []", "def _open_files(inputs, mode):\n assert isinstance(inputs, list)\n\n local_open = pf.open\n return [local_open(ffile, mode=mode) for ffile in inputs]", "def read_in_LC_files(input_files, obj_names, style='SNANA'):\n LC_list = []\n if style == 'SNANA':\n for i, input_file in enumerate(input_files):\n t, f, filts, err = np.genfromtxt(input_file,\n usecols=(1, 4, 2, 5), skip_header=18,\n skip_footer=1, unpack=True, 
dtype=str)\n t = np.asarray(t, dtype=float)\n f = np.asarray(f, dtype=float)\n err = np.asarray(err, dtype=float)\n\n sn_name = obj_names[i]\n new_LC = LightCurve(sn_name, t, f, err, filts)\n LC_list.append(new_LC)\n else:\n raise ValueError('Sorry, you need to specify a data style.')\n return LC_list", "def buildfilelist():\r\n for files in filelist:\r\n if os.path.splitext(files)[1]=='.dxf': #查找目录下的dxf文件,加入到readfilelist文件列表中 \r\n readfilelist.append(files)\r\n #feilin=file('feilin(ph).dxf','w') #新建一个文件,名字先占位用,后续改成由配置文件中读入名称。 \r", "def parse_files(files):\n ans = []\n if files:\n for f in files:\n split = f.split(\"=\")\n if len(split) != 2:\n raise Exception(\"invalid file specification '%s'\" % f)\n ans.append((split[0], split[1]))\n return ans", "def read(self, filenames):\n if isinstance(filenames, basestring):\n filenames = [filenames]\n read_ok = []\n for filename in filenames:\n try:\n fp = open(filename)\n except IOError:\n continue\n self._read(fp, filename)\n fp.close()\n read_ok.append(filename)\n return read_ok", "def Make_FileList(input_name):\n\tif args.input_type == 'FILE':\n\t\tFileList=[]\n\t\tFileList.append(input_name)\n\tif args.input_type == 'FOLDER':\n\t\tFileList = glob.glob('%s/*' % input_name)\n\treturn FileList", "def __init__(self):\n self.filelist = list()", "def files_list_reduce(filename, fieldnames):\n\n parameters = []\n with open(filename) as csv_file:\n\n reader = csv.DictReader(csv_file, fieldnames=fieldnames)\n iterRows = iter(reader)\n next(iterRows)\n for row in iterRows:\n if row['index'] == '':\n continue\n if row['index'] == 'END':\n break\n parameters.append(row)\n return parameters", "def read(self,filenames):\n\n if isinstance(filenames, basestring):\n filenames = [filenames]\n read_ok = []\n for filename in filenames:\n try:\n fp = open(filename)\n except IOError:\n continue\n self._read(fp)\n fp.close()\n read_ok.append(filename)\n return read_ok", "def get_data(methylation_files, names, window, smoothen=5):\n return [read_meth(f, n, window, smoothen) for f, n in zip(methylation_files, names)]", "def read_data(r_filename, nr_filename, config):\n\t\n\t# Create a queue for each file\n\tr_queue = tf.train.string_input_producer(r_filename)\n\tnr_queue = tf.train.string_input_producer(nr_filename)\n\n\tr_result = _read_from_file(r_queue, config, class_label = 1)\n\tnr_result = _read_from_file(nr_queue, config, class_label = 0)\n\n\tmin_queue_examples = 100 # Currently an arbitrary number\n\n\tr_sequences, r_label_batch, r_subjects, r_names, r_coords, r_features = _generate_half_batch(\n\t\t\t\t\t\t\t\t\t\t\t\tr_result,\n\t\t\t\t\t\t\t\t\t\t\t\tmin_queue_examples,\n\t\t\t\t\t\t\t\t\t\t\t\tbatch_size = config.batch_size,\n\t\t\t\t\t\t\t\t\t\t\t\tnum_steps = config.num_steps,\n\t\t\t\t\t\t\t\t\t\t\t\ttest_mode = config.test_mode)\n\tnr_sequences, nr_label_batch, nr_subjects, nr_names, nr_coords, nr_features = _generate_half_batch(\n\t\t\t\t\t\t\t\t\t\t\t\tnr_result,\n\t\t\t\t\t\t\t\t\t\t\t\tmin_queue_examples,\n\t\t\t\t\t\t\t\t\t\t\t\tbatch_size = config.batch_size,\n\t\t\t\t\t\t\t\t\t\t\t\tnum_steps = config.num_steps,\n\t\t\t\t\t\t\t\t\t\t\t\ttest_mode = config.test_mode)\n\n\tsequence_batches_joined = tf.concat([r_sequences, nr_sequences], 0)\n\tlabel_batches_joined = tf.concat([r_label_batch, nr_label_batch], 0)\n\tsubjects_batches_joined = tf.concat([r_subjects, nr_subjects], 0)\n\tnames_batches_joined = tf.concat([r_names, nr_names], 0)\n\tcoords_batches_joined = tf.concat([r_coords, nr_coords], 0)\n\tfeature_batches_joined = 
tf.concat([r_features, nr_features], 0)\n\n\treturn sequence_batches_joined, label_batches_joined, subjects_batches_joined, names_batches_joined, coords_batches_joined, feature_batches_joined", "def open_netcdf_files(rlzn_path_list,name_prefix): #{{{\n\n fopen_list = []\n for path in rlzn_path_list:\n netcdf_name = glob.glob(path+'/'+name_prefix)\n fopen_list.append(netCDF4.Dataset(netcdf_name[0],'r'))\n\n return fopen_list #}}}", "def _generate_examples(self, files):\n idx = 0\n for filename in files:\n with open(filename) as file:\n for line in file:\n yield idx, {\"text\": line}\n idx += 1", "def build_file_list(location, filters):\n f = []\n for (dir_path, dir_name, file_names) in os.walk(location):\n for file in file_names:\n f.append(os.path.join(dir_path, file))\n obj_list = map(lambda file: os.path.join(location, file), f)\n\n if type(filters) == list:\n for filter in filters:\n obj_list = [i for i in obj_list if filter in i]\n else:\n obj_list = [i for i in obj_list if filters in i]\n\n return obj_list", "def prepare_list_of_files(kernel_name, kernel_file_list, params, grid, threads, block_size_names):\n temp_files = dict()\n\n kernel_string = get_kernel_string(kernel_file_list[0], params)\n name, kernel_string = prepare_kernel_string(kernel_name, kernel_string, params, grid, threads, block_size_names)\n\n if len(kernel_file_list) > 1:\n for f in kernel_file_list[1:]:\n #generate temp filename with the same extension\n temp_file = get_temp_filename(suffix=\".\" + f.split(\".\")[-1])\n temp_files[f] = temp_file\n #add preprocessor statements to the additional file\n _, temp_file_string = prepare_kernel_string(kernel_name, get_kernel_string(f, params), params, grid, threads, block_size_names)\n write_file(temp_file, temp_file_string)\n #replace occurences of the additional file's name in the first kernel_string with the name of the temp file\n kernel_string = kernel_string.replace(f, temp_file)\n\n return name, kernel_string, temp_files", "def generate_read_list(num_files, world_size):\n return np.array_split(np.arange(num_files), world_size)", "def prepare_reader(self, unused_filename_queue):\n raise NotImplementedError()", "def get_file_list(mixer_file, select_random, use_list_of_files):\n logger = logging.getLogger(get_file_list.__name__)\n files = list()\n\n if use_list_of_files:\n with open(mixer_file, 'r') as list_file:\n for line in list_file:\n files.append(os.path.join('data/raw',line.strip()))\n\n if select_random:\n random.shuffle(files)\n\n else:\n\n mixer = parse_mixer_file(mixer_file)\n\n for m in mixer:\n path = os.path.join(project_dir, m[0])\n all_mixer_files = [os.path.join(path,f) for f in os.listdir(path) \n if os.path.isfile(os.path.join(path, f)) and f.split('.')[-1] == 'csv']\n\n current_files = list()\n # Check if the number of samples is limited\n if m[2] >= 0:\n sample_count = 0\n for f in all_mixer_files:\n # Get number of lines without the header line\n num_lines = sum(1 for line in open(f)) - 1\n\n if (sample_count + num_lines) > m[2]:\n current_files.append((f, m[2] - sample_count))\n sample_count += (m[2] - sample_count)\n break\n else:\n current_files.append((f, -1))\n sample_count += num_lines\n\n if sample_count < m[2]:\n logger.warn('Not enough samples ({} < {}): {}'.format(sample_count, m[2], m[0]))\n else:\n # No limit, take all samples in the files\n current_files = zip(all_mixer_files, [-1]*len(all_mixer_files))\n\n if m[1] < 0:\n # -1 means all .csv files\n files += current_files\n elif m[1] > 0:\n if m[1] > len(current_files):\n 
logger.warn('Not enough files ({} < {}): {}'.format(len(current_files),\n m[1], m[0]))\n files += current_files[:m[1]]\n\n if select_random:\n random.shuffle(files)\n else:\n files = sorted(files, key=lambda x: int(os.path.basename(x[0]).split('_')[-1].split('.')[0]))\n\n return files", "def _create_filelist(self):\n print \"[--init] creating %s\" % self.file_list\n if self.source_file is not None:\n shutil.copyfile(self.source_file, self.file_list)\n elif self.source_path is not None:\n filenames = get_file_paths(self.source_path)\n if self.shuffle_file:\n random.shuffle(filenames)\n with open(self.file_list, 'w') as fh:\n for fname in filenames:\n fh.write(\"0000\\t\" + fname + \"\\n\")\n else:\n sys.exit(\"[--init] ERROR: \" +\n \"need to define input with --filelist or \" +\n \"--source-directory option, aborting\")\n read_only(self.file_list)", "def __init__(self, names, file_extensions, version):\n assert isinstance(names, collections.abc.Sequence), type(names)\n assert names\n assert isinstance(file_extensions, collections.abc.Sequence), type(file_extensions)\n assert file_extensions\n if __debug__:\n for name in names:\n assert isinstance(name, str), type(name)\n assert name\n for file_extension in file_extensions:\n assert isinstance(file_extension, str), type(file_extension)\n assert file_extension\n assert file_extension.startswith('.'), file_extension\n assert isinstance(version, tuple) or version is None\n self.names = [name for name in names]\n self.default_name = self.names[0]\n self.file_extensions = [file_extension.lower() for file_extension in file_extensions]\n self.default_file_extension = self.file_extensions[0]\n self.version = version", "def parse_from_list(l, os_name=None):\n\n if os_name in ['windows', 'nt']:\n FilePathObject = WindowsFilePath\n elif os_name in ['posix', 'linux']:\n FilePathObject = PosixFilePath\n else:\n raise ValueError('incorrect os_name given')\n\n if not isinstance(l, (list, tuple)):\n raise ValueError('expect a list with filepaths')\n\n l_args = map(lambda x: tuple(x) if isinstance(\n x, (tuple, list)) else tuple([x]), l)\n\n return [FilePathObject(*fp) for fp in l_args]", "def read_parse_file(params):\n\tparam_names = []\n\tparam_options = []\n\tif not os.path.isfile(params.parse_file):\n\t\tprint(\"parse file does not exist! 
({})\".format(params.parse_file))\n\t\tsys.exit(NO_PARSE)\n\twith open(params.parse_file, 'r') as pf:\n\t\t# first line should be iteration regex\n\t\tsetattr(params, 'iteration_regex', re.compile(pf.readline().strip()))\n\t\tfor line in pf:\n\t\t\tparam_desc = line.split(';')\n\t\t\tparam_names.append(param_desc[0])\n\t\t\tparam_options.append(param_desc[1])\n\n\treturn param_names,param_options", "def readFiles(trainFile, testFile):\r\n\r\n\t# Open both files and split into lines\r\n\twith open(trainFile) as f:\r\n\t\ttrainLines = f.read().splitlines()\r\n\r\n\twith open(testFile) as f:\r\n\t\ttestLines = f.read().splitlines()\r\n\r\n\t\t\r\n\t# Extract training data\r\n\tfor line in trainLines:\r\n\t\tline = line.split()\r\n\t\t\r\n\t\tid = line[0]\r\n\t\tclass_id = line[1]\r\n\t\twords = line[2:]\r\n\t\t\r\n\t\trow = [id, class_id, words]\r\n\t\t\r\n\t\ttrainingData.append(row)\r\n\t\t\r\n\t\t\r\n\t# Extract testing data\r\n\tfor line in testLines:\r\n\t\tline = line.split()\r\n\t\t\r\n\t\tid = line[0]\r\n\t\tclass_id = line[1]\r\n\t\twords = line[2:]\r\n\t\t\r\n\t\trow = [id, class_id, words]\r\n\t\t\r\n\t\ttestData.append(row)", "def generate_reader(n):\n counter = 1\n for i in range(n):\n name = generate_reader_name()\n if not name in readers:\n readers[name] = f'Reader/{counter}'\n counter += 1", "def _read_dataset(a_files):\n return [(list(ifields[TXT_IDX]), ifields[GLD_IDX])\n for ifile in a_files for ifields in iterlines(ifile)]", "def __init__(self, dataset_dir, listfile=None, period_length=48.0):\n Reader.__init__(self, dataset_dir, listfile)\n self._data = [line.split(\",\") for line in self._data]\n self._data = [(x, int(y)) for (x, y) in self._data]\n self._period_length = period_length", "def construct_combined_set(filenames, sensor_names, cnt_preprocessors,\n marker_def, end_marker_def, trial_classes,\n trial_start_offset_ms, trial_stop_offset_ms,\n min_break_length_ms, max_break_length_ms,\n break_start_offset_ms, break_stop_offset_ms,\n last_set_split_trial, add_trial_breaks=True,\n filename_to_extra_args=None):\n default_args = deepcopy(locals())\n sets = []\n\n if filename_to_extra_args is not None:\n for filename_with_args in filename_to_extra_args:\n assert filename_with_args in filenames\n\n for i_file, filename in enumerate(filenames):\n this_args = copy(default_args)\n if filename_to_extra_args is not None and (\n filename in filename_to_extra_args):\n for key in filename_to_extra_args[filename]:\n assert key in this_args\n this_args[key] = filename_to_extra_args[filename][key]\n assert key != 'last_set_split_trial', \"Does not make sense :)\"\n marker_segmenter = MarkerSegmenter(segment_ival=[\n this_args['trial_start_offset_ms'], \n this_args['trial_stop_offset_ms']],\n marker_def=this_args['marker_def'],\n trial_classes=this_args['trial_classes'],\n end_marker_def=this_args['end_marker_def'])\n trial_break_adder = AddTrialBreaks(min_length_ms=this_args['min_break_length_ms'],\n max_length_ms=this_args['max_break_length_ms'], \n start_offset_ms=this_args['break_start_offset_ms'], \n stop_offset_ms=this_args['break_stop_offset_ms'],\n start_marker_def=this_args['marker_def'],\n end_marker_def=this_args['end_marker_def'])\n if (i_file < len(filenames) - 1) or (\n this_args['last_set_split_trial'] is None):\n segmenters = [marker_segmenter,]\n else:\n segmenters = [marker_segmenter,\n RestrictTrialRange(0,this_args['last_set_split_trial'])]\n if this_args['add_trial_breaks']:\n segmenters.append(trial_break_adder)\n segmenter = PipelineSegmenter(segmenters)\n 
cnt_set = SetWithMarkers(BBCIDataset(filename,\n load_sensor_names=this_args['sensor_names']),\n this_args['cnt_preprocessors'],\n segmenter) \n sets.append(cnt_set)\n\n # add last set last part as test set if you split apart last set\n # we use that this_args is now from last set already\n if last_set_split_trial is not None:\n segmenters = [marker_segmenter,\n RestrictTrialRange(last_set_split_trial,None),]\n if this_args['add_trial_breaks']:\n segmenters.append(trial_break_adder)\n segmenter = PipelineSegmenter(segmenters)\n cnt_set = SetWithMarkers(BBCIDataset(filenames[-1], # again last file needed\n load_sensor_names=this_args['sensor_names']),\n this_args['cnt_preprocessors'],\n segmenter)\n sets.append(cnt_set)\n dataset = CombinedSet(sets)\n return dataset", "def buildRegFilterList(self, filename, listname='regFilterList'):", "def read_list_file(path_file):\n with open(path_file,'r') as f_in:\n lines = f_in.readlines()\n lines = [x for x in lines if not (x.strip() == '' or x.strip()[0] == '#')]\n left_file_list = []\n right_file_list = []\n gt_file_list = []\n conf_file_list = []\n for l in lines:\n to_load = re.split(',|;',l.strip())\n left_file_list.append(to_load[0])\n right_file_list.append(to_load[1])\n if len(to_load)>2:\n gt_file_list.append(to_load[2])\n if len(to_load)>3:\n conf_file_list.append(to_load[3])\n return left_file_list,right_file_list,gt_file_list,conf_file_list", "def from_file_list(\n cls,\n audio_file_list,\n target_sr=None,\n int_values=False,\n offset=0,\n duration=0,\n trim=False,\n channel_selector=None,\n *args,\n **kwargs,\n ):\n if isinstance(channel_selector, int):\n # Shortcut when selecting a single channel\n if channel_selector >= len(audio_file_list):\n raise RuntimeError(\n f'Channel cannot be selected: channel_selector={channel_selector}, num_audio_files={len(audio_file_list)}'\n )\n # Select only a single file\n audio_file_list = [audio_file_list[channel_selector]]\n # Reset the channel selector since we applied it here\n channel_selector = None\n\n samples = None\n\n for a_file in audio_file_list:\n # Load audio from the current file\n a_segment = cls.from_file(\n a_file,\n target_sr=target_sr,\n int_values=int_values,\n offset=offset,\n duration=duration,\n channel_selector=None,\n trim=False, # Do not apply trim to individual files, it will be applied to the concatenated signal\n *args,\n **kwargs,\n )\n\n # Only single-channel individual files are supported for now\n if a_segment.num_channels != 1:\n raise RuntimeError(\n f'Expecting a single-channel audio signal, but loaded {a_segment.num_channels} channels from file {a_file}'\n )\n\n if target_sr is None:\n # All files need to be loaded with the same sample rate\n target_sr = a_segment.sample_rate\n\n # Concatenate samples\n a_samples = a_segment.samples[:, None]\n\n if samples is None:\n samples = a_samples\n else:\n # Check the dimensions match\n if len(a_samples) != len(samples):\n raise RuntimeError(\n f'Loaded samples need to have identical length: {a_samples.shape} != {samples.shape}'\n )\n\n # Concatenate along channel dimension\n samples = np.concatenate([samples, a_samples], axis=1)\n\n # Final setup for class initialization\n samples = np.squeeze(samples)\n sample_rate = target_sr\n\n return cls(\n samples, sample_rate, target_sr=target_sr, trim=trim, channel_selector=channel_selector, *args, **kwargs,\n )", "def __init__(self, dataset_dir, listfile=None, period_length=48.0):\n Reader.__init__(self, dataset_dir, listfile)\n self._data = [line.split(',') for line in 
self._data]\n self._data = [(x, int(y)) for (x, y) in self._data]\n self._period_length = period_length", "def __init__(self, filename, listfile=True):\n if hasattr(filename, 'read'):\n self.file = filename\n else:\n self.file = open(filename, 'rb')\n self.header = self.read_header()\n self.hash_table = self.read_table('hash')\n self.block_table = self.read_table('block')\n if listfile:\n self.files = self.read_file('(listfile)').splitlines()\n else:\n self.files = None", "def __init__(self, dataset_dir, listfile=None):\n Reader.__init__(self, dataset_dir, listfile)\n self._data = [line.split(',') for line in self._data]\n self._data = [(mas[0], float(mas[1]), list(map(int, mas[2:]))) for mas in self._data]", "def __init__(self,file_reader):\n self.file_reader = file_reader", "def mk_parses(listfile, corenlp_host):\n # if not listfile.endswith('.listfile'):\n # filetype = 'Co-Reference List file'\n # error = 'has incorrect file type'\n # raise FilenameException(\"Error: %s %s\" % (filetype, error))\n\n try:\n with open(listfile) as f:\n pserver = jsonrpc.ServerProxy(jsonrpc.JsonRpc20(),\n jsonrpc.TransportTcpIp(\n addr=(corenlp_host, 8080), limit=1000))\n parses = dict([(get_id(path), FileParse(path, pserver))\n for path in f.readlines()\n if path.lstrip()[0] != '#'])\n except IOError:\n stderr.write(strerror(EIO)) # stderr.write does not have newlines\n stderr.write(\"\\nERROR: Could not open list file\\n\")\n exit(EIO)\n else:\n return parses", "def fileReader(self, fileHouses, fileBatteries):\n\n # Initiate ID\n ID = 0\n\n # Open the file containing houses\n with open(fileHouses) as h, open(fileBatteries) as b:\n\n # Read the file and separate values in list\n readerHouses = csv.reader(h, delimiter=',', quoting=csv.QUOTE_NONE)\n readerBatteries = csv.reader(b, delimiter=',',\n quoting=csv.QUOTE_NONE)\n\n # Skip the header of the file\n next(h)\n next(b)\n\n # Create instances of houses or batteries\n for row in readerHouses:\n self.houses.append(houseClass.house(ID, int(row[0]),\n int(row[1]),\n float(row[2])))\n ID += 1\n\n ID = 0\n\n for row in readerBatteries:\n self.batteries.append(batteryClass.battery(ID, int(row[0]),\n int(row[1]), float(row[2])))\n ID += 1", "def list(ffiles):\n ret = {}\n print('Reading: ')\n for ffile in ffiles:\n print(ffile)\n ret[ffile] = data_file(ffile)\n return ret", "def from_files(paths: list[str]) -> Catalog:\n cat = Catalog()\n for file in paths:\n with fsspec.open(file, mode=\"r\") as fh:\n new_cat = Catalog.from_str(fh.read())\n cat = cat.join(new_cat)\n return cat", "def _open_files(path, filenames, barcode, queue):\n if not exists(path):\n mkdir(path)\n\n handles = []\n\n for filename in filenames:\n base, ext = basename(filename).split('.', True)\n handles.append(\n Handle('{}/{}_{}.{}'.format(path, base, barcode, ext), queue,\n f_open=_type_handler[ext.split('.')[-1]]))\n\n return handles", "def read_files(self):\n for f in self.filenames:\n self.games.extend(pgn.loads(open(f).read()))", "def build_data_from_file(cls,file_path,number_elems=None):\n raise NotImplementedError('Abstract method has not been implemented')", "def load_files(basis_handles, parameter_handles):\n\n logging.info('Loading basis matrices')\n\n bases = [load_basis(handle) for handle in basis_handles]\n\n logging.info('Loading scaling parameters')\n\n parameters = [load_parameters(handle) for handle in parameter_handles]\n\n return bases, parameters", "def load_training_data(list_files):\n training_data = []\n for tr_file in list_files:\n with open(os.path.join(\"data\", 
tr_file)) as csv_file:\n reader = csv.reader(csv_file, delimiter=\",\")\n next(reader)\n for row in reader:\n training_data.append(row[1])\n return training_data", "def read_files(self):\n files = []\n # if this is test folder then there are no labels\n if 'test' in self.list_path:\n for item in self.img_list:\n image_path = item\n name = os.path.splitext(os.path.basename(image_path[0]))[0]\n files.append({\n \"img\": image_path[0],\n \"name\": name,\n })\n else:\n for item in self.img_list:\n image_path, label_path = item\n name = os.path.splitext(os.path.basename(label_path))[0]\n files.append({\n \"img\": image_path,\n \"label\": label_path,\n \"name\": name,\n \"weight\": 1\n })\n return files", "def parseInputFileList (self):\n filelist = []\n try:\n with open (self.cfgName) as fIn:\n for line in fIn:\n line = (line.split(\"@@@\")[0]).strip()\n if line:\n self.lines.append(line)\n except IOError:\n print \"*** WARNING: label cfg file \" , self.cfgName , \" not found\"\n return", "def Load_trials(files=[], trials=[]):\n\n # adds each file to files list\n\n while True:\n new_file = Add_file(files)\n if new_file:\n files.append(new_file)\n else:\n break\n\n for file in files:\n try:\n ff = open(file)\n failed_to_read_counter = 0\n ff.readline() # skips the title line\n line_read_counter = 0\n while True:\n line_read_counter += 1\n try:\n line = ff.readline()\n except:\n failed_to_read_counter += 1\n continue\n # breaks at last line\n if not line:\n break\n else:\n # splits by tabs\n try:\n fields = line.split(\"\\t\")\n rank = int(fields[0])\n except:\n continue\n # instances a new trial for each line and includes in list of trials\n trial = (\n Trial.add_trial(rank, fields[1], fields[2], fields[3], fields[4], fields[5], fields[6], fields[7],\n file))\n ff.close()\n finally:\n a=0\n return trials", "def load_from_files(*filenames,**kwargs):\n if 'keys' in kwargs.keys() and 'dtype' not in kwargs.keys():\n raise ValueError('Please set dtype as well.')\n elif 'keys' in kwargs.keys() and 'dtype' in kwargs.keys():\n if len(kwargs['keys']) != len(kwargs['dtype']):\n raise ValueError('Length of keys and dtype must match.')\n\n z_range = kwargs.pop('z_range',None)\n z_key = kwargs.pop('z_key',None)\n keys = kwargs.pop('keys',['Name','RA','Dec','z'])\n dtypes = kwargs.pop('dtype',[object,float,float,float])\n case_sensitive = kwargs.pop('case_sensitive',False)\n comments = kwargs.pop('comments','#')\n delimiter = kwargs.pop('delimeter',None)\n return_fileindex = kwargs.pop('return_fileindex',False)\n\n if kwargs != {}:\n unknown_kw = ' '.join(kwargs.keys())\n raise TypeError('load_from_files got unknown keyword arguments: {}'.format(unknown_kw))\n\n if not case_sensitive:\n keys = [a.upper() for a in keys]\n\n if z_range is not None and z_key is None:\n z_keys = [key for key in keys \n if key[0].upper() == 'Z' or key.upper() == \"REDSHIFT\"] \n if len(z_keys) == 0:\n raise ValueError('Failed to determine z_key, please set kwarg z_key')\n elif len(z_keys) > 1:\n raise ValueError('Ambiguous z_key, please set kwargs z_key manually')\n else:\n z_key = z_keys[0]\n\n out = None\n fileindex = []\n\n for k,filename in enumerate(filenames):\n tmp = np.genfromtxt(filename,names=True,comments=comments,dtype=None,\n case_sensitive=case_sensitive,delimiter=delimiter)\n \n if z_range is None:\n tmp2 = np.zeros((len(tmp),),dtype=zip(keys,dtypes))\n fileindex.extend([k for a in range(len(tmp))])\n for key in keys:\n tmp2[key] = tmp[key]\n else:\n z_filter = (tmp[z_key] >= z_range[0]) & (tmp[z_key] < z_range[1]) 
\n tmp2 = np.zeros((np.sum(z_filter),),dtype=zip(keys,dtypes))\n fileindex.extend([k for a in range(np.sum(z_filter))])\n for key in keys:\n tmp2[key] = tmp[key][z_filter]\n \n if out is None:\n out = tmp2\n else:\n out = np.concatenate((out,tmp2))\n \n if return_fileindex:\n return [out[key] for key in keys] + [np.array(fileindex)]\n else:\n return [out[key] for key in keys]", "def get_movie_data(files: list) -> list:\n pass", "def file_reader(filename = 'conv_params'):\n\n with open(filename) as f:\n info = f.readlines()\n info = [i.strip() for i in info] # each element in info is a string of a line from the file\n info = [i.split() for i in info] # split each whitespace delimited element into a list of lists\n info = [[i.split('-') for i in j] for j in info] # note info is 3 layers deep\n\n info[2] = info[2][0] # makes default E just a single string of the number\n info[3] = info[3][0]\n\n return info", "def from_files(cls, mos_file_paths, *, allow_incomplete=False):\n logger.info(\"Making MosCollection from %s files\", len(mos_file_paths))\n mos_readers = sorted([\n mr\n for mr in [MosReader.from_file(mfp) for mfp in mos_file_paths]\n if mr is not None\n ])\n return cls(mos_readers, allow_incomplete=allow_incomplete)", "def mkfilelist(filesN, filesE):\n newlist = []\n for _fN in filesN:\n for _fE in filesE:\n trN = SacIO(_fN, headonly=True)\n stN = trN.kstnm.rstrip()\n dtN = trN.delta\n trE = SacIO(_fE, headonly=True)\n stE = trE.kstnm.rstrip()\n dtE = trE.delta\n if stN == stE:\n if dtE != dtN:\n print \"sampling intervals are not identical for %s and %s\" % (_fN, _fE)\n break\n else:\n newlist.append((_fN, _fE))\n break\n\n return newlist, 1. / dtN", "def read_model_list(self,filename):\n\n self.grid_params = config.grid_params\n\n # set the correct dimension:\n self.ndim = len(self.grid_params)\n\n # set prefix and postfix:\n listfile = open(filename,\"r\")\n line = listfile.readline().strip();\n columns = line.split()\n if (len(columns) < 1): sys.exit(\"Erroneous first line in %s.\"%(filename))\n self.prefix = columns[0]\n if (len(columns) > 1): self.postfix = columns[1]\n\n # read models and put them into evolutionary tracks:\n nmodels = 0\n nmodes = 0\n models_small_spectra = []\n for line in listfile:\n line = line.strip()\n columns = line.split()\n glb = np.empty((nglb,),dtype = gtype)\n glb[imass] = utilities.to_float(columns[1])\n glb[iradius] = utilities.to_float(columns[2])\n glb[iluminosity] = utilities.to_float(columns[3])\n glb[iz0] = utilities.to_float(columns[4])\n glb[ix0] = utilities.to_float(columns[5])\n glb[iage] = utilities.to_float(columns[6])\n glb[itemperature] = utilities.to_float(columns[7])\n\n i = 8\n for (name, name_latex) in config.user_params:\n glb[user_params_index[name]] = utilities.to_float(columns[i])\n i += 1\n\n # print glb[0]\n aModel = Model(glb, _name = columns[0])\n exceed_freqlim = aModel.read_file(self.prefix + columns[0] + self.postfix)\n aModel.multiply_modes(1.0/aModel.glb[ifreq_ref]) # make frequencies non-dimensional\n aModel.sort_modes()\n aModel.remove_duplicate_modes()\n for track in self.tracks:\n if (track.matches(aModel)):\n track.append(aModel)\n break\n else:\n aTrack = Track(aModel,self.grid_params)\n self.tracks.append(aTrack)\n nmodels += 1\n nmodes += len(aModel.modes)\n if (not exceed_freqlim):\n models_small_spectra.append(aModel.name)\n print(nmodels, nmodes)\n listfile.close()\n\n # right list of models with spectra which are too small in a file:\n output = open(\"models_small_spectra\",\"w\")\n for name in 
models_small_spectra: output.write(name+\"\\n\")\n output.close()\n\n # sort tracks:\n for track in self.tracks: track.sort()\n\n # sanity check:\n for track in self.tracks:\n duplicate = track.duplicate_ages()\n if duplicate[0]:\n print(\"ERROR: the track \",track.grid_params,\" = \",track.params)\n print(\" has models with the same age. Please remove\")\n print(\" duplicate models.\")\n print(\" Check models:\", duplicate[1], duplicate[2])\n sys.exit(1)\n\n # update list of indices:\n self.ndx = range(len(self.tracks))\n\n # need to create grid from scratch since tracks have been sorted.\n self.grid = np.asarray([track.params for track in self.tracks])", "def from_args(args, verbose=False, unhandled=None):\n\n # Create a list for unhandled arguments\n if unhandled is None:\n unhandled = []\n\n experiments = ExperimentList()\n ## First try as image files\n # experiments = ExperimentListFactory.from_datablock(\n # DataBlockFactory.from_args(args, verbose, unhandled1))\n\n # Try to load from serialized formats\n for filename in args:\n try:\n experiments.extend(\n ExperimentListFactory.from_serialized_format(filename)\n )\n if verbose:\n print(\"Loaded experiments from %s\" % filename)\n except Exception as e:\n if verbose:\n print(\"Could not load experiments from %s: %s\" % (filename, str(e)))\n unhandled.append(filename)\n\n # Return the experiments\n return experiments", "def infile_list(args):\n infiles = []\n for arg in args:\n infiles += glob.glob(arg)\n infiles = [pipes.quote(f) for f in infiles]\n return infiles", "def readFromFiles(self, networkFile, demandFile):\n self.readNetworkFile(networkFile)\n self.readDemandFile(demandFile)\n self.validate()\n self.finalize()", "def test_file_reader(self) -> None:\n result = [['123', 'Jin He', 'Computer Science'],\n ['234', 'Nanda Koka', 'Software Engineering'],\n ['345', 'Benji Cai', 'Software Engineering']]\n # file have header\n self.assertTrue(\n list(file_reader('C:/Users/Nidhi/Desktop/SEM3/810/HW08/student_majors.txt', 3, '|', True)) == result)\n # file without header\n self.assertFalse(\n list(file_reader('C:/Users/Nidhi/Desktop/SEM3/810/HW08/student_majors.txt', 3, '|')) == result)\n # More than 3 datafield\n with self.assertRaises(ValueError):\n list(file_reader(\n 'C:/Users/Nidhi/Desktop/SEM3/810/HW08/student_majors.txt', 4, '|', True))\n # file not found\n with self.assertRaises(FileNotFoundError):\n list(file_reader('abc.txt', 3, '|', True))", "def loadInputFiles(self):\n\t\tfor filename in self.input_filename_list:\n\t\t\tfor module in self.modules:\n\t\t\t\tmodule.Add(filename)", "def model_files(cmd_args):\n if cmd_args.learning_file_list:\n learning_files_list = files_constructor(cmd_args, file_type='learning')\n else:\n learning_files_list = None\n\n if cmd_args.prediction_file:\n prediction_file = files_constructor(cmd_args, file_type='prediction')\n else:\n prediction_file = None\n\n\n return learning_files_list, prediction_file", "def load_runner(flist, **kwargs):\n if isinstance(flist, str):\n flist = [flist]\n frame_list = []\n for fname in flist:\n frame_list += _gen_frame_list(fname)\n return _frame_loader(frame_list, **kwargs)", "def getFileList(*args, filespec: AnyStr=\"\", folder: AnyStr=\"\", **kwargs)->List[AnyStr]:\n pass", "def read_file(inp_fn):\n lines = [line.strip().split(\",\")\n for line in open(inp_fn)\n if not (line.startswith(\"#\"))]\n return [(int(line[0]), year_record({\"male\": int(line[-3]),\n \"female\": int(line[-2]),\n \"unknown\": int(line[-1])},\n None, None))\n for line in lines[1:]]", 
"def get_file_data(reader_file):\n\n complete_data_list = []\n for row in reader_file:\n complete_data_list.append(row)\n\n return complete_data_list", "def select_files(input_line):\n uri = str(input_line.split(',')[0])\n label = str(input_line.split(',')[1])\n\n yield uri, label", "def ReadFilesGenerator(self):\n\n for file in self._file_names:\n file_list = []\n\n # TODO see further into yielding one line at a time\n with open(file, 'r', encoding='mbcs') as sped:\n file_list = sped.read().splitlines()\n\n if not self.isSigned(file_list):\n file_list = self.stripSignature(file_list)\n\n yield file, file_list", "def Open(self):\n fileHandler = open(self.path, \"r\")\n self.list = []\n constructor = None\n if self.format == \"music\": # Escoger el constructor adecuado\n constructor = Format.Music\n elif self.format == \"videos\":\n constructor = Format.Videos\n elif self.format == \"pictures\":\n constructor = Format.Pictures\n\n for line in fileHandler:\n name, author, album, year, _type, path = line.strip().split(\"¬\")\n entry = constructor(name, author, album, year, _type, path)\n self.list.append(entry)\n\n self.length = len(self.list)\n fileHandler.close()", "def _generate_examples(self, **kwargs):\n file_paths = kwargs.get(\"file_paths\")\n if not file_paths:\n raise ValueError(\"Must pass file_paths.\")\n\n for file_path in file_paths:\n for record in SeqIO.parse(file_path, \"fasta\"):\n yield record.id, {\n \"sequence\": str(record.seq),\n \"description\": str(record.description),\n \"id\": str(record.id),\n }", "def in_filepath_list(class_paths: List[str]) -> List:\n registry, not_founds = build_registry(class_paths)\n builder = FilepathListBuilder()\n source = builder.build(registry)\n\n return [source, not_founds]", "def prepare_reader(self, filename_queue, batch_size=1024):\n reader = tf.TFRecordReader()\n _, serialized_examples = reader.read_up_to(filename_queue, batch_size)\n\n tf.add_to_collection(\"serialized_examples\", serialized_examples)\n return self.prepare_serialized_examples(serialized_examples)", "def create_file_list(params):\n data_dir = params.get('data_dir', '')\n params['file_list'] = \".tmp.txt\"\n imgtype_list = {'jpg', 'bmp', 'png', 'jpeg', 'rgb', 'tif', 'tiff'}\n with open(params['file_list'], \"w\") as fout:\n tmp_file_list = os.listdir(data_dir)\n for file_name in tmp_file_list:\n file_path = os.path.join(data_dir, file_name)\n if imghdr.what(file_path) not in imgtype_list:\n continue\n fout.write(file_name + \" 0\" + \"\\n\")", "def read_lists(path_to_lists, taille,taille_sup):\n\n os.chdir(path_to_lists)\n\n heme = [i.strip() for i in open(\"heme.list\").readlines()]\n heme_sample = data_sample(heme,taille_sup)\n\n steroid = [i.strip() for i in open(\"steroid.list\").readlines()]\n steroid_sample = data_sample(steroid,len(steroid))\n\n nucleotide = [i.strip() for i in open(\"nucleotide.list\").readlines()]\n nucleotide_sample = data_sample(nucleotide,taille)\n\n control = [i.strip() for i in open(\"control.list\").readlines()]\n control_sample = data_sample(control,taille)\n\n #data_total = heme_sample+nucleotide_sample+control_sample+steroid_sample\n\n data_total = heme_sample+nucleotide_sample+control_sample\n return data_total,heme,steroid,nucleotide,control", "def __init__(self, dataset_dir, listfile=None):\n Reader.__init__(self, dataset_dir, listfile)\n self._data = [line.split(',') for line in self._data]\n self._data = [(x, float(t), int(y)) for (x, t, y) in self._data]", "def set_files(self, file_list):\n\tif file_list==None: return 
[]\n\timport types\n\tisString = isinstance(file_list, types.StringTypes) \n\tisList = isinstance(file_list, list) \n\tassert isString or isList, \"You should provide a list of files as list or as CVS string!\"\n\tif isList: return file_list\n\tif isString :\n\t import re\n\t file_list_converted = re.sub(r'\\s', '', file_list).split(',') #remove all whitespaces\n\t return file_list_converted", "def defineFILEandRANK(files: int, ranks: int) -> Tuple[List[str]]:\r\n alpha = 'abcdefghijklmnopqrstuvwxyz'\r\n fileList = []\r\n rankList = []\r\n for file in range(files):\r\n fileList.append(alpha[file])\r\n for rank in reversed(range(0, ranks)):\r\n rankList.append(str(rank))\r\n \r\n return fileList, rankList" ]
[ "0.6492544", "0.63382894", "0.6250799", "0.611366", "0.6069598", "0.6018633", "0.59182394", "0.5909983", "0.58632267", "0.58579755", "0.5853619", "0.5849561", "0.5829838", "0.580928", "0.57897955", "0.5785899", "0.57765526", "0.57752377", "0.57527864", "0.57526433", "0.5749772", "0.5746242", "0.5745427", "0.5736978", "0.5735016", "0.5718842", "0.57107687", "0.56734157", "0.5665857", "0.5665557", "0.56596106", "0.5653231", "0.56495225", "0.56368935", "0.56358653", "0.56309664", "0.56259525", "0.56200933", "0.56123054", "0.5607947", "0.55790967", "0.5571911", "0.55709785", "0.5569839", "0.5566333", "0.5563507", "0.55588645", "0.55582106", "0.55541813", "0.55524856", "0.55445844", "0.55438435", "0.55418944", "0.55329305", "0.5529695", "0.5527274", "0.551793", "0.5510912", "0.5508778", "0.550814", "0.5500842", "0.5495455", "0.5493571", "0.54744893", "0.5474127", "0.54555285", "0.5440225", "0.54315084", "0.54303676", "0.542959", "0.5418596", "0.5416025", "0.5415067", "0.54094183", "0.54074836", "0.54039866", "0.539313", "0.5388496", "0.53869545", "0.53866214", "0.5381011", "0.537989", "0.5379454", "0.53788984", "0.53745127", "0.53741", "0.5368982", "0.5366444", "0.53659916", "0.5358723", "0.53579015", "0.5355051", "0.5344213", "0.53426135", "0.53375554", "0.53335905", "0.5324907", "0.5321942", "0.5321128", "0.5316644" ]
0.679022
0
return the number of slices per timepoint
def getSlicesPerTimepoint(self):
		return self.slicesPerTimepoint
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getDataSetCount(self):\n\t\treturn int(self.numberOfImages / self.slicesPerTimepoint)", "def size(self):\n size = 1\n for current_slice in self.slices:\n size *= current_slice.stop - current_slice.start\n return size", "def n(self):\n return self._time_axis.size", "def _get_slice_len(s, axlen):\n if s.start is None:\n start = 0\n else:\n start = s.start\n if s.stop is None:\n stop = axlen\n else:\n stop = np.min([s.stop, axlen])\n if s.step is None:\n step = 1\n else:\n step = s.step\n\n return ((stop - 1 - start) // step) + 1", "def getDimensions(self):\n\t\tprint \"Returning\",self.x,self.y,self.slicesPerTimepoint\n\t\treturn (self.x, self.y, self.slicesPerTimepoint)", "def size(self, time):\n if self.start_time <= time <= self.end_time:\n return self.masks[time - self.start_time].sum()\n else:\n return 0", "def n_timesteps(self) -> int:\n return len(self.time)", "def spike_count(spikeTime, start, stop, dt):\n\n\n #Spike time turned into a numpy array\n spikeTime = np.array(spikeTime)\n # print('Spike Times: ', spikeTime)\n\n #Creat interval array - intervals in which to break up the time array - sub time interval array\n duration = stop-start #Total run time\n n = duration/dt #How many subintervals from time horizon results from user defined interval\n splitInterval = np.linspace(0, duration, n+1) #create numpy array of subinterval over which to count spikes\n # print ('split interval: ', splitInterval)\n\n ##Find length over which to iterate in for loop\n length_splitInt = len(splitInterval)\n # print('length splitInterval: ', length_splitInt)\n length_time = len(spikeTime)\n # print('length time: ', length_time)\n length = length_splitInt + ((length_time) - 2)\n # print('length :', length)\n\n i=0 #inex for time array\n j=0 #index for splitInterval array.\n k=0 #index for new matrix that will store the grouped values from the split time array\n counter = 0 #counter variable to keep track of spike count for each subinterval through loop\n SpikeCount = [] #Initialize array to collect the number of spikes occuring wihtin each subinterval\n\n for i in range(length):\n if (i == 0) and (spikeTime[0] == splitInterval[0]):\n counter += 1\n i += 1\n\n # Spot check\n # print('if counter: ', counter)\n # print('time element: ', spikeTime[k])\n # print('splitInt: ', splitInterval[j], splitInterval[j + 1])\n # print('i: ', i)\n # print('if k: ', k)\n\n if k < (len(spikeTime) - 1):\n k += 1\n\n # Spot check\n # print('iff k: ', k)\n # print('iff counter: ', counter)\n\n else:\n j += 1\n\n # Spot check\n # print('iff counter: ', counter)\n # print(SpikeCount)\n # print('iff j: ', j)\n\n elif (spikeTime[k] > splitInterval[j]) and (spikeTime[k] <= splitInterval[j + 1]):\n counter += 1\n i += 1\n\n # Spot check\n # print('if counter: ', counter)\n # print('time element: ', spikeTime[k])\n # print('splitInt: ', splitInterval[j], splitInterval[j + 1])\n # print('i: ', i)\n # print('if k: ', k)\n\n if k < (len(spikeTime) - 1):\n k += 1\n\n # Spot check\n # print('iff k: ', k)\n # print('iff counter: ', counter)\n\n else:\n j += 1\n # Spot check\n SpikeCount.append(counter)\n # print('iff counter: ', counter)\n # print(SpikeCount)\n # print('iff j: ', j)\n\n\n\n else:\n SpikeCount.append(counter)\n counter = 0\n j += 1\n i += 1\n\n # Spot Check\n # print('else counter: ', counter)\n # print(SpikeCount)\n # print('time element: ', spikeTime[k])\n # print('splitInt: ', splitInterval[j], splitInterval[j + 1])\n # print('else j: ', j)\n # print('else i: ', i)\n # print('else k: ', k)\n\n return 
(SpikeCount, splitInterval)", "def calc_cycle_count(self, time):\n dur = self.get_duration()\n phases = time / dur\n count = int(math.floor(phases))\n\n if not self.enable_loop():\n count = np.clip(count, 0, 1)\n\n return count", "def times(self) -> int:\n return self._channel_arrays[0].shape[self.time_pos]", "def numel(self):\n return self.t.size", "def count_timepoints(sc, session, files):\n tuples = zip(range(len(files)), files)\n files_sc = sc.parallelize(tuples)\n\n def count_planes(kv):\n index, path2 = kv\n try:\n from ScanImageTiffReader import ScanImageTiffReader\n img = ScanImageTiffReader(path2).data()\n except Exception:\n import tifffile\n img = tifffile.imread(path2)\n return img.shape[0]\n\n data2 = files_sc.map(count_planes).collect()\n frame_numbers = np.array(data2)\n vol_numbers = frame_numbers / len(session.fieldMask)\n return vol_numbers.astype(int)", "def get_num_timesteps(self) -> int:\n return len(self._indices)", "def setSlicesPerTimepoint(self, n):\n\t\tassert n > 0, \"Slices per timepoint needs to be greater than 0\"\n\t\tprint \"Setting slices per timepoint to \", n\n\t\tself.slicesPerTimepoint = n\n\t\tself.z = n\n\t\tself.readers = []", "def count_segments(markers) -> int:\n cnt = Counter()\n for row in markers:\n cnt.update(row)\n n_cnt = dict(takewhile(lambda x: x[1] >= 10, cnt.most_common()))\n del n_cnt[1]\n del n_cnt[-1]\n return len(n_cnt.keys())", "def getSegmentCount(self) -> int:\n ...", "def _number_of_intervals(self):\n return self._number_of_levels - 1", "def ntimebins(self, t0, t1):\n t0 = Time(t0, scale='utc')\n t1 = Time(t1, scale='utc')\n nt = ((t1-t0).to(u.s) / self.dtsample /\n (self.setsize)).to(u.dimensionless_unscaled).value\n return np.round(nt).astype(int)", "def count(time):\n \n return len(events(time))", "def _calc_slices(X):\n\n n_rows = X.shape[0]\n slices = [n_rows // comm.size for _ in range(comm.size)]\n count = n_rows % comm.size\n for i in range(count):\n slices[i] += 1\n\n return np.array(slices, dtype=np.int64)", "def N(self):\n return len(self.time)", "def traj_nslice (u,teq,tsample) :\n # get the number of frames in the slice (http://stackoverflow.com/a/7223557)\n traj_slice = u.trajectory[teq::tsample]\n return sum(1 for _ in traj_slice)", "def traj_nslice (u,teq,tsample) :\n # get the number of frames in the slice (http://stackoverflow.com/a/7223557)\n traj_slice = u.trajectory[teq::tsample]\n return sum(1 for _ in traj_slice)", "def slice_timeseries(n_slices,dataset):\n\n n,l=np.shape(dataset)\n\n X = np.reshape(dataset,(n*n_slices,l//n_slices))\n\n print('sliced data shape (nr. 
of slices, slice length):',np.shape(X))\n print('#####################################')\n \n return X", "def get_num_chunks(self) -> int:", "def __len__(self):\n _, timesteps, height, width = self.data.shape\n height //= self.size\n width //= self.size\n\n if self.subset == 'train':\n out = self.length\n elif self.subset == 'all':\n out = height * width\n else:\n out = (height // 2) * (width // 2)\n\n if not self.time:\n out *= timesteps\n\n return out", "def dim_calculator():\r\n probe_set = np.arange(1, 101)\r\n X = -36 + ((probe_set - 1) // 10) * 4\r\n Y = 2 - ((probe_set - 1) % 10) * 4\r\n dim = np.vstack((X, Y)).T\r\n return dim", "def dim(self) -> int:", "def n_points(self):\n\n if self.data_reduced:\n return len(self.data_reduced[0])\n else:\n return 0", "def Test_NumSegments(Daten):\n N_Leitungen = len(Daten.PipeSegments)\n\n return N_Leitungen", "def get_naive_size(self) -> int:\n return (self.triples.time_end - self.triples.time_begin + 1).sum()", "def voxel_count(self):\n return self.cols * self.rows * self.sections", "def n_elements(x, dist, var=None):\n n = dist/mdiff(x)\n if var == 'time':\n n = n/60\n return int(np.round(n))", "def num_time_bins(self):\n return self.header.time_gate_bin_count * self.header.samples_per_time_bin", "def slice_length(start, stop, step):\n return (stop - start + step - 1) // step", "def multiListSliceCount(lol):\n count = 1\n for i in range(0, len(lol)):\n count *= len(lol[i])\n #print \"multiListSliceCount of:%s is:%d\" % (lol, count)\n return count", "def get_number_of_measurement(self):\n num_of_meas = 0\n for time in self.mdvtc.keys():\n num_of_meas = num_of_meas + self.mdvtc[time].get_number_of_measurement()\n #\n return num_of_meas", "def calc_num_bursts(num_timeseries):\n num_vectors = 0\n for i in range(1, num_timeseries + 1):\n num_vectors += ((i + (CORRELATION_NUM_PIPES - 1)) /\n CORRELATION_NUM_PIPES)\n return ((num_vectors + (CORRELATION_NUM_VECTORS_PER_BURST-1)) /\n CORRELATION_NUM_VECTORS_PER_BURST)", "def get_series_data_length(self):\n global OVER_PAD_LENGTH_COUNT\n\n length = []\n for k in range(3):\n for i in range(len(self.pulse_data[k])):\n length.append(self.pulse_data[k][i].shape[0])\n return length", "def n_elements(self) -> int:\n n_elem = np.prod(self.shape)\n if self.n_timesteps > 1:\n n_elem = int(n_elem / self.n_timesteps)\n return n_elem", "def compute_num_tracks(x_offset: int, y_offset: int,\n x: int, y: int, track_info: Dict[int, int]):\n x_diff = x - x_offset\n y_diff = y - y_offset\n result = 0\n for length, num_track in track_info.items():\n if x_diff % length == 0 and y_diff % length == 0:\n # it's the tile\n result += num_track\n return result", "def __len__(self) -> int:\n import h5py\n\n with h5py.File(\n os.path.join(self.root, self.data_dir, self.img_file_name), \"r\"\n ) as f:\n num_datapoints: int = f[self.split][\"pv_log\"].shape[0]\n\n return num_datapoints", "def num_ticks(self, start, end, desired_ticks=8):\n return len(self.ticks(start, end, desired_ticks))", "def num_ticks(self, start, end, desired_ticks=8):\n return len(self.ticks(start, end, desired_ticks))", "def num_ticks(self, start, end, desired_ticks=8):\n return len(self.ticks(start, end, desired_ticks))", "def count_elements_in_dataset(dataset):\n return dataset.count()", "def nSlices(self):\n return self._c_param.lee_richards_n_slices", "def countComponents26(cube):\n n,l = labelComponents26(cube);\n return n;", "def get_event_count(event_times, start, end):\n mask = (event_times > start) & (event_times <= end)\n return 
event_times[mask].size", "def get_time_slices(self):\n tot = []\n for clu in self._clusters:\n tot.extend(self._clusters[clu].to_dict()[:])\n #tot.sort()\n return tot", "def segment_n(self):\n return len(self.segment_lengths)", "def Points_Counting(self):\n return len(self.__traectory_list)", "def __CalculateSliceSize(self, readShapeZYX):\n # readShapeZYX is the dimension of the data we must READ to fill the required output area;\n # i.e .the fill area plus margins. If we're filling globally it's the same thing.\n dataBPP = 4\n memLimit = self._jobDetails.MemTargetBytes\n outputsBPP = dataBPP * 2 + 1 # the output data, distances, and flags\n # approximate total number of pixels we can read for each file\n sliceSqrd = memLimit / (readShapeZYX[0] * (dataBPP + outputsBPP))\n # not implementing slicing in y dimension so xSize is total pixels / total height\n sliceXSize = sliceSqrd / readShapeZYX[1]\n return sliceXSize", "def n_series(self):\n return self.container['n_series']", "def _num_samples(x: npt.ArrayLike) -> int:\n if not hasattr(x, \"__len__\") and not hasattr(x, \"shape\"):\n if hasattr(x, \"__array__\"):\n x = np.asarray(x)\n else:\n raise TypeError(\"Expected sequence or array-like, got %s\" % type(x))\n if hasattr(x, \"shape\"):\n if len(x.shape) == 0:\n raise TypeError(\"Singleton array %r cannot be considered\" \" a valid collection.\" % x)\n # Check that shape is returning an integer or default to len\n # Dask dataframes may not return numeric shape[0] value\n if isinstance(x.shape[0], numbers.Integral):\n return x.shape[0]\n else:\n return len(x)\n else:\n return len(x)", "def get_nb_element_per_dimension(recipe):\n return len(recipe[\"r\"]), len(recipe[\"c\"]), len(recipe[\"z\"])", "def __number_measurements(a, func_axis=None):\n if func_axis == None:\n return a.size\n else:\n return a.size / a.shape[func_axis]", "def countTriplets(arr, r):\n c_2, c_3 = Counter(), Counter()\n n_triplets = 0\n for e in arr:\n # print(f'arr: {arr}, e: {e}, c_3: {c_3}, c_2: {c_2}, n_triplets: {n_triplets}')\n if e in c_3:\n n_triplets += c_3[e]\n if e in c_2:\n c_3[e*r] += c_2[e]\n c_2[e*r] += 1\n return n_triplets", "def dim_calculatorP3():\r\n probe_set = np.arange(1, 101)\r\n X = 20 - 36 + ((probe_set - 1) // 10) * 4\r\n Y = 2 - ((probe_set - 1) % 10) * 4\r\n dim = np.vstack((X, Y)).T\r\n return dim", "def num_points_sweep(self, start, stop, step):\r\n return(abs((stop - start)//step) + 1)", "def get_length(data):\n return np.array([len(conv) for conv in data]).reshape(-1, 1)", "def count_data(self):\n try:\n ndata = len(self.x)\n logger.info(\"Number of data points: {0}\".format(ndata))\n except AttributeError:\n logger.error(\"Data object has not been defined\")\n ndata = 0\n return ndata", "def get_contours_number(self):\n ncontour = len(self.x)\n logger.info(\"Number of contours: {0}\".format(ncontour))\n return ncontour", "def getNumTiles(self):\n return len(list(product(list(range(self.width+1))[1:], list(range(self.height+1))[1:])))", "def get_number_of_measurement(self):\n used_fragments = set()\n counter = 0\n for fragment in self.observed_fragments:\n num_of_isotope = 0\n used_counter = 0\n for i in self.mdv[fragment]:\n num_of_isotope = num_of_isotope + 1\n if self.mdv[fragment][i]['use'] == 'use':\n\n counter = counter + 1\n used_counter = used_counter + 1\n if num_of_isotope == used_counter:\n used_fragments.add(fragment)\n return counter-len(used_fragments)", "def __len__(self):\n nsamp = self.data.shape[-1]\n kernel = int(self.kernel * self.fs)\n stride = int(self.stride * 
self.fs)\n n_stride = int(np.ceil((nsamp - kernel) / stride) + 1)\n return max(0, n_stride)", "def compute_track_length(points, bin_size=17):\n pca = PCA(n_components=2)\n length = 0.\n if len(points) >= 2:\n coords_pca = pca.fit_transform(points)[:, 0]\n bins = np.arange(coords_pca.min(), coords_pca.max(), bin_size)\n # bin_inds takes values in [1, len(bins)]\n bin_inds = np.digitize(coords_pca, bins)\n for b_i in np.unique(bin_inds):\n mask = bin_inds == b_i\n if np.count_nonzero(mask) < 2: continue\n # Repeat PCA locally for better measurement of dx\n # pca_axis = pca.fit_transform(points[mask])\n pca_axis = coords_pca[mask]\n dx = pca_axis.max() - pca_axis.min()\n length += dx\n return length", "def nr_points(self):\n return len(self.x)", "def count(self) -> int:\n return self.end_measure_num - self.start_measure_num + 1", "def n_rounds(self) -> int:\n return self.y.shape[0]", "def num_chunking_units(self):\n if self._source_paths:\n return len(self._source_paths)\n return 1", "def n_cs(self):\n return np.size(self._cs, 0)", "def _get_observation_count(self):\n observation_count = 0\n for sequence in self.seq_list:\n observation_count += sequence.shape[0] \n \n return observation_count", "def count(self):\r\n return self.data_array.size", "def get_points_number(self):\n ncontour = self.get_contours_number\n npoints = []\n for i in range(0, ncontour):\n npoints.append(len(self.x[i]))\n return npoints", "def __len__(self):\n return int(np.floor(len(self.dataset_df) / self.batch_size))", "def numSegments(self):\n\n return self.getHierView().numSegments()", "def get_stepsize(ds):\n tube_pos = ds.axes[-1]\n if tube_pos.ndim == 2: #very old data, just take one slice\n tube_pos = tube_pos[0]\n tubesep = abs(tube_pos[0]-tube_pos[-1])/(len(tube_pos)-1)\n tube_steps = ds.axes[0]\n bin_size = abs(tube_steps[0]-tube_steps[-1])/(len(tube_steps)-1)\n pixel_step = int(round(tubesep/bin_size))\n bin_size = tubesep/pixel_step\n #print 'Determined tube separation to be %f, corresponding to %d steps' % (tubesep,pixel_step)\n return bin_size", "def get_num_points(self):\n dimensions = self.data.shape\n return dimensions[0]", "def dimension(self):", "def get_number_of_segments(self):\n\n return len(self._break_points) - 1", "def dimensions():", "def calculate_number_of_segments(self):\n return sum(len(eg.transcript_file.segments) for eg in self.exemplars)", "def count_segments(self, raw_only: bool = False) -> int:\n if self.segments:\n self_count = 0 if raw_only else 1\n return self_count + sum(\n seg.count_segments(raw_only=raw_only) for seg in self.segments\n )\n else:\n return 1", "def dim(self) -> int:\n pass", "def number_of_steps(self) -> int:\n return len(self.step_points)", "def num_timesteps(self):\n return self._num_timesteps", "def nbytes_at(self, device_id:int):\n if self._slices:\n if isinstance(self._coherence._local_states[device_id], dict): # there are subarrays no this device\n if self._slices_hash in self._coherence._local_states[device_id].keys(): # this subarray is already there\n return self._array.nbytes_at(device_id)\n else: # the subarray will be moved to there\n return self._array.nbytes_at(device_id) + self.subarray_nbytes # add the incoming subarray size\n else: # there is a complete copy on this device, no need to prepare subarray\n return self.nbytes\n else:\n return self.nbytes", "def count():", "def n_points(self) -> int:\n return len(self.all_df)", "def __len__(self): \r\n length = len(self.data) - 2* self.skip_window\r\n #print ('length', length)\r\n return length\r\n 
#raise NotImplementedError('Implement the __len__ method of the dataset')\r", "def n_points(self) -> int:\n return len(self.df)", "def size(self) -> Tuple[groupable, pdarray]:\n return self.count()", "def num_run_cycles(self, run_idx):\n return self.num_traj_frames(run_idx, 0)", "def numberOfPoints(self):\n return 20000", "def getLength(self):\n lons = self._toplons\n lats = self._toplats\n seg = self._group_index\n groups = np.unique(seg)\n ng = len(groups)\n rlength = 0\n for i in range(ng):\n group_segments = np.where(groups[i] == seg)[0]\n nseg = len(group_segments) - 1\n for j in range(nseg):\n ind = group_segments[j]\n P0 = Point(lons[ind], lats[ind])\n P1 = Point(lons[ind + 1], lats[ind + 1])\n dist = P0.distance(P1)\n rlength = rlength + dist\n return rlength", "def get_dimension_length(self):\n pass", "def find_num_beats_per_window(p1, p2, p3, p4, p5, p6):\n\n return len(p1), len(p2), len(p3), len(p4), len(p5), len(p6)", "def __len__(self):\r\n return int(np.ceil(len(self.pathways) / float(self.batch_size)))", "def _get_observation_dimension(self):\n return len(self._get_observation_np())" ]
[ "0.7135924", "0.6884999", "0.64077276", "0.6373212", "0.6304518", "0.6302463", "0.6214839", "0.6137181", "0.6126625", "0.61182857", "0.6099939", "0.6055298", "0.6052316", "0.6023152", "0.59912443", "0.5985234", "0.59840417", "0.59639126", "0.59609234", "0.59437424", "0.5941274", "0.59409493", "0.59409493", "0.5938911", "0.593823", "0.5906485", "0.588328", "0.58790076", "0.58749837", "0.5840988", "0.5831759", "0.58248883", "0.58102876", "0.57956505", "0.5780419", "0.57775396", "0.5774456", "0.57719934", "0.5734191", "0.5718746", "0.57088643", "0.5708398", "0.57015574", "0.57015574", "0.57015574", "0.5697563", "0.56928396", "0.5680043", "0.56774986", "0.5669859", "0.5648747", "0.56308746", "0.56233114", "0.5617497", "0.5614036", "0.5612783", "0.5590715", "0.5590399", "0.5588557", "0.55870557", "0.55864936", "0.5583911", "0.5580825", "0.5574367", "0.5553499", "0.5546722", "0.55459243", "0.55295485", "0.55119896", "0.55115664", "0.55107576", "0.55060244", "0.5502313", "0.54934996", "0.5492216", "0.54901946", "0.5482243", "0.54818743", "0.5479869", "0.5472665", "0.54693747", "0.54636186", "0.54628986", "0.5462504", "0.54566705", "0.5450361", "0.5444831", "0.54419744", "0.54366475", "0.5436525", "0.54334915", "0.54281604", "0.5424359", "0.542419", "0.5423022", "0.5419185", "0.5418408", "0.5416286", "0.5400809", "0.5398093" ]
0.76755035
0
Set the number of slices that belong to a given timepoint
def setSlicesPerTimepoint(self, n):
    assert n > 0, "Slices per timepoint needs to be greater than 0"
    print "Setting slices per timepoint to ", n
    self.slicesPerTimepoint = n
    self.z = n
    self.readers = []
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setNSlices(self,n):\n assert(n> 0)\n self._c_param.lee_richards_n_slices = n", "def setNumTimeSubSteps(*argv):", "def getSlicesPerTimepoint(self):\n\t\treturn self.slicesPerTimepoint", "def _set_window_time(slices, times):\n t_idx_ = [t[-1] for t in slices]\n return times[t_idx_]", "def set_numpins(self, n):\n self.numpins = n", "def slice_timeseries(n_slices,dataset):\n\n n,l=np.shape(dataset)\n\n X = np.reshape(dataset,(n*n_slices,l//n_slices))\n\n print('sliced data shape (nr. of slices, slice length):',np.shape(X))\n print('#####################################')\n \n return X", "def set_segments(self, segments):\n self.send_command(Command.SET_SEGMENT_COUNT, [segments])", "def setNPoints(self,n):\n assert(n > 0)\n self._c_param.shrake_rupley_n_points = n", "def set_slices(self, start=0.0, end=1.0, step=None, num=None):\n\n if step is None:\n s = (end - start) / float(num)\n self._slices = frange(start, end, s)\n elif num is None:\n self._slices = frange(start, end, step)\n else:\n raise RuntimeError()\n\n LOG.info('Num slices: %d', len(self._slices))\n LOG.info('Slices: %s', self._slices)", "def setNumberOfIntervals(self, n=500):\n self._simulator_.update(numberOfIntervals=n)\n return", "def setIterationCount(self, newIterationCount):\n \n pass", "def _set_number_of_subsamples(self, number_of_subsamples):\n self._number_of_subsamples = number_of_subsamples\n self._compute_down_sample_factor()", "def scale_in(self, count):\n pass", "def set_number_of_time_steps(self, number_of_time_steps):\n self.number_of_time_steps = number_of_time_steps", "def __init__(self, slice_number: int = -1):\n super().__init__()\n self.metric = 'SEGAREA'\n self.slice_number = slice_number", "def test_write_slices(self):\n dt = np.dtype('(3,)i')\n\n data1 = np.ones((2,), dtype=dt)\n data2 = np.ones((4,5), dtype=dt)\n\n dset = self.f.create_dataset('x', (10,9,11), dtype=dt)\n\n dset[0,0,2:4] = data1\n self.assertArrayEqual(dset[0,0,2:4], data1)\n\n dset[3, 1:5, 6:11] = data2\n self.assertArrayEqual(dset[3, 1:5, 6:11], data2)", "def SetDimensions(self, p_int, p_int_1, p_int_2, p_int_3, p_int_4, p_int_5, p_int_6):\n ...", "def getDataSetCount(self):\n\t\treturn int(self.numberOfImages / self.slicesPerTimepoint)", "def setSplitCount(self, count):\n pass", "def set_t_span(self,dt,tfin):\n self.dt, self.tfin = dt,tfin\n self.t_span = np.arange(0,tfin,dt)\n return self.t_span", "def plot_pcs_slice(self,data_in,large_slice,plot_slice=None,\n num_pcs=4,indiv=0, color_array=None,fs=30,sz=4):\n if color_array == None:\n color_array = self._get_colors()\n # Plot params\n fig = plt.figure(figsize=(sz,6))\n gs = GridSpec(sz+len(self.states_list),1)\n feature_ax = plt.subplot(gs[:sz,:])\n data_in = data_in[:num_pcs,large_slice][::-1,:]\n max_ = ceil(data_in.max()-data_in.min()) + 1\n ttime = np.arange(data_in.shape[1])\n for ii in range(0,num_pcs):\n feature_ax.plot(ttime,data_in[ii,:]+ii*max_,'k')\n feature_ax.set_yticks(np.arange(num_pcs)*max_)\n feature_ax.set_yticklabels('')\n\n feature_ax.set_ylim((data_in.min()-1,num_pcs*max_-1))\n\n xlabel_= np.linspace(0,data_in.shape[1],5,dtype='int')\n feature_ax.set_xticks(xlabel_)\n feature_ax.set_xlim((xlabel_[0],xlabel_[-1]))\n feature_ax.set_xticklabels(list(map(str,xlabel_ // fs)))\n\n if not (plot_slice is None):\n feature_ax.axvline(plot_slice[0], color=color_array[0],linestyle=':',lw=2)\n feature_ax.axvline(plot_slice[-1], color=color_array[0],linestyle=':',lw=2)\n plot_pcs_slice_sub(self,data_in,large_slice,plot_slice,indiv,color_array)\n return", "def 
__init__(self, slice_number: int=-1):\n super().__init__()\n self.metric = 'GTAREA'\n self.slice_number = slice_number", "def _numberOfPoints_changed(self):\n self.reinitialiseData()", "def setNIterations(self, value):\n return self._set(nIterations=value)", "def time_frame_stride(self, value):\n self._time_frame_stride = value", "def setCount(self, num):\n self.count=num", "def set_part_length(self, seconds):\n self._part_length = seconds", "def setNumberOfIterations(self, value):\n return self._set(numberOfIterations=value)", "def setNumberOfIterations(self, value):\n return self._set(numberOfIterations=value)", "def _on_num_points_change(self, _):\n self.num_points = self.num_points_slider.value\n self.redraw_whole_plot()", "def _set_neighs_slice(self, key):\n ## Condition to use slice type\n self._constant_neighs = True\n self.ks = range(1) if self.ks is None else self.ks\n ## Possible options\n if key is None:\n self.idxs = slice(0, self._n, 1)\n elif isinstance(key, slice):\n start = 0 if key.start is None else key.start\n stop = self._n if key.stop is None else key.stop\n stop = self._n if key.stop > 10*16 else key.stop\n step = 1 if key.step is None else key.step\n self.idxs = slice(start, stop, step)\n elif type(key) in inttypes:\n self.idxs = slice(0, key, 1)\n elif type(key) == tuple:\n self.idxs = slice(key[0], key[1], 1)\n self._setted = True", "def time_interval_sub(self, time_step, nsteps):\n world.subtime = TimeAxis(0.0, int(nsteps), float(time_step))\n print(\"Setting subtime\")", "def plot_pcs_slice_sub(self,data_in,large_slice,plot_slice,\n indiv=0,color_array=None,sz=8):\n fig = plt.figure(figsize=(sz,6))\n gs = GridSpec(sz+len(self.states_list),1)\n feature_ax = plt.subplot(gs[:sz,:])\n stateseq_ax = plt.subplot(gs[sz+1])\n\n if color_array is None:\n color_array = self._get_colors()\n\n r_plot_slice = list(map(lambda x: large_slice[0] + x, plot_slice))\n z, perm = relabel_model_z(self,index=indiv)\n z = z[r_plot_slice]\n stateseq_norep, durations = rle(z)\n\n max_ = ceil(data_in.max()-data_in.min()) +1\n data_in=data_in[:,plot_slice]\n ttime = np.arange(data_in.shape[1])\n for ii in range(0,data_in.shape[0]):\n feature_ax.plot(ttime,data_in[ii,:] + ii*max_,'k')\n\n feature_ax.set_xlim((0,len(plot_slice)))\n feature_ax.set_ylim((data_in.min()-1,data_in.shape[0]*max_-1))\n feature_ax.set_yticks([])\n feature_ax.set_xticks([])\n\n stateseq_ax.imshow(z[:,np.newaxis].T,aspect='auto',\n cmap=ListedColormap(color_array),vmin=0,vmax=len(perm))\n stateseq_ax.set_yticks([])\n stateseq_ax.set_xticks([])\n\n for ii, pos in enumerate(durations.cumsum()):\n if durations[ii] >=1:\n feature_ax.axvline(pos,\n color=color_array[stateseq_norep[ii]],\n linestyle=':')\n return", "def SetDataSlice(vDataSet,arr,aIndexZ,aIndexC,aIndexT):\r\n nx = vDataSet.GetSizeX()\r\n ny = vDataSet.GetSizeY()\r\n nz = vDataSet.GetSizeZ()\r\n dtype = GetType(vDataSet)\r\n\r\n if DEBUG:\r\n print(\"SetDataVolume\")\r\n print(\"vDataSet:\",(nz,ny,nx),GetType(vDataSet))\r\n print(arr.shape)\r\n print(arr.dtype)\r\n print(aIndexC)\r\n print(aIndexT)\r\n\r\n #Make sure the data is in range and convert the array\r\n s = arr\r\n if dtype != arr.dtype:\r\n miset,maset = GetTotalRange(vDataSet)\r\n arr[arr<miset]=miset\r\n arr[arr>maset]=maset\r\n s = arr.astype(dtype)\r\n\r\n s = s.swapaxes(0,1)\r\n if dtype == np.uint8:\r\n SetData = vDataSet.SetDataSliceBytes\r\n elif dtype == np.uint16:\r\n SetData = vDataSet.SetDataSliceShorts\r\n elif dtype == np.float32:\r\n SetData = vDataSet.SetDataSliceFloat32\r\n\r\n 
SetData(s,aIndexZ,aIndexC,aIndexT)\r\n #vDataSet.SetChannelRange(aIndexC,miset,maset)\r", "def setDimension(self, dimension):\n self.components = self.components[:dimension]\n self.components += [0 for i in range(dimension - len(self.components))]", "def setIterations(self,niterations):\n self.niterations = niterations", "def set_control_points(cls):\r\n \"\"\" EXECUTE THIS FUNCTION IN THE FARM CLASS! \"\"\"\r\n \r\n # t: consists of N points\r\n # p: is a finer grid made from t (default is 10 times N points)\r\n cls.t = np.fromfunction(lambda j : (1.0/2.0 + j)*cls.DT, (cls.N, ))\r\n cls.p = np.linspace(cls.t[0], cls.t[-1], num=10*cls.N)", "def setNumIterations(*argv):", "def set_length(stage, num):\n stage_maxes[stage] = num\n set_nums(stage)\n\n canvas.delete('tick_' + stage)\n\n if num == 0:\n return # No ticks\n\n # Draw the ticks in...\n _, y1, _, y2 = canvas.coords('bar_' + stage)\n\n dist = (width - 40) / num\n if round(dist) <= 1:\n # Don't have ticks if they're right next to each other\n return\n tag = 'tick_' + stage\n for i in range(num):\n pos = int(20 + dist*i)\n canvas.create_line(\n pos, y1, pos, y2,\n fill='#00785A',\n tags=tag,\n )\n canvas.tag_lower('tick_' + stage, 'bar_' + stage)", "def test_write_slices(setup_teardown_file):\n f = setup_teardown_file[3]\n\n dt = np.dtype('(3,)i')\n\n data1 = np.ones((2, ), dtype=dt)\n data2 = np.ones((4, 5), dtype=dt)\n\n dset = f.create_dataset('x', (10, 9, 11), dtype=dt)\n\n dset[0, 0, 2:4] = data1\n assert np.array_equal(dset[0, 0, 2:4], data1)\n\n dset[3, 1:5, 6:11] = data2\n assert np.array_equal(dset[3, 1:5, 6:11], data2)", "def plot_slices(hists, slice_width, fibre_pos):\n pmt_info = rat.utility().GetPMTInfo()\n # Find max angle\n max_angle = 0\n pmtIDs = hists.keys()\n for pmtID in pmtIDs:\n angle = taf.fibre_to_pmt_angle(fibre_pos, pmt_info.GetPosition(pmtID))\n if angle > max_angle:\n max_angle = angle\n print max_angle\n\n # Step between 0 and max_angle in slices of slice_width\n # create a plot of all pmt time spectra within each slice\n ROOT.gStyle.SetPalette(1) \n cuts = np.arange(0., max_angle+slice_width, slice_width)\n for i, cut in enumerate(cuts): \n tmpHists = []\n if i > 0:\n low_range = cuts[i-1]\n hi_range = cuts[i]\n s = ROOT.THStack(\"stack\", \"Slice: %1.1f - %1.1f deg\" % (low_range, hi_range)) \n count = 0\n for pmtID in pmtIDs:\n angle = taf.fibre_to_pmt_angle(fibre_pos, pmt_info.GetPosition(pmtID))\n if angle > low_range and angle < hi_range:\n #print pmtID\n count = count + 1\n hists[pmtID].SetLineColor(count)\n s.Add(hists[pmtID])\n print \"Drawing...\"\n s.Draw(\"nostack\")\n s.GetHistogram().GetXaxis().SetTitle(\"Time (ns)\")\n #s.Write()\n #c1.BuildLegend(0.5, 0.2, 0.88, 0.88)\n c1.Update()\n c1.Modified()\n c1.Print(\"./results/slices/Slice_%1.1f.png\" % low_range)\n s.Delete()\n #time.sleep(1)", "def set_calculated_segments(self, total_lights, segments):\n self.set_segments(segments)\n self.set_lights_per_segment(int(total_lights / segments))", "def test_n_loc(self):\n dates = pd.date_range(start=\"2007-01-01\", end=\"2007-02-01\")\n\n ts = pd.DataFrame(\n {\n \"var1\": np.arange(len(dates)),\n \"var2\": np.arange(len(dates))\n },\n index=dates)\n\n dataset = GriddedNcContiguousRaggedTs(self.testdatapath,\n self.grid,\n mode=\"w\")\n fill_values = {\"var1\": 5, \"var2\": 5}\n\n for gpi in self.gpis:\n dataset.write(gpi, ts, fill_values=fill_values)", "def plot_time_slices(self):\n U = self.r.u[:, 15:-15, :]\n T = range(U.shape[2])\n kwarglist = [dict(t=t,\n index=self.index,\n U=U,\n 
levels=self.levels,\n fname=self.time_slice_path(t))\n for t in T]\n util.parallel_process(plot_time_slice, kwarglist=kwarglist)", "def setIterations(self, value):\n return self._set(nIterations=value)", "def traj_nslice (u,teq,tsample) :\n # get the number of frames in the slice (http://stackoverflow.com/a/7223557)\n traj_slice = u.trajectory[teq::tsample]\n return sum(1 for _ in traj_slice)", "def traj_nslice (u,teq,tsample) :\n # get the number of frames in the slice (http://stackoverflow.com/a/7223557)\n traj_slice = u.trajectory[teq::tsample]\n return sum(1 for _ in traj_slice)", "def time_slices(field=['uu1'], datadir='data/', proc=-1, extension='xz',\n format='native', tmin=0., tmax=1.e38, amin=0., amax=1.,\n transform='plane[0]', dtstep=1, deltat=0,\n oldfile=False, outfile=\"\"):\n\n import pylab as plt\n\n datadir = os.path.expanduser(datadir)\n if outfile != \"\":\n outslice = open(outfile, \"w\")\n filename = []\n if proc < 0:\n for i in field:\n filename += [datadir + '/slice_' + i + '.' + extension]\n else:\n for i in field:\n filename += [datadir + '/proc' +\n str(proc) + '/slice_' + i + '.' + extension]\n\n # Read the global dimensions.\n dim = read_dim(datadir, proc)\n if dim.precision == 'D':\n precision = 'd'\n else:\n precision = 'f'\n\n # Set up slice plane.\n if extension == 'xy' or extension == 'Xy':\n hsize = dim.nx\n vsize = dim.ny\n if extension == 'xz':\n hsize = dim.nx\n vsize = dim.nz\n if extension == 'yz':\n hsize = dim.ny\n vsize = dim.nz\n plane = []\n infile = []\n for i in filename:\n plane += [np.zeros((vsize, hsize), dtype=precision)]\n\n infile += [npfile(i, endian=format)]\n\n ifirst = True\n islice = 0\n plotplane = []\n dt = 0\n nextt = tmin\n while True:\n try:\n raw_data = []\n for i in infile:\n raw_data += [i.fort_read(precision)]\n except ValueError:\n break\n except TypeError:\n break\n\n if oldfile:\n t = raw_data[0][-1]\n for i in range(len(raw_data)):\n plane[i] = raw_data[i][:-1].reshape(vsize, hsize)\n else:\n t = raw_data[0][-2]\n for i in range(len(raw_data)):\n plane[i] = raw_data[i][:-2].reshape(vsize, hsize)\n\n exec('tempplane =' + transform)\n\n if t > tmin and t < tmax:\n if dt == 0:\n plotplane += tempplane.tolist()\n\n if ifirst:\n #print \"----islice----------t---------min-------max-------delta\" # Python 2\n print(\"----islice----------t---------min-------max-------delta\")\n #print \"%10i %10.3e %10.3e %10.3e %10.3e\" % \\ # Python 2\n #(islice, t, tempplane.min(), tempplane.max(), # Python 2\n #tempplane.max() - tempplane.min()) # Python 2\n print(\"{0:10} {1:10.3e} {2:10.3e} {3:10.3e} {4:10.3e}\".format(islice, t, tempplane.min(), tempplane.max(), tempplane.max() - tempplane.min()))\n if outfile != \"\":\n outslice.write(\n #\"%10i %10.3e %10.3e %10.3e %10.3e\" % # Python 2\n #(islice, # Python 2\n #t, # Python 2\n #tempplane.min(), # Python 2\n #tempplane.max(), # Python 2\n #tempplane.max() - # Python 2\n #tempplane.min())) # Python 2\n \"{0:10} {1:10.3e} {2:10.3e} {3:10.3e} {4:10.3e}\".format(\n islice,\n t,\n tempplane.min(),\n tempplane.max(),\n tempplane.max() -\n tempplane.min())) \n outslice.write(\"\\n\")\n\n ifirst = False\n islice += 1\n nextt = t + deltat\n if deltat == 0:\n dt = (dt + 1) % dtstep\n elif t >= nextt:\n dt = 0\n nextt = t + deltat\n else:\n dt = 1\n\n ax = plt.axes()\n ax.set_xlabel('t')\n ax.set_ylabel('y')\n ax.set_ylim\n plt.imshow(np.array(plotplane).reshape(islice, vsize).transpose(),\n vmin=amin, vmax=amax)\n manager = plt.get_current_fig_manager()\n manager.show()\n\n for i in infile:\n 
i.close()\n if outfile != \"\":\n outslice.close()", "def load_obstab_feedback_sliced(self, dataset='' , file ='' , datetime='' ):\n k = dataset \n F = file \n dt = datetime\n \n if dt != self.unique_dates[k][F]['up_to_dt_slice']:\n print(\"Error! the dit does not correspond to the dt I calculated in the previous loading! \")\n return 0\n \n logging.debug(\" === (Re)Load data for %s file %s counter %s\" , dataset, file, data[k][F][\"counter\"])\n print(blue + 'Memory used before reading data: ', process.memory_info().rss/1000000000 , cend)\n \n slice_size = self.slice_size\n \n file = data[k][F]['h5py_file']\n rts, ri = data[k][F][\"recordtimestamp\"][:] , data[k][F][\"recordindex\"][:]\n\n index_min = self.unique_dates[k][F]['indices'][dt]['low'] # here no offset since I am reading the original data \n ind = np.where(rts==dt)[0][0] # index of specific dt , I need the extremes indices of the next date_time after slicing \n \n try: \n up_to_dt_slice = rts[ind + slice_size ] # \n index_max = self.unique_dates[k][F]['indices'][up_to_dt_slice]['low'] # maximum index in the array of date_time to slice on\n update_index = True\n except:\n \"\"\" If the dt is too large, I take the whole array \"\"\"\n index_max = 1000000000000000\n update_index = False \n \n \n ####################\n # OBSERVATIONS TABLE\n #################### \n logging.debug ('*** Loading observations_table' )\n obs_tab = file['observations_table'] \n\n #print('CHECKING THE INDICES:::: ' , k , ' index_min ', index_min , ' index_max ', index_max )\n obs_dic= {} \n for ov in self.observations_table_vars:\n v = copy.deepcopy( obs_tab[ov][index_min:index_max ] )\n obs_dic[ov] = v \n data[k][F]['observations_table']= obs_dic \n\n ###########\n # ERA5FB\n ###########\n if k == 'era5_1' or k == 'era5_2':\n logging.debug('*** Loading era5fb ' )\n era5fb_tab = file['era5fb']\n fb_dic = {} \n for ov in self.era5fb_columns:\n try:\n v = copy.deepcopy( era5fb_tab[ov][index_min:index_max ] )\n fb_dic[ov] = v \n except:\n continue\n #print(\"CANNOT FIND \", ov ) \n \n data[k][F]['era5fb_tab']= fb_dic\n \n print(blue + 'Memory used after reading data: ', process.memory_info().rss/1000000000 , cend)\n \n \"\"\" Updating the indices \"\"\" \n self.unique_dates[k][F]['index_offset'] = copy.deepcopy( self.unique_dates[k][F]['index_offset_next'] ) \n \n if update_index: \n self.unique_dates[k][F]['index_offset_next'] = index_max \n self.unique_dates[k][F]['up_to_dt_slice'] = up_to_dt_slice\n\n return 0", "def test_slice_other_dimension(setup_teardown_file):\n f = setup_teardown_file[3]\n\n for i, shape in enumerate([(3, 0), (1, 2, 0), (2, 0, 1)]):\n dset = f.create_dataset('x%d'%i, shape, dtype=np.int32)\n assert dset.shape == shape\n out = dset[:1]\n assert isinstance(out, np.ndarray)\n assert out.shape == (1,)+shape[1:]", "def setSideCount(self, count):\n _count = count\n if not isinstance(_count, int):\n _count = int(count)\n if _count < 3:\n raise ValueError, \"Invalid count: %d\" % _count\n self.__nsides = _count\n self.__increment = (360.0/float(_count)) * (math.pi/180.0)\n for _i in range(_count):\n self.__xpts.insert(_i, 0.0)\n self.__ypts.insert(_i, 0.0)", "def timeSlice(requestContext, seriesList, startSliceAt, endSliceAt=\"now\"):\n\n results = []\n start = time.mktime(parseATTime(startSliceAt).timetuple())\n end = time.mktime(parseATTime(endSliceAt).timetuple())\n\n for slicedSeries in seriesList:\n slicedSeries.name = 'timeSlice(%s, %s, %s)' % (slicedSeries.name, int(start), int(end))\n\n curr = 
time.mktime(requestContext[\"startTime\"].timetuple())\n for i, v in enumerate(slicedSeries):\n if v is None or curr < start or curr > end:\n slicedSeries[i] = None\n curr += slicedSeries.step\n\n results.append(slicedSeries)\n\n return results", "def setPTLimits(*args):\n args[0].Limit.PTLimit.pt_limit = args[1]", "def set_count(self, count, asset=None):\n self._set_property('pc:count', count, asset)", "def setPointWidth(self, width):\n for point in self.points:\n point.width = width", "def setPointSize(self, size):\n for point in self.points:\n point.size = size", "def setSegmentWidth(self, width):\n for segment in self.segments:\n segment.width = width", "def set_n(self, n: int) -> None:\r\n self.n_is_set = True\r\n self.n = n", "def count(self):\n self.scale(end_scale=(1.5, 1.5), duration=1.5, \n rel_origin=(0.5, 0.8), harmonic=True, loop=True)", "def set_t(self, Orbit):\n\t\n\tself.t = np.arange(self.t_min, self.t_max, Orbit.dt)\n\tself.N = len(self.t)\n\t\n\treturn", "def from_slice(self, slice):\n\n start = 0 if slice.start is None else slice.start\n step = 1 if slice.step is None else slice.step\n return self.count(start, step, stop=slice.step)", "def onSetToFourthSize(self, evt):\n\t\tself.halfResampleZ.Enable(0)\n\t\tself.fourthResampleZ.Enable(1)\n\t\tif self.dataUnits:\n\t\t\tzf = 1\n\t\t\tx, y, z = self.dataUnits[0].dataSource.getOriginalDimensions()\n\t\t\t\n\t\t\tif self.fourthResampleZ.GetValue():\n\t\t\t\tzf = 0.25\n\t\t\tself.currSize = int(0.25 * x), int(0.25 * y), int(zf * z) \n\t\tfor obj in [self.factorLabel, self.dimLabel, self.newDimX, self.newDimY, self.newDimZ, self.factorX, self.factorY, self.factorZ]:\n\t\t\tobj.Enable(0)", "def n_series(self, n_series):\n\n self.container['n_series'] = n_series", "def _create_slice(arr, id, reference_name, slice_start, slice_end):\n url = f\"http://{request.host}{BASE_PATH}/data?id={id}&reference_name={reference_name}&start={slice_start}&end={slice_end}\"\n arr.append({ 'url': url, })", "def number_of_nodes(self, number_of_nodes):\n\n self._number_of_nodes = number_of_nodes", "def setSplit(tmr_channel, total, newSecondPart):\n writeTMR(tmr_channel, TMR_CMPLD1, total-newSecondPart)\n writeTMR(tmr_channel, TMR_CMPLD2, newSecondPart)", "def set_free_set_offset(self, params, n):\n raise NotImplementedError()", "def set_dims(self, dataset):\n block.Block.set_dims(self, dataset)\n \n raw = dataset.blocks['raw']\n\n # local reference to input data\n data = dataset.get_source_data('prep')\n\n # this is the calculated proper size for self.data\n raw_dims = list(data.shape) # so we can compare to self.dims LIST\n\n # if we average FIDs, the dimensions change here \n raw_dims[-2] = int(raw_dims[-2]/self.set.fids_to_average)\n\n if self.dims is None:\n self._reset_dimensional_data(dataset)\n elif (self.dims)[::-1] != raw_dims: #FIXME bjs - need reverse here, ARRRRGH, why?\n self._reset_dimensional_data(dataset)\n\n # calculate measure_time array based on whether we average or not\n measure_time = list(raw.measure_time)\n nfids = len(measure_time)\n navgs = self.set.fids_to_average\n measure_time = measure_time[0::navgs]\n if (nfids % navgs) != 0:\n del measure_time[-1]\n self.measure_time = np.array(measure_time)", "def _setNumber(x, y, xsize, ysize, field):\n count = 0\n xarray = np.array([x-1, x-1, x-1, x, x, x+1, x+1, x+1])\n yarray = np.array([y-1, y, y+1, y-1, y+1, y-1, y, y+1])\n for i in range(8):\n if (xarray[i] < 0) or (yarray[i] < 0):\n pass\n elif (xarray[i] > xsize-1) or (yarray[i] > ysize-1):\n pass\n else:\n if 
field[xarray[i], yarray[i]] == np.inf:\n count += 1\n return count", "def set_free_set_offset(self, params, n):\n params.scaling_options.free_set_offset = n\n return params", "def _set_sample(self, sample, PB_X, t):\n for sensor in PB_X.keys():\n sample.set(sensor, np.array(PB_X[sensor]), t=t+1)", "def prepare_voxel_slice(self,slices,llc,urc,direction):\n\t\tsize=urc-llc\n\t\tres = float(size[direction] / slices)\n\t\tdims=numpy.ceil(size/res)\n\t\treturn numpy.zeros((dims[(direction+1) % 3],dims[(direction+2) % 3]),dtype='bool'), res", "def setswitchinterval(n): # real signature unknown; restored from __doc__\n pass", "def slice_parameters(self, slice_parameters: SliceParamsIm):\n\n self._slice_parameters = slice_parameters", "def _get_slice_len(s, axlen):\n if s.start is None:\n start = 0\n else:\n start = s.start\n if s.stop is None:\n stop = axlen\n else:\n stop = np.min([s.stop, axlen])\n if s.step is None:\n step = 1\n else:\n step = s.step\n\n return ((stop - 1 - start) // step) + 1", "def AddSlice(self, data_slice):\n self.slices.append(data_slice)", "def set_pointsize(self, pointsize):\n\tself.m_pointsize = pointsize", "def setVoxelSize(self, vxs):\n\t\tself.voxelsize = vxs\n\t\ta, b, c = vxs\n\t\tself.spacing = [1, b / a, c / a]", "def set_point_size(self, point_size=0.0):\r\n for b in self.buf:\r\n b.unib[8] = point_size", "def SetDataVolume(vDataSet,arr,aIndexC,aIndexT):\r\n nx = vDataSet.GetSizeX()\r\n ny = vDataSet.GetSizeY()\r\n nz = vDataSet.GetSizeZ()\r\n dtype = GetType(vDataSet)\r\n\r\n if DEBUG:\r\n print(\"SetDataVolume\")\r\n print(\"vDataSet:\",(nz,ny,nx),GetType(vDataSet))\r\n print(arr.shape)\r\n print(arr.dtype)\r\n print(aIndexC)\r\n print(aIndexT)\r\n\r\n #Make sure the data is in range and convert the array\r\n s = arr\r\n if dtype != arr.dtype:\r\n miset,maset = GetTotalRange(vDataSet)\r\n arr[arr<miset]=miset\r\n arr[arr>maset]=maset\r\n s = arr.astype(dtype)\r\n\r\n if dtype == np.uint8:\r\n SetData = vDataSet.SetDataVolumeAs1DArrayBytes\r\n s = s.tostring()\r\n elif dtype == np.uint16:\r\n SetData = vDataSet.SetDataVolumeAs1DArrayShorts\r\n s = np.ravel(s)\r\n elif dtype == np.float32:\r\n SetData = vDataSet.SetDataVolumeAs1DArrayFloats\r\n s = np.ravel(s)\r\n SetData(s,aIndexC,aIndexT)\r\n\r\n if 0:\r\n #Old method slice by slice\r\n if dtype == np.uint8:\r\n SetData = vDataSet.SetDataSubVolumeAs1DArrayBytes\r\n elif dtype == np.uint16:\r\n s = np.ravel(s)\r\n SetData = vDataSet.SetDataSubVolumeAs1DArrayShorts\r\n elif dtype == np.float32:\r\n s = np.ravel(s)\r\n SetData = vDataSet.SetDataSubVolumeAs1DArrayFloats\r\n\r\n for z in range(nz):\r\n t = time.time()\r\n l = arr[z,...].swapaxes(0,1).tostring()\r\n SetData(l,0,0,z,aIndexC,aIndexT,nx,ny,1)\r\n print z,time.time()-t\r\n\r\n #vDataSet.SetChannelRange(aIndexC,miset,maset)\r", "def set_layer(self, n):\n self.layernum = n\n self.update()", "def set_length(self, ak_tpl: BKT, newLength: float): # -> None:\n ...", "def __init__(self):\n self.counter = [[0, x + 1] for x in range(300)]", "def test_slice_setslice_forbidden(self):\n global setVal\n class foo:\n def __setslice__(self, i, j, value):\n global setVal\n setVal = i, j, value\n def __setitem__(self, index, value):\n global setVal\n setVal = index, value\n\n foo()[::] = 23\n self.assertEqual(setVal, (slice(None, None, None), 23))\n foo()[::None] = 23\n self.assertEqual(setVal, (slice(None, None, None), 23))", "def chunksize(self, value):\n\n self.data.chunksize = int(value)\n self.mask.chunksize = int(value)", "def onSetToCustDims(self, 
evt):\n\t\tself.halfResampleZ.Enable(0)\n\t\tself.fourthResampleZ.Enable(0)\n\t\t\n\t\tfor obj in [self.factorLabel, self.dimLabel, self.newDimX, self.newDimY, self.newDimZ, self.factorX, self.factorY, self.factorZ]:\n\t\t\tobj.Enable(1)\n\t\ttry:\n\t\t\trx = int(self.newDimX.GetValue())\n\t\t\try = int(self.newDimY.GetValue())\n\t\t\trz = int(self.newDimZ.GetValue())\n\t\t\tself.currSize = (rx, ry, rz)\n\t\texcept:\n\t\t\tpass", "def setN(self, value):\n return self._set(n=value)", "def plot_time_slice(index, t, U, fname, levels):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n title = 'Time slice {r} {t:0>4d}'.format(r=index, t=t)\n ax.set_title(title)\n U = U[:, :, t]\n contourf = ax.contourf(U, levels)\n fig.colorbar(contourf)\n util.makedirs_p(os.path.dirname(fname))\n fig.savefig(fname)", "def nskip(self, date, time0=None):\n time0 = self.time0 if time0 is None else Time(time0, scale='utc')\n dt = Time(date, scale='utc') - time0\n nskip = int(round((dt / self.dtsample / self.setsize)\n .to(u.dimensionless_unscaled)))\n return nskip", "def set_num_selections(self, integrity):\n #p = 1-self.integrity\n p = integrity\n numerator = 1\n denominator = 1+(0.29/p)\n num_selections = numerator/denominator\n self.num_selections = int(num_selections*self.limit)", "def set_n_accum(self, n):\n self.lib.SetNumberAccumulations(ct.c_int(n))", "def test_slice_of_length_zero(setup_teardown_file):\n f = setup_teardown_file[3]\n\n for i, shape in enumerate([(3, ), (2, 2, ), (2, 1, 5)]):\n dset = f.create_dataset('x%d'%i, data=np.zeros(shape, np.int32))\n assert dset.shape == shape\n out = dset[1:1]\n assert isinstance(out, np.ndarray)\n assert out.shape == (0,)+shape[1:]", "def setTickLength(major=24,minor=16):\n dislin.ticlen(major,minor)", "def size(self):\n size = 1\n for current_slice in self.slices:\n size *= current_slice.stop - current_slice.start\n return size", "def setSegments(self, segments):\n for point, segment in zip(self.points, segments):\n point.set(segment.p1)", "def test_slice_zero_length_dimension(setup_teardown_file):\n f = setup_teardown_file[3]\n\n for i, shape in enumerate([(0,), (0, 3), (0, 2, 1)]):\n dset = f.create_dataset('x%d'%i, shape, dtype=np.int32)\n assert dset.shape == shape\n out = dset[...]\n assert isinstance(out, np.ndarray)\n assert out.shape == shape\n out = dset[:]\n assert isinstance(out, np.ndarray)\n assert out.shape == shape\n if len(shape) > 1:\n out = dset[:, :1]\n assert isinstance(out, np.ndarray)\n assert out.shape[:2] == (0, 1)", "def set_index_ub(self, param, length):\n if tik.Dprofile().get_product_name() in (MINI, CLOUD, HISI_ES):\n sum_mask_ub = self.instance.Tensor(self.dtype, (16,),\n name=\"sum_mask_ub\",\n scope=tik.scope_ubuf)\n work_tensor_ub = self.instance.Tensor(self.dtype, (16,),\n name=\"work_tensor_ub\",\n scope=tik.scope_ubuf)\n self.instance.vec_reduce_add(self.mask, sum_mask_ub, param['reduce_mask_ub'], work_tensor_ub, 1, 8)\n\n mask_scalar = self.instance.Scalar(\"uint16\", name=\"mask_scalar\")\n mask_scalar.set_as(sum_mask_ub[0])\n with self.instance.if_scope(mask_scalar != 0):\n with self.instance.if_scope(param['count'] < PRE_NMS_TOPN):\n with self.instance.for_range(0, length) as mask_index:\n param['index_offset'].set_as(param['index_offset'] + 1)\n with self.instance.if_scope(param['count'] < PRE_NMS_TOPN):\n mask_scalar.set_as(param['reduce_mask_ub'][mask_index])\n\n # 1 fp16 == 15360 uint16\n with self.instance.if_scope(mask_scalar == 15360):\n param['index_ub'][param['count']].set_as(\n param['index_offset'])\n 
param['count'].set_as(param['count'] + 1)\n with self.instance.else_scope():\n param['index_offset'].set_as(param['index_offset'] + length)", "def setTickNumber(n=2, axes='XYZ'):\n dislin.ticks(n, axes)", "def set_time_step_size(self, delta_t):\n self.delta_t = delta_t", "def multiply_slice(starting_index, len_slice=13):\n\tmultiple = 1\n\n\tfor i in range(starting_index, starting_index + len_slice):\n\t\tmultiple *= int(number[i])\n\treturn multiple" ]
[ "0.6350953", "0.6137186", "0.58842385", "0.5869937", "0.56822366", "0.56016797", "0.5523093", "0.5513194", "0.54768455", "0.5377458", "0.5337342", "0.5313137", "0.5259041", "0.5251777", "0.52457607", "0.5220166", "0.5212587", "0.52124", "0.52096623", "0.5208932", "0.52069575", "0.51825", "0.5155861", "0.51429486", "0.5130738", "0.51236975", "0.5101", "0.50905824", "0.50905824", "0.50864315", "0.5071263", "0.5067158", "0.5056918", "0.50416493", "0.50392294", "0.50110346", "0.4999466", "0.49732566", "0.49684837", "0.49546963", "0.49459282", "0.49224037", "0.49170938", "0.49081662", "0.4906962", "0.49060866", "0.49060866", "0.49059883", "0.49040362", "0.490058", "0.4896172", "0.4892774", "0.48923996", "0.4878454", "0.4871208", "0.48590085", "0.4857186", "0.48560584", "0.4852769", "0.48518562", "0.48424405", "0.4838887", "0.4835871", "0.48272958", "0.48226804", "0.48112807", "0.48077324", "0.47916076", "0.47892195", "0.47886434", "0.47873196", "0.47860453", "0.478302", "0.47794884", "0.47762123", "0.4774138", "0.47738564", "0.4771362", "0.47706622", "0.4770489", "0.4758699", "0.47533458", "0.47479957", "0.47477376", "0.47468528", "0.4731566", "0.47313982", "0.4726222", "0.47251266", "0.47214192", "0.47183752", "0.47146446", "0.4708973", "0.46964964", "0.46926847", "0.469115", "0.46907213", "0.46901634", "0.46875232", "0.4681169" ]
0.7975833
0
Returns the number of individual DataSets (=time points) managed by this DataSource
def getDataSetCount(self):
    return int(self.numberOfImages / self.slicesPerTimepoint)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count(self):\n return self.data_container.count", "def get_num_datasets(self, data):\n dsets = set()\n for items in data:\n dsetid = items[3]\n dsets.add(dsetid)\n return len(dsets)", "def count_data(self):\n try:\n ndata = len(self.x)\n logger.info(\"Number of data points: {0}\".format(ndata))\n except AttributeError:\n logger.error(\"Data object has not been defined\")\n ndata = 0\n return ndata", "def get_data_ninstances(self):\n return self.data_ninstances", "def data_count(self):\n return(len(self.data))", "def datacounts(self):\n return self._properties[\"datacounts\"]", "def num_entries(self):\r\n raise NotImplementedError('BaseDataSource::num_entries not specified.')", "def count(self):\r\n return self.data_array.size", "def __len__(self):\n return len(self._timeseriesData)", "def __len__(self):\n return self._dataset.size(dirs=self._dirs)", "def number_of_data_nodes(self):\n return int(self._data['number_of_data_nodes'])", "def data_count(self):\r\n\r\n shp = self.df.shape\r\n row_count = shp[0]\r\n return row_count", "def Points_Counting(self):\n return len(self.__traectory_list)", "def getNumTimeDataFiles(self):\n return self.nTimeDataFiles", "def metric_data_count(self):\n\n if not self.__settings:\n return 0\n\n return len(self.__stats_table)", "def count_elements_in_dataset(dataset):\n return dataset.count()", "def get_n_sets(self):\n if not self._refreshed:\n self.refresh()\n return self._nSets", "def NumberOfEntries(self):\n return _table.DSTable_NumberOfEntries(self)", "def __len__(self):\r\n if self.is_superset:\r\n length = 0\r\n for ds in self.data:\r\n length += len(ds)\r\n return length\r\n else:\r\n return len(self.data)", "def len(self):\n return self.d_series.map_partitions(\n lambda s: s.list.len(), meta=self.d_series._meta\n )", "def dataCount(self, collectionName):\n count = collectionName.find().count()\n return count", "def getSampleCount(self):\r\n return len(self._data)", "def get_num_records(self):\n return self.__num_records", "def __len__(self):\n return self.data.index.get_level_values(0).to_series().nunique()", "def data_center_count(self) -> int:\n return pulumi.get(self, \"data_center_count\")", "def __len__(self):\n return len(self.dataset)", "def get_result_set_count(self):\n return self.db.zcount(\"soq_results\", 0, time())", "def size(self) -> Tuple[groupable, pdarray]:\n return self.count()", "def count(self):\n return self.size()", "def size(self, index):\n return self.base_dataset.size(index)", "def n_points(self) -> int:\n return len(self.all_df)", "def get_number_of_datasets_in_fifo(self):\n return self.read_byte_data(APDS_9960.GESTURE_FIFO_LEVEL_REG_ADDRESS)", "def dataset_size(self):\n return self.dataset.size", "def get_all_dataset_counts(\n self,\n ) -> Dict[Tuple[str, int, int], int]:\n res = self._engine.execute(\n select(\n [\n PRODUCT.c.name,\n TIME_OVERVIEW.c.start_day,\n TIME_OVERVIEW.c.period_type,\n TIME_OVERVIEW.c.dataset_count,\n ]\n )\n .select_from(TIME_OVERVIEW.join(PRODUCT))\n .where(TIME_OVERVIEW.c.product_ref == PRODUCT.c.id)\n .order_by(\n PRODUCT.c.name, TIME_OVERVIEW.c.start_day, TIME_OVERVIEW.c.period_type\n )\n )\n\n return {\n (\n r.name,\n *TimePeriodOverview.from_flat_period_representation(\n r.period_type, r.start_day\n )[:2],\n ): r.dataset_count\n for r in res\n }", "def __len__(self):\n return int(np.floor(len(self.dataset_df) / self.batch_size))", "def getNrEntries(self):\n return len(self.data)", "def get_data_count(self, collection):\n # Use 'data_count' attribute when available. 
It is created in the\n # BaseCollectionViewSet class.\n return (\n collection.data_count\n if hasattr(collection, \"data_count\")\n else collection.data.count()\n )", "def __len__(self) -> int:\n import h5py\n\n with h5py.File(\n os.path.join(self.root, self.data_dir, self.img_file_name), \"r\"\n ) as f:\n num_datapoints: int = f[self.split][\"pv_log\"].shape[0]\n\n return num_datapoints", "def __len__(self):\n return self.data.num_samples", "def get_num_objects(cls):\n return cls.mum_objects", "def count(self):\n\n raise NotImplementedError", "def __len__(self):\n return self.tsdf.shape[0]", "def __len__(self): \r\n length = len(self.data) - 2* self.skip_window\r\n #print ('length', length)\r\n return length\r\n #raise NotImplementedError('Implement the __len__ method of the dataset')\r", "def NumberOfRows(self):\n return _table.DSTable_NumberOfRows(self)", "def __len__(self):\n if self.settype == \"train\":\n return 64000\n else:\n return len(self.list_ids)", "def numCoordsets(self):\n\n return self._n_csets", "def get_count(cls):\n total = 0\n for counter in SimpleCounterShard.objects.all():\n total += counter.count\n return total", "def n_series(self):\n return self.container['n_series']", "def getDataCount(self, filter: t.Mapping[t.Text, t.Any] = {}\n ) -> DatasetCount:\n aggregate_data = self.getAggregateData(\n pipeline={\"count\": {\"$sum\": 1}},\n filter=filter,\n )\n count = first(aggregate_data.data)[\"count\"]\n return DatasetCount(count=count)", "def size(self):\n\t\treturn self._count", "def count(self):\n return len(self)", "def get_loaded_temps(self):\n # The data loaded successfully as long as instance variable _data_set is updated.\n if self._data_set is None:\n return None\n else:\n return int(len(self._data_set))", "def numobs(self):\n return len(self.datelist)", "def test_data_source_soaps_count_get(self):\n pass", "def getNumData(self):\n return len(self.data)", "def test_set_count(self) -> int:\n return pulumi.get(self, \"test_set_count\")", "def test_data_source_soaps_id_dynamic_datas_count_get(self):\n pass", "def count(self):\n with self.pdq:\n (count,)=self.pdq.cursor().execute('select count(*) from pdq').next()\n return count", "def _number_of_samples(self):\n return len(self._raw_data.samples)", "def n_points(self):\n\n if self.data_reduced:\n return len(self.data_reduced[0])\n else:\n return 0", "def __len__(self):\n return len(self.dataset) * self.samples_per_pair", "def __len__(self):\n return self.dataset.shape[0]", "def count(self, axis=None):\n return self.data.count(axis=axis)", "def count(self):\n return len(self._runs)", "def _get_dataset_size(loader):\n if isinstance(loader, (tuple, list)):\n return len(loader[0].dataset)\n else:\n return len(loader.dataset)", "def get_number_of_measurement(self):\n num_of_meas = 0\n for time in self.mdvtc.keys():\n num_of_meas = num_of_meas + self.mdvtc[time].get_number_of_measurement()\n #\n return num_of_meas", "def get_count(self):\n\n\t\treturn self.__count", "def nspins(self):\n return len(self)", "def count(self):\n return self.get_count()", "def count(self):\n objects = self.all()\n return len(objects)", "def count(self):\n return len(self.objects)", "def count(self) -> int:\n return self._adapter.count()", "def getNumEvents(dbsApi, dset):\n summary = getDsetSummary(dbsApi, dset)\n # it means the dataset was not produced\n if summary[0]['num_file'] == 0:\n return -1\n return summary[0]['num_event']", "def sum_pandas(self):\n return len(self.panda_files)", "def dim(self):\n return self._historical_data.dim", 
"def n_points(self) -> int:\n return len(self.df)", "def resource_record_set_count(self) -> int:\n return pulumi.get(self, \"resource_record_set_count\")", "def getNumStatDataFiles(self):\n return self.nStatDataFiles", "def dimension_count(self):\n return self._dimensionCount", "def get_number_of_data_points(self):\n\n log.warning(\n \"get_number_of_data_points not implemented, values for statistical measurements such as AIC or BIC are \"\n \"unreliable\",\n )\n\n return 1.0", "def getCount(self):\n return self.base.get(\"count\", [])", "def __len__(self):\n return len(self.data_list)", "def count(self):\n if not self.model:\n raise NameError('database model has not been set.')\n\n with self.session() as session:\n query = self.get_query(session)\n data = query.count()\n return data", "def getSampleCount(self):\r\n return len(self._biom_table.SampleIds)", "def numPostings(years):\n\tcount = []\n\tfor year in years:\n\t\tfilename = \"SmartEnergy\" +str(year) +\".xlsx\"\n\t\tDB = pd.read_excel(filename, sheet_name = 'Filters')\n\t\tcount.append(DB.iloc[10][1])\n\treturn count", "def get_num_points(self):\n dimensions = self.data.shape\n return dimensions[0]", "def dataset_size(self):\n if not self._dataset_size:\n # pylint: disable=attribute-defined-outside-init\n self._dataset_size = count_file_lines(\n self._hparams.source_dataset.files)\n return self._dataset_size", "def getCount(self):\n return self.count", "def tally(self):\n return self.count", "def numpoints(self):\n return len(self.pars) + 1 # so dof is 1", "def __len__(self):\n return self.dbms.getNbTables(self.db)", "def get_count(self):\r\n return self.count", "def numIncrementals(self) -> int:\n return len(self._dataArrays)", "def length(self):\n return self.count", "def countDataSize(self,filename):\n \n d = h5py.File(filename,'r')\n features = d['spectrometer/features'][:]\n select = self.selectData(features.astype(float), self.ifeature, d)\n N = len(features[select])\n d.close()\n\n N = (N//self.offsetLen) * self.offsetLen\n\n N = N*self.Nfeeds\n\n self.chunks += [[int(self.Nsamples), int(self.Nsamples+N)]]\n self.datasizes += [int(N/self.Nfeeds)]\n self.Nsamples += int(N)", "def nrows(self):\n return len(self.__data)", "def __len__(self):\n\n if self.is_finite_set:\n size = 0\n for set in self.sets:\n size += len(set)\n return size\n else:\n raise ValueError(\"'%s' is not a finite set.\" % self)", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:" ]
[ "0.7276192", "0.7221659", "0.72136873", "0.7015325", "0.69837743", "0.68426126", "0.6835689", "0.6830701", "0.6756762", "0.66132236", "0.6597357", "0.65931714", "0.65570873", "0.6531572", "0.6491252", "0.64376664", "0.6436097", "0.6429143", "0.642638", "0.6403021", "0.6397654", "0.63931596", "0.63931406", "0.6380524", "0.6339096", "0.6327278", "0.6325876", "0.6319374", "0.6318805", "0.63138866", "0.63003045", "0.6297256", "0.6283697", "0.62796164", "0.6264214", "0.62595505", "0.6255302", "0.625098", "0.6245024", "0.6243325", "0.6179734", "0.61540115", "0.615167", "0.6123423", "0.61196154", "0.61132866", "0.61123437", "0.6107287", "0.61067814", "0.6104321", "0.6095845", "0.6092341", "0.60891366", "0.6084064", "0.6083234", "0.60792243", "0.60721445", "0.6070228", "0.6064453", "0.6057428", "0.6050286", "0.6049808", "0.6041182", "0.60402495", "0.6038916", "0.60320735", "0.60268146", "0.60254765", "0.6021886", "0.6013794", "0.6013709", "0.6011516", "0.60094833", "0.6001444", "0.59982216", "0.59862095", "0.59851784", "0.59780717", "0.597463", "0.5963324", "0.5949581", "0.5949581", "0.5948882", "0.5936727", "0.59352684", "0.59323776", "0.5932301", "0.5931923", "0.5931417", "0.5931261", "0.5919807", "0.59165865", "0.59165335", "0.59035915", "0.59008366", "0.58992636", "0.5897145", "0.5894442", "0.5894442", "0.5894442" ]
0.79010725
0
Timepoint i i The timepoint to return
def getDataSet(self, i, raw = 0):
    data = self.getTimepoint(i)
    if self.isRGB and self.numberOfComponents == 4:
        extract = vtk.vtkImageExtractComponents()
        extract.SetComponents(0, 1, 2)
        extract.SetInput(data)
        data = extract.GetOutput()
    if self.flipVertically:
        flip = vtk.vtkImageFlip()
        flip.SetFilteredAxis(1)
        flip.SetInput(data)
        data = flip.GetOutput()
    if self.flipHorizontally:
        flip = vtk.vtkImageFlip()
        flip.SetFilteredAxis(0)
        flip.SetInput(data)
        data = flip.GetOutput()
    return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_time_points(self):\n return self._time", "def time(self):\r\n raise NotImplementedError", "def setTimepoint(self, tp):\n\t\tpass", "def time(self):\n return self._begin", "def t0(self):\n return self._time_axis.start", "def get_time(self):\n start=''\n end=''\n time=''\n times=self.times\n print(times[self.istep])\n if self.istep > 0:\n start=ncEarth.beginstr % times[self.istep].isoformat()\n\n\n if self.istep < len(times)-2:\n end = ncEarth.endstr % times[self.istep+1].isoformat()\n\n if start is not '' or end is not '':\n time=ncEarth.timestr % {'begin':start,'end':end}\n\n return time", "def oneTimepoint(timepoint):\n\tt = []\n\tfor vs in timepoint:\n\t\tt.append((timepoint.attrib.get('CollectionTime'), vs[0].text, vs[1].text))\n\treturn(t)", "def start_time(self) -> float:\r\n ...", "def get_time(self) -> float:\n raise NotImplementedError()", "def timeofflight(xhat0):\n\t\timport poincare\n\t\t#Find the point on the computed poincare section, closest to the x0\n\t\timin = np.argmin(np.linalg.norm(ps[:,1:5]-xhat0, axis=1))\n\t\t#Take its time of flight as\n\t\tif imin < np.size(ps,0)-1: \n\t\t\tTapproximate = ps[imin+1,0]-ps[imin,0]\n\t\telse:\n\t\t\tTapproximate = ps[imin,0]-ps[imin-1,0]\n\t\tprint(\"Tapproximate:\")\n\t\tprint(Tapproximate)\n\t\t#Integrate for a little bit longer than the approximated integration time:\n\t\tstoptime = 1.2*Tapproximate\n\t\tnumpoints = int(stoptime/0.01)\n\t\t#Integration time array:\n\t\tt = np.linspace(0, stoptime, numpoints)\n\t\txhatphi0 = np.append(xhat0, np.array([0], float))\n\t\txhatsol = twomode.intslice(xhatphi0, t, abserror=1.0e-14, relerror=1.0e-12)\n\t\ttx = np.append(np.array([t], float).transpose(), xhatsol, axis=1)\n\t\t#Compute Poincare section:\n\t\tpsreturn=poincare.computeps(tx, reqv, nhat, 1)\n\t\tprint(\"psreturn:\")\n\t\tprint(psreturn)\n\t\t#Take the time nearest to the approximated time. 
This is due to the\n\t\t#fact that the array ps sometimes includes the initial point and sometimes\n\t\t#does not, hence we are not always sure the position of the first return.\n\t\titof = np.argmin(np.abs(psreturn[:,0]-Tapproximate))\n\t\ttof = psreturn[itof,0]\n\t\treturn tof", "def at(self, t):\n return self.start + (self.end - self.start) * t", "def current_time_step(self) -> ts.TimeStep:\n return self._current_time_step", "def gettime(self):\n return self.t", "def time(self):\n raise NotImplementedError()", "def getTimes():", "def getTimes():", "def getTimes():", "def current_time(cls) -> float:", "def get_time(self):\n return self.get_timed() / 10.0", "def initialTime(self):\n return self.params['t0']", "def startTime(self) -> float:\n try: return self.times[0]\n except IndexError: return 0.0", "def compute_time_step():\n\n dt = Hydro.compute_time_step()\n\n return dt", "def time(self):\n return sum(self._interval) * .5", "def time(self):\n return self.time_array", "def get_point_at(self, t):\n segment = self.get_segment_for_time(t)\n return segment.point_at(t)", "def timeStep(self):\n return self.params['h']", "def wall_time(self):", "def get_time(self):\n return numpy.linspace(self.header.time_gate_start, \\\n self.header.time_gate_stop, self.num_time_bins())", "def round_trip_time(self):\n ...", "def __call__(self, t):\n \n c = c1 = c2 = 0\n d = d1 = d2 = None\n while d1==None or d2==None or d<=d2:\n _t = self.time(c)\n if _t == None:\n break\n else:\n d = abs(t - _t)\n if d1 == None or d<d1:\n c1, d1 = c, d\n elif d2 == None or d<d2:\n c2, d2 = c, d\n c += 1\n t1, t2 = map(self.time, [c1, c2])\n v1, v2 = map(self.__getitem__, [c1, c2])\n # a lo mejor la lista solo contiene un elemento\n if t1 == t2:\n return v1\n else:\n # si contiene por lo menos dos, pues interpolacion lineal\n return (v2*(t - t1) - v1*(t - t2))/(t2 - t1)", "def getPivotPointKeyTime(self, index, view) -> float:\n ...", "def get_time(t):\n return [time.clock()-t[0], time.time()-t[1]]", "def evaluate(self, time) -> float:\n ...", "def range(self):\n return self.times[0], self.times[-1]", "def time_return(self):\n return self.time", "def get_imeastime(self):\n return self.itime", "def time_step(self):\n return self._time_step", "def target(self, time, points, dt, num_way):\n start_index = min(int(time / dt), num_way - 1)\n end_index = min(start_index + 1, num_way - 1)\n start_point = points[start_index]\n end_point = points[end_index]\n fraction = float(time % dt) / dt\n return linear_interpolation_two_points(start_point, end_point, fraction).reshape(3)", "def mapRetime(ti, timelineTime):\n return ti.sourceIn() + int((timelineTime - ti.timelineIn()) * ti.playbackSpeed())", "def time(self) -> int:\n pass", "def time(self):\n try:\n if self.single_date:\n return self.stime\n else:\n return self.stime + (self.etime - self.stime) / 2\n except TypeError:\n return None", "def time_based(t, eta_init, last_eta, d = 0.01):\n return last_eta/(1+d*t)", "def get_time_step(self):\n return self._time_step", "def _get_half_time(self):\n return self.__half_time", "def now(self):\n return self._startTime + self.timeToOffset(self.currentTime, self._timeScale)", "def GetTime(self, *args, **kwargs):\n pass", "def Tt(s_c, point, system):\n Tx = tra(s_c, point, system)\n Tx.get_time()\n return Tx.time", "def getTime(self):\n return self.time", "def _omori_time(integ, c, p):\n if p == 1:\n return c*(exp(integ) - 1)\n else:\n return (integ*(1 - p) + c**(1 - p))**(1/(1 - p)) - c", "def time(self):\n return self[self.time_columns]", "def 
time(self):\n return self[self.time_columns]", "def getTime(self) -> float:\n return self.t", "def timeRange(self):\r\n _times = self.getTimes()\r\n return _times[0], _times[-1]", "def time_slot(self):\n min_time = self.introduction_time or 0\n current_time = self.agent.evaluation_context.get_current_time()\n assert isinstance(current_time, Number)\n if current_time < min_time:\n return None\n num = divmod(current_time - min_time, self.__period)[0] + 1\n start = min_time + (num - 1) * self.period\n end = start + self.period\n return num, start, end", "def ctime(self): # real signature unknown; restored from __doc__\r\n pass", "def exposuretime(self):\n _, = self.exposuretimes\n return _", "def get_time_info(self):\n\n raise NotImplementedError", "def get_time(self):\n return self.time_param", "def getTime(self):\n return self.step / (self.max_step + int(self.include))", "def getStartTime(self):\n raise NotImplementedError", "def time_function(t):\n\n omega = np.pi\n return np.sin(omega * t) + np.sin(10 * omega * t) + np.sin(20 * omega * t)", "def get_times(self):\n raise NotImplementedError(\"Abstract method not implemented.\")", "def time_slot(self):\n pass", "def __getitem__(self, index: datetime.time) -> float:\n\n # If the index is in the profile, return the index.\n if index in self._profile:\n return self._profile[index]\n\n # If the index is not in the profile, then the closest value needs to be\n # determined. If there is a tie, this does not matter.\n delta_t_to_t_map = {\n (\n abs(\n time.hour * 3600\n + time.minute * 60\n + time.second\n - (index.hour * 3600 + index.minute * 60 + index.second)\n )\n ): time\n for time in self._profile\n }\n return self._profile[delta_t_to_t_map[min(delta_t_to_t_map)]]", "def getSimulationTime(self):\r\n raise NotImplementedError()", "def time(self):\n return Time(self.hour, self.minute, self.second)", "def calculate_time(ix, xi, wf):\n wf_len = len(wf)\n x_time = np.arange(ix, (ix + wf_len * xi), xi)\n return x_time", "def T(self, point = -1):\n return self.solution('T', point)", "def tend(self):\n return self.tstart + self.duration", "def get_time(self):\n return self.time", "def getStartTime(self):\n return _osgAnimation.Vec2LinearSampler_getStartTime(self)", "def __get_time_span(self):\n\n nonzero = self.data[\"time\"].nonzero()\n return iso_time.time(self.data[\"time\"][nonzero[0][0]]), iso_time.time(\n self.data[\"time\"][nonzero[0][-1]])", "def at(self, t):\r\n return TimeArray(self[self.index_at(t)], time_unit=self.time_unit)", "def time(self):\r\n return self._idx", "def endTime(self) -> float:\n try: return self.times[-1]\n except IndexError: return 0.0", "def start_time(self):\n # TODO: use pd.Timestamp instead\n return self.time[0].to_pydatetime()", "def next_change(self, t=None):\n if t is None:\n t = self.engine.get_now()\n t -= self.initTime\n\n p = float(self.period) / POINTS_PER_CYCLE\n t2 = math.floor(t / p) * p + p\n\n t2 += self.initTime\n\n return t2", "def time(self):\n return time(\n self.hour, self.minute, self.second, self.microsecond, fold=self.fold\n )", "def t0(self):\n return self._t0", "def get_time(self):\n return self._ticks", "def constructTimeLineItem(self):\n\t\treturn", "def evaluateTime(self, *args):\n return _osgAnimation.Motion_evaluateTime(self, *args)", "def __call__ (self, t):\n #if t <= self.last_t:\n #raise SpaceTimeContinuumError(\n #\"We're moving back in time! 
Last t = {}, now = {}\".format(\n #self.last_t, t))\n\n #samp = self._sample(t)\n #self.last_t = t\n #self.last_samp = samp\n #return samp\n pass", "def getStartTime(self):\n return _osgAnimation.Vec4LinearSampler_getStartTime(self)", "def gettime(self):\n interval, value = _timerfd._timerfd.gettime(self)\n interval = self._join_time(*interval)\n value = self._join_time(*value)\n return interval, value", "def GetTime(self):\n return self.hour, self.minute, self.second", "def getStartTime(self):\n return _osgAnimation.Vec3LinearSampler_getStartTime(self)", "def test_time(self):\r\n pass", "def _tv_pointwise(data):\n return np.maximum(\n 0,\n np.ceil(\n (np.expand_dims(data.meeting_time - data.lag, -1) - np.arange(data.x.shape[0]))\n / data.lag\n ),\n )", "def min_time(self):\n #{{{ function to return time of first sample\n\n return self.mintime", "def _ith_point(self, i):\n if self.start is S.NegativeInfinity:\n initial = self.stop\n else:\n initial = self.start\n\n if self.start is S.NegativeInfinity:\n step = -1\n else:\n step = 1\n\n return initial + i*step", "def inter_arrival_times(self):\n # this function returns arrival times between two subsequent tuples in ms\n # task mean_inter_arrival_time std_inter_arrival_time\n if self.inter_arrival_time is None:\n if self.tuple_arrival is None:\n self.tuple_arrivals()\n self.inter_arrival_time = convert_throughput_to_inter_arr_times(self.tuple_arrival)\n\n return self.inter_arrival_time", "def PTS(self):", "def get_reltriggertimes(self):\n return np.array(self.trtimes)-self.soundstarttime", "def time(self):\n\t\treturn self._time", "def REAL_TIME_ADVANCE(dt):", "def at(self, t, tol=None):\r\n return self.data[..., self.time.index_at(t)]", "def step(self):\n\n e = self.event_queue.get()\n self.current_time = e.time\n component = e.component\n component.output(self.current_time)\n component.input(self.current_time)\n component.fire()\n\n self.event_queue.put(VirtualTimeScheduler.Event(self.current_time + component.interval, component))\n\n return self.current_time", "def get_time() -> int:\n return store.time", "def getSegment(self, t: float, endBehavior: str = 'halt') -> Tuple[int,float]:\n if len(self.times)==0:\n raise ValueError(\"Empty trajectory\")\n if len(self.times)==1:\n return (-1,0)\n if t > self.times[-1]:\n if endBehavior == 'loop':\n try:\n t = t % self.times[-1]\n except ZeroDivisionError:\n t = 0\n else:\n return (len(self.milestones)-1,0)\n if t >= self.times[-1]:\n return (len(self.milestones)-1,0)\n if t <= self.times[0]:\n return (-1,0)\n i = bisect.bisect_right(self.times,t)\n p=i-1\n assert i > 0 and i < len(self.times),\"Invalid time index \"+str(t)+\" in \"+str(self.times)\n u=(t-self.times[p])/(self.times[i]-self.times[p])\n if i==0:\n if endBehavior == 'loop':\n t = t + self.times[-1]\n p = -2\n u=(t-self.times[p])/(self.times[-1]-self.times[p])\n else:\n return (-1,0)\n assert u >= 0 and u <= 1\n return (p,u)", "def get_time_walking(self):\n return self.time_step_to_enqueue - self.time_entered" ]
[ "0.679934", "0.6443112", "0.64257336", "0.64229566", "0.6388925", "0.63801783", "0.6370203", "0.6363844", "0.63452727", "0.6335522", "0.63265383", "0.63182783", "0.63152707", "0.6278807", "0.61801505", "0.61801505", "0.61801505", "0.6171835", "0.6104696", "0.6076395", "0.6036604", "0.603092", "0.6029649", "0.5990972", "0.5990795", "0.59827584", "0.5976216", "0.5961865", "0.594199", "0.5936456", "0.5935444", "0.5932402", "0.59293896", "0.59293413", "0.5924414", "0.59177715", "0.5911112", "0.5904048", "0.589973", "0.5883483", "0.58779603", "0.5877914", "0.58748597", "0.58729553", "0.5867333", "0.58635885", "0.58564943", "0.5853735", "0.584898", "0.58410245", "0.58410245", "0.5836833", "0.58148086", "0.581181", "0.5806365", "0.58059835", "0.5799788", "0.5791074", "0.57721853", "0.5768007", "0.5753", "0.57458824", "0.57442605", "0.5734192", "0.57335484", "0.57330894", "0.5729656", "0.5725458", "0.5724946", "0.5713531", "0.57030916", "0.5699016", "0.56964445", "0.5695903", "0.5686287", "0.56836575", "0.5682945", "0.5676177", "0.56726706", "0.5671211", "0.56696343", "0.5667257", "0.5662354", "0.56614304", "0.56589943", "0.56582904", "0.56579876", "0.56551176", "0.56543016", "0.56483454", "0.5644313", "0.5633681", "0.56329614", "0.5625423", "0.56243485", "0.5621892", "0.5621597", "0.56145895", "0.5613262", "0.5609079", "0.5607369" ]
0.0
-1
A method that reads information from an image
def retrieveImageInfo(self, filename):
	assert filename, "Filename must be defined"
	assert os.path.exists(filename), "File that we're retrieving information from (%s) needs to exist, but doesn't." % filename
	self.ext = filename.split(".")[-1].lower()
	rdr = self.getReaderByExtension(self.ext)

	if self.ext == "bmp":
		rdr.Allow8BitBMPOn()
	rdr.SetFileName(filename)
	if rdr.IsA("vtkExtTIFFReader"):
		rdr.UpdateInformation()
		if rdr.GetNumberOfScalarComponents() == 1:
			rdr.RawModeOn()

	data = rdr.GetOutput()
	data.Update()
	self.numberOfComponents = data.GetNumberOfScalarComponents()

	if not self.ctf:
		bd = self.getDataBitDepth(data)
		self.ctf = vtk.vtkColorTransferFunction()
		if bd == 8 or bd == 12:
			self.ctf.AddRGBPoint(0, 0, 0, 0)
			self.ctf.AddRGBPoint((2 ** bd) - 1, 0, 1, 0)
		else:
			range = data.GetScalarRange()
			self.ctf.AddRGBPoint(range[0], 0, 0, 0)
			self.ctf.AddRGBPoint(range[1], 0, 1, 0)

	self.x, self.y, z = data.GetDimensions()
	self.dimensions = (self.x, self.y, self.slicesPerTimepoint)
	if z > 1:
		self.slicesPerTimepoint = z
		self.z = z
		self.dimensions = (self.x, self.y, self.slicesPerTimepoint)
		lib.messenger.send(self, "update_dimensions")
	self.originalDimensions = self.dimensions
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def image_info(img):\n\tprint(img.format)\n\tprint(img.size)\n\tprint(img.mode)", "def test_read_image(self):\n pass", "def image_info(path):\n global working_img\n working_img = Image.open(path)\n print('=======================================')\n print(f'이미지 파일 이름:{working_img.filename}')\n print(f'이미지 파일 파일 형식:{working_img.format}')\n print(f'이미지 용량:{working_img.size}')\n print(f'이미지 색상모드:{working_img.mode}')\n print(f'이미지 크기:{working_img.width}x{working_img.height}')", "def print_image_info(input_image):\n print()\n print(\"Basic Information on image: {}\".format(input_image.filename))\n print(\"Format: {}\".format(input_image.format))\n print(\"Mode: {}\".format(input_image.mode))\n print(\"Size: {}\".format(input_image.size))\n print(\"Width: {}\".format(input_image.width))\n print(\"Height: {}\".format(input_image.height))\n print(\"Palette: {}\".format(input_image.palette))\n print()", "def getimage(self):", "def get_image_info(path):\n try:\n image = Image.open(path)\n except IOError:\n logger.error(f\"'{path}' is not an image\")\n return\n\n if image.format != \"JPEG\":\n logger.error(f\"'{path}' is not a JPEG\")\n return\n\n info = {\n \"filename\": path,\n \"width\": image.width,\n \"height\": image.height,\n \"fileSize\": os.path.getsize(path),\n \"md5\": md5sum_file(path),\n }\n return info", "def getImageInfo(img, header=''):\n if (os.path.exists(img) == False):\n print \"image not found: \", img\n return\n # Assume this is a CASA image\n if (header == ''):\n try:\n print \"imhead\",\n header = imhead(img, mode = 'list') # This will work for most CASA builds\n except:\n print \"imhead\",\n header = imhead(img) # needed to prevent crash in early CASA 4.6 builds (see CAS-8214)\n print \"imhead\",\n header = imhead(img, mode = 'list')\n if (header is None):\n print \"imhead returned NoneType. 
This image header is not sufficiently standard.\"\n return\n if ('beammajor' in header.keys()):\n bmaj = header['beammajor']\n bmin = header['beamminor']\n bpa = header['beampa']\n elif ('perplanebeams' in header.keys()):\n beammajor = []\n beamminor = []\n beampa = []\n for beamchan in range(header['perplanebeams']['nChannels']):\n beamdict = header['perplanebeams']['*'+str(beamchan)]\n beammajor.append(beamdict['major']['value'])\n beamminor.append(beamdict['minor']['value'])\n beampa.append(beamdict['positionangle']['value'])\n bmaj = np.median(beammajor)\n bmin = np.median(beamminor)\n sinbpa = np.sin(np.radians(np.array(beampa)))\n cosbpa = np.cos(np.radians(np.array(beampa)))\n bpa = np.degrees(np.median(np.arctan2(np.median(sinbpa), np.median(cosbpa))))\n else:\n bmaj = 0\n bmin = 0\n bpa = 0\n naxis1 = header['shape'][0]\n naxis2 = header['shape'][1]\n cdelt1 = header['cdelt1']\n cdelt2 = header['cdelt2']\n if (header['cunit1'].find('rad') >= 0):\n # convert from rad to arcsec\n cdelt1 *= 3600*180/np.pi\n elif (header['cunit1'].find('deg') >= 0):\n # convert from deg to arcsec\n cdelt1 *= 3600\n if (header['cunit2'].find('rad') >= 0):\n cdelt2 *= 3600*180/np.pi\n # convert from rad to arcsec\n elif (header['cunit2'].find('deg') >= 0):\n # convert from deg to arcsec\n cdelt2 *= 3600\n if (type(bmaj) == dict):\n # casa >= 4.1.0 (previously these were floats)\n bmaj = headerToArcsec(bmaj)\n bmin = headerToArcsec(bmin)\n bpa = headerToArcsec(bpa)/3600.\n ghz = 0\n if ('ctype4' in header.keys()):\n if (header['ctype4'] == 'Frequency'):\n imgfreq = header['crval4']\n cdelt = header['cdelt4']\n crpix = header['crpix4']\n npix = header['shape'][3]\n ghz = imgfreq*1e-9\n if (ghz == 0):\n if ('ctype3' in header.keys()):\n if (header['ctype3'] == 'Frequency'):\n imgfreq = header['crval3']\n cdelt = header['cdelt3']\n crpix = header['crpix3']\n npix = header['shape'][2]\n ghz = imgfreq*1e-9\n return([bmaj,bmin,bpa,cdelt1,cdelt2,naxis1,naxis2,ghz], header)", "def image_process(image_info):\n path = os.path.join(cfg.IMAGESET, image_info.get(\"index\") + \".jpg\")\n if not os.path.exists(path):\n raise IOError(\"please check your file is not exists: \" + path)\n def load_image(path):\n image = Image.open(path)\n return image\n return load_image(path)", "def file_reader(image_file, label_file):\n\n image = im.imread(image_file)\n\n with open(label_file, \"r\") as file:\n label = float(file.read())\n\n return image, label", "def read_image(image_path, *args, **kwargs):\n # TODO: Implement the method\n image2 = Image.open(image_path)\n image = num.asarray(image2)\n\n return image", "def read_image(self, item):\n assert item['image_dtype'] == 'uint16'\n\n filename = os.path.join(self.home(item['basename']))\n s = open(filename, 'rb').read()\n assert hashlib.md5(s).hexdigest() == item['md5']\n img = np.fromstring(s, dtype=item['image_dtype']).byteswap()\n img = img.reshape(item['image_shape'])\n return img", "def read_img(img_path): \n return sitk.GetArrayFromImage(sitk.ReadImage(img_path))", "def read_image(path):\n img = misc.imread(path)\n return img", "def _GetImageInfo(self,path):\n hd = Header(path, scan=True)\n hdr = hd.hdr\n self.hdr = hdr\n if hdr is None:\n# Either a ref.dat file or it isn't an imaging file.\n if 'ref' in path and 'dat' in path:\n self.refdats[os.path.realpath(path)] = True\n info = {'type':'refdat'}\n return info\n else:\n return None\n elif hdr['filetype'] == 'dicom' and not path.endswith('.yaml'):\n# Write a yaml file to the raw data directory if possible.\n dirname, 
outfile = self._yaml_filename(path)\n yaml_name = '%s/%s' % (dirname, outfile)\n if not os.path.exists(yaml_name):\n# Create yaml file using dirname,\n# e.g., ../anatomicals/S2_EFGRE3D/s2_efgre3d.yaml\n try:\n hd.write_hdr_to_yaml('%s/%s' % (dirname,outfile))\n except IOError:\n# This is a nonessential function, so ignore exceptions\n# such as access violations.\n pass\n elif hdr['filetype'] == 'dicom' or hdr['filetype'] == 'ge_ifile':\n if not os.path.isdir(path):\n path = os.path.dirname(path)\n shdr = hdr['subhdr']\n nhdr = hdr['native_header']\n self.shdr = shdr\n if 'dti' in shdr.get('PulseSequenceName','').lower() \\\n or 'dti' in nhdr.get('PulseSequenceFile',''):\n psdname = 'dti'\n else:\n psdname = os.path.basename((shdr.get('PulseSequenceName','').strip()).lower())\n info = {'psdname':psdname, \\\n 'acqtime':shdr['AcqTime'], \\\n 'series':int(shdr['SeriesNumber']), \\\n 'plane':hdr['plane'].strip(), \\\n 'type':self.imgtype.get(psdname,None), \\\n 'plane':hdr['plane'], \\\n 'acqtime':shdr['SeriesTime'], \\\n# 'fmapdir':None, \\\n 'refdat':None, \\\n 'imgfile':None, \\\n 'base':None, \\\n 'tdim':int(hdr['tdim']), \\\n 'echo_spacing':None, \\\n 'filetype':'brik', \\\n 'suffix':self.suffix.get(hdr['filetype'], 'brik'), \\\n 'data_filetype':hdr['filetype']}\n if info['type'] == 'localizer':\n# Don't process the localizer.\n return info\n if isinstance(info['acqtime'], int):\n info['acquisition_time'] = time.ctime(info['acqtime'])\n if nhdr.get('ImageFormat',('unknown'))[0] == 'DERIVED' and info['type'] == 'epi':\n# Sometimes screenshots are defined as epis.\n info['type'] = None\n\n# Call the method appropriate to the type of scan in this series.\n stat = apply( self.GetInfoMethods.get(info['type'], self._NoInfo), \\\n [info, path])\n if stat:\n info = {'type':'break'}\n return info\n info['suffix'] = self.suffix.get(info['filetype'], 'brik')\n return info", "def process(self, image):", "def read_img(components):\n\n img_buf = open(components[0], 'rb').read()\n\n if not img_buf:\n raise Exception('image not read, path=%s' % components[0])\n\n arr = np.fromstring(img_buf, np.uint8)\n img = cv2.imdecode(arr, cv2.IMREAD_COLOR)\n components[1], components[2] = img.shape[:2]\n components[10] = img\n\n return components", "def read_image(img_path):\n img = imageio.imread(uri=img_path)\n return img", "def load_image(nom):\n print(\"load_image : [\", nom, \"]\")\n fic = gdal.Open(nom)\n print(fic)\n return fic.ReadAsArray(), fic.GetGeoTransform()", "def _read_image(self, image_path:str, label:str):\n # Get the full path to the image\n image = \"\"\n if label == \"real\":\n image = os.path.join(self.root, \"real\", image_path)\n else:\n image = os.path.join(self.root, \"fake\", image_path)\n \n # Read the image\n image = cv2.imread(image)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n # Normalize the image\n image = image / 255.0\n\n # Convert the image to floating point to use it as\n # an input to the PyTorch model\n image = image.astype(np.float32)\n\n return image", "def image_info(self):\n\n if not self._image_info:\n path_image_info = os.path.join(\n self._path, f\"ImageSet_{self._image['ImageSetID']}.ImageInfo\"\n )\n\n # Make sure the ImageInfo file really exists\n if not os.path.exists(path_image_info):\n self.logger.warning(\"ImageInfo path doesn't exist: %s\", path_image_info)\n return None\n\n self.logger.debug(\"Reading image data from: %s\", path_image_info)\n self._image_info = pinn_to_dict(path_image_info)\n\n return self._image_info", "def read_image(path: str):\n 
return Image.open(path, mode=\"r\")", "def read_image(path):\n img = ndimage.imread(path, mode=\"RGB\") \n return img", "def open_image_and_meta(image_bytes):\n with MemoryFile(image_bytes) as memfile:\n with memfile.open() as src:\n meta = src.meta\n arr = reshape_as_image(src.read())\n return arr, meta", "def read_first_image(file_name):\n pixels = None\n try : # open file_name\n with fits.open(file_name) as fits_blocks:\n block = fits_blocks[0]\n pixels = block.data\n header = block.header\n except IOError:\n print(\"Error while opening/reading file !\")\n exit()\n except FileNotFoundError:\n print(\"Error with file name\")\n exit()\n\n\n return header, pixels", "def read_image(img):\n out = Image.open(img)\n return Technicolor(out)", "def load(image_path):\n out = None\n\n #####################################\n # START YOUR CODE HERE #\n #####################################\n # Use skimage io.imread\n out = io.imread(image_path)\n ######################################\n # END OF YOUR CODE #\n ######################################\n\n return out", "def load_image(self, image_id):\n # Load image\n# print(self.image_info[image_id]['path'])\n image = cv2.imread(self.image_info[image_id]['path'],cv2.IMREAD_GRAYSCALE) \n image = image[:,:, np.newaxis] #Add 1 dimension for grayscale images\n return image", "def read_image(filename, representation):\n img = imread(filename)\n img = int2float(img)\n if representation == GS_REP:\n img = rgb2gray(img)\n return img", "def get_image_data(self):\n raise NotImplementedError(str(type(self)) + 'does not'\n 'implement get_image.')", "def iread(filename, *args, verbose=True, **kwargs):\n\n # determine if file is valid:\n # assert isinstance(filename, str), 'filename must be a string'\n\n\n # TODO read options for image\n # opt = {\n # 'uint8': False,\n # 'single': False,\n # 'double': False,\n # 'grey': False,\n # 'grey_709': False,\n # 'gamma': 'sRGB',\n # 'reduce': 1.0,\n # 'roi': None\n # }\n\n if isinstance(filename, str) and (filename.startswith(\"http://\") or filename.startswith(\"https://\")):\n # reading from a URL\n\n resp = urllib.request.urlopen(filename)\n array = np.asarray(bytearray(resp.read()), dtype=\"uint8\")\n image = cv.imdecode(array, -1)\n print(image.shape)\n return (image, filename)\n\n elif isinstance(filename, (str, Path)):\n # reading from a file\n\n path = Path(filename).expanduser()\n\n if any([c in \"?*\" for c in str(path)]):\n # contains wildcard characters, glob it\n # recurse and return a list\n # https://stackoverflow.com/questions/51108256/how-to-take-a-pathname-string-with-wildcards-and-resolve-the-glob-with-pathlib\n \n parts = path.parts[1:] if path.is_absolute() else path.parts\n p = Path(path.root).glob(str(Path(\"\").joinpath(*parts)))\n pathlist = list(p)\n\n if len(pathlist) == 0 and not path.is_absolute():\n # look in the toolbox image folder\n path = Path(__file__).parent / \"images\" / path\n parts = path.parts[1:] if path.is_absolute() else path.parts\n p = Path(path.root).glob(str(Path(\"\").joinpath(*parts)))\n pathlist = list(p)\n \n if len(pathlist) == 0:\n raise ValueError(\"can't expand wildcard\")\n\n imlist = []\n pathlist.sort()\n for p in pathlist:\n imlist.append(iread(p, **kwargs))\n return imlist\n\n else:\n # read single file\n\n if not path.exists():\n if path.is_absolute():\n raise ValueError(f\"file {filename} does not exist\")\n # file doesn't exist\n # see if it matches the supplied images\n path = Path(__file__).parent / \"images\" / path\n\n if not path.exists():\n raise 
ValueError(f\"file {filename} does not exist, and not found in supplied images\")\n\n # read the image\n # TODO not sure the following will work on Windows\n im = cv.imread(path.as_posix(), **kwargs) # default read-in as BGR\n\n if im is None:\n # TODO check ValueError\n raise ValueError(f\"Could not read {filename}\")\n\n return (im, str(path))\n\n elif islistof(filename, (str, Path)):\n # list of filenames or URLs\n # assume none of these are wildcards, TODO should check\n out = []\n for file in filename:\n out.append(iread(file, *args))\n return out\n else:\n raise ValueError(filename, 'invalid filename')", "def _get_image_info(\n image_id: int,\n width: int,\n height: int,\n file_name: str,\n license_id=1,\n flickr_url=\"\",\n coco_url=\"\",\n date_captured=datetime.datetime.utcnow().isoformat(' ')):\n image_info = {\n \"id\": image_id,\n \"width\": width,\n \"height\": height,\n \"file_name\": file_name,\n \"license\": license_id,\n \"flickr_url\": flickr_url,\n \"coco_url\": coco_url,\n \"date_captured\": date_captured,\n }\n\n return image_info", "def get_image(inf):\n try:\n x, y = Image.open(inf).size\n except FileNotFoundError:\n print(\"Error: {} file not found.\".format(inf))\n sys.exit(1)\n\n pixels = list(Image.open(inf).getdata())\n return x, y, pixels", "def readImage(self, path, tt=1):\n return cv2.imread( path, tt)", "def test_read(self):\n for line in TESTIMAGES.split(\"\\n\"):\n vals = line.split()\n name = vals[0]\n dim1, dim2 = [int(x) for x in vals[1:3]]\n mini, maxi, mean, stddev = [float(x) for x in vals[3:]]\n obj = marccdimage()\n obj.read(self.fn[name])\n self.assertAlmostEqual(mini, obj.getmin(), 2, \"getmin\")\n self.assertAlmostEqual(maxi, obj.getmax(), 2, \"getmax\")\n self.assertAlmostEqual(mean, obj.getmean(), 2, \"getmean\")\n self.assertAlmostEqual(stddev, obj.getstddev(), 2, \"getstddev\")\n self.assertEqual(dim1, obj.dim1, \"dim1\")\n self.assertEqual(dim2, obj.dim2, \"dim2\")", "def read_image(image_path):\n if not os.path.exists(image_path):\n raise IOError('File does not exist: %s' % image_path)\n else:\n return Image.open(image_path)", "def read_img(img_path):\n return sitk.GetArrayFromImage(sitk.ReadImage(img_path))", "def _process_image(filename, coder):\n # Read the image file.\n with tf.gfile.FastGFile(filename, 'rb') as f:\n image_data = f.read()\n \n # Convert any PNG to JPEG's for consistency.\n if _is_png(filename):\n print('Converting PNG to JPEG for %s' % filename)\n image_data = coder.png_to_jpeg(image_data)\n # Decode the RGB JPEG.\n image = coder.decode_jpeg(image_data)\n\n # Check that image converted to RGB\n assert len(image.shape) == 3\n height = image.shape[0]\n width = image.shape[1]\n assert image.shape[2] == 3\n\n return image_data, height, width", "def read_image(filename):\n img = Image.open(filename)\n im = np.array(img)\n return im", "def read_image(image_path):\n im = Image.open(image_path, 'r')\n return np.array(im)", "def img_read(name):\n\n img = cv2.imread(name)\n\n return img", "def do_info (self, line) :\n\t\tprint\n\t\tprint get_info_string( self.__image )\n\t\tprint", "def get_input(path):\n img = imread(path)\n return img", "def build_img_info(img_root):\n imgs = []\n feats = []\n K = []\n for i, name in enumerate(os.listdir(img_root)):\n if '.jpg' in name or '.JPG' in name:\n path = os.path.join(img_root, name)\n img = cv2.imread(path)\n imgs.append(img)\n feature_process = FeatureProcess(img)\n kpt, des = feature_process.extract_features()\n photo_info = PhotoExifInfo(path)\n photo_info.get_tags()\n 
K.append(photo_info.get_intrinsic_matrix())\n A = photo_info.get_area()\n D = photo_info.get_diam()\n feats.append({'kpt': kpt, 'des': des, 'A': A, 'D': D})\n return imgs, feats, K", "def imread(fname):\n try:\n fp = open(fname, 'rb')\n im = Image.open(fp)\n except:\n sys.stderr.write('IOException: Invalid input type on '+fname+'\\n')\n sys.exit(1)\n else:\n if im.format not in FILETYPES:\n sys.stderr.write('IOException: Invalid image type\\n')\n sys.exit(1)\n \n fa = np.array(im.convert('F'))\n im = im.convert('RGB')\n wa = np.array(im)\n \n fp.close()\n\n return fa, wa", "def fetchInfo(self, path):\n\n\n img = self.getImageObject(path)\n\n if isinstance(img, ImageFile):\n return img.size\n else:\n return [img.width, img.height]", "def _getImage(self, img):\n\n # lazily fill in some attributes\n if not 'local_file_path' in img:\n img['local_file_path'] = os.path.join(self.image_root, img['filename'])\n if not 'feat' in img: # also fill in the features\n # NOTE: imgid is an integer, and it indexes into features\n fn = os.path.basename(img['filename'])\n return img", "def read_img(img_path):\n img_list=[]\n print('image loading...')\n for _,_,files in os.walk(img_path):\n for f in files:\n if f.find('.dcm')>=0:\n tmp_img=dicom.dcmread(os.path.join(img_path,f))\n tmp_img=tmp_img.pixel_array#[0::2,0::2]\n img_list.append(tmp_img)\n img_data=np.array(img_list)\n print('done')\n return img_data", "def __read_image(self, filename):\n self.image = cv2.imread(filename)\n self.image = cv2.cvtColor(self.image, cv2.COLOR_BGR2RGB)\n self.im_copy = self.image.copy()\n self.height, self.width = self.image.shape[:2]\n self.debug = 0", "def process_image(self):\n pass", "def get_raw_img(image_name):\n # IMREAD_COLOR ignores transparency (!)\n return cv2.imread(image_name, cv2.IMREAD_COLOR)", "def Read(image_path):\n # use cv2.imread() to read an images.\n # syntax : cv2.imread(filename, flag=None)\n return cv2.imread(image_path, 0)", "def image_data_info(page):\n xObject = page['/Resources']['/XObject'].getObject()\n\n for obj_key in xObject:\n obj = xObject[obj_key]\n if obj['/Subtype'] == '/Image':\n width, height = (obj['/Width'], obj['/Height'])\n num_bytes = len(obj._data)\n density = num_bytes * 1.0 / (width * height)\n return {'width': width, 'height': height, 'size': num_bytes, 'density': density}\n\n return None", "def get_img_info(self, idx):\n\n image = self.get_img(idx)\n img_height = image.size[0]\n img_width = image.size[1]\n\n return {\"height\": img_height, \"width\": img_width}", "def read_image(fileame, representation):\n validate_representation(representation)\n\n im = imread(fileame)\n if representation == 1 and is_rgb(im):\n # We should convert from Grayscale to RGB\n im = rgb2gray(im)\n return im.astype(np.float32)\n\n return normlized_image(im)", "def read_img(img_id, train_or_test, size):\n img = image.load_img(join(data_dir, train_or_test, img_id + '.jpg'), target_size=size)\n # img = image.img_to_array(img)\n return img", "def read_img(img_path:str) -> object:\n img = cv2.imread(img_path)\n return img", "def load_image(self, image_id):\n # Load image\n path = self.image_info[image_id]['path']\n if path.endswith(\".png\" or \".jpg\"):\n image = skimage.io.imread(path)\n elif path.endswith(\".dcm\"):\n ds = pydicom.read_file(path)\n image = ds.pixel_array\n # If grayscale. 
Convert to RGB for consistency.\n if image.ndim != 3:\n image = skimage.color.gray2rgb(image)\n # If has an alpha channel, remove it for consistency\n if image.shape[-1] == 4:\n image = image[..., :3]\n return image", "def fitsread(imgname, header = False):\n try:\n if header:\n img_data, header = pyfits.getdata(imgname, ignore_missing_end = True, header = True)\n return img_data, header\n else:\n img_data = pyfits.getdata(imgname, ignore_missing_end = True)\n return img_data\n except IOError:\n print \"FITSREAD: Unable to open FITS image %s\" %imgname\n \n return", "def read_image(image_file_path: str):\n\n pixels = numpy.array(Image.open(image_file_path))\n\n return pixels", "def read_image(self, image):\n if image not in self.content:\n img = cv2.imread(image)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n img = cv2.copyMakeBorder(img, 5, 5, 5, 5, cv2.BORDER_CONSTANT)\n text = pytesseract.image_to_string(img).lower()\n self.content[image] = text\n return text\n return self.content[image]", "def process_image(file_path):\n img_array = io.imread(file_path)\n detections, shapes, descriptors = detect_faces(person_database,img_array)\n\n names = []\n\n for desc in descriptors:\n name = find_match(person_database, desc)\n names.append(name)\n\n return pic_array, names, detections, shapes, descriptors", "def read_image(self, filePath):\n if filePath.endswith(\".dcm\"):\n image = sitk.ReadImage(filePath)\n image = sitk.GetArrayFromImage(image).astype(\"int16\")\n image = np.expand_dims(image[0,:,:], -1)\n elif filePath.endswith(\".png\"):\n image = cv2.imread(filePath)\n image = np.array(image, dtype = \"int16\")\n elif filePath.endswith(\".mha\"):\n image = sitk.ReadImage(filePath)\n image = sitk.GetArrayFromImage(image).astype(\"int16\")\n image = np.transpose(image,(1,2,0))\n return image", "def read(self):\n with self.lock:\n return self.image", "def imread(path):\n with open(path, 'rb') as f:\n with PIL.Image.open(f) as img:\n return img.convert('RGB')", "def open_image(infile):\n with fits.open(infile) as f:\n header = f[0].header\n data = f[0].data\n if data.ndim == 2:\n # NAXIS=2: [Y, X]\n image = data\n elif data.ndim == 3 and data.shape[0] == 1:\n # NAXIS=3: [FREQ=1, Y, X]\n image = data[0, :, :]\n elif data.ndim == 4 and data.shape[0] == 1 and data.shape[1] == 1:\n # NAXIS=4: [STOKES=1, FREQ=1, Y, X]\n image = data[0, 0, :, :]\n else:\n raise ValueError(\"Slice '{0}' has invalid dimensions: {1}\".format(\n infile, data.shape))\n return (header, image)", "def process_image(im):\r\n h, _, _ = im.shape\r\n im_gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)\r\n \r\n # Divide the picture into 3 regions\r\n l1 = int(0.65*h)\r\n l2 = int(0.77*h)\r\n im1 = im_gray[:l1,:]\r\n im2 = im_gray[l1+1:l2,:]\r\n im3 = im_gray[l2+1:,:]\r\n \r\n # Extract 4 pictures\r\n pics = extract_4_pics(im, im1)\r\n \r\n # Extract the word size\r\n word_size = extract_word_size(im2)\r\n \r\n # Extract the letters\r\n letters = extract_letters(im3)\r\n \r\n print 'word size =', word_size\r\n print 'letters =', letters\r\n for i, pic in enumerate(pics):\r\n imsave(str(i) + '.png', pic)\r\n\r\n return word_size, letters, pics", "def read_image(path):\n reader = sitk.ImageSeriesReader()\n dicom_filenames = reader.GetGDCMSeriesFileNames(path)\n reader.SetFileNames(dicom_filenames)\n reader.LoadPrivateTagsOn()\n img = reader.Execute()\n img.SetOrigin((0, 0, 0))\n return img", "def read_image(path):\n\n image = cv2.imread(path)\n return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)", "def read_image(filename, representation):\n\n 
color_flag = True #if RGB image\n image = imread(filename)\n\n float_image = image.astype(np.float64)\n\n if not np.all(image <= 1):\n float_image /= NORMALIZE #Normalized to range [0,1]\n\n if len(float_image.shape) != 3 : #Checks if RGB or Grayscale\n color_flag = False\n\n if color_flag and representation == 1 : #Checks if need RGB to Gray\n return skimage.color.rgb2gray(float_image)\n\n # Same coloring already\n return float_image", "def load_image(self, image_index):\n\t\t\timage_info = self.coco.loadImgs(self.image_ids[image_index])[0]\n\t\t\tpath = os.path.join(self.data_dir, 'images', self.set_name, image_info['file_name'])\n\t\t\treturn read_image_bgr(path)", "def read_image(image_path: str):\n\treturn cv.imread(image_path, cv.IMREAD_UNCHANGED)", "def generate_image_info(image):\n image = ee.Image(image)\n image_vis = image.visualize(**{\n 'min': image_min,\n 'max': image_max,\n 'palette': image_palette\n })\n\n print(image_min, image_max)\n\n if 'hillshade' in r and r['hillshade']:\n image_vis = hillshade(image_vis,\n image.subtract(image_min).divide(ee.Image.constant(image_max).subtract(image_min)),\n True)\n\n m = image_vis.getMapId()\n\n mapid = m.get('mapid')\n token = m.get('token')\n\n url = 'https://earthengine.googleapis.com/map/{mapid}/{{z}}/{{x}}/{{y}}?token={token}'.format(\n mapid=mapid,\n token=token\n )\n\n result = {\n 'mapid': mapid,\n 'token': token,\n 'url': url\n }\n return result", "def read_images(self, img_name, label_name):\n image_string = tf.read_file(img_name)\n image_decoded = tf.image.decode_jpeg(image_string, channels=3)\n label_string = tf.read_file(label_name)\n label_decoded = tf.image.decode_jpeg(label_string, channels=1)\n return image_decoded, label_decoded", "def imread(fname):\r\n return skimage.io.imread(fname)", "def read_image(image_path):\n return np.array(load_img(image_path, color_mode='grayscale')) / 255", "def read_map_file(path):\r\n with open(path) as f:\r\n dir_name = os.path.dirname(path)\r\n img = cv2.imread(dir_name + '/' + f.readline().strip())\r\n assert img.shape[0] > 0 and img.shape[1] > 0, 'Can not open image file'\r\n meter_per_pixel = float(f.readline().strip())\r\n ori_str = f.readline().strip().split()\r\n origin = np.array([int(ori_str[0]), int(ori_str[1])])\r\n init_heading = float(ori_str[2])\r\n return img, meter_per_pixel, origin, init_heading", "def get_image(image_path):\r\n image = Image.open(image_path, 'r')\r\n width, height = image.size\r\n pixel_values = list(image.getdata())\r\n if image.mode == 'RGB':\r\n channels = 3\r\n elif image.mode == 'L':\r\n channels = 1\r\n else:\r\n print(\"Unknown mode: %s\" % image.mode)\r\n return None\r\n pixel_values = np.array(pixel_values).reshape((1,width, height, channels))\r\n # print(pixel_values.shape)\r\n return pixel_values", "def read_image(filename, representation):\n image = scipy.misc.imread(filename)\n if int(representation) == 1:\n image = rgb2gray(image)\n return img_as_float(image)", "def __read_img_file(filename, label):\n image = cv2.cvtColor(cv2.imread(filename), cv2.COLOR_BGR2RGB)\n height, width, _ = image.shape\n image = cv2.resize(image, (img_size, img_size))\n # A label is consist of [y1, x1, y2, x2, class_idx]\n label = np.reshape(label, (-1, 5))\n rel_bboxes = label[..., 0:4] / np.array([height, width, height, width], np.float32)\n label = np.concatenate([rel_bboxes, np.expand_dims(label[..., -1], 1)], axis=-1)\n return image, label", "def input_image():\r\n im = cv2.imread('im7.png')\r\n return im", "def getImage(self, imageName):\n if imageName in 
self._imageDictionary:\n return self._imageDictionary[imageName]\n else:\n print(str(imageName) + \" is not in the dictionary\")", "def read_image(img_path):\n got_img = False\n if not osp.exists(img_path):\n raise IOError(\"{} does not exist\".format(img_path))\n while not got_img:\n try:\n img = Image.open(img_path).convert('RGB')\n got_img = True\n except IOError:\n from ipdb import set_trace; set_trace()\n print(\"IOError incurred when reading '{}'. Will redo. Don't worry. Just chill.\".format(img_path))\n pass\n return img", "def generate_image_info(im, params):\n im = ee.Image(im)\n\n # some images are scaled to a factor of 10.\n if params.get('scale') == 'log':\n im = im.log10()\n\n im = im.sldStyle(params.get('sld_style'))\n\n m = im.getMapId()\n\n mapid = m.get('mapid')\n token = m.get('token')\n\n url = 'https://earthengine.googleapis.com/map/{mapid}/{{z}}/{{x}}/{{y}}?token={token}'.format(\n mapid=mapid,\n token=token\n )\n\n result = {\n 'mapid': mapid,\n 'token': token,\n 'url': url\n }\n return result", "def parse_image_meta(meta):\n image_id = meta[:, 0]\n image_shape = meta[:, 1:4]\n window = meta[:, 4:8] # (x1, y1, x2, y2) window of image in in pixels\n active_class_ids = meta[:, 8:]\n return image_id, image_shape, window, active_class_ids", "def get_image_data (file_path, metadata_required):\n lookup = ImageLookup()\n return lookup.lookup_by_filename(file_path, metadata_required=False)", "def __read_image(self, path):\n path = 'data/' + path\n image = cv2.imread(path)\n\n # Convert greyscale image to BGR\n if image.shape[-1] == 1:\n image = np.dstack([image, image, image])\n\n # Convert BGR image to RGB image\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n return image", "def extract_exif_data(self, path_img):\n try:\n img = PIL.Image.open(path_img)\n self.exif = {PIL.ExifTags.TAGS[k]: v\n for k, v in img._getexif().items()\n if k in PIL.ExifTags.TAGS}\n if 'GPSInfo' in self.exif.keys():\n latitude = str(self.getCoordinate(self.exif['GPSInfo'][2], self.exif['GPSInfo'][1]))\n longitude = str(self.getCoordinate(self.exif['GPSInfo'][4], self.exif['GPSInfo'][3]))\n self.exif['GPSInfo'] = \"https://www.google.com/maps/search/?api=1&query=\" + str(latitude) + \",\" + str(\n longitude)\n return self.exif\n except:\n return None", "def readImg(filename, h1, h2, w1, w2):\n img = cv2.imread(filename, 1)\n # plt.figure()\n # plt.imshow(img)\n img = img[h1:h2, w1:w2]\n return img", "def read_image(filename, representation):\n im = imread(filename)\n if representation == GS_REP:\n im = rgb2gray(im)\n im = np.divide(im, MAX_VALUE - 1)\n return im", "def read_image(img_path):\n got_img = False\n if not osp.exists(img_path):\n raise IOError(\"{} does not exist\".format(img_path))\n while not got_img:\n try:\n img = Image.open(img_path).convert('RGB')\n got_img = True\n except IOError:\n print(\"IOError incurred when reading '{}'. Will redo. Don't worry. 
Just chill.\".format(img_path))\n pass\n return img", "def getimg(filename):\n return np.asarray(Image.open('imgdb/'+filename))", "def read_img(img_id, data_dir, train_or_test, size):\n img = image.load_img(os.path.join(data_dir, train_or_test, '%s.jpg' % img_id), target_size=size)\n img = image.img_to_array(img)\n return img", "def facts(url_file_stream_or_string):\n source = imagefacts.open_resource._open_resource(url_file_stream_or_string, _handle_url)\n data = source.read()\n return imagefacts.getimageinfo.getImageInfo(data)", "def extract(self, source):\n\t\tp = Parser()\n\t\tf = open_pds(source)\n\t\tif self.log: self.log.debug(\"Parsing '%s'\" % (source))\n\t\tself.labels = p.parse(f)\n\t\tif self.log: self.log.debug(\"Found %d labels\" % (len(self.labels)))\n\t\tif self._check_image_is_supported():\n\t\t\tif self.log: self.log.debug(\"Image in '%s' is supported\" % (source))\n\t\t\tdim = self._get_image_dimensions()\n\t\t\tloc = self._get_image_location()\n\t\t\timageSampleBits = int(self.labels['IMAGE']['SAMPLE_BITS'])\n\t\t\timageSampleType = self.labels['IMAGE']['SAMPLE_TYPE']\n\t\t\tmd5Checksum = self._get_image_checksum()\n\t\t\tif self.log: self.log.debug(\"Image dimensions should be %s\" % (str(dim)))\n\t\t\tif self.log: self.log.debug(\"Seeking to image data at %d\" % (loc))\n\t\t\tf.seek(loc)\n\t\t\tif imageSampleBits == 8:\n\t\t\t\treadSize = dim[0] * dim[1]\n\t\t\telif imageSampleBits == 16:\n\t\t\t\treadSize = dim[0] * dim[1] * 2\n\t\t\tprint readSize\n\t\t\tif self.log: self.log.debug(\"Seek successful, reading data (%s)\" % (readSize))\n\t\t\t# rawImageData = f.readline()\n\t\t\t# f.seek(-int(self.labels[\"RECORD_BYTES\"]), os.SEEK_CUR)\n\t\t\trawImageData = f.read(readSize)\n\t\t\tif md5Checksum:\n\t\t\t\trawImageChecksum = hashlib.md5(rawImageData).hexdigest()\n\t\t\t\tchecksumVerificationPassed = rawImageChecksum == md5Checksum and True or False\n\t\t\t\tif not checksumVerificationPassed:\n\t\t\t\t\tif self.log: self.log.debug(\"Secure hash verification failed\")\n\t\t\t\t\tif self.raisesChecksumError:\n\t\t\t\t\t\terrorMessage = \"Verification failed! 
Expected '%s' but got '%s'.\" % (md5Checksum, rawImageChecksum)\n\t\t\t\t\t\traise ChecksumError, errorMessage\n\t\t\t\telse:\n\t\t\t\t\tif self.log: self.log.debug(\"Secure hash verification passed\")\n\t\t\tif self.log: self.log.debug(\"Read successful (len: %d), creating Image object\" % (len(rawImageData)))\n\t\t\t# The frombuffer defaults may change in a future release;\n\t\t\t# for portability, change the call to read:\n\t\t\t# frombuffer(mode, size, data, 'raw', mode, 0, 1).\n\t\t\tif (imageSampleBits == 16) and imageSampleType == ('MSB_INTEGER'):\n\t\t\t\t#img = Image.frombuffer('I', dim, rawImageData, 'raw', 'I;16BS', 0, 1)\n\t\t\t\timg = Image.frombuffer('F', dim, rawImageData, 'raw', 'F;16B', 0, 1)\n\t\t\t\timg = ImageMath.eval(\"convert(a/16.0, 'L')\", a=img)\n\t\t\telse:\n\t\t\t\timg = Image.frombuffer('L', dim, rawImageData, 'raw', 'L', 0, 1)\n\t\t\tif self.log:\n\t\t\t\tself.log.debug(\"Image result: %s\" % (str(img)))\n\t\t\t\tself.log.debug(\"Image info: %s\" % (str(img.info)))\n\t\t\t\tself.log.debug(\"Image mode: %s\" % (str(img.mode)))\n\t\t\t\tself.log.debug(\"Image size: %s\" % (str(img.size)))\n\t\telse:\n\t\t\tif self.log: self.log.error(\"Image is not supported '%s'\" % (source))\n\t\t\timg = None\n\t\tf.close()\n\n\t\treturn img, self.labels", "def input(self):\n\t\treturn self.image", "def load_image(self, img_name):\n img_data = cv2.imread(img_name, 0)\n return img_data", "def readImage(imageName, flags=cv2.IMREAD_GRAYSCALE):\r\n if imageName.__class__ != \"\".__class__:\r\n print(\"ERROR - readImage: Input type must be a string (image name). Was: \", imageName.__class__)\r\n return None\r\n\r\n # cv2 loads an image in BGR format. Here we have a default value used to load it in grayscale mode\r\n image = cv2.imread(imageName, flags)\r\n return image", "def get_image_params(image_path):\n image = cv2.imread(image_path)\n\n return image, image.shape", "def _open_image(self, path):\n return cv.imread(path, 1)\n # .astype(float)", "def load_image(filename):\n with open(filename, 'rb') as img_handle:\n img = Image.open(img_handle)\n img_data = img.getdata()\n if img.mode.startswith('RGB'):\n pixels = [round(.299 * p[0] + .587 * p[1] + .114 * p[2])\n for p in img_data]\n elif img.mode == 'LA':\n pixels = [p[0] for p in img_data]\n elif img.mode == 'L':\n pixels = list(img_data)\n else:\n raise ValueError('Unsupported image mode: %r' % img.mode)\n w, h = img.size\n return {'height': h, 'width': w, 'pixels': pixels}" ]
[ "0.72628486", "0.72226435", "0.6992954", "0.6965782", "0.6876386", "0.68329227", "0.6822123", "0.68075496", "0.6758192", "0.6675034", "0.6649149", "0.6627219", "0.6605855", "0.660208", "0.6594365", "0.65857846", "0.6574458", "0.6571832", "0.6551282", "0.65477014", "0.65150887", "0.6500088", "0.6498421", "0.6484258", "0.64821255", "0.64788777", "0.64529884", "0.64347917", "0.6429904", "0.64220023", "0.6418864", "0.64057183", "0.63978034", "0.6396632", "0.63962835", "0.63908404", "0.6378583", "0.63784915", "0.637627", "0.6359455", "0.6348777", "0.6339746", "0.6337124", "0.6333099", "0.63245404", "0.631913", "0.63156587", "0.63144714", "0.6306402", "0.6299224", "0.6297234", "0.6294848", "0.6254201", "0.625124", "0.6244615", "0.6244138", "0.6240435", "0.6234931", "0.6222696", "0.62211865", "0.6220273", "0.6199955", "0.61988896", "0.6190136", "0.6186386", "0.6180324", "0.61780167", "0.61777973", "0.6175079", "0.6173031", "0.61662096", "0.61606133", "0.6154262", "0.6140873", "0.6135946", "0.6135829", "0.61357945", "0.61282223", "0.6127465", "0.61258453", "0.6119198", "0.61107224", "0.6100164", "0.6095051", "0.609198", "0.60816747", "0.60811526", "0.6079313", "0.6072328", "0.6067591", "0.6067257", "0.6066834", "0.60658807", "0.60597664", "0.60548115", "0.60487103", "0.60436964", "0.6042961", "0.60350674", "0.6034436" ]
0.64804757
25
Return the nth timepoint
def getTimepoint(self, n, onlyDims = 0):
	if not self.readers:
		self.getReadersFromFilenames()

	if self.is3DImage():
		if not self.readers:
			raise Logging.GUIError("Attempt to read bad timepoint", "Timepoint %d is not defined by the given filenames" % n)
		self.reader = self.readers[0]
		minZ = n * self.slicesPerTimepoint
		maxZ = (n+1) * self.slicesPerTimepoint - 1
		extract = vtk.vtkExtractVOI()
		extract.SetInput(self.reader.GetOutput())
		extract.SetVOI(0, self.x - 1, 0, self.y - 1, minZ, maxZ)
		changeInfo = vtk.vtkImageChangeInformation()
		changeInfo.SetInput(extract.GetOutput())
		changeInfo.SetOutputOrigin(0, 0, 0)
		changeInfo.SetExtentTranslation((0,0,-minZ))
		data = changeInfo.GetOutput()
	else:
		if n >= len(self.readers):
			n = 0
			raise Logging.GUIError("Attempt to read bad timepoint", "Timepoint %d is not defined by the given filenames" % n)
		self.reader = self.readers[n]
		data = self.reader.GetOutput()

	if not self.voxelsize:
		size = data.GetSpacing()
		x, y, z = [size.GetElement(x) for x in range(0, 3)]
		self.voxelsize = (x, y, z)
		print "Read voxel size", self.voxelsize

	if onlyDims:
		return
	return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def nthPersonGetsNthSeat(self, n: int) -> float:\n if n == 1:\n return 1.0\n\n return 1 / 2", "def get_second(time_index):\n return np.array(time_index.second).reshape(-1,1)", "def get_times_slice(self, index, next_index):\n next_index = tf.minimum(next_index, self.Nt)\n indices = tf.range(index, next_index)\n return tf.gather(self.times, indices, axis=0)", "def time(n):\n steps = 3 + math.ceil(n/5.0)*2\n return steps", "def nth(f, *N):\n return dmp_ground_nth(f.rep, N, f.lev, f.dom)", "def get_t(self, n, c):\n t = 1\n while t * n + t * t * n * n < 2 * c:\n t += 1\n return t - 1", "def at(self, t):\n return self.start + (self.end - self.start) * t", "def _ith_point(self, i):\n if self.start is S.NegativeInfinity:\n initial = self.stop\n else:\n initial = self.start\n\n if self.start is S.NegativeInfinity:\n step = -1\n else:\n step = 1\n\n return initial + i*step", "def __getitem__(self, index: datetime.time) -> float:\n\n # If the index is in the profile, return the index.\n if index in self._profile:\n return self._profile[index]\n\n # If the index is not in the profile, then the closest value needs to be\n # determined. If there is a tie, this does not matter.\n delta_t_to_t_map = {\n (\n abs(\n time.hour * 3600\n + time.minute * 60\n + time.second\n - (index.hour * 3600 + index.minute * 60 + index.second)\n )\n ): time\n for time in self._profile\n }\n return self._profile[delta_t_to_t_map[min(delta_t_to_t_map)]]", "def Ni_find(t):\r\n return ep(t) - 1", "def time_to_position(tracks, point):\n\n index1 = [index for index, track_point in enumerate(tracks[0]) if track_point == point][0]\n index2 = [index for index, track_point in enumerate(tracks[1]) if track_point == point][0]\n\n # We add one to the length of each track as 0,0 to first point is missing from the track data\n return index1 + 1 + index2 + 1", "def ntimestep(self):\n if self._ntimestep is None:\n self._ntimestep = self.get_data_ntimestep()\n\n return self._ntimestep", "def window_index_time(t,windowsize,overlap):\r\n \r\n try:\r\n t=t.tolist()\r\n except:\r\n t=t\r\n \r\n t1=t[0]\r\n t2=t1 + timedelta(seconds=windowsize)\r\n pt1=[0]\r\n pt2=[othertime.findNearest(t2,t)]\r\n while t2 < t[-1]:\r\n t1 = t2 - timedelta(seconds=overlap)\r\n t2 = t1 + timedelta(seconds=windowsize)\r\n\r\n pt1.append(othertime.findNearest(t1,t))\r\n pt2.append(othertime.findNearest(t2,t))\r\n \r\n return pt1, pt2", "def get_time_to_point(self, point):\n dist = 0\n for node in self.order_hist:\n if point == node:\n return dist\n dist += 1\n return -1", "def get_nanosecond(time_index):\n return np.array(time_index.nanosecond).reshape(-1,1)", "def nth(n, seq):\n try:\n return seq[n]\n except TypeError:\n return next(itertools.islice(seq, n, None))", "def nth(iterable, index):\n return next(itertools.islice(iterable, index, None))", "def next_change(self, t=None):\n if t is None:\n t = self.engine.get_now()\n t -= self.initTime\n\n p = float(self.period) / POINTS_PER_CYCLE\n t2 = math.floor(t / p) * p + p\n\n t2 += self.initTime\n\n return t2", "def target(self, time, points, dt, num_way):\n start_index = min(int(time / dt), num_way - 1)\n end_index = min(start_index + 1, num_way - 1)\n start_point = points[start_index]\n end_point = points[end_index]\n fraction = float(time % dt) / dt\n return linear_interpolation_two_points(start_point, end_point, fraction).reshape(3)", "def get_complex_last_elf(n):\n if math.floor(math.log(n, 3)) == math.ceil(math.log(n, 3)):\n return n\n prev_log3 = 3 ** int(math.floor(math.log(n, 3)))\n next_log3 = prev_log3 
+ 1\n # print 'log3', prev_log3, next_log3\n midpoint = int((next_log3 - prev_log3) / 2)\n index = n - prev_log3\n # print 'midpoint index', midpoint\n # print 'midpoint', prev_log3 + midpoint\n # print 'index', index\n if index <= midpoint:\n # print 'it\\'s the straight index'\n return index\n else:\n # print 'it\\'s the index times 2 minus half the distance'\n return index * 2 - midpoint", "def _getIndexAtTime(self, startTime: float) -> int:\n return round(startTime * self.frameRate * self.sampleWidth)", "def time(self):\r\n return self._idx", "def timestep_idx(self, timestep):\n timestep = pd.to_datetime(timestep)\n idx = np.where(self.time_index == timestep)[0][0]\n\n return idx", "def getPivotPointKeyTime(self, index, view) -> float:\n ...", "def time(self, k):\n \n it = Historial.__getitem__(self, k)\n if it != None:\n return it[0]\n else:\n return None", "def getWeightTime(self, index):\r\n\t\treturn None", "def GetPointForLabel(points):\n # TODO: find the last point at a minute boundary\n return points[-1]", "def poly_nth(f, n):\n if n < 0 or n > len(f)-1:\n raise IndexError\n else:\n return f[zzx_degree(f)-n]", "def findNthPlayerFromSeat(self, seat, n):\n\t\tfor i in range(1,7):\n\t\t\tindex = (seat + i) % 6\n\t\t\tif self.playerList[index] != None:\n\t\t\t\tif n > 1:\n\t\t\t\t\tn = n - 1\n\t\t\t\telse:\n\t\t\t\t\treturn (self.playerList[index], index)", "def get_timing(pidx):\n pgconn = get_dbconn(\"mesosite\")\n cursor = pgconn.cursor()\n cursor.execute(\n \"SELECT avg(timing) from autoplot_timing where appid = %s \"\n \"and valid > (now() - '7 days'::interval)\",\n (pidx,),\n )\n timing = cursor.fetchone()[0]\n return timing if timing is not None else -1", "def get_point_at(self, t):\n segment = self.get_segment_for_time(t)\n return segment.point_at(t)", "def index_in_epoch(self):\n return self._index_in_epoch", "def take_nth(n):\n def _take_nth_xducer(step):\n outer = {\"idx\": 0}\n def _take_nth_step(r=Missing, x=Missing):\n if r is Missing: return step()\n if x is Missing:\n return step(r)\n if outer[\"idx\"] % n:\n outer[\"idx\"] += 1\n return r\n else:\n outer[\"idx\"] += 1\n return step(r, x)\n return _take_nth_step\n return _take_nth_xducer", "def get_info(index, n):\n return index/n, index%n", "def get_day(time_index):\n return np.array(time_index.day).reshape(-1,1)", "def get_next(current):\n return 0.5 * (current + n / current)", "def take_second(info):\n return info[1]", "def at(self, t, tol=None):\r\n return self.data[..., self.time.index_at(t)]", "def getPositionKeyTime(self, index, keyIndex, view) -> float:\n ...", "def get_first_timepoints(sj):\n print('> Get first timepoints {}'.format(sj))\n sj_parameters = pickle.load(open(jph(pfo_subjects_parameters, sj), 'r'))\n\n study = sj_parameters['study']\n category = sj_parameters['category']\n\n root_subject = jph(root_study_rabbits, 'A_data', study, category, sj)\n pfi_DWI_Eddi_corrected = jph(root_subject, 'z_tmp', 'z_DWI', '{}_DWI_eddy.nii.gz'.format(sj))\n\n im = nib.load(pfi_DWI_Eddi_corrected)\n\n im_new = set_new_data(im, new_data=im.get_data()[..., :S0_timepoints])\n\n pfi_only_S0 = jph(root_output, '{}_DWI_only_S0.nii.gz'.format(sj))\n nib.save(im_new, pfi_only_S0)", "def T(self, point = -1):\n return self.solution('T', point)", "def get_hour(time_index):\n return np.array(time_index.hour).reshape(-1,1)", "def test_index_at_20101206():\r\n A = np.random.standard_normal(40)\r\n #negative t0\r\n TS_A = ts.TimeSeries(A, t0=-20, sampling_interval=2)\r\n npt.assert_equal(TS_A.time.index_at(TS_A.time), 
np.arange(40))\r\n #positive t0\r\n TS_A = ts.TimeSeries(A, t0=15, sampling_interval=2)\r\n npt.assert_equal(TS_A.time.index_at(TS_A.time), np.arange(40))\r\n #no t0\r\n TS_A = ts.TimeSeries(A, sampling_interval=2)\r\n npt.assert_equal(TS_A.time.index_at(TS_A.time), np.arange(40))", "def range(self):\n return self.times[0], self.times[-1]", "def get_nth_ast(self, n):\n return self.get_c_sect()[n]", "def getSlicesPerTimepoint(self):\n\t\treturn self.slicesPerTimepoint", "def get_specific_nc_timeindex(fname,\n time_value,\n time_varname='time'):\n\n assert isinstance(time_value, datetime.date)\n\n nc_fid = netCDF4.Dataset(fname, 'r')\n time_values = netCDF4.num2date(nc_fid.variables[time_varname][:],\n nc_fid.variables[time_varname].units,\n nc_fid.variables[time_varname].calendar)\n nearest_index = 0\n nearest_value = time_values[nearest_index]\n nearest_diff = time_difference(nearest_value, time_value)\n for idx, tvalue in enumerate(time_values):\n this_diff = time_difference(tvalue, time_value)\n if this_diff < nearest_diff:\n nearest_index = idx\n nearest_value = time_values[idx]\n nearest_diff = this_diff\n\n return nearest_index", "def _tv_pointwise(data):\n return np.maximum(\n 0,\n np.ceil(\n (np.expand_dims(data.meeting_time - data.lag, -1) - np.arange(data.x.shape[0]))\n / data.lag\n ),\n )", "def get_minute(time_index):\n return np.array(time_index.minute).reshape(-1,1)", "def time_position(self):\n rt_most_pixel = None\n lf_most_pixel = None\n time_position = []\n min_time_len = None\n for i in range (len(np.unique(self.pd.objid))):\n trajec = self.dataset.trajec(self.dataset.keys[i])\n times = trajec.time_epoch_secs + trajec.time_epoch_nsecs / 1e9\n time_pos = np.vstack([times, trajec.position_x])\n time_position.append(time_pos)\n if min_time_len == None:\n min_time_len = len(times)\n elif min_time_len > len(times):\n min_time_len = len(times)\n pixels = np.unique(trajec.position_x)\n if rt_most_pixel ==None:\n rt_most_pixel = pixels[-1]\n elif rt_most_pixel < pixels[-1]:\n rt_most_pixel = pixels[-1]\n if lf_most_pixel ==None:\n lf_most_pixel = pixels[0]\n elif lf_most_pixel > pixels[0]:\n lf_most_pixel = pixels[0]\n print min_time_len\n print rt_most_pixel\n print lf_most_pixel\n print rt_most_pixel - lf_most_pixel\n return time_position, rt_most_pixel, lf_most_pixel", "def oneTimepoint(timepoint):\n\tt = []\n\tfor vs in timepoint:\n\t\tt.append((timepoint.attrib.get('CollectionTime'), vs[0].text, vs[1].text))\n\treturn(t)", "def second(self) -> int:\r\n return self._second", "def _get_var_at_iter(self, n_iter):\n if self.schedule_dict is not None:\n # user specified schedule\n schedule = sorted(self.schedule_dict.items(), key=lambda x: x[0], reverse=True)\n for k, v in schedule:\n if n_iter >= k:\n return v\n return self.x_init\n else: # gradually decreasing schedule\n if self.drop_after_iters > 0:\n factor = 1 + (n_iter - 1) / self.drop_after_iters\n else:\n factor = 1\n\n if self.decrease_type == 'sqrt':\n return 1.0 * self.x_init / math.sqrt(factor)\n else:\n return 1.0 * self.x_init / factor", "def getSegment(self, t: float, endBehavior: str = 'halt') -> Tuple[int,float]:\n if len(self.times)==0:\n raise ValueError(\"Empty trajectory\")\n if len(self.times)==1:\n return (-1,0)\n if t > self.times[-1]:\n if endBehavior == 'loop':\n try:\n t = t % self.times[-1]\n except ZeroDivisionError:\n t = 0\n else:\n return (len(self.milestones)-1,0)\n if t >= self.times[-1]:\n return (len(self.milestones)-1,0)\n if t <= self.times[0]:\n return (-1,0)\n i = 
bisect.bisect_right(self.times,t)\n p=i-1\n assert i > 0 and i < len(self.times),\"Invalid time index \"+str(t)+\" in \"+str(self.times)\n u=(t-self.times[p])/(self.times[i]-self.times[p])\n if i==0:\n if endBehavior == 'loop':\n t = t + self.times[-1]\n p = -2\n u=(t-self.times[p])/(self.times[-1]-self.times[p])\n else:\n return (-1,0)\n assert u >= 0 and u <= 1\n return (p,u)", "def frame_to_time_point(time_point_of_first_video_frame, camera_frames_in_video, points_per_pulse, frame):\n return int(time_point_of_first_video_frame + (camera_frames_in_video[frame] * points_per_pulse))", "def Temp(t):\n return 20 # Need to link to data", "def find_time_idx(nc, needle):\n tm = nc.variables[\"time\"]\n tstr = tm.units.replace(\"days since \", \"\")\n t0 = datetime.datetime.strptime(tstr.split()[0], \"%Y-%m-%d\")\n cal360 = True if tm.calendar == \"360_day\" else False\n cal365 = True if tm.calendar == \"365_day\" else False\n times = tm[:]\n for i, time in enumerate(times):\n if cal360:\n time = time - 15\n years = time / 360\n months = (time % 360) / 30\n ts = datetime.datetime(t0.year + years, 1 + months, 1)\n elif cal365:\n years = int(time / 365)\n months = int((time % 365) / 30)\n ts = datetime.datetime(t0.year + years, 1 + months, 1)\n else:\n ts = t0 + datetime.timedelta(days=time)\n if ts.year == needle.year and ts.month == needle.month:\n print \"Returning: %s/%s for needle: %s\" % (i, len(times), needle)\n return i\n return None", "def second(self) -> Index:\n warnings.warn(\n \"`second` will return int32 index instead of int 64 index in 4.0.0.\",\n FutureWarning,\n )\n return Index(self.to_series().dt.second)", "def getLinIterTimes( self, var, index = 0 ):\n\n values = self.getLinIterData( var, index )\n return values[1]", "def setSlicesPerTimepoint(self, n):\n\t\tassert n > 0, \"Slices per timepoint needs to be greater than 0\"\n\t\tprint \"Setting slices per timepoint to \", n\n\t\tself.slicesPerTimepoint = n\n\t\tself.z = n\n\t\tself.readers = []", "def timeStep(self):\n return self.params['h']", "def get_tmpT(T_e, i, tmpN_t):\n if i == 0:\n print \"Solution converged into an unphysical state. 
\",\\\n \"Choose a better initial guess r_0.\"\n else:\n tmpT = np.logspace(np.log10(T_e[-i-1]), np.log10(T_e[-i]), num= tmpN_t,\n endpoint = True)\n return tmpT", "def time_based(t, eta_init, last_eta, d = 0.01):\n return last_eta/(1+d*t)", "def nth(iterable, n, default=None):\n return next(islice(iterable, n, None), default)", "def nth(iterable, n, default=None):\n return next(islice(iterable, n, None), default)", "def nth(iterable, n, default=None):\n return next(islice(iterable, n, None), default)", "def to_timestamp(index, base_dt, fps=4):\n ts = time.mktime(base_dt.timetuple())\n return ts + index * (1.0 / fps)", "def __getitem__(self, i): #i is index given by the caller\n l_self = len(self)\n if i >= self.times * l_self:\n raise IndexError (\"Circle object goes around %d times\" % (self.times)) #raise IndexError\n return self.data[i % l_self] # return answer", "def get_time_ceiling(time, data):\n if time >= data.index.max():\n return data.iloc[-1]\n elif time <= data.index.min():\n return data.iloc[0]\n return data[str(time):].iloc[0]", "def getTimes():", "def getTimes():", "def getTimes():", "def litres(time):\n return int(time / 2)", "def seperate_time(sp):\n\n\tind = np.where(sp > 0)[0]\n\treturn (ind)", "def n(self):\n return self._time_axis.size", "def get_nth_filepath(self, n):\n return self.get_filepaths()[n] if n < len(self.get_filepaths()) else None", "def nth(n, iterable, default = None):\n return next(islice(iterable, n, None), default)", "def traj_nslice (u,teq,tsample) :\n # get the number of frames in the slice (http://stackoverflow.com/a/7223557)\n traj_slice = u.trajectory[teq::tsample]\n return sum(1 for _ in traj_slice)", "def traj_nslice (u,teq,tsample) :\n # get the number of frames in the slice (http://stackoverflow.com/a/7223557)\n traj_slice = u.trajectory[teq::tsample]\n return sum(1 for _ in traj_slice)", "def spark_index(n):\n return int(round((clamp(n) - minimum) * coefficient))", "def __getitem__(self, index):\n return self._timeseriesData[index]", "def _get_time(self, sec, nsec):\n return sec + nsec / (10**9)", "def get_time_index(self, month, year):\n return month + 12 * (year - self._first_valid_date.year) - 1", "def get_time_delta(n):\n return datetime.timedelta(days=n)", "def ricker(dt, pt):\n nt = int(2 * pt / dt)\n c = np.zeros(nt)\n t0 = pt / dt\n a_ricker = 4 / pt\n\n for it in range(0, nt):\n t = ((it + 1) - t0) * dt\n c[it] = -2 * a_ricker * t * math.exp(-(a_ricker * t) ** 2)\n\n return c", "def get_element(self, index):\n original_index = index\n if index < 0:\n index = self.size + index\n if index >= self.size or index < 0:\n raise IndexError(\n 'index %i is out of range for SeriesAxis with size %i'\n % (original_index, self.size)\n )\n return self.start + self.step * index", "def __getitem__(self, i):\n return self.__tiers[i]", "def line_segment(t, # Array of times for each position\n t_now # Current time (float)\n ):\n i = np.argwhere(t > t_now)\n if len(i) > 0:\n if i[0] != 0: # if the current time is not less than the starting time\n segment_starting_index = i[0][0] - 1\n else:\n segment_starting_index = 0\n\n segment_end_index = i[0][0]\n\n else: # if the current time is more than the last point (destination) time\n segment_starting_index = t.shape[0]\n segment_end_index = t.shape[0]\n\n return segment_starting_index, segment_end_index", "def nth_to_last2(head, k):\n if head is None:\n return 0\n i = nth_to_last2(head.next, k) + 1\n if i == k:\n print(head.data)\n return i", "def 
get_special_point(power,events,borders,eventName,numericValue):\n event_consider = events[events['eventName']==eventName].reset_index(drop=True)\n #around turn_on\n i = 0 \n count = 0\n event_index = []\n while(i<len(event_consider)):\n date = time.mktime(datetime.strptime(event_consider['time'][i], \"%Y-%m-%d %H:%M:%S\").timetuple())\n start = str(datetime.fromtimestamp(date-borders[0]))\n end = str(datetime.fromtimestamp(date+borders[1]))\n serie = Series.from_array(power[(power['time']>=start)&(power['time']<=end)]['value'])\n if len(serie)>0:\n event_index.append(serie.index[int(len(serie)/2)])\n count += 1\n i += 1\n print(\"number of\", eventName ,\"in groudtruth and power=\",count)\n return event_index,[numericValue]*len(event_index)", "def nskip(self, date, time0=None):\n time0 = self.time0 if time0 is None else Time(time0, scale='utc')\n dt = Time(date, scale='utc') - time0\n nskip = int(round((dt / self.dtsample / self.setsize)\n .to(u.dimensionless_unscaled)))\n return nskip", "def get_last(t_, w_):\n return t_ - tf.constant(1)", "def _event_arrival_path(path, n, e_0=0):\n # if e_0 is None:\n # the entry time of the first event\n\n if n > 0:\n e = e_0 + path.tasks[0].in_event_model.delta_plus(n + 1)\n elif n < 0:\n e = e_0 - path.tasks[0].in_event_model.delta_min(-n + 1)\n else:\n e = 0 # same event, so the difference is 0\n\n return e", "def time_step(self):\n return self._time_step", "def time_function(t):\n\n omega = np.pi\n return np.sin(omega * t) + np.sin(10 * omega * t) + np.sin(20 * omega * t)", "def return_pos_index(self, index, tpos, window_size):\r\n maximum = min(len(self.data), index+(tpos//window_size)+1) # since non-inclusive\r\n return np.random.randint(index, maximum)", "def get_time(self):\n start=''\n end=''\n time=''\n times=self.times\n print(times[self.istep])\n if self.istep > 0:\n start=ncEarth.beginstr % times[self.istep].isoformat()\n\n\n if self.istep < len(times)-2:\n end = ncEarth.endstr % times[self.istep+1].isoformat()\n\n if start is not '' or end is not '':\n time=ncEarth.timestr % {'begin':start,'end':end}\n\n return time", "def time_callback(from_index, to_index):\r\n # Convert from routing variable Index to time matrix NodeIndex.\r\n from_node = manager.IndexToNode(from_index)\r\n to_node = manager.IndexToNode(to_index)\r\n return data['time_matrix'][from_node][to_node]", "def get_tmid(l):\n return Time(l['date-obs'], format='fits') + l['exptime']/2*u.s", "def first_touch_timestamp(self):\n return self.touches[:1].index[0]" ]
[ "0.6113835", "0.6054137", "0.5898435", "0.58786285", "0.58263016", "0.57462263", "0.5740125", "0.5694311", "0.5676445", "0.56450284", "0.5632503", "0.55841124", "0.5574055", "0.55318683", "0.55112934", "0.5497918", "0.54749393", "0.54705596", "0.5468096", "0.5458759", "0.5441463", "0.54392374", "0.5435774", "0.5422698", "0.5416039", "0.54121935", "0.5392133", "0.5390978", "0.5365546", "0.53651", "0.5364531", "0.5342265", "0.5339246", "0.5334274", "0.53305435", "0.5305823", "0.5285447", "0.5275633", "0.5270471", "0.52596974", "0.52537596", "0.52521724", "0.5246544", "0.5224158", "0.52199453", "0.5204427", "0.5194786", "0.51843464", "0.51833075", "0.51819235", "0.51737696", "0.51736444", "0.5163691", "0.5162897", "0.51611245", "0.5159491", "0.51531476", "0.51422167", "0.5137233", "0.5133266", "0.5131026", "0.51236296", "0.5119003", "0.51169413", "0.51169413", "0.51169413", "0.51143396", "0.5113877", "0.510628", "0.510267", "0.510267", "0.510267", "0.50987107", "0.5090174", "0.50837684", "0.50824183", "0.5066076", "0.50634974", "0.50634974", "0.5062952", "0.5057905", "0.50560784", "0.5053506", "0.5032898", "0.50262755", "0.5022831", "0.5020925", "0.50172293", "0.5016397", "0.50088054", "0.50060874", "0.50030273", "0.5000733", "0.49781168", "0.49705586", "0.49678513", "0.49663514", "0.49621087", "0.49595273", "0.49566534" ]
0.59078765
2
Returns the (x,y,z) dimensions of the datasets this dataunit contains
def getDimensions(self): print "Returning",self.x,self.y,self.slicesPerTimepoint return (self.x, self.y, self.slicesPerTimepoint)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getDimensions():", "def dimensions():", "def get_data_dimensions(self):\n return image_utils.convert_shape_indexing(self._get_data_dimensions_rc(),\"rc\",self.image_indexing)", "def get_dimensions(self):\r\n x = []\r\n y = []\r\n z = []\r\n for i in self.verts:\r\n x.append(i[0])\r\n y.append(i[1])\r\n z.append(i[2])\r\n\r\n x.append(abs(min(x)))\r\n y.append(abs(min(y)))\r\n z.append(abs(min(z)))\r\n\r\n return max(x), max(y), max(z)", "def dims(self) -> tuple[str, str]:\n # if self.dim0 is not None:\n return self.y_dim, self.x_dim", "def get_num_dimensions(self):\n dimensions = self.data.shape\n return dimensions[1]", "def dims(self):\n return self.v.dims() # TODO: check (empty? etc)\n #return self.t.shape # TODO: check (empty? etc)\n # TODO: convert to tuple? here / in varset?", "def dataDimensions(data):\n logging.info('Number of rows of data: %s' % len(data))\n logging.info('Number of columns of data: %s' % len(data[1]))", "def dimensions(self) -> int:\n return pulumi.get(self, \"dimensions\")", "def dims(self):\n return self[0].dims", "def N(self):\n return self._dimensions", "def get_data_dim(self):\n return self.data_dim", "def dimension(self):\n\t\treturn self.d", "def get_dimensions(self):\n return self.lon_arr.shape", "def get_data_dims(self):\n return [len(self.tps)] + self.get_shape()[1:]", "def get_dimension_length(self):\n pass", "def getNumDimensions(self):\n return len(self.di.keys())", "def getDimensions(self):\n return self._majax, self._minax, self._pa", "def size(self):\n\t\treturn self.dims", "def dimension(self):\n return 3*self.genus - 3 + self.n", "def dimension(self) -> float:\n return self._dimensions", "def dimension(self):", "def dimensions(self) -> typing.Tuple[int, int]:\n dimensions = self.data[2]\n dimensions = re.findall(r'(\\d+)\\s+x\\s+(\\d+)\\s+M', dimensions.replace('-', '0'))\n return dimensions[0] if dimensions else (0, 0)", "def dim(self):\n return self._d", "def dimension(self):\n return len(self.qubit_values)", "def dim(self):\n\t\treturn self.D", "def get_dims(self):\n row_lbl, col_lbl = self.get_idxvals()\n return len(row_lbl), len(col_lbl)", "def num_dim(self):\n return len(self._dimensions)", "def num_dim(self):\n return len(self._dimensions)", "def num_dims(self):\n return self.h5['{}/{}'.format(SETTINGS, N_DIMS_STR)][()]", "def dimensionality(self):\n return int(self.nDims)", "def dim(self) -> int:", "def dim(self):\n return self.m, self.n", "def getDimensions(self):\n return _libsbml.Layout_getDimensions(self)", "def n_dims(self):\n return self.pdm.n_dims", "def calculate_dimensions(self):\n x_coordinates = np.sort(self.grid['x'][:, 0]) # first x node\n self.nr_nodes_z = np.where(x_coordinates == x_coordinates[0])[0].size\n self.nr_elements_x = self.elements.shape[0] / (self.nr_nodes_z - 1)\n self.nr_nodes_x = self.nr_elements_x + 1\n self.nr_elements_z = self.nr_nodes_z - 1", "def dim(self) -> int:\n pass", "def dim(self):\n return self.__dim__", "def dimension_size(self):\n return self._dim", "def InferSpatialDimension(self):\n\n assert self.points is not None\n # if self.points.shape[1] == 3:\n # if self.element_type == \"tri\" or self.element_type == \"quad\":\n # print(\"3D surface mesh of \", self.element_type)\n\n return self.points.shape[1]", "def dimensions(self):\n return len(self.parameter_names)", "def dimension(self):\n return self._dim", "def dimensions(self):\n return self.index.names", "def dimension_count(self):\n return self._dimensionCount", "def getDim(self):\n return \"%dx%d\" % (self.rows, self.cols)", "def 
dims(self):\n raise NotImplementedError('Please use Vector2Array or Vector3Array')", "def n_dims(self):\n return len(self.dimensions)", "def get_dim(self, name):\n return len(self.root_group.dimensions[name])", "def getDimensions(unique_name=None):", "def dimension(self):\n return self.__N", "def dimensions(self):\n d=dict()\n d['div'] = (self._div)\n d['var'] = len(self.used_variables)\n d['x'] = self.Xdim\n d['y'] = self.Ydim\n d['lev'] = self.lev\n d['dir'] = self._nb_dir\n return(d)", "def dim(self):\n raise NotImplementedError", "def num_dimensions(self):\n if self.__num_dimensions__ == 0:\n # Try to get the number of dimensions from the first point or bounding box\n if len(self.points) > 0:\n self.__num_dimensions__ = len(self.points[0].coordinate)\n elif len(self.bounding_boxes) > 0:\n self.__num_dimensions__ = len(self.bounding_boxes[0].start)\n return self.__num_dimensions__", "def get_dimensions(self, variable):\n try:\n var_dimension = self.dataset[variable].dims\n return var_dimension\n except:\n print(\"Error Occurred: No Dimensions detected... Exiting. \")\n exit()", "def get_dim():\n return (Settings.width, Settings.height)", "def dims(x):\n return len(x.shape)", "def dim(self):\n return self._dim", "def count_dims(da):\n return len(da.dims)", "def ndim(self):\n return self.data.ndim", "def dims(self):\n return (self.startx, self.starty, self.endx, self.endy)", "def getDimension(self):\n dim = len(self.__axis_labels__)\n if dim == 0:\n # Labels weren't set, so how about the data\n dim = self[0].dim()\n return dim", "def size_out(self):\n return self.dimensions", "def dimensions(self) -> Optional[Sequence['outputs.MetricDimensionResponse']]:\n return pulumi.get(self, \"dimensions\")", "def dimensions(self) -> Optional[Sequence['outputs.MetricDimensionResponse']]:\n return pulumi.get(self, \"dimensions\")", "def xdim(self):\n return len(self._x)", "def getDataSetCount(self):\n\t\treturn int(self.numberOfImages / self.slicesPerTimepoint)", "def GetVoxelSize(vDataSet):\r\n nx = vDataSet.GetSizeX()\r\n ny = vDataSet.GetSizeY()\r\n nz = vDataSet.GetSizeZ()\r\n\r\n if nx > 0: nx = abs(vDataSet.GetExtendMaxX()-vDataSet.GetExtendMinX())/nx;\r\n if ny > 0: ny = abs(vDataSet.GetExtendMaxY()-vDataSet.GetExtendMinY())/ny;\r\n if nz > 0: nz = abs(vDataSet.GetExtendMaxZ()-vDataSet.GetExtendMinZ())/nz;\r\n\r\n return nx,ny,nz", "def get_dimensions(self, fieldname=None):\n if fieldname is None:\n dims = self._dims.keys()\n else:\n dims = self.read_field(fieldname).dimensions.keys()\n return tuple(dims)", "def getDimensions(self):\n\ttop = self.getTop()\n\tleft = self.getLeft()\n\twidth = self.getWidth()\n\theight = self.getHeight()\n\treturn top, left, width, height", "def size_in(self):\n return self.dimensions", "def shape(self):\n return self.dataset.shape", "def dim(self) -> int:\n return self._n_dim", "def dim(self):\n return self.raw_wires.get_dim();", "def getDimension(self):\n return len(self.components)", "def get_dim(self):\n return self.dim", "def dimension(self):\r\n a = 0\r\n for x in self.faces():\r\n if (len(x) > a):\r\n a = len(x) \r\n return a-1", "def dimensions (self):\n return (self.width, self.height)", "def dim(self) -> tuple:\n if self.has_tensor(): return self.as_tensor().dim()\n else:\n return tuple(list(self[0].dim()[0]) + [len(self)]), self[0].dim()[1]", "def dataset_size(self):\n return self.dataset.size", "def dim(self):\n return len(self._n)", "def get_dimension_width(self):\n pass", "def dim(self):\n return self._input_dim, self._output_dim", "def 
dimension(self) -> int:\n return self.options.dimension", "def dimensions(self, varname):\n if self.handle == None: return None\n try:\n var = self.handle.variables[varname]\n except KeyError:\n return None\n return var.dimensions", "def dims(self):\n return tuple(d for d in (v.states for v in self.__vars)) if len(self.__vars) else (1,)", "def dim(self):\n raise NotImplementedError()", "def __len__(self) -> int:\n\n return self.layout.gaDims", "def dim(self):\n return (self.n, )", "def dimension(self):\n return self._dimension", "def dimension(self):\n return self._dimension", "def dimension(self):\n return self._dimension", "def dimension(self):\n return self._dimension", "def get_dim(self):\n return self._dim", "def size(self):\n\n frame = self.get_frame()\n\n # Unpack array dimensions\n height, width, layers = np.array(frame).shape\n\n return width, height", "def getdim(self):\n return round(self.w() / self.c)", "def dim(self) -> int:\n return self.atoms.shape[:-1]", "def ndim(self):\n return len(self.point)", "def _get_dimensions(self):\n corners = []\n for module in self.modules:\n for tile in module:\n corners.append(tile.corner_idx)\n corners.append(tile.opp_corner_idx)\n corners = np.stack(corners)\n\n # Find extremes\n min_yx = corners.min(axis=0)\n max_yx = corners.max(axis=0)\n\n size = max_yx - min_yx\n centre = -min_yx\n return tuple(size), centre", "def dim(self):\n return tuple(self._dim)", "def _get_dimensions(self):\n corners = []\n for module in self.modules:\n for tile in module:\n corners.append(tile.corners())\n corners = np.concatenate(corners)[:, :2] / self._pixel_shape\n\n # Find extremes, add 1 px margin to allow for rounding errors\n min_xy = corners.min(axis=0).astype(int) - 1\n max_xy = corners.max(axis=0).astype(int) + 1\n\n size = max_xy - min_xy\n centre = -min_xy\n # Switch xy -> yx\n return tuple(size[::-1]), centre[::-1]" ]
[ "0.7814758", "0.77764654", "0.77693594", "0.76322365", "0.74653226", "0.7423212", "0.7418739", "0.7339198", "0.7303002", "0.7278066", "0.7271514", "0.72714305", "0.7247757", "0.7213372", "0.7206813", "0.71931577", "0.7168946", "0.7104327", "0.71030354", "0.7093483", "0.70858115", "0.7071672", "0.706293", "0.7042327", "0.70330995", "0.70280665", "0.7014977", "0.69848776", "0.69848776", "0.69213", "0.6914094", "0.6889391", "0.68878496", "0.68841964", "0.68749046", "0.6866607", "0.68660533", "0.6860869", "0.6852686", "0.6850619", "0.6847674", "0.6846514", "0.68429494", "0.6840511", "0.68235224", "0.68228894", "0.68190074", "0.6816659", "0.6797553", "0.6794924", "0.67931783", "0.6778195", "0.677182", "0.67630386", "0.6761141", "0.6755946", "0.67492616", "0.67402416", "0.67344815", "0.67336506", "0.6728515", "0.67277163", "0.67258596", "0.67258596", "0.67115635", "0.670725", "0.6705827", "0.6676417", "0.66747504", "0.66738045", "0.66714835", "0.6662445", "0.6661982", "0.6659658", "0.66446996", "0.6642593", "0.6634986", "0.6630627", "0.6613186", "0.6606938", "0.660239", "0.660072", "0.65932727", "0.6584298", "0.65812355", "0.658086", "0.6576606", "0.65712863", "0.65592617", "0.65592617", "0.65592617", "0.65592617", "0.6550893", "0.65403926", "0.65383965", "0.6533388", "0.6531744", "0.6529495", "0.6523486", "0.6520885" ]
0.7157837
17
Returns the spacing of the datasets this dataunit contains
def getSpacing(self): if not self.spacing: a, b, c = self.getVoxelSize() self.spacing = [1, b / a, c / a] return self.spacing
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GetSpacing(self):\r\n\r\n return self._spacing", "def spacings(self):\n return np.array([self.pixel_spacing,\n self.pixel_spacing,\n self.slice_spacing])", "def spacing(self):\r\n\r\n return self.dx, self.dy, self.dz", "def margin(self):\r\n return self._generate_spacing_info(self.config['margin'])", "def get_spaces(self):\n return self.spaces", "def _calculate_spacing(self):\n # Spacing between each raindrop is 1 drop width/height.\n drop = Raindrop(self)\n drop_width, drop_height = drop.rect.size\n avail_space_x = self.rain_settings.screen_width - drop_width\n number_raindrops_x = avail_space_x // (2 * drop_width)\n\n # Determine the number of rows of raindrops that fit on the screen.\n avail_space_y = self.rain_settings.screen_height\n number_raindrops_y = avail_space_y // (2 * drop_height)\n\n return (\n avail_space_x, avail_space_y,\n number_raindrops_x, number_raindrops_y,\n drop_width, drop_height\n )", "def horizontal_spacing(self):\r\n return self.padding[1] + self.padding[3] + self.margin[1] + self.margin[3]", "def slice_spacing(self):\n return np.median(np.diff(self.slice_zvals))", "def get_um_spacing(self) -> Spacing:\n return tuple(float(x * 10**6) for x in self.spacing)", "def GetSpacerPixels(self):\r\n\r\n return self.spacer_pixels", "def padding(self):\r\n return self._generate_spacing_info(self.config['padding'])", "def expected_data_spacing(self, expected_data_spacing):\n\n self._expected_data_spacing = expected_data_spacing", "def GetToolSeparation(self):\r\n \r\n if self._art:\r\n return self._art.GetElementSize(AUI_TBART_SEPARATOR_SIZE)\r\n\r\n return 5", "def vertical_spacing(self):\r\n return self.padding[0] + self.padding[2] + self.margin[0] + self.margin[2]", "def margins(self) -> tuple[int, int, int, int]:\n return self._widget._mgui_get_margins()", "def gap_width(self):\n return self.container['gap_width']", "def get_distance_scale(self, row_spacing: float) -> tuple:\r\n \r\n return (self.field.get_row_spacing(), row_spacing)", "def getDim(self):\n return \"%dx%d\" % (self.rows, self.cols)", "def get_row_spacing(self) -> tuple:\r\n\r\n # Get the y coordinate of the line through each of the\r\n # field's rows at the middle of the image\r\n ys = [line[0] * (self.get_picture().get_size()[0] / 2) + line[1]\r\n for line in self.lines]\r\n\r\n # Get the distances between each of the approximated rows and sort them\r\n dists = sorted([ys[i] - ys[i - 1] for i in range(1, len(ys))])\r\n\r\n # Take the median of the distances\r\n dist_px = dists[len(dists)//2]\r\n\r\n return dist_px", "def getDimensions():", "def getSpace(self):\n return self.space", "def get_spacing(network):\n from openpnm.topotools.generators.tools import get_spacing\n d = {'vert.coords': network.coords, 'edge.conns': network.conns}\n spc = get_spacing(d)\n return spc", "def padding(self):\n pad = self.ntiles - self.windowsize\n return (int((pad - 1)/2.), int((pad + 1)/2.))", "def GetNiceExtentsBySpacing(minval,maxval,spacing,tolerance):\n pass", "def dimension(self):", "def __len__(self) -> int:\n\n return self.layout.gaDims", "def _find_spacing(self, row, ordering, max_width):\n return max_width / (len(ordering[row]) + 1)", "def spacing(self, spacing):\n\n self._spacing = spacing", "def get_dim():\n return (Settings.width, Settings.height)", "def dimensions():", "def widths(self):\n return self._widths", "def getDimensions(self):\n\t\tprint \"Returning\",self.x,self.y,self.slicesPerTimepoint\n\t\treturn (self.x, self.y, self.slicesPerTimepoint)", "def gutter_spacing(self) -> 
Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"gutter_spacing\")", "def gutter_spacing(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"gutter_spacing\")", "def gutter_spacing(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"gutter_spacing\")", "def GetSpacing(self, p_int, p_float=..., p_float=..., p_float=...):\n ...", "def getIndividualWidths(self):\n nquad = self.getNumQuads()\n widths = np.zeros(nquad)\n for i in range(nquad):\n q = self._quadrilaterals[i]\n widths[i] = get_quad_width(q) / 1000.0\n return widths", "def get_dimension_width(self):\n pass", "def dim(self) -> int:\n pass", "def get_space(self):\n return self.space", "def margin_size(self) -> int:\n return self._margin_size", "def dims(self) -> tuple[str, str]:\n # if self.dim0 is not None:\n return self.y_dim, self.x_dim", "def DimStyle0(self):\n\t\treturn self.Space(0)", "def get_data_dim(self):\n return self.data_dim", "def space(self):\n return self._space", "def dimension(self):\n\t\treturn self.d", "def get_margin(self):\n return unicode(self._visual_indent * 20)", "def margin_width(self):\n return self.border_width() + self.margin_left + self.margin_right", "def contentsMargins( self ):\n return self._margins", "def dimension(self):\n return 3*self.genus - 3 + self.n", "def get_dimension_length(self):\n pass", "def getDimensions(self):\n return _libsbml.Layout_getDimensions(self)", "def getMargin(self):\n assert False", "def dim(self) -> int:", "def width(self) -> int:", "def width(self) -> int:", "def getSpace(*args):", "def get_widths(self) -> tuple:\n words_width = 0\n spaces_width = 0\n for part in self.line_parts:\n words_width += part.width\n spaces_width += part.spaces_width\n return words_width, spaces_width", "def dim(self):\n\t\treturn self.D", "def ideal_spacing(data, npoints):\n dims = data.shape\n actual_npoints = (data >= 0).sum()\n spacing = np.ones(3, dtype='uint')\n\n while actual_npoints > npoints:\n\n # Subsample the direction with the highest number of samples\n ddims = dims / spacing\n if ddims[0] >= ddims[1] and ddims[0] >= ddims[2]:\n dir = 0\n elif ddims[1] > ddims[0] and ddims[1] >= ddims[2]:\n dir = 1\n else:\n dir = 2\n spacing[dir] += 1\n subdata = data[::spacing[0], ::spacing[1], ::spacing[2]]\n actual_npoints = (subdata >= 0).sum()\n\n return spacing", "def padding_width(self):\n\t\treturn self.paddings_shape_param('W')", "def testViewGapData(self):\n try:\n entryD = self.__mU.doImport(self.__instanceSavePath, fmt=\"pickle\")\n gapCountList = []\n gapLengthList = []\n entryCountD = {}\n for entryId in entryD:\n for _, eD in entryD[entryId][\"selected_polymer_entities\"].items():\n\n analD = eD[\"anal_instances\"] if \"anal_instances\" in eD else {}\n\n for _, aD in analD.items():\n entryCountD[entryId] = True\n gapCount = len(aD[\"gapD\"])\n tL = list(aD[\"gapD\"].values())\n tL = [t if t > 0 else 0 for t in tL]\n gapL = tL if tL else [0]\n gapCountList.append(gapCount)\n gapLengthList.extend(gapL)\n #\n logger.info(\"gaps %d gap lengths %d\", len(gapCountList), len(gapLengthList))\n #\n cu = DisorderChartUtils()\n # cu.doIntegerBarChart(gapCountList, plotPath=self.__plotGapCount, yPlotScale=None, yPlotMax=300000)\n cu.doIntegerBarChart(\n gapCountList,\n plotPath=self.__plotGapCount,\n yPlotScale=\"log\",\n yPlotMax=6,\n xPlotMax=30,\n xPlotLabel=\"Gap Count\",\n yPlotLabel=\"Protein Instances (log)\",\n plotTitle=\"Protein Instance Gap Count\",\n )\n self.__writeLegend(\n self.__plotGapCount,\n \"Gap count statistics for all (%d) protein 
sequences (%d X-ray structures with resolution limit < 3.5 Angstoms) \" % (len(gapCountList), len(entryCountD)),\n )\n cu.doIntegerBarChart(\n gapLengthList,\n plotPath=self.__plotGapLength,\n yPlotScale=\"log\",\n yPlotMax=6,\n xPlotMax=150,\n xPlotLabel=\"Gap width (residues)\",\n yPlotLabel=\"Gap Instances (log)\",\n plotTitle=\"Protein Instance Gap Widths\",\n )\n self.__writeLegend(\n self.__plotGapLength,\n \"Gap width statistics for all (%d) protein sequences (%d X-ray structures with resolution limit < 3.5 Angstoms) \" % (len(gapLengthList), len(entryCountD)),\n )\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()", "def thickness(self):\n return self._thickness", "def kspace(self):\n return self.kspace_x, self.kspace_y, self.kspace_data", "def dim_calculatorP3():\r\n probe_set = np.arange(1, 101)\r\n X = 20 - 36 + ((probe_set - 1) // 10) * 4\r\n Y = 2 - ((probe_set - 1) % 10) * 4\r\n dim = np.vstack((X, Y)).T\r\n return dim", "def observation_space_size(self) -> int:\n pass", "def dimensions(self) -> int:\n return pulumi.get(self, \"dimensions\")", "def margin(self) -> Tuple[int, int, int, int]:\n return (self.imargin[0].to_pixels(self.parent.width),\n self.imargin[1].to_pixels(self.parent.width),\n self.imargin[2].to_pixels(self.parent.height),\n self.imargin[3].to_pixels(self.parent.height))", "def getdim(self):\n return round(self.w() / self.c)", "def _update_dimensions(self):\n _, self.width = self.window.getmaxyx()\n self.spacing = self.width // self.total_columns", "def custom_spacing(width, height):\n\t\treturn c.lift(imgui.dummy, width, height)", "def dim(self):\n raise NotImplementedError", "def dataDimensions(data):\n logging.info('Number of rows of data: %s' % len(data))\n logging.info('Number of columns of data: %s' % len(data[1]))", "def pixel_spacing_range(self) -> Optional[float]:\n return self._get_property(PIXEL_SPACING_RANGE_PROP, float)", "def width(self):\n\t\tpass", "def dim(self):\n return self.__dim__", "def numspans(self):\n return self.mesh.size - 1", "def dim(self):\n return self._d", "def set_margins_and_spacing(self):\n\n #margin_list\n margin_list = [0,0,0,0]\n\n #lyt_classes_list\n lyt_classes_list = [QtGui.QStackedLayout, QtGui.QGridLayout, QtGui.QFormLayout, \n QtGui.QBoxLayout, QtGui.QVBoxLayout, QtGui.QHBoxLayout, QtGui.QBoxLayout]\n\n #lyt_list\n lyt_list = []\n for lyt_class in lyt_classes_list:\n lyt_list += [wdgt for wdgt in self.findChildren(lyt_class)]\n\n\n \n #set margin and spacing\n for lyt in lyt_list:\n\n #check type\n if(type(lyt) in lyt_classes_list):\n\n #set\n lyt.setContentsMargins(*margin_list)\n lyt.setSpacing(0)", "def widths(self):\n return self._ax.widths", "def width(self):\n return self._get_mean_and_samples_attribute('width')", "def spacingEnergy(self, controlpoints):\n # only remember each spacing energy if the given control points are\n # the snakes current control points\n memorize_energies = np.equal(controlpoints, self.controlpoints).all()\n # reset the spacing energy list if necessary\n if memorize_energies:\n self.spc_energies = []\n \n spacing = 0.0\n # iterate over the adjacent control points\n for i in range(len(controlpoints)):\n if i < len(controlpoints)-1:\n ci = controlpoints[i]\n ci_next = controlpoints[i+1]\n \n # compute the distance between the two points\n di = (ci_next[0]-ci[0], ci_next[1]-ci[1])\n di_abs = sqrt(di[0]**2 + di[1]**2)\n current_spacing = ((di_abs/self.goal_length)-1)**2\n \n # add to the overall value\n spacing += current_spacing\n # safe to list if 
necessary\n if memorize_energies:\n self.spc_energies.append(current_spacing)\n return spacing", "def dim_calculator():\r\n probe_set = np.arange(1, 101)\r\n X = -36 + ((probe_set - 1) // 10) * 4\r\n Y = 2 - ((probe_set - 1) % 10) * 4\r\n dim = np.vstack((X, Y)).T\r\n return dim", "def dim(self):\n return (self.n, )", "def compute_kspace_units(self):\n if self.c1.has_analytic_ft:\n support_x, support_y = self.c2.support_x, self.c2.support_y\n sample_spacing = self.c2.sample_spacing\n elif self.c2.has_analytic_ft:\n support_x, support_y = self.c1.support_x, self.c1.support_y\n sample_spacing = self.c1.sample_spacing\n else:\n support_x = max(self.c1.support_x, self.c2.support_x)\n support_y = max(self.c1.support_y, self.c2.support_y)\n sample_spacing = min(self.c1.sample_spacing, self.c2.sample_spacing)\n\n self.sample_spacing = sample_spacing\n self.nsamples_x = int(e.floor(round(((support_x / sample_spacing) + 1) * self.Q, 6)))\n self.nsamples_y = int(e.floor(round(((support_y / sample_spacing) + 1) * self.Q, 6)))\n self.kspace_x = forward_ft_unit(sample_spacing, self.nsamples_x, True)\n self.kspace_y = forward_ft_unit(sample_spacing, self.nsamples_y, True)\n return self", "def Space(self):\n\t\tif self.acad.ActiveDocument.ActiveLayout.ModelType:\n\t\t\treturn self.acad.ActiveDocument.ModelSpace\n\t\telse:\n\t\t\treturn self.acad.ActiveDocument.PaperSpace", "def dimension(self) -> float:\n return self._dimensions", "def getDimensions(self):\n return self._majax, self._minax, self._pa", "def default_dimensions(self):\r\n try:\r\n dimension1, dimension2 = self.dimension_finder(self.text_header)\r\n except:\r\n dimension1, dimension2 = 0,0\r\n return (dimension1, dimension2)", "def GetIndentSize(self):\r\n\r\n return 5", "def indicator_space() -> List[Dimension]:\n return [\n Integer(15, 40, name='bull-buy-rsi-value'),\n Integer(10, 30, name='bear-buy-rsi-value'),\n ]", "def N(self):\n return self._dimensions", "def dim(self):\n raise NotImplementedError()", "def padding(self):\n\t\treturn self.paddings_shape_param('W')", "def get_grid_spacing(self, grid_id, grid_spacing):\n grid_spacing = self._grid_spacing[grid_id]", "def test_get_xy_space():\n pass", "def getDataSetCount(self):\n\t\treturn int(self.numberOfImages / self.slicesPerTimepoint)", "def get_spacing(filename='POSCAR', cut=0.9):\n\n structure = Structure.from_file('POSCAR')\n\n lines = open(filename).readlines()\n c_axis = lines[4].split()\n lattice_parameter = lines[1].split()\n split_coords = [line.split() for line in lines[8:8+structure.num_sites]]\n z_coords = list()\n for coord in split_coords:\n z_coord = float(coord[2])\n if z_coord > cut:\n z_coord -= 1\n z_coords.append(z_coord)\n max_height = max([z_height for z_height in z_coords])\n min_height = min([z_height for z_height in z_coords])\n spacing = ((1.0 + min_height) - max_height) * \\\n abs(float(c_axis[2])) * float(lattice_parameter[0])\n\n return spacing", "def dim(self):\n return self._dim", "def calc_cspace(self, mapdata, padding):\n rospy.loginfo(\"Calculating C-Space\")\n c_Space = OccupancyGrid()\n c_Space.header = mapdata.header\n c_Space.info = mapdata.info\n\n c_Space_data = list(mapdata.data)\n listOfPoints = []\n for x in range(mapdata.info.width):\n for y in range(mapdata.info.height):\n index = self.grid_to_index(mapdata,x,y)\n for j in range (padding +1) :\n for k in range(padding +1):\n if(mapdata.data[index] == 100):\n c_Space_data[index] = 100\n if ((x+j) > mapdata.info.width -1 or (x+j) < 0 or (y+k)> mapdata.info.height -1 or (y+k) <0):\n pass\n 
else:\n index1 = self.grid_to_index(mapdata, (x+j), (y+k))\n c_Space_data[index1] = 100\n listOfPoints.append((x+j, y+k))\n\n if ((x-j) > mapdata.info.width -1 or (x-j) < 0 or (y+k)> mapdata.info.height -1 or (y+k) <0):\n pass\n else:\n index2 = self.grid_to_index(mapdata, (x-j), (y+k))\n c_Space_data[index2] = 100\n listOfPoints.append((x-j, y+k)) \n\n if ((x+j) > mapdata.info.width -1 or (x+j) < 0 or (y-k)> mapdata.info.height -1 or (y-k) <0):\n pass\n else:\n index3 = self.grid_to_index(mapdata, (x+j), (y-k))\n c_Space_data[index3] = 100\n listOfPoints.append((x+j, y-k))\n\n if ((x-j) > mapdata.info.width -1 or (x-j) < 0 or (y-k)> mapdata.info.height -1 or (y-k) <0):\n pass\n else:\n index4 = self.grid_to_index(mapdata, (x-j), (y-k))\n c_Space_data[index4] = 100\n listOfPoints.append((x-j, y-k))\n c_Space.data=c_Space_data\n self.cspace_pub.publish(c_Space)\n rospy.loginfo(\"Publishing OccupancyGrid for C-Space\")\n return c_Space" ]
[ "0.69162726", "0.6651873", "0.66186863", "0.6618427", "0.6541836", "0.64272296", "0.6390034", "0.6284166", "0.6187805", "0.61713886", "0.6085297", "0.6026972", "0.6021646", "0.60199386", "0.5960843", "0.5940496", "0.59104943", "0.5868685", "0.573976", "0.5731771", "0.57220376", "0.56487423", "0.56223077", "0.5594772", "0.55903035", "0.55810946", "0.5577763", "0.5576113", "0.55648994", "0.555305", "0.55424625", "0.5523321", "0.55190796", "0.55190796", "0.55190796", "0.5503417", "0.5485612", "0.5468764", "0.5434494", "0.54340833", "0.54312396", "0.54252404", "0.54201066", "0.54178953", "0.5413793", "0.5413681", "0.53998077", "0.53892297", "0.5359313", "0.53581214", "0.5351968", "0.53451", "0.53429466", "0.53414935", "0.534078", "0.534078", "0.53249055", "0.52890927", "0.52869886", "0.5284249", "0.52797264", "0.5276143", "0.52652884", "0.5257448", "0.5255773", "0.52418303", "0.5236235", "0.5234077", "0.5233128", "0.5228916", "0.52285725", "0.522376", "0.5223402", "0.52211905", "0.52166706", "0.5213663", "0.52015954", "0.52011657", "0.51847965", "0.51844543", "0.51823604", "0.5182218", "0.5179822", "0.5173287", "0.5162146", "0.515002", "0.514977", "0.5139248", "0.51362693", "0.51348644", "0.51337516", "0.51335496", "0.5130295", "0.5130214", "0.5129097", "0.51240355", "0.5118522", "0.51140684", "0.51066035", "0.5090071" ]
0.7266339
0
Returns the voxel size of the datasets this dataunit contains
def getVoxelSize(self): return self.voxelsize
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def voxel_size(self):\n return self.calculation.voxel_size", "def dataset_size(self):\n return self.dataset.size", "def GetVoxelSize(vDataSet):\r\n nx = vDataSet.GetSizeX()\r\n ny = vDataSet.GetSizeY()\r\n nz = vDataSet.GetSizeZ()\r\n\r\n if nx > 0: nx = abs(vDataSet.GetExtendMaxX()-vDataSet.GetExtendMinX())/nx;\r\n if ny > 0: ny = abs(vDataSet.GetExtendMaxY()-vDataSet.GetExtendMinY())/ny;\r\n if nz > 0: nz = abs(vDataSet.GetExtendMaxZ()-vDataSet.GetExtendMinZ())/nz;\r\n\r\n return nx,ny,nz", "def voxel_count(self):\n return self.cols * self.rows * self.sections", "def voxel_size(self):\n # Set experiment if unset:\n if self._exp is None:\n self._populate_exp()\n\n # Set cframe if unset:\n if self._coord_frame is None:\n self._populate_coord_frame()\n\n if self.axis_order == AxisOrder.XYZ:\n vox_size = (\n self._coord_frame.x_voxel_size,\n self._coord_frame.y_voxel_size,\n self._coord_frame.z_voxel_size,\n )\n elif self.axis_order == AxisOrder.ZYX:\n vox_size = (\n self._coord_frame.z_voxel_size,\n self._coord_frame.y_voxel_size,\n self._coord_frame.x_voxel_size,\n )\n return (vox_size, self._coord_frame.voxel_unit)", "def getDataSetCount(self):\n\t\treturn int(self.numberOfImages / self.slicesPerTimepoint)", "def size(self, index):\n return self.base_dataset.size(index)", "def __len__(self):\n return self._dataset.size(dirs=self._dirs)", "def get_num_dimensions(self):\n dimensions = self.data.shape\n return dimensions[1]", "def dimension(self):\n return len(self.qubit_values)", "def dimension_size(self):\n return self._dim", "def get_size(self):\n # return the size along the index dimension\n size = 0\n if self._data is not None:\n size = shape(self._data)[self.index_dimension]\n\n return size", "def get_data_dim(self):\n return self.data_dim", "def size(self):\n ret = 0\n for ii in self.__data:\n ret += int(ii.get_size())\n return ret", "def get_voxel_size(self, axis):\n assert axis in MRI3Daxes\n if self.meta_data is not None:\n affine, header = self.meta_data\n return header['pixdim'][1:4][MRI3Daxes.index(axis)]\n else:\n raise Exception('xndarray does not have any meta data to get'\n 'voxel size')", "def dataset_size(self):\n if not self._dataset_size:\n # pylint: disable=attribute-defined-outside-init\n self._dataset_size = count_file_lines(\n self._hparams.source_dataset.files)\n return self._dataset_size", "def width(self) -> int:\n return self._obj[self.x_dim].size", "def size(self):\n\t\treturn self.dims", "def get_size(self):\n tmpsize = 0\n for variable in self.variables:\n tmpsize += variable.get_size()\n for subchunk in self.subchunks:\n tmpsize += subchunk.get_size()\n return tmpsize", "def get_total_item_size(dataset):\n total_items = 0\n for element in dataset:\n total_items += 1\n return total_items", "def get_data_dimensions(self):\n return image_utils.convert_shape_indexing(self._get_data_dimensions_rc(),\"rc\",self.image_indexing)", "def num_dim(self):\n return len(self._dimensions)", "def num_dim(self):\n return len(self._dimensions)", "def data_size(self) -> int:\n return len(self.__labels)", "def size(self):\n size = 1\n for current_slice in self.slices:\n size *= current_slice.stop - current_slice.start\n return size", "def _get_dataset_size(loader):\n if isinstance(loader, (tuple, list)):\n return len(loader[0].dataset)\n else:\n return len(loader.dataset)", "def number_of_data_nodes(self):\n return int(self._data['number_of_data_nodes'])", "def get_dimension_length(self):\n pass", "def getDimension(self):\n return len(self.components)", "def 
dimension(self):\r\n a = 0\r\n for x in self.faces():\r\n if (len(x) > a):\r\n a = len(x) \r\n return a-1", "def count(self):\r\n return self.data_array.size", "def get_size(self):\n return (\n sys.getsizeof(self.children) +\n sys.getsizeof(self.parent) +\n sys.getsizeof(self.dataset_id) +\n sys.getsizeof(self.k) +\n self.filter.get_size()\n )", "def size_in(self):\n return self.dimensions", "def get_dim(self, name):\n return len(self.root_group.dimensions[name])", "def getNumDimensions(self):\n return len(self.di.keys())", "def size(self) -> int:\n\n return self.sizes.sum()", "def dim(self) -> int:\n return self._n_dim", "def get_voxel_size(path: str) -> float:\n dcm = pydicom.dcmread(path, force=True)\n x_str, y_str = dcm.PixelSpacing\n x = Decimal(str(x_str))\n y = Decimal(str(y_str))\n z = Decimal(str(dcm.SpacingBetweenSlices))\n print(float(x * y * z))\n return float(x * y * z)", "def dimension_count(self):\n return self._dimensionCount", "def get_num_points(self):\n dimensions = self.data.shape\n return dimensions[0]", "def __len__(self):\r\n if self.is_superset:\r\n length = 0\r\n for ds in self.data:\r\n length += len(ds)\r\n return length\r\n else:\r\n return len(self.data)", "def size(self):\n size = 0\n size += self.data.size * sys.getsizeof(self.data)\n return size / 1024.0 / 1024.0 / 1024.0", "def element_size(self):\n vecs = (\n self.nodes[self.elements[:, :4], :][:, 1:, :]\n - self.nodes[self.elements[:, :4], :][:, 0, None, :]\n )\n return np.abs(np.linalg.det(vecs)) / 6", "def xdim(self):\n return len(self._x)", "def get_size(self):\n return self._data_size", "def __len__(self) -> int:\n import h5py\n\n with h5py.File(\n os.path.join(self.root, self.data_dir, self.img_file_name), \"r\"\n ) as f:\n num_datapoints: int = f[self.split][\"pv_log\"].shape[0]\n\n return num_datapoints", "def size(self) -> int:\n size = self.da.length()\n return size", "def size(self)->int:\n\n return np.prod([axes if is_integer(axes) else len(axes) for axes in self._dim_axes])", "def size(self):\n return self.data.size", "def size(self):\r\n return len(self._train_datas)", "def get_nbytes(dset):\n if 'nbytes' in dset.attrs:\n # look if the dataset has an attribute nbytes\n return dset.attrs['nbytes']\n elif hasattr(dset, 'value'):\n # else extract nbytes from the underlying array\n return dset.size * numpy.zeros(1, dset.dtype).nbytes", "def dimension(self) -> float:\n return self._dimensions", "def size(self):\n futures = self.client.map(_call_size, self.vecDask, pure=False)\n sizes = self.client.gather(futures)\n return np.sum(sizes)", "def get_value_size(self):\n size = 0\n if self._data is not None:\n size = shape(self._data)[self.value_dimension]\n\n return size", "def size(self):\n return self.__row_count * self.__col_count", "def n_points(self):\n\n if self.data_reduced:\n return len(self.data_reduced[0])\n else:\n return 0", "def __len__(self):\n if not self.opt.union:\n return min(len(self.dataset), self.opt.max_dataset_size)\n else:\n return len(self.batch_sampler)", "def num_dimensions(self):\n if self.__num_dimensions__ == 0:\n # Try to get the number of dimensions from the first point or bounding box\n if len(self.points) > 0:\n self.__num_dimensions__ = len(self.points[0].coordinate)\n elif len(self.bounding_boxes) > 0:\n self.__num_dimensions__ = len(self.bounding_boxes[0].start)\n return self.__num_dimensions__", "def dim(self):\n return len(self._n)", "def size(self) -> pulumi.Output[float]:\n return pulumi.get(self, \"size\")", "def ndarray_size(self) -> int:\n pass", "def 
GetDataSetSize(ds_type, name_len, num_elements, element_multipler):\n\n # Number of bytes in the data type\n datatype_size = 4\n if ds_type == 50: # Byte Datatype\n datatype_size = 1\n elif ds_type == 20: # Int Datatype\n datatype_size = 4\n elif ds_type == 10: # Float Datatype\n datatype_size = 4\n\n return ((num_elements * element_multipler) * datatype_size) + Ensemble.GetBaseDataSize(name_len)", "def getDimension(self):\n dim = len(self.__axis_labels__)\n if dim == 0:\n # Labels weren't set, so how about the data\n dim = self[0].dim()\n return dim", "def XSize(self):\n return self.dataset.RasterXSize if self.dataset else None", "def dataDimensions(data):\n logging.info('Number of rows of data: %s' % len(data))\n logging.info('Number of columns of data: %s' % len(data[1]))", "def get_size(self):\n tmpsize = 0\n for variable in self.variables:\n tmpsize += variable.get_size()\n for subchunk in self.subchunks:\n tmpsize += subchunk.get_size()\n self.size.value = tmpsize\n return self.size.value + self.ID.get_size() + self.size.get_size()", "def size(self):\n return int(misc.intprod(self.shape))", "def __len__(self):\n return int(np.floor(len(self.dataset_df) / self.batch_size))", "def nbytes(self) -> int:\n\n return self.data.nbytes + self.shape.nbytes", "def dimension(self):\n return 3*self.genus - 3 + self.n", "def dimension(self):\n return self.__N", "def size(self):\n return self.N", "def ndim(self):\n return self.data.ndim", "def __len__(self):\n return self.dataset.shape[0]", "def get_data_size(self):\n if self.doc_ftrs is not None:\n data = self.doc_ftrs\n elif self.query_ftrs:\n data = self.query_ftrs\n elif self.usr_ftrs:\n data = self.usr_ftrs\n else:\n raise ValueError('Cannot infer data size.')\n data_shape = tf.shape(data)\n return data_shape[0], data_shape[1]", "def size_out(self):\n return self.dimensions", "def __len__(self):\n\t\treturn min(len(self.dataset), self.opt.max_dataset_size)", "def nbytes(self):\n\n return self.data.type.datasize", "def size(self) -> Tuple[groupable, pdarray]:\n return self.count()", "def dim(self) -> int:\n pass", "def countDataSize(self,filename):\n \n d = h5py.File(filename,'r')\n features = d['spectrometer/features'][:]\n select = self.selectData(features.astype(float), self.ifeature, d)\n N = len(features[select])\n d.close()\n\n N = (N//self.offsetLen) * self.offsetLen\n\n N = N*self.Nfeeds\n\n self.chunks += [[int(self.Nsamples), int(self.Nsamples+N)]]\n self.datasizes += [int(N/self.Nfeeds)]\n self.Nsamples += int(N)", "def __get_dataset_size(input_):\n in_type = __get_input_type(input_)\n b_unit = 1024.0 * 1024.0\n if in_type == \"numpy_array\":\n size_in_MB = input_.nbytes / b_unit\n elif in_type == \"hdf\":\n size_in_MB = os.path.getsize(input_) / b_unit\n else:\n list_file = losa.find_file(input_ + \"/*.tif*\")\n if list_file:\n size_1_file = np.asarray(Image.open(list_file[0])).nbytes / b_unit\n else:\n size_1_file = 0.0\n size_in_MB = len(list_file) * size_1_file\n return size_in_MB", "def nbytes(self):\n dtype = self.config[\"dtype\"]\n if dtype is None:\n return None\n\n size = reduce(mul, self.shape, 1)\n nbytes = size * dtype.itemsize\n\n if getattr(self, \"masked\", True):\n nbytes += size\n\n return nbytes", "def get_size(self, hdf):\n return sum([sys.getsizeof(hdf[p]) for p in hdf.list_nodes()]) + sum(\n [self.get_size(hdf[p]) for p in hdf.list_groups()]\n )", "def data_size( self, groups ):\n #if len(groups) == 0:\n # return 0\n return max( groups.values() )", "def nbytes(self):\n # type: () -> int\n size = 0\n for chunk in 
self.data.chunks:\n for buf in chunk.buffers():\n size += buf.size\n return size", "def dimensions():", "def num_elements(self):\n return self.subset.num_elements()", "def get_size(self):\n return self._surf.get_size()", "def count_data(self):\n try:\n ndata = len(self.x)\n logger.info(\"Number of data points: {0}\".format(ndata))\n except AttributeError:\n logger.error(\"Data object has not been defined\")\n ndata = 0\n return ndata", "def get_length(self):\n if self.opt.num_buckets > 1:\n return sum([len(bucket) for bucket in self.data])\n else:\n return len(self.data)", "def __len__( self ):\n return len( self._raster_data )", "def graph_data_size(self) -> int:\n return int(self.graph_tuple_stats.graph_data_size or 0)", "def test_size(self) -> int:\n return int(self.data_size * self.__test_fraction)", "def size(self) -> int:\n return self.da.length()", "def size(self) -> int:\n return self.da.length()", "def size(self) -> int:\n return self.da.length()", "def size(self) -> int:\n return self.da.length()", "def n_dims(self):\n return len(self.dimensions)", "def size(self, batch):\n x,y,m = batch \n return sum([mm.sum() for mm in m])" ]
[ "0.8266003", "0.76687074", "0.7574417", "0.75476074", "0.7534628", "0.7282379", "0.72617334", "0.7169072", "0.7134597", "0.7131639", "0.71055424", "0.7064898", "0.70379966", "0.7025604", "0.7004166", "0.6994787", "0.69689655", "0.69596523", "0.6953735", "0.69417727", "0.6908508", "0.6892053", "0.6892053", "0.6887908", "0.6852594", "0.6827202", "0.68255895", "0.68230164", "0.6822927", "0.6802025", "0.67960024", "0.67942625", "0.6772995", "0.6762101", "0.67555916", "0.67230934", "0.6718384", "0.67155033", "0.6714495", "0.6701687", "0.67016155", "0.66925704", "0.6690807", "0.6690481", "0.66859525", "0.6684816", "0.66835886", "0.6649728", "0.6646967", "0.66452", "0.6641968", "0.6641089", "0.66361624", "0.66359687", "0.66357076", "0.6631694", "0.6624262", "0.66186744", "0.6618538", "0.6608617", "0.6606963", "0.6606518", "0.6605088", "0.6602196", "0.65952927", "0.6584458", "0.6578666", "0.6576369", "0.65705764", "0.65597665", "0.65564764", "0.6554649", "0.65545905", "0.65401906", "0.6531687", "0.65247244", "0.6523251", "0.6522284", "0.65190244", "0.6507744", "0.6505202", "0.6502565", "0.6500292", "0.649602", "0.64862347", "0.64839983", "0.64818263", "0.64766675", "0.64760697", "0.6474356", "0.64719796", "0.6464944", "0.64612925", "0.64581925", "0.64547074", "0.64547074", "0.64547074", "0.64547074", "0.6449409", "0.64456713" ]
0.7754449
1
set the voxel sizes of the images that are read
def setVoxelSize(self, vxs): self.voxelsize = vxs a, b, c = vxs self.spacing = [1, b / a, c / a]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _assign_sizes(self):", "def setinputsizes(self, sizes):\n pass", "def setImageDimensions(*args):", "def update_size(self):\n self.size = self.image.size\n self.width, self.height = self.size", "def set_size(self, size=None):\n if not size:\n size = self.output_size\n self.img = cv2.resize(self.img, size)\n self.update_image()\n self.update_size()", "def voxel_size(self):\n # Set experiment if unset:\n if self._exp is None:\n self._populate_exp()\n\n # Set cframe if unset:\n if self._coord_frame is None:\n self._populate_coord_frame()\n\n if self.axis_order == AxisOrder.XYZ:\n vox_size = (\n self._coord_frame.x_voxel_size,\n self._coord_frame.y_voxel_size,\n self._coord_frame.z_voxel_size,\n )\n elif self.axis_order == AxisOrder.ZYX:\n vox_size = (\n self._coord_frame.z_voxel_size,\n self._coord_frame.y_voxel_size,\n self._coord_frame.x_voxel_size,\n )\n return (vox_size, self._coord_frame.voxel_unit)", "def set_num_images(self,num_images):\n for roi in self.rois:\n roi.set_num_images(num_images)\n self.num_images = num_images", "def set_sizes(self, sizes):\n self._sizes = sizes", "def getVoxelSize(self):\n\t\treturn self.voxelsize", "def voxel_size(self):\n return self.calculation.voxel_size", "def update_dimensions(self):\n self.chunk = numpy.full((self.current_height, self.current_width), fill_value=Constants.VALUE_INITIALIZER,\n dtype=\"int16\")", "def _SetDimensions(self):\n self._size = 0\n for variable_ndarray in self._layer.get_weights():\n size = variable_ndarray.size\n self._dimensions.append((variable_ndarray.shape, size))\n self._size += size", "def voxel_count(self):\n return self.cols * self.rows * self.sections", "def get_raw_image_sizes() -> set:\n sizes = set()\n data = SUNRGBDTrainDataset(True, augment=False)\n for i in range(len(data)):\n sizes.add(data[i][0].shape)\n return sizes", "def test_correct_image_size(location):\n chunkloc = resave_to_chunks(root=location[\"dir\"],\n n_imgs=10,\n output_stem=location[\"stem\"])\n\n loaded = np.load(chunkloc)\n assert len(loaded.files) > 0\n\n first = loaded[loaded.files[0]]\n assert first.shape != ()\n assert first.shape == (520, 696)", "def setup(self):\n self.debug(\"Setup ..\")\n\n if self.pipeline.settings.useHardwarePCF:\n self.error(\n \"Global Illumination does not work in combination with PCF!\")\n import sys\n sys.exit(0)\n return\n\n self.settings = VoxelSettingsManager()\n self.settings.loadFromFile(join(self.sceneRoot, \"voxels.ini\"))\n\n self.debug(\n \"Loaded voxels, grid resolution is\", self.settings.GridResolution)\n\n self.gridScale = self.settings.GridEnd - self.settings.GridStart\n self.voxelSize = self.gridScale / float(self.settings.GridResolution)\n self.entrySize = Vec2(\n 1.0 / float(self.settings.StackSizeX), 1.0 / float(self.settings.StackSizeY))\n self.frameIndex = 0\n\n invVoxelSize = Vec3(\n 1.0 / self.voxelSize.x, 1.0 / self.voxelSize.y, 1.0 / self.voxelSize.z)\n invVoxelSize.normalize()\n self.normalizationFactor = invVoxelSize / \\\n float(self.settings.GridResolution)\n\n # Debugging of voxels, VERY slow\n self.debugVoxels = False\n\n if self.debugVoxels:\n self.createVoxelDebugBox()\n\n # Load packed voxels\n packedVoxels = Globals.loader.loadTexture(\n join(self.sceneRoot, \"voxels.png\"))\n packedVoxels.setFormat(Texture.FRgba8)\n packedVoxels.setComponentType(Texture.TUnsignedByte)\n # packedVoxels.setKeepRamImage(False)\n\n # Create 3D Texture to store unpacked voxels\n self.unpackedVoxels = Texture(\"Unpacked voxels\")\n 
self.unpackedVoxels.setup3dTexture(self.settings.GridResolution, self.settings.GridResolution, self.settings.GridResolution,\n Texture.TFloat, Texture.FRgba8)\n self.unpackedVoxels.setMinfilter(Texture.FTLinearMipmapLinear)\n self.unpackedVoxels.setMagfilter(Texture.FTLinear)\n\n self.unpackVoxels = NodePath(\"unpackVoxels\")\n self.unpackVoxels.setShader(\n BetterShader.loadCompute(\"Shader/GI/UnpackVoxels.compute\"))\n\n print \"setting inputs ..\"\n self.unpackVoxels.setShaderInput(\"packedVoxels\", packedVoxels)\n print \"setting inputs ..\"\n self.unpackVoxels.setShaderInput(\n \"stackSizeX\", LVecBase3i(self.settings.StackSizeX))\n print \"setting inputs ..\"\n self.unpackVoxels.setShaderInput(\n \"gridSize\", LVecBase3i(self.settings.GridResolution))\n print \"setting inputs ..\"\n self.unpackVoxels.setShaderInput(\"destination\", self.unpackedVoxels)\n print \"executing shader ..\"\n self._executeShader(\n self.unpackVoxels, self.settings.GridResolution / 8, self.settings.GridResolution / 8, self.settings.GridResolution / 8)\n\n print \"creating direct radiance texture ..\"\n # Create 3D Texture to store direct radiance\n self.directRadianceCache = Texture(\"Direct radiance cache\")\n self.directRadianceCache.setup3dTexture(self.settings.GridResolution, self.settings.GridResolution, self.settings.GridResolution,\n Texture.TInt, Texture.FR32i)\n\n self.directRadiance = Texture(\"Direct radiance\")\n self.directRadiance.setup3dTexture(self.settings.GridResolution, self.settings.GridResolution, self.settings.GridResolution,\n Texture.TFloat, Texture.FRgba16)\n\n print \"setting texture states ..\"\n for prepare in [self.directRadiance, self.unpackedVoxels]:\n prepare.setMagfilter(Texture.FTLinear)\n prepare.setMinfilter(Texture.FTLinearMipmapLinear)\n prepare.setWrapU(Texture.WMBorderColor)\n prepare.setWrapV(Texture.WMBorderColor)\n prepare.setWrapW(Texture.WMBorderColor)\n prepare.setBorderColor(Vec4(0,0,0,1))\n\n self.unpackedVoxels.setBorderColor(Vec4(0))\n # self.directRadiance.setBorderColor(Vec4(0))\n\n self.populateVPLNode = NodePath(\"PopulateVPLs\")\n self.clearTextureNode = NodePath(\"ClearTexture\")\n self.copyTextureNode = NodePath(\"CopyTexture\")\n self.generateMipmapsNode = NodePath(\"GenerateMipmaps\")\n self.convertGridNode = NodePath(\"ConvertGrid\")\n\n\n if False:\n surroundingBox = Globals.loader.loadModel(\n \"Models/CubeFix/Model.egg\")\n surroundingBox.setPos(self.settings.GridStart)\n surroundingBox.setScale(self.gridScale)\n\n # surroundingBox.setTwoSided(True)\n surroundingBox.flattenStrong()\n surroundingBox.reparentTo(Globals.render)\n\n self.bindTo(self.populateVPLNode, \"giData\")\n self.reloadShader()\n\n self._generateMipmaps(self.unpackedVoxels)", "def _set_x_block_size(self):\n self._scene_gen.block_dimensions = (self._block_size_x_spinbox.value(),\n self._scene_gen.block_dimensions[Y],\n self._scene_gen.block_dimensions[Z])\n self._refresh_view()", "def setinputsizes(self, sizes):\r\n if self._closed:\r\n raise Error('The cursor has been closed.')\r\n if self.connection._closed:\r\n raise Error('The connection to the database has been closed.')\r\n else:\r\n pass", "def __init__(self, nb_sub_images, window_size, recovery, image_horiz_size):\n self.nb_sub_images = nb_sub_images\n self.window_size = window_size\n self.recovery = recovery\n self.image_horiz_size = image_horiz_size", "def set_image_data(self, data_file):\n # TODO: support other file formats, like hd5 and maybe raw binary?\n import scipy.io\n self.image_data = 
np.atleast_3d(scipy.io.loadmat(data_file).values()[0])\n if self.image_data.ndim == 3:\n self.image_data = self.image_data.reshape(self.image_data.shape + (1,))\n # TODO: confirm that this voxel reordering is necessary. Maybe lean on the recon\n # folks to standardize thier voxle order? Might also look at\n self.image_data = self.image_data.transpose((1,0,2,3))[::-1,:,::-1,:]\n\n if self.image_data.shape[0] != self.size_x or self.image_data.shape[1] != self.size_y:\n msg = 'Image matrix discrepancy. Fixing the header, assuming image_data is correct...'\n self.log and self.log.warning(msg) or print(msg)\n self.size_x = self.image_data.shape[0]\n self.size_y = self.image_data.shape[1]\n self.mm_per_vox[0] = float(self.fov[0] / self.size_x)\n self.mm_per_vox[1] = float(self.fov[1] / self.size_y)\n if self.image_data.shape[2] != self.num_slices:\n msg = 'Image slice count discrepancy. Fixing the header, assuming image_data is correct...'\n self.log and self.log.warning(msg) or print(msg)\n self.num_slices = self.image_data.shape[2]\n if self.image_data.shape[3] != self.num_timepoints:\n msg = 'Image time frame discrepancy (header=%d, array=%d). Fixing the header, assuming image_data is correct...' % (self.num_timepoints, self.image_data.shape[3])\n self.log and self.log.warning(msg) or print(msg)\n self.num_timepoints = self.image_data.shape[3]", "def set_default_parameters(self):\n super().set_default_parameters()\n if not \"region_size\" in vars(self):\n self.region_size = 0.08\n if not \"RGB_bands\" in vars(self):\n self.RGB_bands = [\"B4\",\"B3\",\"B2\"]\n if not \"split_RGB_images\" in vars(self):\n self.split_RGB_images = True\n # in PROCESSED dir we expect RGB. NDVI, BWNDVI\n self.num_files_per_point = 3", "def countDataSize(self,filename):\n \n d = h5py.File(filename,'r')\n features = d['spectrometer/features'][:]\n select = self.selectData(features.astype(float), self.ifeature, d)\n N = len(features[select])\n d.close()\n\n N = (N//self.offsetLen) * self.offsetLen\n\n N = N*self.Nfeeds\n\n self.chunks += [[int(self.Nsamples), int(self.Nsamples+N)]]\n self.datasizes += [int(N/self.Nfeeds)]\n self.Nsamples += int(N)", "def countDataSize(self,filename):\n \n try:\n d = h5py.File(filename,'r')\n except:\n print(filename)\n return \n\n N = 0\n scan_edges = d['level2/Statistics/scan_edges'][:]\n for (start,end) in scan_edges:\n N += (end-start)//self.offsetLen * self.offsetLen\n d.close()\n\n N = N*self.Nfeeds\n\n self.chunks += [[int(self.Nsamples), int(self.Nsamples+N)]]\n self.datasizes += [int(N/self.Nfeeds)]\n self.Nsamples += int(N)", "def get_num_of_images(self):", "def _set_pixel_size(self) -> None:\n # Not Pansharpened images\n if self.band_combi == Sv1BandCombination.PMS:\n # TODO: manage default resolution for PAN band ?\n self.pixel_size = self._ms_res\n # Pansharpened images\n else:\n self.pixel_size = self._pan_res", "def resize(self, **kwargs):\n\n if self.image is None:\n raise ValueError('self.image is None! 
The image has to be initialized!')\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n self.image = ndimage.interpolation.zoom(self.image * 1., **kwargs)\n\n # if size <= 3, pad with zeros\n\n if np.min(self.image.shape) < 5:\n self.image = np.pad(self.image, pad_width=3, mode='constant', constant_values=0)\n\n if self.image.max() > 0:\n self.image = rescale_intensity(self.image, out_range=(0, 255))\n\n if 'Voxel size x' in self.metadata.index and 'Voxel size y' in self.metadata.index \\\n and 'Voxel size z' in self.metadata.index:\n new_voxel_size = np.array([self.metadata['Voxel size z'], self.metadata['Voxel size y'],\n self.metadata['Voxel size x']]) / kwargs['zoom']\n self.metadata['Voxel size'] = str(new_voxel_size)\n self.metadata['Voxel size z'], self.metadata['Voxel size y'], self.metadata['Voxel size x'] = new_voxel_size\n\n return self.image", "def set_pic_size(self, im_name):\n im_vals = np.genfromtxt(im_name, delimiter=self.delim)\n self.pic_width = int(np.size(im_vals[0]) - 1) # the first column of ASCII image is row number\n try: self.pic_height = int(np.size(im_vals[:,0])) \n except IndexError: \n self.pic_width = int(np.size(im_vals) - 1)\n self.pic_height = 1\n self.create_rect_mask()\n return self.pic_width, self.pic_height", "def setFilmSize(self, size_x, size_y):\n self.lens.setFilmSize(size_x, size_y)\n self.rebuildMatrixCache()", "def set_size(self, w, h):\n\t\tpass", "def set_size(self, width, height):\r\n \r\n self.image = pygame.transform.scale(self.image, (width, height))\r\n self.rect = self.image.get_rect()", "def setImageSize(cls, width, height):\n\t\tcls._width = width\n\t\tcls._height = height", "def dimensions():", "def chunksize(self, value):\n\n self.data.chunksize = int(value)\n self.mask.chunksize = int(value)", "def __init__(self, image_size, heatmap_size):\n super(ProjectLayer, self).__init__()\n self.image_size = image_size\n self.heatmap_size = heatmap_size\n if isinstance(self.image_size, int):\n self.image_size = [self.image_size, self.image_size]\n if isinstance(self.heatmap_size, int):\n self.heatmap_size = [self.heatmap_size, self.heatmap_size]", "def updateSize(self, *args):\n width = self.width.get()\n height = self.height.get()\n self.initialXScale.config(to=width)\n self.initialYScale.config(to=height)\n # error check that state is not outside bounds\n for ball, state in self.ballStates.items():\n if state[0] > width:\n state[0] = width\n if state[1] > height:\n state[1] = height", "def sizes(options):\n # import data\n pixels = dict() # volumes are given in #pixels\n snap_mask = \"/net/astrogate/export/astrodata/jgacon/filex/processing/\" \\\n \"export/f8_h50_v100_objs_snap_%d.csv\"\n snap_ids = np.arange(2,28+1)\n z = snapid2z(snap_ids)\n print z\n\n for id in snap_ids:\n snap = snap_mask % (id - 1) # fix: snap number one too low in filename\n pixels[id] = np.genfromtxt(snap)[1:-1,1] # row 2 contains volumes\n # rm void & halo volumes\n\n # visualise\n if \"err\" in options.keys():\n nums = np.array([pixels[id].size for id in snap_ids])\n avgs = np.array([np.mean(pixels[id]) for id in snap_ids])\n mods = np.array([st.mode(pixels[id])[0][0] for id in snap_ids])\n meds = np.array([np.median(pixels[id]) for id in snap_ids])\n stds = np.array([np.std(pixels[id]) for id in snap_ids])\n\n print mods\n print mods.shape\n\n plt.figure()\n plt.title(\"Sizes of filaments as function of redshift\")\n plt.xlabel(\"Redshift $z$\")\n plt.xticks(snap_ids[::3], z[::3])\n\n plt.ylabel(\"Size in #pixels\")\n\n plt.errorbar(snap_ids, 
avgs, yerr=stds, label=\"Mean\")\n plt.plot(snap_ids, mods, \"g\", label=\"Mode\")\n plt.plot(snap_ids, meds, \"c\", label=\"Median\")\n plt.legend(loc=\"best\")\n\n plt.twinx()\n plt.ylabel(\"#Filaments\", color=\"r\")\n plt.tick_params(\"y\", colors=\"r\")\n\n plt.plot(snap_ids, nums, \"r--\")\n\n plt.savefig(options[\"err\"])\n\n if \"dist\" in options.keys():\n targets = np.array([5,10,15,20,25])\n plt.figure()\n plt.title(\"Volume distribution of filaments\")\n plt.xlabel(\"Volume $V$ in #pixels\")\n plt.ylabel(\"#Element with $V$ / Total #Elements\")\n plt.xlim([0,1000])\n for target in targets:\n sns.kdeplot(pixels[target], label=\"$z$ = %f\" % snapid2z(target))\n plt.legend(loc=\"best\")\n plt.savefig(options[\"dist\"])\n\n if \"dist_inter\" in options.keys():\n default = snap_ids[-1]\n fig, ax = plt.subplots()\n plt.subplots_adjust(bottom=0.25)\n sns.kdeplot(pixels[int(default - 2)], ax=ax)\n plt.xlim([0, 1000])\n plt.ylim([0, 0.01])\n plt.xlabel(\"Volume $V$ of filaments in #pixels\")\n plt.ylabel(\"#Filaments with volume $V$ / Total #Filaments\")\n\n nums = np.array([pixels[id].size for id in snap_ids])\n ax2 = ax.twinx()\n ax2.set_ylabel(\"#Filaments\", color=\"r\", alpha=0.5)\n ax2.tick_params(axis=\"y\", labelcolor=\"r\")\n ax2_x = np.linspace(0, 1000, nums.size)\n ax2.plot(ax2_x, nums, \"r--\", alpha=0.5)\n point, = ax2.plot(ax2_x[default - 2], nums[default - 2], \"ro\", alpha=0.5)\n\n axcolor = 'lightgoldenrodyellow'\n axfreq = plt.axes([0.25, 0.1, 0.65, 0.03], facecolor=axcolor)\n sid = Slider(axfreq, \"ID\", 2, 28, valinit=default, valstep=1)\n ax.set_title(\"$z$ = %f\" % snapid2z(default))\n\n def update(val):\n id = sid.val\n\n print id\n #ax.clear()\n ax.set_ydata()\n ax.set_xdata()\n ax.set_title(\"$z$ = %f\" % snapid2z(int(id)))\n ax.set_xlim([0,1000])\n ax.set_ylim([0, 0.01])\n sns.kdeplot(pixels[int(id)], ax=ax)\n point.set_xdata(ax2_x[int(id) - 2])\n point.set_ydata(nums[int(id) - 2])\n fig.canvas.draw_idle()\n sid.on_changed(update)\n\n plt.show()\n\n\n if \"hist\" in options.keys():\n conc = None\n for id, vols in pixels.iteritems():\n data = np.empty((vols.size, 2))\n data[:,0] = id\n data[:,1] = vols\n\n if conc is None:\n conc = data\n else:\n conc = np.vstack((conc, data))\n\n plt.figure()\n plt.hist2d(conc[:,0], conc[:,1], bins=(snap_ids.size, 1000))\n plt.ylim([100,400])\n plt.savefig(options[\"hist\"])", "def SetSize(self, m: 'unsigned int', n: 'unsigned int') -> \"void\":\n return _itkArray2DPython.itkArray2DD_SetSize(self, m, n)", "def get_image_size(self):", "def apply(self):\n self.grid_size = self.values[0]", "def _resize(self, size: Tuple[int, int], axis: int = None) -> None:\n\t\tif self.name == \"\":\n\t\t\tself.ds._file['/matrix'].resize(size, axis)\n\t\telse:\n\t\t\tself.ds._file['/layers/' + self.name].resize(size, axis)", "def GetVoxelSize(vDataSet):\r\n nx = vDataSet.GetSizeX()\r\n ny = vDataSet.GetSizeY()\r\n nz = vDataSet.GetSizeZ()\r\n\r\n if nx > 0: nx = abs(vDataSet.GetExtendMaxX()-vDataSet.GetExtendMinX())/nx;\r\n if ny > 0: ny = abs(vDataSet.GetExtendMaxY()-vDataSet.GetExtendMinY())/ny;\r\n if nz > 0: nz = abs(vDataSet.GetExtendMaxZ()-vDataSet.GetExtendMinZ())/nz;\r\n\r\n return nx,ny,nz", "def _get_box_sizes(self, image_info, cat):\n\n\n file_id=0\n impath=image_info['image_path'][file_id].strip()\n ext=image_info['image_ext'][file_id]\n wcs_data = fitsio.read_header(impath, ext=ext)\n wcs = eu.wcsutil.WCS(wcs_data)\n\n\n jacob = wcs.get_jacobian(100,100)\n dudcol, dudrow, dvdcol, dvdrow = jacob\n\n det = dvdrow*dudcol - 
dvdcol*dudrow\n pixel_scale = np.sqrt(abs(det))\n print('found pixel scale:',pixel_scale)\n box_size = cat['box_size_arcsec']/pixel_scale\n\n # clip to range\n box_size.clip(\n min=self['min_box_size'],\n max=self['max_box_size'],\n out=box_size,\n )\n box_size = box_size.astype('i4')\n\n w,=np.where( ( (box_size % 2) != 0 ) )\n if w.size > 0:\n box_size[w] += 1\n\n return box_size", "def __init__(self, sizes, image_size):\n super(CraterNetwork, self).__init__(sizes);\n self.im_size = image_size\n self.validating = False", "def SetSize(self, m: 'unsigned int', n: 'unsigned int') -> \"void\":\n return _itkArray2DPython.itkArray2DUI_SetSize(self, m, n)", "def onSetToFourthSize(self, evt):\n\t\tself.halfResampleZ.Enable(0)\n\t\tself.fourthResampleZ.Enable(1)\n\t\tif self.dataUnits:\n\t\t\tzf = 1\n\t\t\tx, y, z = self.dataUnits[0].dataSource.getOriginalDimensions()\n\t\t\t\n\t\t\tif self.fourthResampleZ.GetValue():\n\t\t\t\tzf = 0.25\n\t\t\tself.currSize = int(0.25 * x), int(0.25 * y), int(zf * z) \n\t\tfor obj in [self.factorLabel, self.dimLabel, self.newDimX, self.newDimY, self.newDimZ, self.factorX, self.factorY, self.factorZ]:\n\t\t\tobj.Enable(0)", "def scan_size(self):\n max_memory = 10e9/4 # because 32-bit floats will be used\n memory = 0\n for f in self.filenames:\n img = cv2.imread(f, int(self.color))\n if img is not None:\n m = 1\n for dim in img.shape:\n m *= dim\n memory += m\n else:\n print('error opening %s' % f)\n print('size is %s bytes' % memory)\n return memory <= max_memory", "def scan_size(self):\n max_memory = 10e9/4 # because 32-bit floats will be used\n memory = 0\n for f in self.filenames:\n img = cv2.imread(f, int(self.color))\n if img is not None:\n m = 1\n for dim in img.shape:\n m *= dim\n memory += m\n else:\n print('error opening %s' % f)\n print('size is %s bytes' % memory)\n return memory <= max_memory", "def scan_size(self):\n max_memory = 10e9/4 # because 32-bit floats will be used\n memory = 0\n for f in self.filenames:\n img = cv2.imread(f, int(self.color))\n if img is not None:\n m = 1\n for dim in img.shape:\n m *= dim\n memory += m\n else:\n print('error opening %s' % f)\n print('size is %s bytes' % memory)\n return memory <= max_memory", "def getDimensions():", "def SetSize(self, m: 'unsigned int', n: 'unsigned int') -> \"void\":\n return _itkArray2DPython.itkArray2DF_SetSize(self, m, n)", "def get_image_sizes():\n widths = []\n heights = []\n\n from settings import folders_location\n for individual_folder_name in listdir(folders_location):\n individual_training_folder_path = folders_location + individual_folder_name + \"/training/\"\n\n image_paths = listdir(individual_training_folder_path)\n for image_path in image_paths:\n img = cv2.imread(individual_training_folder_path + image_path)\n\n height, width, channel = img.shape\n widths.append(width)\n heights.append(height)\n\n print(individual_training_folder_path + image_path)\n\n print(\"Min: %s, Max: %s\" % (np.min(widths), np.max(widths)))\n print(\"Average: %s\" % (np.average(widths)))\n\n return widths", "def set_data_size(self, num_bytes):\n self.model.data_size = num_bytes\n self.refresh_memory()", "def set_variables(self):\n self.feat_size = None # Set this in your inherited class\n raise NotImplementedError(\"set_variables() is not implemented\")", "def set_element_dimensions(self, size_x, size_y, size_z):\n size_x = 1.0 * size_x\n size_y = 1.0 * size_y\n size_z = 1.0 * size_z\n x = np.repeat(size_x, self.numelements)\n y = np.repeat(size_y, self.numelements)\n z = np.repeat(size_z, 
self.numelements)\n self.dimensions = g.Points.from_xyz(x, y, z)\n return self", "def estimate_size(self, datasets):\n datasets = Datasets(datasets)\n \n# self.fit.run(datasets)\n\n if self.size_values:\n self.size_parameter.scan_values = self.size_values.to_value(self.size_parameter.unit)\n self.size_parameter.scan_min = self.size_min.to_value(self.size_parameter.unit)\n self.size_parameter.scan_max = self.size_max.to_value(self.size_parameter.unit)\n self.size_parameter.scan_n_values = self.size_n_values\n \n result = super().run(datasets, self.size_parameter)\n return result", "def __len__(self):\n return self._num_samples_per_file * len(self._files) // self._world_size", "def resize_images(self, images):\n \n img_list = []\n \n for img in images:\n \n yield np.resize(img, (64, 64, 3))", "def SetImageSize(self,x=IS.GET_IMAGE_SIZE_X_MAX,y=0):#non-zero ret\r\n r = CALL(\"SetImageSize\",self,INT(x),INT(y))\r\n if x & 0x8000 == 0x8000:\r\n return self.CheckForNoSuccessError(r)\r\n return self.CheckForSuccessError(r)", "def __init__(self, pool_size):\n self.pool_size = pool_size\n if self.pool_size > 0: # create an empty pool\n self.num_imgs = 0\n self.images = []", "def _get_image_size(self):\n return (3, 224, 224)", "def GetTextureDimensions(self):\n ...", "def set_size(self, size):\n \n self.width = size[0]\n self.height = size[1]", "def set_size(self, mode):\n return len(self.data_index[mode])", "def set_point_size(self, point_size=0.0):\r\n for b in self.buf:\r\n b.unib[8] = point_size", "def load(self):\n\n # get files in folder\n files = [f for f in listdir(self.data_path)]\n print(\"loading images from folder: %s\" % self.data_path)\n\n images = []\n image_targets = []\n for f in files:\n filepath = path.join(self.data_path, f)\n images.append(io.imread(filepath, as_grey=True))\n image_targets.append(self.target)\n\n # define new size and resize images\n new_size = (2 ** self.size_exponent, 2 ** self.size_exponent)\n for i in range(0, len(images)):\n # images[i] = transform.resize(images[i], new_size)\n images[i] = misc.imresize(images[i], new_size) / 16\n\n self.images = images\n self.targets = image_targets", "def initial_resizing(fr_raw_data_path, fr_data_path, dim=300):\n with h5py.File(fr_raw_data_path, 'r') as data:\n images = resize_array(np.asarray(data['images'].value), dim=dim)\n labels = data['labels'].value\n \n with h5py.File(fr_data_path, 'w') as f:\n f.create_dataset('images', data=images)\n\n with h5py.File(fr_raw_data_path, 'r') as data: \n f.copy(data['fri_data'], 'fri_data')\n f.copy(data['frii_data'], 'frii_data')\n f.copy(data['labels'], 'labels')", "def size(self, val):\n self.width = val\n self.height = val", "def resize(self):\n pass", "def __init__(self, size=(2, 2), **kwargs):\n super(MaxUnpooling2D, self).__init__(**kwargs)\n self.size = size", "def __len__(self):\n return self.num_images", "def __len__(self):\n return self.num_images", "def __len__(self):\n return self.num_images", "def __len__(self):\n return self.num_images", "def __len__(self):\n return self.num_images", "def set_num_images(self,num_images):\n if num_images != self.num_images:\n self.counts = [{} for _ in range(num_images)]\n\n for _ in range(num_images,len(self.thresholds)): # delete unneeded thresholds\n self.thresholds.pop()\n for _ in range(len(self.thresholds), num_images): # make new thresholds\n self.thresholds.append(self.default_threshold)\n\n for _ in range(num_images,len(self.autothreshs)): # delete unneeded autothreshs\n self.autothreshs.pop()\n for _ in 
range(len(self.autothreshs), num_images): # make new autothreshs\n self.autothreshs.append(self.default_autothresh)\n\n self.num_images = num_images", "def get_voxel_size(path: str) -> float:\n dcm = pydicom.dcmread(path, force=True)\n x_str, y_str = dcm.PixelSpacing\n x = Decimal(str(x_str))\n y = Decimal(str(y_str))\n z = Decimal(str(dcm.SpacingBetweenSlices))\n print(float(x * y * z))\n return float(x * y * z)", "def setUp(self):\n self.gray_image = np.ndarray((100, 200), dtype=np.uint8)\n self.rgb_image = np.ndarray((100, 200, 3), dtype=np.uint8)", "def scale(self, size=128):\n scale_factor = size / max(self.voxels.shape)\n self.voxels = ndimage.zoom(self.voxels, scale_factor)\n self.point_position = self.point_position * scale_factor\n self.voxel_size = False # To ignore this\n \n return(self)", "def setImages( self, event_key, images ):\n print \"event index\",event_key[0]\n self.run = event_key[1]\n self.subrun = event_key[2]\n self.event_num = event_key[3]\n print self.run,self.subrun,self.event_num\n self.images = images\n #print self.images.img_v\n #for img in self.images.img_v:\n # print img.shape\n self.labeltools.setImage( event_key[0], self.images )", "def _set_x_size(self):\n self._level_gen.size = (self._level_size_x_spinbox.value(),\n self._level_gen.size[Y],\n self._level_gen.size[Z])\n self._refresh_view()", "def __CalculateSliceSize(self, readShapeZYX):\n # readShapeZYX is the dimension of the data we must READ to fill the required output area;\n # i.e .the fill area plus margins. If we're filling globally it's the same thing.\n dataBPP = 4\n memLimit = self._jobDetails.MemTargetBytes\n outputsBPP = dataBPP * 2 + 1 # the output data, distances, and flags\n # approximate total number of pixels we can read for each file\n sliceSqrd = memLimit / (readShapeZYX[0] * (dataBPP + outputsBPP))\n # not implementing slicing in y dimension so xSize is total pixels / total height\n sliceXSize = sliceSqrd / readShapeZYX[1]\n return sliceXSize", "def updateSize(self, *args):\n return None", "def onSize(self, event): \n\t\tw, h = self.GetClientSizeTuple()\n\t\tself.tree.SetDimensions(0, 0, w, h)", "def set_dimensions(self, fshape=1, params=np.array([0., 0., 0., 0.])):\n self.FHIT_C = 1\n self.FSHAPE = fshape\n self.RLEN1 = params[0]\n self.RLEN2 = params[1]\n self.RWIDX1 = params[2]\n self.RWIDX2 = params[3]", "def test_full_resize(self):\n number_of_pixels = 300\n destination = base_path +'/test_data/rendering_tests/resized_images/'\n source_folder = base_path + '/test_data/rendering_tests/filter_database/'\n\n\n for the_file in os.listdir(destination):\n file_path = os.path.join(destination, the_file)\n if os.path.isfile(file_path):\n os.unlink(file_path)\n\n\n self.assertEqual(0, len(os.listdir(destination)))\n rb.find_all_files(number_of_pixels,source_folder, destination)\n self.assertEqual(6, len(os.listdir(destination)))\n for the_file in os.listdir(destination):\n file_path = os.path.join(destination,the_file)\n with Image.open(file_path) as f:\n self.assertNotEqual(number_of_pixels+5, f.size[0])\n self.assertNotEqual(number_of_pixels+5, f.size[1])\n # the above checks that the size does not vary as needed\n # probably not necessary\n self.assertEqual(number_of_pixels, f.size[0])\n self.assertEqual(number_of_pixels, f.size[1])", "def size(img):\n\treturn img.size", "def _set_z_block_size(self):\n self._scene_gen.block_dimensions = (self._scene_gen.block_dimensions[X],\n self._scene_gen.block_dimensions[Y],\n self._block_size_z_spinbox.value())\n self._refresh_view()", "def 
update_resize(self, viewer, dims):\n self.recalc(viewer)", "def setSurfaceSize(xmin, xmax, ymin, ymax):\n dislin.sursze(xmin, xmax, ymin, ymax)", "def set_pool_size(self, pool_size):\n self._semantic_decoder.set_pool_size(pool_size)\n if self._instance_decoder is not None:\n self._instance_decoder.set_pool_size(pool_size)", "def __init__(self, images, batch_size, ctx, multisp):\n self.ctx = ctx\n self.batch_size = batch_size\n self.images = []\n self.multisp = multisp\n\n self.images=images\n\n if self.images:\n self.channels, self.imgsize, _ = self._read_img(self.images[0]['data']).shape\n\n logging.info(\"Found a total of {} images\".format(len(self.images)))", "def input_load(self):\n return self.nmos_size + self.pmos_size", "def estimate_size(self, ixreader):\r\n raise NotImplementedError", "def SetDimensions(self, p_int, p_int_1, p_int_2, p_int_3, p_int_4, p_int_5, p_int_6):\n ...", "def setsize(self, size):\n self.__size = size", "def testSize (self):\r\n \r\n perpixel = bytes_per_pixel [self.bih_vals [bih_BitCount]]\r\n width = self.bih_vals [bih_Width]\r\n height = self.bih_vals [bih_Height]\r\n expected = self.bih_vals [bih_SizeImage]\r\n\r\n # Rows always have multiples of 4 bytes\r\n \r\n padding = 3 - ((perpixel * width + 3) % 4)\r\n size = (width * perpixel + padding) * height\r\n\r\n if not size == expected:\r\n print \"Calculated size = %d (<> %d)\" % (size, expected)\r\n print \"***** File size error *****\"", "def __len__(self):\n return math.ceil(self.number_of_images / self.batch_size)", "def input_image_size(interpreter):\n _, height, width, channels = interpreter.get_input_details()[0]['shape']\n return width, height, channels", "def __init__(self, *, points, n_x=1, n_y=1, n_z=1, size_x=None, size_y=None, size_z=None, range=None, **kwargs):\n super().__init__(points=points, **kwargs)\n\n if size_x is None or size_y is None or size_z is None:\n print('WARNING: when computing a voxelgrid for running a Neural net, voxel sizes should be homogeneous among different point clouds or the neural network wont learn spatial relationships. To ensure this, use (size_x, size_y, size_z) instead of (n_x, n_y, n_z)')\n\n self.x_y_z = [n_x, n_y, n_z]\n self.sizes = np.array([size_x, size_y, size_z])\n\n if range is None:\n self.xyzmin = self.bounds[0]\n self.xyzmax = self.bounds[1]\n else: \n self.xyzmin = range[:3]\n self.xyzmax = range[3:]\n\n for n, size in enumerate(self.sizes):\n if size is None:\n continue\n\n # ensure that 'sizes' are respected by making the box bigger if necessary\n margin = size - ((self.xyzmax[n] - self.xyzmin[n]) % size)\n self.xyzmin[n] -= margin / 2\n self.xyzmax[n] += margin / 2\n self.x_y_z[n] = int(round((self.xyzmax[n] - self.xyzmin[n]) / size))", "def setTestSampleSize(self, Ntest):\n self.Ntest = Ntest" ]
[ "0.66988456", "0.6689091", "0.6536548", "0.61790437", "0.6176843", "0.61598647", "0.6149251", "0.6120889", "0.6050634", "0.6040322", "0.60017174", "0.59967524", "0.59520566", "0.5908052", "0.58848417", "0.5881362", "0.5838115", "0.5830686", "0.5829773", "0.57882386", "0.57879984", "0.57736975", "0.57540166", "0.57257664", "0.5723163", "0.5719538", "0.57143813", "0.5660189", "0.5649517", "0.5646133", "0.56358474", "0.5603753", "0.55859613", "0.5564792", "0.5561999", "0.5553092", "0.5542233", "0.55421203", "0.5539714", "0.55349207", "0.55238724", "0.5517819", "0.54927963", "0.54754645", "0.54752177", "0.5474271", "0.5474271", "0.5474271", "0.5471876", "0.54173285", "0.5406461", "0.5404069", "0.5393685", "0.53915197", "0.5371111", "0.53633344", "0.53435415", "0.5337368", "0.53284353", "0.53269225", "0.53251433", "0.53226656", "0.53136903", "0.5310848", "0.53105265", "0.5307229", "0.530705", "0.5301535", "0.5296068", "0.52944183", "0.52944183", "0.52944183", "0.52944183", "0.52944183", "0.5290509", "0.52859247", "0.52769566", "0.5270676", "0.52693075", "0.5267989", "0.5264675", "0.52631307", "0.5257604", "0.52524304", "0.52492684", "0.5238744", "0.5238017", "0.52356434", "0.5227367", "0.52178025", "0.5206543", "0.52036506", "0.5199872", "0.51828796", "0.5172366", "0.51670367", "0.5166318", "0.516528", "0.5163989", "0.5162968" ]
0.7176527
0
Loads the specified .oif file and imports data from it.
def loadFromFile(self, filename): return []
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_data(self) -> None:", "def _load(self):\n op_type_file_path = os.path.join(\n self._profiling_dir,\n self._csv_file_to_analyse.format(self._device_id)\n )\n op_type_file_path = validate_and_normalize_path(\n op_type_file_path, raise_key=\"Invalid op_type_file_path\")\n if not os.path.isfile(op_type_file_path):\n log.warning('The file <%s> does not exist.', op_type_file_path)\n return\n\n with open(op_type_file_path, 'r') as file:\n csv_reader = csv.reader(file)\n _ = next(csv_reader)\n for info in csv_reader:\n self._data.append(self._convert_field_type(info))", "def init_from_file(self):\n self.src.load('start.00') \n self.oe1.load('start.01')\n #self.det.load('start.02')\n print('NOTE: variables loaded from start.00/start.01 files')", "def load_data(self):", "def load_data(self):\n self.tif_file = self._find_tif_file()\n if self.with_labeling is not None:\n self.colabel_file = self._find_colabeled_file()\n self.colabel_stack = self._load_colabeled_img()\n self.dff, self.indices = self._populate_dff_data()\n self.loaded = True", "def load(self, filename):\n pass", "def load(cls, from_file):\n raise NotImplementedError", "def load_file(path, data_type=None, *args, **kwargs):\n\n path = os.path.normpath(path)\n if os.path.isdir(path) and path[-1] != os.sep:\n path = path + os.sep\n\n if data_type == None:\n data_type = autodetect(path)\n\n if data_type == \"prospa\":\n return dnpIO.prospa.import_prospa(path, *args, **kwargs)\n\n elif data_type == \"topspin\":\n return dnpIO.topspin.import_topspin(path, *args, **kwargs)\n\n elif data_type == \"topspin dir\":\n return dnpIO.topspin.import_topspin_dir(path, *args, **kwargs)\n\n elif data_type == \"delta\":\n return dnpIO.delta.import_delta(path, *args, **kwargs)\n\n elif data_type == \"vnmrj\":\n return dnpIO.vnmrj.import_vnmrj(path, *args, **kwargs)\n\n elif data_type == \"tnmr\":\n return dnpIO.tnmr.import_tnmr(path, *args, **kwargs)\n\n elif data_type == \"specman\":\n return dnpIO.specman.import_specman(path, *args, **kwargs)\n\n elif data_type == \"xepr\" or data_type == \"xenon\":\n return dnpIO.bes3t.import_bes3t(path, *args, **kwargs)\n\n elif data_type == \"winepr\" or data_type == \"esp\":\n return dnpIO.winepr.import_winepr(path, *args, **kwargs)\n\n elif data_type == \"h5\":\n return dnpIO.h5.load_h5(path, *args, **kwargs)\n\n elif data_type == \"power\":\n return dnpIO.power.importPower(path, *args, **kwargs)\n\n elif data_type == \"vna\":\n return dnpIO.vna.import_vna(path, *args, **kwargs)\n\n elif data_type == \"cnsi_powers\":\n return dnpIO.cnsi.get_powers(path, *args, **kwargs)\n\n else:\n raise ValueError(\"Invalid data type: %s\" % data_type)", "def load(f: Union[str, os.PathLike], model):\n from ..utils.flopy_io import multi_line_strip\n\n pkg_ws = os.path.split(f)[0]\n with open(f) as foo:\n t = [0]\n while t[0] != \"ncells\":\n t = multi_line_strip(foo).split()\n\n ncells = int(t[1])\n\n t = [0]\n while t[0] != \"izone\":\n t = multi_line_strip(foo).split()\n\n method = multi_line_strip(foo).split()[0]\n\n if method in (\"internal\", \"open/close\"):\n izone = np.zeros((ncells,), dtype=int)\n i = 0\n fobj = foo\n if method == \"open/close\":\n fobj = open(os.path.join(pkg_ws, t[1]))\n while i < ncells:\n t = multi_line_strip(fobj)\n if t[0] == \"open/close\":\n if fobj != foo:\n fobj.close()\n fobj = open(os.path.join(pkg_ws, t[1]))\n for zn in t:\n izone[i] = zn\n i += 1\n else:\n izone = np.array([t[1]] * ncells, dtype=int)\n\n zon = ZoneFile6(model, izone)\n return zon", "def load(self, file_id):\n pass", 
"def load(self):\r\n self.read(self.filename)", "def _load_data() -> UpliftData:\n with project_dir(\"axinova\"):\n data = UpliftData(\n ax_data=load_bin(\"ax_data.feather\"),\n ax_var_struct=load_bin(\"ax_var_struct.feather\"),\n population_codes=load_pickle(\"population_ratios.pkl\"),\n global_codes=load_pickle(\"global_code_ratios.pkl\"),\n station_codes=load_pickle(\"station_code_ratios.pkl\"),\n spr_data=load_pickle(\"spr_data.pkl\"),\n )\n data.all_stations = data.ax_data[\"Station\"].cat.categories.to_list()\n data.all_weekdays = data.ax_data[\"DayOfWeek\"].cat.categories.to_list()\n data.all_timescales = [\"Time\", \"ShortTime\", \"Hour\", \"TimeSlot\"]\n data.var_info = {}\n for (var_id, struct) in data.ax_var_struct.groupby(\"Variable\"):\n data.var_info[var_id] = dict(\n Label=struct[\"Variable_Label\"].max(),\n Codes=struct[\"Label\"].to_list(),\n Order=list(range(len(struct[\"Label_Nr\"].to_list()))),\n )\n data.combi_var = {\n \"md_SexAgeEk\": (\n data.variable_table(\"md_SexAgeEk\")\n .iloc[:, 0]\n .str.split(\"/ \", expand=True)\n .rename(columns={0: \"md_sex\", 1: \"md_agenatrep\", 2: \"md_ek\"})\n )\n }\n return data", "def open_from(self, f: BinaryIO):\n raise NotImplementedError", "def _load_file(self):\n getLogger(__name__).debug(\"Loading {} in {} mode.\".format(self.filename, self.mode))\n try:\n kwargs = {'driver': 'H5FD_CORE'} if self.in_memory else {}\n self.file = tables.open_file(self.filename, mode='a' if self.mode == 'write' else 'r', **kwargs)\n except (IOError, OSError):\n raise\n\n # get important cal params\n self.nominal_wavelength_bins = self.nyquist_wavelengths()\n\n # get the beam image\n self.beamImage = self.file.get_node('/beammap/map').read()\n self._flagArray = self.file.get_node('/beammap/flag') # The absence of .read() here is correct\n self.nXPix, self.nYPix = self.beamImage.shape\n\n # get the photontable\n self.photonTable = self.file.get_node('/photons/photontable')", "def load(self, filename):\n raise NotImplementedError", "def load_data(self):\n raise NotImplementedError()", "def load_data():\r\n print ('Loadng all the file one time......')\r\n if not os.path.exists('cifar.pkl'):\r\n set_data()\r\n with open('cifar.pkl', 'rb') as cifar_pickle:\r\n data = six.moves.cPickle.load(cifar_pickle)\r\n return data", "def load_file():\n global list_of_table, data_base, new_data\n open_name = askopenfilename()\n\n if Path(open_name).suffix == '.db':\n data_base = open_name\n data_base = str(data_base)\n new_data_base = parse(data_base)\n new_data = update_list_tables(new_data_base)\n new_data.clear()\n\n else:\n mistake_db_file()", "def load(self, ocadfile=None):\n if ocadfile:\n (ocadName, ext) = os.path.splitext(ocadfile)\n self.ocadName = ocadName\n else:\n raise OcadfileException(\"File not specified!\")\n \n try:\n self.__open()\n self.block = None\n self.syhdr = None\n self.StringsStorage = None\n \n if self.ocad:\n self.__ocadHeader()\n self.__symbolsHeader() # Version <= 8\n self.__setup() # Version <= 8\n \n finally: \n self.__close()", "def load_file(*args, **kwargs): # real signature unknown\n pass", "def load(self) -> None:\n self._load_data()\n self._load_poses()\n self._load_timestamps()", "def load(self, filename=None):\n importer = aspecd.io.AdfImporter()\n importer.source = filename\n importer.import_into(self)", "def load(self):\n\n raise NotImplementedError", "def load(self, input):", "def load(self):", "def loadData(self, file):\n self.data = batchImport(file, self.ps)", "def load(self):\n pass", "def load(self):\n pass", "def 
load(self):\n pass", "def load(self):\n pass", "def load(self, input):\n pass", "def load_test_file():\n hou.hipFile.load(\n os.path.join(\n os.path.dirname(os.path.abspath(__file__)),\n \"data\",\n \"test_api_integration.hipnc\",\n ),\n ignore_load_warnings=True,\n )\n\n yield\n\n hou.hipFile.clear()", "def _read(self):\n # initializng data dictionary\n self.data={}\n\n f = FortranFile(self.filename)\n # Default omnivor binary header\n self.data['MK'] = f.readInts('i')\n self.data['itime'] = f.readInts('i')\n self.data['version'] = f.readString()\n self.data['file_id'] = f.readInts('i')\n self.data['sversion'] = f.readString()\n # Velocity field\n self.data['stype'] = f.readString()\n self.data['is_grid'] = f.readInts('i')\n nCPs = f.readInts('i')\n self.data['nCPs'] = nCPs\n if self.data['MK'] == 8:\n real_char='d'\n else:\n real_char='f'\n if self.data['is_grid']:\n #print('File is a velocity grid file')\n n1 = f.readInts('i')\n n2 = f.readInts('i')\n n3 = f.readInts('i')\n self.data['n1'] = n1\n self.data['n2'] = n2\n self.data['n3'] = n3\n self.data['is_straight'] = f.readInts('i')\n self.data['v1'] = f.readReals(real_char)\n self.data['v2'] = f.readReals(real_char)\n self.data['v3'] = f.readReals(real_char)\n\n CPs_raw = f.readReals(real_char)\n Utot_raw = f.readReals(real_char)\n CPs = np.reshape(CPs_raw,(3,nCPs),order = 'F')\n Utot = np.reshape(Utot_raw,(3,nCPs),order = 'F')\n\n acc=-1\n CPsTab = np.zeros((3, n1,n2,n3))\n UtotTab = np.zeros((3, n1,n2,n3))\n # Reshaping the nasty way (this is natural order). \n for i in range(0,n1):\n for j in range(0,n2):\n for k in range(0,n3):\n acc=acc+1\n CPsTab[0:3,i,j,k] = CPs[0:3,acc]\n UtotTab[0:3,i,j,k] = Utot[0:3,acc]\n\n self.data['CPs'] = CPs\n self.data['CPsTab'] = CPsTab\n self.data['Utot'] = Utot\n self.data['UtotTab'] = UtotTab", "def load_all_data_from_file(self) -> None:\n self.load_gene_data_from_file()\n self.load_ontology_from_file(ontology_type=DataType.GO, ontology_url=self.go_ontology_url,\n ontology_cache_path=self.go_ontology_cache_path,\n config=self.config)\n self.load_associations_from_file(associations_type=DataType.GO, associations_url=self.go_associations_url,\n associations_cache_path=self.go_associations_cache_path, config=self.config)\n self.load_ontology_from_file(ontology_type=DataType.DO, ontology_url=self.do_ontology_url,\n ontology_cache_path=self.do_ontology_cache_path, config=self.config)\n self.load_associations_from_file(associations_type=DataType.DO, associations_url=self.do_associations_url,\n associations_cache_path=self.do_associations_cache_path,\n association_additional_cache_path=self.do_associations_new_cache_path,\n association_additional_url=self.do_associations_new_url, config=self.config)\n self.load_ontology_from_file(ontology_type=DataType.EXPR, ontology_url=self.expression_ontology_url,\n ontology_cache_path=self.expression_ontology_cache_path, config=self.config)\n self.load_associations_from_file(associations_type=DataType.EXPR,\n associations_url=self.expression_associations_url,\n associations_cache_path=self.expression_associations_cache_path,\n config=self.config)\n self.load_orthology_from_file()\n self.load_expression_cluster_data()\n self.load_protein_domain_information()", "def load_model(self, filename):\r\n pass", "def test1_loading(self):\n\t\tprint \"\\nTEST 1: Loading ontologies from %s folder.\\n=================\" % DATA_FOLDER\n\t\t\n\t\tfor f in os.listdir(DATA_FOLDER):\n\t\t\tif not f.startswith('.'):\n\t\t\t\tprint \"Loading... 
>\", f\t\t\n\t\t\t\t\n\t\t\t\to = ontospy.Ontology(DATA_FOLDER + f)\n\t\t\t\t\n\t\t\t\tself.assertEqual(type(o), ontospy.Ontology)\n\t\t\t\tprint \"Success.\"", "def get_model_data_from_files(self, oc):\r\n # Load model related files\r\n model_path = self.config['DATA_PATH'] + self.config['CUSTOMER_NAME'] + '/models/'\r\n\r\n features_file = model_path + self.task + '_' + str(oc) + '_features.txt'\r\n dummies_file = model_path + self.task + '_' + str(oc) + '_dummies.txt'\r\n model_file = model_path + self.task + '_' + str(oc) + '.joblib'\r\n\r\n if os.path.isfile(features_file) and os.path.isfile(dummies_file) and os.path.isfile(model_file):\r\n model = joblib.load(model_file)\r\n features = open(features_file, 'r', encoding=self.config['DATA_ENCODING']).read().rstrip('\\n').split(self.config['DATA_SEPARATOR'])\r\n dummies = open(dummies_file, 'r', encoding=self.config['DATA_ENCODING']).read().rstrip('\\n').split(self.config['DATA_SEPARATOR'])\r\n return (model, features, dummies)\r\n return (None, None, None)", "def dataLoad():\n try:\n try: #Python3\n f = open(__file__ + \".csv\",\"rt\")\n except: #Python2\n f = open(__file__ + \".csv\",\"rb\")\n data = f.read().split(',')\n entryCol.entry0.delete(0,END)\n entryCol.entry0.insert(0,data[0])\n entryCol.entry1.delete(0,END)\n entryCol.entry1.insert(0,data[1])\n entryCol.entry2.delete(0,END)\n entryCol.entry2.insert(0,data[2])\n entryCol.entry3.delete(0,END)\n entryCol.entry3.insert(0,data[3])\n botWind.writeN(\"DataLoad: File\")\n except:\n botWind.writeN(\"DataLoad: Default\")", "def _load_obcfile(casename=None): \n\n data={}\n\n if casename==None:\n print('_load_obcfile requires a filename to load.')\n return\n try:\n fp=open(casename+'_obc.dat','r')\n except IOError:\n print('_load_obcfile: invalid case name.')\n return data\n\n obc_str=fp.readline().split('=')\n obc_num=int(obc_str[1])\n t_data1=np.genfromtxt(casename+'_obc.dat',skip_header=1)\n fp.close()\n\n data['obcf_num']=obc_num\n data['obcf_numbers']=t_data1[:,0]\n data['obcf_nodes']=t_data1[:,1]\n data['obcf_value']=t_data1[:,2]\n\n \n return data", "def load(self):\n raise NotImplementedError", "def load(self):\n raise NotImplementedError", "def read_data(feature_file, label_file):", "def load(cls, f, model, ext_unit_dict=None):\n msg = (\n \"Model object must be of type flopy.mfusg.MfUsg\\n\"\n f\"but received type: {type(model)}.\"\n )\n assert isinstance(model, MfUsg), msg\n\n if model.verbose:\n print(\"loading bcf package file...\")\n\n f_obj = get_open_file_object(f, \"r\")\n\n # dataset 0 -- header\n while True:\n line = f_obj.readline()\n if line[0] != \"#\":\n break\n\n # determine problem dimensions\n nlay = model.nlay\n dis = model.get_package(\"DIS\")\n if dis is None:\n dis = model.get_package(\"DISU\")\n njag = dis.njag\n\n # Item 1: ipakcb, HDRY, IWDFLG, WETFCT, IWETIT, IHDWET - line already read above\n if model.verbose:\n print(\" loading ipakcb, HDRY, IWDFLG, WETFCT, IWETIT, IHDWET...\")\n text_list = line_parse(line)\n ipakcb, hdry, iwdflg, wetfct, iwetit, ihdwet = (\n int(text_list[0]),\n float(text_list[1]),\n int(text_list[2]),\n float(text_list[3]),\n int(text_list[4]),\n int(text_list[5]),\n )\n\n ikvflag = type_from_iterable(\n text_list, index=6, _type=int, default_val=0\n )\n ikcflag = type_from_iterable(\n text_list, index=7, _type=int, default_val=0\n )\n\n # LAYCON array\n laycon, intercellt = cls._load_laycon(f_obj, model)\n\n # TRPY array\n if model.verbose:\n print(\" loading TRPY...\")\n trpy = Util2d.load(\n f_obj, model, (nlay,), np.float32, 
\"trpy\", ext_unit_dict\n )\n\n # property data for each layer based on options\n transient = not dis.steady.all()\n anis = any(t != 1 for t in trpy)\n anglex = 0\n if (not model.structured) and anis:\n if model.verbose:\n print(\"loading ANGLEX...\")\n anglex = Util2d.load(\n f_obj, model, (njag,), np.float32, \"anglex\", ext_unit_dict\n )\n\n # hy, kv, storage\n (sf1, tran, hy, vcont, sf2, wetdry, kv) = cls._load_layer_arrays(\n f_obj,\n model,\n nlay,\n ext_unit_dict,\n transient,\n laycon,\n ikvflag,\n ikcflag,\n iwdflg,\n )\n\n # Ksat mfusg\n ksat = 0\n if (not model.structured) and abs(ikcflag == 1):\n if model.verbose:\n print(\" loading ksat (njag)...\")\n ksat = Util2d.load(\n f_obj, model, (njag,), np.float32, \"ksat\", ext_unit_dict\n )\n\n f_obj.close()\n\n # set package unit number\n unitnumber, filenames = get_unitnumber_from_ext_unit_dict(\n model, cls, ext_unit_dict, ipakcb\n )\n\n # create instance of bcf object\n bcf = cls(\n model,\n ipakcb=ipakcb,\n intercellt=intercellt,\n laycon=laycon,\n trpy=trpy,\n hdry=hdry,\n iwdflg=iwdflg,\n wetfct=wetfct,\n iwetit=iwetit,\n ihdwet=ihdwet,\n ikvflag=ikvflag,\n ikcflag=ikcflag,\n tran=tran,\n hy=hy,\n vcont=vcont,\n kv=kv,\n anglex=anglex,\n ksat=ksat,\n sf1=sf1,\n sf2=sf2,\n wetdry=wetdry,\n unitnumber=unitnumber,\n filenames=filenames,\n )\n\n # return bcf object\n return bcf", "def _load(self):\n if self.file_path.exists():\n with open(self.file_path) as fid:\n self.data = json.load(fid)", "def _load(self):\n raise NotImplementedError()", "def Load(self):\n\t\tfile = open(self.fileName, 'r')\n\t\tself.hdr = file.readline().split('\\n')[0].split(',')\n\t\t\n\t\tfor line in file.readlines():\n\t\t\ttokens = line.split('\\n')[0].split(',')\n\t\t\tif int(tokens[1]) == 0:\n\t\t\t\tself.h0.append(tokens[0])\n\t\t\telse:\n\t\t\t\tself.h1.append(tokens[0])\n\t\tfile.close()\n\t\tself.numH1 = len(self.h1)\n\t\tself.numH0 = len(self.h0)", "def load_model_file(device_index):\n print(\"\\nStart loading model...\")\n\n return kdp_wrapper.isi_load_nef(device_index, MODEL_FILE, ISI_APP_ID)", "def load(self):\n #print self.fileInfo.name\n progress = self.progress\n filePath = os.path.join(self.fileInfo.dir,self.fileInfo.name)\n self.fileSize = os.path.getsize(filePath)\n #--Localize\n cells = self.cells\n records = self.records\n canSave = self.canSave\n skipObjRecords = self.skipObjRecords\n contTypes = set(['CREC','CNTC','NPCC'])\n levTypes = set(('LEVC','LEVI'))\n debrisIds = self.debrisIds\n debrisTypes = set(debrisIds.keys())\n #--Header\n inPath = os.path.join(self.fileInfo.dir,self.fileInfo.name)\n ins = Tes3Reader(self.fileInfo.name,file(inPath,'rb'))\n (name,size,delFlag,recFlag) = ins.unpackRecHeader()\n self.tes3 = Tes3(name,size,delFlag,recFlag,ins,True)\n if not canSave: del self.tes3.others[:]\n #--Progress info\n progress = self.progress\n progress(0.0,'Loading '+self.fileInfo.name)\n #--Raw data read\n while not ins.atEnd():\n #--Get record info and handle it\n (name,size,delFlag,recFlag) = ins.unpackRecHeader()\n #print \"%s [%d]\" % (name,size)\n #--CELL?\n if name == 'CELL':\n record = Cell(name,size,delFlag,recFlag,ins,0,skipObjRecords)\n cells.append(record)\n if canSave: records.append(record)\n #--Contents\n elif canSave and name in contTypes:\n if name == 'CREC':\n record = Crec(name,size,delFlag,recFlag,ins,True)\n elif name == 'CNTC':\n record = Cntc(name,size,delFlag,recFlag,ins,True)\n else:\n record = Npcc(name,size,delFlag,recFlag,ins,True)\n self.conts.append(record)\n self.conts_id[record.getId()] = record\n 
records.append(record)\n #--File Map\n elif name == 'FMAP':\n record = Fmap(name,size,delFlag,recFlag,ins)\n self.fmap = record\n records.append(record)\n #--Landscapes\n elif name == 'LAND':\n record = Land(name,size,delFlag,recFlag,ins)\n self.lands[record.getId()] = record\n records.append(record)\n #--Scripts\n elif canSave and name == 'SCPT':\n record = Scpt(name,size,delFlag,recFlag,ins,True)\n records.append(record)\n if record.getRef():\n self.refs_scpt[record] = record.getRef()\n #--Save debris info?\n elif name in debrisTypes:\n record = Record(name,size,delFlag,recFlag,ins)\n id = record.getId()\n if id:\n debrisIds[name].append(id.lower())\n if canSave:\n records.append(record)\n #--Skip Non-cell?\n elif not canSave:\n ins.seek(size,1,name)\n #--Keep non-cell?\n else:\n records.append(Record(name,size,delFlag,recFlag,ins))\n #--Done Reading\n ins.close()\n #--Analyze Cells\n cntCells = 0\n progress.setMax(len(self.cells))\n for cell in self.cells:\n cell.load(None,1)\n self.cells_id[cell.getId()] = cell\n if not canSave:\n cell.data = None #--Free some memory\n #--Progress\n cntCells += 1\n progress(cntCells)\n #--Scripts\n if self.refs_scpt:\n self.updateScptRefs()", "def import_data_helper(self): \n if len(self.components) == 1:\n hapi.fetch(TableName = self.tablename, M = self.components[0][0], I = self.components[0][1], numin = self.min_x, numax = self.max_x)\n else: \n global_id = []\n for c in self.components:\n global_id.append(hapi.ISO[c][0])\n hapi.fetch_by_ids(TableName = self.tablename, iso_id_list = global_id, numin = self.min_x, numax = self.max_x)", "def load(file_name):\n ferme_fenetre()\n Hitori(file_name)", "def load_binary_data(self, encoding='utf8'):\n\n # TODO use smart_open again when https://github.com/RaRe-Technologies/smart_open/issues/207 will be fixed\n with open(self.file_name, 'rb') as f:\n self.load_model_params(f)\n self.load_dict(f, encoding=encoding)\n self.load_vectors(f)", "def loaditems(self, fh):\n pass", "def main():\n print \"=\" * 78\n print \"%s %s\" % (__prog_name__, __version__)\n debug, input_file_names = check_cli()\n if not input_file_names:\n _error(\"No input file name found!\\n\\n%s\" % __help__)\n for input_file_name in input_file_names:\n print \"* Reading\", input_file_name\n file_base_name = os.path.splitext(os.path.basename(input_file_name))[0]\n file_dir_name = os.path.dirname(input_file_name)\n sections = {}\n tex_map = {}\n with open(input_file_name, 'rU') as in_fd:\n sections = get_sections(in_fd.read())\n\n if debug:\n write_dump_file(file_dir_name, file_base_name, \"sec\",\n {\"sections\": sections})\n\n if not sections:\n _error(\"Nothing could be read from '%s'.\\nIs this an Oolite .dat file?\" \\\n % input_file_name)\n\n # Magically call the 'check' functions\n for name in sections.keys():\n f_name = \"check_%s\" % name.lower()\n if f_name in globals().keys():\n if not globals()[f_name](sections):\n _error(\"Number of entries in '%s' section is different as declared!\" % name)\n\n def get_data(name, sections=sections):\n \"\"\"Returns the 'data' object from the 'name' one found in the\n 'sections' one.\n :sections: dictionary: Object returned by 'get_sections'.\n :name: string: The name of the section to get the 'data'.\n Returns a list of 'lines'.\n \"\"\"\n return sections.get(name, {}).get(\"data\", [])\n\n oti_file_name = build_file_path(file_dir_name, file_base_name, \"oti\")\n tex_map = parse_names(get_data(\"NAMES\"), oti_file_name)\n\n tex_refs, tex_lines_out = parse_textures(get_data(\"TEXTURES\"))\n\n if 
debug:\n write_dump_file(file_dir_name, file_base_name, \"tex\",\n {\"tex_refs\": tex_refs,\n \"tex_lines_out\": tex_lines_out})\n\n # Update the tex_map object if textures indexes and names are both\n # used in 'TEXTURES'.\n if sorted(tex_map.keys()) != sorted(tex_refs.get(\"named\").keys()):\n tex_map = update_tex_map(tex_map,\n set(tex_refs[\"named\"].keys()).difference(tex_map.keys()))\n\n if debug:\n write_dump_file(file_dir_name, file_base_name, \"txm\",\n {\"tex_map\": tex_map})\n\n n_verts, vertex_lines_out = parse_vertex(get_data(\"VERTEX\"))\n\n if debug:\n write_dump_file(file_dir_name, file_base_name, \"ver\",\n {\"n_verts\": n_verts,\n \"vertex_lines_out\": vertex_lines_out})\n\n n_normals, normals_lines_out = parse_normals(get_data(\"NORMALS\"))\n\n if debug:\n write_dump_file(file_dir_name, file_base_name, \"nor\",\n {\"n_normals\": n_normals,\n \"normals_lines_out\": normals_lines_out})\n\n n_faces, faces_groups = parse_faces(get_data(\"FACES\"), tex_refs,\n normals_lines_out)\n\n if debug:\n write_dump_file(file_dir_name, file_base_name, \"fac\",\n {\"n_faces\": n_faces,\n \"faces_groups\": faces_groups})\n\n output_file_name = build_file_path(file_dir_name,\n file_base_name, 'obj')\n material_file_name = build_file_path(file_dir_name,\n file_base_name, 'mtl')\n mtl_lib_file = os.path.basename(material_file_name)\n\n write_obj(output_file_name, file_base_name, mtl_lib_file,\n tex_lines_out, tex_map, n_verts, vertex_lines_out,\n n_normals, normals_lines_out, n_faces, faces_groups)\n\n write_mtl(material_file_name, tex_map)\n\n _exit(\"* Done\")", "def loadIni(self):\n reLoadFiles = re.compile(r'^\\[Game Files\\](.*)')\n reLoadFile = re.compile(r'GameFile[0-9]+=(.*)$')\n #--Read file\n self.mtime = getmtime(self.path)\n self.size = os.path.getsize(self.path)\n ins = file(self.path,'rt')\n #--Pre-Load Lines\n del self.preLoadLines[:]\n del self.postLoadLines[:]\n while True:\n line = ins.readline()\n if not line: \n ins.close()\n raise Tes3Error('Morrowind.ini', _('Morrowind.ini: [GameFiles] section not found.'))\n maLoadFiles = reLoadFiles.match(line)\n if maLoadFiles: break\n self.preLoadLines.append(line)\n #--Load Files \n self.loadFilesComment = maLoadFiles.group(1)\n del self.loadFiles[:]\n del self.loadFilesBad[:]\n while True:\n line = ins.readline()\n maLoadFile = reLoadFile.match(line)\n if not maLoadFile: \n if line: self.postLoadLines.append(line)\n break\n loadFile = unicode(maLoadFile.group(1), 'latin-1')\n loadPath = os.path.join(self.dir,'Data Files',loadFile)\n loadExt = os.path.splitext(loadPath)[-1].lower()\n if len(self.loadFiles) == 255:\n self.loadFilesExtra.append(loadFile)\n elif os.path.exists(loadPath) and re.match('^\\.es[pm]$',loadExt):\n self.loadFiles.append(loadFile)\n else:\n self.loadFilesBad.append(loadFile)\n #--Post-Load Lines\n while True:\n line = ins.readline()\n if not line: break\n self.postLoadLines.append(line)\n #--Done\n ins.close()", "def load(self,filename=None): # return True\r\n pass", "def _load(self):\n\n # number of non-data header details at top of data file\n header = 1\n\n # open file\n weatherData = []\n with open(self.wfile) as myfile:\n if (self.lines > 0):\n weatherData = [next(myfile) for x in xrange(self.lines + header)]\n else:\n weatherData = myfile.readlines()\n\n # get data stream from first line\n streamHeader = weatherData.pop(0).rstrip()\n if (streamHeader == 'FULL'):\n self.dataStream = 0\n elif (streamHeader == 'ADVANCED'):\n self.dataStream = 1\n elif (streamHeader == 'BASIC'):\n self.dataStream = 2\n 
else:\n print \"Error: unecognised data stream from file %s\" % (self.wfile)\n return -1\n\n # read data\n inputData = []\n for line in weatherData:\n entries = line.split()\n inputData.append(entries)\n\n # copy all into np array\n self.data = np.array(inputData)\n\n return 0", "def load_data():\r\n global labelNames\r\n print(\"Loading Data...\")\r\n\r\n fnpath = \"rawdata\\\\cifar-10-batches-py\"\r\n fnprefix = 'data_batch_'\r\n fnlblnames = 'batches.meta'\r\n fntstbatch = 'test_batch'\r\n\r\n labelNames = unpickle(path.join(fnpath, fnlblnames))\r\n label_names = []\r\n for label in labelNames['label_names']:\r\n label_names.append(\"\".join(map(chr, label)))\r\n labelNames['label_names'] = label_names\r\n\r\n CIFAR_Data.append(unpickle(path.join(fnpath, fntstbatch)))\r\n for n in range(1, 6):\r\n CIFAR_Data.append(unpickle(path.join(fnpath, fnprefix + str(n))))", "def load(fnames, tag='', inst_id=''):\n\n # Save each file to the output DataFrame\n data = load_csv_data(fnames, read_csv_kwargs={'index_col': 0,\n 'parse_dates': True})\n\n # Assign the meta data\n meta, status_desc = mm_ace.common_metadata()\n flux_desc = '5-min averaged Differential '\n\n meta['status_e'] = {meta.labels.units: '',\n meta.labels.name: 'Diff e- Flux Status',\n meta.labels.notes: '',\n meta.labels.desc: status_desc,\n meta.labels.fill_val: np.nan,\n meta.labels.min_val: 0,\n meta.labels.max_val: 9}\n meta['status_p'] = {meta.labels.units: '',\n meta.labels.name: 'Diff Proton Flux Status',\n meta.labels.notes: '',\n meta.labels.desc: status_desc,\n meta.labels.fill_val: np.nan,\n meta.labels.min_val: 0,\n meta.labels.max_val: 9}\n meta['anis_ind'] = {meta.labels.units: '',\n meta.labels.name: 'Anisotropy Index',\n meta.labels.notes: '',\n meta.labels.desc: 'Range: 0.0 - 2.0',\n meta.labels.fill_val: -1.0,\n meta.labels.min_val: 0.0,\n meta.labels.max_val: 2.0}\n meta['eflux_38-53'] = {meta.labels.units: 'particles/cm2-s-ster-MeV',\n meta.labels.name: 'Diff e- Flux 38-53 eV',\n meta.labels.notes: '',\n meta.labels.desc:\n ''.join([flux_desc,\n 'Electron Flux between 35-53 eV']),\n meta.labels.fill_val: -1.0e5,\n meta.labels.min_val: -np.inf,\n meta.labels.max_val: np.inf}\n meta['eflux_175-315'] = {meta.labels.units: 'particles/cm2-s-ster-MeV',\n meta.labels.name: 'Diff e- Flux 175-315 eV',\n meta.labels.notes: '',\n meta.labels.desc:\n ''.join([flux_desc,\n 'Electron Flux between 175-315 eV']),\n meta.labels.fill_val: -1.0e5,\n meta.labels.min_val: -np.inf,\n meta.labels.max_val: np.inf}\n meta['pflux_47-68'] = {meta.labels.units: 'particles/cm2-s-ster-MeV',\n meta.labels.name: 'Diff Proton Flux 47-68 keV',\n meta.labels.notes: '',\n meta.labels.desc:\n ''.join([flux_desc,\n 'Proton Flux between 47-68 keV']),\n meta.labels.fill_val: -1.0e5,\n meta.labels.min_val: -np.inf,\n meta.labels.max_val: np.inf}\n meta['pflux_115-195'] = {meta.labels.units: 'particles/cm2-s-ster-MeV',\n meta.labels.name: 'Diff Proton Flux 115-195 keV',\n meta.labels.notes: '',\n meta.labels.desc:\n ''.join([flux_desc,\n 'Proton Flux between 115-195 keV']),\n meta.labels.fill_val: -1.0e5,\n meta.labels.min_val: -np.inf,\n meta.labels.max_val: np.inf}\n meta['pflux_310-580'] = {meta.labels.units: 'particles/cm2-s-ster-MeV',\n meta.labels.name: 'Diff Proton Flux 310-580 keV',\n meta.labels.notes: '',\n meta.labels.desc:\n ''.join([flux_desc,\n 'Proton Flux between 310-580 keV']),\n meta.labels.fill_val: -1.0e5,\n meta.labels.min_val: -np.inf,\n meta.labels.max_val: np.inf}\n meta['pflux_795-1193'] = {meta.labels.units: 
'particles/cm2-s-ster-MeV',\n meta.labels.name: 'Diff Proton Flux 795-1193 keV',\n meta.labels.notes: '',\n meta.labels.desc:\n ''.join([flux_desc,\n 'Proton Flux between 795-1193 keV']),\n meta.labels.fill_val: -1.0e5,\n meta.labels.min_val: -np.inf,\n meta.labels.max_val: np.inf}\n meta['pflux_1060-1900'] = {meta.labels.units: 'particles/cm2-s-ster-MeV',\n meta.labels.name:\n 'Diff Proton Flux 1060-1900 keV',\n meta.labels.notes: '',\n meta.labels.desc:\n ''.join([flux_desc,\n 'Proton Flux between 1060-1900 keV']),\n meta.labels.fill_val: -1.0e5,\n meta.labels.min_val: -np.inf,\n meta.labels.max_val: np.inf}\n return data, meta", "def load_data(filename) :\r\n data = Data()\r\n data.load(filename)\r\n return data", "def load(self):\n self.data = NSPSpecIO().read(self.path)", "def _read_raw_file(\n self, fname, allow_maxshield, preload, do_check_ext=True, verbose=None\n ):\n logger.info(\"Opening raw data file %s...\" % fname)\n\n # Read in the whole file if preload is on and .fif.gz (saves time)\n if not _file_like(fname):\n if do_check_ext:\n endings = (\n \"raw.fif\",\n \"raw_sss.fif\",\n \"raw_tsss.fif\",\n \"_meg.fif\",\n \"_eeg.fif\",\n \"_ieeg.fif\",\n )\n endings += tuple([f\"{e}.gz\" for e in endings])\n check_fname(fname, \"raw\", endings)\n # filename\n fname = str(_check_fname(fname, \"read\", True, \"fname\"))\n ext = os.path.splitext(fname)[1].lower()\n whole_file = preload if \".gz\" in ext else False\n del ext\n else:\n # file-like\n if not preload:\n raise ValueError(\"preload must be used with file-like objects\")\n whole_file = True\n fname_rep = _get_fname_rep(fname)\n ff, tree, _ = fiff_open(fname, preload=whole_file)\n with ff as fid:\n # Read the measurement info\n\n info, meas = read_meas_info(fid, tree, clean_bads=True)\n annotations = _read_annotations_fif(fid, tree)\n\n # Locate the data of interest\n raw_node = dir_tree_find(meas, FIFF.FIFFB_RAW_DATA)\n if len(raw_node) == 0:\n raw_node = dir_tree_find(meas, FIFF.FIFFB_CONTINUOUS_DATA)\n if len(raw_node) == 0:\n raw_node = dir_tree_find(meas, FIFF.FIFFB_IAS_RAW_DATA)\n if len(raw_node) == 0:\n raise ValueError(\"No raw data in %s\" % fname_rep)\n _check_maxshield(allow_maxshield)\n with info._unlock():\n info[\"maxshield\"] = True\n del meas\n\n if len(raw_node) == 1:\n raw_node = raw_node[0]\n\n # Process the directory\n directory = raw_node[\"directory\"]\n nent = raw_node[\"nent\"]\n nchan = int(info[\"nchan\"])\n first = 0\n first_samp = 0\n first_skip = 0\n\n # Get first sample tag if it is there\n if directory[first].kind == FIFF.FIFF_FIRST_SAMPLE:\n tag = read_tag(fid, directory[first].pos)\n first_samp = int(tag.data.item())\n first += 1\n _check_entry(first, nent)\n\n # Omit initial skip\n if directory[first].kind == FIFF.FIFF_DATA_SKIP:\n # This first skip can be applied only after we know the bufsize\n tag = read_tag(fid, directory[first].pos)\n first_skip = int(tag.data.item())\n first += 1\n _check_entry(first, nent)\n\n raw = _RawShell()\n raw.filename = fname\n raw.first_samp = first_samp\n if info[\"meas_date\"] is None and annotations is not None:\n # we need to adjust annotations.onset as when there is no meas\n # date set_annotations considers that the origin of time is the\n # first available sample (ignores first_samp)\n annotations.onset -= first_samp / info[\"sfreq\"]\n raw.set_annotations(annotations)\n\n # Go through the remaining tags in the directory\n raw_extras = list()\n nskip = 0\n orig_format = None\n\n for k in range(first, nent):\n ent = directory[k]\n # There can be skips in 
the data (e.g., if the user unclicked)\n # an re-clicked the button\n if ent.kind == FIFF.FIFF_DATA_SKIP:\n tag = read_tag(fid, ent.pos)\n nskip = int(tag.data.item())\n elif ent.kind == FIFF.FIFF_DATA_BUFFER:\n # Figure out the number of samples in this buffer\n if ent.type == FIFF.FIFFT_DAU_PACK16:\n nsamp = ent.size // (2 * nchan)\n elif ent.type == FIFF.FIFFT_SHORT:\n nsamp = ent.size // (2 * nchan)\n elif ent.type == FIFF.FIFFT_FLOAT:\n nsamp = ent.size // (4 * nchan)\n elif ent.type == FIFF.FIFFT_DOUBLE:\n nsamp = ent.size // (8 * nchan)\n elif ent.type == FIFF.FIFFT_INT:\n nsamp = ent.size // (4 * nchan)\n elif ent.type == FIFF.FIFFT_COMPLEX_FLOAT:\n nsamp = ent.size // (8 * nchan)\n elif ent.type == FIFF.FIFFT_COMPLEX_DOUBLE:\n nsamp = ent.size // (16 * nchan)\n else:\n raise ValueError(\n \"Cannot handle data buffers of type \" \"%d\" % ent.type\n )\n if orig_format is None:\n if ent.type == FIFF.FIFFT_DAU_PACK16:\n orig_format = \"short\"\n elif ent.type == FIFF.FIFFT_SHORT:\n orig_format = \"short\"\n elif ent.type == FIFF.FIFFT_FLOAT:\n orig_format = \"single\"\n elif ent.type == FIFF.FIFFT_DOUBLE:\n orig_format = \"double\"\n elif ent.type == FIFF.FIFFT_INT:\n orig_format = \"int\"\n elif ent.type == FIFF.FIFFT_COMPLEX_FLOAT:\n orig_format = \"single\"\n elif ent.type == FIFF.FIFFT_COMPLEX_DOUBLE:\n orig_format = \"double\"\n\n # Do we have an initial skip pending?\n if first_skip > 0:\n first_samp += nsamp * first_skip\n raw.first_samp = first_samp\n first_skip = 0\n\n # Do we have a skip pending?\n if nskip > 0:\n raw_extras.append(\n dict(\n ent=None,\n first=first_samp,\n nsamp=nskip * nsamp,\n last=first_samp + nskip * nsamp - 1,\n )\n )\n first_samp += nskip * nsamp\n nskip = 0\n\n # Add a data buffer\n raw_extras.append(\n dict(\n ent=ent,\n first=first_samp,\n last=first_samp + nsamp - 1,\n nsamp=nsamp,\n )\n )\n first_samp += nsamp\n\n next_fname = _get_next_fname(fid, fname_rep, tree)\n\n # reformat raw_extras to be a dict of list/ndarray rather than\n # list of dict (faster access)\n raw_extras = {key: [r[key] for r in raw_extras] for key in raw_extras[0]}\n for key in raw_extras:\n if key != \"ent\": # dict or None\n raw_extras[key] = np.array(raw_extras[key], int)\n if not np.array_equal(raw_extras[\"last\"][:-1], raw_extras[\"first\"][1:] - 1):\n raise RuntimeError(\"FIF file appears to be broken\")\n bounds = np.cumsum(\n np.concatenate([raw_extras[\"first\"][:1], raw_extras[\"nsamp\"]])\n )\n raw_extras[\"bounds\"] = bounds\n assert len(raw_extras[\"bounds\"]) == len(raw_extras[\"ent\"]) + 1\n # store the original buffer size\n buffer_size_sec = np.median(raw_extras[\"nsamp\"]) / info[\"sfreq\"]\n del raw_extras[\"first\"]\n del raw_extras[\"last\"]\n del raw_extras[\"nsamp\"]\n\n raw.last_samp = first_samp - 1\n raw.orig_format = orig_format\n\n # Add the calibration factors\n cals = np.zeros(info[\"nchan\"])\n for k in range(info[\"nchan\"]):\n cals[k] = info[\"chs\"][k][\"range\"] * info[\"chs\"][k][\"cal\"]\n\n raw._cals = cals\n raw._raw_extras = raw_extras\n logger.info(\n \" Range : %d ... %d = %9.3f ... 
%9.3f secs\"\n % (\n raw.first_samp,\n raw.last_samp,\n float(raw.first_samp) / info[\"sfreq\"],\n float(raw.last_samp) / info[\"sfreq\"],\n )\n )\n\n raw.info = info\n\n logger.info(\"Ready.\")\n\n return raw, next_fname, buffer_size_sec", "def test_readfile(self):\n fname = os.path.join(self.datadir, 'monol_testA_E3-50_rebin4_gti') + \\\n HEN_FILE_EXTENSION\n command = \"{0}\".format(fname)\n\n hen.io.main(command.split())", "def load(self,factory={}):\n canSave = self.canSave\n #--Header\n inPath = os.path.join(self.fileInfo.dir,self.fileInfo.name)\n ins = Tes3Reader(self.fileInfo.name,file(inPath,'rb'))\n (name,size,delFlag,recFlag) = ins.unpackRecHeader()\n self.tes3 = Tes3(name,size,delFlag,recFlag,ins,True)\n #--Raw data read\n dial = None\n while not ins.atEnd():\n #--Get record info and handle it\n (name,size,delFlag,recFlag) = ins.unpackRecHeader()\n #--SCPT?\n if name == 'SCPT':\n record = Scpt(name,size,delFlag,recFlag,ins,True)\n self.scripts.append(record)\n if canSave: self.records.append(record)\n #--Non-dials?\n elif canSave:\n record = Record(name,size,delFlag,recFlag,ins)\n self.records.append(record)\n else:\n ins.seek(size,1,'Record')\n #--Done Reading\n ins.close()", "def load_main_function(self, options, data=None):\n\n if self.rdmc.app.redfishinst.is_redfish:\n eth_type = \"ethernetinterface.\"\n else:\n eth_type = \"ethernetnetworkinterface.\"\n\n linking_dict = {\n \"manager_interfaces\": eth_type,\n \"systems_interfaces\": eth_type,\n \"manager_network_services\": \"networkprotocol.\",\n \"ilo_date_time\": \"datetime.\",\n }\n\n if not data:\n data = {}\n try:\n if options.encryption:\n with open(self.eth_file, \"rb\") as file_handle:\n data = json.loads(\n Encryption().decrypt_file(\n file_handle.read(), options.encryption\n )\n )\n else:\n with open(self.eth_file, \"rb\") as file_handle:\n data = json.loads(file_handle.read())\n except:\n raise InvalidFileInputError(\n \"Invalid file formatting found. 
Verify the file has a \"\n \"valid JSON format.\"\n )\n\n for ilotype, subsect in data.items():\n _type = ilotype.split(\".\")[0]\n for _path in subsect:\n if not subsect[_path]:\n continue\n elif (\n \"ethernetinterface\" in _type.lower()\n or \"ethernetnetworkinterface\" in _type.lower()\n ):\n if \"managers\" in _path.lower():\n self.load_ethernet_aux(_type, _path, data[ilotype][_path])\n elif \"systems\" in _path.lower():\n self.rdmc.ui.warn(\n \"Systems Ethernet Interfaces '%s' \"\n \"cannot be modified.\" % _path\n )\n continue\n elif \"datetime\" in _type.lower():\n if \"StaticNTPServers\" in list(subsect.get(_path).keys()):\n # must set NTP Servers to static in OEM then reset iLO for StaticNTPServers\n # property to appear in iLODateTime\n if self.rdmc.app.redfishinst.is_redfish:\n eth_config_type = \"ethernetinterface\"\n else:\n eth_config_type = \"ethernetnetworkinterface\"\n for key in list(data.keys()):\n if key.split(\".\")[0].lower() == eth_config_type:\n eth_config_type = key\n for _path in data[eth_config_type]:\n if \"managers\" in _path.lower():\n try:\n data[eth_config_type][_path][\"DHCPv4\"][\n \"UseNTPServers\"\n ] = True\n data[eth_config_type][_path][\"DHCPv6\"][\n \"UseNTPServers\"\n ] = True\n data[eth_config_type][_path][\"Oem\"][\n self.rdmc.app.typepath.defs.oemhp\n ][\"DHCPv4\"][\"UseNTPServers\"] = True\n data[eth_config_type][_path][\"Oem\"][\n self.rdmc.app.typepath.defs.oemhp\n ][\"DHCPv6\"][\"UseNTPServers\"] = True\n self.load_ethernet_aux(\n eth_config_type,\n _path,\n data[eth_config_type][_path],\n )\n except KeyError:\n self.rdmc.ui.printer(\n \"Unable to configure \"\n \"'UseNTPServers' for '%s'.\\n\" % _path\n )\n self.rdmc.ui.printer(\n \"iLO must be reset in order for \"\n \"changes to static network time protocol servers to \"\n \"take effect.\\n\"\n )", "def test_load(api):\n # upload file to file.io servers\n uploaded_file = api.upload(\n tag='test_file',\n expiry='1d',\n path='tests/test_file.txt'\n )\n\n # check that instance of FileIO has these fields\n assert uploaded_file.link\n assert uploaded_file.key\n assert uploaded_file.tag\n assert uploaded_file.path\n\n # check that the uploaded file was added to uploaded files list\n assert api.show_uploads()\n\n # check that our list is not empty\n assert api.file_obj_list\n\n # export the file in json format\n api.export('tests/exported.json')\n\n # check that exported file exists\n assert path.isfile('tests/exported.json')\n\n # set it to empty list\n api.file_obj_list = []\n\n # load the file in json format\n api.load('tests/exported.json')\n\n # remove the file\n remove('tests/exported.json')\n\n # check that the uploaded file was added to uploaded files list\n assert api.show_uploads()\n\n # check that our list is not empty\n assert api.file_obj_list\n\n # export the file in pkl format\n api.export('tests/exported.pkl')\n\n # set it to empty list\n api.file_obj_list = []\n\n # load the file in pkl format\n api.load('tests/exported.pkl')\n\n # remove exported.pkl file\n remove('tests/exported.pkl')\n\n # check that the uploaded file was added to uploaded files list\n assert api.show_uploads()\n\n # check that our list is not empty\n assert api.file_obj_list", "def load_data(self):\n \n # only loader implemented so far !\n try:\n _ascii_array = Utilities.load_ascii(filename=self.filename, sep='')\n start_row = TOF._first_line_number_with_real_data(_ascii_array[0, 0])\n\n _tof_column = _ascii_array[start_row:, 0]\n\n if not TOF._is_this_numeric(_tof_column[0]):\n start_row += 1\n\n 
_tof_column = _ascii_array[start_row:, 0]\n _counts_column = _ascii_array[start_row:, 1]\n\n self.tof_array = _tof_column\n self.counts_array = _counts_column\n return\n\n except IndexError:\n pass # try another format\n\n try:\n _ascii_array = Utilities.load_ascii(filename=self.filename, sep=',')\n start_row = TOF._first_line_number_with_real_data(_ascii_array[0, 0])\n\n _tof_column = _ascii_array[start_row:, 0] # first row must be excluded in this format\n\n if not TOF._is_this_numeric(_tof_column[0]):\n start_row += 1\n\n _tof_column = _ascii_array[start_row:, 0]\n _counts_column = _ascii_array[start_row:, 1]\n\n self.tof_array = _tof_column\n self.counts_array = _counts_column\n return\n\n except IndexError:\n raise IndexError(\"Format not implemented!\")", "def load(self, fname, snver=1):\n self._data = self._io.load(fname, snver=snver)", "def __init__(self):\n f = open(configuration.dataDirectory+'Muon_ID_iso_Efficiencies_Run_2012ABCD_53X.pkl', 'r')\n if f :\n self._map = pickle.load(f)\n self._range = ''\n else :\n print 'ERROR: Input file for muon SF not existing!'", "def import_equipment_from_file(self, filename='') -> None:\n if not filename:\n filename = cif_file_open_dialog(filter=\"CIF file (*.cif *.cif_od *.cfx)\")\n if not filename:\n print('No file given')\n return\n doc = read_document_from_cif_file(filename)\n if not doc:\n return\n for block in doc:\n self._import_block(block, filename)\n self.show_equipment()", "def import_file(self, *args, **kwargs):\n filename = self.file\n self.completed_layers = []\n err = GdalErrorHandler()\n gdal.PushErrorHandler(err.handler)\n gdal.UseExceptions()\n configuration_options = kwargs.get('configuration_options', [{'index': 0}])\n\n # Configuration options should be a list at this point since the importer can process multiple layers in a\n # single import\n if isinstance(configuration_options, dict):\n configuration_options = [configuration_options]\n\n data, inspector = self.open_source_datastore(filename, *args, **kwargs)\n\n datastore_layers = inspector.describe_fields()\n\n if len(datastore_layers) == 0:\n logger.debug('No Dataset found')\n\n layers_info = []\n\n # Add index for any layers configured by name\n for layer_configuration in configuration_options:\n if 'layer_name' in layer_configuration:\n lookup = 'layer_name'\n elif 'index' in layer_configuration:\n lookup = 'index'\n else:\n lookup = None\n logger.debug('could not find lookup')\n continue\n\n for datastore_layer in datastore_layers:\n if datastore_layer.get(lookup) == layer_configuration.get(lookup):\n layer_configuration.update(datastore_layer)\n layers_info.append(layer_configuration)\n\n for layer_options in layers_info:\n if layer_options['raster']:\n \"\"\"\n File is a raster, we need to convert into optimized GeoTiff\n and skip any further testing or loading into target_store\n \"\"\"\n # Increment filename to make sure target doesn't exists\n filedir, filebase = os.path.split(filename)\n outfile = '%s.tif' % os.path.splitext(filebase)[0]\n fileout = increment_filename(os.path.join(RASTER_FILES, outfile))\n raster_import(layer_options['path'], fileout)\n self.completed_layers.append([fileout, layer_options])\n else:\n target_file, _ = self.open_target_datastore(self.target_store)\n target_create_options = []\n\n # Prevent numeric field overflow for shapefiles https://trac.osgeo.org/gdal/ticket/5241\n if target_file.GetDriver().GetName() == 'PostgreSQL':\n target_create_options.append('PRECISION=NO')\n\n layer_options['modified_fields'] = {}\n layer = 
data.GetLayer(layer_options.get('index'))\n layer_name = layer_options.get('name', layer.GetName().lower())\n layer_type = self.get_layer_type(layer, data)\n srs = layer.GetSpatialRef()\n\n if layer_name.lower() == 'ogrgeojson':\n try:\n layer_name = os.path.splitext(os.path.basename(filename))[0].lower()\n except IndexError:\n pass\n\n layer_name = launder(str(layer_name))\n\n # default the layer to 4326 if a spatial reference is not provided\n if not srs:\n srs = osr.SpatialReference()\n srs.ImportFromEPSG(4326)\n\n # pass the srs authority code to handlers\n if srs.AutoIdentifyEPSG() == 0:\n layer_options['srs'] = '{0}:{1}'.format(srs.GetAuthorityName(None), srs.GetAuthorityCode(None))\n\n n = 0\n while True:\n n += 1\n try:\n target_layer = self.create_target_dataset(target_file, layer_name, srs, layer_type,\n options=target_create_options)\n except RuntimeError as e:\n # logger.exception('exception in creating target dataset')\n # the layer already exists in the target store, increment the name\n if 'Use the layer creation option OVERWRITE=YES to replace it.' in e.message:\n layer_name = increment(layer_name)\n\n # try 100 times to increment then break\n if n >= 100:\n break\n\n continue\n else:\n raise e\n break\n\n # adding fields to new layer\n layer_definition = ogr.Feature(layer.GetLayerDefn())\n source_fid = None\n\n wkb_field = 0\n\n for i in range(layer_definition.GetFieldCount()):\n\n field_def = layer_definition.GetFieldDefnRef(i)\n\n if field_def.GetName() == target_layer.GetFIDColumn() and field_def.GetType() != 0:\n field_def.SetType(0)\n\n if field_def.GetName() != 'wkb_geometry':\n target_layer.CreateField(field_def)\n new_name = target_layer.GetLayerDefn().GetFieldDefn(i - wkb_field).GetName()\n old_name = field_def.GetName()\n\n if new_name != old_name:\n layer_options['modified_fields'][old_name] = new_name\n\n if old_name == target_layer.GetFIDColumn() and not layer.GetFIDColumn():\n source_fid = i\n else:\n wkb_field = 1\n\n if wkb_field is not 0:\n layer.SetIgnoredFields(['wkb_geometry'])\n\n for i in range(0, layer.GetFeatureCount()):\n feature = layer.GetFeature(i)\n\n if feature and feature.geometry():\n\n if not layer.GetFIDColumn():\n feature.SetFID(-1)\n\n if feature.geometry().GetGeometryType() != target_layer.GetGeomType() and \\\n target_layer.GetGeomType() in range(4, 7):\n\n conversion_function = ogr.ForceToMultiPolygon\n\n if target_layer.GetGeomType() == 5:\n conversion_function = ogr.ForceToMultiLineString\n\n elif target_layer.GetGeomType() == 4:\n conversion_function = ogr.ForceToMultiPoint\n\n geom = ogr.CreateGeometryFromWkb(feature.geometry().ExportToWkb())\n feature.SetGeometry(conversion_function(geom))\n\n if source_fid is not None:\n feature.SetFID(feature.GetField(source_fid))\n\n try:\n target_layer.CreateFeature(feature)\n\n except:\n for field in range(0, feature.GetFieldCount()):\n if feature.GetFieldType(field) == ogr.OFTString:\n try:\n feature.GetField(field).decode('utf8')\n except UnicodeDecodeError:\n feature.SetField(field, decode(feature.GetField(field)))\n except AttributeError:\n continue\n try:\n target_layer.CreateFeature(feature)\n except err as e:\n logger.error('Create feature failed: {0}'.format(gdal.GetLastErrorMsg()))\n raise e\n\n self.completed_layers.append([target_layer.GetName(), layer_options])\n\n return self.completed_layers", "def load(self,factory={}):\n canSave = self.canSave\n InfoClass = factory.get('INFO',InfoS) #--Info class from factory.\n #--Header\n inPath = 
os.path.join(self.fileInfo.dir,self.fileInfo.name)\n ins = Tes3Reader(self.fileInfo.name,file(inPath,'rb'))\n (name,size,delFlag,recFlag) = ins.unpackRecHeader()\n self.tes3 = Tes3(name,size,delFlag,recFlag,ins,True)\n #--Raw data read\n dial = None\n while not ins.atEnd():\n #--Get record info and handle it\n (name,size,delFlag,recFlag) = ins.unpackRecHeader()\n #--DIAL?\n if name == 'DIAL':\n dial = Dial(name,size,delFlag,recFlag,ins,True)\n self.dials.append(dial)\n if canSave: self.records.append(dial)\n #--INFO?\n elif name == 'INFO':\n info = InfoClass(name,size,delFlag,recFlag,ins,True)\n self.records.append(info)\n dial.infos.append(info)\n self.infos[(dial.type,dial.id,info.id)] = info\n #--Non-dials?\n elif canSave:\n record = Record(name,size,delFlag,recFlag,ins)\n self.records.append(record)\n else:\n ins.seek(size,1,'Record')\n #--Done Reading\n ins.close()", "def loadFromFile():\n try:\n f1 = open( \"friendshipMap.p\", \"rb\" )\n friendship_map = pickle.load(f1)\n f1.close()\n f2 = open( \"businessReviews.p\", \"rb\" )\n business_reviews = pickle.load(f2)\n f2.close()\n f3 = open( \"degreeCentrality.p\", \"rb\" )\n degree_centrality_map = pickle.load(f3)\n f3.close()\n f4 = open( \"closenessCentrality.p\", \"rb\" )\n closeness_centrality_map = pickle.load(f4)\n f4.close()\n f5 = open( \"betweennessCentrality.p\", \"rb\" )\n betweenness_centrality_map = pickle.load(f5)\n f5.close()\n except IOError as e:\n sys.stderr.write(\"I/O error({0}): {1}\".format(e.errno, e.strerror)+'\\n')\n sys.stderr.write('Try running with -buildClean = clean!\\n')\n\n return (friendship_map, business_reviews, degree_centrality_map, closeness_centrality_map, betweenness_centrality_map, YGraph)", "def load_data_part(fname):\n if \"_data\" not in fname:\n return None\n # Read data\n data = pd.read_csv(fname)\n # events file\n events_fname = fname.replace('_data', '_events')\n # read event file\n labels = pd.read_csv(events_fname)\n clean = data.drop(['id'], axis=1) # remove id\n labels = labels.drop(['id'], axis=1) # remove id\n return clean, labels", "def load_pfile(self, **kwargs):\r\n pfile = kwargs['pfile']\r\n filetype = kwargs['filetype']\r\n\r\n # Loads the pfile and finds the indices, still need to sync and parse.\r\n self.pfile = PFILE(pfile, filetype=filetype)\r\n # self.pfile.sync(tstep='auto')\r", "def __load_model(self):\n loaded = load(self.__file_name)\n self.__model = loaded['model']\n self.__meta_data = loaded['metadata']\n self.__is_ready = True", "def load(self, input):\n return", "def load(self):\n raise NotImplementedError()", "def load(self):\n raise NotImplementedError()", "def load(self):\n canSave = self.canSave\n #--Header\n inPath = os.path.join(self.fileInfo.dir,self.fileInfo.name)\n ins = Tes3Reader(self.fileInfo.name,file(inPath,'rb'))\n (name,size,delFlag,recFlag) = ins.unpack('4s3i',16,'REC_HEAD')\n self.tes3 = Tes3(name,size,delFlag,recFlag,ins,True)\n #--Raw data read\n while not ins.atEnd():\n #--Get record info and handle it\n (name,size,delFlag,recFlag) = ins.unpackRecHeader()\n #--LEVC?\n if name == 'LEVC':\n levc = Levc(name,size,delFlag,recFlag,ins,True)\n self.levcs[levc.id] = levc\n if canSave: self.records.append(levc)\n #print ' Added:',levc.id\n elif name == 'LEVI':\n levi = Levi(name,size,delFlag,recFlag,ins,True)\n self.levis[levi.id] = levi\n if canSave: self.records.append(levi)\n #print ' Added:',levi.id\n #--Other\n elif canSave:\n record = Record(name,size,delFlag,recFlag,ins)\n self.records.append(record)\n else:\n ins.seek(size,1,'Record')\n 
#--Done Reading\n ins.close()", "def load(path):\n pass", "def doImport(self,textFile):\n self.loadText(textFile)\n self.getBooks()\n #self.copyBooks()\n self.genLibData()\n self.genLibCells()\n self.sortRecords()", "def load_synthetic_data():\n\n pickle_object = FM().data_file \n\n with pickle_object.open('rb') as data_file: \n return pickle.load(data_file)", "def _load(self):\n service_manager = helper_util.getServiceManager(self.hostname, self.port,\n self.uno_path,\n self.office_binary_path)\n desktop = service_manager.createInstance(\"com.sun.star.frame.Desktop\")\n uno_url = self.systemPathToFileUrl(self.document_url)\n uno_document = desktop.loadComponentFromURL(uno_url, \"_blank\", 0, ())\n if not uno_document:\n raise AttributeError(\"This document can not be loaded or is empty\")\n if self.refresh:\n # Before converting to expected format, refresh dynamic\n # value inside document.\n dispatcher = service_manager.createInstance(\"com.sun.star.frame.DispatchHelper\")\n for uno_command in ('UpdateFields', 'UpdateAll', 'UpdateInputFields',\n 'UpdateAllLinks', 'UpdateCharts',):\n dispatcher.executeDispatch(uno_document.getCurrentController().getFrame(),\n '.uno:%s' % uno_command, '', 0, ())\n module_manager = service_manager.createInstance(\"com.sun.star.frame.ModuleManager\")\n self.document_type = module_manager.identify(uno_document)\n self.document_loaded = uno_document", "def F(f):\n return datafile(f, __name__)", "def load(self):\n settings_path = os.path.join(self.file_path, \"__file_data.json\")\n if os.path.exists( settings_path ):\n self.fileList = simplejson.loads( open( settings_path, 'r' ).read() )\n\n settings_path = os.path.join(self.file_path, \"__user_data.json\")\n if os.path.exists( settings_path ):\n self.userList = simplejson.loads( open( settings_path, 'r' ).read() )", "def do_load(self, line):\n cmd_args = io.parse_cmd_args(line, io.load_cmd_pattern)\n if cmd_args:\n success = self.manager.load(**cmd_args)\n if success:\n self.console_print(\"Yippee! 
load successful!\", settings.INFO_FORMAT)\n else:\n self.console_print(\"Sorry, the data could not be loaded from file.\", settings.ERROR_FORMAT)\n else:\n self.console_print(settings.COMMMAND_ARGS_ERROR_MSG, settings.ERROR_FORMAT)", "def read_input():\n\n filenames = sorted(glob.glob(\"%s/openflow_input/*\" % root_dir))\n\n for filename in filenames:\n log(\"Processing struct file: \" + filename)\n ofinput = process_input_file(filename)\n\n # Populate global state\n for wire_version in ofinput.wire_versions:\n version_name = of_g.of_version_wire2name[wire_version]\n versions[version_name]['classes'].update(copy.deepcopy(ofinput.classes))\n of_g.ordered_classes[wire_version].extend(ofinput.ordered_classes)", "def load(self, *args, **kwargs):\n pass", "def _load(self, file_path, **kwargs):\n raise NotImplementedError()", "def LoadBatch(filename):", "def load (self, filename) :\n\t\tserialFile = open (filename, \"rb\")\n\t\tself.production_rules = pickle.load (serialFile)\n\t\tself.unitrelation = pickle.load (serialFile)\n\t\tself.labels = pickle.load (serialFile)\n\t\tself.keeper = pickle.load (serialFile)\n\t\tself.strnodes = pickle.load(serialFile)\n\t\tself.tokens = pickle.load (serialFile)\n\t\tserialFile.close()", "def __init__(self, filename=None,\n astrotarget=None,data_index=0,\n dataslice0=None,dataslice1=None,\n empty=False, **kwargs):\n self.__build__(data_index=data_index)\n\n if empty:\n return\n \n if filename is not None:\n force_it = kwargs.pop(\"force_it\",True)\n self.load(filename,force_it=force_it,\n dataslice0=dataslice0,\n dataslice1=dataslice1,\n **kwargs)\n # - Set the target if any\n if astrotarget is not None:\n self.set_target(astrotarget)", "def load_data(self):\n if self.debug:\n print(\"Loading data\")", "def load_data(self, **kwargs):\n type = kwargs['type']\n file = kwargs['file']\n with open(file, 'r') as data_file:\n for line in data_file:\n line = line[:-1]\n items_dict = ast.literal_eval(line)\n\n item = type.from_dict(items_dict)\n\n self.add_item(item, lambda i: i.uid)", "def _import(self, datadict):\n self.GUID = datadict.get(\"GUID\", uuid.uuid1())\n self.FileName = datadict.get(\"FileName\", \"\")\n self.Name = datadict.get(\"Name\", \"\")\n self.Projects = datadict.get(\"Projects\", [])\n self.VSVersion = datadict.get(\"VSVersion\", None)", "def load_object(fpath):\r\n with open(fpath, 'rb') as i:\r\n return pickle.load(i)", "def load_aif(\n obj: aif.Graph,\n name: t.Optional[str] = None,\n config: Config = DefaultConfig,\n) -> Graph:\n g = config.GraphClass(name)\n\n for aif_node in obj[\"nodes\"]:\n node = (\n atom_from_aif(aif_node, config)\n if aif_node[\"type\"] == \"I\"\n else scheme_from_aif(aif_node, config)\n )\n\n if node:\n g.add_node(node)\n\n for aif_edge in obj[\"edges\"]:\n if edge := edge_from_aif(aif_edge, g.nodes, config):\n g.add_edge(edge)\n\n return g", "def __init__(self):\n f = open(configuration.dataDirectory+'MuonEfficiencies_Run_2012A_2012_B_53X.pkl', 'r')\n if f :\n self._map = pickle.load(f)\n self._eta_range = ''\n self._pt_range = ''\n else :\n print 'ERROR: Input file for Trigger efficiencies not existing!'", "def __init__(self):\n f = open(configuration.dataDirectory+'MuonEfficiencies_Run_2012A_2012_B_53X.pkl', 'r')\n if f :\n self._map = pickle.load(f)\n self._eta_range = ''\n self._pt_range = ''\n else :\n print 'ERROR: Input file for Trigger efficiencies not existing!'", "def load(logFile):\n pass #TODO", "def _read_file(self):\n\n with open(self.file_name, 'rb') as f:\n new_test = struct.unpack('<l', 
f.read(8)[4:])[0]\n f.close()\n\n with open(self.file_name, 'rb') as f:\n old_test = struct.unpack('<h', f.read(6)[4:])[0]\n f.close()\n\n with open(self.file_name, 'rb') as f:\n other_test = struct.unpack('<l', f.read(20)[16:])[0]\n f.close()\n\n open_file = open(self.file_name, 'rb')\n\n if (other_test==202):\n raw = open_file.read(1236)[11:]\n self.model = '202'\n elif ((not new_test==102) and old_test==102):\n raw = open_file.read(1133)\n self.model = '102old'\n elif (new_test==102 and old_test==102):\n raw = open_file.read(1224)\n self.model = '102new'\n\n self.header = DpHeader(raw, self.model)\n\n self.data = DpData(open_file, \n self.model, \n self.header.interferogram_size, \n self.header.number_of_coadds, \n 2048*self.header.zero_fill,\n self.header.laser_wavelength_microns, \n self.header.dispersion_constant_xm,\n self.header.dispersion_constant_xb)\n\n open_file.close()" ]
[ "0.6358744", "0.62189686", "0.6196685", "0.6185245", "0.6043738", "0.5999552", "0.59746337", "0.5964088", "0.5944215", "0.5927775", "0.5897965", "0.58831894", "0.5869329", "0.5848849", "0.58181727", "0.5810083", "0.5804512", "0.577736", "0.576331", "0.57621175", "0.5762104", "0.57602143", "0.5758326", "0.57557267", "0.57436126", "0.5741986", "0.57318664", "0.57318664", "0.57318664", "0.57318664", "0.57044774", "0.5691296", "0.56688166", "0.56122416", "0.56080985", "0.55898607", "0.55884653", "0.5579047", "0.55579287", "0.5551302", "0.5551302", "0.55457485", "0.5540401", "0.5537634", "0.55355173", "0.5524949", "0.5512765", "0.5511954", "0.55038965", "0.5497772", "0.5492163", "0.5490701", "0.54899055", "0.54882944", "0.54860896", "0.5479889", "0.5466846", "0.5462482", "0.54620886", "0.5423454", "0.5423006", "0.5403777", "0.5401884", "0.5394161", "0.5387714", "0.5383772", "0.53721434", "0.53716564", "0.5371453", "0.5368885", "0.5364547", "0.53587323", "0.53563404", "0.5356022", "0.5350395", "0.5347933", "0.5346808", "0.5346808", "0.53461576", "0.5342228", "0.5339782", "0.5335759", "0.5333362", "0.53278315", "0.53257835", "0.53243214", "0.5323729", "0.5319341", "0.5318134", "0.53174126", "0.5311326", "0.53105253", "0.5308928", "0.530245", "0.52990204", "0.52923906", "0.5292363", "0.5285798", "0.5285798", "0.52853703", "0.528348" ]
0.0
-1
Returns the name of the dataset series which this datasource operates on
def getName(self): return self.name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dataset_name(self):\n return self.dataset.name", "def get_dataset_name(self):\n raise NotImplementedError", "def get_dataset_name(self):\n return self.dataset_name", "def dataset_name(self):\n return self._dataset_name", "def series_names(self):\r\n return self.names", "def short_name(self):\r\n raise NotImplementedError('BaseDataSource::short_name not specified.')", "def name(self):\n if ( self._typeSensor == _production):\n name = \"myEnedis.%s.production\" %(self._myDataSensorEnedis.get_PDL_ID())\n else:\n name = \"myEnedis.%s\" %(self._myDataSensorEnedis.get_PDL_ID())\n return name", "def __get_dataset_name(self):\n d = gdal.Open(self.fname)\n # Get band metadata\n b = d.GetRasterBand(1)\n md = b.GetMetadata()\n\n if 'data_var' in md:\n return md['data_var']\n else:\n fnames = d.GetFileList()\n if len(fnames) > 2:\n d = gdal.Open(fnames[1])\n # Get band metadata\n b = d.GetRasterBand(1)\n md = b.GetMetadata()\n if 'data_var' in md:\n return md['data_var']\n else:\n return 'data'\n else:\n return 'data'", "def assemble_chart(self):\n return self.name", "def GetSeriesColumnName(series):\n return '{} & {} & QC {}'.format(series.site_code, series.variable_code, series.quality_control_level_code)", "def series_id(self) -> str:\n return self.get_main_information()['ParentSeries']", "def name(self):\n return 'data_extraction_for_' + '_'.join(self.names).lower()", "def _dataset_name(self):\n return f'Libri{self.task}Mix'", "def name(self):\n return f\"{habitica.DOMAIN}_{self._name}_{self._sensor_name}\"", "def get_data_name(self, idx):\n name = None\n if type(idx) is int:\n n = self.data_count()\n assert 0 <= idx <= n - 1, \"Bad data index\"\n name = self.data[idx].name\n return(name)", "def axis_name(self):\n return self._axis_name", "def name(self):\n return self._sensor.name", "def get_sensor_name(self):\n return self.data[1]", "def name(self) -> str:\n return f\"{self._inst} {self._sid_data['sid']} {self._data[self._sid_data['sid_name']]}\"", "def get_filename(self):\n return self.ds_filename", "def dc_name(self):\n return self.container_name", "def get_measurement_objects_name(self):\n return self.object_name.value", "def get_archive_name(self) -> str:\n # TODO: Support for user-defined or metadata-based (e.g. title) name\n return \"dataset\"", "def getSeriesFK(self):\n return self.getDbRecord().getColumnValue(SERIES_COLUMN)", "def name(self):\n return self.__name__", "def get_name(self):\n raise NotImplementedError", "def get_name(self):\n raise NotImplementedError", "def get_name(self):\n raise NotImplementedError", "def get_name(self):\n raise NotImplementedError", "def get_name(self):\n\n\t\treturn self.__name", "def name(self):\n return self.__name", "def name(self):\n return self.__name", "def name(self):\n return self.__name", "def name(self):\n return self.__name", "def name(self):\n return self.__name", "def name(self):\n return self.__name", "def name(self):\n return self.__name", "def name(self):\n return self.__name", "def name(self):\n return self.__name", "def name(self):\n return self.__name", "def name ( self ) :\n return self.__name if self.__name else ''", "def get_name(self):\n\t\treturn self.__name", "def getName(self):\n return self.__name__", "def getDatasetName(sitemover, datasetDict, lfn, pdsname):\n # (dsname_report is the same as dsname but might contain _subNNN parts)\n\n # get the dataset name from the dictionary\n if datasetDict:\n try:\n dsname = datasetDict[lfn]\n except Exception, e:\n tolog(\"!!WARNING!!2999!! 
Could not get dsname from datasetDict for file %s: %s, %s (using default %s)\" % (lfn, e, str(datasetDict), pdsname))\n dsname = pdsname\n else:\n dsname = pdsname\n\n # save the original dsname for the tracing report\n dsname_report = dsname\n\n # remove any _subNNN parts from the dataset name (from now on dsname will only be used to create SE destination paths)\n dsname = sitemover.removeSubFromDatasetName(dsname)\n\n tolog(\"File %s will go to dataset %s\" % (lfn, dsname))\n\n return dsname, dsname_report", "def series_axis(self):\n return self.container['series_axis']", "def get_sensor_name(self):\n\n return self._sensor_results_list[0].get_sensor_model()", "def get_name(self):\n return self.__name", "def get_name(self):\n return self.__name", "def get_name(self):\n return self.__name", "def name(self):\n return f\"{self._tc_object.name} {SENSOR_TYPES[self.type][0]}\"", "def name(self) -> str:\n return self.observation.name", "def get_name(self):\n return self.col_name", "def name(self):\n return f\"{self._name} {SENSOR_TYPES[self.sensor][0]}\"", "def get_name(self):\n\t\treturn self.source.get_name()", "def get_name(self):\r\n return self.__name", "def get_name(self):\r\n return self.__name", "def get_name(self):\r\n return self.__name", "def get_name(self):\r\n return self.__name", "def get_name(self):\r\n raise NotImplementedError", "def name(self) -> str:\n return self.__name", "def name(self) -> str:\n return self.__name", "def name(self) -> str:\n return self.__name", "def name(self) -> str:\n return self.__name", "def name(self) -> str:\n return self.__name", "def name(self) -> str:\n return self.__name", "def name(self) -> str:\n return self.__name", "def getName(self):\n return _libsbml.SBase_getName(self)", "def getName(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def name(self):\n return f\"{self._name} {self._sensor_name}\"", "def name(self):\n return f\"{self._name}_{self._sensor}\"", "def name(self):\n return self.__class__.__name__", "def name(self):\n return self.__class__.__name__", "def name(self):\n return self.__class__.__name__", "def name(self):\n return self.__class__.__name__", "def name(self):\n return self.__class__.__name__", "def get_name(self) -> str:\n raise NotImplementedError", "def name(self):\n return self.measurement_profile.name", "def get_name(self):\n return", "def getCoaddDatasetName(self):\n warpType = self.config.warpType\n suffix = \"\" if warpType == \"direct\" else warpType[0].upper() + warpType[1:]\n return self.config.coaddName + \"Coadd\" + suffix", "def getName(self):\n return self.__name", "def name(self):\n\t\treturn self._name", "def name(self):\n\t\treturn self._name", "def name(self):\n\n return self.__class__.__name__", "def dataset_id(self) -> str:\n return self._dataset_id", "def get_name(self):\n pass", "def get_name(self):\n pass", "def getname(self):\n return self.__name", "def name(self):\n raise NotImplementedError # pragma: no cover", "def get_name(self):\n return self._label", "def get_name(self):\n return self._sName", "def getName(self):\r\n return self.__name__" ]
[ "0.76386154", "0.7613764", "0.7546239", "0.75300765", "0.7156447", "0.6760011", "0.6729094", "0.6698387", "0.66739553", "0.6590775", "0.6491004", "0.64055467", "0.6366336", "0.63660336", "0.63432425", "0.63402456", "0.63139105", "0.6290302", "0.6242632", "0.62166566", "0.6213114", "0.62119496", "0.62027806", "0.6202306", "0.6178776", "0.6159616", "0.6159616", "0.6159616", "0.6159616", "0.6154153", "0.61507297", "0.61507297", "0.61507297", "0.61507297", "0.61507297", "0.61507297", "0.61507297", "0.61507297", "0.61507297", "0.61507297", "0.6142661", "0.61153936", "0.61031866", "0.6099745", "0.6094658", "0.60743725", "0.6074302", "0.6074302", "0.6074302", "0.60698426", "0.6069239", "0.60685843", "0.60685307", "0.6062064", "0.6056453", "0.6056453", "0.6056453", "0.6056453", "0.60386145", "0.60333526", "0.60333526", "0.60333526", "0.60333526", "0.60333526", "0.60333526", "0.60333526", "0.60332686", "0.60317624", "0.6028028", "0.6028028", "0.6028028", "0.6028028", "0.6028028", "0.6028028", "0.6028028", "0.6028028", "0.6028028", "0.6028028", "0.6027506", "0.6021898", "0.6004214", "0.6004214", "0.6004214", "0.6004214", "0.6004214", "0.60029376", "0.6001677", "0.59987205", "0.5992488", "0.59914035", "0.5986139", "0.5986139", "0.5986116", "0.59745616", "0.5974038", "0.5974038", "0.59738463", "0.59700704", "0.5969258", "0.59650075", "0.59642047" ]
0.0
-1
Returns the ctf of the dataset series which this datasource operates on
def getColorTransferFunction(self): return self.ctf
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getCDF(self):\n return self.cdfSample", "def CFL(self):\n return self.__CFL", "def get_cffts(self):\n return [\n rfft(self.nx, self.dx, fft=self.tfft, ny=self.ny,\n dy=self.dy).get_cfft(),\n rfft(self.nx, self.dx, fft=self.efft, ny=self.ny,\n dy=self.dy).get_cfft(),\n rfft(self.nx, self.dx, fft=self.bfft, ny=self.ny,\n dy=self.dy).get_cfft()\n ]", "def cpf(self):\n return self._cpf", "def get_cfft(self):\n return self.get_rfft().get_cfft()", "def tctfdfc(x):\n if isinstance(x,Fdf) :\n pass\n else : \n x = Fdf.constant(x)\n return x", "def get_crds_fc(self, axes=None, shaped=False):\n return self._src_crds.get_crds_fc(axes=axes, shaped=shaped)", "def coherency(self):\r\n data = self.input.data\r\n tseries_length = data.shape[0]\r\n spectrum_length = self.spectrum.shape[-1]\r\n\r\n coherency = np.zeros((tseries_length,\r\n tseries_length,\r\n spectrum_length), dtype=complex)\r\n\r\n for i in range(tseries_length):\r\n for j in range(i, tseries_length):\r\n coherency[i][j] = tsa.coherency_spec(self.spectrum[i][j],\r\n self.spectrum[i][i],\r\n self.spectrum[j][j])\r\n\r\n idx = tril_indices(tseries_length, -1)\r\n coherency[idx[0], idx[1], ...] = coherency[idx[1], idx[0], ...].conj()\r\n\r\n return coherency", "def cdf(x, point):\n raise NotImplementedError(\"The cdf method has not yet been implemented.\")", "def coherence(self):\r\n\r\n #XXX Calculate this from the standard output, instead of recalculating\r\n #the coherence:\r\n\r\n tseries_length = self.input.data.shape[0]\r\n spectrum_length = self.spectrum.shape[-1]\r\n coherence = np.zeros((tseries_length,\r\n tseries_length,\r\n spectrum_length))\r\n\r\n for i in range(tseries_length):\r\n for j in range(i, tseries_length):\r\n coherence[i][j] = tsa.coherence_spec(self.spectrum[i][j],\r\n self.spectrum[i][i],\r\n self.spectrum[j][j])\r\n\r\n idx = tril_indices(tseries_length, -1)\r\n coherence[idx[0], idx[1], ...] 
= coherence[idx[1], idx[0], ...].conj()\r\n\r\n return coherence", "def getCoaddDatasetName(self):\n warpType = self.config.warpType\n suffix = \"\" if warpType == \"direct\" else warpType[0].upper() + warpType[1:]\n return self.config.coaddName + \"Coadd\" + suffix", "def fcvs(self): \n return self._link_reg.fcvs", "def cdf(data_r, data_f, xlabel: str = 'Values', ylabel: str = 'Cumulative Sum', ax=None):\n x1 = np.sort(data_r)\n x2 = np.sort(data_f)\n y = np.arange(1, len(data_r) + 1) / len(data_r)\n\n ax = ax if ax else plt.subplots()[1]\n\n axis_font = {'size': '14'}\n ax.set_xlabel(xlabel, **axis_font)\n ax.set_ylabel(ylabel, **axis_font)\n\n ax.grid()\n ax.plot(x1, y, marker='o', linestyle='none', label='Real', ms=8)\n ax.plot(x2, y, marker='o', linestyle='none', label='Synthetic', alpha=0.5)\n ax.tick_params(axis='both', which='major', labelsize=8)\n ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.1), ncol=3)\n\n # If labels are strings, rotate them vertical\n if isinstance(data_r, pd.Series) and data_r.dtypes == 'object':\n ax.set_xticklabels(data_r.value_counts().sort_index().index, rotation='vertical')\n\n if ax is None:\n plt.show()", "def cf(self):\n if hasattr(self, \"_cf_cache\"):\n return self._cf_cache\n return np.array([conf.cf for conf in self.configurations], dtype=int)", "def crd(self):\r\n return self.__trajectory[0]", "def getFactura(self): \n return self.caja", "def getFactura(self): \n return self.caja", "def _scfconv_from_ccdata(self):\n\n lines = [f\"scf-first 1 THROUGH {len(self.ccdata.scfenergies)}\"]\n\n for scfenergy in self.ccdata.scfenergies:\n lines.append(f\"{scfenergy:15.6f}\")\n\n return lines", "def get_ctf(ima):\n\tfrom EMAN2 import EMAN2Ctf\n\tctf_params = ima.get_attr(\"ctf\")\t\n\treturn ctf_params.defocus, ctf_params.cs, ctf_params.voltage, ctf_params.apix, ctf_params.bfactor, ctf_params.ampcont, ctf_params.dfdiff, ctf_params.dfang", "def getcfix(self):\n cfix_ = ctypes.c_double()\n res = __library__.MSK_XX_getcfix(self.__nativep,ctypes.byref(cfix_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n cfix_ = cfix_.value\n _cfix_return_value = cfix_\n return (_cfix_return_value)", "def c(self):\n if self.__c is not None:\n return self.__c\n else:\n raise ValueError(\"Run .fit() first!\")", "def cdf(self,x):\n if self.functionType == 'cdf':\n cdfValue = self.cdfFunc(x)\n else:\n cdfValue = self.pdfFunc.integral(self.data[0][0],x)\n return cdfValue", "def getcfix(self): # 3\n res,resargs = self.__obj.getcfix()\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _cfix_return_value = resargs\n return _cfix_return_value", "def get_crds_cc(self, axes=None, shaped=False):\n return self._src_crds.get_crds_cc(axes=axes, shaped=shaped)", "def cdf(self, x):\n\n pi = 3.1415926536\n mean = self.mean\n stddev = self.stddev\n\n x1 = (x - mean) / (stddev * (2 ** 0.5))\n\n erf1 = (2/pi**0.5)\n erf2 = (x1-((x1**3)/3)+((x1**5)/10)-((x1**7)/42)+((x1**9)/216))\n erf = erf1 * erf2\n cdf = (1/2)*(1+erf)\n\n return cdf", "def get_frequency(self):\r\n return self.f", "def tocsc(self):\n return self.tocsr().tocsc()", "def encoding_cctf(self):\n\n return self._encoding_cctf", "def cfunc_type(self):\n tif = ida_typeinf.tinfo_t()\n result = self.get_func_type(tif)\n if not result:\n return\n return tif", "def icdf(self, value):\n return self._normal.icdf(value)", "def covar_samp(self):\n if self.count <= 1:\n return None\n return self.Ck / (self.count - 1)", "def cdf(self,x):\n if self.base == 
'natural':\n cdfValue = (math.log(x)-self.lowerBound)/(self.upperBound-self.lowerBound)\n else:\n cdfValue = (math.log10(x)-self.lowerBound)/(self.upperBound-self.lowerBound)\n return cdfValue", "def get_cdt(self):\n return None if self.is_raw() else self.structure.compounddatatype", "def get_crd_fc(self, axis, shaped=False):\n return self._src_crds.get_fc(axis, shaped=shaped)", "def tcvs(self): \n return self._link_reg.tcvs", "def _ref_dc(self):\n val_ref = self.meta[globals._ref_ds_attr]\n ref_dc = parse(globals._ds_short_name_attr, val_ref)[0]\n return ref_dc", "def coherency(self):\r\n coherency = tsa.cache_to_coherency(self.cache, self.ij)\r\n\r\n return coherency", "def dc_coupled(self):\n return self._dc_coupled", "def getFunctionStyle(self,cpArray,X0):\n cpArray=tf.cast(tf.convert_to_tensor(cpArray),dtype=tf.float64)\n X0 = tf.cast(tf.convert_to_tensor(X0),dtype=tf.float64)\n func = self.lamda_computeCPdiff(X0)\n gatheredCps = tf.map_fn(func,cpArray,parallel_iterations=32,back_prop=False)\n return gatheredCps", "def cdf(self,x):\n if hasattr(x,'__len__'):\n returnCdf = np.array([self.cdf(i) for i in x])\n else:\n returnCdf = self._distribution.cdf(x)\n return returnCdf", "def cdf(self,x):\n sortedMapping = sorted(self.mapping.items(), key=operator.itemgetter(0))\n if x == sortedMapping[-1][0]:\n return 1.0\n if x in self.values:\n cumulative=0.0\n for element in sortedMapping:\n cumulative += element[1]\n if x == ( float(element[0]) if self.isFloat else element[0] ):\n return cumulative\n else:\n if self.isFloat:\n cumulative=0.0\n for element in sortedMapping:\n cumulative += element[1]\n if x >= element[0]:\n return cumulative\n # if we reach this point we must error out\n self.raiseAnError(IOError,'Categorical distribution cannot calculate cdf for ' + str(x))", "def c(self):\n return self._c", "def _df_reg(self):\n return self.k", "def ctof(temp):\n return temp * 9/5 + 32 # functions should be surrounded by 2 blank lines", "def coherency(time_series, csd_method=None):\r\n if csd_method is None:\r\n csd_method = {'this_method': 'welch'} # The default\r\n\r\n f, fxy = get_spectra(time_series, csd_method)\r\n\r\n #A container for the coherencys, with the size and shape of the expected\r\n #output:\r\n c = np.zeros((time_series.shape[0],\r\n time_series.shape[0],\r\n f.shape[0]), dtype=complex) # Make sure it's complex\r\n\r\n for i in range(time_series.shape[0]):\r\n for j in range(i, time_series.shape[0]):\r\n c[i][j] = coherency_spec(fxy[i][j], fxy[i][i], fxy[j][j])\r\n\r\n idx = tril_indices(time_series.shape[0], -1)\r\n c[idx[0], idx[1], ...] = c[idx[1], idx[0], ...].conj() # Make it symmetric\r\n\r\n return f, c", "def c(self):\r\n return self.__c", "def cdf(self,x):\n return self.categoricalDist.cdf(x)", "def get_cfi(data):\n if data is None:\n raise EmptyDataError('[!] 
Invalid data value')\n\n result = TA.CFI(data)\n if result is None:\n raise IndicatorException\n return result", "def get_fcc_level(self):\n raise NotImplementedError", "def cactiStyle(requestContext, seriesList, system=None):\n if 0 == len(seriesList):\n return seriesList\n if system:\n fmt = lambda x:\"%.2f%s\" % format_units(x,system=system)\n else:\n fmt = lambda x:\"%.2f\"%x\n nameLen = max([0] + [len(getattr(series,\"name\")) for series in seriesList])\n lastLen = max([0] + [len(fmt(int(safeLast(series) or 3))) for series in seriesList]) + 3\n maxLen = max([0] + [len(fmt(int(safeMax(series) or 3))) for series in seriesList]) + 3\n minLen = max([0] + [len(fmt(int(safeMin(series) or 3))) for series in seriesList]) + 3\n for series in seriesList:\n name = series.name\n last = safeLast(series)\n maximum = safeMax(series)\n minimum = safeMin(series)\n if last is None:\n last = NAN\n else:\n last = fmt(float(last))\n\n if maximum is None:\n maximum = NAN\n else:\n maximum = fmt(float(maximum))\n if minimum is None:\n minimum = NAN\n else:\n minimum = fmt(float(minimum))\n\n series.name = \"%*s Current:%*s Max:%*s Min:%*s \" % \\\n (-nameLen, series.name,\n -lastLen, last,\n -maxLen, maximum,\n -minLen, minimum)\n return seriesList", "def getScatteringSignal(self):\r\n\t\treturn self.scatData", "def cdf(self,x):\n if self.method == 'spline':\n coordinate = distribution1D.vectord_cxx(len(x))\n for i in range(len(x)):\n coordinate[i] = x[i]\n cdfValue = self._distribution.cdf(coordinate)\n else:\n self.raiseAnError(NotImplementedError,'cdf not yet implemented for ' + self.method + ' method')\n return cdfValue", "def getSeriesFK(self):\n return self.getDbRecord().getColumnValue(SERIES_COLUMN)", "def scatters(self):\n\t\treturn self._scatter", "def ct(self):\n return self._ct", "def dc(self):\n return np.array(self['dc'], dtype=np.float32) / 1000", "def n_cf(self):\n return self._configurations[0].n_cf", "def get_covid_term() -> pd.DataFrame:\n return NOTICE_GETTER.term", "def get_cb_model_freq():\n\talexander_orig_dr1 = 1361.25 * u.MHz\n\treturn alexander_orig_dr1", "def cci(self) -> float:\n return self._cci", "def cole_coeff(self):\n return self.diseq_coeff(standardize=True)", "def get_series(series):\n if series == 'acs1':\n return census.acs1dp\n elif series == 'acs5':\n return census.acs5\n elif series == 'sf1':\n return census.sf1\n elif series == 'sf3':\n return census.sf3\n else:\n return None", "def fiftyk_dac_num(self):\n return self._fiftyk_dac_num", "def _get_concentration(self, state):\n return self.fc(state.float_features).exp() + self.EPSILON", "def getCF(self):\r\n return self.lEq;", "def csb(self):\n\n def myfunc(incident_energy):\n return XrayLibWrap_Energy(self.Z, \"csb\", incident_energy)\n\n return myfunc", "def cdf(self, value):\n return self._normal.cdf(value)", "def cdf(self, x, mu, **kwargs):\n if not hasattr(self, \"_cdfsample\"):\n # - just get it once\n self._cdfsample = self.rvs(mu, size=1e4, nsample=1e4)\n if is_arraylike(x):\n return np.asarray([float(len(self._cdfsample[self._cdfsample<x_]))/ 1e4\n for x_ in x])\n return float(len(self._cdfsample[self._cdfsample<x]))/ 1e4", "def calculate_cdf(self):\n df_cdf = self.get_cdf_data()\n return ECDF(df_cdf['minute'])", "def dcpl(self):\n # easy enough\n return self._dataset._pyre_id.dcpl", "def getConversionFactor(self):\n return _libsbml.Species_getConversionFactor(self)", "def cdf(self, x):\n\n if type(x) is np.float64:\n x = np.array([x])\n\n ndx = [np.argmin(np.abs(self.xs - x[i])) for i in range(x.size)]\n\n 
return self.ys[ndx]", "def decoding_cctf(self):\n\n return self._decoding_cctf", "def dc_data(self):\n return self._dc_data", "def make_factor_source(series):\n return ColumnDataSource(data={'factors': series.unique()})", "def fci(country_id: str, measure: _FCI_MEASURE = _FCI_MEASURE.FCI, *, source: str = None,\n real_time: bool = False, request_id: Optional[str] = None) -> pd.Series:\n if real_time:\n raise NotImplementedError('real-time FCI data is not available')\n\n type_ = QueryType(inflection.titleize(measure.value))\n if measure == _FCI_MEASURE.REAL_FCI or measure == _FCI_MEASURE.REAL_TWI_CONTRIBUTION:\n ds = Dataset('FCI')\n df = ds.get_data(geographyId=country_id)\n if measure == _FCI_MEASURE.REAL_FCI:\n measure = 'realFCI'\n else:\n measure = 'realTWIContribution'\n series = ExtendedSeries(dtype=float) if (measure not in df.columns) else ExtendedSeries(df[measure])\n series.dataset_ids = ('FCI',)\n return series\n\n q = GsDataApi.build_market_data_query([country_id], query_type=type_, source=source,\n real_time=real_time)\n df = _market_data_timed(q, request_id)\n return _extract_series_from_df(df, type_, True)", "def get_icdf(self, xx):\n return self.parent.ppf(xx)", "def get_viscous_cfl(discr, eos, dt, cv):\n return dt / get_viscous_timestep(discr, eos=eos, cv=cv)", "def n_cs(self):\n pass", "def ch_amount(self):\n return self.timeseries.shape[1]", "def tc(self):\n return np.sum(self.tcs)", "def cs(self):\n return self._cs", "def reverse_CDF(self):\n self.series_y = 1. - self.series_y\n self.quantile_series_y_lower = 1. - self.quantile_series_y_lower\n self.quantile_series_y_upper = 1. - self.quantile_series_y_upper", "def assemble_chart(self):\n return self.name", "def component_type(self):\n return 'ct'", "def getDataSetType(self):\n return self.__data_set_type__", "def getCDFValue(self, value):\n cdfValue = self.cdfSample.get(value, None)\n if cdfValue != None:\n return cdfValue\n \n cdfValue = self.cdfFunction(value)\n self.cdfSample[value] = cdfValue\n return cdfValue", "def cdf(self,x):\n coordinate = distribution1D.vectord_cxx(len(x))\n for i in range(len(x)):\n coordinate[i] = x[i]\n cdfValue = self._distribution.cdf(coordinate)\n return cdfValue", "def cdf(self,x):\n coordinate = distribution1D.vectord_cxx(len(x))\n for i in range(len(x)):\n coordinate[i] = x[i]\n cdfValue = self._distribution.cdf(coordinate)\n return cdfValue", "def cdf(self, x):\n from scipy.special import betainc\n sq_x = x * x\n return np.where(\n sq_x < 1., betainc(self.m / 2.0, self.n / 2.0, sq_x),\n np.ones_like(x))", "def cs(self):\n\n def myfunc(incident_energy):\n return XrayLibWrap_Energy(self.Z, \"cs\", incident_energy)\n\n return myfunc", "def cc(self):\n try:\n from scipy.sparse import cs_graph_components\n _, label = cs_graph_components(self.adjacency())\n except:\n pass\n lil = self.to_coo_matrix().tolil().rows.tolist()\n label = lil_cc(lil)\n return label", "def second_category_axis(self):\n return self.container['second_category_axis']", "def _get_traffic_class_cos(self):\n return self.__traffic_class_cos", "def calculate(self):\n\n return self.confusion_matrix.fn", "def __cnc(cls, sens_mv, we_c):\n if we_c is None:\n return None\n\n cnc = we_c / (sens_mv / 1000.0)\n\n # print(\"A4Datum__cnc: we_c:%s cnc:%f\" % (we_c, cnc), file=sys.stderr)\n\n return cnc", "def df_reg(self):\n return self._df_reg", "def cdf(self, alpha): #Plot empirical cfd with confidence interval\n x = self.x\n n = len(x)\n y = np.arange(1, n+1)/n\n \n #Computing confidence interval with the 
Dvoretzky–Kiefer–Wolfowitz method based on the empirical points\n F1 = []\n F2 = []\n for i in range(0, n):\n e = (((mt.log(2/alpha))/(2*n))**0.5) \n F1.append(y[i] - e)\n F2.append(y[i] + e) \n plt.plot(sorted(x), y, label='Empirical CDF')\n plt.plot(sorted(x), F1, linestyle='--', color='red', alpha = 0.8, lw = 0.9, label = 'Dvoretzky–Kiefer–Wolfowitz Confidence Bands')\n plt.plot(sorted(x), F2, linestyle='--', color='red', alpha = 0.8, lw = 0.9)\n plt.ylabel('Cumulative Distribution Function')\n plt.xlabel('Observed Data')\n plt.legend()\n plt.show()\n \n return(y)", "def cf_profile(self):\n return self.gen_profile() / self.sam_sys_inputs['system_capacity']", "def cf_profile(self):\n return self.gen_profile() / self.sam_sys_inputs['system_capacity']" ]
[ "0.6114998", "0.5954782", "0.5948087", "0.5920047", "0.59179395", "0.58852816", "0.5781732", "0.57313186", "0.57007056", "0.56663144", "0.5602908", "0.5576217", "0.55340517", "0.5467268", "0.5431777", "0.539905", "0.539905", "0.5388791", "0.5385939", "0.53720737", "0.53636706", "0.53411496", "0.5322548", "0.5310017", "0.5297604", "0.52846134", "0.5278062", "0.52724767", "0.52551216", "0.52371514", "0.5232755", "0.5206213", "0.52029634", "0.51924735", "0.5183661", "0.5183041", "0.5178247", "0.5177974", "0.51691633", "0.51678383", "0.5153432", "0.51469266", "0.5139363", "0.5131394", "0.51298964", "0.5124347", "0.5118544", "0.5118314", "0.51132095", "0.51117074", "0.5110755", "0.50963557", "0.50801843", "0.5073414", "0.50692683", "0.50578475", "0.50498366", "0.50484073", "0.5039423", "0.50301564", "0.5006748", "0.50036377", "0.49999183", "0.4998922", "0.49978065", "0.49955595", "0.4994862", "0.49862435", "0.49820098", "0.49731013", "0.49698755", "0.4968734", "0.4965747", "0.49655133", "0.4957994", "0.49565908", "0.49563527", "0.49514169", "0.49454102", "0.49274433", "0.49227017", "0.49187818", "0.49154907", "0.4914583", "0.49133956", "0.49106553", "0.48907107", "0.48899338", "0.48899338", "0.48897523", "0.48791623", "0.48748165", "0.4872317", "0.48694846", "0.48650694", "0.4864046", "0.48604745", "0.48586187", "0.48576757", "0.48576757" ]
0.5846474
6
Returns number of images in this data source.
def getNumberOfImages(self): return self.numberOfImages
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_image_count(self):\n return self._num_images", "def get_num_of_images(self):", "def num_of_images(self):\n return len(self.data['image_infos'])", "def numberOfImages(self):\n return len(self.imageList)", "def __len__(self):\n return self.num_images", "def __len__(self):\n return self.num_images", "def __len__(self):\n return self.num_images", "def __len__(self):\n return self.num_images", "def __len__(self):\n return self.num_images", "def count_images(self):\n\t\treturn self.session.query(Image.id).count()", "def count(self):\n \n return len(self.img_lst)", "def get_n_images(self) -> int:\n try:\n return self.header[\"NumberOfImagesInMosaic\"]\n except KeyError:\n raise KeyError(messages.MISSING_NUMBER_OF_IMAGES)", "def getDataSetCount(self):\n\t\treturn int(self.numberOfImages / self.slicesPerTimepoint)", "def size(self):\n\t\t\treturn len(self.image_ids)", "def __len__(self):\n\n return len(self.images)", "def __len__(self):\n return len(self.images)", "def __len__(self):\n return len(self.images)", "def __len__(self):\n return int(np.ceil(len(self.image_filenames) / (self.batch_size)))", "def __len__(self):\n return len(self.db.list_nodes('/images'))", "def __len__(self):\n return math.ceil(self.number_of_images / self.batch_size)", "def _get_num_objects_per_step(self, worker_id=0):\n data_layer = self.get_data_layer(worker_id)\n num_images = tf.shape(data_layer.input_tensors['source_tensors'][0])[0]\n return num_images", "def __len__(self) -> int:\n import h5py\n\n with h5py.File(\n os.path.join(self.root, self.data_dir, self.img_file_name), \"r\"\n ) as f:\n num_datapoints: int = f[self.split][\"pv_log\"].shape[0]\n\n return num_datapoints", "def GetNumberOfMemoryImages(self):\r\n number = INT()\r\n r = CALL('GetNumberOfMemoryImages',self,INT(self.seq),byref(number))\r\n return self.CheckForSuccessError(r)", "def __len__(self):\n return self.images.size(0)", "def count(self):\n return self.data_container.count", "def count(self):\r\n return self.data_array.size", "def numPixels(self):\n self._logger.debug(\"numPixels\")\n return self.count", "def getNumberPhoto(guide):\n return len(guide.photos.all())", "def __len__(self):\n return len(self.imgs_path)", "def __len__(self):\n\n return math.ceil(len(self.img_files) * self.gen_count / self.batch_size)", "def n_images_acquired(self):\n n = ct.c_long()\n self.lib.GetTotalNumberImagesAcquired(ct.pointer(n))\n return n.value", "def __len__(self):\n return len(self.image_paths)", "def __len__(self):\n return len(self.image_paths)", "def __len__(self):\n return len(self.image_paths)", "def data_count(self):\n return(len(self.data))", "def __len__(self):\n return len(self.image_file_names)", "def get_num_eval_images(hparams):\n num_images_map = {\n 'imagenet': 50000,\n 'cifar10': 10000,\n }\n if hparams.input_data.input_fn not in num_images_map:\n raise ValueError(\n f'Unknown dataset size for input_fn {hparams.input_data.input_fn}')\n\n return num_images_map[hparams.input_data.input_fn]", "def nb_im(self, code):\n return len(os.listdir(self._im_dir[code]))", "def numPixels(self):\n\t\treturn self.size", "def numPixels(self):\n\t\treturn self.size", "def width(self) -> int:\n return self._image_data.width", "def number_of_photos(self):\n return Submission.objects.filter(theme__contest=self).count()", "def __len__(self):\n return len(self.img_paths)", "def get_data_ninstances(self):\n return self.data_ninstances", "def __len__(self):\n return len(self.image_names)", "def count(self):\n return len(self.wallpapers)", "def 
__len__(self):\n return self._dataset.size(dirs=self._dirs)", "def dimension_count(self):\n return self._dimensionCount", "def data_count(self):\r\n\r\n shp = self.df.shape\r\n row_count = shp[0]\r\n return row_count", "def getNbRows(self):\n return self.data.shape[1]", "def number_of_data_nodes(self):\n return int(self._data['number_of_data_nodes'])", "def get_num_train_images(hparams):\n num_images_map = {\n 'imagenet': 1281167,\n 'cifar10': 50000,\n }\n if hparams.input_data.input_fn not in num_images_map:\n raise ValueError(\n f'Unknown dataset size for input_fn {hparams.input_data.input_fn}')\n\n num_images = num_images_map[hparams.input_data.input_fn]\n\n if hparams.input_data.max_samples > 0:\n return min(num_images, hparams.input_data.max_samples)\n return num_images", "def getNeuronCount(self):\n\t\treturn self.loader.getNeuronCount()", "def size(self):\n\t\treturn self._count", "def __number_of_files(self):\n self.__get_files()\n return len(self.files)", "def __len__(self):\r\n return len(self.img_names)", "def file_count(self) -> int:\n if self.dataset is None:\n raise ValueError('No known dataset found!')\n return self._max_file_count", "def __len__(self):\n return len(self.rimgdataset)", "def num_sources(self):\n return len(self._sources)", "def __len__( self ):\n return len( self._raster_data )", "def getCount(self):\n return _osgAnimation.Target_getCount(self)", "def max_scanned_images(self):\n return self._max_scanned_images", "def getNumDimensions(self):\n return len(self.di.keys())", "def getnrfiles(self):\n return len(self.filenames)", "def getSampleCount(self):\r\n return len(self._data)", "def __len__(self):\n # print(\"len: \" + str(math.floor(len([name for name in os.listdir(self.imgs_dir) if os.path.isfile(self.imgs_dir+'//'+name)])/self.batch_size)-1)\n return math.floor(len([name for name in os.listdir(self.imgs_dir) if\n os.path.isfile(self.imgs_dir + '//' + name)]) / self.batch_size)", "def count_data(self):\n try:\n ndata = len(self.x)\n logger.info(\"Number of data points: {0}\".format(ndata))\n except AttributeError:\n logger.error(\"Data object has not been defined\")\n ndata = 0\n return ndata", "def getNumRows(self) -> int:\n ...", "def count(self):\n return self.size()", "def num_entries(self):\r\n raise NotImplementedError('BaseDataSource::num_entries not specified.')", "def getNumData(self):\n return len(self.data)", "def num_rows(self) -> str:\n return pulumi.get(self, \"num_rows\")", "def GetNumberOfObjects(self) -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUS3ISS3_GetNumberOfObjects(self)", "def count(self):\n self._fetch_if_needed()\n return len(self._result_cache.get('rows', []))", "def GetNumberOfObjects(self) -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterISS3IUS3_GetNumberOfObjects(self)", "def size(self):\n ret = 0\n for ii in self.__data:\n ret += int(ii.get_size())\n return ret", "def height(self) -> int:\n return self._image_data.height", "def GetNumberOfObjects(self) -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUS3IUS3_GetNumberOfObjects(self)", "def count(self) -> int:\n return self._adapter.count()", "def dataset_size(self):\n if not self._dataset_size:\n # pylint: disable=attribute-defined-outside-init\n self._dataset_size = count_file_lines(\n self._hparams.source_dataset.files)\n return self._dataset_size", "def getNrEntries(self):\n return len(self.data)", "def 
getNumTiles(self):\n return (self.width) * (self.height)", "async def totalImages(self, tags):\n with async_timeout.timeout(10):\n url = self.urlGen(tags=tags, PID=0)\n async with self.session.get(url=url) as XMLData:\n XMLData = await XMLData.read()\n XMLData = ET.XML(XMLData)\n XML = self.ParseXML(XMLData)\n return int(XML['posts']['@count'])\n return None", "def GetNumberOfObjects(self) -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIF3IUS3_GetNumberOfObjects(self)", "def GetNumberOfObjects(self) -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUS2ISS2_GetNumberOfObjects(self)", "def __len__(self):\n return len(self.image_info)", "def __len__(self):\n return len(self.image_info)", "def getNumTiles(self):\n return self.height * self.width", "def getNumRows(self):\n return self.__rows", "def _get_image_dimensions(self):\n\t\timageWidth = int(self.labels['IMAGE']['LINE_SAMPLES'])\n\t\timageHeight = int(self.labels['IMAGE']['LINES'])\n\t\treturn imageWidth, imageHeight", "def numberFiles(self):\n return self.n", "def get_data_dimensions(self):\n return image_utils.convert_shape_indexing(self._get_data_dimensions_rc(),\"rc\",self.image_indexing)", "def band_count(self):\n return self.dataset.RasterCount if self.dataset else None", "def GetNumberOfObjects(self) -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterISS3ISS3_GetNumberOfObjects(self)", "def GetNumberOfObjects(self) -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIF3ISS3_GetNumberOfObjects(self)", "def nsim(self):\n return len(self)", "def get_count(self):\n\n\t\treturn self.__count", "def get_num_records(self):\n return self.__num_records", "def count(self):\n return len(self.read_ints())", "def voxel_count(self):\n return self.cols * self.rows * self.sections" ]
[ "0.84259796", "0.83975625", "0.83702207", "0.80581206", "0.77916324", "0.77916324", "0.77916324", "0.77916324", "0.77916324", "0.77843183", "0.7672272", "0.7612054", "0.74140126", "0.7412375", "0.72965276", "0.7274649", "0.7274649", "0.72326416", "0.72228235", "0.7218599", "0.70864445", "0.708459", "0.70683193", "0.7059244", "0.69326055", "0.68178713", "0.68022555", "0.6799224", "0.6784809", "0.672149", "0.6714932", "0.6709631", "0.6709631", "0.6709631", "0.66932553", "0.66850114", "0.66800624", "0.6673351", "0.6670925", "0.6670925", "0.666576", "0.66079414", "0.6593638", "0.65874344", "0.65594745", "0.65571356", "0.6538444", "0.6503468", "0.64994514", "0.6495293", "0.64683235", "0.6468276", "0.6465324", "0.64622283", "0.6455188", "0.6454062", "0.6446032", "0.64350474", "0.6434601", "0.64269894", "0.6423044", "0.6415868", "0.6402428", "0.6398995", "0.63773715", "0.637591", "0.63552314", "0.6350531", "0.633134", "0.6326437", "0.6321258", "0.6320779", "0.63159865", "0.6315664", "0.63123333", "0.63069254", "0.63001543", "0.62974346", "0.6296235", "0.6294599", "0.629305", "0.62883407", "0.6288087", "0.62861985", "0.6283187", "0.62821525", "0.62821525", "0.6281817", "0.62785566", "0.6265037", "0.6259835", "0.62595475", "0.6254175", "0.62541527", "0.62522084", "0.62486404", "0.6243422", "0.62416446", "0.62396556", "0.62332577" ]
0.85196555
0