Dataset preview — columns: text_prompt (string, length 157 to 13.1k), code_prompt (string, length 7 to 19.8k).
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _instance(self, cls, *args, **kwargs): """Return the instance. :param cls: the class to create the instance from :param args: given to the ``__init__`` method :param kwargs: given to the ``__init__`` method """
    logger.debug(f'args: {args}, kwargs: {kwargs}')
    return cls(*args, **kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def instance(self, name=None, *args, **kwargs): """Create a new instance using key ``name``. :param name: the name of the class (by default) or the key name of the class used to find the class :param args: given to the ``__init__`` method :param kwargs: given to the ``__init__`` method """
    logger.info(f'new instance of {name}')
    t0 = time()
    name = self.default_name if name is None else name
    logger.debug(f'creating instance of {name}')
    class_name, params = self._class_name_params(name)
    cls = self._find_class(class_name)
    params.update(kwargs)
    if self._has_init_config(cls):
        logger.debug(f'found config parameter')
        params['config'] = self.config
    if self._has_init_name(cls):
        logger.debug(f'found name parameter')
        params['name'] = name
    if logger.level >= logging.DEBUG:
        for k, v in params.items():
            logger.debug(f'populating {k} -> {v} ({type(v)})')
    inst = self._instance(cls, *args, **params)
    logger.info(f'created {name} instance of {cls.__name__} ' +
                f'in {(time() - t0):.2f}s')
    return inst
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def load(self, name=None, *args, **kwargs):
    "Load the instance of the object from the stash."
    inst = self.stash.load(name)
    if inst is None:
        inst = self.instance(name, *args, **kwargs)
    logger.debug(f'loaded (conf mng) instance: {inst}')
    return inst
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def dump(self, name: str, inst):
    "Save the object instance to the stash."
    self.stash.dump(name, inst)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def clear_global(self): """Clear only any cached global data. """
    vname = self.varname
    logger.debug(f'global clearing {vname}')
    if vname in globals():
        logger.debug('removing global instance var: {}'.format(vname))
        del globals()[vname]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def clear(self): """Clear the data, and thus, force it to be created on the next fetch. This is done by removing the attribute from ``owner``, deleting it from globals and removing the file from the disk. """
    vname = self.varname
    if self.path.exists():
        logger.debug('deleting cached work: {}'.format(self.path))
        self.path.unlink()
    if self.owner is not None and hasattr(self.owner, vname):
        logger.debug('removing instance var: {}'.format(vname))
        delattr(self.owner, vname)
    self.clear_global()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _load_or_create(self, *argv, **kwargs): """Invoke the file system operations to get the data, or create work. If the file does not exist, call ``__do_work__`` and save the result. """
    if self.path.exists():
        self._info('loading work from {}'.format(self.path))
        with open(self.path, 'rb') as f:
            obj = pickle.load(f)
    else:
        self._info('saving work to {}'.format(self.path))
        with open(self.path, 'wb') as f:
            obj = self._do_work(*argv, **kwargs)
            pickle.dump(obj, f)
    return obj
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def has_data(self): """Return whether the stash has any data available."""
    if not hasattr(self, '_has_data'):
        try:
            next(iter(self.delegate.keys()))
            self._has_data = True
        except StopIteration:
            self._has_data = False
    return self._has_data
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _get_instance_path(self, name):
    "Return a path to the pickled data with key ``name``."
    fname = self.pattern.format(**{'name': name})
    logger.debug(f'path {self.create_path}: {self.create_path.exists()}')
    self._create_path_dir()
    return Path(self.create_path, fname)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def shelve(self): """Return an opened shelve object. """
    logger.info('creating shelve data')
    fname = str(self.create_path.absolute())
    inst = sh.open(fname, writeback=self.writeback)
    self.is_open = True
    return inst
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def delete(self, name=None):
    "Delete the shelve data file."
    logger.info('clearing shelve data')
    self.close()
    for path in Path(self.create_path.parent, self.create_path.name), \
            Path(self.create_path.parent, self.create_path.name + '.db'):
        logger.debug(f'clearing {path} if exists: {path.exists()}')
        if path.exists():
            path.unlink()
            break
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def close(self):
    "Close the shelve object, which is needed for data consistency."
    if self.is_open:
        logger.info('closing shelve data')
        try:
            self.shelve.close()
            self._shelve.clear()
        except Exception:
            self.is_open = False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _map(self, data_item):
    "Map ``data_item`` separately in each thread."
    delegate = self.delegate
    logger.debug(f'mapping: {data_item}')
    if self.clobber or not self.exists(data_item.id):
        logger.debug(f'exist: {data_item.id}: {self.exists(data_item.id)}')
        delegate.dump(data_item.id, data_item)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load_all(self, workers=None, limit=None, n_expected=None): """Load all instances with multiple threads. :param workers: number of workers to use to load instances, which defaults to what was given in the class initializer :param limit: return a maximum, which defaults to no limit :param n_expected: rerun the iteration on the data if we didn't find enough data, or more specifically, if the number of found data points is less than ``n_expected``; defaults to all """
    if not self.has_data:
        self._preempt(True)
        # we did the best we could (avoid repeat later in this method)
        n_expected = 0
    keys = tuple(self.delegate.keys())
    if n_expected is not None and len(keys) < n_expected:
        self._preempt(True)
        keys = self.delegate.keys()
    keys = it.islice(keys, limit) if limit is not None else keys
    pool = self._create_thread_pool(workers)
    logger.debug(f'workers={workers}, keys: {keys}')
    try:
        return iter(pool.map(self.delegate.load, keys))
    finally:
        pool.close()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def spawn_api(self, app, decorator=None): """Auto-generate server endpoints implementing the API into this Flask app"""
    if decorator:
        assert type(decorator).__name__ == 'function'
    self.is_server = True
    self.app = app
    if self.local:
        # Re-generate client callers, this time as local and passing them
        # the app
        self._generate_client_callers(app)
    return spawn_server_api(self.name, app, self.api_spec,
                            self.error_callback, decorator)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _update_expression(self):
    '''Update internal expression.'''
    self._expression = re.compile(
        '^{0}(?P<index>(?P<padding>0*)\d+?){1}$'
        .format(re.escape(self.head), re.escape(self.tail))
    )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def is_contiguous(self):
    '''Return whether entire collection is contiguous.'''
    previous = None
    for index in self.indexes:
        if previous is None:
            previous = index
            continue
        if index != (previous + 1):
            return False
        previous = index
    return True
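For illustration only (not part of the original prompt/solution pair), the same contiguity check can be sketched on a plain sorted list of indexes; the helper name below is hypothetical:

def indexes_contiguous(indexes):
    # True when every index follows its predecessor by exactly one
    return all(b == a + 1 for a, b in zip(indexes, indexes[1:]))

print(indexes_contiguous([1, 2, 3, 4]))  # True
print(indexes_contiguous([1, 2, 4]))     # False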
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def holes(self):
    '''Return holes in collection.

    Return :py:class:`~clique.collection.Collection` of missing indexes.

    '''
    missing = set([])
    previous = None
    for index in self.indexes:
        if previous is None:
            previous = index
            continue
        if index != (previous + 1):
            missing.update(range(previous + 1, index))
        previous = index
    return Collection(self.head, self.tail, self.padding, indexes=missing)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def separate(self):
    '''Return contiguous parts of collection as separate collections.

    Return as list of :py:class:`~clique.collection.Collection` instances.

    '''
    collections = []
    start = None
    end = None
    for index in self.indexes:
        if start is None:
            start = index
            end = start
            continue
        if index != (end + 1):
            collections.append(
                Collection(self.head, self.tail, self.padding,
                           indexes=set(range(start, end + 1)))
            )
            start = index
        end = index
    if start is None:
        collections.append(
            Collection(self.head, self.tail, self.padding)
        )
    else:
        collections.append(
            Collection(self.head, self.tail, self.padding,
                       indexes=range(start, end + 1))
        )
    return collections
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def format_check(settings): """ Check the format of an osmnet_config object. Parameters settings : dict osmnet_config as a dictionary Returns ------- Nothing """
    valid_keys = ['logs_folder', 'log_file', 'log_console', 'log_name',
                  'log_filename', 'keep_osm_tags']

    for key in list(settings.keys()):
        assert key in valid_keys, \
            ('{} not found in list of valid configuration keys').format(key)
        assert isinstance(key, str), ('{} must be a string').format(key)
        if key == 'keep_osm_tags':
            assert isinstance(settings[key], list), \
                ('{} must be a list').format(key)
            for value in settings[key]:
                assert all(isinstance(element, str) for element in value), \
                    'all elements must be a string'
        if key == 'log_file' or key == 'log_console':
            assert isinstance(settings[key], bool), \
                ('{} must be boolean').format(key)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def to_dict(self): """ Return a dict representation of an osmnet osmnet_config instance. """
    return {'logs_folder': self.logs_folder,
            'log_file': self.log_file,
            'log_console': self.log_console,
            'log_name': self.log_name,
            'log_filename': self.log_filename,
            'keep_osm_tags': self.keep_osm_tags
            }
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def osm_filter(network_type): """ Create a filter to query Overpass API for the specified OSM network type. Parameters network_type : string, {'walk', 'drive'} denoting the type of street network to extract Returns ------- osm_filter : string """
    filters = {}

    # drive: select only roads that are drivable by normal 2 wheel drive
    # passenger vehicles both private and public roads. Filter out
    # un-drivable roads and service roads tagged as parking, driveway,
    # or emergency-access
    filters['drive'] = ('["highway"!~"cycleway|footway|path|pedestrian|steps'
                        '|track|proposed|construction|bridleway|abandoned'
                        '|platform|raceway|service"]'
                        '["motor_vehicle"!~"no"]["motorcar"!~"no"]'
                        '["service"!~"parking|parking_aisle|driveway'
                        '|emergency_access"]')

    # walk: select only roads and pathways that allow pedestrian access both
    # private and public pathways and roads.
    # Filter out limited access roadways and allow service roads
    filters['walk'] = ('["highway"!~"motor|proposed|construction|abandoned'
                       '|platform|raceway"]["foot"!~"no"]'
                       '["pedestrians"!~"no"]')

    if network_type in filters:
        osm_filter = filters[network_type]
    else:
        raise ValueError('unknown network_type "{}"'.format(network_type))

    return osm_filter
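As an illustration (not part of the original pair), assuming the osm_filter function above is in scope, the return value is simply the Overpass way-filter string for the requested network type:

walk_filter = osm_filter('walk')
# The filter string excludes ways tagged foot=no or pedestrians=no, among others
print(walk_filter)

osm_filter('rail')  # an unknown network_type raises ValueError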
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def osm_net_download(lat_min=None, lng_min=None, lat_max=None, lng_max=None, network_type='walk', timeout=180, memory=None, max_query_area_size=50*1000*50*1000, custom_osm_filter=None): """ Download OSM ways and nodes within a bounding box from the Overpass API. Parameters lat_min : float southern latitude of bounding box lng_min : float eastern longitude of bounding box lat_max : float northern latitude of bounding box lng_max : float western longitude of bounding box network_type : string Specify the network type where value of 'walk' includes roadways where pedestrians are allowed and pedestrian pathways and 'drive' includes driveable roadways. timeout : int the timeout interval for requests and to pass to Overpass API memory : int server memory allocation size for the query, in bytes. If none, server will use its default allocation size max_query_area_size : float max area for any part of the geometry, in the units the geometry is in: any polygon bigger will get divided up for multiple queries to Overpass API (default is 50,000 * 50,000 units (ie, 50km x 50km in area, if units are meters)) custom_osm_filter : string, optional specify custom arguments for the way["highway"] query to OSM. Must follow Overpass API schema. For example to request highway ways that are service roads use: '["highway"="service"]' Returns ------- response_json : dict """
# create a filter to exclude certain kinds of ways based on the requested # network_type if custom_osm_filter is None: request_filter = osm_filter(network_type) else: request_filter = custom_osm_filter response_jsons_list = [] response_jsons = [] # server memory allocation in bytes formatted for Overpass API query if memory is None: maxsize = '' else: maxsize = '[maxsize:{}]'.format(memory) # define the Overpass API query # way["highway"] denotes ways with highway keys and {filters} returns # ways with the requested key/value. the '>' makes it recurse so we get # ways and way nodes. maxsize is in bytes. # turn bbox into a polygon and project to local UTM polygon = Polygon([(lng_max, lat_min), (lng_min, lat_min), (lng_min, lat_max), (lng_max, lat_max)]) geometry_proj, crs_proj = project_geometry(polygon, crs={'init': 'epsg:4326'}) # subdivide the bbox area poly if it exceeds the max area size # (in meters), then project back to WGS84 geometry_proj_consolidated_subdivided = consolidate_subdivide_geometry( geometry_proj, max_query_area_size=max_query_area_size) geometry, crs = project_geometry(geometry_proj_consolidated_subdivided, crs=crs_proj, to_latlong=True) log('Requesting network data within bounding box from Overpass API ' 'in {:,} request(s)'.format(len(geometry))) start_time = time.time() # loop through each polygon in the geometry for poly in geometry: # represent bbox as lng_max, lat_min, lng_min, lat_max and round # lat-longs to 8 decimal places to create # consistent URL strings lng_max, lat_min, lng_min, lat_max = poly.bounds query_template = '[out:json][timeout:{timeout}]{maxsize};' \ '(way["highway"]' \ '{filters}({lat_min:.8f},{lng_max:.8f},' \ '{lat_max:.8f},{lng_min:.8f});>;);out;' query_str = query_template.format(lat_max=lat_max, lat_min=lat_min, lng_min=lng_min, lng_max=lng_max, filters=request_filter, timeout=timeout, maxsize=maxsize) response_json = overpass_request(data={'data': query_str}, timeout=timeout) response_jsons_list.append(response_json) log('Downloaded OSM network data within bounding box from Overpass ' 'API in {:,} request(s) and' ' {:,.2f} seconds'.format(len(geometry), time.time()-start_time)) # stitch together individual json results for json in response_jsons_list: try: response_jsons.extend(json['elements']) except KeyError: pass # remove duplicate records resulting from the json stitching start_time = time.time() record_count = len(response_jsons) if record_count == 0: raise Exception('Query resulted in no data. Check your query ' 'parameters: {}'.format(query_str)) else: response_jsons_df = pd.DataFrame.from_records(response_jsons, index='id') nodes = response_jsons_df[response_jsons_df['type'] == 'node'] nodes = nodes[~nodes.index.duplicated(keep='first')] ways = response_jsons_df[response_jsons_df['type'] == 'way'] ways = ways[~ways.index.duplicated(keep='first')] response_jsons_df = pd.concat([nodes, ways], axis=0) response_jsons_df.reset_index(inplace=True) response_jsons = response_jsons_df.to_dict(orient='records') if record_count - len(response_jsons) > 0: log('{:,} duplicate records removed. Took {:,.2f} seconds'.format( record_count - len(response_jsons), time.time() - start_time)) return {'elements': response_jsons}
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def overpass_request(data, pause_duration=None, timeout=180, error_pause_duration=None): """ Send a request to the Overpass API via HTTP POST and return the JSON response Parameters data : dict or OrderedDict key-value pairs of parameters to post to Overpass API pause_duration : int how long to pause in seconds before requests, if None, will query Overpass API status endpoint to find when next slot is available timeout : int the timeout interval for the requests library error_pause_duration : int how long to pause in seconds before re-trying requests if error Returns ------- response_json : dict """
    # define the Overpass API URL, then construct a GET-style URL
    url = 'http://www.overpass-api.de/api/interpreter'

    start_time = time.time()
    log('Posting to {} with timeout={}, "{}"'.format(url, timeout, data))
    response = requests.post(url, data=data, timeout=timeout)

    # get the response size and the domain, log result
    size_kb = len(response.content) / 1000.
    domain = re.findall(r'//(?s)(.*?)/', url)[0]
    log('Downloaded {:,.1f}KB from {} in {:,.2f} seconds'
        .format(size_kb, domain, time.time()-start_time))

    try:
        response_json = response.json()
        if 'remark' in response_json:
            log('Server remark: "{}"'.format(response_json['remark']),
                level=lg.WARNING)
    except Exception:
        # 429 = 'too many requests' and 504 = 'gateway timeout' from server
        # overload. handle these errors by recursively
        # calling overpass_request until a valid response is achieved
        if response.status_code in [429, 504]:
            # pause for error_pause_duration seconds before re-trying request
            if error_pause_duration is None:
                error_pause_duration = get_pause_duration()
            log('Server at {} returned status code {} and no JSON data. '
                'Re-trying request in {:.2f} seconds.'
                .format(domain, response.status_code, error_pause_duration),
                level=lg.WARNING)
            time.sleep(error_pause_duration)
            response_json = overpass_request(data=data,
                                             pause_duration=pause_duration,
                                             timeout=timeout)
        # else, this was an unhandled status_code, throw an exception
        else:
            log('Server at {} returned status code {} and no JSON data'
                .format(domain, response.status_code), level=lg.ERROR)
            raise Exception('Server returned no JSON data.\n{} {}\n{}'
                            .format(response, response.reason, response.text))

    return response_json
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_pause_duration(recursive_delay=5, default_duration=10): """ Check the Overpass API status endpoint to determine how long to wait until next slot is available. Parameters recursive_delay : int how long to wait between recursive calls if server is currently running a query default_duration : int if fatal error, function falls back on returning this value Returns ------- pause_duration : int """
try: response = requests.get('http://overpass-api.de/api/status') status = response.text.split('\n')[3] status_first_token = status.split(' ')[0] except Exception: # if status endpoint cannot be reached or output parsed, log error # and return default duration log('Unable to query http://overpass-api.de/api/status', level=lg.ERROR) return default_duration try: # if first token is numeric, it indicates the number of slots # available - no wait required available_slots = int(status_first_token) pause_duration = 0 except Exception: # if first token is 'Slot', it tells you when your slot will be free if status_first_token == 'Slot': utc_time_str = status.split(' ')[3] utc_time = date_parser.parse(utc_time_str).replace(tzinfo=None) pause_duration = math.ceil( (utc_time - dt.datetime.utcnow()).total_seconds()) pause_duration = max(pause_duration, 1) # if first token is 'Currently', it is currently running a query so # check back in recursive_delay seconds elif status_first_token == 'Currently': time.sleep(recursive_delay) pause_duration = get_pause_duration() else: # any other status is unrecognized - log an error and return # default duration log('Unrecognized server status: "{}"'.format(status), level=lg.ERROR) return default_duration return pause_duration
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def project_geometry(geometry, crs, to_latlong=False): """ Project a shapely Polygon or MultiPolygon from WGS84 to UTM, or vice-versa Parameters geometry : shapely Polygon or MultiPolygon the geometry to project crs : int the starting coordinate reference system of the passed-in geometry to_latlong : bool if True, project from crs to WGS84, if False, project from crs to local UTM zone Returns ------- geometry_proj, crs : tuple (projected shapely geometry, crs of the projected geometry) """
    gdf = gpd.GeoDataFrame()
    gdf.crs = crs
    gdf.name = 'geometry to project'
    gdf['geometry'] = None
    gdf.loc[0, 'geometry'] = geometry
    gdf_proj = project_gdf(gdf, to_latlong=to_latlong)
    geometry_proj = gdf_proj['geometry'].iloc[0]
    return geometry_proj, gdf_proj.crs
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def process_node(e): """ Process a node element entry into a dict suitable for going into a Pandas DataFrame. Parameters e : dict individual node element in downloaded OSM json Returns ------- node : dict """
    node = {'id': e['id'],
            'lat': e['lat'],
            'lon': e['lon']}
    if 'tags' in e:
        if e['tags'] is not np.nan:
            for t, v in list(e['tags'].items()):
                if t in config.settings.keep_osm_tags:
                    node[t] = v
    return node
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def process_way(e): """ Process a way element entry into a list of dicts suitable for going into a Pandas DataFrame. Parameters e : dict individual way element in downloaded OSM json Returns ------- way : dict waynodes : list of dict """
    way = {'id': e['id']}
    if 'tags' in e:
        if e['tags'] is not np.nan:
            for t, v in list(e['tags'].items()):
                if t in config.settings.keep_osm_tags:
                    way[t] = v
    # nodes that make up a way
    waynodes = []
    for n in e['nodes']:
        waynodes.append({'way_id': e['id'], 'node_id': n})
    return way, waynodes
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_network_osm_query(data): """ Convert OSM query data to DataFrames of ways and way-nodes. Parameters data : dict Result of an OSM query. Returns ------- nodes, ways, waynodes : pandas.DataFrame """
    if len(data['elements']) == 0:
        raise RuntimeError('OSM query results contain no data.')
    nodes = []
    ways = []
    waynodes = []
    for e in data['elements']:
        if e['type'] == 'node':
            nodes.append(process_node(e))
        elif e['type'] == 'way':
            w, wn = process_way(e)
            ways.append(w)
            waynodes.extend(wn)
    nodes = pd.DataFrame.from_records(nodes, index='id')
    ways = pd.DataFrame.from_records(ways, index='id')
    waynodes = pd.DataFrame.from_records(waynodes, index='way_id')
    return (nodes, ways, waynodes)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def ways_in_bbox(lat_min, lng_min, lat_max, lng_max, network_type, timeout=180, memory=None, max_query_area_size=50*1000*50*1000, custom_osm_filter=None): """ Get DataFrames of OSM data in a bounding box. Parameters lat_min : float southern latitude of bounding box lng_min : float eastern longitude of bounding box lat_max : float northern latitude of bounding box lng_max : float western longitude of bounding box network_type : {'walk', 'drive'}, optional Specify the network type where value of 'walk' includes roadways where pedestrians are allowed and pedestrian pathways and 'drive' includes driveable roadways. timeout : int the timeout interval for requests and to pass to Overpass API memory : int server memory allocation size for the query, in bytes. If none, server will use its default allocation size max_query_area_size : float max area for any part of the geometry, in the units the geometry is in: any polygon bigger will get divided up for multiple queries to Overpass API (default is 50,000 * 50,000 units (ie, 50km x 50km in area, if units are meters)) custom_osm_filter : string, optional specify custom arguments for the way["highway"] query to OSM. Must follow Overpass API schema. For example to request highway ways that are service roads use: '["highway"="service"]' Returns ------- nodes, ways, waynodes : pandas.DataFrame """
    return parse_network_osm_query(
        osm_net_download(lat_max=lat_max, lat_min=lat_min, lng_min=lng_min,
                         lng_max=lng_max, network_type=network_type,
                         timeout=timeout, memory=memory,
                         max_query_area_size=max_query_area_size,
                         custom_osm_filter=custom_osm_filter))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def intersection_nodes(waynodes): """ Returns a set of all the nodes that appear in 2 or more ways. Parameters waynodes : pandas.DataFrame Mapping of way IDs to node IDs as returned by `ways_in_bbox`. Returns ------- intersections : set Node IDs that appear in 2 or more ways. """
    counts = waynodes.node_id.value_counts()
    return set(counts[counts > 1].index.values)
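For illustration only (not part of the original pair), a toy waynodes table shows how the value_counts approach above flags shared nodes; the data is made up:

import pandas as pd

# node 3 appears in both way 1 and way 2, so it is an intersection
waynodes = pd.DataFrame({'way_id': [1, 1, 2, 2],
                         'node_id': [1, 3, 3, 4]}).set_index('way_id')
counts = waynodes.node_id.value_counts()
print(set(counts[counts > 1].index.values))  # the only shared node id is 3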
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def node_pairs(nodes, ways, waynodes, two_way=True): """ Create a table of node pairs with the distances between them. Parameters nodes : pandas.DataFrame Must have 'lat' and 'lon' columns. ways : pandas.DataFrame Table of way metadata. waynodes : pandas.DataFrame Table linking way IDs to node IDs. Way IDs should be in the index, with a column called 'node_ids'. two_way : bool, optional Whether the routes are two-way. If True, node pairs will only occur once. Default is True. Returns ------- pairs : pandas.DataFrame Will have columns of 'from_id', 'to_id', and 'distance'. The index will be a MultiIndex of (from id, to id). The distance metric is in meters. """
start_time = time.time() def pairwise(l): return zip(islice(l, 0, len(l)), islice(l, 1, None)) intersections = intersection_nodes(waynodes) waymap = waynodes.groupby(level=0, sort=False) pairs = [] for id, row in ways.iterrows(): nodes_in_way = waymap.get_group(id).node_id.values nodes_in_way = [x for x in nodes_in_way if x in intersections] if len(nodes_in_way) < 2: # no nodes to connect in this way continue for from_node, to_node in pairwise(nodes_in_way): if from_node != to_node: fn = nodes.loc[from_node] tn = nodes.loc[to_node] distance = round(gcd(fn.lat, fn.lon, tn.lat, tn.lon), 6) col_dict = {'from_id': from_node, 'to_id': to_node, 'distance': distance} for tag in config.settings.keep_osm_tags: try: col_dict.update({tag: row[tag]}) except KeyError: pass pairs.append(col_dict) if not two_way: col_dict = {'from_id': to_node, 'to_id': from_node, 'distance': distance} for tag in config.settings.keep_osm_tags: try: col_dict.update({tag: row[tag]}) except KeyError: pass pairs.append(col_dict) pairs = pd.DataFrame.from_records(pairs) if pairs.empty: raise Exception('Query resulted in no connected node pairs. Check ' 'your query parameters or bounding box') else: pairs.index = pd.MultiIndex.from_arrays([pairs['from_id'].values, pairs['to_id'].values]) log('Edge node pairs completed. Took {:,.2f} seconds' .format(time.time()-start_time)) return pairs
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def read_lines(in_file): """Returns a list of lines from a input markdown file."""
    with open(in_file, 'r') as inf:
        in_contents = inf.read().split('\n')
    return in_contents
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def tag_and_collect(lines, id_tag=True, back_links=False, exclude_h=None, remove_dashes=False): """ Gets headlines from the markdown document and creates anchor tags. Keyword arguments: lines: a list of sublists where every sublist represents a line from a Markdown document. id_tag: if true, inserts the <a id> tags (not req. by GitHub) back_links: if true, adds "back to top" links below each headline exclude_h: header levels to exclude. E.g., [2, 3] excludes level 2 and 3 headings. Returns a tuple of 2 lists: 1st list: A modified version of the input list where <a id="some-header"></a> anchor tags were inserted above the header lines (if github is False). 2nd list: A list of 3-value sublists, where the first value represents the heading, the second value the string that was assigned to the IDs in the anchor tags, and the third value is an integer that represents the headline level. E.g., """
out_contents = [] headlines = [] for l in lines: saw_headline = False orig_len = len(l) l = l.lstrip() if l.startswith(('# ', '## ', '### ', '#### ', '##### ', '###### ')): # comply with new markdown standards # not a headline if '#' not followed by whitespace '##no-header': if not l.lstrip('#').startswith(' '): continue # not a headline if more than 6 '#': if len(l) - len(l.lstrip('#')) > 6: continue # headers can be indented by at most 3 spaces: if orig_len - len(l) > 3: continue # ignore empty headers if not set(l) - {'#', ' '}: continue saw_headline = True slugified = slugify_headline(l, remove_dashes) if not exclude_h or not slugified[-1] in exclude_h: if id_tag: id_tag = '<a class="mk-toclify" id="%s"></a>'\ % (slugified[1]) out_contents.append(id_tag) headlines.append(slugified) out_contents.append(l) if back_links and saw_headline: out_contents.append('[[back to top](#table-of-contents)]') return out_contents, headlines
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_toc(headlines, hyperlink=True, top_link=False, no_toc_header=False): """ Creates the table of contents from the headline list that was returned by the tag_and_collect function. Keyword Arguments: headlines: list of lists e.g., ['Some header lvl3', 'some-header-lvl3', 3] hyperlink: Creates hyperlinks in Markdown format if True, e.g., '- [Some header lvl1](#some-header-lvl1)' top_link: if True, add a id tag for linking the table of contents itself (for the back-to-top-links) no_toc_header: suppresses TOC header if True. Returns a list of headlines for a table of contents in Markdown format, """
    processed = []
    if not no_toc_header:
        if top_link:
            processed.append('<a class="mk-toclify" id="table-of-contents"></a>\n')
        processed.append('# Table of Contents')

    for line in headlines:
        if hyperlink:
            item = '%s- [%s](#%s)' % ((line[2]-1)*'    ', line[0], line[1])
        else:
            item = '%s- %s' % ((line[2]-1)*'    ', line[0])
        processed.append(item)
    processed.append('\n')
    return processed
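Purely as an illustration (not part of the original pair), the headlines argument uses the [text, slug, level] triples described in the docstring above; the values here are made up:

headlines = [['Introduction', 'introduction', 1],
             ['Install', 'install', 2]]
for toc_line in create_toc(headlines):
    print(toc_line)
# Prints a '# Table of Contents' header followed by indented Markdown list items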
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def build_markdown(toc_headlines, body, spacer=0, placeholder=None): """ Returns a string with the Markdown output contents incl. the table of contents. Keyword arguments: toc_headlines: lines for the table of contents as created by the create_toc function. body: contents of the Markdown file including ID-anchor tags as returned by the tag_and_collect function. spacer: Adds vertical space after the table of contents. Height in pixels. placeholder: If a placeholder string is provided, the placeholder will be replaced by the TOC instead of inserting the TOC at the top of the document """
    if spacer:
        spacer_line = ['\n<div style="height:%spx;"></div>\n' % (spacer)]
        toc_markdown = "\n".join(toc_headlines + spacer_line)
    else:
        toc_markdown = "\n".join(toc_headlines)

    body_markdown = "\n".join(body).strip()

    if placeholder:
        markdown = body_markdown.replace(placeholder, toc_markdown)
    else:
        markdown = toc_markdown + body_markdown

    return markdown
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def output_markdown(markdown_cont, output_file): """ Writes to an output file if `output_file` is a valid path. """
    if output_file:
        with open(output_file, 'w') as out:
            out.write(markdown_cont)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def markdown_toclify(input_file, output_file=None, github=False, back_to_top=False, nolink=False, no_toc_header=False, spacer=0, placeholder=None, exclude_h=None, remove_dashes=False): """ Function to add table of contents to markdown files. Parameters input_file: str Path to the markdown input file. output_file: str (defaul: None) Path to the markdown output file. github: bool (default: False) Uses GitHub TOC syntax if True. back_to_top: bool (default: False) Inserts back-to-top links below headings if True. nolink: bool (default: False) Creates the table of contents without internal links if True. no_toc_header: bool (default: False) Suppresses the Table of Contents header if True spacer: int (default: 0) Inserts horizontal space (in pixels) after the table of contents. placeholder: str (default: None) Inserts the TOC at the placeholder string instead of inserting the TOC at the top of the document. exclude_h: list (default None) Excludes header levels, e.g., if [2, 3], ignores header levels 2 and 3 in the TOC. remove_dashes: bool (default: False) Removes dashes from headline slugs Returns cont: str Markdown contents including the TOC. """
    raw_contents = read_lines(input_file)
    cleaned_contents = remove_lines(raw_contents,
                                    remove=('[[back to top]',
                                            '<a class="mk-toclify"'))

    processed_contents, raw_headlines = tag_and_collect(
        cleaned_contents,
        id_tag=not github,
        back_links=back_to_top,
        exclude_h=exclude_h,
        remove_dashes=remove_dashes
    )

    leftjustified_headlines = positioning_headlines(raw_headlines)

    processed_headlines = create_toc(leftjustified_headlines,
                                     hyperlink=not nolink,
                                     top_link=not nolink and not github,
                                     no_toc_header=no_toc_header)

    if nolink:
        processed_contents = cleaned_contents

    cont = build_markdown(toc_headlines=processed_headlines,
                          body=processed_contents,
                          spacer=spacer,
                          placeholder=placeholder)

    if output_file:
        output_markdown(cont, output_file)
    return cont
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def url_parse(name): """parse urls with different prefixes"""
    position = name.find("github.com")
    if position >= 0:
        if position != 0:
            position_1 = name.find("www.github.com")
            position_2 = name.find("http://github.com")
            position_3 = name.find("https://github.com")
            if position_1*position_2*position_3 != 0:
                exception()
                sys.exit(0)
        name = name[position+11:]
        if name.endswith('/'):
            name = name[:-1]
        return name
    else:
        if name.endswith('/'):
            name = name[:-1]
        return name
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_req(url): """simple get request"""
    request = urllib.request.Request(url)
    request.add_header('Authorization', 'token %s' % API_TOKEN)
    try:
        response = urllib.request.urlopen(request).read().decode('utf-8')
        return response
    except urllib.error.HTTPError:
        exception()
        sys.exit(0)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def geturl_req(url): """get request that returns 302"""
    request = urllib.request.Request(url)
    request.add_header('Authorization', 'token %s' % API_TOKEN)
    try:
        response_url = urllib.request.urlopen(request).geturl()
        return response_url
    except urllib.error.HTTPError:
        exception()
        sys.exit(0)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _parse_special_fields(self, data): """ Helper method that parses special fields to Python objects :param data: response from Monzo API request :type data: dict """
    self.created = parse_date(data.pop('created'))

    if data.get('settled'):  # Not always returned
        self.settled = parse_date(data.pop('settled'))

    # Merchant field can contain either merchant ID or the whole object
    if (data.get('merchant') and
            not isinstance(data['merchant'], six.text_type)):
        self.merchant = MonzoMerchant(data=data.pop('merchant'))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _save_token_on_disk(self): """Helper function that saves the token on disk"""
    token = self._token.copy()

    # Client secret is needed for token refreshing and isn't returned
    # as part of the OAuth token by default
    token.update(client_secret=self._client_secret)

    with codecs.open(config.TOKEN_FILE_PATH, 'w', 'utf8') as f:
        json.dump(
            token, f,
            ensure_ascii=False,
            sort_keys=True,
            indent=4,
        )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_oauth_token(self): """ Get Monzo access token via OAuth2 `authorization code` grant type. Official docs: https://monzo.com/docs/#acquire-an-access-token :returns: OAuth 2 access token :rtype: dict """
    url = urljoin(self.api_url, '/oauth2/token')

    oauth = OAuth2Session(
        client_id=self._client_id,
        redirect_uri=config.REDIRECT_URI,
    )
    token = oauth.fetch_token(
        token_url=url,
        code=self._auth_code,
        client_secret=self._client_secret,
    )

    return token
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _refresh_oath_token(self): """ Refresh Monzo OAuth 2 token. Official docs: https://monzo.com/docs/#refreshing-access :raises UnableToRefreshTokenException: when token couldn't be refreshed """
    url = urljoin(self.api_url, '/oauth2/token')
    data = {
        'grant_type': 'refresh_token',
        'client_id': self._client_id,
        'client_secret': self._client_secret,
        'refresh_token': self._token['refresh_token'],
    }
    token_response = requests.post(url, data=data)
    token = token_response.json()

    # Not ideal, but that's how Monzo API returns errors
    if 'error' in token:
        raise CantRefreshTokenError(
            "Unable to refresh the token: {}".format(token)
        )

    self._token = token
    self._save_token_on_disk()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_response(self, method, endpoint, params=None): """ Helper method to handle HTTP requests and catch API errors :param method: valid HTTP method :type method: str :param endpoint: API endpoint :type endpoint: str :param params: extra parameters passed with the request :type params: dict :returns: API response :rtype: Response """
    url = urljoin(self.api_url, endpoint)

    try:
        response = getattr(self._session, method)(url, params=params)

        # Check if Monzo API returned HTTP 401, which could mean that the
        # token is expired
        if response.status_code == 401:
            raise TokenExpiredError
    except TokenExpiredError:
        # For some reason 'requests-oauthlib' automatic token refreshing
        # doesn't work so we do it here semi-manually
        self._refresh_oath_token()

        self._session = OAuth2Session(
            client_id=self._client_id,
            token=self._token,
        )

        response = getattr(self._session, method)(url, params=params)

    if response.status_code != requests.codes.ok:
        raise MonzoAPIError(
            "Something went wrong: {}".format(response.json())
        )

    return response
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def whoami(self): """ Get information about the access token. Official docs: https://monzo.com/docs/#authenticating-requests :returns: access token details :rtype: dict """
    endpoint = '/ping/whoami'
    response = self._get_response(
        method='get', endpoint=endpoint,
    )

    return response.json()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def accounts(self, refresh=False): """ Returns a list of accounts owned by the currently authorised user. It's often used when deciding whether to require explicit account ID or use the only available one, so we cache the response by default. Official docs: https://monzo.com/docs/#list-accounts :param refresh: decides if the accounts information should be refreshed :type refresh: bool :returns: list of Monzo accounts :rtype: list of MonzoAccount """
    if not refresh and self._cached_accounts:
        return self._cached_accounts

    endpoint = '/accounts'
    response = self._get_response(
        method='get', endpoint=endpoint,
    )

    accounts_json = response.json()['accounts']
    accounts = [MonzoAccount(data=account) for account in accounts_json]
    self._cached_accounts = accounts

    return accounts
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def balance(self, account_id=None): """ Returns balance information for a specific account. Official docs: https://monzo.com/docs/#read-balance :param account_id: Monzo account ID :type account_id: str :raises: ValueError :returns: Monzo balance instance :rtype: MonzoBalance """
    if not account_id:
        if len(self.accounts()) == 1:
            account_id = self.accounts()[0].id
        else:
            raise ValueError("You need to pass account ID")

    endpoint = '/balance'
    response = self._get_response(
        method='get', endpoint=endpoint,
        params={
            'account_id': account_id,
        },
    )

    return MonzoBalance(data=response.json())
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def pots(self, refresh=False): """ Returns a list of pots owned by the currently authorised user. Official docs: https://monzo.com/docs/#pots :param refresh: decides if the pots information should be refreshed. :type refresh: bool :returns: list of Monzo pots :rtype: list of MonzoPot """
    if not refresh and self._cached_pots:
        return self._cached_pots

    endpoint = '/pots/listV1'
    response = self._get_response(
        method='get', endpoint=endpoint,
    )

    pots_json = response.json()['pots']
    pots = [MonzoPot(data=pot) for pot in pots_json]
    self._cached_pots = pots

    return pots
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def transactions(self, account_id=None, reverse=True, limit=None): """ Returns a list of transactions on the user's account. Official docs: https://monzo.com/docs/#list-transactions :param account_id: Monzo account ID :type account_id: str :param reverse: whether transactions should be in descending order :type reverse: bool :param limit: how many transactions should be returned; None for all :type limit: int :returns: list of Monzo transactions :rtype: list of MonzoTransaction """
if not account_id: if len(self.accounts()) == 1: account_id = self.accounts()[0].id else: raise ValueError("You need to pass account ID") endpoint = '/transactions' response = self._get_response( method='get', endpoint=endpoint, params={ 'account_id': account_id, }, ) # The API does not allow reversing the list or limiting it, so to do # the basic query of 'get the latest transaction' we need to always get # all transactions and do the reversing and slicing in Python # I send Monzo an email, we'll se how they'll respond transactions = response.json()['transactions'] if reverse: transactions.reverse() if limit: transactions = transactions[:limit] return [MonzoTransaction(data=t) for t in transactions]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def transaction(self, transaction_id, expand_merchant=False): """ Returns an individual transaction, fetched by its id. Official docs: https://monzo.com/docs/#retrieve-transaction :param transaction_id: Monzo transaction ID :type transaction_id: str :param expand_merchant: whether merchant data should be included :type expand_merchant: bool :returns: Monzo transaction details :rtype: MonzoTransaction """
    endpoint = '/transactions/{}'.format(transaction_id)

    data = dict()
    if expand_merchant:
        data['expand[]'] = 'merchant'

    response = self._get_response(
        method='get', endpoint=endpoint, params=data,
    )

    return MonzoTransaction(data=response.json()['transaction'])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def launcher(): """Launch it."""
    parser = OptionParser()
    parser.add_option(
        '-f', '--file',
        dest='filename',
        default='agents.csv',
        help='snmposter configuration file'
    )

    options, args = parser.parse_args()

    factory = SNMPosterFactory()

    snmpd_status = subprocess.Popen(
        ["service", "snmpd", "status"],
        stdout=subprocess.PIPE
    ).communicate()[0]

    if "is running" in snmpd_status:
        message = "snmpd service is running. Please stop it and try again."
        print >> sys.stderr, message
        sys.exit(1)

    try:
        factory.configure(options.filename)
    except IOError:
        print >> sys.stderr, "Error opening %s." % options.filename
        sys.exit(1)

    factory.start()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_auth_string(self): """Create auth string from credentials."""
    auth_info = '{}:{}'.format(self.sauce_username, self.sauce_access_key)
    return base64.b64encode(auth_info.encode('utf-8')).decode('utf-8')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def make_auth_headers(self, content_type): """Add authorization header."""
    headers = self.make_headers(content_type)
    headers['Authorization'] = 'Basic {}'.format(self.get_auth_string())
    return headers
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def request(self, method, url, body=None, content_type='application/json'): """Send http request."""
    headers = self.make_auth_headers(content_type)
    connection = http_client.HTTPSConnection(self.apibase)
    connection.request(method, url, body, headers=headers)
    response = connection.getresponse()
    data = response.read()
    connection.close()
    if response.status not in [200, 201]:
        raise SauceException('{}: {}.\nSauce Status NOT OK'.format(
            response.status, response.reason), response=response)
    return json.loads(data.decode('utf-8'))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_user(self): """Access basic account information."""
    method = 'GET'
    endpoint = '/rest/v1/users/{}'.format(self.client.sauce_username)
    return self.client.request(method, endpoint)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_user(self, username, password, name, email): """Create a sub account."""
    method = 'POST'
    endpoint = '/rest/v1/users/{}'.format(self.client.sauce_username)
    body = json.dumps({'username': username,
                       'password': password,
                       'name': name,
                       'email': email, })
    return self.client.request(method, endpoint, body)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_subaccounts(self): """Get a list of sub accounts associated with a parent account."""
    method = 'GET'
    endpoint = '/rest/v1/users/{}/list-subaccounts'.format(
        self.client.sauce_username)
    return self.client.request(method, endpoint)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_siblings(self): """Get a list of sibling accounts associated with provided account."""
    method = 'GET'
    endpoint = '/rest/v1.1/users/{}/siblings'.format(
        self.client.sauce_username)
    return self.client.request(method, endpoint)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_subaccount_info(self): """Get information about a sub account."""
    method = 'GET'
    endpoint = '/rest/v1/users/{}/subaccounts'.format(
        self.client.sauce_username)
    return self.client.request(method, endpoint)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def change_access_key(self): """Change access key of your account."""
    method = 'POST'
    endpoint = '/rest/v1/users/{}/accesskey/change'.format(
        self.client.sauce_username)
    return self.client.request(method, endpoint)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_usage(self, start=None, end=None): """Access historical account usage data."""
    method = 'GET'
    endpoint = '/rest/v1/users/{}/usage'.format(self.client.sauce_username)
    data = {}
    if start:
        data['start'] = start
    if end:
        data['end'] = end
    if data:
        endpoint = '?'.join([endpoint, urlencode(data)])
    return self.client.request(method, endpoint)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_platforms(self, automation_api='all'): """Get a list of objects describing all the OS and browser platforms currently supported on Sauce Labs."""
    method = 'GET'
    endpoint = '/rest/v1/info/platforms/{}'.format(automation_api)
    return self.client.request(method, endpoint)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_jobs(self, full=None, limit=None, skip=None, start=None, end=None, output_format=None): """List jobs belonging to a specific user."""
    method = 'GET'
    endpoint = '/rest/v1/{}/jobs'.format(self.client.sauce_username)
    data = {}
    if full is not None:
        data['full'] = full
    if limit is not None:
        data['limit'] = limit
    if skip is not None:
        data['skip'] = skip
    if start is not None:
        data['from'] = start
    if end is not None:
        data['to'] = end
    if output_format is not None:
        data['format'] = output_format
    if data:
        endpoint = '?'.join([endpoint, urlencode(data)])
    return self.client.request(method, endpoint)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def update_job(self, job_id, build=None, custom_data=None, name=None, passed=None, public=None, tags=None): """Edit an existing job."""
    method = 'PUT'
    endpoint = '/rest/v1/{}/jobs/{}'.format(self.client.sauce_username,
                                            job_id)
    data = {}
    if build is not None:
        data['build'] = build
    if custom_data is not None:
        data['custom-data'] = custom_data
    if name is not None:
        data['name'] = name
    if passed is not None:
        data['passed'] = passed
    if public is not None:
        data['public'] = public
    if tags is not None:
        data['tags'] = tags
    body = json.dumps(data)
    return self.client.request(method, endpoint, body=body)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def stop_job(self, job_id): """Terminates a running job."""
    method = 'PUT'
    endpoint = '/rest/v1/{}/jobs/{}/stop'.format(
        self.client.sauce_username, job_id)
    return self.client.request(method, endpoint)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_job_asset_url(self, job_id, filename): """Get details about the static assets collected for a specific job."""
    return 'https://saucelabs.com/rest/v1/{}/jobs/{}/assets/{}'.format(
        self.client.sauce_username, job_id, filename)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_auth_token(self, job_id, date_range=None): """Get an auth token to access protected job resources. https://wiki.saucelabs.com/display/DOCS/Building+Links+to+Test+Results """
    key = '{}:{}'.format(self.client.sauce_username,
                         self.client.sauce_access_key)
    if date_range:
        key = '{}:{}'.format(key, date_range)
    return hmac.new(key.encode('utf-8'), job_id.encode('utf-8'),
                    md5).hexdigest()
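For illustration only (not part of the original pair), the same HMAC-MD5 token can be computed standalone; the credentials and job id below are hypothetical:

import hmac
from hashlib import md5

key = '{}:{}'.format('example-user', 'example-access-key')
job_id = '0123456789abcdef'
token = hmac.new(key.encode('utf-8'), job_id.encode('utf-8'), md5).hexdigest()
print(token)  # hex digest suitable for building a job results link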
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def upload_file(self, filepath, overwrite=True): """Uploads a file to the temporary sauce storage."""
    method = 'POST'
    filename = os.path.split(filepath)[1]
    endpoint = '/rest/v1/storage/{}/{}?overwrite={}'.format(
        self.client.sauce_username, filename,
        "true" if overwrite else "false")
    with open(filepath, 'rb') as filehandle:
        body = filehandle.read()
    return self.client.request(method, endpoint, body,
                               content_type='application/octet-stream')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_stored_files(self): """Check which files are in your temporary storage."""
    method = 'GET'
    endpoint = '/rest/v1/storage/{}'.format(self.client.sauce_username)
    return self.client.request(method, endpoint)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_tunnels(self): """Retrieves all running tunnels for a specific user."""
    method = 'GET'
    endpoint = '/rest/v1/{}/tunnels'.format(self.client.sauce_username)
    return self.client.request(method, endpoint)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_tunnel(self, tunnel_id): """Get information for a tunnel given its ID."""
    method = 'GET'
    endpoint = '/rest/v1/{}/tunnels/{}'.format(
        self.client.sauce_username, tunnel_id)
    return self.client.request(method, endpoint)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def apply(patch): """Apply a patch. The patch's :attr:`~Patch.obj` attribute is injected into the patch's :attr:`~Patch.destination` under the patch's :attr:`~Patch.name`. This is a wrapper around calling ``setattr(patch.destination, patch.name, patch.obj)``. Parameters patch : gorilla.Patch Patch. Raises ------ RuntimeError Overwriting an existing attribute is not allowed when the setting :attr:`Settings.allow_hit` is set to ``False``. Note ---- If both the attributes :attr:`Settings.allow_hit` and :attr:`Settings.store_hit` are ``True`` but the target attribute seems to have already been stored, then it won't be stored again to avoid losing the original attribute that was stored the first time around. """
settings = Settings() if patch.settings is None else patch.settings

# When a hit occurs due to an attribute at the destination already existing
# with the patch's name, the existing attribute is referred to as 'target'.
try:
    target = get_attribute(patch.destination, patch.name)
except AttributeError:
    pass
else:
    if not settings.allow_hit:
        raise RuntimeError(
            "An attribute named '%s' already exists at the destination "
            "'%s'. Set a different name through the patch object to avoid "
            "a name clash or set the setting 'allow_hit' to True to "
            "overwrite the attribute. In the latter case, it is "
            "recommended to also set the 'store_hit' setting to True in "
            "order to store the original attribute under a different "
            "name so it can still be accessed."
            % (patch.name, patch.destination.__name__))

    if settings.store_hit:
        original_name = _ORIGINAL_NAME % (patch.name,)
        if not hasattr(patch.destination, original_name):
            setattr(patch.destination, original_name, target)

setattr(patch.destination, patch.name, patch.obj)
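A minimal end-to-end sketch of creating and applying a patch by hand. The destination class is defined locally so the snippet is self-contained; only the installed gorilla package is assumed.

import gorilla

class Greeter(object):
    """Stand-in destination defined here purely for illustration."""
    def greet(self):
        return 'hello'

def noisy_greet(self):
    return 'HELLO!'

# allow_hit lets the patch overwrite the existing 'greet' attribute,
# store_hit stashes the original so it remains reachable afterwards.
settings = gorilla.Settings(allow_hit=True, store_hit=True)
patch = gorilla.Patch(Greeter, 'greet', noisy_greet, settings=settings)
gorilla.apply(patch)

print(Greeter().greet())  # -> 'HELLO!'
# The stored original can still be retrieved:
original = gorilla.get_original_attribute(Greeter, 'greet')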
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def patch(destination, name=None, settings=None): """Decorator to create a patch. The object being decorated becomes the :attr:`~Patch.obj` attribute of the patch. Parameters destination : object Patch destination. name : str Name of the attribute at the destination. settings : gorilla.Settings Settings. Returns ------- object The decorated object. See Also -------- :class:`Patch`. """
def decorator(wrapped):
    base = _get_base(wrapped)
    name_ = base.__name__ if name is None else name
    settings_ = copy.deepcopy(settings)
    patch = Patch(destination, name_, wrapped, settings=settings_)
    data = get_decorator_data(base, set_default=True)
    data.patches.append(patch)
    return wrapped

return decorator
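A sketch of the decorator in use: it only records the patch on the wrapped object, so the patch is later discovered with find_patches and applied explicitly. The destination class is defined locally and the module is looked up through sys.modules so the snippet runs as a single file.

import sys
import gorilla

class Destination(object):
    """Locally defined destination class, used here purely for illustration."""

@gorilla.patch(Destination)
def shout(self, message):
    # Will be injected into Destination under the name 'shout'.
    return message.upper()

# The decorator only registers the patch; discover and apply it explicitly.
for patch in gorilla.find_patches([sys.modules[__name__]]):
    gorilla.apply(patch)

print(Destination().shout('hi'))  # -> 'HI'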
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def patches(destination, settings=None, traverse_bases=True, filter=default_filter, recursive=True, use_decorators=True): """Decorator to create a patch for each member of a module or a class. Parameters destination : object Patch destination. settings : gorilla.Settings Settings. traverse_bases : bool If the object is a class, the base classes are also traversed. filter : function Attributes for which the function returns ``False`` are skipped. The function needs to define two parameters: ``name``, the attribute name, and ``obj``, the attribute value. If ``None``, no attribute is skipped. recursive : bool If ``True``, and a hit occurs due to an attribute at the destination already existing with the given name, and both the member and the target attributes are classes, then instead of creating a patch directly with the member attribute value as is, a patch for each of its own members is created with the target as new destination. use_decorators : bool Allows to take any modifier decorator into consideration to allow for more granular customizations. Returns ------- object The decorated object. Note ---- A 'target' differs from a 'destination' in that a target represents an existing attribute at the destination about to be hit by a patch. See Also -------- :class:`Patch`, :func:`create_patches`. """
def decorator(wrapped):
    settings_ = copy.deepcopy(settings)
    patches = create_patches(
        destination, wrapped, settings=settings_,
        traverse_bases=traverse_bases, filter=filter, recursive=recursive,
        use_decorators=use_decorators)
    data = get_decorator_data(_get_base(wrapped), set_default=True)
    data.patches.extend(patches)
    return wrapped

return decorator
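A sketch of class-level patching with this decorator: every member of the source class becomes a patch against the destination, and the `filter` modifier (defined a few entries further below) excludes one helper. Self-contained apart from the gorilla package itself.

import sys
import gorilla

class Calculator(object):
    """Locally defined destination that the extension methods are injected into."""
    def add(self, a, b):
        return a + b

@gorilla.patches(Calculator)
class CalculatorExtensions(object):
    def sub(self, a, b):
        return a - b

    def mul(self, a, b):
        return a * b

    @gorilla.filter(False)
    def helper(self):
        # Explicitly excluded from patching by the `filter` modifier.
        pass

for patch in gorilla.find_patches([sys.modules[__name__]]):
    gorilla.apply(patch)

calc = Calculator()
print(calc.sub(5, 3), calc.mul(4, 2))  # -> 2 8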
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def destination(value): """Modifier decorator to update a patch's destination. This only modifies the behaviour of the :func:`create_patches` function and the :func:`patches` decorator, given that their parameter ``use_decorators`` is set to ``True``. Parameters value : object Patch destination. Returns ------- object The decorated object. """
def decorator(wrapped):
    data = get_decorator_data(_get_base(wrapped), set_default=True)
    data.override['destination'] = value
    return wrapped

return decorator
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def settings(**kwargs): """Modifier decorator to update a patch's settings. This only modifies the behaviour of the :func:`create_patches` function and the :func:`patches` decorator, given that their parameter ``use_decorators`` is set to ``True``. Parameters kwargs Settings to update. See :class:`Settings` for the list. Returns ------- object The decorated object. """
def decorator(wrapped):
    data = get_decorator_data(_get_base(wrapped), set_default=True)
    data.override.setdefault('settings', {}).update(kwargs)
    return wrapped

return decorator
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def filter(value): """Modifier decorator to force the inclusion or exclusion of an attribute. This only modifies the behaviour of the :func:`create_patches` function and the :func:`patches` decorator, given that their parameter ``use_decorators`` is set to ``True``. Parameters value : bool ``True`` to force inclusion, ``False`` to force exclusion, and ``None`` to inherit from the behaviour defined by :func:`create_patches` or :func:`patches`. Returns ------- object The decorated object. """
def decorator(wrapped):
    data = get_decorator_data(_get_base(wrapped), set_default=True)
    data.filter = value
    return wrapped

return decorator
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_patches(destination, root, settings=None, traverse_bases=True, filter=default_filter, recursive=True, use_decorators=True): """Create a patch for each member of a module or a class. Parameters destination : object Patch destination. root : object Root object, either a module or a class. settings : gorilla.Settings Settings. traverse_bases : bool If the object is a class, the base classes are also traversed. filter : function Attributes for which the function returns ``False`` are skipped. The function needs to define two parameters: ``name``, the attribute name, and ``obj``, the attribute value. If ``None``, no attribute is skipped. recursive : bool If ``True``, and a hit occurs due to an attribute at the destination already existing with the given name, and both the member and the target attributes are classes, then instead of creating a patch directly with the member attribute value as is, a patch for each of its own members is created with the target as new destination. use_decorators : bool ``True`` to take any modifier decorator into consideration to allow for more granular customizations. Returns ------- list of gorilla.Patch The patches. Note ---- A 'target' differs from a 'destination' in that a target represents an existing attribute at the destination about to be hit by a patch. See Also -------- :func:`patches`. """
if filter is None:
    filter = _true

out = []
root_patch = Patch(destination, '', root, settings=settings)
stack = collections.deque((root_patch,))
while stack:
    parent_patch = stack.popleft()
    members = _get_members(parent_patch.obj, traverse_bases=traverse_bases,
                           filter=None, recursive=False)
    for name, value in members:
        patch = Patch(parent_patch.destination, name, value,
                      settings=copy.deepcopy(parent_patch.settings))
        if use_decorators:
            base = _get_base(value)
            decorator_data = get_decorator_data(base)
            filter_override = (None if decorator_data is None
                               else decorator_data.filter)
            if ((filter_override is None and not filter(name, value))
                    or filter_override is False):
                continue

            if decorator_data is not None:
                patch._update(**decorator_data.override)
        elif not filter(name, value):
            continue

        if recursive and isinstance(value, _CLASS_TYPES):
            try:
                target = get_attribute(patch.destination, patch.name)
            except AttributeError:
                pass
            else:
                if isinstance(target, _CLASS_TYPES):
                    patch.destination = target
                    stack.append(patch)
                    continue

        out.append(patch)

return out
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def find_patches(modules, recursive=True): """Find all the patches created through decorators. Parameters modules : list of module Modules and/or packages to search the patches in. recursive : bool ``True`` to search recursively in subpackages. Returns ------- list of gorilla.Patch Patches found. Raises ------ TypeError The input is not a valid package or module. See Also -------- :func:`patch`, :func:`patches`. """
out = []
modules = (module
           for package in modules
           for module in _module_iterator(package, recursive=recursive))
for module in modules:
    members = _get_members(module, filter=None)
    for _, value in members:
        base = _get_base(value)
        decorator_data = get_decorator_data(base)
        if decorator_data is None:
            continue

        out.extend(decorator_data.patches)

return out
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_attribute(obj, name): """Retrieve an attribute while bypassing the descriptor protocol. As per the built-in |getattr()|_ function, if the input object is a class then its base classes might also be searched until the attribute is found. Parameters obj : object Object to search the attribute in. name : str Name of the attribute. Returns ------- object The attribute found. Raises ------ AttributeError The attribute couldn't be found. .. |getattr()| replace:: ``getattr()`` .. _getattr(): https://docs.python.org/library/functions.html#getattr """
objs = inspect.getmro(obj) if isinstance(obj, _CLASS_TYPES) else [obj]
for obj_ in objs:
    try:
        return object.__getattribute__(obj_, name)
    except AttributeError:
        pass

raise AttributeError("'%s' object has no attribute '%s'"
                     % (type(obj), name))
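A short sketch of why bypassing the descriptor protocol matters, contrasting get_attribute with the built-in getattr on a classmethod; only the gorilla package is assumed.

import gorilla

class Example(object):
    @classmethod
    def make(cls):
        return cls()

# getattr triggers the descriptor protocol and returns a bound method ...
print(type(getattr(Example, 'make')))                # <class 'method'>
# ... while get_attribute returns the raw classmethod object stored on the
# class, which is what gorilla needs in order to re-inject it faithfully.
print(type(gorilla.get_attribute(Example, 'make')))  # <class 'classmethod'>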
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_decorator_data(obj, set_default=False): """Retrieve any decorator data from an object. Parameters obj : object Object. set_default : bool If no data is found, a default one is set on the object and returned, otherwise ``None`` is returned. Returns ------- gorilla.DecoratorData The decorator data or ``None``. """
if isinstance(obj, _CLASS_TYPES):
    datas = getattr(obj, _DECORATOR_DATA, {})
    data = datas.setdefault(obj, None)
    if data is None and set_default:
        data = DecoratorData()
        datas[obj] = data
        setattr(obj, _DECORATOR_DATA, datas)
else:
    data = getattr(obj, _DECORATOR_DATA, None)
    if data is None and set_default:
        data = DecoratorData()
        setattr(obj, _DECORATOR_DATA, data)

return data
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_base(obj): """Unwrap decorators to retrieve the base object."""
if hasattr(obj, '__func__'):
    obj = obj.__func__
elif isinstance(obj, property):
    obj = obj.fget
elif isinstance(obj, (classmethod, staticmethod)):
    # Fallback for Python < 2.7 back when no `__func__` attribute
    # was defined for those descriptors.
    obj = obj.__get__(None, object)
else:
    return obj

return _get_base(obj)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_members(obj, traverse_bases=True, filter=default_filter, recursive=True): """Retrieve the member attributes of a module or a class. The descriptor protocol is bypassed."""
if filter is None:
    filter = _true

out = []
stack = collections.deque((obj,))
while stack:
    obj = stack.popleft()
    if traverse_bases and isinstance(obj, _CLASS_TYPES):
        roots = [base for base in inspect.getmro(obj)
                 if base not in (type, object)]
    else:
        roots = [obj]

    members = []
    seen = set()
    for root in roots:
        for name, value in _iteritems(getattr(root, '__dict__', {})):
            if name not in seen and filter(name, value):
                members.append((name, value))
                seen.add(name)

    members = sorted(members)
    for _, value in members:
        if recursive and isinstance(value, _CLASS_TYPES):
            stack.append(value)

    out.extend(members)

return out
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _module_iterator(root, recursive=True): """Iterate over modules."""
yield root

stack = collections.deque((root,))
while stack:
    package = stack.popleft()

    # The '__path__' attribute of a package might return a list of paths if
    # the package is referenced as a namespace.
    paths = getattr(package, '__path__', [])
    for path in paths:
        modules = pkgutil.iter_modules([path])
        for finder, name, is_package in modules:
            module_name = '%s.%s' % (package.__name__, name)
            module = sys.modules.get(module_name, None)
            if module is None:
                # Import the module through the finder to support package
                # namespaces.
                module = _load_module(finder, module_name)

            if is_package:
                if recursive:
                    stack.append(module)
                    yield module
            else:
                yield module
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _update(self, **kwargs): """Update some attributes. If a 'settings' attribute is passed as a dict, then it updates the content of the settings, if any, instead of completely overwriting it. """
for key, value in _iteritems(kwargs):
    if key == 'settings':
        if isinstance(value, dict):
            if self.settings is None:
                self.settings = Settings(**value)
            else:
                self.settings._update(**value)
        else:
            self.settings = copy.deepcopy(value)
    else:
        setattr(self, key, value)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_collection(self, path): """Yield the items of a paginated collection, page by page."""
while True:
    items = self.get(path)
    req = self.req
    for item in items:
        yield item
    # requests exposes the RFC 5988 'Link' response header as `req.links`;
    # using .get() avoids a KeyError when no 'next' relation is present.
    if req.links.get('next', {}).get('rel') == 'next':
        path = req.links['next']['url']
    else:
        break
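A sketch of the same Link-header pagination pattern written directly against requests, independent of the wrapper class; the Team Password Manager base URL and credentials are placeholders, not taken from this document.

import requests  # assumption: the client above is built on requests

base_url = 'https://tpm.example.com/index.php/api/v4/'  # placeholder instance
auth = ('api-user', 'api-password')                     # placeholder credentials

def iter_collection(path):
    """Yield items page by page, following the 'next' Link header."""
    while True:
        response = requests.get(base_url + path, auth=auth)
        response.raise_for_status()
        for item in response.json():
            yield item
        next_link = response.links.get('next')
        if next_link:
            # The header carries an absolute URL; strip the base to reuse it.
            path = next_link['url'].replace(base_url, '')
        else:
            break

# Example (requires a reachable TPM instance):
# for project in iter_collection('projects.json'):
#     print(project['id'], project['name'])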
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def collection(self, path): """Return all items yielded by get_collection as a list."""
data = []
for item in self.get_collection(path):
    data.append(item)
return data
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def list_projects_search(self, searchstring): """List projects with searchstring."""
log.debug('List all projects with: %s' % searchstring)
return self.collection('projects/search/%s.json' % quote_plus(searchstring))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_project(self, data): """Create a project."""
# http://teampasswordmanager.com/docs/api-projects/#create_project
log.info('Create project: %s' % data)
NewID = self.post('projects.json', data).get('id')
log.info('Project has been created with ID %s' % NewID)
return NewID
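A sketch of the POST that this method issues, written with plain requests and grounded in the endpoint and documentation URL above; the instance URL, credentials and field values are placeholders, and the accepted fields follow the TPM docs referenced in the comment.

import requests  # assumption: direct REST sketch, independent of the wrapper class

base_url = 'https://tpm.example.com/index.php/api/v4/'  # placeholder instance
auth = ('api-user', 'api-password')                     # placeholder credentials

new_project = {
    'name': 'Staging servers',
    'notes': 'Created via the REST API',
}
response = requests.post(base_url + 'projects.json', json=new_project, auth=auth)
response.raise_for_status()
project_id = response.json().get('id')
print('Project has been created with ID %s' % project_id)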
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def change_parent_of_project(self, ID, NewParrentID): """Change parent of project."""
# http://teampasswordmanager.com/docs/api-projects/#change_parent
log.info('Change parent for project %s to %s' % (ID, NewParrentID))
data = {'parent_id': NewParrentID}
self.put('projects/%s/change_parent.json' % ID, data)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def update_security_of_project(self, ID, data): """Update security of project."""
# http://teampasswordmanager.com/docs/api-projects/#update_project_security
log.info('Update project %s security %s' % (ID, data))
self.put('projects/%s/security.json' % ID, data)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def list_passwords_search(self, searchstring): """List passwords with searchstring."""
log.debug('List all passwords with: %s' % searchstring)
return self.collection('passwords/search/%s.json' % quote_plus(searchstring))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_password(self, data): """Create a password."""
# http://teampasswordmanager.com/docs/api-passwords/#create_password
log.info('Create new password %s' % data)
NewID = self.post('passwords.json', data).get('id')
log.info('Password has been created with ID %s' % NewID)
return NewID
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def update_password(self, ID, data): """Update a password."""
# http://teampasswordmanager.com/docs/api-passwords/#update_password
log.info('Update Password %s with %s' % (ID, data))
self.put('passwords/%s.json' % ID, data)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def update_security_of_password(self, ID, data): """Update security of a password."""
# http://teampasswordmanager.com/docs/api-passwords/#update_security_password
log.info('Update security of password %s with %s' % (ID, data))
self.put('passwords/%s/security.json' % ID, data)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def update_custom_fields_of_password(self, ID, data): """Update custom fields definitions of a password."""
# http://teampasswordmanager.com/docs/api-passwords/#update_cf_password
log.info('Update custom fields of password %s with %s' % (ID, data))
self.put('passwords/%s/custom_fields.json' % ID, data)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def unlock_password(self, ID, reason): """Unlock a password."""
# http://teampasswordmanager.com/docs/api-passwords/#unlock_password
log.info('Unlock password %s, Reason: %s' % (ID, reason))
self.unlock_reason = reason
self.put('passwords/%s/unlock.json' % ID)
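A sketch of the unlock flow with plain requests: the wrapper above stores the reason so that follow-up requests can carry it. Passing the reason in an 'X-Unlock-Reason' header is an assumption drawn from the TPM API docs referenced above, not from this document; instance URL, credentials and the password id are placeholders.

import requests  # assumption: illustrating the documented unlock flow directly

base_url = 'https://tpm.example.com/index.php/api/v4/'  # placeholder instance
auth = ('api-user', 'api-password')                     # placeholder credentials
password_id = 1234                                      # placeholder locked password

# Assumed header name for the unlock reason (see lead-in note).
headers = {'X-Unlock-Reason': 'Incident #42: rotating credentials'}

response = requests.put('%spasswords/%s/unlock.json' % (base_url, password_id),
                        headers=headers, auth=auth)
response.raise_for_status()

# Subsequent reads of the locked password carry the same header:
data = requests.get('%spasswords/%s.json' % (base_url, password_id),
                    headers=headers, auth=auth).json()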
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def list_mypasswords_search(self, searchstring): """List my passwords with searchstring."""
# http://teampasswordmanager.com/docs/api-my-passwords/#list_passwords
log.debug('List MyPasswords with %s' % searchstring)
return self.collection('my_passwords/search/%s.json' % quote_plus(searchstring))