text_prompt
stringlengths
100
17.7k
code_prompt
stringlengths
7
9.86k
def conn_gcp(cred, crid):
    """Establish connection to GCP."""
    auth_mode = cred.get('gcp_auth_type', "S")
    if auth_mode == "A":  # Application (installed-app) credentials
        cred_file = CONFIG_DIR + ".gcp_libcloud_a_auth." + cred['gcp_proj_id']
        conn_args = {
            'user_id': cred['gcp_client_id'],
            'key': cred['gcp_client_sec'],
            'project': cred['gcp_proj_id'],
            'auth_type': "IA",
            'credential_file': cred_file,
        }
    else:  # Service-account credentials (default)
        pem_path = CONFIG_DIR + cred['gcp_pem_file']
        cred_file = CONFIG_DIR + ".gcp_libcloud_s_auth." + cred['gcp_proj_id']
        conn_args = {
            'user_id': cred['gcp_svc_acct_email'],
            'key': pem_path,
            'project': cred['gcp_proj_id'],
            'credential_file': cred_file,
        }
    driver = get_driver(Provider.GCE)
    try:
        gcp_obj = driver(**conn_args)
    except SSLError as e:
        abort_err("\r SSL Error with GCP: {}".format(e))
    except (InvalidCredsError, ValueError) as e:
        abort_err("\r Error with GCP Credentials: {}".format(e))
    return {crid: gcp_obj}
def conn_ali(cred, crid):
    """Establish connection to AliCloud service."""
    driver = get_driver(Provider.ALIYUN_ECS)
    try:
        ali_obj = driver(cred['ali_access_key_id'],
                         cred['ali_access_key_secret'],
                         region=cred['ali_region'])
    except SSLError as e:
        abort_err("\r SSL Error with AliCloud: {}".format(e))
    except InvalidCredsError as e:
        abort_err("\r Error with AliCloud Credentials: {}".format(e))
    return {crid: ali_obj}
def parse(self, uri=None, fh=None, str_data=None, **kwargs):
    """Parse a single XML document for this list.

    Accepts either a uri (uri or default if parameter not specified),
    or a filehandle (fh) or a string (str_data). Note that this method
    does not handle the case of a sitemapindex+sitemaps.

    LEGACY SUPPORT - the parameter str may be used in place of str_data
    but is deprecated and will be removed in a later version.
    """
    if uri is not None:
        try:
            fh = URLopener().open(uri)
        except IOError as e:
            raise Exception(
                "Failed to load sitemap/sitemapindex from %s (%s)" % (uri, str(e)))
    elif str_data is not None:
        fh = io.StringIO(str_data)
    elif 'str' in kwargs:
        # Legacy support for str argument, see
        # https://github.com/resync/resync/pull/21
        # One test for this in tests/test_list_base.py
        # FIX: Logger.warn is a deprecated alias (removed in Python 3.13);
        # use Logger.warning instead.
        self.logger.warning(
            "Legacy parse(str=...), use parse(str_data=...) instead")
        fh = io.StringIO(kwargs['str'])
    if fh is None:
        raise Exception("Nothing to parse")
    s = self.new_sitemap()
    s.parse_xml(
        fh=fh,
        resources=self,
        capability=self.capability_name,
        sitemapindex=False)
    self.parsed_index = s.parsed_index
def write(self, basename="/tmp/resynclist.xml"):
    """Write a single sitemap or sitemapindex XML document.

    Must be overridden to support multi-file lists.

    :param basename: Path of the output file.
    """
    self.default_capability()
    # FIX: use a context manager so the file handle is closed even if
    # serialization raises (the original leaked the handle on error).
    with open(basename, 'w') as fh:
        s = self.new_sitemap()
        s.resources_as_xml(self, fh=fh, sitemapindex=self.sitemapindex)
def add_if_changed(self, resource):
    """Add resource if change is not None else ChangeTypeError."""
    # Guard clause: a resource without a change type cannot be recorded.
    if resource.change is None:
        raise ChangeTypeError(resource.change)
    self.resources.append(resource)
def add(self, resource):
    """Add a resource change or an iterable collection of them.

    Allows multiple resource_change objects for the same resource (ie. URI)
    and preserves the order of addition.
    """
    # FIX: collections.Iterable was removed in Python 3.10; the ABC lives
    # in collections.abc on all supported versions.
    from collections.abc import Iterable
    if isinstance(resource, Iterable):
        for r in resource:
            self.add_if_changed(r)
    else:
        self.add_if_changed(resource)
def add_changed_resources(self, resources, change=None):
    """Add items from a ResourceContainer resources.

    If change is specified then the attribute is set in the Resource
    objects created.
    """
    for res in resources:
        self.add(Resource(resource=res, change=change))
def initialize_hashes(self):
    """Create new hashlib objects for each hash we are going to calculate."""
    # Map resync hash names onto (attribute, constructor) pairs; only the
    # hashes requested in self.hashes get calculator attributes.
    builders = (('md5', 'md5_calc', hashlib.md5),
                ('sha-1', 'sha1_calc', hashlib.sha1),
                ('sha-256', 'sha256_calc', hashlib.sha256))
    for name, attr, factory in builders:
        if name in self.hashes:
            setattr(self, attr, factory())
def compute_for_file(self, file, block_size=2**14):
    """Compute hash digests for a file.

    Calculate the hashes based on one read through the file.
    Optional block_size parameter controls memory used to do calculations.
    This should be a multiple of 128 bytes.
    """
    self.initialize_hashes()
    # FIX: context manager guarantees the file is closed even if a read
    # or a hash update raises (the original leaked the handle on error).
    with open(file, 'rb') as f:
        while True:
            data = f.read(block_size)
            if not data:
                break
            if self.md5_calc is not None:
                self.md5_calc.update(data)
            if self.sha1_calc is not None:
                self.sha1_calc.update(data)
            if self.sha256_calc is not None:
                self.sha256_calc.update(data)
def pre_save(self, model_instance, add):
    """Normalize the ISBN value before saving.

    When ``clean_isbn`` is enabled, strips dashes and spaces and converts
    the value to uppercase, then delegates to the parent ``pre_save``.
    """
    raw = getattr(model_instance, self.attname)
    if self.clean_isbn and raw not in EMPTY_VALUES:
        normalized = raw.replace(' ', '').replace('-', '').upper()
        setattr(model_instance, self.attname, normalized)
    return super(ISBNField, self).pre_save(model_instance, add)
def update_sensors(self):
    """Check path for each sensor and record wall proximity.

    For every sensor on the player: cast a ray at the sensor's angle,
    clamp to the sensor's max range, then check nearby items that fall
    inside the sensor's field of view and are closer than the wall hit.
    Finally update the sensor's debug line for rendering.
    """
    assert isinstance(self.player.cshape.center, eu.Vector2)
    pos = self.player.cshape.center
    # Player heading in radians; each sensor's angle is an offset from it.
    a = math.radians(self.player.rotation)
    for sensor in self.player.sensors:
        sensor.sensed_type = 'wall'
        rad = a + sensor.angle
        dis = min(self.distance_to_tile(pos, rad), sensor.max_range)
        # Keep state of sensed range, `dis` is from center
        sensor.proximity = dis - self.player.radius
        # Check for collisions with items
        # List of items within sensor range, do for each sensor's range
        if self.mode['items'] and len(self.mode['items']) > 0:
            nears = self.collman.ranked_objs_near(self.player, sensor.max_range)
            for near in nears:
                other, other_dis = near
                # Distances are from edge to edge see #2
                other_dis += self.player.radius
                # Skip if further than the wall (or a closer item) already hit
                if other_dis > dis:
                    continue
                # Determine if within `fov`
                # NOTE(review): atan2(dx, dy) (x first) yields a compass-style
                # bearing, matching the sin/cos line projection below.
                other_rad = math.atan2(other.x - self.player.x,
                                       other.y - self.player.y)
                # Round to bearing within one revolution
                other_rad = other_rad % (math.pi*2)
                round_rad = rad % (math.pi*2)
                if abs(other_rad - round_rad) < (sensor.fov/2):
                    sensor.proximity = other_dis - self.player.radius
                    sensor.sensed_type = other.btype
                    dis = other_dis
        # Redirect sensor lines
        # TODO: Decouple into view rendering
        end = pos.copy()
        end.x += math.sin(rad) * dis
        end.y += math.cos(rad) * dis
        sensor.line.start = pos
        sensor.line.end = end
        # Half-transparent line tinted by the sensed object's palette color.
        sensor.line.color = self.player.palette[sensor.sensed_type] + (int(255*0.5),)
def get_state(self):
    """Create state from sensors and battery.

    Returns either a list of per-sensor channel lists (multi-channel,
    when item types are configured) or a flat list of proximities
    (single-channel, walls only). The battery level is appended when
    enabled in the mode.
    """
    # Include battery level in state
    battery = self.player.stats['battery']/100
    # Create observation from sensor proximities
    # TODO: Have state persist, then update columns by `sensed_type`
    # Multi-channel; detecting `items`
    if len(self.mode['items']) > 0:
        observation = []
        for sensor in self.player.sensors:
            col = []
            # Always include range in channel 0
            col.append(sensor.proximity_norm())
            for item_type in self.mode['items']:
                if sensor.sensed_type == item_type:
                    col.append(sensor.proximity_norm())
                else:
                    # Default to 1 (`max_range/max_range`)
                    col.append(1)
            observation.append(col)
        if 'battery' in self.mode:
            # NOTE(review): the [battery, 1, 1] padding assumes exactly two
            # configured item channels — confirm against mode definitions.
            observation.append([battery,1,1])
    # Single-channel; walls only
    else:
        observation = [o.proximity_norm() for o in self.player.sensors]
        if 'battery' in self.mode:
            observation.append(battery)
    return observation
def compile_excludes(self):
    """Compile a set of regexps for files to be excluded from scans.

    Raises ValueError for any pattern that is not a valid Python regex.
    """
    compiled = []
    for pattern in self.exclude_files:
        try:
            compiled.append(re.compile(pattern))
        except re.error as e:
            raise ValueError(
                "Bad python regex in exclude '%s': %s" % (pattern, str(e)))
    self.compiled_exclude_files = compiled
def exclude_file(self, file):
    """True if file should be exclude based on name pattern."""
    # Any compiled pattern matching from the start of the name excludes it.
    return any(pattern.match(file) for pattern in self.compiled_exclude_files)
def from_disk(self, resource_list=None, paths=None):
    """Create or extend resource_list with resources from disk scan.

    Assumes very simple disk path to URL mapping (in self.mapping): chop
    path and replace with url_path. Returns the new or extended ResourceList
    object.

    If a resource_list is specified then items are added to that rather
    than creating a new one.

    If paths is specified then these are used instead of the set of local
    paths in self.mapping.

    Example usage with mapping start paths:

    mapper=Mapper('http://example.org/path','/path/to/files')
    rlb = ResourceListBuilder(mapper=mapper)
    m = rlb.from_disk()

    Example usage with explicit paths:

    mapper=Mapper('http://example.org/path','/path/to/files')
    rlb = ResourceListBuilder(mapper=mapper)
    m = rlb.from_disk(paths=['/path/to/files/a','/path/to/files/b'])
    """
    # Reuse the supplied list or start a fresh one.
    if resource_list is None:
        resource_list = ResourceList()
    # Compile exclude pattern matches
    self.compile_excludes()
    # Derive start paths from the mappings unless given explicitly.
    if paths is None:
        paths = [m.dst_path for m in self.mapper.mappings]
    # Set start time unless already set (perhaps because building in chunks)
    if resource_list.md_at is None:
        resource_list.md_at = datetime_to_str()
    for path in paths:
        self.logger.info("Scanning disk from %s" % (path))
        self.from_disk_add_path(path=path, resource_list=resource_list)
    # Set end time
    resource_list.md_completed = datetime_to_str()
    return resource_list
def from_disk_add_path(self, path=None, resource_list=None):
    """Add to resource_list with resources from disk scan starting at path.

    Walks the tree rooted at path (or adds a single file) and delegates
    each file to self.add_file(). Directories listed in self.exclude_dirs
    are pruned from the walk.
    """
    # sanity
    if (path is None or resource_list is None or self.mapper is None):
        raise ValueError("Must specify path, resource_list and mapper")
    # is path a directory or a file? for each file: create Resource object,
    # add, increment counter
    if (sys.version_info < (3, 0)):
        # Python 2 compatibility: decode byte paths so os.walk yields unicode.
        path = path.decode('utf-8')
    if os.path.isdir(path):
        num_files = 0
        for dirpath, dirs, files in os.walk(path, topdown=True):
            for file_in_dirpath in files:
                num_files += 1
                if (num_files % 50000 == 0):
                    # Periodic progress log for very large trees.
                    self.logger.info(
                        "ResourceListBuilder.from_disk_add_path: %d files..." % (num_files))
                self.add_file(resource_list=resource_list,
                              dir=dirpath,
                              file=file_in_dirpath)
            # prune list of dirs based on self.exclude_dirs
            # (in-place removal works because topdown=True, so os.walk will
            # not descend into the removed directories)
            for exclude in self.exclude_dirs:
                if exclude in dirs:
                    self.logger.debug("Excluding dir %s" % (exclude))
                    dirs.remove(exclude)
    else:
        # single file
        self.add_file(resource_list=resource_list, file=path)
def add_file(self, resource_list=None, dir=None, file=None):
    """Add a single file to resource_list.

    Follows object settings of set_path, set_hashes and set_length.
    Files matching the exclude patterns, non-files, and (unless
    include_symlinks is set) symlinks are silently skipped. OS errors
    while stat-ing are reported to stderr and the file is skipped.
    """
    try:
        if self.exclude_file(file):
            self.logger.debug("Excluding file %s" % (file))
            return
        # get abs filename and also URL
        if (dir is not None):
            file = os.path.join(dir, file)
        if (not os.path.isfile(file) or not (
                self.include_symlinks or not os.path.islink(file))):
            # Skip non-files, and symlinks unless explicitly allowed.
            return
        uri = self.mapper.dst_to_src(file)
        if (uri is None):
            raise Exception("Internal error, mapping failed")
        file_stat = os.stat(file)
    except OSError as e:
        # Best-effort scan: report and continue rather than abort the walk.
        sys.stderr.write("Ignoring file %s (error: %s)" % (file, str(e)))
        return
    timestamp = file_stat.st_mtime  # UTC
    r = Resource(uri=uri, timestamp=timestamp)
    if (self.set_path):
        # add full local path
        r.path = file
    if (self.set_hashes):
        # Hashes reads the file once and computes all requested digests.
        hasher = Hashes(self.set_hashes, file)
        if ('md5' in self.set_hashes):
            r.md5 = hasher.md5
        if ('sha-1' in self.set_hashes):
            r.sha1 = hasher.sha1
        if ('sha-256' in self.set_hashes):
            r.sha256 = hasher.sha256
    if (self.set_length):
        # add length
        r.length = file_stat.st_size
    resource_list.add(r)
def require_api_auth(allow_anonymous=False):
    """Decorator to require API authentication using OAuth token.

    :param allow_anonymous: Allow access without OAuth token
        (default: ``False``).
    """
    def wrapper(f):
        """Wrap function with oauth require decorator."""
        f_oauth_required = oauth2.require_oauth()(f)

        @wraps(f)
        def decorated(*args, **kwargs):
            """Require OAuth 2.0 Authentication."""
            # A user logged in via OAuth2 carries the login_via_oauth2 flag;
            # absence of the flag means session-based (or anonymous) access.
            if not hasattr(current_user, 'login_via_oauth2'):
                if not current_user.is_authenticated:
                    if allow_anonymous:
                        return f(*args, **kwargs)
                    abort(401)
                if current_app.config['ACCOUNTS_JWT_ENABLE']:
                    # Verify the token
                    # NOTE(review): "veryfication" is the actual (misspelled)
                    # name in the project API; do not "fix" it here.
                    current_oauth2server.jwt_veryfication_factory(
                        request.headers)
                # fully logged in with normal session
                return f(*args, **kwargs)
            else:
                # otherwise, try oauth2
                return f_oauth_required(*args, **kwargs)
        return decorated
    return wrapper
def require_oauth_scopes(*scopes):
    r"""Decorator to require a list of OAuth scopes.

    Decorator must be preceded by a ``require_api_auth()`` decorator.
    Note, API key authentication is bypassing this check.

    :param \*scopes: List of scopes required.
    """
    needed = set(scopes)

    def wrapper(f):
        @wraps(f)
        def decorated(*args, **kwargs):
            # request.oauth is only set for OAuth-authenticated requests
            # (see require_api_auth() above).
            oauth = getattr(request, 'oauth', None)
            if oauth is not None:
                granted = set(oauth.access_token.scopes)
                if not needed.issubset(granted):
                    abort(403)
            return f(*args, **kwargs)
        return decorated
    return wrapper
def indx_table(node_dict, tbl_mode=False):
    """Print Table for dict=formatted list conditionally include numbers."""
    tbl = PrettyTable()
    tbl.header = False
    tbl.padding_width = 2
    tbl.border = False
    clr_num = C_TI + "NUM"
    clr_name = C_TI + "NAME"
    clr_state = "STATE" + C_NORM
    # Header row: the NUM column only appears in indexed (tbl) mode.
    if tbl_mode:
        tbl.add_row([clr_num, "NAME", "REGION", "CLOUD", "SIZE",
                     "PUBLIC IP", clr_state])
    else:
        tbl.add_row([clr_name, "REGION", "CLOUD", "SIZE",
                     "PUBLIC IP", clr_state])
    for idx, node in node_dict.items():
        state = C_STAT[node.state] + node.state + C_NORM
        n_ip = node.public_ips if node.public_ips else "-"
        row = [node.name, node.zone, node.cloud, node.size, n_ip, state]
        if tbl_mode:
            row.insert(0, C_WARN + str(idx) + C_NORM)
        tbl.add_row(row)
    # Indexed mode returns the rendered table; plain mode prints it.
    if tbl_mode:
        return tbl.get_string()
    print(tbl)
def build(pattern=None, path='.'):
    """Generates a static copy of the sources."""
    clay_app = Clay(abspath(path))
    clay_app.build(pattern)
def frequency_to_probability(frequency_map, decorator=lambda f: f):
    """Transform a ``frequency_map`` into a map of probability using the sum of all frequencies as the total.

    Example:
        >>> frequency_to_probability({'a': 2, 'b': 2})
        {'a': 0.5, 'b': 0.5}

    Args:
        frequency_map (dict): The dictionary to transform
        decorator (function): A function to manipulate the probability

    Returns:
        Dictionary of ngrams to probability
    """
    total = sum(frequency_map.values())
    probabilities = {}
    for symbol, freq in frequency_map.items():
        probabilities[symbol] = decorator(freq / total)
    return probabilities
def index_of_coincidence(*texts):
    """Calculate the index of coincidence for one or more ``texts``.
    The results are averaged over multiple texts to return the delta index of coincidence.

    Examples:
        >>> index_of_coincidence("aabbc")
        0.2

        >>> index_of_coincidence("aabbc", "abbcc")
        0.2

    Args:
        *texts (variable length argument list): The texts to analyze

    Returns:
        Decimal value of the index of coincidence

    Raises:
        ValueError: If texts is empty
        ValueError: If any text is less that 2 character long
    """
    if not texts:
        raise ValueError("texts must not be empty")
    per_text = (
        _calculate_index_of_coincidence(frequency_analyze(text), len(text))
        for text in texts)
    return statistics.mean(per_text)
<SYSTEM_TASK:> A measure of how similar frequency_map is to the uniform distribution. <END_TASK> <USER_TASK:> Description: def _calculate_index_of_coincidence(frequency_map, length): """A measure of how similar frequency_map is to the uniform distribution. Or the probability that two letters picked randomly are alike. """
if length <= 1: return 0 # We cannot error here as length can legitimiately be 1. # Imagine a ciphertext of length 3 and a key of length 2. # Spliting this text up and calculating the index of coincidence results in ['AC', 'B'] # IOC of B will be calcuated for the 2nd column of the key. We could represent the same # encryption with a key of length 3 but then we encounter the same problem. This is also # legitimiate encryption scheme we cannot ignore. Hence we have to deal with this fact here # A value of 0 will impact the overall mean, however it does make some sense when you ask the question # How many ways to choose 2 letters from the text, if theres only 1 letter then the answer is 0. # Mathemtical combination, number of ways to choose 2 letters, no replacement, order doesnt matter combination_of_letters = sum(freq * (freq - 1) for freq in frequency_map.values()) return combination_of_letters / (length * (length - 1))
def chi_squared(source_frequency, target_frequency):
    """Calculate the Chi Squared statistic by comparing ``source_frequency`` with ``target_frequency``.

    Example:
        >>> chi_squared({'a': 2, 'b': 3}, {'a': 1, 'b': 2})
        0.1

    Args:
        source_frequency (dict): Frequency map of the text you are analyzing
        target_frequency (dict): Frequency map of the target language to compare with

    Returns:
        Decimal value of the chi-squared statistic
    """
    # Ignore any symbols from source that are not in target.
    # TODO: raise Error if source_len is 0?
    target_prob = frequency_to_probability(target_frequency)
    source_len = sum(
        count for symbol, count in source_frequency.items()
        if symbol in target_frequency)
    # Symbols absent from source contribute an observed count of 0.
    return sum(
        _calculate_chi_squared(source_frequency.get(symbol, 0), prob, source_len)
        for symbol, prob in target_prob.items())
<SYSTEM_TASK:> A measure of the observed frequency of the symbol versus the expected frequency. <END_TASK> <USER_TASK:> Description: def _calculate_chi_squared(source_freq, target_prob, source_len): """A measure of the observed frequency of the symbol versus the expected frequency. If the value is 0 then the texts are exactly alike for that symbol. """
expected = source_len * target_prob return (source_freq - expected)**2 / expected
def _load_ngram(name):
    """Dynamically import the python module with the ngram defined as a dictionary.

    Since bigger ngrams are large files its wasteful to always statically import them if they're not used.
    """
    module_path = 'lantern.analysis.english_ngrams.{}'.format(name)
    return getattr(importlib.import_module(module_path), name)
def score(text, *score_functions):
    """Score ``text`` using ``score_functions``.

    Examples:
        >>> score("abc", function_a)
        >>> score("abc", function_a, function_b)

    Args:
        text (str): The text to score
        *score_functions (variable length argument list): functions to score with

    Returns:
        Arithmetic mean of scores

    Raises:
        ValueError: If score_functions is empty
    """
    if not score_functions:
        raise ValueError("score_functions must not be empty")
    individual_scores = [scorer(text) for scorer in score_functions]
    return statistics.mean(individual_scores)
def PatternMatch(regex):
    """Compute the score of a text by determing if a pattern matches.

    Example:
        >>> fitness = PatternMatch("flag{.*}")
        >>> fitness("flag{example}")
        0

        >>> fitness("junk")
        -1

    Args:
        regex (str): regular expression string to use as a pattern
    """
    compiled = re.compile(regex)

    def fitness(text):
        # 0 when the pattern is found anywhere in the text, -1 otherwise.
        return 0 if compiled.search(text) else -1
    return fitness
def _request(self, url, params=None):
    """Makes a request using the currently open session.

    :param url: A url fragment to use in the creation of the master url
    :param params: Optional mapping of query parameters.
    """
    # FIX: the original used a mutable default argument (params={}), a
    # classic Python pitfall; requests treats None the same as an empty
    # mapping, so behavior for callers is unchanged.
    return self._session.get(url=url, params=params, headers=DEFAULT_ORIGIN)
def emit(self, event, *args, **kwargs):
    """Send out an event and call it's associated functions

    :param event: Name of the event to trigger
    """
    # Invoke every listener registered for this event, in insertion order.
    for listener in self._registered_events[event].values():
        listener(*args, **kwargs)
def remove_all_listeners(self, event=None):
    """Remove all functions for all events, or one event if one is specifed.

    :param event: Optional event you wish to remove all functions from
    """
    if event is None:
        # Reset the entire registry; defaultdict keeps unknown events
        # yielding an empty OrderedDict.
        self._registered_events = defaultdict(OrderedDict)
    else:
        self._registered_events[event] = OrderedDict()
def get_current_status(self):
    """Returns the current state of the local spotify client"""
    params = {"oauth": self._oauth_token, "csrf": self._csrf_token}
    response = self._request(url=get_url("/remote/status.json"), params=params)
    return response.json()
def pause(self, pause=True):
    """Pauses the spotify player

    :param pause: boolean value to choose the pause/play state
    """
    params = {
        "oauth": self._oauth_token,
        "csrf": self._csrf_token,
        # The web API expects a literal "true"/"false" string.
        "pause": "true" if pause else "false",
    }
    self._request(url=get_url("/remote/pause.json"), params=params)
def authenticate(self):
    """Authenticate with the api"""
    raw = self.session.get(self.api_url, auth=self.auth)
    return self._process_response(raw)
def generate_screenshots(self):
    """Take a config file as input and generate screenshots"""
    headers = {'content-type': 'application/json',
               'Accept': 'application/json'}
    raw = requests.post(self.api_url, data=json.dumps(self.config),
                        headers=headers, auth=self.auth)
    return self._process_response(raw).json()
def screenshots_done(self, jobid):
    """Return true if the screenshots job is done.

    :param jobid: Identifier of a previously submitted screenshots job.
    """
    # NOTE(review): os.path.join is used here to build a URL; that matches
    # the original behavior on POSIX but would break on Windows — consider
    # plain string joining. Preserved to avoid a behavior change.
    resp = self.session.get(os.path.join(self.api_url, '{0}.json'.format(jobid)))
    resp = self._process_response(resp)
    # FIX: `True if cond else False` around a boolean comparison was
    # redundant; return the comparison directly.
    return resp.json()['state'] == 'done'
def add_item(self, radius, item_type):
    """Add a single item in random open position.

    Tries up to 100 random positions, rejecting any that overlap a wall
    tile or come within 1.1*radius of an existing collidable.
    """
    assert isinstance(radius, int) or isinstance(radius, float)
    assert isinstance(item_type, str)
    separation_scale = 1.1
    min_separation = separation_scale * radius
    # Removable item
    item = Collidable(0, 0, radius, item_type, self.pics[item_type], True)
    cntTrys = 0
    while cntTrys < 100:
        # Random center, clamped so the whole circle stays inside the map.
        cx = radius + random.random() * (self.width - 2.0 * radius)
        cy = radius + random.random() * (self.height - 2.0 * radius)
        # Test if colliding with wall.
        # Top left
        cells = []
        cells.append(self.map_layer.get_at_pixel(cx-radius, cy-radius))
        # Top right
        cells.append(self.map_layer.get_at_pixel(cx+radius, cy-radius))
        # Bottom left
        cells.append(self.map_layer.get_at_pixel(cx-radius, cy+radius))
        # Bottom right
        cells.append(self.map_layer.get_at_pixel(cx+radius, cy+radius))
        wall = False
        for cell in cells:
            # Any tile with id > 0 counts as a wall.
            wall = cell and cell.tile and cell.tile.id > 0
            if wall:
                break
        if wall:
            # NOTE(review): cntTrys is not incremented on this retry path;
            # an all-wall map would loop forever — confirm intended.
            continue
        item.update_center(eu.Vector2(cx, cy))
        if self.collman.any_near(item, min_separation) is None:
            self.add(item, z=self.z)
            self.z += 1
            self.collman.add(item)
            break
        cntTrys += 1
def update_collisions(self):
    """Test player for collisions with items.

    Rebuilds the collision manager from the current children, rewards and
    queues removable items the player touches, then removes them.
    """
    # No item types configured -> nothing to collide with.
    if not self.mode['items'] or len(self.mode['items']) == 0:
        return
    # update collman
    # FIXME: Why update each frame?
    self.collman.clear()
    for z, node in self.children:
        if hasattr(node, 'cshape') and type(node.cshape) == cm.CircleShape:
            self.collman.add(node)
    # interactions player - others
    for other in self.collman.iter_colliding(self.player):
        typeball = other.btype
        self.logger.debug('collision', typeball)
        # TODO: Limit player position on non-removable items
        #if not other.removable:
        #    pass
        if other.removable:
            self.to_remove.append(other)
            self.reward_item(typeball)
        #
        #        elif (typeball == 'wall' or
        #              typeball == 'gate' and self.cnt_food > 0):
        #            self.level_losed()
        #
        #        elif typeball == 'gate':
        #            self.level_conquered()
    self.remove_items()
def register_view(self, view_class, *args, **kwargs):
    """Register an admin view on this admin instance.

    :param view_class: The view class name passed to the view factory.
    :param args: Positional arugments for view class.
    :param kwargs: Keyword arguments to view class.
    """
    protected = self.view_class_factory(view_class)
    # Derive a default endpoint from a throwaway instance only when the
    # caller did not supply one.
    if 'endpoint' not in kwargs:
        kwargs['endpoint'] = view_class(*args, **kwargs).endpoint
    self.admin.add_view(protected(*args, **kwargs))
def load_entry_point_group(self, entry_point_group):
    """Load administration interface from entry point group.

    :param str entry_point_group: Name of the entry point group.
    """
    for ep in pkg_resources.iter_entry_points(group=entry_point_group):
        admin_ep = dict(ep.load())
        # Detect which entry-point style was provided:
        # new style uses 'view_class' (+ optional args/kwargs),
        # old style uses 'model' + 'modelview' (+ optional session).
        keys = tuple(
            k in admin_ep for k in ('model', 'modelview', 'view_class'))
        if keys == (False, False, True):
            self.register_view(
                admin_ep.pop('view_class'),
                *admin_ep.pop('args', []),
                **admin_ep.pop('kwargs', {})
            )
        elif keys == (True, True, False):
            warnings.warn(
                'Usage of model and modelview kwargs are deprecated in '
                'favor of view_class, args and kwargs.',
                PendingDeprecationWarning
            )
            # Remaining admin_ep keys are forwarded as keyword arguments.
            self.register_view(
                admin_ep.pop('modelview'),
                admin_ep.pop('model'),
                admin_ep.pop('session', db.session),
                **admin_ep
            )
        else:
            raise Exception(
                'Admin entry point dictionary must contain '
                'either "view_class" OR "model" and "modelview" keys.')
def get_url(url):
    """Randomly generates a url for use in requests.

    Builds a hostname from a random 10-letter subdomain plus the port and
    the provided url suffix.

    :param url: A url fragment to use in the creation of the master url
    """
    subdomain = "".join(choices(ascii_lowercase, k=10))
    host = "{0}.spotilocal.com".format(subdomain)
    return "http://{0}:{1}{2}".format(host, DEFAULT_PORT, url)
def protected_adminview_factory(base_class):
    """Factory for creating protected admin view classes.

    The factory will ensure that the admin view will check if a user is
    authenticated and has the necessary permissions (as defined by the
    permission factory).
    The factory creates a new class using the provided class as base class
    and overwrites ``is_accessible()`` and ``inaccessible_callback()``
    methods. Super is called for both methods, so the base class can implement
    further restrictions if needed.

    :param base_class: Class to use as base class.
    :type base_class: :class:`flask_admin.base.BaseView`
    :returns: Admin view class which provides authentication and authorization.
    """
    class ProtectedAdminView(base_class):
        """Admin view class protected by authentication."""

        def _handle_view(self, name, **kwargs):
            """Override Talisman CSP header configuration for admin views.

            Flask-Admin extension is not CSP compliant (see:
            https://github.com/flask-admin/flask-admin/issues/1135).
            To avoid UI malfunctions, the CSP header (globally set on each
            request by Talisman extension) must be overridden and removed.
            Remove this code if and when Flask-Admin will be completely CSP
            compliant.
            """
            invenio_app = current_app.extensions.get('invenio-app', None)
            if invenio_app:
                # Disable the CSP header for this request only.
                setattr(invenio_app.talisman.local_options,
                        'content_security_policy', None)
            return super(ProtectedAdminView, self)._handle_view(name, **kwargs)

        def is_accessible(self):
            """Require authentication and authorization."""
            # All three checks must pass: logged in, permitted, and any
            # further restriction the base class imposes.
            return current_user.is_authenticated and \
                current_admin.permission_factory(self).can() and \
                super(ProtectedAdminView, self).is_accessible()

        def inaccessible_callback(self, name, **kwargs):
            """Redirect to login if user is not logged in.

            :param name: View function name.
            :param kwargs: Passed to the superclass' `inaccessible_callback`.
            """
            if not current_user.is_authenticated:
                # Redirect to login page if user is not logged in.
                return redirect(url_for(
                    current_app.config['ADMIN_LOGIN_ENDPOINT'],
                    next=request.url))
            super(ProtectedAdminView, self).inaccessible_callback(
                name, **kwargs)

    return ProtectedAdminView
def token_getter(is_personal=True, is_internal=False):
    """Decorator to retrieve Token object and check user permission.

    :param is_personal: Search for a personal token. (Default: ``True``)
    :param is_internal: Search for a internal token. (Default: ``False``)
    """
    def wrapper(f):
        @wraps(f)
        def decorated(*args, **kwargs):
            # A missing token_id is a programming error, not a client error.
            try:
                token_id = kwargs.pop('token_id')
            except KeyError:
                abort(500)
            token = Token.query.filter_by(
                id=token_id,
                user_id=current_user.get_id(),
                is_personal=is_personal,
                is_internal=is_internal,
            ).first()
            if token is None:
                abort(404)
            return f(token, *args, **kwargs)
        return decorated
    return wrapper
<SYSTEM_TASK:> Show permission granted to authorized application token. <END_TASK> <USER_TASK:> Description: def token_permission_view(token): """Show permission granted to authorized application token."""
scopes = [current_oauth2server.scopes[x] for x in token.scopes] return render_template( "invenio_oauth2server/settings/token_permission_view.html", token=token, scopes=scopes, )
<SYSTEM_TASK:> Write status dict to client status file. <END_TASK> <USER_TASK:> Description: def set_state(self, site, timestamp=None): """Write status dict to client status file. FIXME - should have some file lock to avoid race """
parser = ConfigParser() parser.read(self.status_file) status_section = 'incremental' if (not parser.has_section(status_section)): parser.add_section(status_section) if (timestamp is None): parser.remove_option( status_section, self.config_site_to_name(site)) else: parser.set( status_section, self.config_site_to_name(site), str(timestamp)) with open(self.status_file, 'w') as configfile: parser.write(configfile) configfile.close()
<SYSTEM_TASK:> Write or return XML for a set of resources in sitemap format. <END_TASK> <USER_TASK:> Description: def resources_as_xml(self, resources, sitemapindex=False, fh=None): """Write or return XML for a set of resources in sitemap format. Arguments: - resources - either an iterable or iterator of Resource objects; if there an md attribute this will go to <rs:md> if there an ln attribute this will go to <rs:ln> - sitemapindex - set True to write sitemapindex instead of sitemap - fh - write to filehandle fh instead of returning string """
# element names depending on sitemapindex or not root_element = ('sitemapindex' if (sitemapindex) else 'urlset') item_element = ('sitemap' if (sitemapindex) else 'url') # namespaces and other settings namespaces = {'xmlns': SITEMAP_NS, 'xmlns:rs': RS_NS} root = Element(root_element, namespaces) if (self.pretty_xml): root.text = "\n" # <rs:ln> if (hasattr(resources, 'ln')): for ln in resources.ln: self.add_element_with_atts_to_etree(root, 'rs:ln', ln) # <rs:md> if (hasattr(resources, 'md')): self.add_element_with_atts_to_etree(root, 'rs:md', resources.md) # <url> entries from either an iterable or an iterator for r in resources: e = self.resource_etree_element(r, element_name=item_element) root.append(e) # have tree, now serialize tree = ElementTree(root) xml_buf = None if (fh is None): xml_buf = io.StringIO() fh = xml_buf if (sys.version_info >= (3, 0)): tree.write( fh, encoding='unicode', xml_declaration=True, method='xml') elif (sys.version_info >= (2, 7)): tree.write( fh, encoding='UTF-8', xml_declaration=True, method='xml') else: # python2.6 tree.write(fh, encoding='UTF-8') if (xml_buf is not None): if (sys.version_info >= (3, 0)): return(xml_buf.getvalue()) else: return(xml_buf.getvalue().decode('utf-8'))
<SYSTEM_TASK:> Parse XML Sitemap and add to resources object. <END_TASK> <USER_TASK:> Description: def parse_xml(self, fh=None, etree=None, resources=None, capability=None, sitemapindex=None): """Parse XML Sitemap and add to resources object. Reads from fh or etree and adds resources to a resorces object (which must support the add method). Returns the resources object. Also sets self.resources_created to be the number of resources created. We adopt a very lax approach here. The parsing is properly namespace aware but we search just for the elements wanted and leave everything else alone. This method will read either sitemap or sitemapindex documents. Behavior depends on the sitemapindex parameter: - None - will read either - False - SitemapIndexError exception if sitemapindex detected - True - SitemapIndexError exception if sitemap detected Will set self.parsed_index based on whether a sitemap or sitemapindex document was read: - False - sitemap - True - sitemapindex """
if (resources is None): resources = ResourceContainer() if (fh is not None): etree = parse(fh) elif (etree is None): raise ValueError("Neither fh or etree set") # check root element: urlset (for sitemap), sitemapindex or bad root_tag = etree.getroot().tag resource_tag = None # will be <url> or <sitemap> depending on type self.parsed_index = None if (root_tag == '{' + SITEMAP_NS + "}urlset"): self.parsed_index = False if (sitemapindex is not None and sitemapindex): raise SitemapIndexError( "Got sitemap when expecting sitemapindex", etree) resource_tag = '{' + SITEMAP_NS + "}url" elif (root_tag == '{' + SITEMAP_NS + "}sitemapindex"): self.parsed_index = True if (sitemapindex is not None and not sitemapindex): raise SitemapIndexError( "Got sitemapindex when expecting sitemap", etree) resource_tag = '{' + SITEMAP_NS + "}sitemap" else: raise SitemapParseError( "XML is not sitemap or sitemapindex (root element is <%s>)" % root_tag) # have what we expect, read it in_preamble = True self.resources_created = 0 seen_top_level_md = False for e in etree.getroot().getchildren(): # look for <rs:md> and <rs:ln>, first <url> ends # then look for resources in <url> blocks if (e.tag == resource_tag): in_preamble = False # any later rs:md or rs:ln is error r = self.resource_from_etree(e, self.resource_class) try: resources.add(r) except SitemapDupeError: self.logger.warning( "dupe of: %s (lastmod=%s)" % (r.uri, r.lastmod)) self.resources_created += 1 elif (e.tag == "{" + RS_NS + "}md"): if (in_preamble): if (seen_top_level_md): raise SitemapParseError( "Multiple <rs:md> at top level of sitemap") else: resources.md = self.md_from_etree(e, 'preamble') seen_top_level_md = True else: raise SitemapParseError( "Found <rs:md> after first <url> in sitemap") elif (e.tag == "{" + RS_NS + "}ln"): if (in_preamble): resources.ln.append(self.ln_from_etree(e, 'preamble')) else: raise SitemapParseError( "Found <rs:ln> after first <url> in sitemap") else: # element we don't recognize, ignore pass # 
check that we read to right capability document if (capability is not None): if ('capability' not in resources.md): if (capability == 'resourcelist'): self.logger.warning( 'No capability specified in sitemap, assuming resourcelist') resources.md['capability'] = 'resourcelist' else: raise SitemapParseError("Expected to read a %s document, but no capability specified in sitemap" % (capability)) if (resources.md['capability'] != capability): raise SitemapParseError("Expected to read a %s document, got %s" % (capability, resources.md['capability'])) # return the resource container object return(resources)
<SYSTEM_TASK:> Return xml.etree.ElementTree.Element representing the resource. <END_TASK> <USER_TASK:> Description: def resource_etree_element(self, resource, element_name='url'): """Return xml.etree.ElementTree.Element representing the resource. Returns and element for the specified resource, of the form <url> with enclosed properties that are based on the sitemap with extensions for ResourceSync. """
e = Element(element_name) sub = Element('loc') sub.text = resource.uri e.append(sub) if (resource.timestamp is not None): # Create appriate element for timestamp sub = Element('lastmod') sub.text = str(resource.lastmod) # W3C Datetime in UTC e.append(sub) md_atts = {} for att in ('capability', 'change', 'hash', 'length', 'path', 'mime_type', 'md_at', 'md_completed', 'md_from', 'md_until'): val = getattr(resource, att, None) if (val is not None): md_atts[self._xml_att_name(att)] = str(val) if (len(md_atts) > 0): md = Element('rs:md', md_atts) e.append(md) # add any <rs:ln> if (hasattr(resource, 'ln') and resource.ln is not None): for ln in resource.ln: self.add_element_with_atts_to_etree(e, 'rs:ln', ln) if (self.pretty_xml): e.tail = "\n" return(e)
<SYSTEM_TASK:> Return string for the resource as part of an XML sitemap. <END_TASK> <USER_TASK:> Description: def resource_as_xml(self, resource): """Return string for the resource as part of an XML sitemap. Returns a string with the XML snippet representing the resource, without any XML declaration. """
e = self.resource_etree_element(resource) if (sys.version_info >= (3, 0)): # python3.x return(tostring(e, encoding='unicode', method='xml')) elif (sys.version_info >= (2, 7)): s = tostring(e, encoding='UTF-8', method='xml') else: # must not specify method='xml' in python2.6 s = tostring(e, encoding='UTF-8') # Chop off XML declaration that is added in 2.x... sigh return(s.replace("<?xml version='1.0' encoding='UTF-8'?>\n", ''))
<SYSTEM_TASK:> Construct a Resource from an etree. <END_TASK> <USER_TASK:> Description: def resource_from_etree(self, etree, resource_class): """Construct a Resource from an etree. Parameters: etree - the etree to parse resource_class - class of Resource object to create The parsing is properly namespace aware but we search just for the elements wanted and leave everything else alone. Will raise an error if there are multiple <loc> or multiple <lastmod> elements. Otherwise, provided there is a <loc> element then will go ahead and extract as much as possible. All errors raised are SitemapParseError with messages intended to help debug problematic sitemap XML. """
loc_elements = etree.findall('{' + SITEMAP_NS + "}loc") if (len(loc_elements) > 1): raise SitemapParseError( "Multiple <loc> elements while parsing <url> in sitemap") elif (len(loc_elements) == 0): raise SitemapParseError( "Missing <loc> element while parsing <url> in sitemap") else: loc = loc_elements[0].text if (loc is None or loc == ''): raise SitemapParseError( "Bad <loc> element with no content while parsing <url> in sitemap") # must at least have a URI, make this object resource = resource_class(uri=loc) # and hopefully a lastmod datetime (but none is OK) lastmod_elements = etree.findall('{' + SITEMAP_NS + "}lastmod") if (len(lastmod_elements) > 1): raise SitemapParseError( "Multiple <lastmod> elements while parsing <url> in sitemap") elif (len(lastmod_elements) == 1): resource.lastmod = lastmod_elements[0].text # proceed to look for other resource attributes in an rs:md element md_elements = etree.findall('{' + RS_NS + "}md") if (len(md_elements) > 1): raise SitemapParseError( "Found multiple (%d) <rs:md> elements for %s", (len(md_elements), loc)) elif (len(md_elements) == 1): # have on element, look at attributes md = self.md_from_etree(md_elements[0], context=loc) # simple attributes that map directly to Resource object attributes for att in ('capability', 'change', 'length', 'path', 'mime_type'): if (att in md): setattr(resource, att, md[att]) # The ResourceSync beta spec lists md5, sha-1 and sha-256 fixity # digest types. Parse and warn of errors ignored. if ('hash' in md): try: resource.hash = md['hash'] except ValueError as e: self.logger.warning("%s in <rs:md> for %s" % (str(e), loc)) # look for rs:ln elements (optional) ln_elements = etree.findall('{' + RS_NS + "}ln") if (len(ln_elements) > 0): resource.ln = [] for ln_element in ln_elements: resource.ln.append(self.ln_from_etree(ln_element, loc)) return(resource)
<SYSTEM_TASK:> Add element with name and atts to etree iff there are any atts. <END_TASK> <USER_TASK:> Description: def add_element_with_atts_to_etree(self, etree, name, atts): """Add element with name and atts to etree iff there are any atts. Parameters: etree - an etree object name - XML element name atts - dicts of attribute values. Attribute names are transformed """
xml_atts = {} for att in atts.keys(): val = atts[att] if (val is not None): xml_atts[self._xml_att_name(att)] = str(val) if (len(xml_atts) > 0): e = Element(name, xml_atts) if (self.pretty_xml): e.tail = "\n" etree.append(e)
<SYSTEM_TASK:> Make a safe path name from uri. <END_TASK> <USER_TASK:> Description: def path_from_uri(self, uri): """Make a safe path name from uri. In the case that uri is already a local path then the same path is returned. """
(scheme, netloc, path, params, query, frag) = urlparse(uri) if (netloc == ''): return(uri) path = '/'.join([netloc, path]) path = re.sub('[^\w\-\.]', '_', path) path = re.sub('__+', '_', path) path = re.sub('[_\.]+$', '', path) path = re.sub('^[_\.]+', '', path) return(path)
<SYSTEM_TASK:> Return the src URI from the dst filepath. <END_TASK> <USER_TASK:> Description: def dst_to_src(self, dst_file): """Return the src URI from the dst filepath. This does not rely on the destination filepath actually existing on the local filesystem, just on pattern matching. Return source URI on success, None on failure. """
m = re.match(self.dst_path + "/(.*)$", dst_file) if (m is None): return(None) rel_path = m.group(1) return(self.src_uri + '/' + rel_path)
<SYSTEM_TASK:> True if the mapping is unsafe for an update. <END_TASK> <USER_TASK:> Description: def unsafe(self): """True if the mapping is unsafe for an update. Applies only to local source. Returns True if the paths for source and destination are the same, or if one is a component of the other path. """
(scheme, netloc, path, params, query, frag) = urlparse(self.src_uri) if (scheme != ''): return(False) s = os.path.normpath(self.src_uri) d = os.path.normpath(self.dst_path) lcp = os.path.commonprefix([s, d]) return(s == lcp or d == lcp)
<SYSTEM_TASK:> Remove ``exclude`` symbols from ``text``. <END_TASK> <USER_TASK:> Description: def remove(text, exclude): """Remove ``exclude`` symbols from ``text``. Example: >>> remove("example text", string.whitespace) 'exampletext' Args: text (str): The text to modify exclude (iterable): The symbols to exclude Returns: ``text`` with ``exclude`` symbols removed """
exclude = ''.join(str(symbol) for symbol in exclude) return text.translate(str.maketrans('', '', exclude))
<SYSTEM_TASK:> Split ``text`` into ``n_columns`` many columns. <END_TASK> <USER_TASK:> Description: def split_columns(text, n_columns): """Split ``text`` into ``n_columns`` many columns. Example: >>> split_columns("example", 2) ['eape', 'xml'] Args: text (str): The text to split n_columns (int): The number of columns to create Returns: List of columns Raises: ValueError: If n_columns is <= 0 or > len(text) """
if n_columns <= 0 or n_columns > len(text): raise ValueError("n_columns must be within the bounds of 1 and text length") return [text[i::n_columns] for i in range(n_columns)]
<SYSTEM_TASK:> Combine ``columns`` into a single string. <END_TASK> <USER_TASK:> Description: def combine_columns(columns): """Combine ``columns`` into a single string. Example: >>> combine_columns(['eape', 'xml']) 'example' Args: columns (iterable): ordered columns to combine Returns: String of combined columns """
columns_zipped = itertools.zip_longest(*columns) return ''.join(x for zipped in columns_zipped for x in zipped if x)
<SYSTEM_TASK:> Generator to yield ngrams in ``text``. <END_TASK> <USER_TASK:> Description: def iterate_ngrams(text, n): """Generator to yield ngrams in ``text``. Example: >>> for ngram in iterate_ngrams("example", 4): ... print(ngram) exam xamp ampl mple Args: text (str): text to iterate over n (int): size of window for iteration Returns: Generator expression to yield the next ngram in the text Raises: ValueError: If n is non positive """
# Window size must be at least 1.
    if n <= 0:
        raise ValueError("n must be a positive integer")
    # NOTE(review): the docstring advertises a generator, but this
    # returns a fully materialized list (still iterable, so the
    # documented example works) -- confirm the intended contract.
    return [text[i: i + n] for i in range(len(text) - n + 1)]
<SYSTEM_TASK:> Group ``text`` into blocks of ``size``. <END_TASK> <USER_TASK:> Description: def group(text, size): """Group ``text`` into blocks of ``size``. Example: >>> group("test", 2) ['te', 'st'] Args: text (str): text to separate size (int): size of groups to split the text into Returns: List of n-sized groups of text Raises: ValueError: If n is non positive """
if size <= 0: raise ValueError("n must be a positive integer") return [text[i:i + size] for i in range(0, len(text), size)]
<SYSTEM_TASK:> Compute the value of the right-hand side of the system of ODEs. <END_TASK> <USER_TASK:> Description: def _evaluate_rhs(cls, funcs, nodes, problem): """ Compute the value of the right-hand side of the system of ODEs. Parameters ---------- basis_funcs : list(function) nodes : numpy.ndarray problem : TwoPointBVPLike Returns ------- evaluated_rhs : list(float) """
# Evaluate each approximating function at the collocation nodes.
        evald_funcs = cls._evaluate_functions(funcs, nodes)
        # Feed the evaluated functions into the problem's RHS together
        # with any model parameters.
        evald_rhs = problem.rhs(nodes, *evald_funcs, **problem.params)
        return evald_rhs
<SYSTEM_TASK:> Return collocation residuals. <END_TASK> <USER_TASK:> Description: def _compute_residuals(self, coefs_array, basis_kwargs, boundary_points, nodes, problem): """ Return collocation residuals. Parameters ---------- coefs_array : numpy.ndarray basis_kwargs : dict problem : TwoPointBVPLike Returns ------- resids : numpy.ndarray """
# Split the flat coefficient array into one array per ODE.
        coefs_list = self._array_to_list(coefs_array, problem.number_odes)
        # Build callable approximations (functions and their derivatives).
        derivs, funcs = self._construct_approximation(basis_kwargs, coefs_list)
        # Residuals measure how badly this approximation violates the
        # ODEs at the nodes and the boundary conditions.
        resids = self._assess_approximation(boundary_points, derivs, funcs,
                                            nodes, problem)
        return resids
<SYSTEM_TASK:> Construct a collection of derivatives and functions that approximate <END_TASK> <USER_TASK:> Description: def _construct_approximation(self, basis_kwargs, coefs_list): """ Construct a collection of derivatives and functions that approximate the solution to the boundary value problem. Parameters ---------- basis_kwargs : dict(str: ) coefs_list : list(numpy.ndarray) Returns ------- basis_derivs : list(function) basis_funcs : list(function) """
# Derivatives and functions are built over the same basis and the
        # same coefficients, so the pair stays mutually consistent.
        derivs = self._construct_derivatives(coefs_list, **basis_kwargs)
        funcs = self._construct_functions(coefs_list, **basis_kwargs)
        return derivs, funcs
<SYSTEM_TASK:> Return a list of derivatives given a list of coefficients. <END_TASK> <USER_TASK:> Description: def _construct_derivatives(self, coefs, **kwargs): """Return a list of derivatives given a list of coefficients."""
# One derivative callable per coefficient array, all sharing the
        # same basis keyword arguments.
        return [self.basis_functions.derivatives_factory(coef, **kwargs)
                for coef in coefs]
<SYSTEM_TASK:> Return a list of functions given a list of coefficients. <END_TASK> <USER_TASK:> Description: def _construct_functions(self, coefs, **kwargs): """Return a list of functions given a list of coefficients."""
# One solution-function callable per coefficient array, all sharing
        # the same basis keyword arguments.
        return [self.basis_functions.functions_factory(coef, **kwargs)
                for coef in coefs]
<SYSTEM_TASK:> Construct a representation of the solution to the boundary value problem. <END_TASK> <USER_TASK:> Description: def _solution_factory(self, basis_kwargs, coefs_array, nodes, problem, result): """ Construct a representation of the solution to the boundary value problem. Parameters ---------- basis_kwargs : dict(str : ) coefs_array : numpy.ndarray problem : TwoPointBVPLike result : OptimizeResult Returns ------- solution : SolutionLike """
soln_coefs = self._array_to_list(coefs_array, problem.number_odes) soln_derivs = self._construct_derivatives(soln_coefs, **basis_kwargs) soln_funcs = self._construct_functions(soln_coefs, **basis_kwargs) soln_residual_func = self._interior_residuals_factory(soln_derivs, soln_funcs, problem) solution = solutions.Solution(basis_kwargs, soln_funcs, nodes, problem, soln_residual_func, result) return solution
<SYSTEM_TASK:> Solve a boundary value problem using the collocation method. <END_TASK> <USER_TASK:> Description: def solve(self, basis_kwargs, boundary_points, coefs_array, nodes, problem, **solver_options): """ Solve a boundary value problem using the collocation method. Parameters ---------- basis_kwargs : dict Dictionary of keyword arguments used to build basis functions. coefs_array : numpy.ndarray Array of coefficients for basis functions defining the initial condition. problem : bvp.TwoPointBVPLike A two-point boundary value problem (BVP) to solve. solver_options : dict Dictionary of options to pass to the non-linear equation solver. Return ------ solution: solutions.SolutionLike An instance of the SolutionLike class representing the solution to the two-point boundary value problem (BVP) Notes ----- """
result = optimize.root(self._compute_residuals, x0=coefs_array, args=(basis_kwargs, boundary_points, nodes, problem), **solver_options) solution = self._solution_factory(basis_kwargs, result.x, nodes, problem, result) return solution
<SYSTEM_TASK:> The Last-Modified data in ISO8601 syntax, Z notation. <END_TASK> <USER_TASK:> Description: def datetime_to_str(dt='now', no_fractions=False): """The Last-Modified data in ISO8601 syntax, Z notation. The lastmod is stored as unix timestamp which is already in UTC. At present this code will return 6 decimal digits if any fraction of a second is given. It would perhaps be better to return only the number of decimal digits necessary, up to a resolution of 1 microsecond. Special cases: - Returns datetime str for now if no parameter given. - Returns None if None is supplied. """
if (dt is None): return None elif (dt == 'now'): dt = time.time() if (no_fractions): dt = int(dt) else: dt += 0.0000001 # improve rounding to microseconds return datetime.utcfromtimestamp(dt).isoformat() + 'Z'
<SYSTEM_TASK:> Set timestamp from an W3C Datetime Last-Modified value. <END_TASK> <USER_TASK:> Description: def str_to_datetime(s, context='datetime'): """Set timestamp from an W3C Datetime Last-Modified value. The sitemaps.org specification says that <lastmod> values must comply with the W3C Datetime format (http://www.w3.org/TR/NOTE-datetime). This is a restricted subset of ISO8601. In particular, all forms that include a time must include a timezone indication so there is no notion of local time (which would be tricky on the web). The forms allowed are: Year: YYYY (eg 1997) Year and month: YYYY-MM (eg 1997-07) Complete date: YYYY-MM-DD (eg 1997-07-16) Complete date plus hours and minutes: YYYY-MM-DDThh:mmTZD (eg 1997-07-16T19:20+01:00) Complete date plus hours, minutes and seconds: YYYY-MM-DDThh:mm:ssTZD (eg 1997-07-16T19:20:30+01:00) Complete date plus hours, minutes, seconds and a decimal fraction of a second YYYY-MM-DDThh:mm:ss.sTZD (eg 1997-07-16T19:20:30.45+01:00) where: TZD = time zone designator (Z or +hh:mm or -hh:mm) We do not anticipate the YYYY and YYYY-MM forms being used but interpret them as YYYY-01-01 and YYYY-MM-01 respectively. All dates are interpreted as having time 00:00:00.0 UTC. Datetimes not specified to the level of seconds are intepreted as 00.0 seconds. """
t = None if (s is None): return(t) if (s == ''): raise ValueError('Attempt to set empty %s' % (context)) # Make a date into a full datetime m = re.match(r"\d\d\d\d(\-\d\d(\-\d\d)?)?$", s) if (m is not None): if (m.group(1) is None): s += '-01-01' elif (m.group(2) is None): s += '-01' s += 'T00:00:00Z' # Now have datetime with timezone info m = re.match(r"(.*\d{2}:\d{2}:\d{2})(\.\d+)([^\d].*)?$", s) # Chop out fractional seconds if present fractional_seconds = 0 if (m is not None): s = m.group(1) if (m.group(3) is not None): s += m.group(3) fractional_seconds = float(m.group(2)) # Now check that only allowed formats supplied (the parse # function in dateutil is rather lax) and separate out # timezone information to be handled separately # # Seems that one should be able to handle timezone offset # with dt.tzinfo module but this has variation in behavior # between python 2.6 and 2.7... so do here for now m = re.match(r"(\d\d\d\d\-\d\d\-\d\dT\d\d:\d\d(:\d\d)?)(Z|([+-])" "(\d\d):(\d\d))$", s) if (m is None): raise ValueError("Bad datetime format (%s)" % s) str = m.group(1) + 'Z' dt = dateutil_parser.parse(str) offset_seconds = 0 if (m.group(3) != 'Z'): hh = int(m.group(5)) mm = int(m.group(6)) if (hh > 23 or mm > 59): raise ValueError("Bad timezone offset (%s)" % s) offset_seconds = hh * 3600 + mm * 60 if (m.group(4) == '-'): offset_seconds = -offset_seconds # timetuple() ignores timezone information so we have to add in # the offset here, and any fractional component of the seconds return(timegm(dt.timetuple()) + offset_seconds + fractional_seconds)
<SYSTEM_TASK:> Returns True if any of the required address components is missing <END_TASK> <USER_TASK:> Description: def need_geocoding(self): """ Returns True if any of the required address components is missing """
need_geocoding = False for attribute, component in self.required_address_components.items(): if not getattr(self, attribute): need_geocoding = True break # skip extra loops return need_geocoding
<SYSTEM_TASK:> Do a backend geocoding if needed <END_TASK> <USER_TASK:> Description: def geocode(self, commit=False, force=False): """ Do a backend geocoding if needed """
if self.need_geocoding() or force: result = get_cached(getattr(self, self.geocoded_by), provider='google') if result.status == 'OK': for attribute, components in self.required_address_components.items(): for component in components: if not getattr(self, attribute) or force: attr_val = getattr(result, component, None) if attr_val: setattr(self, attribute, attr_val) if commit: self.save()
<SYSTEM_TASK:> Look for link with specified rel, return href from it or None. <END_TASK> <USER_TASK:> Description: def link_href(self, rel): """Look for link with specified rel, return href from it or None."""
link = self.link(rel) if (link is not None): link = link['href'] return(link)
<SYSTEM_TASK:> Set capability name in md. <END_TASK> <USER_TASK:> Description: def default_capability(self): """Set capability name in md. Every ResourceSync document should have the top-level capability attributes. """
if (self.capability_name is not None):
            # setdefault leaves any already-present capability untouched.
            self.md.setdefault('capability', self.capability_name)
<SYSTEM_TASK:> Add a resource or an iterable collection of resources to this container. <END_TASK> <USER_TASK:> Description: def add(self, resource): """Add a resource or an iterable collection of resources to this container. Must be implemented in derived class. """
if isinstance(resource, collections.Iterable): for r in resource: self.resources.append(r) else: self.resources.append(resource)
<SYSTEM_TASK:> Remove all resources with timestamp earlier than that given. <END_TASK> <USER_TASK:> Description: def prune_before(self, timestamp): """Remove all resources with timestamp earlier than that given. Returns the number of entries removed. Will raise an exception if there are any entries without a timestamp. """
n = 0 pruned = [] for r in self.resources: if (r.timestamp is None): raise Exception("Entry %s has no timestamp" % (r.uri)) elif (r.timestamp >= timestamp): pruned.append(r) else: n += 1 self.resources = pruned return(n)
<SYSTEM_TASK:> Remove all but the last entry for a given resource URI. <END_TASK> <USER_TASK:> Description: def prune_dupes(self): """Remove all but the last entry for a given resource URI. Returns the number of entries removed. Also removes all entries for a given URI where the first entry is a create and the last entry is a delete. """
n = 0 pruned1 = [] seen = set() deletes = {} for r in reversed(self.resources): if (r.uri in seen): n += 1 if (r.uri in deletes): deletes[r.uri] = r.change else: pruned1.append(r) seen.add(r.uri) if (r.change == 'deleted'): deletes[r.uri] = r.change # go through all deletes and prune if first was create pruned2 = [] for r in reversed(pruned1): if (r.uri in deletes and deletes[r.uri] == 'created'): n += 1 else: pruned2.append(r) self.resources = pruned2 return(n)
<SYSTEM_TASK:> Return datetime string for use with time attributes. <END_TASK> <USER_TASK:> Description: def _str_datetime_now(self, x=None): """Return datetime string for use with time attributes. Handling depends on input: 'now' - returns datetime for now number - assume datetime values, generate string other - no change, return same value """
if (x == 'now'): # Now, this is wht datetime_to_str() with no arg gives return(datetime_to_str()) try: # Test for number junk = x + 0.0 return datetime_to_str(x) except TypeError: # Didn't look like a number, treat as string return x
<SYSTEM_TASK:> Creates and returns a new randomly generated map <END_TASK> <USER_TASK:> Description: def map(self, width, height): """ Creates and returns a new randomly generated map """
template = ti.load(os.path.join(script_dir, 'assets', 'template.tmx'))['map0'] #template.set_view(0, 0, template.px_width, template.px_height) template.set_view(0, 0, width*template.tw, height*template.th) # TODO: Save the generated map. #epoch = int(time.time()) #filename = 'map_' + str(epoch) + '.tmx' # Draw borders border_x = template.cells[width] for y in xrange(0,height+1): border_x[y].tile = template.cells[0][0].tile for x in xrange(0,width): template.cells[x][height].tile = template.cells[0][0].tile # Start within borders #self.recursive_division(template.cells, 3, (template.px_width/template.tw)-1, (template.px_height/template.th)-1, 0, 0) self.recursive_division(template.cells, 3, width, height, 0, 0) return template
<SYSTEM_TASK:> Convert node data from nested-list to sorted dict. <END_TASK> <USER_TASK:> Description: def make_node_dict(outer_list, sort="zone"): """Convert node data from nested-list to sorted dict."""
raw_dict = {} x = 1 for inner_list in outer_list: for node in inner_list: raw_dict[x] = node x += 1 if sort == "name": # sort by provider - name srt_dict = OrderedDict(sorted(raw_dict.items(), key=lambda k: (k[1].cloud, k[1].name.lower()))) else: # sort by provider - zone - name srt_dict = OrderedDict(sorted(raw_dict.items(), key=lambda k: (k[1].cloud, k[1].zone, k[1].name.lower()))) x = 1 node_dict = {} for i, v in srt_dict.items(): node_dict[x] = v x += 1 return node_dict
<SYSTEM_TASK:> Read providers from configfile and de-duplicate it. <END_TASK> <USER_TASK:> Description: def config_prov(config): """Read providers from configfile and de-duplicate it."""
try: providers = [e.strip() for e in (config['info'] ['providers']).split(',')] except KeyError as e: print("Error reading config item: {}".format(e)) sys.exit() providers = list(OrderedDict.fromkeys(providers)) return providers
<SYSTEM_TASK:> Read credentials from configfile. <END_TASK> <USER_TASK:> Description: def config_cred(config, providers): """Read credentials from configfile."""
expected = ['aws', 'azure', 'gcp', 'alicloud'] cred = {} to_remove = [] for item in providers: if any(item.startswith(itemb) for itemb in expected): try: cred[item] = dict(list(config[item].items())) except KeyError as e: print("No credentials section in config file for {} -" " provider will be skipped.".format(e)) to_remove.append(item) else: print("Unsupported provider: '{}' listed in config - ignoring" .format(item)) to_remove.append(item) return cred, to_remove
<SYSTEM_TASK:> Provide access to the complete hash string. <END_TASK> <USER_TASK:> Description: def hash(self): """Provide access to the complete hash string. The hash string may have zero or more hash values with appropriate prefixes. All hash values are assumed to be strings """
hashvals = [] if (self.md5 is not None): hashvals.append('md5:' + self.md5) if (self.sha1 is not None): hashvals.append('sha-1:' + self.sha1) if (self.sha256 is not None): hashvals.append('sha-256:' + self.sha256) if (len(hashvals) > 0): return(' '.join(hashvals)) return(None)
<SYSTEM_TASK:> Parse space separated set of values. <END_TASK> <USER_TASK:> Description: def hash(self, hash): """Parse space separated set of values. See specification at: http://tools.ietf.org/html/draft-snell-atompub-link-extensions-09 which defines many types. We implement md5, sha-1, sha-256 """
self.md5 = None self.sha1 = None self.sha256 = None if (hash is None): return hash_seen = set() errors = [] for entry in hash.split(): (hash_type, value) = entry.split(':', 1) if (hash_type in hash_seen): errors.append("Ignored duplicate hash type %s" % (hash_type)) else: hash_seen.add(hash_type) if (hash_type == 'md5'): self.md5 = value elif (hash_type == 'sha-1'): self.sha1 = value elif (hash_type == 'sha-256'): self.sha256 = value else: errors.append("Ignored unsupported hash type (%s)" % (hash_type)) if (len(errors) > 0): raise ValueError(". ".join(errors))
<SYSTEM_TASK:> Create an link with specified rel. <END_TASK> <USER_TASK:> Description: def link_add(self, rel, href, **atts): """Create an link with specified rel. Will add a link even if one with that rel already exists. """
self.link_set(rel, href, allow_duplicates=True, **atts)
<SYSTEM_TASK:> Equality or near equality test for resources. <END_TASK> <USER_TASK:> Description: def equal(self, other, delta=0.0): """Equality or near equality test for resources. Equality means: 1. same uri, AND 2. same timestamp WITHIN delta if specified for either, AND 3. same md5 if specified for both, AND 4. same length if specified for both """
if (other is None): return False if (self.uri != other.uri): return(False) if (self.timestamp is not None or other.timestamp is not None): # not equal if only one timestamp specified if (self.timestamp is None or other.timestamp is None or abs(self.timestamp - other.timestamp) >= delta): return(False) if ((self.md5 is not None and other.md5 is not None) and self.md5 != other.md5): return(False) if ((self.length is not None and other.length is not None) and self.length != other.length): return(False) return(True)
def ChiSquared(target_frequency):
    """Score a text by comparing its frequency distribution against another.

    Note:
        English frequency ngrams are capital letters, so any text scored
        with them must also be all capitals for correct results.

    Example:
        >>> fitness = ChiSquared(english.unigrams)
        >>> fitness("ABC")
        -32.2

    Args:
        target_frequency (dict): symbol to frequency mapping of the
            distribution to compare with
    """
    def inner(candidate):
        # Flatten any iterable of symbols into a single string, then
        # negate chi-squared so that a better fit yields a higher score.
        joined = ''.join(candidate)
        return -chi_squared(frequency_analyze(joined), target_frequency)
    return inner
def my_resource_list():
    """Simulate the generator used by the simulator.

    Builds a ResourceList over the module-level my_resources fixture and
    applies the module-level max_sitemap_entries limit.
    """
    resource_list = ResourceList(resources=iter(my_resources),
                                 count=len(my_resources))
    resource_list.max_sitemap_entries = max_sitemap_entries
    return resource_list
def add(self, resource, replace=False):
    """Add a single resource, checking for duplicate URIs.

    If a resource with the same uri is already present it is replaced
    when replace=True, otherwise ResourceListDupeError is raised.
    """
    uri = resource.uri
    for i, r in enumerate(self):
        if uri == r.uri:
            if replace:
                # FIX: assign into the list by index. The old code did
                # `r = resource`, which only rebound the loop variable
                # and left the stored resource unchanged.
                self[i] = resource
                return
            raise ResourceListDupeError(
                "Attempt to add resource already in resource_list")
    # Didn't find it in the list, add to the end.
    self.append(resource)
def add(self, resource, replace=False):
    """Add a resource or an iterable collection of resources.

    Will throw a ValueError if the resource (ie. same uri) already
    exists in the ResourceList, unless replace=True.
    """
    # FIX: collections.Iterable was deprecated in Python 3.3 and removed
    # in Python 3.10; collections.abc.Iterable is the supported spelling.
    if isinstance(resource, collections.abc.Iterable):
        for r in resource:
            self.resources.add(r, replace)
    else:
        self.resources.add(resource, replace)
<SYSTEM_TASK:> Compare this ResourceList object with that specified as src. <END_TASK> <USER_TASK:> Description: def compare(self, src): """Compare this ResourceList object with that specified as src. The parameter src must also be a ResourceList object, it is assumed to be the source, and the current object is the destination. This is written to work for any objects in self and src, provided that the == operator can be used to compare them. The functioning of this method depends on the iterators for self and src providing access to the resource objects in URI order. Returns a 4-tuple of ResourceList objects: (same, updated, deleted, created). """
# Classic merge-join over two URI-ordered streams: advance whichever
# cursor has the smaller URI, advance both on a URI match.
dst_iter = iter(self.resources)
src_iter = iter(src.resources)
same = ResourceList()
updated = ResourceList()
deleted = ResourceList()
created = ResourceList()
# Prime one cursor from each stream; None marks stream exhaustion.
dst_cur = next(dst_iter, None)
src_cur = next(src_iter, None)
while ((dst_cur is not None) and (src_cur is not None)):
    # Both streams still have entries: classify the pair at the cursors.
    if (dst_cur.uri == src_cur.uri):
        # Same URI on both sides: unchanged if fully equal, else updated.
        if (dst_cur == src_cur):
            same.add(dst_cur)
        else:
            updated.add(src_cur)
        dst_cur = next(dst_iter, None)
        src_cur = next(src_iter, None)
    elif (not src_cur or dst_cur.uri < src_cur.uri):
        # URI present only in destination => deleted at the source.
        # NOTE(review): inside this loop both cursors are non-None, so
        # the `not src_cur` guard appears unreachable — kept as-is.
        deleted.add(dst_cur)
        dst_cur = next(dst_iter, None)
    elif (not dst_cur or dst_cur.uri > src_cur.uri):
        # URI present only in source => newly created.
        created.add(src_cur)
        src_cur = next(src_iter, None)
    else:
        raise Exception("this should not be possible")
# Drain whichever stream still has leftovers: remaining destination
# entries were deleted, remaining source entries were created.
while (dst_cur is not None):
    deleted.add(dst_cur)
    dst_cur = next(dst_iter, None)
while (src_cur is not None):
    created.add(src_cur)
    src_cur = next(src_iter, None)
# have now gone through both lists
return(same, updated, deleted, created)
def hashes(self):
    """Return the set of hash types used in this resource_list."""
    used = set()
    if self.resources is None:
        return used
    # Collect the union of hash types present on any resource.
    for res in self:
        if res.md5 is not None:
            used.add('md5')
        if res.sha1 is not None:
            used.add('sha-1')
        if res.sha256 is not None:
            used.add('sha-256')
    return used
def _build_sliced_filepath(filename, slice_count):
    """Append slice_count to the end of a filename (before the extension)."""
    root, ext = os.path.splitext(filename)
    sliced = '{0}{1}{2}'.format(root, slice_count, ext)
    # Run the result through the phantomcss naming helper so diffs are
    # handled consistently with un-sliced screenshots.
    return _build_filepath_for_phantomcss(sliced)
<SYSTEM_TASK:> Prepare screenshot filename for use with phantomcss. <END_TASK> <USER_TASK:> Description: def _build_filepath_for_phantomcss(filepath): """ Prepare screenshot filename for use with phantomcss. ie, append 'diff' to the end of the file if a baseline exists """
try: if os.path.exists(filepath): new_root = '.'.join((os.path.splitext(filepath)[0], 'diff')) ext = os.path.splitext(filepath)[1] diff_filepath = ''.join((new_root, ext)) if os.path.exists(diff_filepath): print 'removing stale diff: {0}'.format(diff_filepath) os.remove(diff_filepath) return diff_filepath else: return filepath except OSError, e: print e
<SYSTEM_TASK:> Build a useful filename for an image from the screenshot json metadata <END_TASK> <USER_TASK:> Description: def _build_filename_from_browserstack_json(j): """ Build a useful filename for an image from the screenshot json metadata """
filename = '' device = j['device'] if j['device'] else 'Desktop' if j['state'] == 'done' and j['image_url']: detail = [device, j['os'], j['os_version'], j['browser'], j['browser_version'], '.jpg'] filename = '_'.join(item.replace(" ", "_") for item in detail if item) else: print 'screenshot timed out, ignoring this result' return filename
def _long_image_slice(in_filepath, out_filepath, slice_size):
    """Slice an image into parts slice_size pixels tall.

    Each slice is saved via _build_sliced_filepath(out_filepath, n)
    with n counting from 1; the final slice runs to the image bottom.
    """
    # FIX: converted Python-2-only print statement to print().
    print('slicing image: {0}'.format(in_filepath))
    img = Image.open(in_filepath)
    width, height = img.size
    upper = 0
    left = 0
    # FIX: force true division before ceil. With integer operands on
    # Python 2, height / slice_size floor-divides first so ceil never
    # rounded up and the last slice silently absorbed the remainder
    # beyond slice_size.
    slices = int(math.ceil(height / float(slice_size)))
    count = 1
    for _ in range(slices):  # renamed from `slice`, which shadowed the builtin
        # If we are at the end, set the lower bound to the image bottom.
        if count == slices:
            lower = height
        else:
            lower = int(count * slice_size)
        # The bounding box selects the horizontal band for this slice.
        bbox = (left, upper, width, lower)
        working_slice = img.crop(bbox)
        upper += slice_size
        # Save the slice under a numbered filename.
        new_filepath = _build_sliced_filepath(out_filepath, count)
        working_slice.save(new_filepath)
        count += 1
<SYSTEM_TASK:> delete files in dir that match pattern <END_TASK> <USER_TASK:> Description: def _purge(dir, pattern, reason=''): """ delete files in dir that match pattern """
for f in os.listdir(dir): if re.search(pattern, f): print "Purging file {0}. {1}".format(f, reason) os.remove(os.path.join(dir, f))
def crack(ciphertext, *fitness_functions, ntrials=30, nswaps=3000):
    """Break ``ciphertext`` using hill climbing.

    Note:
        Currently ntrials and nswaps default to magic numbers. Generally
        the trend is: the longer the text, the fewer trials you need,
        because hill climbing reaches the best answer faster. Because
        randomness is involved, the correct decryption may occasionally
        not be found; in that case simply run the code again.

    Example:
        >>> decryptions = crack("XUOOB", fitness.english.quadgrams)
        >>> print(decryptions[0])
        HELLO

    Args:
        ciphertext (str): The text to decrypt
        *fitness_functions (variable length argument list): Functions to
            score decryption with

    Keyword Args:
        ntrials (int): The number of times to run the hill climbing algorithm
        nswaps (int): The number of rounds to find a local maximum

    Returns:
        Sorted list of decryptions

    Raises:
        ValueError: If nswaps or ntrials are not positive integers
        ValueError: If no fitness_functions are given
    """
    if ntrials <= 0 or nswaps <= 0:
        raise ValueError("ntrials and nswaps must be positive integers")

    def _swap_step(key):
        # Inner climb step: exchange two key positions and rescore.
        i, j = random.sample(range(len(key)), 2)
        key[i], key[j] = key[j], key[i]
        plaintext = decrypt(key, ciphertext)
        key_score = score(plaintext, *fitness_functions)
        return key, key_score, Decryption(plaintext, ''.join(key), key_score)

    def _restart_step(key):
        # Outer climb step: rerun the inner climb from a freshly
        # shuffled starting key each trial.
        random.shuffle(key)
        best_key, best_score, outputs = hill_climb(nswaps, key[:], _swap_step)
        return best_key, best_score, outputs[-1]

    # The last item of each inner climb is its best decryption; sort all
    # trials so the highest-scoring decryption comes first.
    _, _, decryptions = hill_climb(ntrials, list(string.ascii_uppercase), _restart_step)
    return sorted(decryptions, reverse=True)
def set_heading(self, value):
    """Set the robot's absolute heading; value can be between 0 and 359."""
    cmd = request.SetHeading(self.seq, value)
    return self.write(cmd)
def set_back_led_output(self, value):
    """Set the back LED brightness; value can be between 0x00 and 0xFF."""
    cmd = request.SetBackLEDOutput(self.seq, value)
    return self.write(cmd)
def roll(self, speed, heading, state=1):
    """Command the robot to roll.

    speed can have value between 0x00 and 0xFF
    heading can have value between 0 and 359
    """
    cmd = request.Roll(self.seq, speed, heading, state)
    return self.write(cmd)