text_prompt: string (lengths 100 to 17.7k)
code_prompt: string (lengths 7 to 9.86k)
<SYSTEM_TASK:> General purpose export method, gets file type <END_TASK> <USER_TASK:> Description: def export(self, folder_path, format=None): """ General purpose export method, gets file type from filepath extension Valid output formats currently are: Trackline: trackline or trkl or *.trkl Shapefile: shapefile or shape or shp or *.shp NetCDF: netcdf or nc or *.nc """
if format is None:
    raise ValueError("Must export to a specific format, no format specified.")

format = format.lower()

if format == "trackline" or format[-4:] == "trkl":
    ex.Trackline.export(folder=folder_path, particles=self.particles, datetimes=self.datetimes)
elif format == "shape" or format == "shapefile" or format[-3:] == "shp":
    ex.GDALShapefile.export(folder=folder_path, particles=self.particles, datetimes=self.datetimes)
elif format == "netcdf" or format[-2:] == "nc":
    ex.NetCDF.export(folder=folder_path, particles=self.particles, datetimes=self.datetimes, summary=str(self))
elif format == "pickle" or format[-3:] == "pkl" or format[-6:] == "pickle":
    ex.Pickle.export(folder=folder_path, particles=self.particles, datetimes=self.datetimes)
<SYSTEM_TASK:> Parse passed arguments from shell. <END_TASK> <USER_TASK:> Description: def _parse(args): """Parse passed arguments from shell."""
ordered = []
opt_full = dict()
opt_abbrev = dict()

args = args + ['']  # Avoid out of range
i = 0
while i < len(args) - 1:
    arg = args[i]
    arg_next = args[i+1]
    if arg.startswith('--'):
        if arg_next.startswith('-'):
            raise ValueError('{} lacks value'.format(arg))
        else:
            opt_full[arg[2:]] = arg_next
            i += 2
    elif arg.startswith('-'):
        if arg_next.startswith('-'):
            raise ValueError('{} lacks value'.format(arg))
        else:
            opt_abbrev[arg[1:]] = arg_next
            i += 2
    else:
        ordered.append(arg)
        i += 1
return ordered, opt_full, opt_abbrev
<SYSTEM_TASK:> Construct optional args' key and abbreviated key from signature. <END_TASK> <USER_TASK:> Description: def _construct_optional(params): """Construct optional args' key and abbreviated key from signature."""
args = []
filtered = {key: arg.default for key, arg in params.items()
            if arg.default != inspect._empty}
for key, default in filtered.items():
    arg = OptionalArg(full=key, abbrev=key[0].lower(), default=default)
    args.append(arg)

args_full, args_abbrev = dict(), dict()

# Resolve conflicts
known_count = defaultdict(int)
for arg in args:
    args_full[arg.full] = arg
    if known_count[arg.abbrev] == 0:
        args_abbrev[arg.abbrev] = arg
    elif known_count[arg.abbrev] == 1:
        new_abbrev = arg.abbrev.upper()
        args_full[arg.full] = OptionalArg(full=arg.full, abbrev=new_abbrev, default=arg.default)
        args_abbrev[new_abbrev] = args_full[arg.full]
    else:
        new_abbrev = arg.abbrev.upper() + str(known_count[arg.abbrev])
        args_full[arg.full] = OptionalArg(full=arg.full, abbrev=new_abbrev, default=arg.default)
        args_abbrev[new_abbrev] = args_full[arg.full]
    known_count[arg.abbrev] += 1
return args_full, args_abbrev
<SYSTEM_TASK:> Provide keyboard access for the element, if it does not already have it. <END_TASK> <USER_TASK:> Description: def _keyboard_access(self, element): """ Provide keyboard access for the element, if it does not already have it. :param element: The element. :type element: hatemile.util.html.htmldomelement.HTMLDOMElement """
# pylint: disable=no-self-use
if not element.has_attribute('tabindex'):
    tag = element.get_tag_name()
    if (tag == 'A') and (not element.has_attribute('href')):
        element.set_attribute('tabindex', '0')
    elif (
        (tag != 'A')
        and (tag != 'INPUT')
        and (tag != 'BUTTON')
        and (tag != 'SELECT')
        and (tag != 'TEXTAREA')
    ):
        element.set_attribute('tabindex', '0')
<SYSTEM_TASK:> Add a type of event in element. <END_TASK> <USER_TASK:> Description: def _add_event_in_element(self, element, event): """ Add a type of event in element. :param element: The element. :type element: hatemile.util.html.htmldomelement.HTMLDOMElement :param event: The type of event. :type event: str """
if not self.main_script_added:
    self._generate_main_scripts()
if self.script_list is not None:
    self.id_generator.generate_id(element)
    self.script_list.append_text(
        event
        + "Elements.push('"
        + element.get_attribute('id')
        + "');"
    )
<SYSTEM_TASK:> Read lines from process.stdout and echo them to sys.stdout. <END_TASK> <USER_TASK:> Description: def tee(process, filter): """Read lines from process.stdout and echo them to sys.stdout. Returns a list of lines read. Lines are not newline terminated. The 'filter' is a callable which is invoked for every line, receiving the line as argument. If the filter returns True, the line is echoed to sys.stdout. """
lines = []
while True:
    line = process.stdout.readline()
    if line:
        if sys.version_info[0] >= 3:
            line = decode(line)
        stripped_line = line.rstrip()
        if filter(stripped_line):
            sys.stdout.write(line)
        lines.append(stripped_line)
    elif process.poll() is not None:
        process.stdout.close()
        break
return lines
<SYSTEM_TASK:> Read lines from process.stderr and echo them to sys.stderr. <END_TASK> <USER_TASK:> Description: def tee2(process, filter): """Read lines from process.stderr and echo them to sys.stderr. The 'filter' is a callable which is invoked for every line, receiving the line as argument. If the filter returns True, the line is echoed to sys.stderr. """
while True:
    line = process.stderr.readline()
    if line:
        if sys.version_info[0] >= 3:
            line = decode(line)
        stripped_line = line.rstrip()
        if filter(stripped_line):
            sys.stderr.write(line)
    elif process.returncode is not None:
        process.stderr.close()
        break
<SYSTEM_TASK:> Run 'args' and return a two-tuple of exit code and lines read. <END_TASK> <USER_TASK:> Description: def run(args, echo=True, echo2=True, shell=False, cwd=None, env=None): """Run 'args' and return a two-tuple of exit code and lines read. If 'echo' is True, the stdout stream is echoed to sys.stdout. If 'echo2' is True, the stderr stream is echoed to sys.stderr. The 'echo' and 'echo2' arguments may be callables, in which case they are used as tee filters. If 'shell' is True, args are executed via the shell. The 'cwd' argument causes the child process to be executed in cwd. The 'env' argument allows to pass a dict replacing os.environ. """
if not callable(echo):
    echo = On() if echo else Off()
if not callable(echo2):
    echo2 = On() if echo2 else Off()
process = Popen(
    args, stdout=PIPE, stderr=PIPE, shell=shell, cwd=cwd, env=env
)
with background_thread(tee2, (process, echo2)):
    lines = tee(process, echo)
return process.returncode, lines
<SYSTEM_TASK:> Registers the given representer class with this registry, using <END_TASK> <USER_TASK:> Description: def register_representer_class(self, representer_class): """ Registers the given representer class with this registry, using its MIME content type as the key. """
if representer_class in self.__rpr_classes.values():
    raise ValueError('The representer class "%s" has already been '
                     'registered.' % representer_class)
self.__rpr_classes[representer_class.content_type] = representer_class
if issubclass(representer_class, MappingResourceRepresenter):
    # Create and hold a mapping registry for the registered resource
    # representer class.
    mp_reg = representer_class.make_mapping_registry()
    self.__mp_regs[representer_class.content_type] = mp_reg
<SYSTEM_TASK:> Registers a representer factory for the given combination of resource <END_TASK> <USER_TASK:> Description: def register(self, resource_class, content_type, configuration=None): """ Registers a representer factory for the given combination of resource class and content type. :param configuration: representer configuration. A default instance will be created if this is not given. :type configuration: :class:`everest.representers.config.RepresenterConfiguration` """
if not issubclass(resource_class, Resource):
    raise ValueError('Representers can only be registered for '
                     'resource classes (got: %s).' % resource_class)
if not content_type in self.__rpr_classes:
    raise ValueError('No representer class has been registered for '
                     'content type "%s".' % content_type)
# Register a factory resource -> representer for the given combination
# of resource class and content type.
rpr_cls = self.__rpr_classes[content_type]
self.__rpr_factories[(resource_class, content_type)] = \
    rpr_cls.create_from_resource_class
if issubclass(rpr_cls, MappingResourceRepresenter):
    # Create or update an attribute mapping.
    mp_reg = self.__mp_regs[content_type]
    mp = mp_reg.find_mapping(resource_class)
    if mp is None:
        # No mapping was registered yet for this resource class or any
        # of its base classes; create a new one on the fly.
        new_mp = mp_reg.create_mapping(resource_class, configuration)
    elif not configuration is None:
        if resource_class is mp.mapped_class:
            # We have additional configuration for an existing mapping.
            mp.configuration.update(configuration)
            new_mp = mp
        else:
            # We have a derived class with additional configuration.
            new_mp = mp_reg.create_mapping(
                resource_class, configuration=mp.configuration)
            new_mp.configuration.update(configuration)
    elif not resource_class is mp.mapped_class:
        # We have a derived class without additional configuration.
        new_mp = mp_reg.create_mapping(resource_class,
                                       configuration=mp.configuration)
    else:
        # We found a dynamically created mapping for the right class
        # without additional configuration; do not create a new one.
        new_mp = None
    if not new_mp is None:
        # Store the new (or updated) mapping.
        mp_reg.set_mapping(new_mp)
<SYSTEM_TASK:> Creates a representer for the given combination of resource and <END_TASK> <USER_TASK:> Description: def create(self, resource_class, content_type): """ Creates a representer for the given combination of resource and content type. This will also find representer factories that were registered for a base class of the given resource. """
rpr_fac = self.__find_representer_factory(resource_class, content_type)
if rpr_fac is None:
    # Register a representer with default configuration on the fly
    # and look again.
    self.register(resource_class, content_type)
    rpr_fac = self.__find_representer_factory(resource_class,
                                              content_type)
return rpr_fac(resource_class)
<SYSTEM_TASK:> Get the GPG keyring directory for a particular application. <END_TASK> <USER_TASK:> Description: def get_gpg_home( appname, config_dir=None ): """ Get the GPG keyring directory for a particular application. Return the path. """
assert is_valid_appname(appname)
config_dir = get_config_dir( config_dir )
path = os.path.join( config_dir, "gpgkeys", appname )
return path
<SYSTEM_TASK:> Make a temporary directory to hold GPG keys that are not <END_TASK> <USER_TASK:> Description: def make_gpg_tmphome( prefix=None, config_dir=None ): """ Make a temporary directory to hold GPG keys that are not going to be stored to the application's keyring. """
if prefix is None:
    prefix = "tmp"

config_dir = get_config_dir( config_dir )
tmppath = os.path.join( config_dir, "tmp" )
if not os.path.exists( tmppath ):
    os.makedirs( tmppath, 0700 )

tmpdir = tempfile.mkdtemp( prefix=("%s-" % prefix), dir=tmppath )
return tmpdir
<SYSTEM_TASK:> Store a key locally to our app keyring. <END_TASK> <USER_TASK:> Description: def gpg_stash_key( appname, key_bin, config_dir=None, gpghome=None ): """ Store a key locally to our app keyring. Does NOT put it into a blockchain ID Return the key ID on success Return None on error """
assert is_valid_appname(appname)
key_bin = str(key_bin)
assert len(key_bin) > 0

if gpghome is None:
    config_dir = get_config_dir( config_dir )
    keydir = make_gpg_home( appname, config_dir=config_dir )
else:
    keydir = gpghome

gpg = gnupg.GPG( homedir=keydir )
res = gpg.import_keys( key_bin )
try:
    assert res.count == 1, "Failed to store key (%s)" % res
except AssertionError, e:
    log.exception(e)
    log.error("Failed to store key to %s" % keydir)
    log.debug("res: %s" % res.__dict__)
    log.debug("(%s)\n%s" % (len(key_bin), key_bin))
    return None

return res.fingerprints[0]
<SYSTEM_TASK:> Remove a public key locally from our local app keyring <END_TASK> <USER_TASK:> Description: def gpg_unstash_key( appname, key_id, config_dir=None, gpghome=None ): """ Remove a public key locally from our local app keyring Return True on success Return False on error """
assert is_valid_appname(appname)
if gpghome is None:
    config_dir = get_config_dir( config_dir )
    keydir = get_gpg_home( appname, config_dir=config_dir )
else:
    keydir = gpghome

gpg = gnupg.GPG( homedir=keydir )
res = gpg.delete_keys( [key_id] )
if res.status == 'Must delete secret key first':
    # this is a private key
    res = gpg.delete_keys( [key_id], secret=True )

try:
    assert res.status == 'ok', "Failed to delete key (%s)" % res
except AssertionError, e:
    log.exception(e)
    log.error("Failed to delete key '%s'" % key_id)
    log.debug("res: %s" % res.__dict__)
    return False

return True
<SYSTEM_TASK:> Download a GPG key from a key server. <END_TASK> <USER_TASK:> Description: def gpg_download_key( key_id, key_server, config_dir=None ): """ Download a GPG key from a key server. Do not import it into any keyrings. Return the ASCII-armored key """
config_dir = get_config_dir( config_dir )
tmpdir = make_gpg_tmphome( prefix="download", config_dir=config_dir )
gpg = gnupg.GPG( homedir=tmpdir )

recvdat = gpg.recv_keys( key_server, key_id )
fingerprint = None

try:
    assert recvdat.count == 1
    assert len(recvdat.fingerprints) == 1
    fingerprint = recvdat.fingerprints[0]
except AssertionError, e:
    log.exception(e)
    log.error( "Failed to fetch key '%s' from '%s'" % (key_id, key_server))
    shutil.rmtree( tmpdir )
    return None

keydat = gpg.export_keys( [fingerprint] )
shutil.rmtree( tmpdir )
return str(keydat)
<SYSTEM_TASK:> Get the key ID of a given serialized key <END_TASK> <USER_TASK:> Description: def gpg_key_fingerprint( key_data, config_dir=None ): """ Get the key ID of a given serialized key Return the fingerprint on success Return None on error """
key_data = str(key_data)
config_dir = get_config_dir( config_dir )
tmpdir = make_gpg_tmphome( prefix="key_id-", config_dir=config_dir )
gpg = gnupg.GPG( homedir=tmpdir )
res = gpg.import_keys( key_data )

try:
    assert res.count == 1, "Failed to import key"
    assert len(res.fingerprints) == 1, "Nonsensical GPG response: wrong number of fingerprints"
    fingerprint = res.fingerprints[0]
    shutil.rmtree(tmpdir)
    return fingerprint
except AssertionError, e:
    log.exception(e)
    shutil.rmtree(tmpdir)
    return None
<SYSTEM_TASK:> Verify that a given serialized key, when imported, has the given key ID. <END_TASK> <USER_TASK:> Description: def gpg_verify_key( key_id, key_data, config_dir=None ): """ Verify that a given serialized key, when imported, has the given key ID. Return True on success Return False on error """
key_data = str(key_data)
config_dir = get_config_dir( config_dir )
sanitized_key_id = "".join( key_id.upper().split(" ") )

if len(sanitized_key_id) < 16:
    log.debug("Fingerprint is too short to be secure")
    return False

fingerprint = gpg_key_fingerprint( key_data, config_dir=config_dir )
if fingerprint is None:
    log.debug("Failed to fingerprint key")
    return False

if sanitized_key_id != fingerprint and not fingerprint.endswith( sanitized_key_id ):
    log.debug("Imported key does not match the given ID")
    return False
else:
    return True
<SYSTEM_TASK:> Get the ASCII-armored key, given the ID <END_TASK> <USER_TASK:> Description: def gpg_export_key( appname, key_id, config_dir=None, include_private=False ): """ Get the ASCII-armored key, given the ID """
assert is_valid_appname(appname)
config_dir = get_config_dir( config_dir )
keydir = get_gpg_home( appname, config_dir=config_dir )
gpg = gnupg.GPG( homedir=keydir )
keydat = gpg.export_keys( [key_id], secret=include_private )
if not keydat:
    log.debug("Failed to export key %s from '%s'" % (key_id, keydir))

assert keydat
return keydat
<SYSTEM_TASK:> Fetch a GPG public key from the given URL. <END_TASK> <USER_TASK:> Description: def gpg_fetch_key( key_url, key_id=None, config_dir=None ): """ Fetch a GPG public key from the given URL. Supports anything urllib2 supports. If the URL has no scheme, then assume it's a PGP key server, and use GPG to go get it. The key is not accepted into any keyrings. Return the key data on success. If key_id is given, verify the key matches. Return None on error, or on failure to carry out any key verification """
dat = None from_blockstack = False # make sure it's valid try: urlparse.urlparse(key_url) except: log.error("Invalid URL") return None if "://" in key_url and not key_url.lower().startswith("iks://"): opener = None key_data = None # handle blockstack:// URLs if key_url.startswith("blockstack://"): blockstack_opener = BlockstackHandler( config_path=os.path.join(config_dir, blockstack_client.CONFIG_FILENAME) ) opener = urllib2.build_opener( blockstack_opener ) from_blockstack = True elif key_url.lower().startswith("http://") or key_url.lower().startswith("https://"): # fetch, but at least try not to look like a bot opener = urllib2.build_opener() opener.addheaders = [('User-agent', 'Mozilla/5.0')] else: # defaults opener = urllib2.build_opener() try: f = opener.open( key_url ) key_data_str = f.read() key_data = None if from_blockstack: # expect: {'key name': 'PEM string'} key_data_dict = json.loads(key_data_str) assert len(key_data_dict) == 1, "Got multiple keys" key_data = str(key_data_dict[key_data_dict.keys()[0]]) else: # expect: PEM string key_data = key_data_str f.close() except Exception, e: log.exception(e) if key_id is not None: log.error("Failed to fetch key '%s' from '%s'" % (key_id, key_url)) else: log.error("Failed to fetch key from '%s'" % key_url) return None # verify, if we have the ID. # if we don't have the key ID, then we must be fetching from blockstack # (since then the data will have already been verified by the protocol, using locally-hosted trusted information) if not from_blockstack and key_id is None: log.error( "No key ID given for key located at %s" % key_url ) return None if key_id is not None: rc = gpg_verify_key( key_id, key_data, config_dir=config_dir ) if not rc: log.error("Failed to verify key %s" % key_id) return None dat = key_data else: # iks protocol, fetch from keyserver key_server = key_url if '://' in key_server: key_server = urlparse.urlparse(key_server).netloc dat = gpg_download_key( key_id, key_server, config_dir=config_dir ) assert dat is not None and len(dat) > 0, "BUG: no key data received for '%s' from '%s'" % (key_id, key_url) return dat
<SYSTEM_TASK:> Put an application GPG key. <END_TASK> <USER_TASK:> Description: def gpg_app_put_key( blockchain_id, appname, keyname, key_data, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ): """ Put an application GPG key. Stash the private key locally to an app-specific keyring. Return {'status': True, 'key_url': ..., 'key_data': ...} on success Return {'error': ...} on error If immutable is True, then store the data as an immutable entry (e.g. update the zonefile with the key hash) This is a time-consuming operation (on the order of an hour), and you will get back the transaction ID on a successful execution. It is up to you to wait until the transaction is confirmed before using the key. Otherwise, the key is stored to mutable storage. """
assert is_valid_appname(appname) assert is_valid_keyname(keyname) try: keydir = make_gpg_home( appname, config_dir=config_dir ) key_id = gpg_stash_key( appname, key_data, config_dir=config_dir, gpghome=keydir ) assert key_id is not None, "Failed to stash key" log.debug("Stashed app key '%s:%s' (%s) under '%s'" % (appname, keyname, key_id, keydir)) except Exception, e: log.exception(e) log.error("Failed to store GPG key '%s'" % keyname) return {'error': "Failed to store GPG key locally"} # get public key... assert is_valid_appname(appname) try: pubkey_data = gpg_export_key( appname, key_id, config_dir=config_dir ) except: return {'error': 'Failed to load key'} fq_key_name = "gpg.%s.%s" % (appname, keyname) key_url = None if not immutable: res = client.put_mutable( blockchain_id, fq_key_name, {fq_key_name: pubkey_data}, txid=txid, proxy=proxy, wallet_keys=wallet_keys ) if 'error' in res: return res key_url = client.make_mutable_data_url( blockchain_id, fq_key_name, res['version'] ) else: res = client.put_immutable( blockchain_id, fq_key_name, {fq_key_name: pubkey_data}, txid=txid, proxy=proxy, wallet_keys=wallet_keys ) if 'error' in res: return res key_url = client.make_immutable_data_url( blockchain_id, fq_key_name, res['immutable_data_hash'] ) res['key_url'] = key_url res['key_data'] = pubkey_data res['key_id'] = gpg_key_fingerprint( pubkey_data, config_dir=config_dir ) log.debug("Put key %s:%s (%s) to %s" % (appname, keyname, res['key_id'], key_url)) return res
<SYSTEM_TASK:> Remove an application GPG key. <END_TASK> <USER_TASK:> Description: def gpg_app_delete_key( blockchain_id, appname, keyname, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ): """ Remove an application GPG key. Unstash the local private key. Return {'status': True, ...} on success Return {'error': ...} on error If immutable is True, then remove the data from the user's zonefile, not profile. The delete may take on the order of an hour to complete on the blockchain. A transaction ID will be returned to you on successful deletion, and it will be up to you to wait for the transaction to get confirmed. """
assert is_valid_appname(appname) assert is_valid_keyname(keyname) fq_key_name = "gpg.%s.%s" % (appname, keyname) result = {} dead_pubkey_dict = None dead_pubkey = None key_id = None if not immutable: # find the key first, so we can get the key ID and then remove it locally dead_pubkey_dict = client.get_mutable( blockchain_id, fq_key_name, proxy=proxy, wallet_keys=wallet_keys ) if 'error' in dead_pubkey_dict: return dead_pubkey_dict else: # need the key ID so we can unstash locally dead_pubkey_dict = client.get_immutable( blockchain_id, None, data_id=fq_key_name, proxy=proxy ) if 'error' in dead_pubkey_dict: return dead_pubkey_dict dead_pubkey_kv = dead_pubkey_dict['data'] assert len(dead_pubkey_kv.keys()) == 1, "Not a public key we wrote: %s" % dead_pubkey_kv dead_pubkey = dead_pubkey_kv[ dead_pubkey_kv.keys()[0] ] key_id = gpg_key_fingerprint( dead_pubkey, config_dir=config_dir ) assert key_id is not None, "Failed to load pubkey fingerprint" # actually delete if not immutable: result = client.delete_mutable( blockchain_id, fq_key_name, proxy=proxy, wallet_keys=wallet_keys ) else: result = client.delete_immutable( blockchain_id, None, data_id=fq_key_name, wallet_keys=wallet_keys, proxy=proxy ) if 'error' in result: return result # unstash try: rc = gpg_unstash_key( appname, key_id, config_dir=config_dir ) assert rc, "Failed to unstash key" except: log.warning("Failed to remove private key for '%s'" % key_id ) result['warning'] = "Failed to remove private key" if os.environ.get('BLOCKSTACK_TEST') is not None: # make sure this never happens in testing raise return result
<SYSTEM_TASK:> Sign a file on disk. <END_TASK> <USER_TASK:> Description: def gpg_sign( path_to_sign, sender_key_info, config_dir=None, passphrase=None ): """ Sign a file on disk. @sender_key_info should be a dict with { 'key_id': ... 'key_data': ... 'app_name': ... } Return {'status': True, 'sig': ...} on success Return {'error': ...} on error """
if config_dir is None: config_dir = get_config_dir() # ingest keys tmpdir = make_gpg_tmphome( prefix="sign", config_dir=config_dir ) try: sender_privkey = gpg_export_key( sender_key_info['app_name'], sender_key_info['key_id'], include_private=True, config_dir=config_dir ) except Exception, e: log.exception(e) shutil.rmtree(tmpdir) return {'error': 'No such private key'} res = gpg_stash_key( "sign", sender_privkey, config_dir=config_dir, gpghome=tmpdir ) if res is None: shutil.rmtree(tmpdir) return {'error': 'Failed to load sender private key'} # do the signature gpg = gnupg.GPG( homedir=tmpdir ) res = None with open(path_to_sign, "r") as fd_in: res = gpg.sign_file( fd_in, keyid=sender_key_info['key_id'], passphrase=passphrase, detach=True ) shutil.rmtree(tmpdir) if not res: log.debug("sign_file error: %s" % res.__dict__) log.debug("signer: %s" % sender_key_info['key_id']) return {'error': 'Failed to sign data'} return {'status': True, 'sig': res.data }
<SYSTEM_TASK:> Verify a file on disk was signed by the given sender. <END_TASK> <USER_TASK:> Description: def gpg_verify( path_to_verify, sigdata, sender_key_info, config_dir=None ): """ Verify a file on disk was signed by the given sender. @sender_key_info should be a dict with { 'key_id': ... 'key_data': ... 'app_name'; ... } Return {'status': True} on success Return {'error': ...} on error """
if config_dir is None: config_dir = get_config_dir() # ingest keys tmpdir = make_gpg_tmphome( prefix="verify", config_dir=config_dir ) res = gpg_stash_key( "verify", sender_key_info['key_data'], config_dir=config_dir, gpghome=tmpdir ) if res is None: shutil.rmtree(tmpdir) return {'error': 'Failed to stash key %s' % sender_key_info['key_id']} # stash detached signature fd, path = tempfile.mkstemp( prefix=".sig-verify-" ) f = os.fdopen(fd, "w") f.write( sigdata ) f.flush() os.fsync(f.fileno()) f.close() # verify gpg = gnupg.GPG( homedir=tmpdir ) with open(path, "r") as fd_in: res = gpg.verify_file( fd_in, data_filename=path_to_verify ) shutil.rmtree(tmpdir) try: os.unlink(path) except: pass if not res: log.debug("verify_file error: %s" % res.__dict__) return {'error': 'Failed to decrypt data'} log.debug("verification succeeded from keys in %s" % config_dir) return {'status': True}
<SYSTEM_TASK:> Encrypt a stream of data for a set of keys. <END_TASK> <USER_TASK:> Description: def gpg_encrypt( fd_in, path_out, sender_key_info, recipient_key_infos, passphrase=None, config_dir=None ): """ Encrypt a stream of data for a set of keys. @sender_key_info should be a dict with { 'key_id': ... 'key_data': ... 'app_name'; ... } Return {'status': True} on success Return {'error': ...} on error """
if config_dir is None: config_dir = get_config_dir() # ingest keys tmpdir = make_gpg_tmphome( prefix="encrypt", config_dir=config_dir ) for key_info in recipient_key_infos: res = gpg_stash_key( "encrypt", key_info['key_data'], config_dir=config_dir, gpghome=tmpdir ) if res is None: shutil.rmtree(tmpdir) return {'error': 'Failed to stash key %s' % key_info['key_id']} # copy over our key try: sender_privkey = gpg_export_key( sender_key_info['app_name'], sender_key_info['key_id'], include_private=True, config_dir=config_dir ) except Exception, e: log.exception(e) shutil.rmtree(tmpdir) return {'error': 'No such private key'} res = gpg_stash_key( "encrypt", sender_privkey, config_dir=config_dir, gpghome=tmpdir ) if res is None: shutil.rmtree(tmpdir) return {'error': 'Failed to load sender private key'} recipient_key_ids = [r['key_id'] for r in recipient_key_infos] # do the encryption gpg = gnupg.GPG( homedir=tmpdir ) res = gpg.encrypt_file( fd_in, recipient_key_ids, sign=sender_key_info['key_id'], passphrase=passphrase, output=path_out, always_trust=True ) shutil.rmtree(tmpdir) if res.status != 'encryption ok': log.debug("encrypt_file error: %s" % res.__dict__) log.debug("recipients: %s" % recipient_key_ids) log.debug("signer: %s" % sender_key_info['key_id']) return {'error': 'Failed to encrypt data'} return {'status': True}
<SYSTEM_TASK:> Decrypt a stream of data using key info <END_TASK> <USER_TASK:> Description: def gpg_decrypt( fd_in, path_out, sender_key_info, my_key_info, passphrase=None, config_dir=None ): """ Decrypt a stream of data using key info for a private key we own. @my_key_info and @sender_key_info should be data returned by gpg_app_get_key { 'key_id': ... 'key_data': ... 'app_name': ... } Return {'status': True} on success Return {'error': ...} on error """
if config_dir is None: config_dir = get_config_dir() # ingest keys tmpdir = make_gpg_tmphome( prefix="decrypt", config_dir=config_dir ) res = gpg_stash_key( "decrypt", sender_key_info['key_data'], config_dir=config_dir, gpghome=tmpdir ) if res is None: shutil.rmtree(tmpdir) return {'error': 'Failed to stash key %s' % sender_key_info['key_id']} try: my_privkey = gpg_export_key( my_key_info['app_name'], my_key_info['key_id'], include_private=True, config_dir=config_dir ) except: shutil.rmtree(tmpdir) return {'error': 'Failed to load local private key for %s' % my_key_info['key_id']} res = gpg_stash_key( "decrypt", my_privkey, config_dir=config_dir, gpghome=tmpdir ) if res is None: shutil.rmtree(tmpdir) return {'error': 'Failed to load private key'} # do the decryption gpg = gnupg.GPG( homedir=tmpdir ) res = gpg.decrypt_file( fd_in, passphrase=passphrase, output=path_out, always_trust=True ) shutil.rmtree(tmpdir) if res.status != 'decryption ok': log.debug("decrypt_file: %s" % res.__dict__) return {'error': 'Failed to decrypt data'} log.debug("decryption succeeded from keys in %s" % config_dir) return {'status': True}
<SYSTEM_TASK:> Return the usage string for the primary command. <END_TASK> <USER_TASK:> Description: def get_primary_command_usage(message=''): # type: (str) -> str """Return the usage string for the primary command."""
if not settings.merge_primary_command and None in settings.subcommands:
    return format_usage(settings.subcommands[None].__doc__)
if not message:
    message = '\n{}\n'.format(settings.message) if settings.message else ''
doc = _DEFAULT_DOC.format(message=message)
if None in settings.subcommands:
    return _merge_doc(doc, settings.subcommands[None].__doc__)
return format_usage(doc)
<SYSTEM_TASK:> Print out a help message and exit the program. <END_TASK> <USER_TASK:> Description: def get_help_usage(command): # type: (str) -> None """Print out a help message and exit the program. Args: command: If a command value is supplied then print the help message for the command module if available. If the command is '-a' or '--all', then print the standard help message but with a full list of available commands. Raises: ValueError: Raised if the help message is requested for an invalid command or an unrecognized option is passed to help. """
if not command:
    doc = get_primary_command_usage()
elif command in ('-a', '--all'):
    subcommands = [k for k in settings.subcommands if k is not None]
    available_commands = subcommands + ['help']
    command_doc = '\nAvailable commands:\n{}\n'.format(
        '\n'.join(' {}'.format(c) for c in sorted(available_commands)))
    doc = get_primary_command_usage(command_doc)
elif command.startswith('-'):
    raise ValueError("Unrecognized option '{}'.".format(command))
elif command in settings.subcommands:
    subcommand = settings.subcommands[command]
    doc = format_usage(subcommand.__doc__)
docopt.docopt(doc, argv=('--help',))
<SYSTEM_TASK:> Format the docstring for display to the user. <END_TASK> <USER_TASK:> Description: def format_usage(doc, width=None): # type: (str, Optional[int]) -> str """Format the docstring for display to the user. Args: doc: The docstring to reformat for display. Returns: The docstring formatted to parse and display to the user. This includes dedenting, rewrapping, and translating the docstring if necessary. """
sections = doc.replace('\r', '').split('\n\n')
width = width or get_terminal_size().columns or 80
return '\n\n'.join(_wrap_section(s.strip(), width) for s in sections)
<SYSTEM_TASK:> Parse a docopt-style string for commands and subcommands. <END_TASK> <USER_TASK:> Description: def parse_commands(docstring): # type: (str) -> Generator[Tuple[List[str], List[str]], None, None] """Parse a docopt-style string for commands and subcommands. Args: docstring: A docopt-style string to parse. If the string is not a valid docopt-style string, it will not yield any values. Yields: All tuples of commands and subcommands found in the docopt docstring. """
try:
    docopt.docopt(docstring, argv=())
except (TypeError, docopt.DocoptLanguageError):
    return
except docopt.DocoptExit:
    pass
for command in _parse_section('usage', docstring):
    args = command.split()
    commands = []
    i = 0
    for i, arg in enumerate(args):
        if arg[0].isalpha() and not arg[0].isupper():
            commands.append(arg)
        else:
            break
    yield commands, args[i:]
<SYSTEM_TASK:> Merge two usage strings together. <END_TASK> <USER_TASK:> Description: def _merge_doc(original, to_merge): # type: (str, str) -> str """Merge two usage strings together. Args: original: The source of headers and initial section lines. to_merge: The source for the additional section lines to append. Returns: A new usage string that contains information from both usage strings. """
if not original:
    return to_merge or ''
if not to_merge:
    return original or ''
sections = []
for name in ('usage', 'arguments', 'options'):
    sections.append(_merge_section(
        _get_section(name, original),
        _get_section(name, to_merge)
    ))
return format_usage('\n\n'.join(s for s in sections).rstrip())
<SYSTEM_TASK:> Merge two sections together. <END_TASK> <USER_TASK:> Description: def _merge_section(original, to_merge): # type: (str, str) -> str """Merge two sections together. Args: original: The source of header and initial section lines. to_merge: The source for the additional section lines to append. Returns: A new section string that uses the header of the original argument and the section lines from both. """
if not original:
    return to_merge or ''
if not to_merge:
    return original or ''
try:
    index = original.index(':') + 1
except ValueError:
    index = original.index('\n')
name = original[:index].strip()
section = '\n '.join(
    (original[index + 1:].lstrip(), to_merge[index + 1:].lstrip())
).rstrip()
return '{name}\n {section}'.format(name=name, section=section)
<SYSTEM_TASK:> Extract the named section from the source. <END_TASK> <USER_TASK:> Description: def _get_section(name, source): # type: (str, str) -> Optional[str] """Extract the named section from the source. Args: name: The name of the section to extract (e.g. "Usage"). source: The usage string to parse. Returns: A string containing only the requested section. If the section appears multiple times, each instance will be merged into a single section. """
pattern = re.compile(
    '^([^\n]*{name}[^\n]*\n?(?:[ \t].*?(?:\n|$))*)'.format(name=name),
    re.IGNORECASE | re.MULTILINE)
usage = None
for section in pattern.findall(source):
    usage = _merge_section(usage, section.strip())
return usage
<SYSTEM_TASK:> Wrap the given section string to the current terminal size. <END_TASK> <USER_TASK:> Description: def _wrap_section(source, width): # type: (str, int) -> str """Wrap the given section string to the current terminal size. Intelligently wraps the section string to the given width. When wrapping section lines, it auto-adjusts the spacing between terms and definitions. It also adjusts commands the fit the correct length for the arguments. Args: source: The section string to wrap. Returns: The wrapped section string. """
if _get_section('usage', source):
    return _wrap_usage_section(source, width)
if _is_definition_section(source):
    return _wrap_definition_section(source, width)
lines = inspect.cleandoc(source).splitlines()
paragraphs = (textwrap.wrap(line, width, replace_whitespace=False)
              for line in lines)
return '\n'.join(line for paragraph in paragraphs for line in paragraph)
<SYSTEM_TASK:> Determine if the source is a definition section. <END_TASK> <USER_TASK:> Description: def _is_definition_section(source): """Determine if the source is a definition section. Args: source: The usage string source that may be a section. Returns: True if the source describes a definition section; otherwise, False. """
try:
    definitions = textwrap.dedent(source).split('\n', 1)[1].splitlines()
    return all(
        re.match(r'\s\s+((?!\s\s).+)\s\s+.+', s) for s in definitions)
except IndexError:
    return False
<SYSTEM_TASK:> Wrap the given usage section string to the current terminal size. <END_TASK> <USER_TASK:> Description: def _wrap_usage_section(source, width): # type: (str, int) -> str """Wrap the given usage section string to the current terminal size. Note: Commands arguments are wrapped to the column that the arguments began on the first line of the command. Args: source: The section string to wrap. Returns: The wrapped section string. """
if not any(len(line) > width for line in source.splitlines()):
    return source
section_header = source[:source.index(':') + 1].strip()
lines = [section_header]
for commands, args in parse_commands(source):
    command = ' {} '.format(' '.join(commands))
    max_len = width - len(command)
    sep = '\n' + ' ' * len(command)
    wrapped_args = sep.join(textwrap.wrap(' '.join(args), max_len))
    full_command = command + wrapped_args
    lines += full_command.splitlines()
return '\n'.join(lines)
<SYSTEM_TASK:> Wrap the given definition section string to the current terminal size. <END_TASK> <USER_TASK:> Description: def _wrap_definition_section(source, width): # type: (str, int) -> str """Wrap the given definition section string to the current terminal size. Note: Auto-adjusts the spacing between terms and definitions. Args: source: The section string to wrap. Returns: The wrapped section string. """
index = source.index('\n') + 1
definitions, max_len = _get_definitions(source[index:])
sep = '\n' + ' ' * (max_len + 4)
lines = [source[:index].strip()]
for arg, desc in six.iteritems(definitions):
    wrapped_desc = sep.join(textwrap.wrap(desc, width - max_len - 4))
    lines.append(' {arg:{size}} {desc}'.format(
        arg=arg,
        size=str(max_len),
        desc=wrapped_desc
    ))
return '\n'.join(lines)
<SYSTEM_TASK:> Extract a dictionary of arguments and definitions. <END_TASK> <USER_TASK:> Description: def _get_definitions(source): # type: (str) -> Tuple[Dict[str, str], int] """Extract a dictionary of arguments and definitions. Args: source: The source for a section of a usage string that contains definitions. Returns: A two-tuple containing a dictionary of all arguments and definitions as well as the length of the longest argument. """
max_len = 0
descs = collections.OrderedDict()  # type: Dict[str, str]
lines = (s.strip() for s in source.splitlines())
non_empty_lines = (s for s in lines if s)
for line in non_empty_lines:
    if line:
        arg, desc = re.split(r'\s\s+', line.strip())
        arg_len = len(arg)
        if arg_len > max_len:
            max_len = arg_len
        descs[arg] = desc
return descs, max_len
<SYSTEM_TASK:> Yield each section line. <END_TASK> <USER_TASK:> Description: def _parse_section(name, source): # type: (str, str) -> List[str] """Yield each section line. Note: Depending on how it is wrapped, a section line can take up more than one physical line. Args: name: The name of the section to extract (e.g. "Usage"). source: The usage string to parse. Returns: A list containing each line, de-wrapped by whitespace from the source code. If the section is defined multiple times in the source code, all lines from all sections with that name will be returned. """
section = textwrap.dedent(_get_section(name, source)[7:])
commands = []  # type: List[str]
for line in section.splitlines():
    if not commands or line[:1].isalpha() and line[:1].islower():
        commands.append(line)
    else:
        commands[-1] = '{} {}'.format(commands[-1].strip(), line.strip())
return commands
<SYSTEM_TASK:> I'm dead, so no behaviors should act on me <END_TASK> <USER_TASK:> Description: def move(self, particle, u, v, w, modelTimestep, **kwargs): """ I'm dead, so no behaviors should act on me """
# Kill the particle if it isn't settled and isn't already dead.
if not particle.settled and not particle.dead:
    particle.die()

# Still save the temperature and salinity for the model output
temp = kwargs.get('temperature', None)
if temp is not None and math.isnan(temp):
    temp = None
particle.temp = temp

salt = kwargs.get('salinity', None)
if salt is not None and math.isnan(salt):
    salt = None
particle.salt = salt

u = 0
v = 0
w = 0

# Do the calculation to determine the new location
result = AsaTransport.distance_from_location_using_u_v_w(u=u, v=v, w=w, timestep=modelTimestep, location=particle.location)
result['u'] = u
result['v'] = v
result['w'] = w
return result
<SYSTEM_TASK:> Calculate the datetimes of the model timesteps <END_TASK> <USER_TASK:> Description: def get_time_objects_from_model_timesteps(cls, times, start): """ Calculate the datetimes of the model timesteps times should start at 0 and be in seconds """
modelTimestep = []
newtimes = []

for i in xrange(0, len(times)):
    try:
        modelTimestep.append(times[i+1] - times[i])
    except StandardError:
        modelTimestep.append(times[i] - times[i-1])
    newtimes.append(start + timedelta(seconds=times[i]))

return (modelTimestep, newtimes)
<SYSTEM_TASK:> Fill a shapely polygon with X number of points <END_TASK> <USER_TASK:> Description: def fill_polygon_with_points(cls, goal=None, polygon=None): """ Fill a shapely polygon with X number of points """
if goal is None:
    raise ValueError("Must specify the number of points (goal) to fill the polygon with")

if polygon is None or (not isinstance(polygon, Polygon) and not isinstance(polygon, MultiPolygon)):
    raise ValueError("Must specify a polygon to fill points with")

minx = polygon.bounds[0]
maxx = polygon.bounds[2]
miny = polygon.bounds[1]
maxy = polygon.bounds[3]

points = []
now = time.time()
while len(points) < goal:
    random_x = random.uniform(minx, maxx)
    random_y = random.uniform(miny, maxy)
    p = Point(random_x, random_y)
    if p.within(polygon):
        points.append(p)

logger.info("Filling polygon with points took %f seconds" % (time.time() - now))
return points
<SYSTEM_TASK:> Calculate the great-circle distance from a location using u, v, and w. <END_TASK> <USER_TASK:> Description: def distance_from_location_using_u_v_w(cls, u=None, v=None, w=None, timestep=None, location=None): """ Calculate the great-circle distance from a location using u, v, and w. u, v, and w must be in the same units as the timestep. Stick with seconds. """
# Move horizontally
distance_horiz = 0
azimuth = 0
angle = 0
depth = location.depth

if u != 0 and v != 0:
    s_and_d = AsaMath.speed_direction_from_u_v(u=u, v=v)  # calculates velocity in m/s from transformed u and v
    distance_horiz = s_and_d['speed'] * timestep  # calculate the horizontal distance in meters using the velocity and model timestep
    angle = s_and_d['direction']
    # Great circle calculation
    # Calculation takes in azimuth (heading from North, so convert our mathematical angle to azimuth)
    azimuth = AsaMath.math_angle_to_azimuth(angle=angle)

distance_vert = 0.
if w is not None:
    # Move vertically
    # Depth is positive up, negative down.  w will be negative if moving down, and positive if moving up.
    distance_vert = w * timestep
    depth += distance_vert  # calculate the vertical distance in meters using w (m/s) and model timestep (s)

if distance_horiz != 0:
    vertical_angle = math.degrees(math.atan(distance_vert / distance_horiz))
    gc_result = AsaGreatCircle.great_circle(distance=distance_horiz, azimuth=azimuth, start_point=location)
else:
    # Did we go up or down?
    vertical_angle = 0.
    if distance_vert < 0:
        # Down
        vertical_angle = 270.
    elif distance_vert > 0:
        # Up
        vertical_angle = 90.
    gc_result = {'latitude': location.latitude, 'longitude': location.longitude, 'reverse_azimuth': 0}

#logger.info("Particle moving from %fm to %fm from a vertical speed of %f m/s over %s seconds" % (location.depth, depth, w, str(timestep)))
gc_result['azimuth'] = azimuth
gc_result['depth'] = depth
gc_result['distance'] = distance_horiz
gc_result['angle'] = angle
gc_result['vertical_distance'] = distance_vert
gc_result['vertical_angle'] = vertical_angle
return gc_result
<SYSTEM_TASK:> Wait for all threads to complete <END_TASK> <USER_TASK:> Description: def shutdown(self): """Wait for all threads to complete"""
# cleanup
self.started = False
try:
    # nice way of doing things - let's wait until all items
    # in the queue are processed
    for t in self._threads:
        t.join()
finally:
    # Emergency brake - if a KeyboardInterrupt is raised,
    # threads will finish processing current task and exit
    self.stopped = True
<SYSTEM_TASK:> Unpack a set of bytes into an integer. First pads to 4 bytes. <END_TASK> <USER_TASK:> Description: def _unpack_bytes(bytes): """ Unpack a set of bytes into an integer. First pads to 4 bytes. Little endian. """
if bytes == b'':
    return 0

int_length = 4
len_diff = int_length - len(bytes)
bytes = bytes + len_diff * b'\x00'
return struct.unpack("<L", bytes)[0]
<SYSTEM_TASK:> Returns all sprints, enriched with their assigned tasks. <END_TASK> <USER_TASK:> Description: def get_sprints(): """ Returns all sprints, enriched with their assigned tasks. The project should only have one ``sprints.py`` module. We will define its path via the ``RAPID_PROTOTYPING_SPRINTS_MODULE`` setting. The setting should be the fully qualified name of the ``sprints.py`` module (i.e. ``projectname.context.sprints.sprints``). Furthermore the project can have any amount of ``*_costs.py`` modules in any folder (as long as they are on the pythonpath). This function will find all ``*_costs.py`` modules and add those tasks that have been assigned to a sprint to the corresponding sprints in the ``sprints.py`` module. """
sprints = load_member_from_setting(
    'RAPID_PROTOTYPING_SPRINTS_MODULE')
all_tasks = []
# TODO The onerror parameter is basically a workaround to ignore errors
# The reason for that being, that in my case, the GeoDjango package was in
# the path, permanently requesting certain libraries on import. Since they
# were not present, the search was aborted with an OSError.
for importer, package_name, _ in pkgutil.walk_packages(
        onerror=lambda p: p):
    if not package_name.endswith('_costs'):
        continue
    if not getattr(settings, 'TEST_RUN', None) and (
            '.test_app.' in package_name):  # pragma: nocover
        continue
    costs = load_member(package_name + '.costs')
    for task in costs:
        all_tasks.append(task)
sorted_tasks = sorted(all_tasks, key=itemgetter('id'))

for sprint in sprints:
    remaining_time = 0
    sprint['tasks'] = []
    for task in sorted_tasks:
        if task.get('sprint') == sprint.get('id'):
            if not task.get('actual_time'):
                remaining_time += \
                    task.get('developer_time') or task.get('time')
            sprint.get('tasks').append(task)
    sprint['remaining_time'] = remaining_time
    sprint['remaining_hours'] = round(float(remaining_time) / 60, 2)
return sprints
<SYSTEM_TASK:> Adds 15% overhead costs to the list of costs. <END_TASK> <USER_TASK:> Description: def append_overhead_costs(costs, new_id, overhead_percentage=0.15): """ Adds 15% overhead costs to the list of costs. Usage:: from rapid_prototyping.context.utils import append_overhead_costs costs = [ .... ] costs = append_overhead_costs(costs, MAIN_ID + get_counter(counter)[0]) :param costs: Your final list of costs. :param new_id: The id that this new item should get. """
total_time = 0
for item in costs:
    total_time += item['time']
costs.append({
    'id': new_id,
    'task': 'Overhead, Bufixes & Iterations',
    'time': total_time * overhead_percentage,
})
return costs
<SYSTEM_TASK:> returns False if there are no changes <END_TASK> <USER_TASK:> Description: def checkForChanges(f, sde, isTable): """ returns False if there are no changes """
# try simple feature count first fCount = int(arcpy.GetCount_management(f).getOutput(0)) sdeCount = int(arcpy.GetCount_management(sde).getOutput(0)) if fCount != sdeCount: return True fields = [fld.name for fld in arcpy.ListFields(f)] # filter out shape fields if not isTable: fields = filter_fields(fields) d = arcpy.Describe(f) shapeType = d.shapeType if shapeType == 'Polygon': shapeToken = 'SHAPE@AREA' elif shapeType == 'Polyline': shapeToken = 'SHAPE@LENGTH' elif shapeType == 'Point': shapeToken = 'SHAPE@XY' else: shapeToken = 'SHAPE@JSON' fields.append(shapeToken) def parseShape(shapeValue): if shapeValue is None: return 0 elif shapeType in ['Polygon', 'Polyline']: return shapeValue elif shapeType == 'Point': if shapeValue[0] is not None and shapeValue[1] is not None: return shapeValue[0] + shapeValue[1] else: return 0 else: return shapeValue outputSR = arcpy.Describe(f).spatialReference else: outputSR = None changed = False with arcpy.da.SearchCursor(f, fields, sql_clause=(None, 'ORDER BY OBJECTID')) as fCursor, \ arcpy.da.SearchCursor(sde, fields, sql_clause=(None, 'ORDER BY OBJECTID'), spatial_reference=outputSR) as sdeCursor: for fRow, sdeRow in izip(fCursor, sdeCursor): if fRow != sdeRow: # check shapes first if fRow[-1] != sdeRow[-1] and not isTable: if shapeType not in ['Polygon', 'Polyline', 'Point']: changed = True break fShape = parseShape(fRow[-1]) sdeShape = parseShape(sdeRow[-1]) try: assert_almost_equal(fShape, sdeShape, -1) # trim off shapes fRow = list(fRow[:-1]) sdeRow = list(sdeRow[:-1]) except AssertionError: changed = True break # trim microseconds since they can be off by one between file and sde databases for i in range(len(fRow)): if type(fRow[i]) is datetime: fRow = list(fRow) sdeRow = list(sdeRow) fRow[i] = fRow[i].replace(microsecond=0) try: sdeRow[i] = sdeRow[i].replace(microsecond=0) except: pass # compare all values except OBJECTID if fRow[1:] != sdeRow[1:]: changed = True break return changed
<SYSTEM_TASK:> Return max pages created by limit <END_TASK> <USER_TASK:> Description: def __total_pages(self) -> int: """ Return max pages created by limit """
row_count = self.model.query.count()
if isinstance(row_count, int):
    return int(row_count / self.limit)
return None
<SYSTEM_TASK:> Add arguments to an ArgumentParser or OptionParser for purposes of <END_TASK> <USER_TASK:> Description: def add_arguments(parser, default_level=logging.INFO): """ Add arguments to an ArgumentParser or OptionParser for purposes of grabbing a logging level. """
adder = (
    getattr(parser, 'add_argument', None)
    or getattr(parser, 'add_option')
)
adder(
    '-l', '--log-level', default=default_level, type=log_level,
    help="Set log level (DEBUG, INFO, WARNING, ERROR)")
<SYSTEM_TASK:> Setup logging with options or arguments from an OptionParser or <END_TASK> <USER_TASK:> Description: def setup(options, **kwargs): """ Setup logging with options or arguments from an OptionParser or ArgumentParser. Also pass any keyword arguments to the basicConfig call. """
params = dict(kwargs)
params.update(level=options.log_level)
logging.basicConfig(**params)
<SYSTEM_TASK:> Setup logging for 'requests' such that it logs details about the <END_TASK> <USER_TASK:> Description: def setup_requests_logging(level): """ Setup logging for 'requests' such that it logs details about the connection, headers, etc. """
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(level)
requests_log.propagate = True

# enable debugging at httplib level
http_client.HTTPConnection.debuglevel = level <= logging.DEBUG
<SYSTEM_TASK:> Set the period for the timestamp. If period is 0 or None, no period <END_TASK> <USER_TASK:> Description: def _set_period(self, period): """ Set the period for the timestamp. If period is 0 or None, no period will be used. """
self._period = period
if period:
    self._period_seconds = tempora.get_period_seconds(self._period)
    self._date_format = tempora.get_date_format_string(
        self._period_seconds)
else:
    self._period_seconds = 0
    self._date_format = ''
<SYSTEM_TASK:> Return the appropriate filename for the given time <END_TASK> <USER_TASK:> Description: def get_filename(self, t): """ Return the appropriate filename for the given time based on the defined period. """
root, ext = os.path.splitext(self.base_filename)
# remove seconds not significant to the period
if self._period_seconds:
    t -= t % self._period_seconds
# convert it to a datetime object for formatting
dt = datetime.datetime.utcfromtimestamp(t)
# append the datestring to the filename
# workaround for datetime.strftime not handling '' properly
appended_date = (
    dt.strftime(self._date_format)
    if self._date_format != '' else ''
)
if appended_date:
    # in the future, it would be nice for this format
    # to be supplied as a parameter.
    result = root + ' ' + appended_date + ext
else:
    result = self.base_filename
return result
<SYSTEM_TASK:> Emit a record. <END_TASK> <USER_TASK:> Description: def emit(self, record): """ Emit a record. Output the record to the file, ensuring that the currently- opened file has the correct date. """
now = time.time()
current_name = self.get_filename(now)
try:
    if not self.stream.name == current_name:
        self._use_file(current_name)
except AttributeError:
    # a stream has not been created, so create one.
    self._use_file(current_name)
logging.StreamHandler.emit(self, record)
<SYSTEM_TASK:> Register a static directory handler with Mach9 by adding a route to the <END_TASK> <USER_TASK:> Description: def register(app, uri, file_or_directory, pattern, use_modified_since, use_content_range): # TODO: Though mach9 is not a file server, I feel like we should at least # make a good effort here. Modified-since is nice, but we could # also look into etags, expires, and caching """ Register a static directory handler with Mach9 by adding a route to the router and registering a handler. :param app: Mach9 :param file_or_directory: File or directory path to serve from :param uri: URL to serve from :param pattern: regular expression used to match files in the URL :param use_modified_since: If true, send file modified time, and return not modified if the browser's matches the server's :param use_content_range: If true, process header for range requests and sends the file part that is requested """
# If we're not trying to match a file directly, # serve from the folder if not path.isfile(file_or_directory): uri += '<file_uri:' + pattern + '>' async def _handler(request, file_uri=None): # Using this to determine if the URL is trying to break out of the path # served. os.path.realpath seems to be very slow if file_uri and '../' in file_uri: raise InvalidUsage("Invalid URL") # Merge served directory and requested file if provided # Strip all / that in the beginning of the URL to help prevent python # from herping a derp and treating the uri as an absolute path root_path = file_path = file_or_directory if file_uri: file_path = path.join( file_or_directory, sub('^[/]*', '', file_uri)) # URL decode the path sent by the browser otherwise we won't be able to # match filenames which got encoded (filenames with spaces etc) file_path = path.abspath(unquote(file_path)) if not file_path.startswith(path.abspath(unquote(root_path))): raise FileNotFound('File not found', path=file_or_directory, relative_url=file_uri) try: headers = {} # Check if the client has been sent this file before # and it has not been modified since stats = None if use_modified_since: stats = await stat(file_path) modified_since = strftime( '%a, %d %b %Y %H:%M:%S GMT', gmtime(stats.st_mtime)) if request.headers.get('If-Modified-Since') == modified_since: return HTTPResponse(status=304) headers['Last-Modified'] = modified_since _range = None if use_content_range: _range = None if not stats: stats = await stat(file_path) headers['Accept-Ranges'] = 'bytes' headers['Content-Length'] = str(stats.st_size) if request.method != 'HEAD': try: _range = ContentRangeHandler(request, stats) except HeaderNotFound: pass else: del headers['Content-Length'] for key, value in _range.headers.items(): headers[key] = value if request.method == 'HEAD': return HTTPResponse( headers=headers, content_type=guess_type(file_path)[0] or 'text/plain') else: return await file(file_path, headers=headers, _range=_range) except ContentRangeError: raise except Exception: raise FileNotFound('File not found', path=file_or_directory, relative_url=file_uri) app.route(uri, methods=['GET', 'HEAD'])(_handler)
<SYSTEM_TASK:> Evaluates a file containing a Python params dictionary. <END_TASK> <USER_TASK:> Description: def eval_py(self, _globals, _locals): """ Evaluates a file containing a Python params dictionary. """
try:
    params = eval(self.script, _globals, _locals)
except NameError as e:
    raise Exception(
        'Failed to evaluate parameters: {}'
        .format(str(e))
    )
except ResolutionError as e:
    raise Exception('GetOutput: {}'.format(str(e)))
return params
<SYSTEM_TASK:> Creates a new Parameter object from the given ParameterArgument. <END_TASK> <USER_TASK:> Description: def new(cls, arg): """ Creates a new Parameter object from the given ParameterArgument. """
content = None
if arg.kind == 'file':
    if os.path.exists(arg.value):
        with open(arg.value, 'r') as f:
            content = f.read()
    else:
        raise Exception('File does not exist: {}'.format(arg.value))
elif arg.kind == 'cli':
    content = arg.value

for source_cls in cls.sources:
    if source_cls.supports_source(arg):
        return source_cls(content)

msg = 'Unsupported Parameter Source "{}"'
raise Exception(msg.format(arg.value))
<SYSTEM_TASK:> Returns the minimal pitch between two neighboring nodes of the mesh in each direction. <END_TASK> <USER_TASK:> Description: def minimum_pitch(self): """ Returns the minimal pitch between two neighboring nodes of the mesh in each direction. :return: Minimal pitch in each direction. """
pitch = self.pitch
minimal_pitch = []
for p in pitch:
    minimal_pitch.append(min(p))

return min(minimal_pitch)
<SYSTEM_TASK:> Returns nearest node indices and direction of opposite node. <END_TASK> <USER_TASK:> Description: def surrounding_nodes(self, position): """ Returns nearest node indices and direction of opposite node. :param position: Position inside the mesh to search nearest node for as (x,y,z) :return: Nearest node indices and direction of opposite node. """
n_node_index, n_node_position, n_node_error = self.nearest_node(position) if n_node_error == 0.0: index_mod = [] for i in range(len(n_node_index)): new_point = np.asarray(n_node_position) new_point[i] += 1.e-5*np.abs(new_point[i]) try: self.nearest_node(tuple(new_point)) index_mod.append(-1) except ValueError: index_mod.append(1) else: # Check if node_position is larger or smaller in resp. axes than position index_mod = [] for i in range(len(n_node_index)): if n_node_position[i] > position[i]: index_mod.append(-1) else: index_mod.append(1) return tuple(n_node_index), tuple(index_mod)
<SYSTEM_TASK:> Tokenize a string and return an iterator over its tokens. <END_TASK> <USER_TASK:> Description: def tokenize(self, string): """Tokenize a string and return an iterator over its tokens."""
it = colorise.compat.ifilter(None, self._pattern.finditer(string)) try: t = colorise.compat.next(it) except StopIteration: yield string, False return pos, buf, lm, escapeflag = -1, '', -1, False # Check if we need to yield any starting text if t.start() > 0: yield string[:t.start()], False pos = t.start() it = itertools.chain([t], it) for m in it: start = m.start() e, s = m.group(2) or '', m.group(3) escaped = e.count(self._ESCAPE) % 2 != 0 if escaped: buf += string[pos:m.end(2)-1] + s escapeflag = True else: buf += string[pos:m.start(3)] if buf: yield buf, escapeflag buf = '' escapeflag = False if lm == start: yield '', False yield s, False lm = m.end() pos = m.end() if buf: yield buf, escapeflag escapeflag = False if pos < len(string): yield string[pos:], False
<SYSTEM_TASK:> Parse color syntax from a formatted string. <END_TASK> <USER_TASK:> Description: def parse(self, format_string): """Parse color syntax from a formatted string."""
txt, state = '', 0 colorstack = [(None, None)] itokens = self.tokenize(format_string) for token, escaped in itokens: if token == self._START_TOKEN and not escaped: if txt: yield txt, colorstack[-1] txt = '' state += 1 colors = self.extract_syntax(colorise.compat.next(itokens)[0]) colorstack.append(tuple(b or a for a, b in zip(colorstack[-1], colors))) elif token == self._FMT_TOKEN and not escaped: # if state == 0: # raise ColorSyntaxError("Missing '{0}'" # .format(self._START_TOKEN)) if state % 2 != 0: state += 1 else: txt += token elif token == self._STOP_TOKEN and not escaped: if state < 2: raise ColorSyntaxError("Missing '{0}' or '{1}'" .format(self._STOP_TOKEN, self._FMT_TOKEN)) if txt: yield txt, colorstack[-1] txt = '' state -= 2 colorstack.pop() else: txt += token if state != 0: raise ColorSyntaxError("Invalid color format") if txt: yield txt, colorstack[-1]
<SYSTEM_TASK:> Create an Evidence instance from the given mapping <END_TASK> <USER_TASK:> Description: def from_mapping(cls, evidence_mapping): """Create an Evidence instance from the given mapping :param evidence_mapping: a mapping (e.g. dict) of values provided by Watson :return: a new Evidence """
return cls(metadata_map=MetadataMap.from_mapping(evidence_mapping['metadataMap']), copyright=evidence_mapping['copyright'], id=evidence_mapping['id'], terms_of_use=evidence_mapping['termsOfUse'], document=evidence_mapping['document'], title=evidence_mapping['title'], text=evidence_mapping['text'], value=evidence_mapping['value'])
<SYSTEM_TASK:> Check if a name should be excluded. <END_TASK> <USER_TASK:> Description: def should_be_excluded(name, exclude_patterns): """Check if a name should be excluded. Returns True if name matches at least one of the exclude patterns in the exclude_patterns list. """
for pattern in exclude_patterns: if fnmatch.fnmatch(name, pattern): return True return False
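A small, self-contained check of the glob semantics that `should_be_excluded` relies on; the patterns and names here are made-up examples.

import fnmatch

# fnmatch implements the same shell-style matching used above.
patterns = ["*.bak", "tmp*"]
assert any(fnmatch.fnmatch("notes.bak", p) for p in patterns)      # excluded
assert any(fnmatch.fnmatch("tmp_cache", p) for p in patterns)      # excluded
assert not any(fnmatch.fnmatch("notes.txt", p) for p in patterns)  # kept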
<SYSTEM_TASK:>
Filter subdirs that have already been visited.
<END_TASK>
<USER_TASK:>
Description:
def filter_visited(curr_dir, subdirs, already_visited, follow_dirlinks, on_error):
    """Filter subdirs that have already been visited.

    This is used to avoid loops in the search performed by os.walk() in
    index_files_by_size.

    curr_dir is the path of the current directory, as returned by os.walk().

    subdirs is the list of subdirectories for the current directory, as
    returned by os.walk().

    already_visited is a set of tuples (st_dev, st_ino) of already visited
    directories. This set will not be modified.

    on_error is a function f(OSError) -> None, to be called in case of error.

    Returns a tuple: the new (possibly filtered) subdirs list, and a new set
    of already visited directories, now including the subdirs.

    """
filtered = [] to_visit = set() _already_visited = already_visited.copy() try: # mark the current directory as visited, so we catch symlinks to it # immediately instead of after one iteration of the directory loop file_info = os.stat(curr_dir) if follow_dirlinks else os.lstat(curr_dir) _already_visited.add((file_info.st_dev, file_info.st_ino)) except OSError as e: on_error(e) for subdir in subdirs: full_path = os.path.join(curr_dir, subdir) try: file_info = os.stat(full_path) if follow_dirlinks else os.lstat(full_path) except OSError as e: on_error(e) continue if not follow_dirlinks and stat.S_ISLNK(file_info.st_mode): # following links to dirs is disabled, ignore this one continue dev_inode = (file_info.st_dev, file_info.st_ino) if dev_inode not in _already_visited: filtered.append(subdir) to_visit.add(dev_inode) else: on_error(OSError(errno.ELOOP, "directory loop detected", full_path)) return filtered, _already_visited.union(to_visit)
<SYSTEM_TASK:>
Recursively index files under a root directory.
<END_TASK>
<USER_TASK:>
Description:
def index_files_by_size(root, files_by_size, exclude_dirs, exclude_files, follow_dirlinks):
    """Recursively index files under a root directory.

    Each regular file is added *in-place* to the files_by_size dictionary,
    according to the file size. This is a (possibly empty) dictionary of
    lists of filenames, indexed by file size.

    exclude_dirs is a list of glob patterns to exclude directories.

    exclude_files is a list of glob patterns to exclude files.

    follow_dirlinks controls whether to follow symbolic links to
    subdirectories while crawling.

    Returns a list of error messages for any I/O errors that occurred while
    listing directories. If the list is empty, there were no errors.

    """
# encapsulate the value in a list, so we can modify it by reference # inside the auxiliary function errors = [] already_visited = set() def _print_error(error): """Print a listing error to stderr. error should be an os.OSError instance. """ # modify the outside errors value; must be encapsulated in a list, # because if we assign to a variable here we just create an # independent local copy msg = "error listing '%s': %s" % (error.filename, error.strerror) sys.stderr.write("%s\n" % msg) errors.append(msg) # XXX: The actual root may be matched by the exclude pattern. Should we # prune it as well? for curr_dir, subdirs, filenames in os.walk(root, topdown=True, onerror=_print_error, followlinks=follow_dirlinks): # modify subdirs in-place to influence os.walk subdirs[:] = prune_names(subdirs, exclude_dirs) filenames = prune_names(filenames, exclude_files) # remove subdirs that have already been visited; loops can happen # if there's a symlink loop and follow_dirlinks==True, or if # there's a hardlink loop (which is usually a corrupted filesystem) subdirs[:], already_visited = filter_visited(curr_dir, subdirs, already_visited, follow_dirlinks, _print_error) for base_filename in filenames: full_path = os.path.join(curr_dir, base_filename) # avoid race condition: file can be deleted between os.walk() # seeing it and us calling os.lstat() try: file_info = os.lstat(full_path) except OSError as e: _print_error(e) continue # only want regular files, not symlinks if stat.S_ISREG(file_info.st_mode): size = file_info.st_size if size in files_by_size: # append to the list of files with the same size files_by_size[size].append(full_path) else: # start a new list for this file size files_by_size[size] = [full_path] return errors
<SYSTEM_TASK:>
Calculate the MD5 hash of a file, up to length bytes.
<END_TASK>
<USER_TASK:>
Description:
def calculate_md5(filename, length):
    """Calculate the MD5 hash of a file, up to length bytes.

    Returns the MD5 in its binary form, as a 16-byte string.

    Raises IOError or OSError in case of error.

    """
assert length >= 0

# shortcut: MD5 of an empty string is 'd41d8cd98f00b204e9800998ecf8427e',
# represented here in binary (a bytes literal, to match hashlib's digest() below)
if length == 0:
    return b'\xd4\x1d\x8c\xd9\x8f\x00\xb2\x04\xe9\x80\t\x98\xec\xf8\x42\x7e'

md5_summer = hashlib.md5()

f = open(filename, 'rb')

try:
    bytes_read = 0

    while bytes_read < length:
        chunk_size = min(MD5_CHUNK_SIZE, length - bytes_read)

        chunk = f.read(chunk_size)

        if not chunk:
            # found EOF: means length was larger than the file size, or
            # file was truncated while reading -- print warning?
            break

        md5_summer.update(chunk)
        bytes_read += len(chunk)

finally:
    f.close()

md5 = md5_summer.digest()

return md5
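A sketch of how the partial hashing above behaves, reproduced with plain hashlib on a scratch file. The call to `calculate_md5` itself is left commented out, since this excerpt does not name the module it lives in.

import hashlib
import os
import tempfile

# Create a scratch file so the example is self-contained.
fd, path = tempfile.mkstemp()
with os.fdopen(fd, "wb") as fh:
    fh.write(b"hello world, " * 100)

# Hashing only the first 64 bytes with hashlib gives the same digest that
# calculate_md5(path, 64) returns -- the helper just reads in chunks.
with open(path, "rb") as fh:
    expected = hashlib.md5(fh.read(64)).digest()
print(len(expected))  # 16 bytes
# assert calculate_md5(path, 64) == expected

os.remove(path)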
<SYSTEM_TASK:> Find duplicates in a list of files, comparing up to `max_size` bytes. <END_TASK> <USER_TASK:> Description: def find_duplicates(filenames, max_size): """Find duplicates in a list of files, comparing up to `max_size` bytes. Returns a 2-tuple of two values: ``(duplicate_groups, errors)``. `duplicate_groups` is a (possibly empty) list of lists: the names of files that have at least two copies, grouped together. `errors` is a list of error messages that occurred. If empty, there were no errors. For example, assuming ``a1`` and ``a2`` are identical, ``c1`` and ``c2`` are identical, and ``b`` is different from all others:: >>> dups, errs = find_duplicates(['a1', 'a2', 'b', 'c1', 'c2'], 1024) >>> dups [['a1', 'a2'], ['c1', 'c2']] >>> errs [] Note that ``b`` is not included in the results, as it has no duplicates. """
errors = [] # shortcut: can't have duplicates if there aren't at least 2 files if len(filenames) < 2: return [], errors # shortcut: if comparing 0 bytes, they're all the same if max_size == 0: return [filenames], errors files_by_md5 = {} for filename in filenames: try: md5 = calculate_md5(filename, max_size) except EnvironmentError as e: msg = "unable to calculate MD5 for '%s': %s" % (filename, e.strerror) sys.stderr.write("%s\n" % msg) errors.append(msg) continue if md5 not in files_by_md5: # unique beginning so far; index it on its own files_by_md5[md5] = [filename] else: # found a potential duplicate (same beginning) files_by_md5[md5].append(filename) # Filter out the unique files (lists of files with the same md5 that # only contain 1 file), and create a list of the lists of duplicates. # Don't use values() because on Python 2 this creates a list of all # values (file lists), and that may be very large. duplicates = [l for l in py3compat.itervalues(files_by_md5) if len(l) >= 2] return duplicates, errors
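A usage sketch mirroring the docstring example with real scratch files; the `find_duplicates` call is commented out because the enclosing module is not named in this excerpt, and the exact ordering of the returned paths follows the input order.

import os
import tempfile

tmpdir = tempfile.mkdtemp()
contents = {"a1": b"same bytes", "a2": b"same bytes", "b": b"different"}
paths = {}
for name, payload in contents.items():
    paths[name] = os.path.join(tmpdir, name)
    with open(paths[name], "wb") as fh:
        fh.write(payload)

# dups, errs = find_duplicates([paths["a1"], paths["a2"], paths["b"]], max_size=1024)
# Expected: dups == [[paths["a1"], paths["a2"]]] and errs == []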
<SYSTEM_TASK:> Recursively scan a list of directories, looking for duplicate files. <END_TASK> <USER_TASK:> Description: def find_duplicates_in_dirs(directories, exclude_dirs=None, exclude_files=None, follow_dirlinks=False): """Recursively scan a list of directories, looking for duplicate files. `exclude_dirs`, if provided, should be a list of glob patterns. Subdirectories whose names match these patterns are excluded from the scan. `exclude_files`, if provided, should be a list of glob patterns. Files whose names match these patterns are excluded from the scan. ``follow_dirlinks`` controls whether to follow symbolic links to subdirectories while crawling. Returns a 2-tuple of two values: ``(duplicate_groups, errors)``. `duplicate_groups` is a (possibly empty) list of lists: the names of files that have at least two copies, grouped together. `errors` is a list of error messages that occurred. If empty, there were no errors. For example, assuming ``./a1`` and ``/dir1/a2`` are identical, ``/dir1/c1`` and ``/dir2/c2`` are identical, ``/dir2/b`` is different from all others, that any subdirectories called ``tmp`` should not be scanned, and that files ending in ``.bak`` should be ignored: >>> dups, errs = find_duplicates_in_dirs(['.', '/dir1', '/dir2'], ['tmp'], ['*.bak']) >>> dups [['./a1', '/dir1/a2'], ['/dir1/c1', '/dir2/c2']] >>> errs [] """
if exclude_dirs is None: exclude_dirs = [] if exclude_files is None: exclude_files = [] errors_in_total = [] files_by_size = {} # First, group all files by size for directory in directories: sub_errors = index_files_by_size(directory, files_by_size, exclude_dirs, exclude_files, follow_dirlinks) errors_in_total += sub_errors all_duplicates = [] # Now, within each file size, check for duplicates. # # We use an iterator over the dict (which gives us the keys), instead # of explicitly accessing dict.keys(). On Python 2, dict.keys() returns # a list copy of the keys, which may be very large. for size in iter(files_by_size): # for large file sizes, divide them further into groups by matching # initial portion; how much of the file is used to match depends on # the file size if size >= PARTIAL_MD5_THRESHOLD: partial_size = min(round_up_to_mult(size // PARTIAL_MD5_READ_RATIO, PARTIAL_MD5_READ_MULT), PARTIAL_MD5_MAX_READ) possible_duplicates_list, sub_errors = find_duplicates(files_by_size[size], partial_size) errors_in_total += sub_errors else: # small file size, group them all together and do full MD5s possible_duplicates_list = [files_by_size[size]] # Do full MD5 scan on suspected duplicates. calculate_md5 (and # therefore find_duplicates) needs to know how many bytes to scan. # We're using the file's size, as per stat(); this is a problem if # the file is growing. We'll only scan up to the size the file had # when we indexed. Would be better to somehow tell calculate_md5 to # scan until EOF (e.g. give it a negative size). for possible_duplicates in possible_duplicates_list: duplicates, sub_errors = find_duplicates(possible_duplicates, size) all_duplicates += duplicates errors_in_total += sub_errors return all_duplicates, errors_in_total
<SYSTEM_TASK:> returns SkyCoord object with n positions randomly oriented on the unit sphere <END_TASK> <USER_TASK:> Description: def random_spherepos(n): """returns SkyCoord object with n positions randomly oriented on the unit sphere Parameters ---------- n : int number of positions desired Returns ------- c : ``SkyCoord`` object with random positions """
signs = np.sign(rand.uniform(-1,1,size=n)) thetas = Angle(np.arccos(rand.uniform(size=n)*signs),unit=u.rad) #random b/w 0 and 180 phis = Angle(rand.uniform(0,2*np.pi,size=n),unit=u.rad) c = SkyCoord(phis,thetas,1,representation='physicsspherical') return c
<SYSTEM_TASK:> Return a dict of all instance variables with truthy values, <END_TASK> <USER_TASK:> Description: def to_dict(self): """ Return a dict of all instance variables with truthy values, with key names camelized """
return { inflection.camelize(k, False): v for k, v in self.__dict__.items() if v }
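A self-contained sketch of the camelizing serializer above. `SearchOptions` is a hypothetical holder class introduced only for illustration, and `inflection` must be installed.

import inflection

class SearchOptions(object):
    """Hypothetical options holder reusing the to_dict pattern above."""

    def __init__(self, start_date=None, page_size=None, sort_order=None):
        self.start_date = start_date
        self.page_size = page_size
        self.sort_order = sort_order

    def to_dict(self):
        return {
            inflection.camelize(k, False): v
            for k, v in self.__dict__.items() if v
        }

opts = SearchOptions(start_date="2020-01-01", page_size=50)
# Falsy sort_order is dropped; snake_case keys become camelCase:
print(opts.to_dict())  # {'startDate': '2020-01-01', 'pageSize': 50}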
<SYSTEM_TASK:> Returns the number of ancestors of this directory. <END_TASK> <USER_TASK:> Description: def depth(self): """ Returns the number of ancestors of this directory. """
return len(self.path.rstrip(os.sep).split(os.sep))
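A quick illustration of what the split above counts: an absolute POSIX path contributes a leading empty component, so '/usr/local/hyde' has depth 4. The path is a made-up example.

# Same split as depth, with the separator hard-coded for clarity:
parts = "/usr/local/hyde".rstrip("/").split("/")
print(parts)       # ['', 'usr', 'local', 'hyde']
print(len(parts))  # 4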
<SYSTEM_TASK:> Generates the parents until stop or the absolute <END_TASK> <USER_TASK:> Description: def ancestors(self, stop=None): """ Generates the parents until stop or the absolute root directory is reached. """
folder = self while folder.parent != stop: if folder.parent == folder: return yield folder.parent folder = folder.parent
<SYSTEM_TASK:> Checks if this folder is inside the given ancestor. <END_TASK> <USER_TASK:> Description: def is_descendant_of(self, ancestor): """ Checks if this folder is inside the given ancestor. """
stop = Folder(ancestor) for folder in self.ancestors(): if folder == stop: return True if stop.depth > folder.depth: return False return False
<SYSTEM_TASK:> Gets the fragment of the current path starting at root. <END_TASK> <USER_TASK:> Description: def get_relative_path(self, root): """ Gets the fragment of the current path starting at root. """
if self.path == root: return '' ancestors = self.ancestors(stop=root) return functools.reduce(lambda f, p: Folder(p.name).child(f), ancestors, self.name)
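The reduce above can be hard to read; here is the same fold written with plain os.path calls for a hypothetical layout, assuming the conventions of the Folder class shown in this module.

import functools
import os.path

# Folder('/usr/local/hyde/stuff').get_relative_path('/usr/local') walks the
# ancestors up to (but not including) the root and folds their names back in:
ancestors = ["/usr/local/hyde"]  # what self.ancestors(stop='/usr/local') yields
fragment = functools.reduce(
    lambda f, p: os.path.join(os.path.basename(p), f), ancestors, "stuff")
print(fragment)  # hyde/stuff (on POSIX)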
<SYSTEM_TASK:>
Returns a File or Folder object that represents what this directory would be if the entire
<END_TASK>
<USER_TASK:>
Description:
def get_mirror(self, target_root, source_root=None):
    """
    Returns a File or Folder object that represents what this directory
    would be if the entire fragment of this directory starting with
    `source_root` were copied to `target_root`.

    >>> Folder('/usr/local/hyde/stuff').get_mirror('/usr/tmp', source_root='/usr/local/hyde')
    Folder('/usr/tmp/stuff')
    """
fragment = self.get_relative_path( source_root if source_root else self.parent) return Folder(target_root).child(fragment)
<SYSTEM_TASK:> Returns a File or Folder object that would represent the given path. <END_TASK> <USER_TASK:> Description: def file_or_folder(path): """ Returns a File or Folder object that would represent the given path. """
target = unicode(path) return Folder(target) if os.path.isdir(target) else File(target)
<SYSTEM_TASK:> Return true if this is a binary file. <END_TASK> <USER_TASK:> Description: def is_binary(self): """Return true if this is a binary file."""
with open(self.path, 'rb') as fin: CHUNKSIZE = 1024 while 1: chunk = fin.read(CHUNKSIZE) if b'\0' in chunk: return True if len(chunk) < CHUNKSIZE: break return False
<SYSTEM_TASK:>
Creates a temporary file and writes the `text` into it
<END_TASK>
<USER_TASK:>
Description:
def make_temp(text):
    """
    Creates a temporary file and writes the `text` into it
    """
import tempfile (handle, path) = tempfile.mkstemp(text=True) os.close(handle) afile = File(path) afile.write(text) return afile
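For orientation, the stdlib calls that make_temp, write and read_all wrap, chained end to end as a self-contained sketch (no dependency on the File class itself).

import codecs
import os
import tempfile

# make_temp: create the file and close the low-level handle.
handle, path = tempfile.mkstemp(text=True)
os.close(handle)

# write(): encode with UTF-8 by default.
with codecs.open(path, "w", "utf-8") as fout:
    fout.write(u"hello world")

# read_all(): decode with the same encoding.
with codecs.open(path, "r", "utf-8") as fin:
    assert fin.read() == u"hello world"

os.remove(path)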
<SYSTEM_TASK:> Reads from the file and returns the content as a string. <END_TASK> <USER_TASK:> Description: def read_all(self, encoding='utf-8'): """ Reads from the file and returns the content as a string. """
logger.info("Reading everything from %s" % self) with codecs.open(self.path, 'r', encoding) as fin: read_text = fin.read() return read_text
<SYSTEM_TASK:> Writes the given text to the file using the given encoding. <END_TASK> <USER_TASK:> Description: def write(self, text, encoding="utf-8"): """ Writes the given text to the file using the given encoding. """
logger.info("Writing to %s" % self) with codecs.open(self.path, 'w', encoding) as fout: fout.write(text)
<SYSTEM_TASK:> Copies the file to the given destination. Returns a File <END_TASK> <USER_TASK:> Description: def copy_to(self, destination): """ Copies the file to the given destination. Returns a File object that represents the target file. `destination` must be a File or Folder object. """
target = self.__get_destination__(destination) logger.info("Copying %s to %s" % (self, target)) shutil.copy(self.path, unicode(destination)) return target
<SYSTEM_TASK:> Returns a folder object by combining the fragment to this folder's path <END_TASK> <USER_TASK:> Description: def child_folder(self, fragment): """ Returns a folder object by combining the fragment to this folder's path """
return Folder(os.path.join(self.path, Folder(fragment).path))
<SYSTEM_TASK:> Returns a path of a child item represented by `fragment`. <END_TASK> <USER_TASK:> Description: def child(self, fragment): """ Returns a path of a child item represented by `fragment`. """
return os.path.join(self.path, FS(fragment).path)
<SYSTEM_TASK:> Creates this directory and any of the missing directories in the path. <END_TASK> <USER_TASK:> Description: def make(self): """ Creates this directory and any of the missing directories in the path. Any errors that may occur are eaten. """
try: if not self.exists: logger.info("Creating %s" % self.path) os.makedirs(self.path) except os.error: pass return self
<SYSTEM_TASK:> Deletes the directory if it exists. <END_TASK> <USER_TASK:> Description: def delete(self): """ Deletes the directory if it exists. """
if self.exists: logger.info("Deleting %s" % self.path) shutil.rmtree(self.path)
<SYSTEM_TASK:>
There is a bug in dir_util that makes `copy_tree` crash if a folder in
<END_TASK>
<USER_TASK:>
Description:
def _create_target_tree(self, target):
    """
    There is a bug in dir_util that makes `copy_tree` crash if a folder in
    the tree has been deleted before and re-added now. To work around the
    bug, we first walk the tree and create directories that are needed.
    """
source = self with source.walker as walker: @walker.folder_visitor def visit_folder(folder): """ Create the mirror directory """ if folder != source: Folder(folder.get_mirror(target, source)).make()
<SYSTEM_TASK:> Copies the contents of this directory to the given destination. <END_TASK> <USER_TASK:> Description: def copy_contents_to(self, destination): """ Copies the contents of this directory to the given destination. Returns a Folder object that represents the moved directory. """
logger.info("Copying contents of %s to %s" % (self, destination)) target = Folder(destination) target.make() self._create_target_tree(target) dir_util.copy_tree(self.path, unicode(target)) return target
<SYSTEM_TASK:> Start a new thread to process Cron <END_TASK> <USER_TASK:> Description: def __start(self): """ Start a new thread to process Cron """
thread = Thread(target=self.__loop, args=()) thread.daemon = True # daemonize thread thread.start() self.__enabled = True
<SYSTEM_TASK:> Convert the passed attr to a BetterDict if the value is a dict <END_TASK> <USER_TASK:> Description: def __dict_to_BetterDict(self, attr): """Convert the passed attr to a BetterDict if the value is a dict Returns: The new value of the passed attribute."""
if type(self[attr]) == dict: self[attr] = BetterDict(self[attr]) return self[attr]
<SYSTEM_TASK:> Property that allows dot lookups of otherwise hidden attributes. <END_TASK> <USER_TASK:> Description: def _bd_(self): """Property that allows dot lookups of otherwise hidden attributes."""
if not getattr(self, '__bd__', False): self.__bd = BetterDictLookUp(self) return self.__bd
<SYSTEM_TASK:> Create or update an Activity Monitor item from some instance. <END_TASK> <USER_TASK:> Description: def create_or_update(sender, **kwargs): """ Create or update an Activity Monitor item from some instance. """
now = datetime.datetime.now() # I can't explain why this import fails unless it's here. from activity_monitor.models import Activity instance = kwargs['instance'] # Find this object's content type and model class. instance_content_type = ContentType.objects.get_for_model(sender) instance_model = sender content_object = instance_model.objects.get(id=instance.id) # check to see if the activity already exists. Will need later. try: activity = Activity.objects.get(content_type=instance_content_type, object_id=content_object.id) except: activity = None # We now know the content type, the model (sender), content type and content object. # We need to loop through ACTIVITY_MONITOR_MODELS in settings for other fields for activity_setting in settings.ACTIVITY_MONITOR_MODELS: this_app_label = activity_setting['model'].split('.')[0] this_model_label = activity_setting['model'].split('.')[1] this_content_type = ContentType.objects.get(app_label=this_app_label, model=this_model_label) if this_content_type == instance_content_type: # first, check to see if we even WANT to register this activity. # use the boolean 'check' field. Also, delete if needed. if 'check' in activity_setting: if getattr(instance, activity_setting['check']) is False: if activity: activity.delete() return # does it use the default manager (objects) or a custom manager? try: manager = activity_setting['manager'] except: manager = 'objects' # what field denotes the activity time? created is default try: timestamp = getattr(instance, activity_setting['date_field']) except: timestamp = getattr(instance, 'created') # if the given time stamp is a daterather than datetime type, # normalize it out to a datetime if type(timestamp) == type(now): clean_timestamp = timestamp else: clean_timestamp = datetime.datetime.combine(timestamp, datetime.time()) # Find a valid user object if 'user_field' in activity_setting: # pull the user object from instance using user_field user = getattr(instance, activity_setting['user_field']) elif this_model_label == 'user' or this_model_label == 'profile': # this IS auth.user or a Django 1.5 custom user user = instance else: # we didn't specify a user, so it must be instance.user user = instance.user # BAIL-OUT CHECKS # Determine all the reasons we would want to bail out. # Make sure it's not a future item, like a future-published blog entry. if clean_timestamp > now: return # or some really old content that was just re-saved for some reason if clean_timestamp < (now - datetime.timedelta(days=3)): return # or there's not a user object if not user: return # or the user is god or staff, and we're filtering out, don't add to monitor if user.is_superuser and 'filter_superuser' in activity_setting: return if user.is_staff and 'filter_staff' in activity_setting: return # build a default string representation # note that each activity can get back to the object via get_absolute_url() verb = activity_setting.get('verb', None) override_string = activity_setting.get('override_string', None) # MANAGER CHECK # Make sure the item "should" be registered, based on the manager argument. # If InstanceModel.manager.all() includes this item, then register. Otherwise, return. # Also, check to see if it should be deleted. try: getattr(instance_model, manager).get(pk=instance.pk) except instance_model.DoesNotExist: try: activity.delete() return except Activity.DoesNotExist: return if user and clean_timestamp and instance: if not activity: # If the activity didn't already exist, create it. 
activity = Activity( actor = user, content_type = instance_content_type, object_id = content_object.id, content_object = content_object, timestamp = clean_timestamp, verb = verb, override_string = override_string, ) activity.save() return activity
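The handler above has the shape of a Django post_save receiver. Below is a hedged sketch of how it might be wired up; the model, app and import paths are placeholders, and the actual registration inside activity_monitor may differ.

# Hypothetical wiring -- model/app names and import path are placeholders:
# from django.db.models.signals import post_save
# from blog.models import Entry
# from activity_monitor.signals import create_or_update
#
# post_save.connect(create_or_update, sender=Entry)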
<SYSTEM_TASK:> Highlight the characters in s2 that differ from those in s1. <END_TASK> <USER_TASK:> Description: def highlight_differences(s1, s2, color): """Highlight the characters in s2 that differ from those in s1."""
ls1, ls2 = len(s1), len(s2) diff_indices = [i for i, (a, b) in enumerate(zip(s1, s2)) if a != b] print(s1) if ls2 > ls1: colorise.cprint('_' * (ls2-ls1), fg=color) else: print() colorise.highlight(s2, indices=diff_indices, fg=color, end='') if ls1 > ls2: colorise.cprint('_' * (ls1-ls2), fg=color) else: print()
<SYSTEM_TASK:> Create a Jinja2 `~jinja2.Environment`. <END_TASK> <USER_TASK:> Description: def create_jinja_env(): """Create a Jinja2 `~jinja2.Environment`. Returns ------- env : `jinja2.Environment` Jinja2 template rendering environment, configured to use templates in ``templates/``. """
template_dir = os.path.join(os.path.dirname(__file__), 'templates') env = jinja2.Environment( loader=jinja2.FileSystemLoader(template_dir), autoescape=jinja2.select_autoescape(['html']) ) env.filters['simple_date'] = filter_simple_date env.filters['paragraphify'] = filter_paragraphify return env
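A self-contained variant of the environment above, using an in-memory loader to show the autoescaping behaviour. The real helper loads from the package's templates/ directory and also registers two custom filters whose bodies are not shown in this excerpt.

import jinja2

env = jinja2.Environment(
    loader=jinja2.DictLoader({"page.html": "<h1>{{ title }}</h1>"}),
    autoescape=jinja2.select_autoescape(["html"]),
)
print(env.get_template("page.html").render(title="Hello & welcome"))
# -> <h1>Hello &amp; welcome</h1>  (autoescaping applied to .html templates)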
<SYSTEM_TASK:>
Get calendar date probabilities
<END_TASK>
<USER_TASK:>
Description:
def d_cal(calibcurve, rcmean, w2, cutoff=0.0001, normal_distr=False, t_a=3, t_b=4):
    """Get calendar date probabilities

    Parameters
    ----------
    calibcurve : CalibCurve
        Calibration curve.
    rcmean : scalar
        Reservoir-adjusted age.
    w2 : scalar
        r'$w^2_j(\theta)$' from pg 461 & 463 of Blaauw and Christen 2011.
    cutoff : scalar, optional
        Unknown.
    normal_distr : Bool, optional
        Use normal distribution for date errors. If False, then use
        Student's t-distribution.
    t_a : scalar, optional
        Student's t-distribution parameter, a. Must equal t_b - 1.
    t_b : scalar, optional
        Student's t-distribution parameter, b. Must equal t_a + 1.

    #Line 943 of Bacon.R
    #cc : calib_curve (3-col format)
    #rcmean : det['age'][i] - d_R
    #w2 : dat['error'][i]^2 + d_STD**2
    """
assert t_b - 1 == t_a if normal_distr: # TODO(brews): Test this. Line 946 of Bacon.R. std = np.sqrt(calibcurve.error ** 2 + w2) dens = stats.norm(loc=rcmean, scale=std).pdf(calibcurve.c14age) else: # TODO(brews): Test this. Line 947 of Bacon.R. dens = (t_b + ((rcmean - calibcurve.c14age) ** 2) / (2 * (calibcurve.error ** 2 + w2))) ** (-1 * (t_a + 0.5)) cal = np.array([calibcurve.calbp.copy(), dens]).T cal[:, 1] = cal[:, 1] / cal[:, 1].sum() # "ensure that also very precise dates get a range of probabilities" cutoff_mask = cal[:, 1] > cutoff if cutoff_mask.sum() > 5: out = cal[cutoff_mask, :] else: calx = np.linspace(cal[:, 0].min(), cal[:, 0].max(), num=50) caly = np.interp(calx, cal[:, 0], cal[:, 1]) out = np.array([calx, caly / caly.sum()]).T return out
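Written out, the unnormalised weight computed in the Student's-t branch above is the following, with y the reservoir-corrected age (rcmean), mu(theta) and sigma(theta) the calibration-curve age and error, and a, b the t-distribution parameters; the notation is assumed from Blaauw & Christen 2011 rather than stated in this excerpt:

p(y \mid \theta) \;\propto\; \left( b + \frac{\bigl(y - \mu(\theta)\bigr)^{2}}{2\,\bigl(\sigma^{2}(\theta) + w^{2}\bigr)} \right)^{-\left(a + \tfrac{1}{2}\right)}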
<SYSTEM_TASK:>
Get density of calendar dates for chron date segment in core
<END_TASK>
<USER_TASK:>
Description:
def calibrate_dates(chron, calib_curve, d_r, d_std, cutoff=0.0001, normal_distr=False, t_a=[3], t_b=[4]):
    """Get density of calendar dates for chron date segment in core

    Parameters
    ----------
    chron : DatedProxy-like
    calib_curve : CalibCurve or list of CalibCurves
    d_r : scalar or ndarray
        Carbon reservoir offset.
    d_std : scalar or ndarray
        Carbon reservoir offset error standard deviation.
    cutoff : scalar, optional
        Unknown.
    normal_distr : Bool, optional
        Use normal distribution for date errors. If False, then use
        Student's t-distribution.
    t_a : scalar or ndarray, optional
        Student's t-distribution parameter, a. Must equal t_b - 1.
    t_b : scalar or ndarray, optional
        Student's t-distribution parameter, b. Must equal t_a + 1.

    Returns
    -------
    depth : ndarray
        Depth of dated sediment sample.
    probs : list of 2d arrays
        Density of calendar age for each dated sediment sample. For each
        sediment sample, the 2d array has two columns, the first is the
        calendar age. The second column is the density for that calendar
        age.
    """
# Python version of .bacon.calib() on line 908 in Bacon.R # .bacon.calib - line 908 # rcmean = 4128; w2 = 4225; t_a=3; t_b=4 # test = d_cal(cc = calib_curve.rename(columns = {0:'a', 1:'b', 2:'c'}), rcmean = 4128, w2 = 4225, t_a=t_a, # t_b=t_b, cutoff=cutoff, normal = normal) # Line 959 of Bacon.R # calib = list(dets.iloc[:, 3]) # Now Bacon goes and checks the ncol in the dets See line #960 in Bacon.R # Line #973 # TODO(brews): Check that `normal_dist` is used and documented correctly in docstring above. # TODO(brews): Check whether we call returned values densities, freqs or what options we should have. n = len(chron.depth) calib_curve = np.array(calib_curve) t_a = np.array(t_a) t_b = np.array(t_b) assert t_b - 1 == t_a d_r = np.array(d_r) d_std = np.array(d_std) if len(t_a) == 1: t_a = np.repeat(t_a, n) if len(t_b) == 1: t_b = np.repeat(t_b, n) if len(d_r) == 1: d_r = np.repeat(d_r, n) if len(d_std) == 1: d_std = np.repeat(d_std, n) if len(calib_curve) == 1: calib_curve = np.repeat(calib_curve, n) calib_probs = [] rcmean = chron.age - d_r w2 = chron.error ** 2 + d_std ** 2 for i in range(n): age_realizations = d_cal(calib_curve[i], rcmean=rcmean[i], w2=w2[i], t_a=t_a[i], t_b=t_b[i], cutoff=cutoff, normal_distr=normal_distr) calib_probs.append(age_realizations) return np.array(chron.depth), calib_probs
<SYSTEM_TASK:>
Override in an appropriate way to prepare a logged-in browser.
<END_TASK>
<USER_TASK:>
Description:
def _init_browser(self):
    """Override in an appropriate way to prepare a logged-in browser."""
self.browser = splinter.Browser('phantomjs') self.browser.visit(self.server_url + "/youraccount/login") try: self.browser.fill('nickname', self.user) self.browser.fill('password', self.password) except: self.browser.fill('p_un', self.user) self.browser.fill('p_pw', self.password) self.browser.fill('login_method', self.login_method) self.browser.find_by_css('input[type=submit]').click()
<SYSTEM_TASK:> Upload a record to the server. <END_TASK> <USER_TASK:> Description: def upload_marcxml(self, marcxml, mode): """Upload a record to the server. :param marcxml: the XML to upload. :param mode: the mode to use for the upload. - "-i" insert new records - "-r" replace existing records - "-c" correct fields of records - "-a" append fields to records - "-ir" insert record or replace if it exists """
if mode not in ["-i", "-r", "-c", "-a", "-ir"]: raise NameError("Incorrect mode " + str(mode)) return requests.post(self.server_url + "/batchuploader/robotupload", data={'file': marcxml, 'mode': mode}, headers={'User-Agent': CFG_USER_AGENT})
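A hedged usage sketch of the upload method above; the client class name, server URL and record content are placeholders, since this excerpt only shows the method itself.

# client = SomeConnector("https://repo.example.org")   # hypothetical class exposing upload_marcxml
# marcxml = """<record>
#   <datafield tag="245" ind1=" " ind2=" ">
#     <subfield code="a">A test title</subfield>
#   </datafield>
# </record>"""
# resp = client.upload_marcxml(marcxml, mode="-ir")     # insert, or replace if it exists
# print(resp.status_code)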
<SYSTEM_TASK:> Returns the URL to this record. <END_TASK> <USER_TASK:> Description: def url(self): """ Returns the URL to this record. Returns None if not known """
if self.server_url is not None and \ self.recid is not None: return '/'.join( [self.server_url, CFG_SITE_RECORD, str(self.recid)]) else: return None
<SYSTEM_TASK:>
Extracts the sets of keywords for each Twitter list.
<END_TASK>
<USER_TASK:>
Description:
def clean_list_of_twitter_list(list_of_twitter_lists, sent_tokenize, _treebank_word_tokenize, tagger, lemmatizer, lemmatize, stopset, first_cap_re, all_cap_re, digits_punctuation_whitespace_re, pos_set):
    """
    Extracts the sets of keywords for each Twitter list.

    Inputs:  - list_of_twitter_lists: A python list of Twitter lists in json format.
             - lemmatize: A string containing one of the following: "porter", "snowball" or "wordnet".

    Output:  - list_of_keyword_sets: A list of sets of keywords (i.e. not a bag-of-words) in python set format.
             - list_of_lemma_to_keywordbags: List of python dicts that map stems/lemmas to original topic keywords.
    """
list_of_keyword_sets = list() append_keyword_set = list_of_keyword_sets.append list_of_lemma_to_keywordbags = list() append_lemma_to_keywordbag = list_of_lemma_to_keywordbags.append if list_of_twitter_lists is not None: for twitter_list in list_of_twitter_lists: if twitter_list is not None: keyword_set, lemma_to_keywordbag = clean_twitter_list(twitter_list, sent_tokenize, _treebank_word_tokenize, tagger, lemmatizer, lemmatize, stopset, first_cap_re, all_cap_re, digits_punctuation_whitespace_re, pos_set) append_keyword_set(keyword_set) append_lemma_to_keywordbag(lemma_to_keywordbag) return list_of_keyword_sets, list_of_lemma_to_keywordbags