<SYSTEM_TASK:> Return a start, end tuple of positions around the transcription-start <END_TASK> <USER_TASK:> Description:
def tss(self, up=0, down=0):
    """
    Return a start, end tuple of positions around the transcription-start
    site

    Parameters
    ----------
    up : int
        if greater than 0, the strand is used to add this many upstream
        bases in the appropriate direction
    down : int
        if greater than 0, the strand is used to add this many downstream
        bases into the gene.
    """
    if not self.is_gene_pred:
        return None
    tss = self.txEnd if self.strand == '-' else self.txStart
    start, end = tss, tss
    if self.strand == '+':
        start -= up
        end += down
    else:
        start += up
        end -= down
        start, end = end, start
    return max(0, start), max(end, start, 0)
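As a sanity check, here is a standalone sketch of the same strand arithmetic (the helper below is hypothetical, not part of the library):

def _tss_window(txStart, txEnd, strand, up=0, down=0):
    # standalone re-statement of tss() above, for illustration only
    pos = txEnd if strand == "-" else txStart
    if strand == "+":
        start, end = pos - up, pos + down
    else:
        start, end = pos - down, pos + up
    return max(0, start), max(end, start, 0)

# a '-' strand gene with txEnd=500: 10 bases upstream extends to 510
assert _tss_window(100, 500, "-", up=10, down=5) == (495, 510)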
<SYSTEM_TASK:> Return a start, end tuple of positions for the promoter region of this <END_TASK> <USER_TASK:> Description:
def promoter(self, up=2000, down=0):
    """
    Return a start, end tuple of positions for the promoter region of this
    gene

    Parameters
    ----------
    up : int
        the distance upstream that is considered the promoter
    down : int
        the strand is used to add this many downstream bases into the gene.
    """
    if not self.is_gene_pred:
        return None
    return self.tss(up=up, down=down)
<SYSTEM_TASK:> just the parts of the exons that are translated <END_TASK> <USER_TASK:> Description: def cds(self): """just the parts of the exons that are translated"""
    ces = self.coding_exons
    if len(ces) < 1:
        return ces
    ces[0] = (self.cdsStart, ces[0][1])
    ces[-1] = (ces[-1][0], self.cdsEnd)
    assert all((s < e for s, e in ces))
    return ces
<SYSTEM_TASK:> return a boolean indicating whether this feature is downstream of <END_TASK> <USER_TASK:> Description:
def is_downstream_of(self, other):
    """
    return a boolean indicating whether this feature is downstream of
    `other` taking the strand of other into account
    """
    if self.chrom != other.chrom:
        return None
    if getattr(other, "strand", None) == "-":
        # other is on the - strand, so downstream means lower genomic
        # coordinates: this feature must end before `other` starts
        return self.end <= other.start
    return self.start >= other.end
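A quick self-contained check of the strand handling (the namedtuple stand-in is hypothetical):

from collections import namedtuple

Feat = namedtuple("Feat", "chrom start end strand")

def _is_downstream_of(a, b):
    # mirrors the method above for a self-contained check
    if a.chrom != b.chrom:
        return None
    if getattr(b, "strand", None) == "-":
        return a.end <= b.start
    return a.start >= b.end

# downstream of a '-' strand feature lies at lower coordinates
assert _is_downstream_of(Feat("chr1", 10, 20, "+"), Feat("chr1", 30, 40, "-"))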
<SYSTEM_TASK:> Return the sequence for this feature. <END_TASK> <USER_TASK:> Description:
def sequence(self, per_exon=False):
    """
    Return the sequence for this feature.
    if per-exon is True, return an array of exon sequences
    This sequence is never reverse complemented
    """
    db = self.db
    if not per_exon:
        start = self.txStart + 1
        return _sequence(db, self.chrom, start, self.txEnd)
    else:
        # TODO: use same strategy as cds_sequence to reduce # of requests.
        seqs = []
        for start, end in self.exons:
            seqs.append(_sequence(db, self.chrom, start + 1, end))
        return seqs
<SYSTEM_TASK:> perform an NCBI blast against the sequence of this feature <END_TASK> <USER_TASK:> Description: def ncbi_blast(self, db="nr", megablast=True, sequence=None): """ perform an NCBI blast against the sequence of this feature """
    import requests
    requests.defaults.max_retries = 4
    assert sequence in (None, "cds", "mrna")
    seq = self.sequence() if sequence is None else (
        "".join(self.cds_sequence if sequence == "cds"
                else self.mrna_sequence))
    r = requests.post('http://blast.ncbi.nlm.nih.gov/Blast.cgi',
                      timeout=20,
                      data=dict(
                          PROGRAM="blastn",
                          #EXPECT=2,
                          DESCRIPTIONS=100,
                          ALIGNMENTS=0,
                          FILTER="L",  # low complexity
                          CMD="Put",
                          MEGABLAST=True,
                          DATABASE=db,
                          QUERY=">%s\n%s" % (self.name, seq)
                      ))
    if not ("RID =" in r.text and "RTOE" in r.text):
        print("no results", file=sys.stderr)
        # a bare `raise StopIteration` inside a generator is an error from
        # Python 3.7 (PEP 479); a plain return ends iteration the same way
        return
    rid = r.text.split("RID = ")[1].split("\n")[0]
    import time
    time.sleep(4)
    print("checking...", file=sys.stderr)
    r = requests.post('http://blast.ncbi.nlm.nih.gov/Blast.cgi',
                      data=dict(RID=rid, format="Text",
                                DESCRIPTIONS=100,
                                DATABASE=db,
                                CMD="Get",
                                ))
    while "Status=WAITING" in r.text:
        print("checking...", file=sys.stderr)
        time.sleep(10)
        r = requests.post('http://blast.ncbi.nlm.nih.gov/Blast.cgi',
                          data=dict(RID=rid, format="Text",
                                    CMD="Get",
                                    ))
    for rec in _ncbi_parse(r.text):
        yield rec
<SYSTEM_TASK:> make a request to the genome-browsers BLAT interface <END_TASK> <USER_TASK:> Description:
def blat(self, db=None, sequence=None, seq_type="DNA"):
    """
    make a request to the genome-browser's BLAT interface
    sequence is one of None, "mrna", "cds"
    returns a list of features that are hits to this sequence.
    """
    from . blat_blast import blat, blat_all
    assert sequence in (None, "cds", "mrna")
    seq = self.sequence() if sequence is None else (
        "".join(self.cds_sequence if sequence == "cds"
                else self.mrna_sequence))
    if isinstance(db, (tuple, list)):
        return blat_all(seq, self.gene_name, db, seq_type)
    else:
        return blat(seq, self.gene_name, db or self.db, seq_type)
<SYSTEM_TASK:> return a bed formatted string of this feature <END_TASK> <USER_TASK:> Description: def bed(self, *attrs, **kwargs): """ return a bed formatted string of this feature """
exclude = ("chrom", "start", "end", "txStart", "txEnd", "chromStart", "chromEnd") if self.is_gene_pred: return self.bed12(**kwargs) return "\t".join(map(str, ( [self.chrom, self.start, self.end] + [getattr(self, attr) for attr in attrs if not attr in exclude] )))
<SYSTEM_TASK:> Makes a HEAD request to find the final destination of a URL after <END_TASK> <USER_TASK:> Description:
def dereference_url(url):
    """
    Makes a HEAD request to find the final destination of a URL after
    following any redirects
    """
    res = open_url(url, method='HEAD')
    res.close()
    return res.url
<SYSTEM_TASK:> Read the contents of a URL into memory, return <END_TASK> <USER_TASK:> Description:
def read(url, **kwargs):
    """
    Read the contents of a URL into memory and return them
    """
    response = open_url(url, **kwargs)
    try:
        return response.read()
    finally:
        response.close()
<SYSTEM_TASK:> Check whether zip file paths are all relative, and optionally in a <END_TASK> <USER_TASK:> Description:
def check_extracted_paths(namelist, subdir=None):
    """
    Check whether zip file paths are all relative, and optionally in a
    specified subdirectory, raises an exception if not

    namelist: A list of paths from the zip file
    subdir: If specified then check whether all paths in the zip file are
        under this subdirectory

    Python docs are unclear about the security of extract/extractall:
    https://docs.python.org/2/library/zipfile.html#zipfile.ZipFile.extractall
    https://docs.python.org/2/library/zipfile.html#zipfile.ZipFile.extract
    """
    def relpath(p):
        # relpath strips a trailing sep
        # Windows paths may also use unix sep
        q = os.path.relpath(p)
        if p.endswith(os.path.sep) or p.endswith('/'):
            q += os.path.sep
        return q

    parent = os.path.abspath('.')
    if subdir:
        if os.path.isabs(subdir):
            raise FileException('subdir must be a relative path', subdir)
        subdir = relpath(subdir + os.path.sep)
    for name in namelist:
        if os.path.commonprefix([parent, os.path.abspath(name)]) != parent:
            raise FileException('Insecure path in zipfile', name)
        if subdir and os.path.commonprefix(
                [subdir, relpath(name)]) != subdir:
            raise FileException(
                'Path in zipfile is not in required subdir', name)
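A plausible call site, sketched under the assumption that this function and FileException are importable alongside zipfile (the archive name is hypothetical):

import zipfile

with zipfile.ZipFile("bundle.zip") as zf:  # hypothetical archive
    # validate every member path before trusting extractall()
    check_extracted_paths(zf.namelist(), subdir="bundle")
    zf.extractall()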
<SYSTEM_TASK:> Automatically handle local and remote URLs, files and directories <END_TASK> <USER_TASK:> Description:
def get_as_local_path(path, overwrite, progress=0, httpuser=None,
                      httppassword=None):
    """
    Automatically handle local and remote URLs, files and directories

    path: Either a local directory, file or remote URL. If a URL is given
        it will be fetched. If this is a zip it will be automatically
        expanded by default.
    overwrite: Whether to overwrite an existing file:
        'error': Raise an exception
        'backup': Rename the old file and use the new one
        'keep': Keep the old file, don't overwrite or raise an exception
    progress: Number of progress dots, default 0 (don't print)
    httpuser, httppassword: Credentials for HTTP authentication
    return: A tuple (type, localpath)
        type:
        'file': localpath is the path to a local file
        'directory': localpath is the path to a local directory
        'unzipped': localpath is the path to a local unzipped directory
    """
    m = re.match('([A-Za-z]+)://', path)
    if m:
        # url_open handles multiple protocols so don't bother validating
        log.debug('Detected URL protocol: %s', m.group(1))
        # URL should use / as the pathsep
        localpath = path.split('/')[-1]
        if not localpath:
            raise FileException(
                'Remote path appears to be a directory', path)
        if os.path.exists(localpath):
            if overwrite == 'error':
                raise FileException('File already exists', localpath)
            elif overwrite == 'keep':
                log.info('Keeping existing %s', localpath)
            elif overwrite == 'backup':
                rename_backup(localpath)
                download(path, localpath, progress, httpuser=httpuser,
                         httppassword=httppassword)
            else:
                raise Exception('Invalid overwrite flag: %s' % overwrite)
        else:
            download(path, localpath, progress, httpuser=httpuser,
                     httppassword=httppassword)
    else:
        localpath = path
    log.debug("Local path: %s", localpath)
    if os.path.isdir(localpath):
        return 'directory', localpath
    if os.path.exists(localpath):
        return 'file', localpath
    # Something's gone very wrong
    raise Exception('Local path does not exist: %s' % localpath)
<SYSTEM_TASK:> Allocates and initializes an encoder state. <END_TASK> <USER_TASK:> Description: def create(fs, channels, application): """Allocates and initializes an encoder state."""
    result_code = ctypes.c_int()
    result = _create(fs, channels, application, ctypes.byref(result_code))
    # compare by value: `is not` on ints relies on interning and is a bug
    if result_code.value != constants.OK:
        raise OpusError(result_code.value)
    return result
<SYSTEM_TASK:> Encodes an Opus frame from floating point input <END_TASK> <USER_TASK:> Description: def encode_float(encoder, pcm, frame_size, max_data_bytes): """Encodes an Opus frame from floating point input"""
    pcm = ctypes.cast(pcm, c_float_pointer)
    data = (ctypes.c_char * max_data_bytes)()
    result = _encode_float(encoder, pcm, frame_size, data, max_data_bytes)
    if result < 0:
        raise OpusError(result)
    # note: the 'c' typecode and tostring() are Python 2 idioms
    return array.array('c', data[:result]).tostring()
<SYSTEM_TASK:> create Tag Groups and Child Tags using data from terms dict <END_TASK> <USER_TASK:> Description: def generate(tagGroups, terms): """ create Tag Groups and Child Tags using data from terms dict """
    rv = []
    for pid in tagGroups:
        # In testing we may not have complete set
        if pid not in terms.keys():
            continue
        groupData = terms[pid]
        groupName = "[%s] %s" % (pid, groupData['name'])
        groupDesc = groupData['desc']
        children = []
        group = dict(name=groupName, desc=groupDesc, set=children)
        rv.append(group)
        for cid in groupData['children']:
            cData = terms[cid]
            cName = "[%s] %s" % (cid, cData['name'])
            cDesc = cData['desc']
            child = dict(name=cName, desc=cDesc)
            children.append(child)
    return json.dumps(rv, indent=2)
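A minimal worked input with hypothetical IDs, showing the expected shape of terms (this assumes json is imported in the enclosing module):

terms = {
    "G1": {"name": "Colour", "desc": "Colour group", "children": ["C1"]},
    "C1": {"name": "Red", "desc": "The colour red", "children": []},
}
print(generate(["G1"], terms))
# yields a JSON list with one group "[G1] Colour" whose "set"
# contains the child tag "[C1] Red"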
<SYSTEM_TASK:> We need to support deprecated behaviour for now which makes this <END_TASK> <USER_TASK:> Description:
def _handle_args(self, cmd, args):
    """
    We need to support deprecated behaviour for now which makes this
    quite complicated

    Current behaviour:
    - install: Installs a new server, existing server causes an error
    - install --upgrade: Installs or upgrades a server
    - install --managedb: Automatically initialise or upgrade the db

    Deprecated:
    - install --upgradedb --initdb: Replaced by install --managedb
    - install --upgradedb: upgrade the db, must exist
    - install --initdb: initialise the db
    - upgrade: Upgrades a server, must already exist
    - upgrade --upgradedb: Automatically upgrade the db

    returns:
    - Modified args object, flag to indicate new/existing/auto install
    """
    if cmd == 'install':
        if args.upgrade:
            # Current behaviour: install or upgrade
            if args.initdb or args.upgradedb:
                raise Stop(10, (
                    'Deprecated --initdb --upgradedb flags '
                    'are incompatible with --upgrade'))
            newinstall = None
        else:
            # Current behaviour: Server must not exist
            newinstall = True
        if args.managedb:
            # Current behaviour
            if args.initdb or args.upgradedb:
                raise Stop(10, (
                    'Deprecated --initdb --upgradedb flags '
                    'are incompatible with --managedb'))
            args.initdb = True
            args.upgradedb = True
        else:
            if args.initdb or args.upgradedb:
                log.warn('--initdb and --upgradedb are deprecated, '
                         'use --managedb')
    elif cmd == 'upgrade':
        # Deprecated behaviour
        log.warn(
            '"omero upgrade" is deprecated, use "omego install --upgrade"')
        cmd = 'install'
        args.upgrade = True
        # Deprecated behaviour: Server must exist
        newinstall = False
    else:
        raise Exception('Unexpected command: %s' % cmd)
    return args, newinstall
<SYSTEM_TASK:> Handle database initialisation and upgrade, taking into account <END_TASK> <USER_TASK:> Description:
def handle_database(self):
    """
    Handle database initialisation and upgrade, taking into account
    command line arguments
    """
    # TODO: When initdb and upgradedb are dropped we can just test
    # managedb, but for backwards compatibility we need to support
    # initdb without upgradedb and vice-versa
    if self.args.initdb or self.args.upgradedb:
        db = DbAdmin(self.dir, None, self.args, self.external)
        status = db.check()
        log.debug('OMERO database upgrade status: %s', status)
    else:
        log.warn('OMERO database check disabled')
        return DB_INIT_NEEDED
    if status == DB_INIT_NEEDED:
        if self.args.initdb:
            log.debug('Initialising OMERO database')
            db.init()
        else:
            log.error('OMERO database not found')
            raise Stop(DB_INIT_NEEDED,
                       'Install/Upgrade failed: OMERO database not found')
    elif status == DB_UPGRADE_NEEDED:
        log.warn('OMERO database exists but is out of date')
        if self.args.upgradedb:
            log.debug('Upgrading OMERO database')
            db.upgrade()
        else:
            raise Stop(
                DB_UPGRADE_NEEDED,
                'Pass --managedb or upgrade your OMERO database manually')
    else:
        assert status == DB_UPTODATE
    return status
<SYSTEM_TASK:> Runs a command as if from the command-line <END_TASK> <USER_TASK:> Description: def run(self, command): """ Runs a command as if from the command-line without the need for using popen or subprocess """
    if isinstance(command, basestring):
        command = command.split()
    else:
        command = list(command)
    self.external.omero_cli(command)
<SYSTEM_TASK:> Parse a list of SQL files and return a dictionary of valid schema <END_TASK> <USER_TASK:> Description:
def parse_schema_files(files):
    """
    Parse a list of SQL files and return a dictionary of valid schema
    files where each key is a valid schema file and the corresponding
    value is a tuple containing the source and the target schema.
    """
    f_dict = {}
    for f in files:
        root, ext = os.path.splitext(f)
        if ext != ".sql":
            continue
        vto, vfrom = os.path.split(root)
        vto = os.path.split(vto)[1]
        if is_schema(vto) and is_schema(vfrom):
            f_dict[f] = (vfrom, vto)
    return f_dict
<SYSTEM_TASK:> Dump the database using the postgres custom format <END_TASK> <USER_TASK:> Description: def dump(self): """ Dump the database using the postgres custom format """
    dumpfile = self.args.dumpfile
    if not dumpfile:
        db, env = self.get_db_args_env()
        dumpfile = fileutils.timestamp_filename(
            'omero-database-%s' % db['name'], 'pgdump')
    log.info('Dumping database to %s', dumpfile)
    if not self.args.dry_run:
        self.pgdump('-Fc', '-f', dumpfile)
<SYSTEM_TASK:> Get a dictionary of database connection parameters, and create an <END_TASK> <USER_TASK:> Description:
def get_db_args_env(self):
    """
    Get a dictionary of database connection parameters, and create an
    environment for running postgres commands.
    Falls back to omego defaults.
    """
    db = {
        'name': self.args.dbname,
        'host': self.args.dbhost,
        'user': self.args.dbuser,
        'pass': self.args.dbpass
    }
    if not self.args.no_db_config:
        try:
            c = self.external.get_config(force=True)
        except Exception as e:
            log.warn('config.xml not found: %s', e)
            c = {}
        for k in db:
            try:
                db[k] = c['omero.db.%s' % k]
            except KeyError:
                log.info(
                    'Failed to lookup parameter omero.db.%s, using %s',
                    k, db[k])
    if not db['name']:
        raise Exception('Database name required')
    env = os.environ.copy()
    env['PGPASSWORD'] = db['pass']
    return db, env
<SYSTEM_TASK:> Run a pg_dump command <END_TASK> <USER_TASK:> Description: def pgdump(self, *pgdumpargs): """ Run a pg_dump command """
    db, env = self.get_db_args_env()
    args = ['-d', db['name'], '-h', db['host'], '-U', db['user'], '-w'
            ] + list(pgdumpargs)
    stdout, stderr = External.run(
        'pg_dump', args, capturestd=True, env=env)
    if stderr:
        log.warn('stderr: %s', stderr)
    log.debug('stdout: %s', stdout)
    return stdout
<SYSTEM_TASK:> Set the directory of the server to be controlled <END_TASK> <USER_TASK:> Description: def set_server_dir(self, dir): """ Set the directory of the server to be controlled """
    self.dir = os.path.abspath(dir)
    config = os.path.join(self.dir, 'etc', 'grid', 'config.xml')
    self.configured = os.path.exists(config)
<SYSTEM_TASK:> Returns a dictionary of all config.xml properties <END_TASK> <USER_TASK:> Description:
def get_config(self, force=False):
    """
    Returns a dictionary of all config.xml properties

    If `force = True` then ignore any cached state and read config.xml
    if possible.

    setup_omero_cli() must be called before this method to import the
    correct omero module to minimise the possibility of version conflicts
    """
    if not force and not self.has_config():
        raise Exception('No config file')
    configxml = os.path.join(self.dir, 'etc', 'grid', 'config.xml')
    if not os.path.exists(configxml):
        raise Exception('No config file')
    try:
        # Attempt to open config.xml read-only, though this flag is not
        # present in early versions of OMERO 5.0
        c = self._omero.config.ConfigXml(
            configxml, exclusive=False, read_only=True)
    except TypeError:
        c = self._omero.config.ConfigXml(configxml, exclusive=False)
    try:
        return c.as_map()
    finally:
        c.close()
<SYSTEM_TASK:> Imports the omero CLI module so that commands can be run directly. <END_TASK> <USER_TASK:> Description:
def setup_omero_cli(self):
    """
    Imports the omero CLI module so that commands can be run directly.

    Note Python does not allow a module to be imported multiple times,
    so this will only work with a single omero instance. This can have
    several surprising effects, so setup_omero_cli() must be explicitly
    called.
    """
    if not self.dir:
        raise Exception('No server directory set')
    if 'omero.cli' in sys.modules:
        raise Exception('omero.cli can only be imported once')
    log.debug("Setting up omero CLI")
    lib = os.path.join(self.dir, "lib", "python")
    if not os.path.exists(lib):
        raise Exception("%s does not exist!" % lib)
    sys.path.insert(0, lib)
    import omero
    import omero.cli
    log.debug("Using omero CLI from %s", omero.cli.__file__)
    self.cli = omero.cli.CLI()
    self.cli.loadplugins()
    self._omero = omero
<SYSTEM_TASK:> Create a copy of the current environment for interacting with the <END_TASK> <USER_TASK:> Description: def setup_previous_omero_env(self, olddir, savevarsfile): """ Create a copy of the current environment for interacting with the current OMERO server installation """
    env = self.get_environment(savevarsfile)

    def addpath(varname, p):
        if not os.path.exists(p):
            raise Exception("%s does not exist!" % p)
        current = env.get(varname)
        if current:
            env[varname] = p + os.pathsep + current
        else:
            env[varname] = p

    olddir = os.path.abspath(olddir)
    lib = os.path.join(olddir, "lib", "python")
    addpath("PYTHONPATH", lib)
    bin = os.path.join(olddir, "bin")
    addpath("PATH", bin)
    self.old_env = env
<SYSTEM_TASK:> Runs a command as if from the OMERO command-line without the need <END_TASK> <USER_TASK:> Description: def omero_cli(self, command): """ Runs a command as if from the OMERO command-line without the need for using popen or subprocess. """
    assert isinstance(command, list)
    if not self.cli:
        raise Exception('omero.cli not initialised')
    log.info("Invoking CLI [current environment]: %s", " ".join(command))
    self.cli.invoke(command, strict=True)
<SYSTEM_TASK:> Runs an executable with an array of arguments, optionally in the <END_TASK> <USER_TASK:> Description: def run(exe, args, capturestd=False, env=None): """ Runs an executable with an array of arguments, optionally in the specified environment. Returns stdout and stderr """
    command = [exe] + args
    if env:
        log.info("Executing [custom environment]: %s", " ".join(command))
    else:
        log.info("Executing : %s", " ".join(command))
    start = time.time()

    # Temp files will be automatically deleted on close()
    # If run() throws the garbage collector should call close(), so don't
    # bother with try-finally
    outfile = None
    errfile = None
    if capturestd:
        outfile = tempfile.TemporaryFile()
        errfile = tempfile.TemporaryFile()

    # Use call instead of Popen so that stdin is connected to the console,
    # in case user input is required
    # On Windows shell=True is needed otherwise the modified environment
    # PATH variable is ignored. On Unix this breaks things.
    r = subprocess.call(
        command, env=env, stdout=outfile, stderr=errfile, shell=WINDOWS)

    stdout = None
    stderr = None
    if capturestd:
        outfile.seek(0)
        stdout = outfile.read()
        outfile.close()
        errfile.seek(0)
        stderr = errfile.read()
        errfile.close()

    end = time.time()
    if r != 0:
        log.error("Failed [%.3f s]", end - start)
        raise RunException(
            "Non-zero return code", exe, args, r, stdout, stderr)
    log.info("Completed [%.3f s]", end - start)
    return stdout, stderr
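A typical invocation, sketched on the assumption that run is exposed as a static method of External (as the pgdump helper above suggests):

# capture both streams; a non-zero exit code raises RunException
stdout, stderr = External.run("pg_dump", ["--version"], capturestd=True)
print(stdout)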
<SYSTEM_TASK:> Insert multiple documents into Elasticsearch. <END_TASK> <USER_TASK:> Description: def bulk_upsert(self, docs, namespace, timestamp): """Insert multiple documents into Elasticsearch."""
    def docs_to_upsert():
        doc = None
        for doc in docs:
            # Remove metadata and redundant _id
            index, doc_type = self._index_and_mapping(namespace)
            doc_id = u(doc.pop("_id"))
            document_action = {
                '_index': index,
                '_type': doc_type,
                '_id': doc_id,
                '_source': self._formatter.format_document(doc)
            }
            document_meta = {
                '_index': self.meta_index_name,
                '_type': self.meta_type,
                '_id': doc_id,
                '_source': {
                    'ns': namespace,
                    '_ts': timestamp
                }
            }
            yield document_action
            yield document_meta
        if doc is None:
            raise errors.EmptyDocsError(
                "Cannot upsert an empty sequence of "
                "documents into Elastic Search")

    try:
        kw = {}
        if self.chunk_size > 0:
            kw['chunk_size'] = self.chunk_size
        responses = streaming_bulk(client=self.elastic,
                                   actions=docs_to_upsert(),
                                   **kw)
        for ok, resp in responses:
            if not ok:
                LOG.error(
                    "Could not bulk-upsert document "
                    "into ElasticSearch: %r" % resp)
        if self.auto_commit_interval == 0:
            self.commit()
    except errors.EmptyDocsError:
        # This can happen when mongo-connector starts up, there is no
        # config file, but nothing to dump
        pass
<SYSTEM_TASK:> Send buffered operations to Elasticsearch. <END_TASK> <USER_TASK:> Description: def send_buffered_operations(self): """Send buffered operations to Elasticsearch. This method is periodically called by the AutoCommitThread. """
    with self.lock:
        try:
            action_buffer = self.BulkBuffer.get_buffer()
            if action_buffer:
                successes, errors = bulk(self.elastic, action_buffer)
                LOG.debug("Bulk request finished, successfully sent %d "
                          "operations", successes)
                if errors:
                    LOG.error(
                        "Bulk request finished with errors: %r", errors)
        except es_exceptions.ElasticsearchException:
            LOG.exception("Bulk request failed with exception")
<SYSTEM_TASK:> Get the most recently modified document from Elasticsearch. <END_TASK> <USER_TASK:> Description: def get_last_doc(self): """Get the most recently modified document from Elasticsearch. This method is used to help define a time window within which documents may be in conflict after a MongoDB rollback. """
    try:
        result = self.elastic.search(
            index=self.meta_index_name,
            body={
                "query": {"match_all": {}},
                "sort": [{"_ts": "desc"}],
            },
            size=1
        )["hits"]["hits"]
        for r in result:
            r['_source']['_id'] = r['_id']
            return r['_source']
    except es_exceptions.RequestError:
        # no documents so ES returns 400 because of undefined _ts mapping
        return None
<SYSTEM_TASK:> Parse an attribute signature <END_TASK> <USER_TASK:> Description: def parse_attr_signature(sig): """ Parse an attribute signature """
    match = ATTR_SIG_RE.match(sig.strip())
    if not match:
        raise RuntimeError('Attribute signature invalid, got ' + sig)
    name, _, params = match.groups()
    if params is not None and params.strip() != '':
        params = split_sig(params)
        params = [parse_param_signature(x) for x in params]
    else:
        params = []
    return (name, params)
<SYSTEM_TASK:> Try and create a reference to a type on MSDN <END_TASK> <USER_TASK:> Description: def get_msdn_ref(name): """ Try and create a reference to a type on MSDN """
    in_msdn = False
    if name in MSDN_VALUE_TYPES:
        name = MSDN_VALUE_TYPES[name]
        in_msdn = True
    if name.startswith('System.'):
        in_msdn = True
    if in_msdn:
        link = name.split('<')[0]
        if link in MSDN_LINK_MAP:
            link = MSDN_LINK_MAP[link]
        else:
            link = link.lower()
        url = 'https://msdn.microsoft.com/en-us/library/' + link + '.aspx'
        node = nodes.reference(name, shorten_type(name))
        node['refuri'] = url
        node['reftitle'] = name
        return node
    else:
        return None
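For example, assuming 'System.String' has no entry in MSDN_LINK_MAP so the lower-cased name is used:

node = get_msdn_ref('System.String')
# node['refuri'] ->
#   'https://msdn.microsoft.com/en-us/library/system.string.aspx'
# a name outside MSDN_VALUE_TYPES that is not under System. returns None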
<SYSTEM_TASK:> Allocates and initializes a decoder state <END_TASK> <USER_TASK:> Description: def create(fs, channels): """Allocates and initializes a decoder state"""
    result_code = ctypes.c_int()
    result = _create(fs, channels, ctypes.byref(result_code))
    # compare by value rather than identity (`is not 0` is unreliable)
    if result_code.value != 0:
        raise OpusError(result_code.value)
    return result
<SYSTEM_TASK:> Gets the number of channels from an Opus packet <END_TASK> <USER_TASK:> Description: def packet_get_nb_channels(data): """Gets the number of channels from an Opus packet"""
    data_pointer = ctypes.c_char_p(data)
    result = _packet_get_nb_channels(data_pointer)
    if result < 0:
        raise OpusError(result)
    return result
<SYSTEM_TASK:> Gets the number of frames in an Opus packet <END_TASK> <USER_TASK:> Description: def packet_get_nb_frames(data, length=None): """Gets the number of frames in an Opus packet"""
    data_pointer = ctypes.c_char_p(data)
    if length is None:
        length = len(data)
    result = _packet_get_nb_frames(data_pointer, ctypes.c_int(length))
    if result < 0:
        raise OpusError(result)
    return result
<SYSTEM_TASK:> Gets the number of samples per frame from an Opus packet <END_TASK> <USER_TASK:> Description: def packet_get_samples_per_frame(data, fs): """Gets the number of samples per frame from an Opus packet"""
    data_pointer = ctypes.c_char_p(data)
    # the original called _packet_get_nb_frames here, which contradicts
    # the docstring; the samples-per-frame binding is presumably intended
    result = _packet_get_samples_per_frame(data_pointer, ctypes.c_int(fs))
    if result < 0:
        raise OpusError(result)
    return result
<SYSTEM_TASK:> Decode an Opus frame <END_TASK> <USER_TASK:> Description:
def decode(decoder, data, length, frame_size, decode_fec, channels=2):
    """Decode an Opus frame

    Unlike the `opus_decode` function, this function takes an additional
    parameter `channels`, which indicates the number of channels in the
    frame
    """
    pcm_size = frame_size * channels * ctypes.sizeof(ctypes.c_int16)
    pcm = (ctypes.c_int16 * pcm_size)()
    pcm_pointer = ctypes.cast(pcm, c_int16_pointer)

    # Converting from a boolean to int
    decode_fec = int(bool(decode_fec))

    result = _decode(decoder, data, length, pcm_pointer, frame_size,
                     decode_fec)
    if result < 0:
        raise OpusError(result)
    return array.array('h', pcm).tostring()
<SYSTEM_TASK:> Show the differences between the old and new html document, as html. <END_TASK> <USER_TASK:> Description:
def diff(old_html, new_html, cutoff=0.0, plaintext=False, pretty=False):
    """Show the differences between the old and new html document, as html.

    Return the document html with extra tags added to show changes.
    Add <ins> tags around newly added sections, and <del> tags to show
    sections that have been deleted.
    """
    if plaintext:
        old_dom = parse_text(old_html)
        new_dom = parse_text(new_html)
    else:
        old_dom = parse_minidom(old_html)
        new_dom = parse_minidom(new_html)

    # If the two documents are not similar enough, don't show the changes.
    if not check_text_similarity(old_dom, new_dom, cutoff):
        return '<h2>The differences from the previous version are too large to show concisely.</h2>'

    dom = dom_diff(old_dom, new_dom)

    # HTML-specific cleanup.
    if not plaintext:
        fix_lists(dom)
        fix_tables(dom)

    # Only return html for the document body contents.
    body_elements = dom.getElementsByTagName('body')
    if len(body_elements) == 1:
        dom = body_elements[0]
    return minidom_tostring(dom, pretty=pretty)
<SYSTEM_TASK:> Iterate through opcodes, turning them into a series of insert and delete <END_TASK> <USER_TASK:> Description:
def adjusted_ops(opcodes):
    """
    Iterate through opcodes, turning them into a series of insert and delete
    operations, adjusting indices to account for the size of insertions and
    deletions.

    >>> def sequence_opcodes(old, new): return difflib.SequenceMatcher(a=old, b=new).get_opcodes()
    >>> list(adjusted_ops(sequence_opcodes('abc', 'b')))
    [('delete', 0, 1, 0, 0), ('delete', 1, 2, 1, 1)]
    >>> list(adjusted_ops(sequence_opcodes('b', 'abc')))
    [('insert', 0, 0, 0, 1), ('insert', 2, 2, 2, 3)]
    >>> list(adjusted_ops(sequence_opcodes('axxa', 'aya')))
    [('delete', 1, 3, 1, 1), ('insert', 1, 1, 1, 2)]
    >>> list(adjusted_ops(sequence_opcodes('axa', 'aya')))
    [('delete', 1, 2, 1, 1), ('insert', 1, 1, 1, 2)]
    >>> list(adjusted_ops(sequence_opcodes('ab', 'bc')))
    [('delete', 0, 1, 0, 0), ('insert', 1, 1, 1, 2)]
    >>> list(adjusted_ops(sequence_opcodes('bc', 'ab')))
    [('insert', 0, 0, 0, 1), ('delete', 2, 3, 2, 2)]
    """
    while opcodes:
        op = opcodes.pop(0)
        tag, i1, i2, j1, j2 = op
        shift = 0
        if tag == 'equal':
            continue
        if tag == 'replace':
            # change the single replace op into a delete then insert
            # pay careful attention to the variables here, there's no typo
            opcodes = [
                ('delete', i1, i2, j1, j1),
                ('insert', i2, i2, j1, j2),
            ] + opcodes
            continue
        yield op
        if tag == 'delete':
            shift = -(i2 - i1)
        elif tag == 'insert':
            shift = +(j2 - j1)
        new_opcodes = []
        for tag, i1, i2, j1, j2 in opcodes:
            new_opcodes.append((
                tag,
                i1 + shift, i2 + shift,
                j1, j2,
            ))
        opcodes = new_opcodes
<SYSTEM_TASK:> Use difflib to get the opcodes for a set of matching blocks. <END_TASK> <USER_TASK:> Description: def get_opcodes(matching_blocks): """Use difflib to get the opcodes for a set of matching blocks."""
    sm = difflib.SequenceMatcher(a=[], b=[])
    sm.matching_blocks = matching_blocks
    return sm.get_opcodes()
<SYSTEM_TASK:> Use difflib to find matching blocks. <END_TASK> <USER_TASK:> Description: def match_blocks(hash_func, old_children, new_children): """Use difflib to find matching blocks."""
    sm = difflib.SequenceMatcher(
        _is_junk,
        a=[hash_func(c) for c in old_children],
        b=[hash_func(c) for c in new_children],
    )
    return sm
<SYSTEM_TASK:> Given a list of matching blocks, output the gaps between them. <END_TASK> <USER_TASK:> Description:
def get_nonmatching_blocks(matching_blocks):
    """Given a list of matching blocks, output the gaps between them.

    Non-matches have the format (alo, ahi, blo, bhi). This specifies two
    index ranges, one in the A sequence, and one in the B sequence.
    """
    i = j = 0
    for match in matching_blocks:
        a, b, size = match
        yield (i, a, j, b)
        i = a + size
        j = b + size
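A small worked example against difflib's matching blocks (which always end with a zero-length sentinel):

from difflib import SequenceMatcher

blocks = SequenceMatcher(a="abxxcd", b="abycd").get_matching_blocks()
# matches: "ab" at (0, 0), "cd" at (4, 3), then the (6, 5, 0) sentinel
print(list(get_nonmatching_blocks(blocks)))
# -> [(0, 0, 0, 0), (2, 4, 2, 3), (6, 6, 5, 5)]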
<SYSTEM_TASK:> Given two lists of blocks, combine them, in the proper order. <END_TASK> <USER_TASK:> Description:
def merge_blocks(a_blocks, b_blocks):
    """Given two lists of blocks, combine them, in the proper order.

    Ensure that there are no overlaps, and that they are for sequences of
    the same length.
    """
    # Check sentinels for sequence length.
    assert a_blocks[-1][2] == b_blocks[-1][2] == 0  # sentinel size is 0
    assert a_blocks[-1] == b_blocks[-1]
    combined_blocks = sorted(list(set(a_blocks + b_blocks)))
    # Check for overlaps.
    i = j = 0
    for a, b, size in combined_blocks:
        assert i <= a
        assert j <= b
        i = a + size
        j = b + size
    return combined_blocks
<SYSTEM_TASK:> r"""Remove newlines in the xml. <END_TASK> <USER_TASK:> Description:
def remove_newlines(xml):
    r"""Remove newlines in the xml.

    If the newline separates words in text, then replace with a space
    instead.

    >>> remove_newlines('<p>para one</p>\n<p>para two</p>')
    '<p>para one</p><p>para two</p>'
    >>> remove_newlines('<p>line one\nline two</p>')
    '<p>line one line two</p>'
    >>> remove_newlines('one\n1')
    'one 1'
    >>> remove_newlines('hey!\nmore text!')
    'hey! more text!'
    """
    # Normalize newlines.
    xml = xml.replace('\r\n', '\n')
    xml = xml.replace('\r', '\n')
    # Remove newlines that don't separate text. The remaining ones do
    # separate text.
    xml = re.sub(r'(?<=[>\s])\n(?=[<\s])', '', xml)
    xml = xml.replace('\n', ' ')
    return xml.strip()
<SYSTEM_TASK:> For html elements that should not have text nodes inside them, remove all <END_TASK> <USER_TASK:> Description:
def remove_insignificant_text_nodes(dom):
    """
    For html elements that should not have text nodes inside them, remove
    all whitespace. For elements that may have text, collapse multiple
    spaces to a single space.
    """
    nodes_to_remove = []
    for node in walk_dom(dom):
        if is_text(node):
            text = node.nodeValue
            if node.parentNode.tagName in _non_text_node_tags:
                nodes_to_remove.append(node)
            else:
                node.nodeValue = re.sub(r'\s+', ' ', text)
    for node in nodes_to_remove:
        remove_node(node)
<SYSTEM_TASK:> Get the child at the given index, or return None if it doesn't exist. <END_TASK> <USER_TASK:> Description: def get_child(parent, child_index): """ Get the child at the given index, or return None if it doesn't exist. """
    if child_index < 0 or child_index >= len(parent.childNodes):
        return None
    return parent.childNodes[child_index]
<SYSTEM_TASK:> Get the node at the specified location in the dom. <END_TASK> <USER_TASK:> Description:
def get_location(dom, location):
    """
    Get the node at the specified location in the dom.
    Location is a sequence of child indices, starting at the children of
    the root element. If there is no node at this location, raise a
    ValueError.
    """
    node = dom.documentElement
    for i in location:
        node = get_child(node, i)
        if not node:
            # TODO: line not covered
            raise ValueError('Node at location %s does not exist.' % location)
    return node
<SYSTEM_TASK:> Check whether two dom trees have similar text or not. <END_TASK> <USER_TASK:> Description: def check_text_similarity(a_dom, b_dom, cutoff): """Check whether two dom trees have similar text or not."""
    a_words = list(tree_words(a_dom))
    b_words = list(tree_words(b_dom))
    sm = WordMatcher(a=a_words, b=b_words)
    return sm.text_ratio() >= cutoff
<SYSTEM_TASK:> Insert the node before next_sibling. If next_sibling is None, append the node last instead. <END_TASK> <USER_TASK:> Description: def insert_or_append(parent, node, next_sibling): """ Insert the node before next_sibling. If next_sibling is None, append the node last instead. """
    # simple insert
    if next_sibling:
        parent.insertBefore(node, next_sibling)
    else:
        parent.appendChild(node)
<SYSTEM_TASK:> Wrap the given tag around a node. <END_TASK> <USER_TASK:> Description: def wrap(node, tag): """Wrap the given tag around a node."""
    wrap_node = node.ownerDocument.createElement(tag)
    parent = node.parentNode
    if parent:
        parent.replaceChild(wrap_node, node)
    wrap_node.appendChild(node)
    return wrap_node
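A quick minidom check of the wrapping behaviour, assuming wrap() above is importable:

from xml.dom import minidom

dom = minidom.parseString('<p>hi</p>')
wrap(dom.documentElement.firstChild, 'b')
print(dom.documentElement.toxml())  # -> <p><b>hi</b></p>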
<SYSTEM_TASK:> Wrap the given tag around the contents of a node. <END_TASK> <USER_TASK:> Description: def wrap_inner(node, tag): """Wrap the given tag around the contents of a node."""
    children = list(node.childNodes)
    wrap_node = node.ownerDocument.createElement(tag)
    for c in children:
        wrap_node.appendChild(c)
    node.appendChild(wrap_node)
<SYSTEM_TASK:> Remove a node, replacing it with its children. <END_TASK> <USER_TASK:> Description: def unwrap(node): """Remove a node, replacing it with its children."""
    for child in list(node.childNodes):
        node.parentNode.insertBefore(child, node)
    remove_node(node)
<SYSTEM_TASK:> Split the text by the regex, keeping all parts. <END_TASK> <USER_TASK:> Description:
def full_split(text, regex):
    """
    Split the text by the regex, keeping all parts.
    The parts should re-join back into the original text.

    >>> list(full_split('word', re.compile('&.*?')))
    ['word']
    """
    while text:
        m = regex.search(text)
        if not m:
            yield text
            break
        left = text[:m.start()]
        middle = text[m.start():m.end()]
        right = text[m.end():]
        if left:
            yield left
        if middle:
            yield middle
        text = right
<SYSTEM_TASK:> Split the text by the given regexes, in priority order. <END_TASK> <USER_TASK:> Description:
def multi_split(text, regexes):
    """
    Split the text by the given regexes, in priority order.

    Make sure that the regex is parenthesized so that matches are returned
    in re.split().

    Splitting on a single regex works like normal split.
    >>> '|'.join(multi_split('one two three', [r'\w+']))
    'one| |two| |three'

    Splitting on digits first separates the digits from their word
    >>> '|'.join(multi_split('one234five 678', [r'\d+', r'\w+']))
    'one|234|five| |678'

    Splitting on words first keeps the word with digits intact.
    >>> '|'.join(multi_split('one234five 678', [r'\w+', r'\d+']))
    'one234five| |678'
    """
    def make_regex(s):
        return re.compile(s) if isinstance(s, basestring) else s
    regexes = [make_regex(r) for r in regexes]

    # Run the list of pieces through the regex split, splitting it into
    # more pieces. Once a piece has been matched, add it to
    # finished_pieces and don't split it again. The pieces should always
    # join back together to form the original text.
    piece_list = [text]
    finished_pieces = set()

    def apply_re(regex, piece_list):
        for piece in piece_list:
            if piece in finished_pieces:
                yield piece
                continue
            for s in full_split(piece, regex):
                if regex.match(s):
                    finished_pieces.add(s)
                if s:
                    yield s

    for regex in regexes:
        piece_list = list(apply_re(regex, piece_list))
    assert ''.join(piece_list) == text
    return piece_list
<SYSTEM_TASK:> Find the total length of all words that match between the two sequences. <END_TASK> <USER_TASK:> Description: def match_length(self): """ Find the total length of all words that match between the two sequences."""
    length = 0
    for match in self.get_matching_blocks():
        a, b, size = match
        length += self._text_length(self.a[a:a + size])
    return length
<SYSTEM_TASK:> Unwrap items in the node list that have ancestors with the same tag. <END_TASK> <USER_TASK:> Description: def remove_nesting(dom, tag_name): """ Unwrap items in the node list that have ancestors with the same tag. """
    for node in dom.getElementsByTagName(tag_name):
        for ancestor in ancestors(node):
            if ancestor is node:
                continue
            if ancestor is dom.documentElement:
                break
            if ancestor.tagName == tag_name:
                unwrap(node)
                break
<SYSTEM_TASK:> Sort the nodes of the dom in-place, based on a comparison function. <END_TASK> <USER_TASK:> Description: def sort_nodes(dom, cmp_func): """ Sort the nodes of the dom in-place, based on a comparison function. """
    dom.normalize()
    for node in list(walk_dom(dom, elements_only=True)):
        prev_sib = node.previousSibling
        while prev_sib and cmp_func(prev_sib, node) == 1:
            node.parentNode.insertBefore(node, prev_sib)
            prev_sib = node.previousSibling
<SYSTEM_TASK:> Merge all adjacent tags with the specified tag name. <END_TASK> <USER_TASK:> Description: def merge_adjacent(dom, tag_name): """ Merge all adjacent tags with the specified tag name. """
    for node in dom.getElementsByTagName(tag_name):
        prev_sib = node.previousSibling
        if prev_sib and prev_sib.nodeName == node.tagName:
            for child in list(node.childNodes):
                prev_sib.appendChild(child)
            remove_node(node)
<SYSTEM_TASK:> Wrap a copy of the given element around the contents of each of its <END_TASK> <USER_TASK:> Description:
def distribute(node):
    """
    Wrap a copy of the given element around the contents of each of its
    children, removing the node in the process.
    """
    children = list(c for c in node.childNodes if is_element(c))
    unwrap(node)
    tag_name = node.tagName
    for c in children:
        wrap_inner(c, tag_name)
<SYSTEM_TASK:> Save object to the database. Removes all other entries if there <END_TASK> <USER_TASK:> Description:
def save(self, *args, **kwargs):
    """
    Save object to the database. Removes all other entries if there
    are any.
    """
    self.__class__.objects.exclude(id=self.id).delete()
    super(SingletonModel, self).save(*args, **kwargs)
<SYSTEM_TASK:> Get the mappings from MAGICC to OpenSCM regions. <END_TASK> <USER_TASK:> Description:
def get_magicc_region_to_openscm_region_mapping(inverse=False):
    """Get the mappings from MAGICC to OpenSCM regions.

    This is not a pure inverse of the other way around. For example, we
    never provide "GLOBAL" as a MAGICC return value because it's
    unnecessarily confusing when we also have "World". Fortunately MAGICC
    doesn't ever read the name "GLOBAL" so this shouldn't matter.

    Parameters
    ----------
    inverse : bool
        If True, return the inverse mappings i.e. OpenSCM to MAGICC
        mappings

    Returns
    -------
    dict
        Dictionary of mappings
    """
    def get_openscm_replacement(in_region):
        world = "World"
        if in_region in ("WORLD", "GLOBAL"):
            return world
        if in_region == "BUNKERS":
            # the original tested `in ("BUNKERS")`, which is a substring
            # test on a plain string rather than a tuple membership test
            return DATA_HIERARCHY_SEPARATOR.join([world, "Bunkers"])
        elif in_region.startswith(("NH", "SH")):
            in_region = in_region.replace("-", "")
            hem = ("Northern Hemisphere" if "NH" in in_region
                   else "Southern Hemisphere")
            if in_region in ("NH", "SH"):
                return DATA_HIERARCHY_SEPARATOR.join([world, hem])
            land_ocean = "Land" if "LAND" in in_region else "Ocean"
            return DATA_HIERARCHY_SEPARATOR.join([world, hem, land_ocean])
        else:
            return DATA_HIERARCHY_SEPARATOR.join([world, in_region])

    # we generate the mapping dynamically, the first name in the list
    # is the one which will be used for inverse mappings
    _magicc_regions = [
        "WORLD", "GLOBAL", "OECD90", "ALM", "REF", "ASIA",
        "R5ASIA", "R5OECD", "R5REF", "R5MAF", "R5LAM",
        "R6OECD90", "R6REF", "R6LAM", "R6MAF", "R6ASIA",
        "NHOCEAN", "SHOCEAN", "NHLAND", "SHLAND",
        "NH-OCEAN", "SH-OCEAN", "NH-LAND", "SH-LAND",
        "SH", "NH", "BUNKERS",
    ]

    replacements = {}
    for magicc_region in _magicc_regions:
        openscm_region = get_openscm_replacement(magicc_region)
        # i.e. if we've already got a value for the inverse, we don't want
        # to overwrite
        if (openscm_region in replacements.values()) and inverse:
            continue
        replacements[magicc_region] = openscm_region

    if inverse:
        return {v: k for k, v in replacements.items()}
    else:
        return replacements
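For instance, assuming DATA_HIERARCHY_SEPARATOR is the "|" separator used in OpenSCM-style region names:

mapping = get_magicc_region_to_openscm_region_mapping()
print(mapping["NHOCEAN"])  # -> "World|Northern Hemisphere|Ocean"
print(mapping["R5ASIA"])   # -> "World|R5ASIA"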
<SYSTEM_TASK:> Convert MAGICC regions to OpenSCM regions <END_TASK> <USER_TASK:> Description:
def convert_magicc_to_openscm_regions(regions, inverse=False):
    """
    Convert MAGICC regions to OpenSCM regions

    Parameters
    ----------
    regions : list_like, str
        Regions to convert
    inverse : bool
        If True, convert the other way i.e. convert OpenSCM regions to
        MAGICC7 regions

    Returns
    -------
    ``type(regions)``
        Set of converted regions
    """
    if isinstance(regions, (list, pd.Index)):
        return [_apply_convert_magicc_to_openscm_regions(r, inverse)
                for r in regions]
    else:
        return _apply_convert_magicc_to_openscm_regions(regions, inverse)
<SYSTEM_TASK:> Convert MAGICC7 variables to OpenSCM variables <END_TASK> <USER_TASK:> Description:
def convert_magicc7_to_openscm_variables(variables, inverse=False):
    """
    Convert MAGICC7 variables to OpenSCM variables

    Parameters
    ----------
    variables : list_like, str
        Variables to convert
    inverse : bool
        If True, convert the other way i.e. convert OpenSCM variables to
        MAGICC7 variables

    Returns
    -------
    ``type(variables)``
        Set of converted variables
    """
    if isinstance(variables, (list, pd.Index)):
        return [
            _apply_convert_magicc7_to_openscm_variables(v, inverse)
            for v in variables
        ]
    else:
        return _apply_convert_magicc7_to_openscm_variables(variables, inverse)
<SYSTEM_TASK:> Get the mappings from MAGICC6 to MAGICC7 variables. <END_TASK> <USER_TASK:> Description:
def get_magicc6_to_magicc7_variable_mapping(inverse=False):
    """Get the mappings from MAGICC6 to MAGICC7 variables.

    Note that this mapping is not one to one. For example, "HFC4310",
    "HFC43-10" and "HFC-43-10" in MAGICC6 all map to "HFC4310" in MAGICC7
    but "HFC4310" in MAGICC7 maps back to "HFC4310".

    Note that HFC-245fa was mistakenly labelled as HFC-245ca in MAGICC6.
    In reality, they are not the same thing. However, the MAGICC6
    labelling was merely a typo so the mapping between the two is
    one-to-one.

    Parameters
    ----------
    inverse : bool
        If True, return the inverse mappings i.e. MAGICC7 to MAGICC6
        mappings

    Returns
    -------
    dict
        Dictionary of mappings
    """
    # we generate the mapping dynamically, the first name in the list
    # is the one which will be used for inverse mappings
    magicc6_simple_mapping_vars = [
        "KYOTO-CO2EQ", "CO2I", "CO2B", "CH4", "N2O", "BC", "OC", "SOx",
        "NOx", "NMVOC", "CO", "SF6", "NH3", "CF4", "C2F6",
        "HFC4310", "HFC43-10", "HFC-43-10", "HFC4310",
        "HFC134a", "HFC143a", "HFC227ea", "CCl4", "CH3CCl3", "HFC245fa",
        "Halon 1211", "Halon 1202", "Halon 1301", "Halon 2402",
        "Halon1211", "Halon1202", "Halon1301", "Halon2402",
        "CH3Br", "CH3Cl", "C6F14",
    ]

    magicc6_sometimes_hyphen_vars = [
        "CFC-11", "CFC-12", "CFC-113", "CFC-114", "CFC-115",
        "HCFC-22", "HFC-23", "HFC-32", "HFC-125", "HFC-134a", "HFC-143a",
        "HCFC-141b", "HCFC-142b", "HFC-227ea", "HFC-245fa",
    ]
    magicc6_sometimes_hyphen_vars = [
        v.replace("-", "") for v in magicc6_sometimes_hyphen_vars
    ] + magicc6_sometimes_hyphen_vars

    magicc6_sometimes_underscore_vars = [
        "HFC43_10", "CFC_11", "CFC_12", "CFC_113", "CFC_114", "CFC_115",
        "HCFC_22", "HCFC_141b", "HCFC_142b",
    ]
    magicc6_sometimes_underscore_replacements = {
        v: v.replace("_", "") for v in magicc6_sometimes_underscore_vars
    }

    special_case_replacements = {
        "FossilCO2": "CO2I",
        "OtherCO2": "CO2B",
        "MCF": "CH3CCL3",
        "CARB_TET": "CCL4",
        # special case to avoid confusion with MCF
        "MHALOSUMCFC12EQ": "MHALOSUMCFC12EQ",
    }
    one_way_replacements = {"HFC-245ca": "HFC245FA", "HFC245ca": "HFC245FA"}

    all_possible_magicc6_vars = (
        magicc6_simple_mapping_vars
        + magicc6_sometimes_hyphen_vars
        + magicc6_sometimes_underscore_vars
        + list(special_case_replacements.keys())
        + list(one_way_replacements.keys())
    )

    replacements = {}
    for m6v in all_possible_magicc6_vars:
        if m6v in special_case_replacements:
            replacements[m6v] = special_case_replacements[m6v]
        elif (
            m6v in magicc6_sometimes_underscore_vars and not inverse
        ):  # underscores one way
            replacements[m6v] = magicc6_sometimes_underscore_replacements[m6v]
        elif (m6v in one_way_replacements) and not inverse:
            replacements[m6v] = one_way_replacements[m6v]
        else:
            m7v = m6v.replace("-", "").replace(" ", "").upper()
            # i.e. if we've already got a value for the inverse, we don't
            # want to overwrite it
            if (m7v in replacements.values()) and inverse:
                continue
            replacements[m6v] = m7v

    if inverse:
        return {v: k for k, v in replacements.items()}
    else:
        return replacements
<SYSTEM_TASK:> Convert MAGICC6 variables to MAGICC7 variables <END_TASK> <USER_TASK:> Description:
def convert_magicc6_to_magicc7_variables(variables, inverse=False):
    """
    Convert MAGICC6 variables to MAGICC7 variables

    Parameters
    ----------
    variables : list_like, str
        Variables to convert
    inverse : bool
        If True, convert the other way i.e. convert MAGICC7 variables to
        MAGICC6 variables

    Raises
    ------
    ValueError
        If you try to convert HFC245ca, or some variant thereof, you will
        get a ValueError. The reason is that this variable was never meant
        to be included in MAGICC6, it was just an accident. See, for
        example, the text in the description section of
        ``pymagicc/MAGICC6/run/HISTRCP_HFC245fa_CONC.IN``: "...HFC245fa,
        rather than HFC245ca, is the actually used isomer.".

    Returns
    -------
    ``type(variables)``
        Set of converted variables
    """
    if isinstance(variables, (list, pd.Index)):
        return [
            _apply_convert_magicc6_to_magicc7_variables(v, inverse)
            for v in variables
        ]
    else:
        return _apply_convert_magicc6_to_magicc7_variables(variables, inverse)
<SYSTEM_TASK:> Get the mappings from Pint to Fortran safe units. <END_TASK> <USER_TASK:> Description:
def get_pint_to_fortran_safe_units_mapping(inverse=False):
    """Get the mappings from Pint to Fortran safe units.

    Fortran can't handle special characters like "^" or "/" in names, but
    we need these in Pint. Conversely, Pint stores variables with spaces
    by default e.g. "Mt CO2 / yr" but we don't want these in the input
    files as Fortran is likely to think the whitespace is a delimiter.

    Parameters
    ----------
    inverse : bool
        If True, return the inverse mappings i.e. Fortran safe to Pint
        mappings

    Returns
    -------
    dict
        Dictionary of mappings
    """
    replacements = {"^": "super", "/": "per", " ": ""}
    if inverse:
        replacements = {v: k for k, v in replacements.items()}
        # mapping nothing to something is obviously not going to work in
        # the inverse, hence remove
        replacements.pop("")
    return replacements
<SYSTEM_TASK:> Convert Pint units to Fortran safe units <END_TASK> <USER_TASK:> Description:
def convert_pint_to_fortran_safe_units(units, inverse=False):
    """
    Convert Pint units to Fortran safe units

    Parameters
    ----------
    units : list_like, str
        Units to convert
    inverse : bool
        If True, convert the other way i.e. convert Fortran safe units to
        Pint units

    Returns
    -------
    ``type(units)``
        Set of converted units
    """
    if inverse:
        return apply_string_substitutions(
            units, FORTRAN_SAFE_TO_PINT_UNITS_MAPPING)
    else:
        return apply_string_substitutions(
            units, PINT_TO_FORTRAN_SAFE_UNITS_MAPPING)
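A sketch of the expected behaviour, assuming apply_string_substitutions performs a plain text replacement for each mapping pair:

print(convert_pint_to_fortran_safe_units("Mt CO2 / yr"))
# -> "MtCO2peryr": spaces dropped, "/" -> "per" (and "^" would -> "super")
# note the round trip is lossy: the inverse can restore "/" and "^"
# but not the removed spaces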
<SYSTEM_TASK:> Overrides the base evaluation to set the value to the evaluation result of the value <END_TASK> <USER_TASK:> Description:
def run_evaluate(self) -> None:
    """
    Overrides the base evaluation to set the value to the evaluation
    result of the value expression in the schema
    """
    result = None
    self.eval_error = False
    if self._needs_evaluation:
        result = self._schema.value.evaluate(self._evaluation_context)
        self.eval_error = result is None
        if self.eval_error:
            return

    # Only set the value if it conforms to the field type
    if not self._schema.is_type_of(result):
        try:
            result = self._schema.type_object(result)
        except Exception as err:
            logging.debug(
                '{} in casting {} to {} for field {}. Error: {}'.format(
                    type(err).__name__, result, self._schema.type,
                    self._schema.fully_qualified_name, err))
            self.eval_error = True
            return

    try:
        result = self._schema.sanitize_object(result)
    except Exception as err:
        logging.debug(
            '{} in sanitizing {} of type {} for field {}. Error: {}'.format(
                type(err).__name__, result, self._schema.type,
                self._schema.fully_qualified_name, err))
        self.eval_error = True
        return

    self.value = result
<SYSTEM_TASK:> Sets the value of a key to a supplied value <END_TASK> <USER_TASK:> Description: def set(self, key: Any, value: Any) -> None: """ Sets the value of a key to a supplied value """
    if key is not None:
        self[key] = value
<SYSTEM_TASK:> Increments the value set against a key. If the key is not present, 0 is assumed as the initial state <END_TASK> <USER_TASK:> Description: def increment(self, key: Any, by: int = 1) -> None: """ Increments the value set against a key. If the key is not present, 0 is assumed as the initial state """
    if key is not None:
        self[key] = self.get(key, 0) + by
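A behaviour sketch using a plain dict as a stand-in for the dict-like container:

counts = {}  # stand-in for the dict-like container
# a None key is a no-op; missing keys start from 0
for k in (None, "clicks", "clicks"):
    if k is not None:
        counts[k] = counts.get(k, 0) + 1
assert counts == {"clicks": 2}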
<SYSTEM_TASK:> Inserts an item to the list as long as it is not None <END_TASK> <USER_TASK:> Description: def insert(self, index: int, obj: Any) -> None: """ Inserts an item to the list as long as it is not None """
    if obj is not None:
        super().insert(index, obj)
<SYSTEM_TASK:> Get the THISFILE_DATTYPE and THISFILE_REGIONMODE flags for a given region set. <END_TASK> <USER_TASK:> Description:
def get_dattype_regionmode(regions, scen7=False):
    """
    Get the THISFILE_DATTYPE and THISFILE_REGIONMODE flags for a given
    region set.

    In all MAGICC input files, there are two flags: THISFILE_DATTYPE and
    THISFILE_REGIONMODE. These tell MAGICC how to read in a given input
    file. This function maps the regions which are in a given file to the
    value of these flags expected by MAGICC.

    Parameters
    ----------
    regions : list_like
        The regions to get THISFILE_DATTYPE and THISFILE_REGIONMODE flags
        for.
    scen7 : bool, optional
        Whether the file we are getting the flags for is a SCEN7 file or
        not.

    Returns
    -------
    dict
        Dictionary where the flags are the keys and the values are the
        value they should be set to for the given inputs.
    """
    dattype_flag = "THISFILE_DATTYPE"
    regionmode_flag = "THISFILE_REGIONMODE"
    region_dattype_row = _get_dattype_regionmode_regions_row(
        regions, scen7=scen7)

    dattype = DATTYPE_REGIONMODE_REGIONS[dattype_flag.lower()][
        region_dattype_row
    ].iloc[0]
    regionmode = DATTYPE_REGIONMODE_REGIONS[regionmode_flag.lower()][
        region_dattype_row
    ].iloc[0]

    return {dattype_flag: dattype, regionmode_flag: regionmode}
<SYSTEM_TASK:> Get the region order expected by MAGICC. <END_TASK> <USER_TASK:> Description:
def get_region_order(regions, scen7=False):
    """
    Get the region order expected by MAGICC.

    Parameters
    ----------
    regions : list_like
        The regions to get THISFILE_DATTYPE and THISFILE_REGIONMODE flags
        for.
    scen7 : bool, optional
        Whether the file we are getting the flags for is a SCEN7 file or
        not.

    Returns
    -------
    list
        Region order expected by MAGICC for the given region set.
    """
    region_dattype_row = _get_dattype_regionmode_regions_row(
        regions, scen7=scen7)
    region_order = DATTYPE_REGIONMODE_REGIONS["regions"][
        region_dattype_row
    ].iloc[0]

    return region_order
<SYSTEM_TASK:> Get special code for MAGICC6 SCEN files. <END_TASK> <USER_TASK:> Description:
def get_special_scen_code(regions, emissions):
    """
    Get special code for MAGICC6 SCEN files.

    At the top of every MAGICC6 and MAGICC5 SCEN file there is a two digit
    number. The first digit, the 'scenfile_region_code', tells MAGICC how
    many regions data is being provided for. The second digit, the
    'scenfile_emissions_code', tells MAGICC which gases are in the SCEN
    file.

    The variables which are part of
    ``PART_OF_SCENFILE_WITH_EMISSIONS_CODE_1`` are the emissions species
    which are expected when scenfile_emissions_code is 1. Similarly,
    ``PART_OF_SCENFILE_WITH_EMISSIONS_CODE_0`` defines the emissions
    species which are expected when scenfile_emissions_code is 0.

    Having these definitions allows Pymagicc to check that the right set
    of emissions has been provided before writing SCEN files.

    Parameters
    ----------
    regions : list_like
        Regions to get code for.
    emissions : list-like
        Emissions to get code for.

    Raises
    ------
    ValueError
        If the special scen code cannot be determined.

    Returns
    -------
    int
        The special scen code for the regions-emissions combination
        provided.
    """
    if sorted(set(PART_OF_SCENFILE_WITH_EMISSIONS_CODE_0)) == sorted(
            set(emissions)):
        scenfile_emissions_code = 0
    elif sorted(set(PART_OF_SCENFILE_WITH_EMISSIONS_CODE_1)) == sorted(
            set(emissions)):
        scenfile_emissions_code = 1
    else:
        msg = "Could not determine scen special code for emissions {}".format(
            emissions)
        raise ValueError(msg)

    if set(regions) == set(["WORLD"]):
        scenfile_region_code = 1
    elif set(regions) == set(["WORLD", "OECD90", "REF", "ASIA", "ALM"]):
        scenfile_region_code = 2
    elif set(regions) == set(
            ["WORLD", "R5OECD", "R5REF", "R5ASIA", "R5MAF", "R5LAM"]):
        scenfile_region_code = 3
    elif set(regions) == set(
            ["WORLD", "R5OECD", "R5REF", "R5ASIA", "R5MAF", "R5LAM",
             "BUNKERS"]):
        scenfile_region_code = 4

    try:
        return scenfile_region_code * 10 + scenfile_emissions_code
    except NameError:
        msg = "Could not determine scen special code for regions {}".format(
            regions)
        raise ValueError(msg)
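For example, the classic five-region set combined with the code-1 gas list should produce 21 (region digit 2, emissions digit 1); the names here come from the module above:

code = get_special_scen_code(
    ["WORLD", "OECD90", "REF", "ASIA", "ALM"],
    PART_OF_SCENFILE_WITH_EMISSIONS_CODE_1,
)
assert code == 21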
<SYSTEM_TASK:> Pull out a single config set from a parameters_out namelist. <END_TASK> <USER_TASK:> Description:
def pull_cfg_from_parameters_out(parameters_out, namelist_to_read="nml_allcfgs"):
    """Pull out a single config set from a parameters_out namelist.

    This function returns a single file with the config that needs to be
    passed to MAGICC in order to do the same run as is represented by the
    values in ``parameters_out``.

    Parameters
    ----------
    parameters_out : dict, f90nml.Namelist
        The parameters to dump

    namelist_to_read : str
        The namelist to read from the file.

    Returns
    -------
    :obj:`f90nml.Namelist`
        An f90nml object with the cleaned, read out config.

    Examples
    --------
    >>> cfg = pull_cfg_from_parameters_out(magicc.metadata["parameters"])
    >>> cfg.write("/somewhere/else/ANOTHERNAME.cfg")
    """
    single_cfg = Namelist({namelist_to_read: {}})
    for key, value in parameters_out[namelist_to_read].items():
        if "file_tuning" in key:
            single_cfg[namelist_to_read][key] = ""
        else:
            try:
                if isinstance(value, str):
                    single_cfg[namelist_to_read][key] = value.strip(
                        " \t\n\r").replace("\x00", "")
                elif isinstance(value, list):
                    clean_list = [
                        v.strip(" \t\n\r").replace("\x00", "") for v in value
                    ]
                    single_cfg[namelist_to_read][key] = [
                        v for v in clean_list if v
                    ]
                else:
                    assert isinstance(value, Number)
                    single_cfg[namelist_to_read][key] = value
            except AttributeError:
                if isinstance(value, list):
                    assert all([isinstance(v, Number) for v in value])
                    single_cfg[namelist_to_read][key] = value
                else:
                    raise AssertionError(
                        "Unexpected cause in out parameters conversion"
                    )

    return single_cfg
<SYSTEM_TASK:> Pull out a single config set from a MAGICC ``PARAMETERS.OUT`` file. <END_TASK> <USER_TASK:> Description:
def pull_cfg_from_parameters_out_file(
    parameters_out_file, namelist_to_read="nml_allcfgs"
):
    """Pull out a single config set from a MAGICC ``PARAMETERS.OUT`` file.

    This function reads in the ``PARAMETERS.OUT`` file and returns a
    single file with the config that needs to be passed to MAGICC in
    order to do the same run as is represented by the values in
    ``PARAMETERS.OUT``.

    Parameters
    ----------
    parameters_out_file : str
        The ``PARAMETERS.OUT`` file to read

    namelist_to_read : str
        The namelist to read from the file.

    Returns
    -------
    :obj:`f90nml.Namelist`
        An f90nml object with the cleaned, read out config.

    Examples
    --------
    >>> cfg = pull_cfg_from_parameters_out_file("PARAMETERS.OUT")
    >>> cfg.write("/somewhere/else/ANOTHERNAME.cfg")
    """
    parameters_out = read_cfg_file(parameters_out_file)
    return pull_cfg_from_parameters_out(
        parameters_out, namelist_to_read=namelist_to_read
    )
<SYSTEM_TASK:> Convert an RCP name into the generic Pymagicc RCP name <END_TASK> <USER_TASK:> Description: def get_generic_rcp_name(inname): """Convert an RCP name into the generic Pymagicc RCP name The conversion is case insensitive. Parameters ---------- inname : str The name for which to get the generic Pymagicc RCP name Returns ------- str The generic Pymagicc RCP name Examples -------- >>> get_generic_rcp_name("RCP3PD") "rcp26" """
# TODO: move into OpenSCM
mapping = {
    "rcp26": "rcp26",
    "rcp3pd": "rcp26",
    "rcp45": "rcp45",
    "rcp6": "rcp60",
    "rcp60": "rcp60",
    "rcp85": "rcp85",
}
try:
    return mapping[inname.lower()]
except KeyError:
    error_msg = "No generic name for input: {}".format(inname)
    raise ValueError(error_msg)
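For instance, normalising a mixed batch of scenario labels (the names here are invented for illustration):

names = ["RCP3PD", "rcp45", "Rcp6", "RCP85"]
print([get_generic_rcp_name(n) for n in names])
# ['rcp26', 'rcp45', 'rcp60', 'rcp85']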
<SYSTEM_TASK:>
Join two sets of timeseries
<END_TASK>
<USER_TASK:>
Description:
def join_timeseries(base, overwrite, join_linear=None):
    """Join two sets of timeseries

    Parameters
    ----------
    base : :obj:`MAGICCData`, :obj:`pd.DataFrame`, filepath
        Base timeseries to use. If a filepath, the data will first be loaded
        from disk.

    overwrite : :obj:`MAGICCData`, :obj:`pd.DataFrame`, filepath
        Timeseries to join onto base. Any points which are in both `base` and
        `overwrite` will be taken from `overwrite`. If a filepath, the data
        will first be loaded from disk.

    join_linear : tuple of len(2)
        A list/array which specifies the period over which the two timeseries
        should be joined. The first element is the start time of the join
        period, the second element is the end time of the join period. In the
        join period (excluding the start and end times), output data will be
        a linear interpolation between (the annually interpolated) `base` and
        `overwrite` data. If None, no linear join will be done and any points
        in (the annually interpolated) `overwrite` data will simply overwrite
        any points in `base`.

    Returns
    -------
    :obj:`MAGICCData`
        The joint timeseries. The resulting data is linearly interpolated
        onto annual steps.
    """
if join_linear is not None:
    if len(join_linear) != 2:
        raise ValueError("join_linear must have a length of 2")

if isinstance(base, str):
    base = MAGICCData(base)
elif isinstance(base, MAGICCData):
    base = deepcopy(base)

if isinstance(overwrite, str):
    overwrite = MAGICCData(overwrite)
elif isinstance(overwrite, MAGICCData):
    overwrite = deepcopy(overwrite)

result = _join_timeseries_mdata(base, overwrite, join_linear)

return MAGICCData(result)
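A hedged usage sketch; both filepaths and the join period are hypothetical:

joint = join_timeseries(
    base="HISTORICAL_CO2I_EMIS.IN",  # hypothetical filepath
    overwrite="RCP26.SCEN",          # hypothetical filepath
    join_linear=(2000, 2010),        # blend linearly between these years
)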
<SYSTEM_TASK:>
Determine the OpenSCM variable from a filepath.
<END_TASK>
<USER_TASK:>
Description:
def _get_openscm_var_from_filepath(filepath):
    """
    Determine the OpenSCM variable from a filepath.

    Uses MAGICC's internal, implicit, filenaming conventions.

    Parameters
    ----------
    filepath : str
        Filepath from which to determine the OpenSCM variable.

    Returns
    -------
    str
        The OpenSCM variable implied by the filepath.
    """
reader = determine_tool(filepath, "reader")(filepath)
openscm_var = convert_magicc7_to_openscm_variables(
    convert_magicc6_to_magicc7_variables(reader._get_variable_from_filepath())
)

return openscm_var
<SYSTEM_TASK:>
Find the start and end of the embedded namelist.
<END_TASK>
<USER_TASK:>
Description:
def _find_nml(self):
    """
    Find the start and end of the embedded namelist.

    Returns
    -------
    (int, int)
        end and start index for the namelist, in that order
    """
nml_start = None
nml_end = None
for i in range(len(self.lines)):
    if self.lines[i].strip().startswith("&"):
        nml_start = i

    if self.lines[i].strip().startswith("/"):
        nml_end = i

assert (
    nml_start is not None and nml_end is not None
), "Could not find namelist within {}".format(self.filepath)

return nml_end, nml_start
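A self-contained sketch of the same scan on an invented file shape, showing that the last matching lines win and that the indices come back as (end, start):

lines = [
    "header text",
    "&THISFILE_SPECIFICATIONS",     # nml_start -> 1
    "    THISFILE_DATACOLUMNS = 4",
    "/",                            # nml_end -> 3
    "   YEARS  DATA",
]
nml_start = max(i for i, l in enumerate(lines) if l.strip().startswith("&"))
nml_end = max(i for i, l in enumerate(lines) if l.strip().startswith("/"))
print(nml_end, nml_start)  # 3 1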
<SYSTEM_TASK:>
Extract the tabulated data from the input file.
<END_TASK>
<USER_TASK:>
Description:
def process_data(self, stream, metadata):
    """
    Extract the tabulated data from the input file.

    Parameters
    ----------
    stream : Streamlike object
        A Streamlike object (nominally StringIO) containing the table to be
        extracted

    metadata : dict
        Metadata read in from the header and the namelist

    Returns
    -------
    (pandas.DataFrame, dict, dict)
        The first element contains the data, processed to the standard
        MAGICCData format. The second element is the updated metadata based
        on the processing performed. The third element is the column headers
        for the data.
    """
ch, metadata = self._get_column_headers_and_update_metadata(stream, metadata)
df = self._convert_data_block_and_headers_to_df(stream)

return df, metadata, ch
<SYSTEM_TASK:>
Determine the file variable from the filepath.
<END_TASK>
<USER_TASK:>
Description:
def _get_variable_from_filepath(self):
    """
    Determine the file variable from the filepath.

    Returns
    -------
    str
        Best guess of variable name from the filepath
    """
try:
    return self.regexp_capture_variable.search(self.filepath).group(1)
except AttributeError:
    self._raise_cannot_determine_variable_from_filepath_error()
<SYSTEM_TASK:>
Parse the header for additional metadata.
<END_TASK>
<USER_TASK:>
Description:
def process_header(self, header):
    """
    Parse the header for additional metadata.

    Parameters
    ----------
    header : str
        All the lines in the header.

    Returns
    -------
    dict
        The metadata in the header.
    """
metadata = {}
for line in header.split("\n"):
    line = line.strip()
    for tag in self.header_tags:
        tag_text = "{}:".format(tag)
        if line.lower().startswith(tag_text):
            # Slice from the end of the tag itself; strip() then removes any
            # separating whitespace without eating the first character of the
            # value when no space follows the colon.
            metadata[tag] = line[len(tag_text):].strip()

return metadata
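For illustration, assuming header_tags includes "contact" and "date" (lowercase, as the startswith comparison requires):

header = "Contact: someone@example.com\nDate: 2018-01-01"
# process_header(header)
# -> {"contact": "someone@example.com", "date": "2018-01-01"}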
<SYSTEM_TASK:>
Read a data header line, ensuring that it starts with the expected header
<END_TASK>
<USER_TASK:>
Description:
def _read_data_header_line(self, stream, expected_header):
    """Read a data header line, ensuring that it starts with the expected header

    Parameters
    ----------
    stream : :obj:`StreamIO`
        Stream object containing the text to read

    expected_header : str, list of strs
        Expected header of the data header line

    Returns
    -------
    list of str
        The tokens in the data header line, with the header token removed
    """
pos = stream.tell()
expected_header = (
    [expected_header] if isinstance(expected_header, str) else expected_header
)
for exp_hd in expected_header:
    tokens = stream.readline().split()
    try:
        assert tokens[0] == exp_hd
        return tokens[1:]
    except AssertionError:
        stream.seek(pos)
        continue

assertion_msg = "Expected a header token of {}, got {}".format(
    expected_header, tokens[0]
)
raise AssertionError(assertion_msg)
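A hedged sketch of the rewind-and-retry behaviour with an in-memory stream; reader stands in for any instance exposing this method:

from io import StringIO

stream = StringIO("COLCODE 1 2 3\n")
# "VARIABLE" fails, the stream is rewound to pos, then "COLCODE" matches.
tokens = reader._read_data_header_line(stream, ["VARIABLE", "COLCODE"])
print(tokens)  # ['1', '2', '3']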
<SYSTEM_TASK:>
Read out the next chunk of memory
<END_TASK>
<USER_TASK:>
Description:
def read_chunk(self, t):
    """
    Read out the next chunk of memory

    Each value in a Fortran binary stream is framed by its size in bytes,
    written as a 4-byte integer both before and after the data.

    :param t: Data type (same format as used by struct).
    :return: Numpy array if the variable is an array, otherwise a scalar.
    """
size = self.data[self.pos : self.pos + 4].cast("i")[0]
d = self.data[self.pos + 4 : self.pos + 4 + size]

assert (
    self.data[self.pos + 4 + size : self.pos + 4 + size + 4].cast("i")[0]
    == size
)
self.pos = self.pos + 4 + size + 4

res = np.array(d.cast(t))

# Return as a scalar or a numpy array if it is an array
if res.size == 1:
    return res[0]
return res
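A minimal sketch of the record framing this reader expects, built by hand with struct; the values are invented:

import struct

payload = struct.pack("3d", 1.0, 2.0, 3.0)
record = (
    struct.pack("i", len(payload))    # leading 4-byte record length
    + payload                         # the data itself
    + struct.pack("i", len(payload))  # trailing 4-byte record length
)
# A reader positioned at the start of `record` and calling read_chunk("d")
# would return np.array([1.0, 2.0, 3.0]).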
<SYSTEM_TASK:>
Extract the tabulated data from the input file
<END_TASK>
<USER_TASK:>
Description:
def process_data(self, stream, metadata):
    """
    Extract the tabulated data from the input file

    # Arguments
    stream (Streamlike object): A Streamlike object (nominally StringIO)
        containing the table to be extracted
    metadata (dict): metadata read in from the header and the namelist

    # Returns
    df (pandas.DataFrame): contains the data, processed to the standard
        MAGICCData format
    metadata (dict): updated metadata based on the processing performed
    column_headers (dict): the column headers for the data
    """
index = np.arange(metadata["firstyear"], metadata["lastyear"] + 1)

# The first variable is the global values
globe = stream.read_chunk("d")

assert len(globe) == len(index)

regions = stream.read_chunk("d")
num_regions = int(len(regions) / len(index))
regions = regions.reshape((-1, num_regions), order="F")

data = np.concatenate((globe[:, np.newaxis], regions), axis=1)

df = pd.DataFrame(data, index=index)

if isinstance(df.index, pd.core.indexes.numeric.Float64Index):
    df.index = df.index.to_series().round(3)

df.index.name = "time"

regions = [
    "World",
    "World|Northern Hemisphere|Ocean",
    "World|Northern Hemisphere|Land",
    "World|Southern Hemisphere|Ocean",
    "World|Southern Hemisphere|Land",
]
variable = convert_magicc6_to_magicc7_variables(
    self._get_variable_from_filepath()
)
variable = convert_magicc7_to_openscm_variables(variable)
column_headers = {
    "variable": [variable] * (num_regions + 1),
    "region": regions,
    "unit": ["unknown"] * len(regions),
    "todo": ["SET"] * len(regions),
}

return df, metadata, self._set_column_defaults(column_headers)
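A small demonstration of why order="F" is used for the regional block: the stream stores all years for one region before moving to the next, so the flat array must be filled column-first (values invented):

import numpy as np

flat = np.array([11.0, 12.0, 21.0, 22.0])  # region1-yr1, region1-yr2, region2-yr1, region2-yr2
print(flat.reshape((-1, 2), order="F"))
# [[11. 21.]
#  [12. 22.]]   rows are years, columns are regions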
<SYSTEM_TASK:>
Reads the first part of the file to get some essential metadata
<END_TASK>
<USER_TASK:>
Description:
def process_header(self, data):
    """
    Reads the first part of the file to get some essential metadata

    # Returns
    metadata (dict): the metadata in the header
    """
metadata = {
    "datacolumns": data.read_chunk("I"),
    "firstyear": data.read_chunk("I"),
    "lastyear": data.read_chunk("I"),
    "annualsteps": data.read_chunk("I"),
}
if metadata["annualsteps"] != 1:
    raise InvalidTemporalResError(
        "{}: Only annual files can currently be processed".format(self.filepath)
    )

return metadata
<SYSTEM_TASK:>
Write a MAGICC input file from df and metadata
<END_TASK>
<USER_TASK:>
Description:
def write(self, magicc_input, filepath):
    """
    Write a MAGICC input file from df and metadata

    Parameters
    ----------
    magicc_input : :obj:`pymagicc.io.MAGICCData`
        MAGICCData object which holds the data to write

    filepath : str
        Filepath of the file to write to.
    """
self._filepath = filepath
# TODO: make copy attribute for MAGICCData
self.minput = deepcopy(magicc_input)
self.data_block = self._get_data_block()

output = StringIO()

output = self._write_header(output)
output = self._write_namelist(output)
output = self._write_datablock(output)

with open(
    filepath, "w", encoding="utf-8", newline=self._newline_char
) as output_file:
    output.seek(0)
    copyfileobj(output, output_file)
<SYSTEM_TASK:>
Append any input which can be converted to MAGICCData to self.
<END_TASK>
<USER_TASK:>
Description:
def append(self, other, inplace=False, **kwargs):
    """
    Append any input which can be converted to MAGICCData to self.

    Parameters
    ----------
    other : MAGICCData, pd.DataFrame, pd.Series, str
        Source of data to append.

    inplace : bool
        If True, append ``other`` inplace, otherwise return a new
        ``MAGICCData`` instance.

    **kwargs
        Passed to ``MAGICCData`` constructor (only used if ``other`` is not
        a ``MAGICCData`` instance).
    """
if not isinstance(other, MAGICCData):
    other = MAGICCData(other, **kwargs)

if inplace:
    super().append(other, inplace=inplace)
    self.metadata.update(other.metadata)
else:
    res = super().append(other, inplace=inplace)
    res.metadata = deepcopy(self.metadata)
    res.metadata.update(other.metadata)

    return res
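A hedged usage sketch; both filepaths are hypothetical:

scen = MAGICCData("RCP26.SCEN")
combined = scen.append("RCP26_MIDYEAR_CONC.IN")     # returns a new instance
scen.append("RCP26_MIDYEAR_CONC.IN", inplace=True)  # or modify in place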
<SYSTEM_TASK:>
Write an input file to disk.
<END_TASK>
<USER_TASK:>
Description:
def write(self, filepath, magicc_version):
    """
    Write an input file to disk.

    Parameters
    ----------
    filepath : str
        Filepath of the file to write.

    magicc_version : int
        The MAGICC version for which we want to write files. MAGICC7 and
        MAGICC6 namelists are incompatible hence we need to know which one
        we're writing for.
    """
writer = determine_tool(filepath, "writer")(magicc_version=magicc_version)
writer.write(self, filepath)
<SYSTEM_TASK:>
Validates a set of attributes as identifiers in a spec
<END_TASK>
<USER_TASK:>
Description:
def validate_python_identifier_attributes(fully_qualified_name: str, spec: Dict[str, Any],
                                          *attributes: str) -> List[InvalidIdentifierError]:
    """ Validates a set of attributes as identifiers in a spec """
errors: List[InvalidIdentifierError] = []
checks: List[Tuple[Callable, InvalidIdentifierError.Reason]] = [
    (lambda x: x.startswith('_'), InvalidIdentifierError.Reason.STARTS_WITH_UNDERSCORE),
    (lambda x: x.startswith('run_'), InvalidIdentifierError.Reason.STARTS_WITH_RUN),
    (lambda x: not x.isidentifier(), InvalidIdentifierError.Reason.INVALID_PYTHON_IDENTIFIER),
]
for attribute in attributes:
    if attribute not in spec or spec.get(ATTRIBUTE_INTERNAL, False):
        continue

    for check in checks:
        if check[0](spec[attribute]):
            errors.append(
                InvalidIdentifierError(fully_qualified_name, spec, attribute, check[1]))
            break

return errors
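A usage sketch with an invented spec; the attribute name "Name" is assumed for illustration:

spec = {"Name": "run_pipeline"}
errors = validate_python_identifier_attributes("pkg.MySpec", spec, "Name")
# One InvalidIdentifierError with Reason.STARTS_WITH_RUN; the break means
# only the first failed check per attribute is reported.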
<SYSTEM_TASK:>
Validates to ensure that a set of attributes are present in spec
<END_TASK>
<USER_TASK:>
Description:
def validate_required_attributes(fully_qualified_name: str, spec: Dict[str, Any],
                                 *attributes: str) -> List[RequiredAttributeError]:
    """ Validates to ensure that a set of attributes are present in spec """
return [
    RequiredAttributeError(fully_qualified_name, spec, attribute)
    for attribute in attributes
    if attribute not in spec
]
<SYSTEM_TASK:>
Validates to ensure that a set of attributes do not contain empty values
<END_TASK>
<USER_TASK:>
Description:
def validate_empty_attributes(fully_qualified_name: str, spec: Dict[str, Any],
                              *attributes: str) -> List[EmptyAttributeError]:
    """ Validates to ensure that a set of attributes do not contain empty values """
return [
    EmptyAttributeError(fully_qualified_name, spec, attribute)
    for attribute in attributes
    if not spec.get(attribute, None)
]
<SYSTEM_TASK:>
Validates to ensure that the value is a number of the specified type, and lies within the specified range
<END_TASK>
<USER_TASK:>
Description:
def validate_number_attribute(
        fully_qualified_name: str,
        spec: Dict[str, Any],
        attribute: str,
        value_type: Union[Type[int], Type[float]] = int,
        minimum: Optional[Union[int, float]] = None,
        maximum: Optional[Union[int, float]] = None) -> Optional[InvalidNumberError]:
    """ Validates to ensure that the value is a number of the specified type, and lies within the specified range """
if attribute not in spec:
    return

try:
    value = value_type(spec[attribute])
    if (minimum is not None and value < minimum) or (maximum is not None and value > maximum):
        # Out-of-range values take the same error path as unparseable ones
        raise ValueError()
except (ValueError, TypeError):
    return InvalidNumberError(fully_qualified_name, spec, attribute, value_type, minimum,
                              maximum)
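For example, requiring a positive integer (the attribute name "Limit" is invented):

err = validate_number_attribute("pkg.MySpec", {"Limit": "-3"}, "Limit", int, minimum=1)
# err is an InvalidNumberError: int("-3") parses, but -3 < 1.
validate_number_attribute("pkg.MySpec", {"Limit": "5"}, "Limit", int, minimum=1)  # None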
<SYSTEM_TASK:>
Validates to ensure that the value of an attribute lies within an allowed set of candidates
<END_TASK>
<USER_TASK:>
Description:
def validate_enum_attribute(fully_qualified_name: str, spec: Dict[str, Any], attribute: str,
                            candidates: Set[Union[str, int, float]]) -> Optional[InvalidValueError]:
    """ Validates to ensure that the value of an attribute lies within an allowed set of candidates """
if attribute not in spec:
    return

if spec[attribute] not in candidates:
    return InvalidValueError(fully_qualified_name, spec, attribute, candidates)
<SYSTEM_TASK:>
Checks if this key starts with the other key provided. Returns False if key_type, identity
<END_TASK>
<USER_TASK:>
Description:
def starts_with(self, other: 'Key') -> bool:
    """
    Checks if this key starts with the other key provided. Returns False if key_type, identity
    or group are different. For `KeyType.TIMESTAMP` returns True. For `KeyType.DIMENSION`
    does a prefix match between the two dimensions properties.
    """
if (self.key_type, self.identity, self.group) != (other.key_type, other.identity, other.group):
    return False

if self.key_type == KeyType.TIMESTAMP:
    return True

if self.key_type == KeyType.DIMENSION:
    if len(self.dimensions) < len(other.dimensions):
        return False
    return self.dimensions[0:len(other.dimensions)] == other.dimensions

return False
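A hedged sketch of the dimension prefix match; the constructor signature Key(key_type, identity, group, dimensions) is inferred from how Key is built elsewhere in this module:

parent = Key(KeyType.DIMENSION, "user-1", "by_city", ["US"])
child = Key(KeyType.DIMENSION, "user-1", "by_city", ["US", "CA"])
child.starts_with(parent)   # True:  ["US"] is a prefix of ["US", "CA"]
parent.starts_with(child)   # False: parent has fewer dimensions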
<SYSTEM_TASK:>
Evaluates the dimension fields. Returns False if any of the fields could not be evaluated.
<END_TASK>
<USER_TASK:>
Description:
def _evaluate_dimension_fields(self) -> bool:
    """ Evaluates the dimension fields. Returns False if any of the fields could not be evaluated. """
for _, item in self._dimension_fields.items():
    item.run_evaluate()
    if item.eval_error:
        return False

return True
<SYSTEM_TASK:>
Compares the dimension field values to the values in the regular fields.
<END_TASK>
<USER_TASK:>
Description:
def _compare_dimensions_to_fields(self) -> bool:
    """ Compares the dimension field values to the values in the regular fields. """
for name, item in self._dimension_fields.items():
    if item.value != self._nested_items[name].value:
        return False

return True
<SYSTEM_TASK:>
Generates the Key object based on dimension fields.
<END_TASK>
<USER_TASK:>
Description:
def _key(self):
    """ Generates the Key object based on dimension fields. """
return Key(self._schema.key_type, self._identity, self._name,
           [str(item.value) for item in self._dimension_fields.values()])