text_prompt: string, lengths 100 to 17.7k
code_prompt: string, lengths 7 to 9.86k
<SYSTEM_TASK:> Adds a new coroutine function with optional variadic arguments. <END_TASK> <USER_TASK:> Description: def add(self, coro, *args, **kw): """ Adds a new coroutine function with optional variadic arguments. Arguments: coro (coroutine function): coroutine to execute. *args (mixed): optional variadic arguments. **kw (mixed): optional keyword arguments. Raises: TypeError: if the coro object is not a valid coroutine Returns: future: coroutine wrapped future """
# Create coroutine object if a function is provided if asyncio.iscoroutinefunction(coro): coro = coro(*args, **kw) # Verify coroutine if not asyncio.iscoroutine(coro): raise TypeError('paco: coro must be a coroutine object') # Store coroutine with arguments for deferred execution index = max(len(self.pool), 0) task = Task(index, coro) # Append the coroutine data to the pool self.pool.append(task) return coro
<SYSTEM_TASK:> Executes the registered coroutines in the executor queue. <END_TASK> <USER_TASK:> Description: def run(self, timeout=None, return_when=None, return_exceptions=None, ignore_empty=None): """ Executes the registered coroutines in the executor queue. Arguments: timeout (int/float): max execution timeout. No limit by default. return_exceptions (bool): if True, exceptions are returned as results instead of being raised. return_when (str): sets when the coroutines should be resolved. See `asyncio.wait`_ for supported values. ignore_empty (bool, optional): do not raise an exception if there are no coroutines to schedule. Returns: asyncio.Future (tuple): two sets of Futures: ``(done, pending)`` Raises: ValueError: if there are no coroutines to schedule. RuntimeError: if the executor is still running. TimeoutError: if execution takes longer than expected. .. _asyncio.wait: https://docs.python.org/3/library/asyncio-task.html#asyncio.wait # noqa """
# Only allow 1 concurrent execution
if self.running:
    raise RuntimeError('paco: executor is already running')

# Overwrite ignore empty behaviour, if explicitly defined
ignore_empty = (self.ignore_empty if ignore_empty is None
                else ignore_empty)

# Check we have coroutines to schedule
if len(self.pool) == 0:
    # If ignore empty mode enabled, just return an empty tuple
    if ignore_empty:
        return (tuple(), tuple())
    # Otherwise raise an exception
    raise ValueError('paco: pool of coroutines is empty')

# Set executor state to running
self.running = True

# Configure return exceptions
if return_exceptions is not None:
    self.return_exceptions = return_exceptions

if return_exceptions is False and return_when is None:
    return_when = 'FIRST_EXCEPTION'

if return_when is None:
    return_when = 'ALL_COMPLETED'

# Trigger pre-execution event
yield from self.observer.trigger('start', self)

# Sequential coroutines execution
if self.limit == 1:
    done, pending = yield from self._run_sequentially()

# Concurrent execution based on configured limit
if self.limit != 1:
    done, pending = yield from self._run_concurrently(
        timeout=timeout,
        return_when=return_when)

# Reset internal state and queue
self.running = False

# Raise exception, if needed
if self.return_exceptions is False and self.errors:
    err = self.errors[0]
    err.errors = self.errors[1:]
    raise err

# Trigger post-execution event
yield from self.observer.trigger('finish', self)

# Reset executor state to defaults after each execution
self.reset()

# Return resultant futures in two tuples
return done, pending
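For readers unfamiliar with the return_when semantics the executor passes through, here is a minimal standalone sketch using plain asyncio (not the paco API; the work and main names are illustrative only):

import asyncio

async def work(n):
    await asyncio.sleep(0.01 * n)
    return n

async def main():
    tasks = [asyncio.ensure_future(work(n)) for n in range(5)]
    # ALL_COMPLETED waits for every coroutine; FIRST_EXCEPTION resolves as soon
    # as any of them raises, which is what the executor selects when
    # return_exceptions is False.
    done, pending = await asyncio.wait(tasks, return_when=asyncio.ALL_COMPLETED)
    return sorted(t.result() for t in done)

print(asyncio.run(main()))  # -> [0, 1, 2, 3, 4]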
<SYSTEM_TASK:> Given a value of an interval, this function returns the <END_TASK> <USER_TASK:> Description: def next_interval(self, interval): """ Given a value of an interval, this function returns the next interval value """
index = np.where(self.intervals == interval) if index[0][0] + 1 < len(self.intervals): return self.intervals[index[0][0] + 1] else: raise IndexError("Ran out of intervals!")
<SYSTEM_TASK:> This function returns the nearest interval to any given interval. <END_TASK> <USER_TASK:> Description: def nearest_interval(self, interval): """ This function returns the nearest interval to any given interval. """
thresh_range = 25 # in cents if interval < self.intervals[0] - thresh_range or interval > self.intervals[-1] + thresh_range: raise IndexError("The interval given is beyond " + str(thresh_range) + " cents over the range of intervals defined.") index = find_nearest_index(self.intervals, interval) return self.intervals[index]
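find_nearest_index is referenced above but not defined in this excerpt; a plausible numpy-based sketch of such a helper (an assumption, not the library's own implementation):

import numpy as np

def find_nearest_index(array, value):
    # Index of the element in `array` whose value is closest to `value`.
    return int(np.abs(np.asarray(array) - value).argmin())

print(find_nearest_index([0, 100, 200, 300], 130))  # -> 1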
<SYSTEM_TASK:> Optimise ML distance between two partials. min and max set brackets <END_TASK> <USER_TASK:> Description: def brent_optimise(node1, node2, min_brlen=0.001, max_brlen=10, verbose=False): """ Optimise ML distance between two partials. min and max set brackets """
from scipy.optimize import minimize_scalar wrapper = BranchLengthOptimiser(node1, node2, (min_brlen + max_brlen) / 2.) n = minimize_scalar(lambda x: -wrapper(x)[0], method='brent', bracket=(min_brlen, max_brlen))['x'] if verbose: logger.info(wrapper) if n < min_brlen: n = min_brlen wrapper(n) return n, -1 / wrapper.get_d2lnl(n)
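For reference, the same scipy call pattern applied to a toy quadratic, independent of any phylogenetic objects:

from scipy.optimize import minimize_scalar

# Brent minimisation of (x - 2)^2, bracketed between 0.001 and 10.
res = minimize_scalar(lambda x: (x - 2.0) ** 2, method='brent', bracket=(0.001, 10))
print(round(res['x'], 6))  # ~ 2.0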
<SYSTEM_TASK:> Load an alignment, calculate all pairwise distances and variances <END_TASK> <USER_TASK:> Description: def pairdists(alignment, subs_model, alpha=None, ncat=4, tolerance=1e-6, verbose=False): """ Load an alignment, calculate all pairwise distances and variances model parameter must be a Substitution model type from phylo_utils """
# Check the substitution model type
if not isinstance(subs_model, phylo_utils.models.Model):
    raise ValueError("Can't handle this model: {}".format(subs_model))

if alpha is None:
    alpha = 1.0
    ncat = 1

# Set up markov model
tm = TransitionMatrix(subs_model)

gamma_rates = discrete_gamma(alpha, ncat)
partials = alignment_to_partials(alignment)
seqnames = alignment.get_names()
nseq = len(seqnames)
distances = np.zeros((nseq, nseq))
variances = np.zeros((nseq, nseq))

# Check the model has the appropriate size
if not subs_model.size == partials[seqnames[0]].shape[1]:
    raise ValueError("Model {} expects {} states, but the alignment has {}".format(
        subs_model.name, subs_model.size, partials[seqnames[0]].shape[1]))

nodes = [phylo_utils.likelihood.LnlModel(tm) for seq in range(nseq)]
for node, header in zip(nodes, seqnames):
    node.set_partials(partials[header])  # retrieve partial likelihoods from partials dictionary

for i, j in itertools.combinations(range(nseq), 2):
    brlen, var = brent_optimise(nodes[i], nodes[j], verbose=verbose)
    distances[i, j] = distances[j, i] = brlen
    variances[i, j] = variances[j, i] = var

dm = DistanceMatrix.from_array(distances, names=seqnames)
vm = DistanceMatrix.from_array(variances, names=seqnames)
return dm, vm
<SYSTEM_TASK:> Return sequences simulated under the transition matrix's model <END_TASK> <USER_TASK:> Description: def simulate(self, nsites, transition_matrix, tree, ncat=1, alpha=1): """ Return sequences simulated under the transition matrix's model """
sim = SequenceSimulator(transition_matrix, tree, ncat, alpha) return list(sim.simulate(nsites).items())
<SYSTEM_TASK:> Return a new Alignment that is a bootstrap replicate of self <END_TASK> <USER_TASK:> Description: def bootstrap(self): """ Return a new Alignment that is a bootstrap replicate of self """
new_sites = sorted(sample_wr(self.get_sites())) seqs = list(zip(self.get_names(), (''.join(seq) for seq in zip(*new_sites)))) return self.__class__(seqs)
<SYSTEM_TASK:> Evolve multiple sites during one tree traversal <END_TASK> <USER_TASK:> Description: def simulate(self, n): """ Evolve multiple sites during one tree traversal """
self.tree._tree.seed_node.states = self.ancestral_states(n) categories = np.random.randint(self.ncat, size=n).astype(np.intc) for node in self.tree.preorder(skip_seed=True): node.states = self.evolve_states(node.parent_node.states, categories, node.pmats) if node.is_leaf(): self.sequences[node.taxon.label] = node.states return self.sequences_to_string()
<SYSTEM_TASK:> Generate ancestral sequence states from the equilibrium frequencies <END_TASK> <USER_TASK:> Description: def ancestral_states(self, n): """ Generate ancestral sequence states from the equilibrium frequencies """
anc = np.empty(n, dtype=np.intc) _weighted_choices(self.state_indices, self.freqs, anc) return anc
<SYSTEM_TASK:> Convert state indices to a string of characters <END_TASK> <USER_TASK:> Description: def sequences_to_string(self): """ Convert state indices to a string of characters """
return {k: ''.join(self.states[v]) for (k, v) in self.sequences.items()}
<SYSTEM_TASK:> Create a Motorola S-Record record of given data. <END_TASK> <USER_TASK:> Description: def pack_srec(type_, address, size, data): """Create a Motorola S-Record record of given data. """
if type_ in '0159': line = '{:02X}{:04X}'.format(size + 2 + 1, address) elif type_ in '268': line = '{:02X}{:06X}'.format(size + 3 + 1, address) elif type_ in '37': line = '{:02X}{:08X}'.format(size + 4 + 1, address) else: raise Error( "expected record type 0..3 or 5..9, but got '{}'".format(type_)) if data: line += binascii.hexlify(data).decode('ascii').upper() return 'S{}{}{:02X}'.format(type_, line, crc_srec(line))
<SYSTEM_TASK:> Unpack given Motorola S-Record record into variables. <END_TASK> <USER_TASK:> Description: def unpack_srec(record): """Unpack given Motorola S-Record record into variables. """
# Minimum STSSCC, where T is type, SS is size and CC is crc. if len(record) < 6: raise Error("record '{}' too short".format(record)) if record[0] != 'S': raise Error( "record '{}' not starting with an 'S'".format(record)) size = int(record[2:4], 16) type_ = record[1:2] if type_ in '0159': width = 4 elif type_ in '268': width = 6 elif type_ in '37': width = 8 else: raise Error( "expected record type 0..3 or 5..9, but got '{}'".format(type_)) data_offset = (4 + width) crc_offset = (4 + 2 * size - 2) address = int(record[4:data_offset], 16) data = binascii.unhexlify(record[data_offset:crc_offset]) actual_crc = int(record[crc_offset:], 16) expected_crc = crc_srec(record[2:crc_offset]) if actual_crc != expected_crc: raise Error( "expected crc '{:02X}' in record {}, but got '{:02X}'".format( expected_crc, record, actual_crc)) return (type_, address, size - 1 - width // 2, data)
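crc_srec is used above but not defined in this excerpt. The Motorola S-record checksum is the ones' complement of the least significant byte of the sum of the count, address and data bytes, so a plausible sketch of the helper (an assumption about its implementation, not the verbatim source) is:

import binascii

def crc_srec(hexstr):
    # Sum the bytes encoded by the hex string, keep the low byte, complement it.
    total = sum(binascii.unhexlify(hexstr))
    return (0xff - (total & 0xff)) & 0xff

print('{:02X}'.format(crc_srec('0F00007E')))  # -> '72'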
<SYSTEM_TASK:> Create an Intel HEX record of given data. <END_TASK> <USER_TASK:> Description: def pack_ihex(type_, address, size, data): """Create an Intel HEX record of given data. """
line = '{:02X}{:04X}{:02X}'.format(size, address, type_) if data: line += binascii.hexlify(data).decode('ascii').upper() return ':{}{:02X}'.format(line, crc_ihex(line))
<SYSTEM_TASK:> Unpack given Intel HEX record into variables. <END_TASK> <USER_TASK:> Description: def unpack_ihex(record): """Unpack given Intel HEX record into variables. """
# Minimum :SSAAAATTCC, where SS is size, AAAA is address, TT is # type and CC is crc. if len(record) < 11: raise Error("record '{}' too short".format(record)) if record[0] != ':': raise Error("record '{}' not starting with a ':'".format(record)) size = int(record[1:3], 16) address = int(record[3:7], 16) type_ = int(record[7:9], 16) if size > 0: data = binascii.unhexlify(record[9:9 + 2 * size]) else: data = b'' actual_crc = int(record[9 + 2 * size:], 16) expected_crc = crc_ihex(record[1:9 + 2 * size]) if actual_crc != expected_crc: raise Error( "expected crc '{:02X}' in record {}, but got '{:02X}'".format( expected_crc, record, actual_crc)) return (type_, address, size, data)
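crc_ihex is likewise undefined here. Intel HEX uses the two's complement of the least significant byte of the record byte sum, so a plausible sketch of the helper (again an assumption) is:

import binascii

def crc_ihex(hexstr):
    # Two's complement of the low byte of the byte sum.
    total = sum(binascii.unhexlify(hexstr))
    return (-total) & 0xff

# The classic example record ':0300300002337A1E' carries checksum 0x1E.
print('{:02X}'.format(crc_ihex('0300300002337A')))  # -> '1E'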
<SYSTEM_TASK:> Return chunks of the data aligned as given by `alignment`. `size` <END_TASK> <USER_TASK:> Description: def chunks(self, size=32, alignment=1): """Return chunks of the data aligned as given by `alignment`. `size` must be a multiple of `alignment`. Each chunk is returned as a named two-tuple of its address and data. """
if (size % alignment) != 0: raise Error( 'size {} is not a multiple of alignment {}'.format( size, alignment)) address = self.address data = self.data # First chunk may be shorter than `size` due to alignment. chunk_offset = (address % alignment) if chunk_offset != 0: first_chunk_size = (alignment - chunk_offset) yield self._Chunk(address, data[:first_chunk_size]) address += (first_chunk_size // self._word_size_bytes) data = data[first_chunk_size:] else: first_chunk_size = 0 for offset in range(0, len(data), size): yield self._Chunk(address + offset // self._word_size_bytes, data[offset:offset + size])
<SYSTEM_TASK:> Add given data to this segment. The added data must be adjacent to <END_TASK> <USER_TASK:> Description: def add_data(self, minimum_address, maximum_address, data, overwrite): """Add given data to this segment. The added data must be adjacent to the current segment data, otherwise an exception is thrown. """
if minimum_address == self.maximum_address: self.maximum_address = maximum_address self.data += data elif maximum_address == self.minimum_address: self.minimum_address = minimum_address self.data = data + self.data elif (overwrite and minimum_address < self.maximum_address and maximum_address > self.minimum_address): self_data_offset = minimum_address - self.minimum_address # Prepend data. if self_data_offset < 0: self_data_offset *= -1 self.data = data[:self_data_offset] + self.data del data[:self_data_offset] self.minimum_address = minimum_address # Overwrite overlapping part. self_data_left = len(self.data) - self_data_offset if len(data) <= self_data_left: self.data[self_data_offset:self_data_offset + len(data)] = data data = bytearray() else: self.data[self_data_offset:] = data[:self_data_left] data = data[self_data_left:] # Append data. if len(data) > 0: self.data += data self.maximum_address = maximum_address else: raise AddDataError( 'data added to a segment must be adjacent to or overlapping ' 'with the original segment data')
<SYSTEM_TASK:> Remove given data range from this segment. Returns the second <END_TASK> <USER_TASK:> Description: def remove_data(self, minimum_address, maximum_address): """Remove given data range from this segment. Returns the second segment if the removed data splits this segment in two. """
if ((minimum_address >= self.maximum_address) and (maximum_address <= self.minimum_address)): raise Error('cannot remove data that is not part of the segment') if minimum_address < self.minimum_address: minimum_address = self.minimum_address if maximum_address > self.maximum_address: maximum_address = self.maximum_address remove_size = maximum_address - minimum_address part1_size = minimum_address - self.minimum_address part1_data = self.data[0:part1_size] part2_data = self.data[part1_size + remove_size:] if len(part1_data) and len(part2_data): # Update this segment and return the second segment. self.maximum_address = self.minimum_address + part1_size self.data = part1_data return _Segment(maximum_address, maximum_address + len(part2_data), part2_data, self._word_size_bytes) else: # Update this segment. if len(part1_data) > 0: self.maximum_address = minimum_address self.data = part1_data elif len(part2_data) > 0: self.minimum_address = maximum_address self.data = part2_data else: self.maximum_address = self.minimum_address self.data = bytearray()
<SYSTEM_TASK:> Add segments by ascending address. <END_TASK> <USER_TASK:> Description: def add(self, segment, overwrite=False): """Add segments by ascending address. """
if self._list: if segment.minimum_address == self._current_segment.maximum_address: # Fast insertion for adjacent segments. self._current_segment.add_data(segment.minimum_address, segment.maximum_address, segment.data, overwrite) else: # Linear insert. for i, s in enumerate(self._list): if segment.minimum_address <= s.maximum_address: break if segment.minimum_address > s.maximum_address: # Non-overlapping, non-adjacent after. self._list.append(segment) elif segment.maximum_address < s.minimum_address: # Non-overlapping, non-adjacent before. self._list.insert(i, segment) else: # Adjacent or overlapping. s.add_data(segment.minimum_address, segment.maximum_address, segment.data, overwrite) segment = s self._current_segment = segment self._current_segment_index = i # Remove overwritten and merge adjacent segments. while self._current_segment is not self._list[-1]: s = self._list[self._current_segment_index + 1] if self._current_segment.maximum_address >= s.maximum_address: # The whole segment is overwritten. del self._list[self._current_segment_index + 1] elif self._current_segment.maximum_address >= s.minimum_address: # Adjacent or beginning of the segment overwritten. self._current_segment.add_data( self._current_segment.maximum_address, s.maximum_address, s.data[self._current_segment.maximum_address - s.minimum_address:], overwrite=False) del self._list[self._current_segment_index+1] break else: # Segments are not overlapping, nor adjacent. break else: self._list.append(segment) self._current_segment = segment self._current_segment_index = 0
<SYSTEM_TASK:> Iterate over all segments and return chunks of the data aligned as <END_TASK> <USER_TASK:> Description: def chunks(self, size=32, alignment=1): """Iterate over all segments and return chunks of the data aligned as given by `alignment`. `size` must be a multiple of `alignment`. Each chunk is returned as a named two-tuple of its address and data. """
if (size % alignment) != 0: raise Error( 'size {} is not a multiple of alignment {}'.format( size, alignment)) for segment in self: for chunk in segment.chunks(size, alignment): yield chunk
<SYSTEM_TASK:> The minimum address of the data, or ``None`` if the file is empty. <END_TASK> <USER_TASK:> Description: def minimum_address(self): """The minimum address of the data, or ``None`` if the file is empty. """
minimum_address = self._segments.minimum_address if minimum_address is not None: minimum_address //= self.word_size_bytes return minimum_address
<SYSTEM_TASK:> The maximum address of the data, or ``None`` if the file is empty. <END_TASK> <USER_TASK:> Description: def maximum_address(self): """The maximum address of the data, or ``None`` if the file is empty. """
maximum_address = self._segments.maximum_address if maximum_address is not None: maximum_address //= self.word_size_bytes return maximum_address
<SYSTEM_TASK:> Add given data string by guessing its format. The format must be <END_TASK> <USER_TASK:> Description: def add(self, data, overwrite=False): """Add given data string by guessing its format. The format must be Motorola S-Records, Intel HEX or TI-TXT. Set `overwrite` to ``True`` to allow already added data to be overwritten. """
if is_srec(data): self.add_srec(data, overwrite) elif is_ihex(data): self.add_ihex(data, overwrite) elif is_ti_txt(data): self.add_ti_txt(data, overwrite) else: raise UnsupportedFileFormatError()
<SYSTEM_TASK:> Add given Motorola S-Records string. Set `overwrite` to ``True`` to <END_TASK> <USER_TASK:> Description: def add_srec(self, records, overwrite=False): """Add given Motorola S-Records string. Set `overwrite` to ``True`` to allow already added data to be overwritten. """
for record in StringIO(records): type_, address, size, data = unpack_srec(record.strip()) if type_ == '0': self._header = data elif type_ in '123': address *= self.word_size_bytes self._segments.add(_Segment(address, address + size, bytearray(data), self.word_size_bytes), overwrite) elif type_ in '789': self.execution_start_address = address
<SYSTEM_TASK:> Add given Intel HEX records string. Set `overwrite` to ``True`` to <END_TASK> <USER_TASK:> Description: def add_ihex(self, records, overwrite=False): """Add given Intel HEX records string. Set `overwrite` to ``True`` to allow already added data to be overwritten. """
extended_segment_address = 0 extended_linear_address = 0 for record in StringIO(records): type_, address, size, data = unpack_ihex(record.strip()) if type_ == IHEX_DATA: address = (address + extended_segment_address + extended_linear_address) address *= self.word_size_bytes self._segments.add(_Segment(address, address + size, bytearray(data), self.word_size_bytes), overwrite) elif type_ == IHEX_END_OF_FILE: pass elif type_ == IHEX_EXTENDED_SEGMENT_ADDRESS: extended_segment_address = int(binascii.hexlify(data), 16) extended_segment_address *= 16 elif type_ == IHEX_EXTENDED_LINEAR_ADDRESS: extended_linear_address = int(binascii.hexlify(data), 16) extended_linear_address <<= 16 elif type_ in [IHEX_START_SEGMENT_ADDRESS, IHEX_START_LINEAR_ADDRESS]: self.execution_start_address = int(binascii.hexlify(data), 16) else: raise Error("expected type 1..5 in record {}, but got {}".format( record, type_))
<SYSTEM_TASK:> Add given TI-TXT string `lines`. Set `overwrite` to ``True`` to <END_TASK> <USER_TASK:> Description: def add_ti_txt(self, lines, overwrite=False): """Add given TI-TXT string `lines`. Set `overwrite` to ``True`` to allow already added data to be overwritten. """
address = None eof_found = False for line in StringIO(lines): # Abort if data is found after end of file. if eof_found: raise Error("bad file terminator") line = line.strip() if len(line) < 1: raise Error("bad line length") if line[0] == 'q': eof_found = True elif line[0] == '@': try: address = int(line[1:], 16) except ValueError: raise Error("bad section address") else: # Try to decode the data. try: data = bytearray(binascii.unhexlify(line.replace(' ', ''))) except (TypeError, binascii.Error): raise Error("bad data") size = len(data) # Check that there are correct number of bytes per # line. There should TI_TXT_BYTES_PER_LINE. Only # exception is last line of section which may be # shorter. if size > TI_TXT_BYTES_PER_LINE: raise Error("bad line length") if address is None: raise Error("missing section address") self._segments.add(_Segment(address, address + size, data, self.word_size_bytes), overwrite) if size == TI_TXT_BYTES_PER_LINE: address += size else: address = None if not eof_found: raise Error("missing file terminator")
<SYSTEM_TASK:> Add given data at given address. Set `overwrite` to ``True`` to <END_TASK> <USER_TASK:> Description: def add_binary(self, data, address=0, overwrite=False): """Add given data at given address. Set `overwrite` to ``True`` to allow already added data to be overwritten. """
address *= self.word_size_bytes self._segments.add(_Segment(address, address + len(data), bytearray(data), self.word_size_bytes), overwrite)
<SYSTEM_TASK:> Open given file and add its data by guessing its format. The format <END_TASK> <USER_TASK:> Description: def add_file(self, filename, overwrite=False): """Open given file and add its data by guessing its format. The format must be Motorola S-Records, Intel HEX or TI-TXT. Set `overwrite` to ``True`` to allow already added data to be overwritten. """
with open(filename, 'r') as fin: self.add(fin.read(), overwrite)
<SYSTEM_TASK:> Open given Motorola S-Records file and add its records. Set <END_TASK> <USER_TASK:> Description: def add_srec_file(self, filename, overwrite=False): """Open given Motorola S-Records file and add its records. Set `overwrite` to ``True`` to allow already added data to be overwritten. """
with open(filename, 'r') as fin: self.add_srec(fin.read(), overwrite)
<SYSTEM_TASK:> Open given Intel HEX file and add its records. Set `overwrite` to <END_TASK> <USER_TASK:> Description: def add_ihex_file(self, filename, overwrite=False): """Open given Intel HEX file and add its records. Set `overwrite` to ``True`` to allow already added data to be overwritten. """
with open(filename, 'r') as fin: self.add_ihex(fin.read(), overwrite)
<SYSTEM_TASK:> Open given TI-TXT file and add its contents. Set `overwrite` to <END_TASK> <USER_TASK:> Description: def add_ti_txt_file(self, filename, overwrite=False): """Open given TI-TXT file and add its contents. Set `overwrite` to ``True`` to allow already added data to be overwritten. """
with open(filename, 'r') as fin: self.add_ti_txt(fin.read(), overwrite)
<SYSTEM_TASK:> Open given binary file and add its contents. Set `overwrite` to <END_TASK> <USER_TASK:> Description: def add_binary_file(self, filename, address=0, overwrite=False): """Open given binary file and add its contents. Set `overwrite` to ``True`` to allow already added data to be overwritten. """
with open(filename, 'rb') as fin: self.add_binary(fin.read(), address, overwrite)
<SYSTEM_TASK:> Format the binary file as Motorola S-Records records and return <END_TASK> <USER_TASK:> Description: def as_srec(self, number_of_data_bytes=32, address_length_bits=32): """Format the binary file as Motorola S-Records records and return them as a string. `number_of_data_bytes` is the number of data bytes in each record. `address_length_bits` is the number of address bits in each record. >>> print(binfile.as_srec()) S32500000100214601360121470136007EFE09D219012146017E17C20001FF5F16002148011973 S32500000120194E79234623965778239EDA3F01B2CA3F0156702B5E712B722B73214601342199 S5030002FA """
header = [] if self._header is not None: record = pack_srec('0', 0, len(self._header), self._header) header.append(record) type_ = str((address_length_bits // 8) - 1) if type_ not in '123': raise Error("expected data record type 1..3, but got {}".format( type_)) data = [pack_srec(type_, address, len(data), data) for address, data in self._segments.chunks(number_of_data_bytes)] number_of_records = len(data) if number_of_records <= 0xffff: footer = [pack_srec('5', number_of_records, 0, None)] elif number_of_records <= 0xffffff: footer = [pack_srec('6', number_of_records, 0, None)] else: raise Error('too many records {}'.format(number_of_records)) # Add the execution start address. if self.execution_start_address is not None: if type_ == '1': record = pack_srec('9', self.execution_start_address, 0, None) elif type_ == '2': record = pack_srec('8', self.execution_start_address, 0, None) else: record = pack_srec('7', self.execution_start_address, 0, None) footer.append(record) return '\n'.join(header + data + footer) + '\n'
<SYSTEM_TASK:> Format the binary file as Intel HEX records and return them as a <END_TASK> <USER_TASK:> Description: def as_ihex(self, number_of_data_bytes=32, address_length_bits=32): """Format the binary file as Intel HEX records and return them as a string. `number_of_data_bytes` is the number of data bytes in each record. `address_length_bits` is the number of address bits in each record. >>> print(binfile.as_ihex()) :20010000214601360121470136007EFE09D219012146017E17C20001FF5F16002148011979 :20012000194E79234623965778239EDA3F01B2CA3F0156702B5E712B722B7321460134219F :00000001FF """
def i32hex(address, extended_linear_address, data_address): if address > 0xffffffff: raise Error( 'cannot address more than 4 GB in I32HEX files (32 ' 'bits addresses)') address_upper_16_bits = (address >> 16) address &= 0xffff # All segments are sorted by address. Update the # extended linear address when required. if address_upper_16_bits > extended_linear_address: extended_linear_address = address_upper_16_bits packed = pack_ihex(IHEX_EXTENDED_LINEAR_ADDRESS, 0, 2, binascii.unhexlify('{:04X}'.format( extended_linear_address))) data_address.append(packed) return address, extended_linear_address def i16hex(address, extended_segment_address, data_address): if address > 16 * 0xffff + 0xffff: raise Error( 'cannot address more than 1 MB in I16HEX files (20 ' 'bits addresses)') address_lower = (address - 16 * extended_segment_address) # All segments are sorted by address. Update the # extended segment address when required. if address_lower > 0xffff: extended_segment_address = (4096 * (address >> 16)) if extended_segment_address > 0xffff: extended_segment_address = 0xffff address_lower = (address - 16 * extended_segment_address) packed = pack_ihex(IHEX_EXTENDED_SEGMENT_ADDRESS, 0, 2, binascii.unhexlify('{:04X}'.format( extended_segment_address))) data_address.append(packed) return address_lower, extended_segment_address def i8hex(address): if address > 0xffff: raise Error( 'cannot address more than 64 kB in I8HEX files (16 ' 'bits addresses)') data_address = [] extended_segment_address = 0 extended_linear_address = 0 for address, data in self._segments.chunks(number_of_data_bytes): if address_length_bits == 32: address, extended_linear_address = i32hex(address, extended_linear_address, data_address) elif address_length_bits == 24: address, extended_segment_address = i16hex(address, extended_segment_address, data_address) elif address_length_bits == 16: i8hex(address) else: raise Error( 'expected address length 16, 24 or 32, but got {}'.format( address_length_bits)) data_address.append(pack_ihex(IHEX_DATA, address, len(data), data)) footer = [] if self.execution_start_address is not None: if address_length_bits == 24: address = binascii.unhexlify( '{:08X}'.format(self.execution_start_address)) footer.append(pack_ihex(IHEX_START_SEGMENT_ADDRESS, 0, 4, address)) elif address_length_bits == 32: address = binascii.unhexlify( '{:08X}'.format(self.execution_start_address)) footer.append(pack_ihex(IHEX_START_LINEAR_ADDRESS, 0, 4, address)) footer.append(pack_ihex(IHEX_END_OF_FILE, 0, 0, None)) return '\n'.join(data_address + footer) + '\n'
<SYSTEM_TASK:> Return a byte string of all data within given address range. <END_TASK> <USER_TASK:> Description: def as_binary(self, minimum_address=None, maximum_address=None, padding=None): """Return a byte string of all data within given address range. `minimum_address` is the absolute minimum address of the resulting binary data. `maximum_address` is the absolute maximum address of the resulting binary data (non-inclusive). `padding` is the word value of the padding between non-adjacent segments. Give as a bytes object of length 1 when the word size is 8 bits, length 2 when the word size is 16 bits, and so on. >>> binfile.as_binary() bytearray(b'!F\\x016\\x01!G\\x016\\x00~\\xfe\\t\\xd2\\x19\\x01!F\\x01~\\x17\\xc2\\x00\\x01 \\xff_\\x16\\x00!H\\x01\\x19\\x19Ny#F#\\x96Wx#\\x9e\\xda?\\x01\\xb2\\xca?\\x01Vp+^q+r+s! F\\x014!') """
if len(self._segments) == 0: return b'' if minimum_address is None: current_maximum_address = self.minimum_address else: current_maximum_address = minimum_address if maximum_address is None: maximum_address = self.maximum_address if current_maximum_address >= maximum_address: return b'' if padding is None: padding = b'\xff' * self.word_size_bytes binary = bytearray() for address, data in self._segments: length = len(data) // self.word_size_bytes # Discard data below the minimum address. if address < current_maximum_address: if address + length <= current_maximum_address: continue offset = (current_maximum_address - address) * self.word_size_bytes data = data[offset:] length = len(data) // self.word_size_bytes address = current_maximum_address # Discard data above the maximum address. if address + length > maximum_address: if address < maximum_address: size = (maximum_address - address) * self.word_size_bytes data = data[:size] length = len(data) // self.word_size_bytes elif maximum_address >= current_maximum_address: binary += padding * (maximum_address - current_maximum_address) break binary += padding * (address - current_maximum_address) binary += data current_maximum_address = address + length return binary
<SYSTEM_TASK:> Format the binary file as a string of values separated by the given <END_TASK> <USER_TASK:> Description: def as_array(self, minimum_address=None, padding=None, separator=', '): """Format the binary file as a string of values separated by the given separator `separator`. This function can be used to generate array initialization code for C and other languages. `minimum_address` is the start address of the resulting binary data. `padding` is the value of the padding between non-adjacent segments. >>> binfile.as_array() '0x21, 0x46, 0x01, 0x36, 0x01, 0x21, 0x47, 0x01, 0x36, 0x00, 0x7e, 0xfe, 0x09, 0xd2, 0x19, 0x01, 0x21, 0x46, 0x01, 0x7e, 0x17, 0xc2, 0x00, 0x01, 0xff, 0x5f, 0x16, 0x00, 0x21, 0x48, 0x01, 0x19, 0x19, 0x4e, 0x79, 0x23, 0x46, 0x23, 0x96, 0x57, 0x78, 0x23, 0x9e, 0xda, 0x3f, 0x01, 0xb2, 0xca, 0x3f, 0x01, 0x56, 0x70, 0x2b, 0x5e, 0x71, 0x2b, 0x72, 0x2b, 0x73, 0x21, 0x46, 0x01, 0x34, 0x21' """
binary_data = self.as_binary(minimum_address, padding=padding) words = [] for offset in range(0, len(binary_data), self.word_size_bytes): word = 0 for byte in binary_data[offset:offset + self.word_size_bytes]: word <<= 8 word += byte words.append('0x{:02x}'.format(word)) return separator.join(words)
<SYSTEM_TASK:> Format the binary file as a hexdump and return it as a string. <END_TASK> <USER_TASK:> Description: def as_hexdump(self): """Format the binary file as a hexdump and return it as a string. >>> print(binfile.as_hexdump()) 00000100 21 46 01 36 01 21 47 01 36 00 7e fe 09 d2 19 01 |!F.6.!G.6.~.....| 00000110 21 46 01 7e 17 c2 00 01 ff 5f 16 00 21 48 01 19 |!F.~....._..!H..| 00000120 19 4e 79 23 46 23 96 57 78 23 9e da 3f 01 b2 ca |.Ny#F#.Wx#..?...| 00000130 3f 01 56 70 2b 5e 71 2b 72 2b 73 21 46 01 34 21 |?.Vp+^q+r+s!F.4!| """
# Empty file? if len(self) == 0: return '\n' non_dot_characters = set(string.printable) non_dot_characters -= set(string.whitespace) non_dot_characters |= set(' ') def align16(address): return address - (address % 16) def padding(length): return [None] * length def format_line(address, data): """`data` is a list of integers and None for unused elements. """ data += padding(16 - len(data)) hexdata = [] for byte in data: if byte is not None: elem = '{:02x}'.format(byte) else: elem = ' ' hexdata.append(elem) first_half = ' '.join(hexdata[0:8]) second_half = ' '.join(hexdata[8:16]) text = '' for byte in data: if byte is None: text += ' ' elif chr(byte) in non_dot_characters: text += chr(byte) else: text += '.' return '{:08x} {:23s} {:23s} |{:16s}|'.format( address, first_half, second_half, text) # Format one line at a time. lines = [] line_address = align16(self.minimum_address) line_data = [] for chunk in self._segments.chunks(size=16, alignment=16): aligned_chunk_address = align16(chunk.address) if aligned_chunk_address > line_address: lines.append(format_line(line_address, line_data)) if aligned_chunk_address > line_address + 16: lines.append('...') line_address = aligned_chunk_address line_data = [] line_data += padding(chunk.address - line_address - len(line_data)) line_data += [byte for byte in chunk.data] lines.append(format_line(line_address, line_data)) return '\n'.join(lines) + '\n'
<SYSTEM_TASK:> Fill all empty space between segments with given value `value`. <END_TASK> <USER_TASK:> Description: def fill(self, value=b'\xff'): """Fill all empty space between segments with given value `value`. """
previous_segment_maximum_address = None fill_segments = [] for address, data in self._segments: maximum_address = address + len(data) if previous_segment_maximum_address is not None: fill_size = address - previous_segment_maximum_address fill_size_words = fill_size // self.word_size_bytes fill_segments.append(_Segment( previous_segment_maximum_address, previous_segment_maximum_address + fill_size, value * fill_size_words, self.word_size_bytes)) previous_segment_maximum_address = maximum_address for segment in fill_segments: self._segments.add(segment)
<SYSTEM_TASK:> Exclude given range and keep the rest. <END_TASK> <USER_TASK:> Description: def exclude(self, minimum_address, maximum_address): """Exclude given range and keep the rest. `minimum_address` is the first word address to exclude (inclusive). `maximum_address` is the last word address to exclude (exclusive). """
if maximum_address < minimum_address: raise Error('bad address range') minimum_address *= self.word_size_bytes maximum_address *= self.word_size_bytes self._segments.remove(minimum_address, maximum_address)
<SYSTEM_TASK:> Keep given range and discard the rest. <END_TASK> <USER_TASK:> Description: def crop(self, minimum_address, maximum_address): """Keep given range and discard the rest. `minimum_address` is the first word address to keep (inclusive). `maximum_address` is the last word address to keep (exclusive). """
minimum_address *= self.word_size_bytes maximum_address *= self.word_size_bytes maximum_address_address = self._segments.maximum_address self._segments.remove(0, minimum_address) self._segments.remove(maximum_address, maximum_address_address)
<SYSTEM_TASK:> Return a string of human readable information about the binary <END_TASK> <USER_TASK:> Description: def info(self): """Return a string of human readable information about the binary file. .. code-block:: python >>> print(binfile.info()) Data ranges: 0x00000100 - 0x00000140 (64 bytes) """
info = '' if self._header is not None: if self._header_encoding is None: header = '' for b in bytearray(self.header): if chr(b) in string.printable: header += chr(b) else: header += '\\x{:02x}'.format(b) else: header = self.header info += 'Header: "{}"\n'.format(header) if self.execution_start_address is not None: info += 'Execution start address: 0x{:08x}\n'.format( self.execution_start_address) info += 'Data ranges:\n\n' for address, data in self._segments: minimum_address = address size = len(data) maximum_address = (minimum_address + size // self.word_size_bytes) info += 4 * ' ' info += '0x{:08x} - 0x{:08x} ({})\n'.format( minimum_address, maximum_address, format_size(size, binary=True)) return info
<SYSTEM_TASK:> Collect metric info in a single preorder traversal. <END_TASK> <USER_TASK:> Description: def _precompute(self, tree): """ Collect metric info in a single preorder traversal. """
d = {} for n in tree.preorder_internal_node_iter(): d[n] = namedtuple('NodeDist', ['dist_from_root', 'edges_from_root']) if n.parent_node: d[n].dist_from_root = d[n.parent_node].dist_from_root + n.edge_length d[n].edges_from_root = d[n.parent_node].edges_from_root + 1 else: d[n].dist_from_root = 0.0 d[n].edges_from_root = 0 return d
<SYSTEM_TASK:> Populate the vectors m and M. <END_TASK> <USER_TASK:> Description: def _get_vectors(self, tree, precomputed_info): """ Populate the vectors m and M. """
little_m = [] big_m = [] leaf_nodes = sorted(tree.leaf_nodes(), key=lambda x: x.taxon.label) # inner nodes, sorted order for leaf_a, leaf_b in combinations(leaf_nodes, 2): mrca = tree.mrca(taxa=[leaf_a.taxon, leaf_b.taxon]) little_m.append(precomputed_info[mrca].edges_from_root) big_m.append(precomputed_info[mrca].dist_from_root) # leaf nodes, sorted order for leaf in leaf_nodes: little_m.append(1) big_m.append(leaf.edge_length) return np.array(little_m), np.array(big_m)
<SYSTEM_TASK:> Deletes sequences that were marked for deletion by convert_to_IUPAC <END_TASK> <USER_TASK:> Description: def remove_empty(rec): """ Deletes sequences that were marked for deletion by convert_to_IUPAC """
for header, sequence in rec.mapping.items(): if all(char == 'X' for char in sequence): rec.headers.remove(header) rec.sequences.remove(sequence) rec.update() return rec
<SYSTEM_TASK:> Utility to properly transliterate text. <END_TASK> <USER_TASK:> Description: def transliterate(text): """ Utility to properly transliterate text. """
text = unidecode(six.text_type(text)) text = text.replace('@', 'a') return text
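A quick demonstration of the intended behaviour (requires the unidecode and six packages; the sample string is arbitrary):

import six
from unidecode import unidecode

print(unidecode(six.text_type('Łódź')))  # -> 'Lodz'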
<SYSTEM_TASK:> Transliterate a given string into the latin alphabet. <END_TASK> <USER_TASK:> Description: def latinize(mapping, bind, values): """ Transliterate a given string into the latin alphabet. """
for v in values: if isinstance(v, six.string_types): v = transliterate(v) yield v
<SYSTEM_TASK:> Merge all the strings. Put space between them. <END_TASK> <USER_TASK:> Description: def join(mapping, bind, values): """ Merge all the strings. Put space between them. """
return [' '.join([six.text_type(v) for v in values if v is not None])]
<SYSTEM_TASK:> Generate a sha1 for each of the given values. <END_TASK> <USER_TASK:> Description: def hash(mapping, bind, values): """ Generate a sha1 for each of the given values. """
for v in values: if v is None: continue if not isinstance(v, six.string_types): v = six.text_type(v) yield sha1(v.encode('utf-8')).hexdigest()
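For reference, the underlying standard-library call applied to each value:

from hashlib import sha1

print(sha1('hello'.encode('utf-8')).hexdigest()[:8])  # -> 'aaf4c61d'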
<SYSTEM_TASK:> Perform several types of string cleaning for titles etc. <END_TASK> <USER_TASK:> Description: def clean(mapping, bind, values): """ Perform several types of string cleaning for titles etc. """
categories = {'C': ' '} for value in values: if isinstance(value, six.string_types): value = normality.normalize(value, lowercase=False, collapse=True, decompose=False, replace_categories=categories) yield value
<SYSTEM_TASK:> Checks that all nodes are reachable from the first node - i.e. that the <END_TASK> <USER_TASK:> Description: def isconnected(mask): """ Checks that all nodes are reachable from the first node - i.e. that the graph is fully connected. """
nodes_to_check = list((np.where(mask[0, :])[0])[1:]) seen = [True] + [False] * (len(mask) - 1) while nodes_to_check and not all(seen): node = nodes_to_check.pop() reachable = np.where(mask[node, :])[0] for i in reachable: if not seen[i]: nodes_to_check.append(i) seen[i] = True return all(seen)
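A small usage example, assuming the isconnected function defined above is in scope; node 2 is unreachable from node 0, so the graph is not fully connected:

import numpy as np

mask = np.array([[True, True, False],
                 [True, True, False],
                 [False, False, True]])
print(isconnected(mask))  # -> False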
<SYSTEM_TASK:> Scales all rows to length 1. Fails when row is 0-length, so it <END_TASK> <USER_TASK:> Description: def normalise_rows(matrix): """ Scales all rows to length 1. Fails when row is 0-length, so it leaves these unchanged """
lengths = np.apply_along_axis(np.linalg.norm, 1, matrix) if not (lengths > 0).all(): # raise ValueError('Cannot normalise 0 length vector to length 1') # print(matrix) lengths[lengths == 0] = 1 return matrix / lengths[:, np.newaxis]
<SYSTEM_TASK:> Returns the k-th nearest distances, row-wise, as a column vector <END_TASK> <USER_TASK:> Description: def kdists(matrix, k=7, ix=None): """ Returns the k-th nearest distances, row-wise, as a column vector """
ix = ix or kindex(matrix, k) return matrix[ix][np.newaxis].T
<SYSTEM_TASK:> Returns indices to select the kth nearest neighbour <END_TASK> <USER_TASK:> Description: def kindex(matrix, k): """ Returns indices to select the kth nearest neighbour"""
ix = (np.arange(len(matrix)), matrix.argsort(axis=0)[k]) return ix
<SYSTEM_TASK:> Creates a boolean mask to include points within k nearest <END_TASK> <USER_TASK:> Description: def kmask(matrix, k=7, dists=None, logic='or'): """ Creates a boolean mask to include points within k nearest neighbours, and exclude the rest. Logic can be OR or AND. OR gives the k-nearest-neighbour mask, AND gives the mutual k-nearest-neighbour mask."""
dists = (kdists(matrix, k=k) if dists is None else dists) mask = (matrix <= dists) if logic == 'or' or logic == '|': return mask | mask.T elif logic == 'and' or logic == '&': return mask & mask.T return mask
<SYSTEM_TASK:> Returns the local scale based on the k-th nearest neighbour <END_TASK> <USER_TASK:> Description: def kscale(matrix, k=7, dists=None): """ Returns the local scale based on the k-th nearest neighbour """
dists = (kdists(matrix, k=k) if dists is None else dists) scale = dists.dot(dists.T) return scale
<SYSTEM_TASK:> Shift and scale matrix so its minimum value is placed at `shift` and <END_TASK> <USER_TASK:> Description: def shift_and_scale(matrix, shift, scale): """ Shift and scale matrix so its minimum value is placed at `shift` and its maximum value is scaled to `scale` """
zeroed = matrix - matrix.min() scaled = (scale - shift) * (zeroed / zeroed.max()) return scaled + shift
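A quick sanity check, assuming shift_and_scale above is in scope; the result spans exactly [shift, scale]:

import numpy as np

m = np.array([[2.0, 4.0], [6.0, 10.0]])
s = shift_and_scale(m, shift=0.0, scale=1.0)
print(s.min(), s.max())  # -> 0.0 1.0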
<SYSTEM_TASK:> Returns fitted coordinates in specified number of dimensions, and <END_TASK> <USER_TASK:> Description: def coords_by_dimension(self, dimensions=3): """ Returns fitted coordinates in the specified number of dimensions, and the amount of variance explained """
coords_matrix = self.vecs[:, :dimensions] varexp = self.cve[dimensions - 1] return coords_matrix, varexp
<SYSTEM_TASK:> Given a mapping and JSON schema spec, extract a value from ``data`` <END_TASK> <USER_TASK:> Description: def extract_value(mapping, bind, data): """ Given a mapping and JSON schema spec, extract a value from ``data`` and apply certain transformations to normalize the value. """
columns = mapping.get('columns', [mapping.get('column')]) values = [data.get(c) for c in columns] for transform in mapping.get('transforms', []): # any added transforms must also be added to the schema. values = list(TRANSFORMS[transform](mapping, bind, values)) format_str = mapping.get('format') value = values[0] if len(values) else None if not is_empty(format_str): value = format_str % tuple('' if v is None else v for v in values) empty = is_empty(value) if empty: value = mapping.get('default') or bind.schema.get('default') return empty, convert_value(bind, value)
<SYSTEM_TASK:> A wrapper around peakdetect to pack the return values in a nicer format <END_TASK> <USER_TASK:> Description: def peaks(x, y, lookahead=20, delta=0.00003): """ A wrapper around peakdetect to pack the return values in a nicer format """
_max, _min = peakdetect(y, x, lookahead, delta) x_peaks = [p[0] for p in _max] y_peaks = [p[1] for p in _max] x_valleys = [p[0] for p in _min] y_valleys = [p[1] for p in _min] _peaks = [x_peaks, y_peaks] _valleys = [x_valleys, y_valleys] return {"peaks": _peaks, "valleys": _valleys}
<SYSTEM_TASK:> The clustering returned by the hcluster module gives group <END_TASK> <USER_TASK:> Description: def _restricted_growth_notation(l): """ The clustering returned by the hcluster module gives group membership without regard for numerical order. This function preserves the group membership, but sorts the labelling into numerical order. """
list_length = len(l) d = defaultdict(list) for (i, element) in enumerate(l): d[element].append(i) l2 = [None] * list_length for (name, index_list) in enumerate(sorted(d.values(), key=min)): for index in index_list: l2[index] = name return tuple(l2)
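Example, assuming the function above is in scope; group identity is preserved while labels are renumbered 0, 1, 2, ... by order of each group's smallest member index:

print(_restricted_growth_notation([5, 5, 2, 2, 5, 9]))  # -> (0, 0, 1, 1, 0, 2)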
<SYSTEM_TASK:> Alternative representation of group membership - <END_TASK> <USER_TASK:> Description: def get_membership(self): """ Alternative representation of group membership - creates a list with one tuple per group; each tuple contains the indices of its members Example: partition = (0,0,0,1,0,1,2,2) membership = [(0,1,2,4), (3,5), (6,7)] :return: list of tuples giving group memberships by index """
result = defaultdict(list) for (position, value) in enumerate(self.partition_vector): result[value].append(position) return sorted([tuple(x) for x in result.values()])
<SYSTEM_TASK:> Each peak in the peaks of the object is checked for its presence in <END_TASK> <USER_TASK:> Description: def extend_peaks(self, prop_thresh=50): """Each peak in the peaks of the object is checked for its presence in other octaves. If it does not exist, it is created. prop_thresh is the cent range within which the peak in the other octave is expected to be present, i.e., only if there is a peak within this cent range in other octaves, then the peak is considered to be present in that octave. Note that this does not change the peaks of the object. It just returns the extended peaks. """
# octave propagation of the reference peaks temp_peaks = [i + 1200 for i in self.peaks["peaks"][0]] temp_peaks.extend([i - 1200 for i in self.peaks["peaks"][0]]) extended_peaks = [] extended_peaks.extend(self.peaks["peaks"][0]) for i in temp_peaks: # if a peak exists around, don't add this new one. nearest_ind = slope.find_nearest_index(self.peaks["peaks"][0], i) diff = abs(self.peaks["peaks"][0][nearest_ind] - i) diff = np.mod(diff, 1200) if diff > prop_thresh: extended_peaks.append(i) return extended_peaks
<SYSTEM_TASK:> This function plots histogram together with its smoothed <END_TASK> <USER_TASK:> Description: def plot(self, intervals=None, new_fig=True): """This function plots histogram together with its smoothed version and peak information if provided. Just intonation intervals are plotted for a reference."""
import pylab as p if new_fig: p.figure() #step 1: plot histogram p.plot(self.x, self.y, ls='-', c='b', lw='1.5') #step 2: plot peaks first_peak = None last_peak = None if self.peaks: first_peak = min(self.peaks["peaks"][0]) last_peak = max(self.peaks["peaks"][0]) p.plot(self.peaks["peaks"][0], self.peaks["peaks"][1], 'rD', ms=10) p.plot(self.peaks["valleys"][0], self.peaks["valleys"][1], 'yD', ms=5) #Intervals if intervals is not None: #spacing = 0.02*max(self.y) for interval in intervals: if first_peak is not None: if interval <= first_peak or interval >= last_peak: continue p.axvline(x=interval, ls='-.', c='g', lw='1.5') if interval-1200 >= min(self.x): p.axvline(x=interval-1200, ls=':', c='b', lw='0.5') if interval+1200 <= max(self.x): p.axvline(x=interval+1200, ls=':', c='b', lw='0.5') if interval+2400 <= max(self.x): p.axvline(x=interval+2400, ls='-.', c='r', lw='0.5') #spacing *= -1 #p.title("Tonic-aligned complete-range pitch histogram") #p.xlabel("Pitch value (Cents)") #p.ylabel("Normalized frequency of occurence") p.show()
<SYSTEM_TASK:> Helper to map a function over a range of inputs, using a threadpool, with a progress meter <END_TASK> <USER_TASK:> Description: def threadpool_map(task, args, message, concurrency, batchsize=1, nargs=None): """ Helper to map a function over a range of inputs, using a threadpool, with a progress meter """
import concurrent.futures njobs = get_njobs(nargs, args) show_progress = bool(message) batches = grouper(batchsize, tupleise(args)) batched_task = lambda batch: [task(*job) for job in batch] if show_progress: message += ' (TP:{}w:{}b)'.format(concurrency, batchsize) pbar = setup_progressbar(message, njobs, simple_progress=True) pbar.start() with concurrent.futures.ThreadPoolExecutor(max_workers=concurrency) as executor: futures = [] completed_count = 0 for batch in batches: futures.append(executor.submit(batched_task, batch)) if show_progress: for i, fut in enumerate(concurrent.futures.as_completed(futures), start=1): completed_count += len(fut.result()) pbar.update(completed_count) else: concurrent.futures.wait(futures) if show_progress: pbar.finish() return flatten_list([fut.result() for fut in futures])
<SYSTEM_TASK:> If item is not in lst, add item to list at its sorted position <END_TASK> <USER_TASK:> Description: def insort_no_dup(lst, item): """ If item is not in lst, add item to list at its sorted position """
import bisect
ix = bisect.bisect_left(lst, item)
if ix == len(lst) or lst[ix] != item:
    lst[ix:ix] = [item]
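Usage sketch, assuming insort_no_dup above is in scope; new items keep the list sorted and duplicates are ignored:

lst = [1, 3, 5]
insort_no_dup(lst, 4)
insort_no_dup(lst, 3)
print(lst)  # -> [1, 3, 4, 5]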
<SYSTEM_TASK:> Create a phylo_utils.likelihood.GammaMixture for calculating <END_TASK> <USER_TASK:> Description: def create_gamma_model(alignment, missing_data=None, ncat=4): """ Create a phylo_utils.likelihood.GammaMixture for calculating likelihood on a tree, from a treeCl.Alignment and its matching treeCl.Parameters """
model = alignment.parameters.partitions.model freqs = alignment.parameters.partitions.frequencies alpha = alignment.parameters.partitions.alpha if model == 'LG': subs_model = LG(freqs) elif model == 'WAG': subs_model = WAG(freqs) elif model == 'GTR': rates = alignment.parameters.partitions.rates subs_model = GTR(rates, freqs, True) else: raise ValueError("Can't handle this model: {}".format(model)) tm = TransitionMatrix(subs_model) gamma = GammaMixture(alpha, ncat) gamma.init_models(tm, alignment_to_partials(alignment, missing_data)) return gamma
<SYSTEM_TASK:> Sample from lst, with replacement <END_TASK> <USER_TASK:> Description: def sample_wr(lst): """ Sample from lst, with replacement """
arr = np.array(lst) indices = np.random.randint(len(lst), size=len(lst)) sample = np.empty(arr.shape, dtype=arr.dtype) for i, ix in enumerate(indices): sample[i] = arr[ix] return list(sample)
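An equivalent one-liner using numpy's built-in sampling (an alternative formulation, not the library's code):

import numpy as np

lst = [1, 2, 3, 4]
sample = list(np.random.choice(lst, size=len(lst), replace=True))
print(len(sample) == len(lst))  # -> True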
<SYSTEM_TASK:> Coerce inputs into compatible format <END_TASK> <USER_TASK:> Description: def _preprocess_inputs(x, weights): """ Coerce inputs into compatible format """
if weights is None: w_arr = np.ones(len(x)) else: w_arr = np.array(weights) x_arr = np.array(x) if x_arr.ndim == 2: if w_arr.ndim == 1: w_arr = w_arr[:, np.newaxis] return x_arr, w_arr
<SYSTEM_TASK:> Return the weighted arithmetic mean of x <END_TASK> <USER_TASK:> Description: def amean(x, weights=None): """ Return the weighted arithmetic mean of x """
x_arr, w_arr = _preprocess_inputs(x, weights)
return (w_arr * x_arr).sum(axis=0) / w_arr.sum(axis=0)
<SYSTEM_TASK:> Return the weighted geometric mean of x <END_TASK> <USER_TASK:> Description: def gmean(x, weights=None): """ Return the weighted geometric mean of x """
x_arr, w_arr = _preprocess_inputs(x, weights)
return np.exp((w_arr * np.log(x_arr)).sum(axis=0) / w_arr.sum(axis=0))
<SYSTEM_TASK:> Return the weighted harmonic mean of x <END_TASK> <USER_TASK:> Description: def hmean(x, weights=None): """ Return the weighted harmonic mean of x """
x_arr, w_arr = _preprocess_inputs(x, weights)
return w_arr.sum(axis=0) / (w_arr / x_arr).sum(axis=0)
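A quick worked example with equal (default) weights, assuming amean, gmean and hmean above are in scope; they reduce to the ordinary arithmetic, geometric and harmonic means:

x = [1.0, 4.0, 4.0]
print(amean(x))  # -> 3.0
print(gmean(x))  # ~ 2.52 (cube root of 16)
print(hmean(x))  # -> 2.0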
<SYSTEM_TASK:> Returns a list of records in SORT_KEY order <END_TASK> <USER_TASK:> Description: def records(self): """ Returns a list of records in SORT_KEY order """
return [self._records[i] for i in range(len(self._records))]
<SYSTEM_TASK:> Read a directory full of tree files, matching them up to the <END_TASK> <USER_TASK:> Description: def read_trees(self, input_dir): """ Read a directory full of tree files, matching them up to the already loaded alignments """
if self.show_progress: pbar = setup_progressbar("Loading trees", len(self.records)) pbar.start() for i, rec in enumerate(self.records): hook = os.path.join(input_dir, '{}.nwk*'.format(rec.name)) filename = glob.glob(hook) try: with fileIO.freader(filename[0]) as infile: tree = infile.read().decode('utf-8') d = dict(ml_tree=tree) rec.parameters.construct_from_dict(d) except (IOError, IndexError): continue finally: if self.show_progress: pbar.update(i) if self.show_progress: pbar.finish()
<SYSTEM_TASK:> Read a directory full of json parameter files, matching them up to the <END_TASK> <USER_TASK:> Description: def read_parameters(self, input_dir): """ Read a directory full of json parameter files, matching them up to the already loaded alignments """
if self.show_progress:
    pbar = setup_progressbar("Loading parameters", len(self.records))
    pbar.start()
for i, rec in enumerate(self.records):
    hook = os.path.join(input_dir, '{}.json*'.format(rec.name))
    filename = glob.glob(hook)
    try:
        with fileIO.freader(filename[0]) as infile:
            d = json.loads(infile.read().decode('utf-8'))
            rec.parameters.construct_from_dict(d)
    except (IOError, IndexError):
        continue
    finally:
        if self.show_progress:
            pbar.update(i)
if self.show_progress:
    pbar.finish()
<SYSTEM_TASK:> Infer phylogenetic trees for the loaded Alignments <END_TASK> <USER_TASK:> Description: def calc_trees(self, indices=None, task_interface=None, jobhandler=default_jobhandler, batchsize=1, show_progress=True, **kwargs): """ Infer phylogenetic trees for the loaded Alignments :param indices: Only run inference on the alignments at these given indices :param task_interface: Inference tool specified via TaskInterface (default RaxmlTaskInterface) :param jobhandler: Launch jobs via this JobHandler (default SequentialJobHandler; also available are ThreadpoolJobHandler and ProcesspoolJobHandler for running inference in parallel) :param batchsize: Batch size for Thread- or ProcesspoolJobHandlers) :param kwargs: Remaining arguments to pass to the TaskInterface :return: None """
if indices is None: indices = list(range(len(self))) if task_interface is None: task_interface = tasks.RaxmlTaskInterface() records = [self[i] for i in indices] # Scrape args from records args, to_delete = task_interface.scrape_args(records, **kwargs) # Dispatch work msg = '{} Tree estimation'.format(task_interface.name) if show_progress else '' map_result = jobhandler(task_interface.get_task(), args, msg, batchsize) # Process results with fileIO.TempFileList(to_delete): for rec, result in zip(records, map_result): #logger.debug('Result - {}'.format(result)) rec.parameters.construct_from_dict(result)
<SYSTEM_TASK:> Returns the number of species found over all records <END_TASK> <USER_TASK:> Description: def num_species(self): """ Returns the number of species found over all records """
all_headers = reduce(lambda x, y: set(x) | set(y), (rec.get_names() for rec in self.records)) return len(all_headers)
<SYSTEM_TASK:> Return a copy of the collection with all alignment columns permuted <END_TASK> <USER_TASK:> Description: def permuted_copy(self, partition=None): """ Return a copy of the collection with all alignment columns permuted """
def take(n, iterable): return [next(iterable) for _ in range(n)] if partition is None: partition = Partition([1] * len(self)) index_tuples = partition.get_membership() alignments = [] for ix in index_tuples: concat = Concatenation(self, ix) sites = concat.alignment.get_sites() random.shuffle(sites) d = dict(zip(concat.alignment.get_names(), [iter(x) for x in zip(*sites)])) new_seqs = [[(k, ''.join(take(l, d[k]))) for k in d] for l in concat.lengths] for seqs, datatype, name in zip(new_seqs, concat.datatypes, concat.names): alignment = Alignment(seqs, datatype) alignment.name = name alignments.append(alignment) return self.__class__(records=sorted(alignments, key=lambda x: SORT_KEY(x.name)))
<SYSTEM_TASK:> Return a hash of the tuple of indices that specify the group <END_TASK> <USER_TASK:> Description: def get_id(self, grp): """ Return a hash of the tuple of indices that specify the group """
thehash = hex(hash(grp)) if ISPY3: # use default encoding to get bytes thehash = thehash.encode() return self.cache.get(grp, hashlib.sha1(thehash).hexdigest())
<SYSTEM_TASK:> Check for the existence of alignment and result files. <END_TASK> <USER_TASK:> Description: def check_work_done(self, grp): """ Check for the existence of alignment and result files. """
id_ = self.get_id(grp) concat_file = os.path.join(self.cache_dir, '{}.phy'.format(id_)) result_file = os.path.join(self.cache_dir, '{}.{}.json'.format(id_, self.task_interface.name)) return os.path.exists(concat_file), os.path.exists(result_file)
<SYSTEM_TASK:> Write the concatenated alignment to disk in the location specified by <END_TASK> <USER_TASK:> Description: def write_group(self, grp, overwrite=False, **kwargs): """ Write the concatenated alignment to disk in the location specified by self.cache_dir """
id_ = self.get_id(grp) alignment_done, result_done = self.check_work_done(grp) self.cache[grp] = id_ al_filename = os.path.join(self.cache_dir, '{}.phy'.format(id_)) qfile_filename = os.path.join(self.cache_dir, '{}.partitions.txt'.format(id_)) if overwrite or not (alignment_done or result_done): conc = self.collection.concatenate(grp) al = conc.alignment al.write_alignment(al_filename, 'phylip', True) q = conc.qfile(**kwargs) with open(qfile_filename, 'w') as fl: fl.write(q + '\n')
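A sketch of how these caching helpers might fit together (the scorer instance and the group tuple are assumptions):

grp = (0, 1, 2)                                        # hypothetical tuple of alignment indices
alignment_done, result_done = scorer.check_work_done(grp)
if not (alignment_done or result_done):
    scorer.write_group(grp)                            # writes <id>.phy and <id>.partitions.txt to cache_dir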
<SYSTEM_TASK:> Retrieve the results for a group. Requires the result to have been <END_TASK> <USER_TASK:> Description: def get_group_result(self, grp, **kwargs): """ Retrieve the results for a group. Requires the result to have been calculated already - raises an error if the result is not available. """
id_ = self.get_id(grp) self.cache[grp] = id_ # Check if this file is already processed alignment_written, results_written = self.check_work_done(grp) if not results_written: if not alignment_written: self.write_group(grp, **kwargs) logger.error('Alignment {} has not been analysed - run analyse_cache_dir'.format(id_)) raise ValueError('Missing result') else: with open(self.get_result_file(id_)) as fl: return json.load(fl)
<SYSTEM_TASK:> Assumes analysis is done and written to id.json! <END_TASK> <USER_TASK:> Description: def get_partition_score(self, p): """ Assumes analysis is done and written to id.json! """
scores = [] for grp in p.get_membership(): try: result = self.get_group_result(grp) scores.append(result['likelihood']) except ValueError: scores.append(None) # Missing results are stored as None; exclude them from the sum to avoid a TypeError return sum(score for score in scores if score is not None)
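Assuming the per-group analyses have already been written to the cache, scoring a candidate partition might look like this (the Partition construction is a placeholder):

p = Partition([0, 0, 1, 1, 2])             # hypothetical assignment of five loci to three groups
score = scorer.get_partition_score(p)      # sum of per-group likelihoods read from <id>.json files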
<SYSTEM_TASK:> Return the trees associated with a partition, p <END_TASK> <USER_TASK:> Description: def get_partition_trees(self, p): """ Return the trees associated with a partition, p """
trees = [] for grp in p.get_membership(): try: result = self.get_group_result(grp) trees.append(result['ml_tree']) except ValueError: trees.append(None) logger.error('No tree found for group {}'.format(grp)) return trees
<SYSTEM_TASK:> The Expectation step of the CEM algorithm <END_TASK> <USER_TASK:> Description: def expect(self, use_proportions=True): """ The Expectation step of the CEM algorithm """
changed = self.get_changed(self.partition, self.prev_partition) lk_table = self.generate_lktable(self.partition, changed, use_proportions) self.table = self.likelihood_table_to_probs(lk_table)
<SYSTEM_TASK:> The Maximisation step of the CEM algorithm <END_TASK> <USER_TASK:> Description: def maximise(self, **kwargs): """ The Maximisation step of the CEM algorithm """
self.scorer.write_partition(self.partition) self.scorer.analyse_cache_dir(**kwargs) self.likelihood = self.scorer.get_partition_score(self.partition) self.scorer.clean_cache() changed = self.get_changed(self.partition, self.prev_partition) self.update_perlocus_likelihood_objects(self.partition, changed) return self.partition, self.likelihood, sum(inst.get_likelihood() for inst in self.insts)
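A rough sketch of the classification EM loop these two steps support; the initial partition, the classification step and the convergence test are assumptions, not part of the methods shown:

# Hypothetical driver loop for the CEM algorithm
em.set_partition(initial_partition)              # 'em' and 'initial_partition' are assumed to exist
for iteration in range(max_iter):
    em.expect(use_proportions=True)              # E-step: fill em.table with membership probabilities
    # a classification step that builds a new partition from em.table would go here
    partition, lk, per_locus_lk = em.maximise()  # M-step: re-fit parameters and score the partition
    if converged(lk):                            # hypothetical convergence check
        break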
<SYSTEM_TASK:> Store the partition in self.partition, and <END_TASK> <USER_TASK:> Description: def set_partition(self, partition): """ Store the partition in self.partition, and move the old self.partition into self.prev_partition """
assert len(partition) == self.numgrp self.partition, self.prev_partition = partition, self.partition
<SYSTEM_TASK:> Return the loci that are in clusters that have changed between <END_TASK> <USER_TASK:> Description: def get_changed(self, p1, p2): """ Return the loci that are in clusters that have changed between partitions p1 and p2 """
if p1 is None or p2 is None: return list(range(len(self.insts))) return set(flatten_list(set(p1) - set(p2)))
<SYSTEM_TASK:> Set the parameters of the likelihood model (inst) using values in the <END_TASK> <USER_TASK:> Description: def _update_likelihood_model(self, inst, partition_parameters, tree): """ Set the parameters of the likelihood model (inst) using values in the dictionary (partition_parameters) and the tree (tree) """
# Build transition matrix from dict model = partition_parameters['model'] freqs = partition_parameters.get('frequencies') if model == 'LG': subs_model = phylo_utils.models.LG(freqs) elif model == 'WAG': subs_model = phylo_utils.models.WAG(freqs) elif model == 'GTR': rates = partition_parameters.get('rates') subs_model = phylo_utils.models.GTR(rates, freqs, True) else: raise ValueError("Can't handle this model: {}".format(model)) tm = phylo_utils.markov.TransitionMatrix(subs_model) # Read alpha value alpha = partition_parameters['alpha'] inst.set_tree(tree) inst.update_alpha(alpha) inst.update_transition_matrix(tm)
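The dictionary this method expects can be inferred from the lookups above; a sketch of its shape for a GTR model, with placeholder values that are not from the source:

partition_parameters = {
    'model': 'GTR',
    'frequencies': [0.25, 0.25, 0.25, 0.25],      # equilibrium base frequencies
    'rates': [1.0, 2.0, 1.0, 1.0, 2.0, 1.0],      # six GTR exchangeability rates
    'alpha': 0.5,                                 # gamma shape parameter for rate heterogeneity
}
# For 'LG' or 'WAG', only 'frequencies' and 'alpha' are read.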
<SYSTEM_TASK:> Simple strategy - if any group is empty but must have at least one <END_TASK> <USER_TASK:> Description: def _fill_empty_groups_old(self, probs, assignment): """ Simple strategy - if any group is empty but must have at least one member, assign it the data point with the highest probability of membership """
new_assignment = np.array(assignment.tolist()) for k in range(self.numgrp): if np.count_nonzero(assignment==k) == 0: logger.info('Group {} became empty'.format(k)) best = np.where(probs[:,k]==probs[:,k].max())[0][0] new_assignment[best] = k new_assignment = self._fill_empty_groups(probs, new_assignment) return new_assignment
<SYSTEM_TASK:> Jacobian matrix given Christophe's suggestion of f <END_TASK> <USER_TASK:> Description: def jac(x,a): """ Jacobian matrix given Christophe's suggestion of f """
return (x-a) / np.sqrt(((x-a)**2).sum(1))[:,np.newaxis]
<SYSTEM_TASK:> Given a value of x, return a better x <END_TASK> <USER_TASK:> Description: def grad_desc_update(x, a, c, step=0.01): """ Given a value of x, return a better x using gradient descent """
return x - step * gradient(x,a,c)
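A small sketch of iterating this update; the gradient function called inside grad_desc_update is not shown here, and the data below are placeholders:

import numpy as np

a = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])   # reference points
c = np.array([1.0, 1.0, 1.0])                        # hypothetical target values
x = a.mean(axis=0, keepdims=True)                    # start from the centroid

for _ in range(500):
    x = grad_desc_update(x, a, c, step=0.01)         # repeated small steps downhill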
<SYSTEM_TASK:> index = index of the locus the bootstrap sample corresponds to - only important if <END_TASK> <USER_TASK:> Description: def run_out_of_sample_mds(boot_collection, ref_collection, ref_distance_matrix, index, dimensions, task=_fast_geo, rooted=False, **kwargs): """ index = index of the locus the bootstrap sample corresponds to - only important if using recalc=True in kwargs """
fit = np.empty((len(boot_collection), dimensions)) if ISPY3: query_trees = [PhyloTree(tree.encode(), rooted) for tree in boot_collection.trees] ref_trees = [PhyloTree(tree.encode(), rooted) for tree in ref_collection.trees] else: query_trees = [PhyloTree(tree, rooted) for tree in boot_collection.trees] ref_trees = [PhyloTree(tree, rooted) for tree in ref_collection.trees] for i, tree in enumerate(query_trees): distvec = np.array([task(tree, ref_tree, False) for ref_tree in ref_trees]) oos = OutOfSampleMDS(ref_distance_matrix) fit[i] = oos.fit(index, distvec, dimensions=dimensions, **kwargs) return fit
<SYSTEM_TASK:> Make A and part of b. See docstring of this class <END_TASK> <USER_TASK:> Description: def _make_A_and_part_of_b_adjacent(self, ref_crds): """ Make A and part of b. See docstring of this class for answer to "What are A and b?" """
rot = self._rotate_rows(ref_crds) A = 2*(rot - ref_crds) partial_b = (rot**2 - ref_crds**2).sum(1) return A, partial_b
<SYSTEM_TASK:> Recursively iterate a JSON schema to generate an ES mapping <END_TASK> <USER_TASK:> Description: def generate_schema_mapping(resolver, schema_uri, depth=1): """ Recursively iterate a JSON schema to generate an ES mapping that encapsulates it. """
visitor = SchemaVisitor({'$ref': schema_uri}, resolver) return _generate_schema_mapping(visitor, set(), depth)
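A usage sketch, assuming a jsonschema-style RefResolver seeded with the schema; the schema URI and resolver construction are assumptions:

from jsonschema import RefResolver

schema_uri = 'https://example.org/schemas/person.json#'    # hypothetical schema id
resolver = RefResolver(base_uri=schema_uri, referrer={})
mapping = generate_schema_mapping(resolver, schema_uri, depth=2)
# 'mapping' could then be supplied to Elasticsearch when creating the index.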
<SYSTEM_TASK:> Kwargs are passed to the Phyml process command line <END_TASK> <USER_TASK:> Description: def phyml_task(alignment_file, model, **kwargs): """ Kwargs are passed to the Phyml process command line """
import re fl = os.path.abspath(alignment_file) ph = Phyml(verbose=False) if model in ['JC69', 'K80', 'F81', 'F84', 'HKY85', 'TN93', 'GTR']: datatype = 'nt' elif re.search('[01]{6}', model) is not None: datatype = 'nt' else: datatype = 'aa' cmd = '-i {} -m {} -d {} -f m --quiet'.format(alignment_file, model, datatype) logger.debug("Phyml command = {}".format(cmd)) ph(cmd, wait=True, **kwargs) logger.debug("Phyml stdout = {}".format(ph.get_stdout())) logger.debug("Phyml stderr = {}".format(ph.get_stderr())) parser = PhymlParser() expected_outfiles = ['{}_phyml_stats'.format(alignment_file), '{}_phyml_tree'.format(alignment_file)] for i in range(2): if not os.path.exists(expected_outfiles[i]): expected_outfiles[i] += '.txt' logger.debug('Stats file {} {}'.format(expected_outfiles[0], 'exists' if os.path.exists(expected_outfiles[0]) else 'doesn\'t exist')) logger.debug('Tree file {} {}'.format(expected_outfiles[1], 'exists' if os.path.exists(expected_outfiles[1]) else 'doesn\'t exist')) with fileIO.TempFileList(expected_outfiles): try: result = parser.to_dict(*expected_outfiles) except IOError as ioerr: logger.error('File IO error: {}'.format(ioerr)) result = None except ParseException as parseerr: logger.error('Other parse error: {}'.format(parseerr)) result = None return result
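A minimal call sketch; the alignment file and an installed PhyML binary are assumptions:

result = phyml_task('example.phy', 'GTR')    # nucleotide data under the GTR model
if result is not None:
    print(sorted(result.keys()))             # dictionary parsed from the PhyML stats and tree files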
<SYSTEM_TASK:> Validate a mapping configuration file against the relevant schema. <END_TASK> <USER_TASK:> Description: def validate_mapping(mapping): """ Validate a mapping configuration file against the relevant schema. """
file_path = os.path.join(os.path.dirname(__file__), 'schemas', 'mapping.json') with open(file_path, 'r') as fh: validator = Draft4Validator(json.load(fh)) validator.validate(mapping) return mapping
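A sketch of validating a mapping loaded from disk (the file name is a placeholder):

import json

with open('my_mapping.json') as fh:              # hypothetical mapping configuration
    mapping = validate_mapping(json.load(fh))    # raises a jsonschema ValidationError if invalid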
<SYSTEM_TASK:> Plots a visual representation of a distance matrix <END_TASK> <USER_TASK:> Description: def heatmap(self, partition=None, cmap=CM.Blues): """ Plots a visual representation of a distance matrix """
if isinstance(self.dm, DistanceMatrix): length = self.dm.values.shape[0] else: length = self.dm.shape[0] datamax = float(np.abs(self.dm).max()) fig = plt.figure() ax = fig.add_subplot(111) ticks_at = [0, 0.5 * datamax, datamax] if partition: sorting = flatten_list(partition.get_membership()) self.dm = self.dm.reorder(sorting) cax = ax.imshow( self.dm.values, interpolation='nearest', origin='lower', extent=[0., length, 0., length], vmin=0, vmax=datamax, cmap=cmap, ) cbar = fig.colorbar(cax, ticks=ticks_at, format='%1.2g') cbar.set_label('Distance') return fig
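A short usage sketch; the plotter object wrapping the distance matrix and the partition are assumptions:

fig = plotter.heatmap(partition=my_partition)    # rows/columns reordered by cluster membership
fig.savefig('distances.png', dpi=150)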
<SYSTEM_TASK:> Function to get input strings for tree_collection <END_TASK> <USER_TASK:> Description: def get_tree_collection_strings(self, scale=1, guide_tree=None): """ Function to get input strings for tree_collection. tree_collection needs distvar, genome_map and labels - these are returned in that order. """
records = [self.collection[i] for i in self.indices] return TreeCollectionTaskInterface().scrape_args(records)
<SYSTEM_TASK:> Parses a JSON string into either a view or an index. If auto flatten <END_TASK> <USER_TASK:> Description: def from_json(buffer, auto_flatten=True, raise_for_index=True): """Parses a JSON string into either a view or an index. If auto flatten is enabled, a sourcemap index that does not contain external references is automatically flattened into a view. By default, if an index would be returned, an `IndexedSourceMap` error that holds the index is raised instead. """
buffer = to_bytes(buffer) view_out = _ffi.new('lsm_view_t **') index_out = _ffi.new('lsm_index_t **') rv = rustcall( _lib.lsm_view_or_index_from_json, buffer, len(buffer), view_out, index_out) if rv == 1: return View._from_ptr(view_out[0]) elif rv == 2: index = Index._from_ptr(index_out[0]) if auto_flatten and index.can_flatten: return index.into_view() if raise_for_index: raise IndexedSourceMap('Unexpected source map index', index=index) return index else: raise AssertionError('Unknown response from C ABI (%r)' % rv)
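A sketch of the intended control flow; the source map file is a placeholder and only the behaviour documented above is assumed:

with open('bundle.js.map', 'rb') as fh:
    data = fh.read()
try:
    view = from_json(data)                # a flattened View when possible
except IndexedSourceMap:
    view = None                           # the raised error carries the unflattened index (see docstring above)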
<SYSTEM_TASK:> Dumps a sourcemap in MemDB format into bytes. <END_TASK> <USER_TASK:> Description: def dump_memdb(self, with_source_contents=True, with_names=True): """Dumps a sourcemap in MemDB format into bytes."""
len_out = _ffi.new('unsigned int *') buf = rustcall( _lib.lsm_view_dump_memdb, self._get_ptr(), len_out, with_source_contents, with_names) try: rv = _ffi.unpack(buf, len_out[0]) finally: _lib.lsm_buffer_free(buf) return rv
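A sketch of persisting the MemDB dump to disk (the output path is a placeholder):

memdb = view.dump_memdb(with_source_contents=True, with_names=True)
with open('bundle.memdb', 'wb') as fh:
    fh.write(memdb)                       # bytes returned by the method above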