Dataset columns: docstring (string, lengths 52 to 499), function (string, lengths 67 to 35.2k), __index_level_0__ (int64, values 52.6k to 1.16M).
Start uploading the file and continue until the upload completes or an error occurs. This is the main method to use if you do not care about the state of the process. Args: params: a dict describing the video info, e.g. title, tags, description, category. See the doc of prepare_video_params for all video params. Returns: the video_id if the upload succeeds
def upload(self, params={}):
    if self.upload_token is not None:
        # resume upload
        status = self.check()
        if status['status'] != 4:
            return self.commit()
        else:
            self.new_slice()
            while self.slice_task_id != 0:
                self.upload_slice()
            return self.commit()
    else:
        # new upload
        self.create(self.prepare_video_params(**params))
        self.create_file()
        self.new_slice()
        while self.slice_task_id != 0:
            self.upload_slice()
        return self.commit()
957,403
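A minimal usage sketch for the upload method above. The YoukuUpload class name and its constructor arguments are illustrative assumptions, not part of the source:

    # Hypothetical client setup; the constructor shown here is an assumption.
    uploader = YoukuUpload(client_id='my-client-id',
                           access_token='my-token',
                           file='/path/to/video.mp4')
    # Starts a new upload, or resumes one if an upload_token already exists.
    video_id = uploader.upload({'title': 'My video', 'tags': 'demo'})
    print(video_id)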
Returns Python form of fully qualified name. Args: relative_to: If greater than 0, the returned path is relative to the first n directories.
def filter_pyfqn(cls, value, relative_to=0):
    def collect_packages(element, packages):
        parent = element.eContainer()
        if parent:
            collect_packages(parent, packages)
        packages.append(element.name)

    packages = []
    collect_packages(value, packages)

    if relative_to < 0 or relative_to > len(packages):
        raise ValueError('relative_to not in range of number of packages')

    fqn = '.'.join(packages[relative_to:])

    if relative_to:
        fqn = '.' + fqn

    return cls.module_path_map.get(fqn, fqn)
957,558
Generate model code. Args: model: The meta-model to generate code for. outfolder: Path to the directory that will contain the generated code. exclude: List of referenced resources for which code was already generated (to prevent regeneration).
def generate(self, model, outfolder, *, exclude=None):
    with pythonic_names():
        super().generate(model, outfolder)

    check_dependency = self.with_dependencies and model.eResource
    if check_dependency:
        if exclude is None:
            exclude = set()
        resource = model.eResource
        # the current resource had been managed and is excluded from further
        # generations
        exclude.add(resource)
        rset = resource.resource_set
        direct_resources = {r for r in rset.resources.values()
                            if r not in exclude}
        for resource in direct_resources:
            self.generate(resource.contents[0], outfolder, exclude=exclude)
957,561
Returns a MultiFieldSelector based on combining the passed-in FieldSelector and MultiFieldSelector objects. args: ``*others=``\ *FieldSelector*\ \|\ *iterable* Each argument is interpreted as either a FieldSelector, or a FieldSelector constructor.
def __init__(self, *others):
    selectors = list()
    heads = collections.defaultdict(set)
    for other in others:
        if isinstance(other, MultiFieldSelector):
            for head, tail in other.heads.iteritems():
                heads[head].add(tail)
        elif isinstance(other, FieldSelector):
            selectors.append(other)
        else:
            selectors.append(self.FieldSelector(other))

    for selector in selectors:
        chain = selector.selectors
        if chain:
            head = chain[0]
            tail = self.FieldSelector(chain[1:]) if len(chain) > 1 else all
            heads[head].add(tail)
        else:
            heads[None].add(all)

    self.heads = dict(
        (head, all if all in tail else MultiFieldSelector(*tail))
        for head, tail in heads.iteritems()
    ) if None not in heads or heads[None] is not all else {None: all}

    # sanity assertions follow
    head_types = set(type(x) for x in self.heads)
    self.has_int = int in head_types or long in head_types
    self.has_string = any(issubclass(x, basestring) for x in head_types)
    self.has_none = types.NoneType in head_types
    self.complete = self.has_none and self.heads[None] is all

    if self.has_none and (self.has_int or self.has_string):
        # this should be possible, but I'm punting on it for now
        raise ValueError(
            "MultiFieldSelector cannot yet specify a list and a hash/"
            "object at the same level: %r" % self.heads.keys()
        )
958,070
Deletes all of the fields at the specified locations. args: ``obj=``\ *OBJECT* the object to remove the fields from ``force=``\ *BOOL* if True, missing attributes do not raise errors. Otherwise, the first failure raises an exception without making any changes to ``obj``.
def delete(self, obj, force=False):
    # TODO: this could be a whole lot more efficient!
    if not force:
        for fs in self:
            try:
                fs.get(obj)
            except FieldSelectorException:
                raise

    for fs in self:
        try:
            fs.delete(obj)
        except FieldSelectorException:
            pass
958,077
A wrapper for the `Go stage API`__ .. __: http://api.go.cd/current/#stages Args: server (Server): A configured instance of :class:`gocd.server.Server` pipeline_name (str): The name of the pipeline we're working on stage_name (str): The name of the stage we're working on
def __init__(self, server, pipeline_name, stage_name, pipeline_counter=None):
    self.server = server
    self.pipeline_name = pipeline_name
    self.pipeline_counter = pipeline_counter
    self.stage_name = stage_name
958,082
Generator method which returns the differences from the invocant to the argument. args: ``other=``\ *Record*\ \|\ *Anything* The thing to compare against; the types must match, unless ``duck_type=True`` is passed. *diff_option*\ =\ *value* Unknown keyword arguments are eventually passed to a :ref:`DiffOptions` constructor.
def diff_iter(self, other, **kwargs):
    from normalize.diff import diff_iter
    return diff_iter(self, other, **kwargs)
958,104
Create gene association for class :class:`.GeneDeletionStrategy`. Yield pairs of reaction IDs and :class:`psamm.expression.boolean.Expression` objects, representing relationships between reactions and related genes. This helper function should be called when creating :class:`.GeneDeletionStrategy` objects. Args: model: :class:`psamm.datasource.native.NativeModel`.
def get_gene_associations(model):
    for reaction in model.reactions:
        assoc = None
        if reaction.genes is None:
            continue
        elif isinstance(reaction.genes, string_types):
            assoc = boolean.Expression(reaction.genes)
        else:
            variables = [boolean.Variable(g) for g in reaction.genes]
            assoc = boolean.Expression(boolean.And(*variables))
        yield reaction.id, assoc
958,183
The common visitor API used by all three visitor implementations. args: ``visitor=``\ *Visitor* Visitor options instance: contains the callbacks to use to implement the visiting, as well as traversal & filtering options. ``value=``\ *Object* Object being visited ``value_type=``\ *RecordType* The type object controlling the visiting.
def map(cls, visitor, value, value_type):
    unpacked = visitor.unpack(value, value_type, visitor)

    if unpacked == cls.StopVisiting or isinstance(
            unpacked, cls.StopVisiting):
        return unpacked.return_value

    if isinstance(unpacked, tuple):
        props, coll = unpacked
    else:
        props, coll = unpacked, None

    # recurse into values for collections
    if coll:
        coll_map_generator = cls.map_collection(
            visitor, coll, value_type,
        )
        mapped_coll = visitor.collect(
            coll_map_generator, value_type, visitor,
        )
    else:
        mapped_coll = None

    # recurse into regular properties
    mapped_props = None
    if props:
        mapped_props = cls.map_record(visitor, props, value_type)
    elif mapped_coll is None:
        return visitor.apply(value, None, visitor)

    return visitor.reduce(
        mapped_props, mapped_coll, value_type, visitor,
    )
958,238
Minimize flux of all reactions while keeping certain fluxes fixed. The fixed reactions are given in a dictionary as reaction id to value mapping. The weighted L1-norm of the fluxes is minimized. Args: model: MetabolicModel to solve. fixed: dict of additional lower bounds on reaction fluxes. solver: LP solver instance to use. weights: dict of weights on the L1-norm terms. Returns: An iterator of reaction ID and reaction flux pairs.
def flux_minimization(model, fixed, solver, weights={}):
    fba = FluxBalanceProblem(model, solver)

    for reaction_id, value in iteritems(fixed):
        flux = fba.get_flux_var(reaction_id)
        fba.prob.add_linear_constraints(flux >= value)

    fba.minimize_l1()

    return ((reaction_id, fba.get_flux(reaction_id))
            for reaction_id in model.reactions)
958,250
Find a random flux solution on the boundary of the solution space. The reactions in the threshold dictionary are constrained with the associated lower bound. Args: model: MetabolicModel to solve. threshold: dict of additional lower bounds on reaction fluxes. tfba: If True enable thermodynamic constraints. solver: LP solver instance to use. Returns: An iterator of reaction ID and reaction flux pairs.
def flux_randomization(model, threshold, tfba, solver):
    optimize = {}
    for reaction_id in model.reactions:
        if model.is_reversible(reaction_id):
            optimize[reaction_id] = 2*random.random() - 1.0
        else:
            optimize[reaction_id] = random.random()

    fba = _get_fba_problem(model, tfba, solver)
    for reaction_id, value in iteritems(threshold):
        fba.prob.add_linear_constraints(
            fba.get_flux_var(reaction_id) >= value)

    fba.maximize(optimize)
    for reaction_id in model.reactions:
        yield reaction_id, fba.get_flux(reaction_id)
958,251
Check whether the ID is valid. First check if the ID is missing, then check if it is a qualified string type, and finally check if the string is empty. Each failed check raises a ParseError with a corresponding message. Args: entity: a string type object to be checked. entity_type: a string that shows the type of entities to check, usually `Compound` or `Reaction`.
def _check_id(entity, entity_type):
    if entity is None:
        raise ParseError('{} ID missing'.format(entity_type))
    elif not isinstance(entity, string_types):
        msg = '{} ID must be a string, id was {}.'.format(
            entity_type, entity)
        if isinstance(entity, bool):
            msg += (' You may have accidentally used an ID value that YAML'
                    ' interprets as a boolean, such as "yes", "no", "on",'
                    ' "off", "true" or "false". To use this ID, you have to'
                    ' quote it with single or double quotes')
        raise ParseError(msg)
    elif len(entity) == 0:
        raise ParseError('{} ID must not be empty'.format(entity_type))
958,293
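The YAML boolean pitfall that the error message above warns about can be reproduced directly with PyYAML (assuming PyYAML is installed), since YAML 1.1 resolves unquoted yes/no/on/off to booleans:

    import yaml  # PyYAML

    # Unquoted 'yes' loads as the boolean True, which _check_id rejects...
    print(yaml.safe_load('id: yes'))    # {'id': True}
    # ...while a quoted 'yes' stays a string and passes the check.
    print(yaml.safe_load("id: 'yes'"))  # {'id': 'yes'}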
Convert compartment entry to YAML dict. Args: compartment: :class:`psamm.datasource.entry.CompartmentEntry`. adjacencies: Sequence of IDs or a single ID of adjacent compartments (or None).
def convert_compartment_entry(self, compartment, adjacencies):
    d = OrderedDict()
    d['id'] = compartment.id
    if adjacencies is not None:
        d['adjacent_to'] = adjacencies

    order = {key: i for i, key in enumerate(['name'])}
    prop_keys = set(compartment.properties)
    for prop in sorted(prop_keys, key=lambda x: (order.get(x, 1000), x)):
        if compartment.properties[prop] is not None:
            d[prop] = compartment.properties[prop]

    return d
958,333
Write iterable of entries as YAML object to stream. Args: stream: File-like object. entries: Iterable of entries. converter: Conversion function from entry to YAML object. properties: Set of properties to output (or None to output all).
def _write_entries(self, stream, entries, converter, properties=None):
    def iter_entries():
        for c in entries:
            entry = converter(c)
            if entry is None:
                continue
            if properties is not None:
                entry = OrderedDict(
                    (key, value) for key, value in iteritems(entry)
                    if key == 'id' or key in properties)
            yield entry

    self._dump(stream, list(iter_entries()))
958,336
Write iterable of compartments as YAML object to stream. Args: stream: File-like object. compartments: Iterable of compartment entries. adjacencies: Dictionary mapping IDs to adjacent compartment IDs. properties: Set of compartment properties to output (or None to output all).
def write_compartments(self, stream, compartments, adjacencies,
                       properties=None):
    def convert(entry):
        return self.convert_compartment_entry(
            entry, adjacencies.get(entry.id))

    self._write_entries(stream, compartments, convert, properties)
958,337
Write iterable of compounds as YAML object to stream. Args: stream: File-like object. compounds: Iterable of compound entries. properties: Set of compound properties to output (or None to output all).
def write_compounds(self, stream, compounds, properties=None):
    self._write_entries(
        stream, compounds, self.convert_compound_entry, properties)
958,338
Write iterable of reactions as YAML object to stream. Args: stream: File-like object. reactions: Iterable of reaction entries. properties: Set of reaction properties to output (or None to output all).
def write_reactions(self, stream, reactions, properties=None):
    self._write_entries(
        stream, reactions, self.convert_reaction_entry, properties)
958,339
Used to make a new Collection type, without that type having to be defined explicitly. Generates a new type name using the item type and a 'suffix' Collection class property. args: ``of=``\ *Record type* The type of values of the collection ``coll=``\ *Collection sub-class* The container class.
def _make_generic(of, coll):
    assert(issubclass(coll, Collection))
    key = (coll.__name__, "%s.%s" % (of.__module__, of.__name__))
    if key in GENERIC_TYPES:
        if GENERIC_TYPES[key].itemtype != of:
            raise exc.PropertyNotUnique(key=key)
    else:
        # oh, we get to name it? Goodie!
        generic_name = "%s%s" % (of.__name__, coll.suffix)
        GENERIC_TYPES[key] = type(
            generic_name, (coll, _Generic),
            dict(itemtype=of, generic_key=key)
        )
        mod = sys.modules[of.__module__]
        if not hasattr(mod, generic_name):
            setattr(mod, generic_name, GENERIC_TYPES[key])
    return GENERIC_TYPES[key]
958,497
Default collection constructor. args: ``values=``\ *iterable* Specify the initial contents of the collection. It will be converted to the correct type using :py:meth:`coll_to_tuples` and :py:meth:`tuples_to_coll` ``attribute=``\ *VALUE* It is possible to add extra properties to ``Collection`` objects; this is how you specify them on construction.
def __init__(self, values=None, **kwargs):
    self._values = type(self).tuples_to_coll(
        type(self).coll_to_tuples(values)
    )
    super(Collection, self).__init__(**kwargs)
958,498
Add all reactions from database that occur in given compartments. Args: model: :class:`psamm.metabolicmodel.MetabolicModel`. compartments: Collection of compartment IDs in which reactions must occur.
def add_all_database_reactions(model, compartments):
    added = set()
    for rxnid in model.database.reactions:
        reaction = model.database.get_reaction(rxnid)
        if all(compound.compartment in compartments
               for compound, _ in reaction.compounds):
            if not model.has_reaction(rxnid):
                added.add(rxnid)
            model.add_reaction(rxnid)

    return added
958,583
Add all exchange reactions to database and to model. Args: model: :class:`psamm.metabolicmodel.MetabolicModel`. compartment: The compartment in which exchange reactions are created.
def add_all_exchange_reactions(model, compartment, allow_duplicates=False):
    all_reactions = {}
    if not allow_duplicates:
        # TODO: Avoid adding reactions that already exist in the database.
        # This should be integrated in the database.
        for rxnid in model.database.reactions:
            rx = model.database.get_reaction(rxnid)
            all_reactions[rx] = rxnid

    added = set()
    added_compounds = set()
    initial_compounds = set(model.compounds)
    reactions = set(model.database.reactions)
    for model_compound in initial_compounds:
        compound = model_compound.in_compartment(compartment)
        if compound in added_compounds:
            continue

        rxnid_ex = create_exchange_id(reactions, compound)

        reaction_ex = Reaction(Direction.Both, {compound: -1})
        if reaction_ex not in all_reactions:
            model.database.set_reaction(rxnid_ex, reaction_ex)
            reactions.add(rxnid_ex)
        else:
            rxnid_ex = all_reactions[reaction_ex]

        if not model.has_reaction(rxnid_ex):
            added.add(rxnid_ex)
        model.add_reaction(rxnid_ex)
        added_compounds.add(compound)

    return added
958,584
Add all transport reactions to database and to model. Add transport reactions for all boundaries. Boundaries are defined by pairs (2-tuples) of compartment IDs. Transport reactions are added for all compounds in the model, not just for compounds in the two boundary compartments. Args: model: :class:`psamm.metabolicmodel.MetabolicModel`. boundaries: Set of compartment boundary pairs. Returns: Set of IDs of reactions that were added.
def add_all_transport_reactions(model, boundaries, allow_duplicates=False):
    all_reactions = {}
    if not allow_duplicates:
        # TODO: Avoid adding reactions that already exist in the database.
        # This should be integrated in the database.
        for rxnid in model.database.reactions:
            rx = model.database.get_reaction(rxnid)
            all_reactions[rx] = rxnid

    boundary_pairs = set()
    for source, dest in boundaries:
        if source != dest:
            boundary_pairs.add(tuple(sorted((source, dest))))

    added = set()
    added_pairs = set()
    initial_compounds = set(model.compounds)
    reactions = set(model.database.reactions)
    for compound in initial_compounds:
        for c1, c2 in boundary_pairs:
            compound1 = compound.in_compartment(c1)
            compound2 = compound.in_compartment(c2)
            pair = compound1, compound2
            if pair in added_pairs:
                continue

            rxnid_tp = create_transport_id(reactions, compound1, compound2)

            reaction_tp = Reaction(Direction.Both, {
                compound1: -1,
                compound2: 1
            })
            if reaction_tp not in all_reactions:
                model.database.set_reaction(rxnid_tp, reaction_tp)
                reactions.add(rxnid_tp)
            else:
                rxnid_tp = all_reactions[reaction_tp]

            if not model.has_reaction(rxnid_tp):
                added.add(rxnid_tp)
            model.add_reaction(rxnid_tp)
            added_pairs.add(pair)

    return added
958,585
Returns an instance of :class:`Stage` Args: pipeline_name (str): Name of the pipeline the stage belongs to stage_name (str): Name of the stage to act on pipeline_counter (int): The pipeline instance the stage is for. Returns: Stage: an instantiated :class:`Stage`.
def stage(self, pipeline_name, stage_name, pipeline_counter=None):
    return Stage(
        self,
        pipeline_name,
        stage_name,
        pipeline_counter=pipeline_counter,
    )
958,598
Return unique signature object for :class:`Reaction`. Signature objects are hashable, and compare equal only if the reactions are considered the same according to the specified rules. Args: direction: Include reaction directionality when considering equality. stoichiometry: Include stoichiometry when considering equality.
def reaction_signature(eq, direction=False, stoichiometry=False):
    def compounds_sig(compounds):
        if stoichiometry:
            return tuple(sorted(compounds))
        else:
            return tuple(sorted(compound for compound, _ in compounds))

    left = compounds_sig(eq.left)
    right = compounds_sig(eq.right)

    if left < right:
        reaction_sig = left, right
        direction_sig = eq.direction
    else:
        reaction_sig = right, left
        direction_sig = eq.direction.flipped()

    if direction:
        return reaction_sig, direction_sig
    return reaction_sig
958,695
Calculate the overall charge for the specified reaction. Args: reaction: :class:`psamm.reaction.Reaction`. compound_charge: a map from each compound to charge values.
def reaction_charge(reaction, compound_charge):
    charge_sum = 0.0
    for compound, value in reaction.compounds:
        charge = compound_charge.get(compound.name, float('nan'))
        charge_sum += charge * float(value)
    return charge_sum
958,698
Calculate the overall charge for all reactions in the model. Yield (reaction, charge) pairs. Args: model: :class:`psamm.datasource.native.NativeModel`.
def charge_balance(model):
    compound_charge = {}
    for compound in model.compounds:
        if compound.charge is not None:
            compound_charge[compound.id] = compound.charge

    for reaction in model.reactions:
        charge = reaction_charge(reaction.equation, compound_charge)
        yield reaction, charge
958,699
Calculate formula compositions for both sides of the specified reaction. If the compounds in the reaction all have a formula, then calculate and return the chemical compositions for both sides, otherwise return `None`. Args: reaction: :class:`psamm.reaction.Reaction`. compound_formula: a map from compound id to formula.
def reaction_formula(reaction, compound_formula):
    def multiply_formula(compound_list):
        for compound, count in compound_list:
            yield count * compound_formula[compound.name]

    for compound, _ in reaction.compounds:
        if compound.name not in compound_formula:
            return None
    else:
        left_form = reduce(
            operator.or_, multiply_formula(reaction.left), Formula())
        right_form = reduce(
            operator.or_, multiply_formula(reaction.right), Formula())

    return left_form, right_form
958,700
Calculate formula compositions for each reaction. Call :func:`reaction_formula` for each reaction. Yield (reaction, result) pairs, where result has two formula compositions or `None`. Args: model: :class:`psamm.datasource.native.NativeModel`.
def formula_balance(model):
    # Mapping from compound id to formula
    compound_formula = {}
    for compound in model.compounds:
        if compound.formula is not None:
            try:
                f = Formula.parse(compound.formula).flattened()
                compound_formula[compound.id] = f
            except ParseError as e:
                msg = 'Error parsing formula for compound {}:\n{}\n{}'.format(
                    compound.id, e, compound.formula)
                if e.indicator is not None:
                    msg += '\n{}'.format(e.indicator)
                logger.warning(msg)

    for reaction in model.reactions:
        yield reaction, reaction_formula(reaction.equation, compound_formula)
958,701
Returns all the information regarding a specific pipeline run See the `Go pipeline instance documentation`__ for examples. .. __: http://api.go.cd/current/#get-pipeline-instance Args: counter (int): The pipeline instance to fetch. If falsey, returns the latest pipeline instance from :meth:`history`. Returns: Response: :class:`gocd.api.response.Response` object
def instance(self, counter=None):
    if not counter:
        history = self.history()
        if not history:
            return history
        else:
            return Response._from_json(history['pipelines'][0])

    return self._get('/instance/{counter:d}'.format(counter=counter))
958,730
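A short usage sketch for the instance method above, following the py-gocd client's documented setup (the server URL and credentials are placeholders):

    from gocd import Server

    go = Server('http://localhost:8153', user='admin', password='secret')
    pipeline = go.pipeline('my-pipeline')
    latest = pipeline.instance()    # falsey counter: latest run via history()
    run_42 = pipeline.instance(42)  # specific pipeline counter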
Helper to instantiate an :class:`gocd.api.artifact.Artifact` object Args: counter (int): The pipeline counter to get the artifact for stage: Stage name job: Job name stage_counter: Defaults to 1 Returns: Artifact: :class:`gocd.api.artifact.Artifact` object
def artifact(self, counter, stage, job, stage_counter=1):
    return Artifact(self.server, self.name, counter, stage, job,
                    stage_counter)
958,732
Yields the output and metadata from all jobs in the pipeline Args: instance: The result of a :meth:`instance` call, if not supplied the latest of the pipeline will be used. Yields: tuple: (metadata (dict), output (str)). metadata contains: - pipeline - pipeline_counter - stage - stage_counter - job - job_result
def console_output(self, instance=None):
    if instance is None:
        instance = self.instance()

    for stage in instance['stages']:
        for job in stage['jobs']:
            if job['result'] not in self.final_results:
                continue

            artifact = self.artifact(
                instance['counter'],
                stage['name'],
                job['name'],
                stage['counter']
            )
            output = artifact.get('cruise-output/console.log')

            yield (
                {
                    'pipeline': self.name,
                    'pipeline_counter': instance['counter'],
                    'stage': stage['name'],
                    'stage_counter': stage['counter'],
                    'job': job['name'],
                    'job_result': job['result'],
                },
                output.body
            )
958,733
Helper to instantiate a :class:`gocd.api.stage.Stage` object Args: name: The name of the stage pipeline_counter (int): The pipeline instance the stage is for. Returns: Stage: an instantiated :class:`gocd.api.stage.Stage`.
def stage(self, name, pipeline_counter=None):
    return Stage(
        self.server,
        pipeline_name=self.name,
        stage_name=name,
        pipeline_counter=pipeline_counter,
    )
958,734
Return a constant indicating the type of coupling. Depending on the type of coupling, one of the constants from :class:`.CouplingClass` is returned. Args: coupling: Tuple of minimum and maximum flux ratio
def classify_coupling(coupling):
    lower, upper = coupling

    if lower is None and upper is None:
        return CouplingClass.Uncoupled
    elif lower is None or upper is None:
        return CouplingClass.DirectionalReverse
    elif lower == 0.0 and upper == 0.0:
        return CouplingClass.Inconsistent
    elif lower <= 0.0 and upper >= 0.0:
        return CouplingClass.DirectionalForward
    elif abs(lower - upper) < 1e-6:
        return CouplingClass.Full
    else:
        return CouplingClass.Partial
958,756
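A few worked inputs for classify_coupling above, written as asserts; the import path is an assumption (recent PSAMM versions keep these names in psamm.fluxcoupling):

    from psamm.fluxcoupling import classify_coupling, CouplingClass

    assert classify_coupling((None, None)) == CouplingClass.Uncoupled
    assert classify_coupling((None, 2.0)) == CouplingClass.DirectionalReverse
    assert classify_coupling((0.0, 0.0)) == CouplingClass.Inconsistent
    assert classify_coupling((-1.0, 3.0)) == CouplingClass.DirectionalForward
    assert classify_coupling((2.0, 2.0)) == CouplingClass.Full
    assert classify_coupling((1.0, 3.0)) == CouplingClass.Partial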
Convert raw SBML model to extended model. Args: model: :class:`NativeModel` obtained from :class:`SBMLReader`.
def convert_sbml_model(model):
    biomass_reactions = set()
    for reaction in model.reactions:
        # Extract limits
        if reaction.id not in model.limits:
            lower, upper = parse_flux_bounds(reaction)
            if lower is not None or upper is not None:
                model.limits[reaction.id] = reaction.id, lower, upper

        # Detect objective
        objective = parse_objective_coefficient(reaction)
        if objective is not None and objective != 0:
            biomass_reactions.add(reaction.id)

    if len(biomass_reactions) == 1:
        model.biomass_reaction = next(iter(biomass_reactions))

    # Convert model to mutable entries
    convert_model_entries(model)

    # Detect extracellular compartment
    if model.extracellular_compartment is None:
        extracellular = detect_extracellular_compartment(model)
        model.extracellular_compartment = extracellular

    # Convert exchange reactions to exchange compounds
    convert_exchange_to_compounds(model)
958,768
Yield key, value pairs parsed from the XHTML notes section. Each key, value pair must be defined in its own text block, e.g. ``<p>key: value</p><p>key2: value2</p>``. The key and value must be separated by a colon. Whitespace is stripped from both key and value, and quotes are removed from values if present. The key is normalized by conversion to lower case and spaces replaced with underscores. Args: entry: :class:`_SBMLEntry`.
def parse_xhtml_notes(entry):
    for note in entry.xml_notes.itertext():
        m = re.match(r'^([^:]+):(.+)$', note)
        if m:
            key, value = m.groups()
            key = key.strip().lower().replace(' ', '_')
            value = value.strip()
            m = re.match(r'^"(.*)"$', value)
            if m:
                value = m.group(1)
            if value != '':
                yield key, value
958,774
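The same parsing rules can be illustrated standalone with the re module, without any SBML entry, to show how a single note line is normalized:

    import re

    note = 'FORMULA: "C6H12O6"'
    m = re.match(r'^([^:]+):(.+)$', note)
    key, value = m.groups()
    key = key.strip().lower().replace(' ', '_')  # -> 'formula'
    value = value.strip()
    m = re.match(r'^"(.*)"$', value)
    if m:
        value = m.group(1)                       # quotes stripped -> 'C6H12O6'
    print(key, value)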
Return species properties defined in the XHTML notes. Older SBML models often define additional properties in the XHTML notes section because structured methods for defining properties had not been developed. This will try to parse the following properties: ``PUBCHEM ID``, ``CHEBI ID``, ``FORMULA``, ``KEGG ID``, ``CHARGE``. Args: entry: :class:`SBMLSpeciesEntry`.
def parse_xhtml_species_notes(entry):
    properties = {}
    if entry.xml_notes is not None:
        cobra_notes = dict(parse_xhtml_notes(entry))

        for key in ('pubchem_id', 'chebi_id'):
            if key in cobra_notes:
                properties[key] = cobra_notes[key]

        if 'formula' in cobra_notes:
            properties['formula'] = cobra_notes['formula']

        if 'kegg_id' in cobra_notes:
            properties['kegg'] = cobra_notes['kegg_id']

        if 'charge' in cobra_notes:
            try:
                value = int(cobra_notes['charge'])
            except ValueError:
                logger.warning(
                    'Unable to parse charge for {} as an'
                    ' integer: {}'.format(
                        entry.id, cobra_notes['charge']))
                value = cobra_notes['charge']
            properties['charge'] = value

    return properties
958,775
Return reaction properties defined in the XHTML notes. Older SBML models often define additional properties in the XHTML notes section because structured methods for defining properties had not been developed. This will try to parse the following properties: ``SUBSYSTEM``, ``GENE ASSOCIATION``, ``EC NUMBER``, ``AUTHORS``, ``CONFIDENCE``. Args: entry: :class:`SBMLReactionEntry`.
def parse_xhtml_reaction_notes(entry):
    properties = {}
    if entry.xml_notes is not None:
        cobra_notes = dict(parse_xhtml_notes(entry))

        if 'subsystem' in cobra_notes:
            properties['subsystem'] = cobra_notes['subsystem']

        if 'gene_association' in cobra_notes:
            properties['genes'] = cobra_notes['gene_association']

        if 'ec_number' in cobra_notes:
            properties['ec'] = cobra_notes['ec_number']

        if 'authors' in cobra_notes:
            properties['authors'] = [
                a.strip() for a in cobra_notes['authors'].split(';')]

        if 'confidence' in cobra_notes:
            try:
                value = int(cobra_notes['confidence'])
            except ValueError:
                logger.warning(
                    'Unable to parse confidence level for {} as an'
                    ' integer: {}'.format(
                        entry.id, cobra_notes['confidence']))
                value = cobra_notes['confidence']
            properties['confidence'] = value

    return properties
958,776
Return objective value for reaction entry. Detect objectives that are specified using the non-standardized kinetic law parameters which are used by many pre-FBC SBML models. The objective coefficient is returned for the given reaction, or None if undefined. Args: entry: :class:`SBMLReactionEntry`.
def parse_objective_coefficient(entry):
    for parameter in entry.kinetic_law_reaction_parameters:
        pid, name, value, units = parameter
        if (pid == 'OBJECTIVE_COEFFICIENT' or
                name == 'OBJECTIVE_COEFFICIENT'):
            return value

    return None
958,777
Return flux bounds for reaction entry. Detect flux bounds that are specified using the non-standardized kinetic law parameters which are used by many pre-FBC SBML models. The flux bounds are returned as a pair of lower, upper bounds. The returned bound is None if undefined. Args: entry: :class:`SBMLReactionEntry`.
def parse_flux_bounds(entry):
    lower_bound = None
    upper_bound = None
    for parameter in entry.kinetic_law_reaction_parameters:
        pid, name, value, units = parameter
        if pid == 'UPPER_BOUND' or name == 'UPPER_BOUND':
            upper_bound = value
        elif pid == 'LOWER_BOUND' or name == 'LOWER_BOUND':
            lower_bound = value

    return lower_bound, upper_bound
958,778
Detect the identifier for equations with extracellular compartments. Args: model: :class:`NativeModel`.
def detect_extracellular_compartment(model):
    extracellular_key = Counter()

    for reaction in model.reactions:
        equation = reaction.equation
        if equation is None:
            continue

        if len(equation.compounds) == 1:
            compound, _ = equation.compounds[0]
            compartment = compound.compartment
            extracellular_key[compartment] += 1

    if len(extracellular_key) == 0:
        return None
    else:
        best_key, _ = extracellular_key.most_common(1)[0]

    logger.info('{} is extracellular compartment'.format(best_key))
    return best_key
958,779
Convert exchange reactions in model to exchange compounds. Only exchange reactions in the extracellular compartment are converted. The extracellular compartment must be defined for the model. Args: model: :class:`NativeModel`.
def convert_exchange_to_compounds(model):
    # Build set of exchange reactions
    exchanges = set()
    for reaction in model.reactions:
        equation = reaction.properties.get('equation')
        if equation is None:
            continue

        if len(equation.compounds) != 1:
            # Provide warning for exchange reactions with more than
            # one compound, they won't be put into the exchange definition
            if (len(equation.left) == 0) != (len(equation.right) == 0):
                logger.warning('Exchange reaction {} has more than one'
                               ' compound, it was not converted to'
                               ' exchange compound'.format(reaction.id))
            continue

        exchanges.add(reaction.id)

    # Convert exchange reactions into exchange compounds
    for reaction_id in exchanges:
        equation = model.reactions[reaction_id].equation
        compound, value = equation.compounds[0]
        if compound.compartment != model.extracellular_compartment:
            continue

        if compound in model.exchange:
            logger.warning(
                'Compound {} is already defined in the exchange'
                ' definition'.format(compound))
            continue

        # We multiply the flux bounds by value in order to create equivalent
        # exchange reactions with stoichiometric value of one. If the flux
        # bounds are not set but the reaction is unidirectional, the implicit
        # flux bounds must be used.
        lower_flux, upper_flux = None, None
        if reaction_id in model.limits:
            _, lower, upper = model.limits[reaction_id]
            if lower is not None:
                lower_flux = lower * abs(value)
            if upper is not None:
                upper_flux = upper * abs(value)

        if lower_flux is None and equation.direction == Direction.Forward:
            lower_flux = 0
        if upper_flux is None and equation.direction == Direction.Reverse:
            upper_flux = 0

        # If the stoichiometric value of the reaction is reversed, the flux
        # limits must be flipped.
        if value > 0:
            lower_flux, upper_flux = (
                -upper_flux if upper_flux is not None else None,
                -lower_flux if lower_flux is not None else None)

        model.exchange[compound] = (
            compound, reaction_id, lower_flux, upper_flux)

        model.reactions.discard(reaction_id)
        model.limits.pop(reaction_id, None)
958,780
Merge equivalent compounds in various compartments. Tries to detect and merge compound entries that represent the same compound in different compartments. The entries are only merged if all properties are equivalent. Compound entries must have an ID with a suffix of an underscore followed by the compartment ID. This suffix will be stripped and compounds with identical IDs are merged if the properties are identical. Args: model: :class:`NativeModel`.
def merge_equivalent_compounds(model):
    def dicts_are_compatible(d1, d2):
        return all(key not in d1 or key not in d2 or d1[key] == d2[key]
                   for key in set(d1) | set(d2))

    compound_compartment = {}
    ineligible = set()
    for reaction in model.reactions:
        equation = reaction.equation
        if equation is None:
            continue

        for compound, _ in equation.compounds:
            compartment = compound.compartment
            if compartment is not None:
                compound_compartment[compound.name] = compartment
                if not compound.name.endswith('_{}'.format(compartment)):
                    ineligible.add(compound.name)

    compound_groups = {}
    for compound_id, compartment in iteritems(compound_compartment):
        if compound_id in ineligible:
            continue

        suffix = '_{}'.format(compound_compartment[compound_id])
        if compound_id.endswith(suffix):
            group_name = compound_id[:-len(suffix)]
            compound_groups.setdefault(group_name, set()).add(compound_id)

    compound_mapping = {}
    merged_compounds = {}
    for group, compound_set in iteritems(compound_groups):
        # Try to merge as many compounds as possible
        merged = []
        for compound_id in compound_set:
            props = dict(model.compounds[compound_id].properties)
            # Ignore differences in ID and compartment properties
            props.pop('id', None)
            props.pop('compartment', None)

            for merged_props, merged_set in merged:
                if dicts_are_compatible(props, merged_props):
                    merged_set.add(compound_id)
                    merged_props.update(props)
                    break
                else:
                    keys = set(key for key in set(props) | set(merged_props)
                               if key not in props or
                               key not in merged_props or
                               props[key] != merged_props[key])
                    logger.info(
                        'Unable to merge {} into {}, difference in'
                        ' keys: {}'.format(
                            compound_id, ', '.join(merged_set),
                            ', '.join(keys)))
            else:
                merged.append((props, {compound_id}))

        if len(merged) == 1:
            # Merge into one set with the group name
            merged_props, merged_set = merged[0]
            for compound_id in merged_set:
                compound_mapping[compound_id] = group
            merged_compounds[group] = merged_props
        else:
            # Since we cannot merge all compounds, create new group names
            # based on the group and compartments.
            for merged_props, merged_set in merged:
                compartments = set(
                    compound_compartment[c] for c in merged_set)
                merged_name = '{}_{}'.format(
                    group, '_'.join(sorted(compartments)))

                for compound_id in merged_set:
                    compound_mapping[compound_id] = merged_name
                merged_compounds[merged_name] = merged_props

    # Translate reaction compounds
    for reaction in model.reactions:
        equation = reaction.equation
        if equation is None:
            continue

        reaction.equation = equation.translated_compounds(
            lambda c: compound_mapping.get(c, c))

    # Translate compound entries
    new_compounds = []
    for compound in model.compounds:
        if compound.id not in compound_mapping:
            new_compounds.append(compound)
        else:
            group = compound_mapping[compound.id]
            if group not in merged_compounds:
                continue
            props = merged_compounds.pop(group)
            props['id'] = group
            new_compounds.append(DictCompoundEntry(
                props, filemark=compound.filemark))

    model.compounds.clear()
    model.compounds.update(new_compounds)

    # Translate exchange
    new_exchange = OrderedDict()
    for compound, reaction_id, lower, upper in itervalues(model.exchange):
        new_compound = compound.translate(
            lambda name: compound_mapping.get(name, name))
        new_exchange[new_compound] = new_compound, reaction_id, lower, upper

    model.exchange.clear()
    model.exchange.update(new_exchange)
958,781
Write a given model to file. Args: file: File-like object open for writing. model: Instance of :class:`NativeModel` to write. pretty: Whether to format the XML output for readability.
def write_model(self, file, model, pretty=False):
    ET.register_namespace('mathml', MATHML_NS)
    ET.register_namespace('xhtml', XHTML_NS)
    ET.register_namespace('fbc', FBC_V2)

    # Load compound information
    compound_name = {}
    compound_properties = {}
    for compound in model.compounds:
        compound_name[compound.id] = (
            compound.name if compound.name is not None else compound.id)
        compound_properties[compound.id] = compound.properties

    model_reactions = set(model.model)

    reaction_properties = {}
    biomass_id = None
    for r in model.reactions:
        if (model_reactions is not None and
                r.id not in model_reactions):
            continue

        reaction_id = util.create_unique_id(
            self._make_safe_id(r.id), reaction_properties)
        if r.id == model.biomass_reaction:
            biomass_id = reaction_id

        reaction_properties[reaction_id] = r.properties

    # Add exchange reactions to reaction_properties,
    # also add flux limit info to flux_limits
    flux_limits = {}
    for compound, reaction_id, lower, upper in itervalues(model.exchange):
        # Create exchange reaction
        if reaction_id is None:
            reaction_id = create_exchange_id(reaction_properties, compound)
        reaction_id = util.create_unique_id(
            self._make_safe_id(reaction_id), reaction_properties)

        reaction_properties[reaction_id] = {
            'id': reaction_id,
            'equation': Reaction(Direction.Both, {compound: -1})
        }

        if lower is None:
            lower = -model.default_flux_limit
        if upper is None:
            upper = model.default_flux_limit
        flux_limits[reaction_id] = (lower, upper)

        # Create a dummy properties dict for undefined compounds
        if compound.name not in compound_properties:
            compound_properties[compound.name] = {
                'id': compound.name
            }

    root = ET.Element(self._sbml_tag('sbml'))
    root.set(self._sbml_tag('level'), '3')
    root.set(self._sbml_tag('version'), '1')
    root.set(_tag('required', FBC_V2), 'false')

    if model.version_string is not None:
        notes_tag = ET.SubElement(root, self._sbml_tag('notes'))
        body_tag = ET.SubElement(notes_tag, _tag('body', XHTML_NS))
        self._add_properties_notes(
            body_tag, {'model version': model.version_string})

    model_tag = ET.SubElement(root, self._sbml_tag('model'))
    model_tag.set(_tag('strict', FBC_V2), 'true')
    if model.name is not None:
        model_tag.set(self._sbml_tag('name'), model.name)

    # Build mapping from Compound to species ID
    model_compartments = {}
    model_species = {}
    species_ids = set()
    for _, properties in iteritems(reaction_properties):
        for compound, _ in properties['equation'].compounds:
            if compound in model_species:
                continue

            # Create a dummy properties dict for undefined compounds
            if compound.name not in compound_properties:
                compound_properties[compound.name] = {
                    'id': compound.name
                }

            compound_id = util.create_unique_id(
                self._make_safe_id(compound.name), species_ids)
            model_species[compound] = compound_id
            species_ids.add(compound_id)

            if compound.compartment not in model_compartments:
                model_compartments[
                    compound.compartment] = 'C_' + util.create_unique_id(
                        self._make_safe_id(compound.compartment),
                        model_compartments)

    # Create list of compartments
    compartments = ET.SubElement(
        model_tag, self._sbml_tag('listOfCompartments'))
    for _, compartment_id in iteritems(model_compartments):
        compartment_tag = ET.SubElement(
            compartments, self._sbml_tag('compartment'))
        compartment_tag.set(self._sbml_tag('id'), compartment_id)
        compartment_tag.set(self._sbml_tag('constant'), 'true')

    # Create list of species
    species_list = ET.SubElement(
        model_tag, self._sbml_tag('listOfSpecies'))
    for species, species_id in sorted(
            iteritems(model_species), key=lambda x: x[1]):
        species_tag = ET.SubElement(
            species_list, self._sbml_tag('species'))
        species_tag.set(self._sbml_tag('id'), 'M_' + species_id)
        species_tag.set(
            self._sbml_tag('name'),
            compound_name.get(species.name, species.name))
        species_tag.set(
            self._sbml_tag('compartment'),
            model_compartments[species.compartment])
        species_tag.set(self._sbml_tag('constant'), 'false')
        species_tag.set(self._sbml_tag('boundaryCondition'), 'false')
        species_tag.set(self._sbml_tag('hasOnlySubstanceUnits'), 'true')

        if 'charge' in compound_properties[species.name]:
            species_tag.set(_tag('charge', FBC_V2), text_type(
                compound_properties[species.name]['charge']))
        if 'formula' in compound_properties[species.name]:
            species_tag.set(_tag(
                'chemicalFormula', FBC_V2), text_type(
                    compound_properties[species.name]['formula']))

        notes_tag = ET.SubElement(species_tag, self._sbml_tag('notes'))
        body_tag = ET.SubElement(notes_tag, _tag('body', XHTML_NS))
        self._add_properties_notes(
            body_tag, compound_properties[species.name])

    params_list = ET.SubElement(
        model_tag, self._sbml_tag('listOfParameters'))

    # Create mapping for reactions containing flux limit definitions
    for rxn_id, lower_lim, upper_lim in itervalues(model.limits):
        flux_limits[rxn_id] = lower_lim, upper_lim

    params = {}
    gene_ids = {}

    if biomass_id is not None:
        self._add_fbc_objective(model_tag, biomass_id)

    # Create list of reactions
    reactions = ET.SubElement(model_tag, self._sbml_tag('listOfReactions'))
    for eq_id, properties in sorted(iteritems(reaction_properties)):
        reaction_tag = ET.SubElement(reactions, self._sbml_tag('reaction'))
        equation = properties['equation']

        reaction_tag.set(self._sbml_tag('id'), 'R_' + eq_id)
        if 'name' in properties:
            reaction_tag.set(self._sbml_tag('name'), properties['name'])
        reaction_tag.set(self._sbml_tag('reversible'), text_type(
            equation.direction == Direction.Both).lower())
        reaction_tag.set(self._sbml_tag('fast'), 'false')
        lower_str, upper_str = self._get_flux_bounds(
            eq_id, model, flux_limits, equation)

        params[upper_str] = 'P_' + self._make_safe_numerical_id(upper_str)
        params[lower_str] = 'P_' + self._make_safe_numerical_id(lower_str)
        reaction_tag.set(
            _tag('upperFluxBound', FBC_V2), params[upper_str])
        reaction_tag.set(
            _tag('lowerFluxBound', FBC_V2), params[lower_str])

        if 'genes' in properties:
            self._add_gene_associations(
                eq_id, properties['genes'], gene_ids, reaction_tag)

        if any(value < 0 for _, value in equation.compounds):
            reactants = ET.SubElement(
                reaction_tag, self._sbml_tag('listOfReactants'))

        if any(value > 0 for _, value in equation.compounds):
            products = ET.SubElement(
                reaction_tag, self._sbml_tag('listOfProducts'))

        for compound, value in sorted(equation.compounds):
            dest_list = reactants if value < 0 else products
            spec_ref = ET.SubElement(
                dest_list, self._sbml_tag('speciesReference'))
            spec_ref.set(
                self._sbml_tag('species'), 'M_' + model_species[compound])
            spec_ref.set(
                self._sbml_tag('constant'), 'true')
            spec_ref.set(
                self._sbml_tag('stoichiometry'), text_type(abs(value)))

        notes_tag = ET.SubElement(reaction_tag, self._sbml_tag('notes'))
        body_tag = ET.SubElement(notes_tag, _tag('body', XHTML_NS))
        self._add_properties_notes(body_tag, reaction_properties[eq_id])

        if self._cobra_flux_bounds is True:
            # Create COBRA-compliant parameter list
            kl_tag = ET.SubElement(
                reaction_tag, self._sbml_tag('kineticLaw'))
            math_tag = ET.SubElement(kl_tag, self._sbml_tag('math'))
            ci_tag = ET.SubElement(math_tag, _tag('ci', MATHML_NS))
            ci_tag.text = 'FLUX_VALUE'

            param_list = ET.SubElement(
                kl_tag, self._sbml_tag('listOfParameters'))
            ET.SubElement(param_list, self._sbml_tag('parameter'), {
                self._sbml_tag('id'): 'LOWER_BOUND',
                self._sbml_tag('name'): 'LOWER_BOUND',
                self._sbml_tag('value'): lower_str,
                self._sbml_tag('constant'): 'true'
            })
            ET.SubElement(param_list, self._sbml_tag('parameter'), {
                self._sbml_tag('id'): 'UPPER_BOUND',
                self._sbml_tag('name'): 'UPPER_BOUND',
                self._sbml_tag('value'): upper_str,
                self._sbml_tag('constant'): 'true'
            })

    for val, id in iteritems(params):
        param_tag = ET.SubElement(params_list, self._sbml_tag('parameter'))
        param_tag.set(self._sbml_tag('id'), id)
        param_tag.set(self._sbml_tag('value'), val)
        param_tag.set(self._sbml_tag('constant'), 'true')

    self._add_gene_list(model_tag, gene_ids)

    tree = ET.ElementTree(root)
    if pretty:
        self._indent(root)

    write_options = dict(
        encoding='utf-8',
        default_namespace=self._namespace)
    if PY3:
        write_options['encoding'] = 'unicode'

    tree.write(file, **write_options)
958,807
JSON marshall in function: a 'visitor' function which looks for JSON types/hints on types being converted to, but does not require them. Args: ``record_type=``\ *TYPE* Record type to convert data to ``json_struct=``\ *DICT|LIST* a loaded (via ``json.loads``) data structure, normally a dict or a list.
def from_json(record_type, json_struct):
    if issubclass(record_type, JsonRecord):
        return record_type(json_struct)
    elif issubclass(record_type, Record):
        # do what the default JsonRecord __init__ does
        init_kwargs = json_to_initkwargs(record_type, json_struct)
        instance = record_type(**init_kwargs)
        return instance
    else:
        raise exc.CastTypeError(badtype=record_type)
958,811
Build a new JsonRecord sub-class. Args: ``json_data=``\ *LIST|other* JSON data (string or already ``json.loads``'d) ``**kwargs`` Other initializer attributes, for lists with extra attributes (eg, paging information)
def __init__(self, json_data=None, **kwargs):
    if isinstance(json_data, OhPickle):
        return
    if isinstance(json_data, basestring):
        json_data = json.loads(json_data)
    if json_data is not None:
        kwargs = type(self).json_to_initkwargs(json_data, kwargs)
    super(JsonRecordList, self).__init__(**kwargs)
958,818
A wrapper for the `Go pluggable SCM API`__ .. __: https://api.go.cd/current/#scms Args: server (Server): A configured instance of :class:`gocd.server.Server` name (str): The name of the SCM material
def __init__(self, server, name=""):
    self.server = server
    self.name = name
958,847
Remove old constraints and then solve the current problem. Args: sense: Minimize or maximize the objective (:class:`.lp.ObjectiveSense`). Returns: The Result object for the solved LP problem.
def _solve(self, sense=None):
    # Remove the constraints from the last run
    while len(self._remove_constr) > 0:
        self._remove_constr.pop().delete()

    try:
        return self._prob.solve(sense=sense)
    except lp.SolverError as e:
        raise_from(MOMAError(text_type(e)), e)
    finally:
        self._remove_constr = []
958,863
Solve the wild type problem using FBA. Args: objective: The objective reaction to be maximized. Returns: The LP Result object for the solved FBA problem.
def solve_fba(self, objective):
    self._prob.set_objective(self._v_wt[objective])
    return self._solve(lp.ObjectiveSense.Maximize)
958,864
Return a dictionary of all the fluxes solved by FBA. Dictionary of fluxes is used in :meth:`.lin_moma` and :meth:`.moma` to minimize changes in the flux distributions following model perturbation. Args: objective: The objective reaction that is maximized. Returns: Dictionary of fluxes for each reaction in the model.
def get_fba_flux(self, objective):
    flux_result = self.solve_fba(objective)
    fba_fluxes = {}

    # Place all the flux values in a dictionary
    for key in self._model.reactions:
        fba_fluxes[key] = flux_result.get_value(self._v_wt[key])
    return fba_fluxes
958,865
Find the FBA solution that minimizes all the flux values. Maximize the objective flux then minimize all other fluxes while keeping the objective flux at the maximum. Args: objective: The objective reaction that is maximized. Returns: A dictionary of all the reactions and their minimized fluxes.
def get_minimal_fba_flux(self, objective):
    # Define constraints
    vs_wt = self._v_wt.set(self._model.reactions)
    zs = self._z.set(self._model.reactions)

    wt_obj_flux = self.get_fba_obj_flux(objective)

    with self.constraints() as constr:
        constr.add(
            zs >= vs_wt, vs_wt >= -zs,
            self._v_wt[objective] >= wt_obj_flux)
        self._prob.set_objective(self._z.sum(self._model.reactions))
        result = self._solve(lp.ObjectiveSense.Minimize)

    fba_fluxes = {}
    for key in self._model.reactions:
        fba_fluxes[key] = result.get_value(self._v_wt[key])
    return fba_fluxes
958,866
Minimize the redistribution of fluxes using Euclidean distance. Minimizes the redistribution of fluxes using a quadratic objective function. The distance is minimized by minimizing the sum of (wild type - knockout)^2. Args: wt_fluxes: Dictionary of all the wild type fluxes that will be used to find a close MOMA solution. Fluxes can be experimental or calculated using :meth:`get_fba_flux`.
def moma(self, wt_fluxes):
    reactions = set(self._adjustment_reactions())
    v = self._v

    obj_expr = 0
    for f_reaction, f_value in iteritems(wt_fluxes):
        if f_reaction in reactions:
            # Minimize the Euclidean distance between the two vectors
            obj_expr += (f_value - v[f_reaction])**2

    self._prob.set_objective(obj_expr)
    self._solve(lp.ObjectiveSense.Minimize)
958,870
Check consistency of model reactions. Yield all reactions in the model that are not part of the consistent subset. Args: model: :class:`MetabolicModel` to solve. epsilon: Flux threshold value. solver: LP solver instance to use.
def fastcc(model, epsilon, solver):
    reaction_set = set(model.reactions)
    subset = set(reaction_id for reaction_id in reaction_set
                 if model.limits[reaction_id].lower >= 0)

    logger.info('Checking {} irreversible reactions...'.format(len(subset)))
    logger.debug('|J| = {}, J = {}'.format(len(subset), subset))

    p = FastcoreProblem(model, solver, epsilon=epsilon)
    p.lp7(subset)

    consistent_subset = set(
        reaction_id for reaction_id in model.reactions
        if abs(p.get_flux(reaction_id)) >= 0.999 * epsilon)

    logger.debug('|A| = {}, A = {}'.format(
        len(consistent_subset), consistent_subset))

    for reaction in subset - consistent_subset:
        # Inconsistent reaction
        yield reaction

    # Check remaining reactions
    subset = (reaction_set - subset) - consistent_subset

    logger.info('Checking reversible reactions...')
    logger.debug('|J| = {}, J = {}'.format(len(subset), subset))

    flipped = False
    singleton = False
    while len(subset) > 0:
        logger.info('{} reversible reactions left to check...'.format(
            len(subset)))
        if singleton:
            reaction = next(iter(subset))
            subset_i = {reaction}

            logger.debug('LP3 on {}'.format(subset_i))
            p.maximize({reaction: -1 if p.is_flipped(reaction) else 1})
        else:
            subset_i = subset

            logger.debug('LP7 on {}'.format(subset_i))
            p.lp7(subset_i)

        # Note: the original had a misplaced parenthesis here, applying
        # abs() to the boolean comparison; the flux magnitude is what
        # must be compared against the threshold.
        consistent_subset.update(
            reaction_id for reaction_id in subset
            if abs(p.get_flux(reaction_id)) >= 0.999 * epsilon)

        logger.debug('|A| = {}, A = {}'.format(
            len(consistent_subset), consistent_subset))

        if not subset.isdisjoint(consistent_subset):
            subset -= consistent_subset
            logger.debug('|J| = {}, J = {}'.format(len(subset), subset))
            flipped = False
        else:
            # TODO: irreversible reactions are taken care of before the
            # loop so at this point all reactions in subset_i are
            # reversible(?).
            subset_rev_i = subset_i & model.reversible
            if flipped or len(subset_rev_i) == 0:
                flipped = False
                if singleton:
                    subset -= subset_rev_i
                    for reaction in subset_rev_i:
                        logger.info('Inconsistent: {}'.format(reaction))
                        yield reaction
                else:
                    singleton = True
            else:
                p.flip(subset_rev_i)
                flipped = True
                logger.info('Flipped {} reactions'.format(len(subset_rev_i)))
958,937
Quickly check whether model is consistent. Return true if the model is consistent. If it is only necessary to know whether a model is consistent, this function is fast as it will return the result as soon as it finds a single inconsistent reaction. Args: model: :class:`MetabolicModel` to solve. epsilon: Flux threshold value. solver: LP solver instance to use.
def fastcc_is_consistent(model, epsilon, solver):
    for reaction in fastcc(model, epsilon, solver):
        return False
    return True
958,938
Return consistent subset of model. The largest consistent subset is returned as a set of reaction names. Args: model: :class:`MetabolicModel` to solve. epsilon: Flux threshold value. solver: LP solver instance to use. Returns: Set of reaction IDs in the consistent reaction subset.
def fastcc_consistent_subset(model, epsilon, solver):
    reaction_set = set(model.reactions)
    return reaction_set.difference(fastcc(model, epsilon, solver))
958,939
Match compounds greedily based on score function. Args: reaction: Reaction equation :class:`psamm.reaction.Reaction`. compound_formula: Dictionary mapping compound IDs to :class:`psamm.formula.Formula`. Formulas must be flattened. score_func: Function that takes two :class:`_CompoundInstance` and returns the score.
def _match_greedily(reaction, compound_formula, score_func):
    uninstantiated_left, uninstantiated_right = _reaction_to_dicts(reaction)

    def compound_instances(uninstantiated):
        instances = []
        for compound, value in iteritems(uninstantiated):
            if value > 0:
                f = compound_formula[compound.name]
                instances.append(_CompoundInstance(compound, value, f))

        for inst in instances:
            uninstantiated[inst.compound] -= 1

        return instances

    def instantiate(uninstantiated, compound):
        n = uninstantiated[compound]
        if n > 0:
            f = compound_formula[compound.name]
            inst = _CompoundInstance(compound, n, f)
            uninstantiated[compound] -= 1
            return inst

        return None

    left = compound_instances(uninstantiated_left)
    right = compound_instances(uninstantiated_right)
    instances = left + right

    pairs = {}
    for inst1, inst2 in product(left, right):
        result = score_func(inst1, inst2)
        if result is not None:
            pairs[inst1, inst2] = result

    def inst_pair_sort_key(entry):
        (inst1, inst2), score = entry
        c1, c2 = inst1.compound, inst2.compound
        same_compound = c1.name == c2.name and c1.compartment != c2.compartment
        return same_compound, score, c1.name, c2.name

    transfer = {}
    while len(pairs) > 0:
        (inst1, inst2), _ = max(iteritems(pairs), key=inst_pair_sort_key)

        common = inst1.formula & inst2.formula
        key = (inst1.compound, inst1.index), (inst2.compound, inst2.index)
        if key not in transfer:
            transfer[key] = Formula()
        transfer[key] |= common

        for inst in (inst1, inst2):
            inst.formula -= common

        to_insert = set()

        inst = instantiate(uninstantiated_left, inst1.compound)
        if inst is not None:
            left.append(inst)
            instances.append(inst)
            to_insert.add(inst)

        inst = instantiate(uninstantiated_right, inst2.compound)
        if inst is not None:
            right.append(inst)
            instances.append(inst)
            to_insert.add(inst)

        to_update = {inst1, inst2}

        to_delete = set()
        for inst1, inst2 in pairs:
            if inst1 in to_update or inst2 in to_update:
                if len(inst1.formula) > 0 and len(inst2.formula) > 0:
                    result = score_func(inst1, inst2)
                    if result is None:
                        to_delete.add((inst1, inst2))
                    else:
                        pairs[inst1, inst2] = result
                else:
                    to_delete.add((inst1, inst2))

        for pair in to_delete:
            del pairs[pair]

        for inst1, inst2 in product(left, right):
            if inst1 in to_insert or inst2 in to_insert:
                result = score_func(inst1, inst2)
                if result is not None:
                    pairs[inst1, inst2] = result

    balance = {}
    for inst in instances:
        if len(inst.formula) > 0:
            key = inst.compound, inst.index
            balance[key] = inst.formula

    return transfer, balance
958,950
Instantiate a new webdriver class Args: outputdir: The path to the directory to use. os_name: Valid options: ['windows', 'linux', 'mac'] os_bits: Valid options: ['32', '64']
def __init__(self, outputdir, os_name, os_bits):
    if type(self) == Basedriver:
        raise Exception('Basedriver cannot be instantiated')
    self.outputdir = outputdir
    self.os_name = os_name
    self.os_bits = os_bits
959,085
Class constructor. Args: kwargs: widget options
def __init__(self, **kwargs):
    super(Aladin, self).__init__(**kwargs)
    # Trigger the handle_aladin_event function when the send function
    # is called on the js-side.
    # See: http://jupyter-notebook.readthedocs.io/en/latest/comms.html
    self.on_msg(self.handle_aladin_event)
959,395
Load a VOTable from a URL and load its data into the widget Args: votable_URL: string url votable_options: dictionary object
def add_catalog_from_URL(self, votable_URL, votable_options={}):
    self.votable_URL = votable_URL
    self.votable_options = votable_options
    self.votable_from_URL_flag = not self.votable_from_URL_flag
959,396
Load a MOC from a URL and display it in the Aladin Lite widget Arguments: moc_URL: string url moc_options: dictionary object
def add_moc_from_URL(self, moc_URL, moc_options={}):
    self.moc_URL = moc_URL
    self.moc_options = moc_options
    self.moc_from_URL_flag = not self.moc_from_URL_flag
959,397
Load a MOC from a dict object and display it in the Aladin Lite widget Arguments: moc_dict: the dict containing the MOC cells. Keys are the HEALPix orders, values are the pixel indexes, e.g.: {"1":[1,2,4], "2":[12,13,14,21,23,25]} moc_options: dictionary object
def add_moc_from_dict(self, moc_dict, moc_options={}):
    self.moc_dict = moc_dict
    self.moc_options = moc_options
    self.moc_from_dict_flag = not self.moc_from_dict_flag
959,398
Load a VOTable (already accessible on the Python side) into the widget Args: table: votable object
def add_table(self, table):
    # This library must be installed; it is used in VOTable operations.
    # http://www.astropy.org/
    import astropy

    table_array = table.__array__()
    self.table_keys = table.keys()
    table_columns = []
    for i in range(0, len(table.columns[0])):
        row_data = []
        # This step is needed in order to properly retrieve string data
        # (otherwise, Aladin Lite shows these values as DataView objects).
        for item in table_array[i]:
            if isinstance(item, bytes):
                row_data.append(item.decode('utf-8'))
            else:
                row_data.append(item)
        table_columns.append(row_data)

    self.table_columns = table_columns
    self.table_flag = not self.table_flag
959,399
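A hedged usage sketch for add_table above: build a small astropy table and hand it to an existing widget instance (the aladin variable is assumed to be an already-constructed Aladin widget):

    from astropy.table import Table

    table = Table({'name': ['M31', 'M51'],
                   'ra': [10.684, 202.469],
                   'dec': [41.269, 47.195]})
    aladin.add_table(table)  # 'aladin' is an existing Aladin widget (assumption)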
Add a listener to the widget Args: listener_type: string, one of 'objectHovered', 'objectClicked', 'click' or 'select' callback: python function
def add_listener(self, listener_type, callback):
    self.listener_type = listener_type
    if listener_type == 'objectHovered':
        self.listener_callback_source_hover = callback
    elif listener_type == 'objectClicked':
        self.listener_callback_source_click = callback
    elif listener_type == 'click':
        self.listener_callback_click = callback
    elif listener_type == 'select':
        self.listener_callback_select = callback
    self.listener_flag = not self.listener_flag
959,400
Parse the ELF header in ``data`` and populate the properties. Args: data(bytes): The ELF header.
def _parse_header(self, data):
    (magic, word_size, byte_order, version, osabi, abi_version, _), data = \
        unpack('4sBBBBB7s', data[:16]), data[16:]

    assert magic == self._ELF_MAGIC, 'Missing ELF magic'
    assert word_size in (1, 2), 'Invalid word size'
    assert byte_order in (1, 2), 'Invalid byte order'
    assert version == 1, 'Invalid version'

    self.osabi = self.OSABI(osabi)
    self.abi_version = abi_version

    endian = Target.Endian(byte_order - 1)

    (type_, machine, version), data = \
        unpack('HHI', data[:8], endian=endian), data[8:]

    try:
        self.type = self.Type(type_)
    except ValueError:
        self.type = self.Type.unknown

    try:
        self.machine = ELF.Machine(machine)
    except ValueError:
        self.machine = ELF.Machine.unknown

    assert version == 1, 'Invalid version'

    if self.machine is ELF.Machine.i386:
        arch = Target.Arch.x86
        assert word_size == 1, 'Unexpected ELF64 for machine type x86'
        assert endian is Target.Endian.little, \
            'Unexpected big-endian for machine type x86'
    elif self.machine is ELF.Machine.x86_64:
        arch = Target.Arch.x86
        assert word_size == 2, 'Unexpected ELF32 for machine type x64_64'
        assert endian is Target.Endian.little, \
            'Unexpected big-endian for machine type x86'
    elif self.machine is ELF.Machine.arm:
        arch = Target.Arch.arm
        assert word_size == 1, 'Unexpected ELF64 for machine type arm'
    elif self.machine is ELF.Machine.aarch64:
        arch = Target.Arch.arm
        assert word_size == 2, 'Unexpected ELF32 for machine type aarch64'
    else:
        arch = Target.Arch.unknown

    self.arch = arch
    self.bits = 32 * word_size
    self.endian = endian

    if self.bits == 32:
        fmt = 'IIIIHHHHHH'
    else:
        fmt = 'QQQIHHHHHH'

    fmt_size = pack_size(fmt)
    (self.entry, self.phoff, self.shoff, self.flags, self.hsize,
     self.phentsize, self.phnum, self.shentsize, self.shnum,
     self.shstrndx) = unpack(fmt, data[:fmt_size], target=self)
959,723
Parse an ELF file and fill the class' properties. Arguments: f(file or str): The (path to) the ELF file to read.
def parse_file(self, f):
    if type(f) is str:
        self.f = open(f, 'rb')
    else:
        self.f = f

    self._parse_header(self.f.read(64))
959,724
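A usage sketch for parse_file; the no-argument ELF() construction and the sample path are assumptions:

elf = ELF()                # assumed: the class can be instantiated empty
elf.parse_file('/bin/ls')  # illustrative path
print(elf.machine, elf.bits, elf.endian)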
Get a specific section header by index or name. Args: section(int or str): The index or name of the section header to return. Returns: :class:`~ELF.SectionHeader`: The section header. Raises: KeyError: The requested section header does not exist.
def get_section_header(self, section):
    self._ensure_section_headers_loaded()
    if type(section) is int:
        return self._section_headers_by_index[section]
    else:
        return self._section_headers_by_name[section]
959,727
Get a specific symbol by index or name. Args: symbol(int or str): The index or name of the symbol to return. Returns: ELF.Symbol: The symbol. Raises: KeyError: The requested symbol does not exist.
def get_symbol(self, symbol):
    self._ensure_symbols_loaded()
    if type(symbol) is int:
        return self._symbols_by_index[symbol]
    else:
        return self._symbols_by_name[symbol]
959,731
Load a .pyc file from a file-like object. Arguments: fp(file): The file-like object to read. Returns: PycFile: The parsed representation of the .pyc file.
def pyc_load(fp):
    magic_1 = U16(fp.read(2), target=MARSHAL_TARGET)
    magic_2 = U16(fp.read(2), target=MARSHAL_TARGET)

    internals = MAGIC_MAP.get(magic_1)
    if internals is None:
        raise ValueError('Invalid or unknown magic (%d).' % magic_1)

    if magic_2 != 2573:
        raise ValueError('Invalid secondary magic (%d).' % magic_2)

    timestamp = datetime.datetime.fromtimestamp(U32(fp.read(4), target=MARSHAL_TARGET))

    if internals['version'] >= 33:
        file_size = U32(fp.read(4))
    else:
        file_size = None

    code_object = marshal_load(fp, internals)

    return PycFile(magic_1, internals, timestamp, file_size, code_object)
959,768
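A usage sketch for pyc_load; the path and the attribute name `code_object` on the returned PycFile are assumptions based on the constructor call above:

with open('example.pyc', 'rb') as fp:  # illustrative path
    pyc = pyc_load(fp)
print(pyc.code_object)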
Disassemble python bytecode into a series of :class:`Op` and :class:`Label` instances. Arguments: code(bytes): The bytecode (a code object's ``co_code`` property). You can also provide a function. origin(dict): The opcode specification of the python version that generated ``code``. If you provide ``None``, the specs for the currently running python version will be used. Returns: list: A list of opcodes and labels.
def disassemble(code, origin=None):
    if inspect.isfunction(code):
        code = six.get_function_code(code).co_code

    origin = get_py_internals(origin)

    opname = origin['opname']
    hasjrel = origin['hasjrel']
    hasjabs = origin['hasjabs']
    hasjump = set(hasjrel) | set(hasjabs)
    wordcode = origin['wordcode']

    if not wordcode:
        ext_arg_shift = 16
    else:
        ext_arg_shift = 8

    ext_arg_name = opname[origin['extended_arg']]
    ext_arg = 0

    addr_labels = {}
    addr_ops = []

    code_iter = enumerate(six.iterbytes(code))
    for op_addr, op_code in code_iter:
        if op_code >= origin['have_argument']:
            rel_addr, arg = next(code_iter)
            if not wordcode:
                rel_addr, b = next(code_iter)
                arg += b << 8

            arg += ext_arg

            if op_code in hasjrel:
                arg += rel_addr

            if op_code in hasjump:
                arg = addr_labels.setdefault(arg, Label())
        else:
            if wordcode:
                next(code_iter)
            arg = None
        # Reset the accumulated EXTENDED_ARG value after every opcode.
        ext_arg = 0

        op_name = opname[op_code]

        if op_name == ext_arg_name:
            ext_arg = arg << ext_arg_shift
            op = None
        else:
            op = Op(op_name, arg)

        addr_ops.append((op_addr, op))

    ops = []
    for op_addr, op in addr_ops:
        label = addr_labels.get(op_addr)
        if label is not None:
            ops.append(label)

        if op is not None:
            ops.append(op)

    return ops
959,816
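A usage sketch for disassemble, exploiting the documented behaviour that a function may be passed directly instead of raw bytecode:

def add(a, b):
    return a + b

for op in disassemble(add):
    print(op)  # Op and Label instances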
Assemble a set of :class:`Op` and :class:`Label` instances back into bytecode. Arguments: ops(list): A list of opcodes and labels (as returned by :func:`disassemble`). target: The opcode specification of the targeted python version. If this is ``None`` the specification of the currently running python version will be used. Returns: bytes: The assembled bytecode.
def assemble(ops, target=None):
    target = get_py_internals(target)

    opmap = target['opmap']
    hasjrel = target['hasjrel']
    hasjabs = target['hasjabs']
    hasjump = set(hasjrel) | set(hasjabs)
    have_argument = target['have_argument']
    extended_arg = target['extended_arg']
    wordcode = target['wordcode']

    if not wordcode:
        def encode_op(output, op_code, op_arg=None):
            n = 1
            if op_arg is None:
                output.append(op_code)
            else:
                n += 2
                ext_arg = op_arg >> 16
                if ext_arg:
                    n += 3
                    output.extend([extended_arg, ext_arg & 255, ext_arg >> 8])
                    op_arg &= 65535
                output.extend([op_code, op_arg & 255, op_arg >> 8])
            return n
    else:
        def encode_op(output, op_code, op_arg=None):
            n = 2
            if op_arg is None:
                output.extend([op_code, 0])
            else:
                ext_arg = op_arg >> 8
                if ext_arg:
                    # The recursive call also needs the output buffer.
                    n += encode_op(output, extended_arg, ext_arg)
                output.extend([op_code, op_arg & 255])
            return n

    # A bit of a chicken and egg problem: The address of a label depends on the instructions
    # before it. However, the instructions before a label might depend on the label itself:
    # For very large functions, jumps may require an EXTENDED_ARG opcode if the jump
    # destination is far away. Which we only know when the label has materialized, which
    # means the address of the label will change on the next pass, which might mean a
    # different jump offset might become larger, etc... We run passes until no label
    # changes address.
    label_address = {}
    while True:
        retry = False
        output = bytearray()
        address = 0

        for op in ops:
            if isinstance(op, Label):
                if label_address.get(op) != address:
                    retry = True
                    label_address[op] = address
                continue

            op_code = opmap[op.name]
            op_arg = op.arg

            if op_code >= have_argument and op_arg is None:
                # Sanity check.
                raise ValueError('Opcode %s requires argument.' % op)
            elif op_code < have_argument and op_arg is not None:
                # Sanity check.
                raise ValueError('Opcode %s should not have an argument.' % op)
            elif isinstance(op_arg, Label):
                if op_code not in hasjump:
                    # Sanity check.
                    raise ValueError('Did not expect label as argument for opcode %s.' % op)

                if op_arg not in ops:
                    # Sanity check.
                    raise ValueError('Label is not part of this op list.')

                # Try to turn the label argument into an address.
                op_arg = label_address.get(op_arg)
                if op_arg is None:
                    # Label hasn't materialized yet, we'll catch it on the next pass.
                    address += encode_op(output, op_code, 0)
                    continue

                if op_code in hasjrel:
                    op_arg -= address
            elif op_code in hasjump:
                # Sanity check.
                raise ValueError('Expected label as argument for opcode %s.' % op)

            # Encode the opcode and the argument.
            n = encode_op(output, op_code, op_arg)
            address += n

            if op_code in hasjrel:
                if not wordcode:
                    op_arg = output[-2] + (output[-1] << 8)
                    if op_arg < n:
                        ext_arg = output[-5] + (output[-4] << 8) - 1
                        output[-5], output[-4] = ext_arg & 255, ext_arg >> 8
                        op_arg += 65536
                    op_arg -= n
                    output[-2], output[-1] = op_arg & 255, op_arg >> 8
                else:
                    for i in itertools.count(1, 2):
                        if n <= output[-i]:
                            output[-i] -= n
                            break
                        output[-i] += 256 - n
                        n = 1

        if not retry:
            return bytes(output)
959,817
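A round-trip sketch: when the origin and target python versions match, assembling the result of disassemble is expected to reproduce the original bytecode. The `six` usage mirrors the code above:

def add(a, b):
    return a + b

code = six.get_function_code(add).co_code
assert assemble(disassemble(code)) == code  # expected to hold for same-version round trips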
Create a new instance from a function. Gets the code object from the function and passes it and any other specified parameters to :meth:`from_code`. Arguments: f(function): The function to get the code object from. Returns: CodeObject: A new :class:`CodeObject` instance.
def from_function(cls, f, *args, **kwargs):
    return cls.from_code(six.get_function_code(f), *args, **kwargs)
959,826
Takes a bytecode operation (:class:`Op`) and annotates it using the data contained in this code object. Arguments: op(Op): An :class:`Op` instance. Returns: AnnotatedOp: An annotated bytecode operation.
def annotate_op(self, op):
    if isinstance(op, Label):
        return op
    else:
        return AnnotatedOp(self, op.name, op.arg)
959,827
Disassemble the bytecode of this code object into a series of opcodes and labels. Can also annotate the opcodes and group the opcodes into blocks based on the labels. Arguments: annotate(bool): Whether to annotate the operations. blocks(bool): Whether to group the operations into blocks. Returns: list: A list of :class:`Op` (or :class:`AnnotatedOp`) instances and labels.
def disassemble(self, annotate=False, blocks=False):
    ops = disassemble(self.co_code, self.internals)

    if annotate:
        ops = [self.annotate_op(op) for op in ops]

    if blocks:
        return blocks_from_ops(ops)
    else:
        return ops
959,828
Read the index, and load the document list from it Arguments: progress_cb --- called during the indexation (may be called *often*). step : DocSearch.INDEX_STEP_LOADING progression : how many elements done yet total : number of elements to do
def reload_index(self, progress_cb=dummy_progress_cb):
    nb_results = self.index.start_reload_index()
    progress = 0
    while self.index.continue_reload_index():
        progress_cb(progress, nb_results, self.INDEX_STEP_LOADING)
        progress += 1
    progress_cb(1, 1, self.INDEX_STEP_LOADING)
    self.index.end_reload_index()
959,860
Create a new label Arguments: label --- the label to create doc --- first document on which the label must be added (required for now)
def create_label(self, label, doc=None, callback=dummy_progress_cb):
    if doc:
        clone = doc.clone()  # make sure it's serializable
        r = self.index.create_label(label, doc=clone)
        return r
959,863
Prepare a capstone disassembler instance for a given target and syntax. Args: syntax(AsmSyntax): The assembler syntax (Intel or AT&T). target(~pwnypack.target.Target): The target to create a disassembler instance for. The global target is used if this argument is ``None``. Returns: An instance of the capstone disassembler. Raises: NotImplementedError: If the specified target isn't supported.
def prepare_capstone(syntax=AsmSyntax.att, target=None):
    if not HAVE_CAPSTONE:
        raise NotImplementedError('pwnypack requires capstone to disassemble to AT&T and Intel syntax')

    if target is None:
        target = pwnypack.target.target

    if target.arch == pwnypack.target.Target.Arch.x86:
        if target.bits is pwnypack.target.Target.Bits.bits_32:
            md = capstone.Cs(capstone.CS_ARCH_X86, capstone.CS_MODE_32)
        else:
            md = capstone.Cs(capstone.CS_ARCH_X86, capstone.CS_MODE_64)
    elif target.arch == pwnypack.target.Target.Arch.arm:
        mode = 0

        if target.bits is pwnypack.target.Target.Bits.bits_32:
            arch = capstone.CS_ARCH_ARM

            # target.mode is a flag set; test the individual flags with a bitwise and.
            if target.mode & pwnypack.target.Target.Mode.arm_thumb:
                mode = capstone.CS_MODE_THUMB
            else:
                mode = capstone.CS_MODE_ARM
                if target.mode & pwnypack.target.Target.Mode.arm_m_class:
                    mode |= capstone.CS_MODE_MCLASS

            if target.mode & pwnypack.target.Target.Mode.arm_v8:
                mode |= capstone.CS_MODE_V8
        else:
            arch = capstone.CS_ARCH_ARM64

        if target.endian is pwnypack.target.Target.Endian.little:
            mode |= capstone.CS_MODE_LITTLE_ENDIAN
        else:
            mode |= capstone.CS_MODE_BIG_ENDIAN

        md = capstone.Cs(arch, mode)
    else:
        raise NotImplementedError('Only x86 and ARM are currently supported.')

    md.skipdata = True

    if syntax is AsmSyntax.att:
        md.syntax = capstone.CS_OPT_SYNTAX_ATT
    elif syntax is AsmSyntax.intel:
        md.skipdata_setup(('db', None, None))
    else:
        raise NotImplementedError('capstone engine only implements AT&T and Intel syntax.')

    return md
959,886
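A usage sketch for prepare_capstone, assuming the global target defaults to x86; the raw bytes are two NOPs followed by a RET:

md = prepare_capstone(syntax=AsmSyntax.intel)
for insn in md.disasm(b'\x90\x90\xc3', 0):
    print('0x%x: %s %s' % (insn.address, insn.mnemonic, insn.op_str))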
Given an element of a de Bruijn sequence, find its index in that sequence. Args: key(str): The piece of the de Bruijn sequence to find. width(int): The width of each element in the sequence. Returns: int: The index of ``key`` in the de Bruijn sequence.
def cycle_find(key, width=4):
    key_len = len(key)
    buf = ''

    it = deBruijn(width, 26)

    for i in range(key_len):
        buf += chr(ord('A') + next(it))

    if buf == key:
        return 0

    for i, c in enumerate(it):
        buf = buf[1:] + chr(ord('A') + c)
        if buf == key:
            return i + 1

    return -1
959,903
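A sketch of how cycle_find relates to the generator it consumes: build a prefix of the same de Bruijn sequence, then look up a 4-character slice (the offset 36 is illustrative):

it = deBruijn(4, 26)
seq = ''.join(chr(ord('A') + next(it)) for _ in range(100))
assert cycle_find(seq[36:40]) == 36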
Returns all the documents matching the given keywords Arguments: sentence --- the query sentence Returns: An array of documents (doc objects)
def find_documents(self, sentence, limit=None, must_sort=True, search_type='fuzzy'):
    sentence = sentence.strip()
    sentence = strip_accents(sentence)

    if sentence == u"":
        return self.get_all_docs()

    result_list_list = []
    total_results = 0

    for query_parser in self.search_param_list[search_type]:
        query = query_parser["query_parser"].parse(sentence)

        sortedby = None
        if must_sort and "sortedby" in query_parser:
            sortedby = query_parser['sortedby']

        if sortedby:
            results = self.__searcher.search(query, limit=limit, sortedby=sortedby)
        else:
            results = self.__searcher.search(query, limit=limit)

        results = [(result['docid'], result['doctype']) for result in results]

        result_list_list.append(results)
        total_results += len(results)

        # guard against limit=None before comparing
        if not must_sort and limit is not None and total_results >= limit:
            break

    # merging results
    docs = set()
    for result_intermediate in result_list_list:
        for result in result_intermediate:
            doc = self._docs_by_id.get(result[0])
            if doc is None:
                continue
            docs.add(doc)

    docs = [d for d in docs]

    if not must_sort and limit is not None:
        docs = docs[:limit]

    return docs
959,934
Search all possible suggestions. Suggestions returned always have at least one document matching. Arguments: sentence --- keywords (single strings) for which we want suggestions Return: An array of sets of keywords. Each set of keywords (-> one string) is a suggestion.
def find_suggestions(self, sentence):
    if not isinstance(sentence, str):
        sentence = str(sentence)

    keywords = sentence.split(" ")
    query_parser = self.search_param_list['strict'][0]['query_parser']

    base_search = u" ".join(keywords).strip()

    final_suggestions = []

    corrector = self.__searcher.corrector("content")
    label_corrector = self.__searcher.corrector("label")
    for (keyword_idx, keyword) in enumerate(keywords):
        if len(keyword) <= MIN_KEYWORD_LEN:
            continue

        keyword_suggestions = label_corrector.suggest(keyword, limit=2)[:]
        keyword_suggestions += corrector.suggest(keyword, limit=5)[:]

        for keyword_suggestion in keyword_suggestions:
            new_suggestion = keywords[:]
            new_suggestion[keyword_idx] = keyword_suggestion
            new_suggestion = u" ".join(new_suggestion).strip()

            if new_suggestion == base_search:
                continue

            # make sure it would return results
            query = query_parser.parse(new_suggestion)
            results = self.__searcher.search(query, limit=1)
            if len(results) <= 0:
                continue

            final_suggestions.append(new_suggestion)

    final_suggestions.sort()
    return final_suggestions
959,935
Create a new label Arguments: label --- the label to create doc --- first document on which the label must be added (required for now)
def create_label(self, label, doc=None):
    label = copy.copy(label)
    assert label not in self.labels.values()

    self.labels[label.name] = label
    self.label_guesser.load(label.name)

    # TODO(Jflesch): Should train with previous documents
    if doc:
        doc.add_label(label)
        self.upd_doc(doc)
        self.commit()
959,936
Add a label on a document. Arguments: label --- The new label (see labels.Label) doc --- The first document on which this label has been added
def add_label(self, doc, label, update_index=True):
    label = copy.copy(label)
    assert label in self.labels.values()

    doc.add_label(label)
    if update_index:
        self.upd_doc(doc)
        self.commit()
959,937
Return a suitable pickle protocol version for a given target. Arguments: target: The internals description of the targeted python version. If this is ``None`` the specification of the currently running python version will be used. protocol(None or int): The requested protocol version (or None for the default of the target python version). Returns: int: A suitable pickle protocol version.
def get_protocol_version(protocol=None, target=None):
    target = get_py_internals(target)

    if protocol is None:
        protocol = target['pickle_default_protocol']

    if protocol > cPickle.HIGHEST_PROTOCOL:
        warnings.warn('Downgrading pickle protocol, running python supports up to %d.' % cPickle.HIGHEST_PROTOCOL)
        protocol = cPickle.HIGHEST_PROTOCOL

    target_highest_protocol = target['pickle_highest_protocol']
    if protocol > target_highest_protocol:
        warnings.warn('Downgrading pickle protocol, target python supports up to %d.' % target_highest_protocol)
        protocol = target_highest_protocol

    return protocol
960,001
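A usage sketch for get_protocol_version; passing a bare version number such as 27 to select the python 2.7 internals is an assumption about what get_py_internals accepts:

protocol = get_protocol_version(target=27)  # assumed: 27 selects the python 2.7 internals
print(protocol)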
Very crude inter-python version opcode translator. Raises SyntaxError when the opcode doesn't exist in the destination opmap. Used to transcribe python code objects between python versions. Arguments: code_obj(pwnypack.bytecode.CodeObject): The code object representation to translate. target(dict): The py_internals structure for the target python version.
def translate_opcodes(code_obj, target):
    target = get_py_internals(target)

    src_ops = code_obj.disassemble()
    dst_opmap = target['opmap']
    dst_ops = []

    op_iter = enumerate(src_ops)
    for i, op in op_iter:
        if isinstance(op, pwnypack.bytecode.Label):
            dst_ops.append(op)
            continue

        if op.name not in dst_opmap:
            if op.name == 'POP_JUMP_IF_FALSE' and 'JUMP_IF_TRUE' in dst_opmap:
                lbl = pwnypack.bytecode.Label()
                dst_ops.extend([
                    pwnypack.bytecode.Op('JUMP_IF_TRUE', lbl),
                    pwnypack.bytecode.Op('POP_TOP', None),
                    pwnypack.bytecode.Op('JUMP_ABSOLUTE', op.arg),
                    lbl,
                    pwnypack.bytecode.Op('POP_TOP', None),
                ])
            elif op.name == 'POP_JUMP_IF_TRUE' and 'JUMP_IF_FALSE' in dst_opmap:
                lbl = pwnypack.bytecode.Label()
                dst_ops.extend([
                    pwnypack.bytecode.Op('JUMP_IF_FALSE', lbl),
                    pwnypack.bytecode.Op('POP_TOP', None),
                    pwnypack.bytecode.Op('JUMP_ABSOLUTE', op.arg),
                    lbl,
                    pwnypack.bytecode.Op('POP_TOP', None),
                ])
            elif op.name == 'JUMP_IF_FALSE' and 'JUMP_IF_FALSE_OR_POP' in dst_opmap and \
                    src_ops[i + 1].name == 'POP_TOP':
                next(op_iter)
                dst_ops.append(pwnypack.bytecode.Op('JUMP_IF_FALSE_OR_POP', op.arg))
            elif op.name == 'JUMP_IF_TRUE' and 'JUMP_IF_TRUE_OR_POP' in dst_opmap and \
                    src_ops[i + 1].name == 'POP_TOP':
                next(op_iter)
                dst_ops.append(pwnypack.bytecode.Op('JUMP_IF_TRUE_OR_POP', op.arg))
            else:
                raise SyntaxError('Opcode %s not supported on target.' % op.name)
        else:
            dst_ops.append(op)

    code_obj.assemble(dst_ops, target)
960,003
Assume the identity of another target. This can be useful to make the global target assume the identity of an ELF executable. Arguments: other(:class:`Target`): The target whose identity to assume. Example: >>> from pwny import * >>> target.assume(ELF('my-executable'))
def assume(self, other):
    self._arch = other._arch
    self._bits = other._bits
    self._endian = other._endian
    self._mode = other._mode
960,285
Allocate a piece of data that will be included in the shellcode body. Arguments: value(...): The value to add to the shellcode. Can be bytes or string type. Returns: ~pwnypack.types.Offset: The offset used to address the data.
def alloc_data(self, value):
    if isinstance(value, six.binary_type):
        return self._alloc_data(value)
    elif isinstance(value, six.text_type):
        return self._alloc_data(value.encode('utf-8') + b'\0')
    else:
        raise TypeError('No idea how to encode %s' % repr(value))
960,448
Allocate a buffer (a range of uninitialized memory). Arguments: length(int): The length of the buffer to allocate. Returns: ~pwnypack.types.Buffer: The object used to address this buffer.
def alloc_buffer(self, length):
    buf = Buffer(sum(len(v) for v in six.iterkeys(self.data)) + sum(v.length for v in self.buffers), length)
    self.buffers.append(buf)
    return buf
960,449
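A sketch of the offset bookkeeping: because each Buffer starts where the data section and all earlier buffers end, consecutive allocations receive consecutive offsets. `env` is a hypothetical shellcode environment instance:

buf1 = env.alloc_buffer(64)
buf2 = env.alloc_buffer(32)
assert buf2.offset == buf1.offset + 64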
Add a value to a register. The value can be another :class:`Register`, an :class:`Offset`, a :class:`Buffer`, an integer or ``None``. Arguments: reg(pwnypack.shellcode.types.Register): The register to add the value to. value: The value to add to the register. Returns: list: A list of mnemonics that will add ``value`` to ``reg``.
def reg_add(self, reg, value):
    if value is None:
        return []
    elif isinstance(value, Register):
        return self.reg_add_reg(reg, value)
    elif isinstance(value, (Buffer, six.integer_types)):
        if isinstance(value, Buffer):
            # Translate a Buffer into its absolute offset past the data section.
            value = sum(len(v) for v in six.iterkeys(self.data)) + value.offset

        if not value:
            return []

        reg_width = self.REGISTER_WIDTH[reg]
        if value < -2 ** (reg_width - 1):
            raise ValueError('%d does not fit %s' % (value, reg))
        elif value >= 2 ** reg_width:
            raise ValueError('%d does not fit %s' % (value, reg))

        if value > 0:
            return self.reg_add_imm(reg, value)
        else:
            return self.reg_sub_imm(reg, -value)
    else:
        raise ValueError('Invalid argument type "%s"' % repr(value))
960,452
Translate a list of operations into its assembler source. Arguments: ops(list): A list of shellcode operations. Returns: str: The assembler source code that implements the shellcode.
def compile(self, ops):
    def _compile():
        code = []

        for op in ops:
            if isinstance(op, SyscallInvoke):
                code.extend(self.syscall(op))
            elif isinstance(op, LoadRegister):
                code.extend(self.reg_load(op.register, op.value))
            elif isinstance(op, str):
                code.extend(op.split('\n'))
            else:
                raise ValueError('No idea how to assemble "%s"' % repr(op))

        return ['\t%s' % line for line in code]

    # We do 2 passes to make sure all data is allocated so buffers point at the right offset.
    _compile()
    return '\n'.join(self.finalize(self.data_finalizer(_compile(), self.data))) + '\n'
960,454
Assemble a list of operations into executable code. Arguments: ops(list): A list of shellcode operations. Returns: bytes: The executable code that implements the shellcode.
def assemble(self, ops):
    return pwnypack.asm.asm(self.compile(ops), target=self.target)
960,455
Read *n* bytes from the subprocess' output channel. Args: n(int): The number of bytes to read. Returns: bytes: *n* bytes of output. Raises: EOFError: If the process exited.
def read(self, n):
    d = b''

    while n:
        try:
            block = self._process.stdout.read(n)
        except ValueError:
            block = None

        if not block:
            self._process.poll()
            raise EOFError('Process ended')

        d += block
        n -= len(block)

    return d
960,466
Write data to the subprocess' input channel. Args: data(bytes): The data to write. Raises: EOFError: If the process exited.
def write(self, data):
    self._process.poll()
    if self._process.returncode is not None:
        raise EOFError('Process ended')
    self._process.stdin.write(data)
960,467
Receive *n* bytes from the socket. Args: n(int): The number of bytes to read. Returns: bytes: *n* bytes read from the socket. Raises: EOFError: If the socket was closed.
def read(self, n):
    d = b''

    while n:
        try:
            block = self._socket.recv(n)
        except socket.error:
            block = None

        if not block:
            raise EOFError('Socket closed')

        d += block
        n -= len(block)

    return d
960,468
Send data to the socket. Args: data(bytes): The data to send. Raises: EOFError: If the socket was closed.
def write(self, data):
    while data:
        try:
            n = self._socket.send(data)
        except socket.error:
            n = None

        if not n:
            raise EOFError('Socket closed')

        data = data[n:]
960,469
Read *n* bytes from the channel. Args: n(int): The number of bytes to read from the channel. echo(bool): Whether to write the read data to stdout. Returns: bytes: *n* bytes of data. Raises: EOFError: If the channel was closed.
def read(self, n, echo=None):
    d = self.channel.read(n)
    if echo or (echo is None and self.echo):
        sys.stdout.write(d.decode('latin1'))
        sys.stdout.flush()
    return d
960,474
Read until the channel is closed. Args: echo(bool): Whether to write the read data to stdout. Returns: bytes: The read data.
def read_eof(self, echo=None):
    d = b''
    while True:
        try:
            d += self.read(1, echo)
        except EOFError:
            return d
960,475
Read until a certain string is encountered. Args: s(bytes): The string to wait for. echo(bool): Whether to write the read data to stdout. Returns: bytes: The data up to and including *s*. Raises: EOFError: If the channel was closed.
def read_until(self, s, echo=None):
    s_len = len(s)
    buf = self.read(s_len, echo)

    while buf[-s_len:] != s:
        buf += self.read(1, echo)

    return buf
960,476
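A usage sketch for read_until; `flow` is a hypothetical Flow instance and the prompt string is illustrative:

prompt = flow.read_until(b'> ')  # returns everything up to and including b'> '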
Read *n* lines from the channel. Args: n(int): The number of lines to read. echo(bool): Whether to write the read data to stdout. Returns: list of bytes: *n* lines which include new line characters. Raises: EOFError: If the channel was closed before *n* lines were read.
def readlines(self, n, echo=None):
    # Read one newline-terminated line at a time using read_until.
    return [
        self.read_until(b'\n', echo)
        for _ in range(n)
    ]
960,477
Write data to the channel. Args: data(bytes): The data to write to the channel. echo(bool): Whether to echo the written data to stdout. Raises: EOFError: If the channel was closed before all data was sent.
def write(self, data, echo=None):
    if echo or (echo is None and self.echo):
        sys.stdout.write(data.decode('latin1'))
        sys.stdout.flush()
    self.channel.write(data)
960,478
Write a list of byte sequences to the channel and terminate them with a separator (line feed). Args: lines(list of bytes): The lines to send. sep(bytes): The separator to use after each line. echo(bool): Whether to echo the written data to stdout. Raises: EOFError: If the channel was closed before all data was sent.
def writelines(self, lines, sep=b'\n', echo=None):
    self.write(sep.join(lines + [b'']), echo)
960,479
Write a byte sequence to the channel and terminate it with a separator (a line feed by default). Args: line(bytes): The line to send. sep(bytes): The separator to use after the line. echo(bool): Whether to echo the written data to stdout. Raises: EOFError: If the channel was closed before all data was sent.
def writeline(self, line=b'', sep=b'\n', echo=None):
    self.writelines([line], sep, echo)
960,480
Set up a :class:`TCPClientSocketChannel` and create a :class:`Flow` instance for it. Args: host(str): The hostname or IP address to connect to. port(int): The port number to connect to. echo(bool): Whether to echo read/written data to stdout by default. Returns: :class:`Flow`: A Flow instance initialised with the TCP socket channel.
def connect_tcp(cls, host, port, echo=False):
    return cls(TCPClientSocketChannel(host, port), echo=echo)
960,483
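A usage sketch tying the pieces together; host, port and payload are illustrative:

flow = Flow.connect_tcp('localhost', 1234)
flow.writeline(b'hello')
reply = flow.read_until(b'\n')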