Dataset schema:
_id: string (length 2 to 7)
title: string (length 1 to 88)
partition: string (3 classes)
text: string (length 75 to 19.8k)
language: string (1 class)
meta_information: dict
q2400
get_smallest_compound_id
train
def get_smallest_compound_id(compounds_identifiers):
    """
    Return the smallest KEGG compound identifier from a list.

    KEGG identifiers may map to compounds, drugs or glycans prefixed
    respectively with "C", "D", and "G" followed by at least 5 digits. We
    choose the lowest KEGG identifier with the assumption that several
    identifiers are due to chirality and that the lower one represents the
    more common form.

    Parameters
    ----------
    compounds_identifiers : list
        A list of mixed KEGG identifiers.

    Returns
    -------
    str
        The KEGG compound identifier with the smallest number.

    Raises
    ------
    ValueError
        When compound_identifiers contains no KEGG compound identifiers.

    """
    return min(
        (c for c in compounds_identifiers if c.startswith("C")),
        key=lambda c: int(c[1:])
    )
python
{ "resource": "" }
q2401
map_metabolite2kegg
train
def map_metabolite2kegg(metabolite):
    """
    Return a KEGG compound identifier for the metabolite if it exists.

    First see if there is an unambiguous mapping to a single KEGG compound ID
    provided with the model. If not, check if there is any KEGG compound ID
    in a list of mappings. KEGG IDs may map to compounds, drugs and glycans.
    KEGG compound IDs are sorted so we keep the lowest that is there. If none
    of this works try mapping to KEGG via the CompoundMatcher by the name of
    the metabolite. If the metabolite cannot be mapped at all we simply map
    it back to its own ID.

    Parameters
    ----------
    metabolite : cobra.Metabolite
        The metabolite to be mapped to its KEGG compound identifier.

    Returns
    -------
    None
        If the metabolite could not be mapped.
    str
        The smallest KEGG compound identifier that was found.

    """
    logger.debug("Looking for KEGG compound identifier for %s.",
                 metabolite.id)
    kegg_annotation = metabolite.annotation.get("kegg.compound")
    if kegg_annotation is None:
        # TODO (Moritz Beber): Currently name matching is very slow and
        #  inaccurate. We disable it until there is a better solution.
        # if metabolite.name:
        #     # The compound matcher uses regular expressions and chokes
        #     # with a low level error on `[` in the name, for example.
        #     df = compound_matcher.match(metabolite.name)
        #     try:
        #         return df.loc[df["score"] > threshold, "CID"].iat[0]
        #     except (IndexError, AttributeError):
        #         logger.warning(
        #             "Could not match the name %r to any kegg.compound "
        #             "annotation for metabolite %s.",
        #             metabolite.name, metabolite.id
        #         )
        #         return
        # else:
        logger.warning("No kegg.compound annotation for metabolite %s.",
                       metabolite.id)
        return
    if isinstance(kegg_annotation, string_types) and \
            kegg_annotation.startswith("C"):
        return kegg_annotation
    elif isinstance(kegg_annotation, Iterable):
        try:
            return get_smallest_compound_id(kegg_annotation)
        except ValueError:
            return
    logger.warning(
        "No matching kegg.compound annotation for metabolite %s.",
        metabolite.id
    )
    return
python
{ "resource": "" }
q2402
translate_reaction
train
def translate_reaction(reaction, metabolite_mapping):
    """
    Return a mapping from KEGG compound identifiers to coefficients.

    Parameters
    ----------
    reaction : cobra.Reaction
        The reaction whose metabolites are to be translated.
    metabolite_mapping : dict
        An existing mapping from cobra.Metabolite to KEGG compound identifier
        that may already contain the metabolites in question or will have to
        be extended.

    Returns
    -------
    dict
        The stoichiometry of the reaction given as a mapping from metabolite
        KEGG identifier to coefficient.

    """
    # Transport reactions where the same metabolite occurs in different
    # compartments should have been filtered out but, just to be sure, we
    # add up coefficients in the mapping.
    stoichiometry = defaultdict(float)
    for met, coef in iteritems(reaction.metabolites):
        kegg_id = metabolite_mapping.setdefault(met, map_metabolite2kegg(met))
        if kegg_id is None:
            continue
        stoichiometry[kegg_id] += coef
    return dict(stoichiometry)
python
{ "resource": "" }
q2403
find_thermodynamic_reversibility_index
train
def find_thermodynamic_reversibility_index(reactions):
    u"""
    Return the reversibility index of the given reactions.

    To determine the reversibility index, we calculate the reversibility
    index ln_gamma (see [1]_ section 3.5) of each reaction using the
    eQuilibrator API [2]_.

    Parameters
    ----------
    reactions : list of cobra.Reaction
        A list of reactions for which to calculate the reversibility index.

    Returns
    -------
    tuple
        list of (cobra.Reaction, index) pairs
            A list of pairs of reactions and their reversibility indexes.
        list of cobra.Reaction
            A list of reactions which contain at least one metabolite that
            could not be mapped to KEGG on the basis of its annotation.
        list of cobra.Reaction
            A list of reactions for which it is not possible to calculate
            the standard change in Gibbs free energy potential. Reasons of
            failure include that participating metabolites cannot be broken
            down with the group contribution method.
        list of cobra.Reaction
            A list of reactions that are not chemically or redox balanced.

    References
    ----------
    .. [1] Elad Noor, Arren Bar-Even, Avi Flamholz, Yaniv Lubling, Dan
           Davidi, Ron Milo; An integrated open framework for thermodynamics
           of reactions that combines accuracy and coverage, Bioinformatics,
           Volume 28, Issue 15, 1 August 2012, Pages 2037–2044,
           https://doi.org/10.1093/bioinformatics/bts317
    .. [2] https://pypi.org/project/equilibrator-api/

    """
    incomplete_mapping = []
    problematic_calculation = []
    reversibility_indexes = []
    unbalanced = []
    metabolite_mapping = {}
    for rxn in reactions:
        stoich = translate_reaction(rxn, metabolite_mapping)
        if len(stoich) < len(rxn.metabolites):
            incomplete_mapping.append(rxn)
            continue
        try:
            # Remove protons from stoichiometry.
            if "C00080" in stoich:
                del stoich["C00080"]
            eq_rxn = Reaction(stoich, rxn.id)
        except KeyError:
            incomplete_mapping.append(rxn)
            continue
        if eq_rxn.check_full_reaction_balancing():
            try:
                ln_rev_index = eq_rxn.reversibility_index()
            # TODO (Moritz Beber): Which exceptions can we expect here?
            except Exception:
                problematic_calculation.append(rxn)
                continue
            reversibility_indexes.append((rxn, ln_rev_index))
        else:
            unbalanced.append(rxn)
    reversibility_indexes.sort(key=lambda p: abs(p[1]), reverse=True)
    return (
        reversibility_indexes, incomplete_mapping, problematic_calculation,
        unbalanced
    )
python
{ "resource": "" }
q2404
check_stoichiometric_consistency
train
def check_stoichiometric_consistency(model):
    """
    Verify the consistency of the model's stoichiometry.

    Parameters
    ----------
    model : cobra.Model
        The metabolic model under investigation.

    Notes
    -----
    See [1]_ section 3.1 for a complete description of the algorithm.

    .. [1] Gevorgyan, A., M. G Poolman, and D. A Fell.
           "Detection of Stoichiometric Inconsistencies in Biomolecular
           Models."
           Bioinformatics 24, no. 19 (2008): 2245.

    """
    problem = model.problem
    # The transpose of the stoichiometric matrix N.T in the paper.
    stoich_trans = problem.Model()
    internal_rxns = con_helpers.get_internals(model)
    metabolites = set(met for rxn in internal_rxns for met in rxn.metabolites)
    LOGGER.info("model '%s' has %d internal reactions", model.id,
                len(internal_rxns))
    LOGGER.info("model '%s' has %d internal metabolites", model.id,
                len(metabolites))
    stoich_trans.add([problem.Variable(m.id, lb=1) for m in metabolites])
    stoich_trans.update()
    con_helpers.add_reaction_constraints(
        stoich_trans, internal_rxns, problem.Constraint)
    # The objective is to minimize the metabolite mass vector.
    stoich_trans.objective = problem.Objective(
        Zero, direction="min", sloppy=True)
    stoich_trans.objective.set_linear_coefficients(
        {var: 1. for var in stoich_trans.variables})
    status = stoich_trans.optimize()
    if status == OPTIMAL:
        return True
    elif status == INFEASIBLE:
        return False
    else:
        raise RuntimeError(
            "Could not determine stoichiometric consistency."
            " Solver status is '{}'"
            " (only optimal or infeasible expected).".format(status))
python
{ "resource": "" }
q2405
find_unconserved_metabolites
train
def find_unconserved_metabolites(model):
    """
    Detect unconserved metabolites.

    Parameters
    ----------
    model : cobra.Model
        The metabolic model under investigation.

    Notes
    -----
    See [1]_ section 3.2 for a complete description of the algorithm.

    .. [1] Gevorgyan, A., M. G Poolman, and D. A Fell.
           "Detection of Stoichiometric Inconsistencies in Biomolecular
           Models."
           Bioinformatics 24, no. 19 (2008): 2245.

    """
    problem = model.problem
    stoich_trans = problem.Model()
    internal_rxns = con_helpers.get_internals(model)
    metabolites = set(met for rxn in internal_rxns for met in rxn.metabolites)
    # The binary variables k[i] in the paper.
    k_vars = list()
    for met in metabolites:
        # The element m[i] of the mass vector.
        m_var = problem.Variable(met.id)
        k_var = problem.Variable("k_{}".format(met.id), type="binary")
        k_vars.append(k_var)
        stoich_trans.add([m_var, k_var])
        # This constraint is equivalent to 0 <= k[i] <= m[i].
        stoich_trans.add(problem.Constraint(
            k_var - m_var, ub=0, name="switch_{}".format(met.id)))
    stoich_trans.update()
    con_helpers.add_reaction_constraints(
        stoich_trans, internal_rxns, problem.Constraint)
    # The objective is to maximize the binary indicators k[i], subject to
    # the above inequality constraints.
    stoich_trans.objective = problem.Objective(
        Zero, sloppy=True, direction="max")
    stoich_trans.objective.set_linear_coefficients(
        {var: 1. for var in k_vars})
    status = stoich_trans.optimize()
    if status == OPTIMAL:
        # TODO: See if that could be a Boolean test `bool(var.primal)`.
        return set([model.metabolites.get_by_id(var.name[2:])
                    for var in k_vars if var.primal < 0.8])
    else:
        raise RuntimeError(
            "Could not compute list of unconserved metabolites."
            " Solver status is '{}' (only optimal expected).".format(status))
python
{ "resource": "" }
q2406
find_inconsistent_min_stoichiometry
train
def find_inconsistent_min_stoichiometry(model, atol=1e-13):
    """
    Detect inconsistent minimal net stoichiometries.

    Parameters
    ----------
    model : cobra.Model
        The metabolic model under investigation.
    atol : float, optional
        Values below the absolute tolerance are treated as zero. Expected to
        be very small but larger than zero.

    Notes
    -----
    See [1]_ section 3.3 for a complete description of the algorithm.

    References
    ----------
    .. [1] Gevorgyan, A., M. G Poolman, and D. A Fell.
           "Detection of Stoichiometric Inconsistencies in Biomolecular
           Models."
           Bioinformatics 24, no. 19 (2008): 2245.

    """
    if check_stoichiometric_consistency(model):
        return set()
    Model, Constraint, Variable, Objective = con_helpers.get_interface(model)
    unconserved_mets = find_unconserved_metabolites(model)
    LOGGER.info("model has %d unconserved metabolites", len(unconserved_mets))
    internal_rxns = con_helpers.get_internals(model)
    internal_mets = set(
        met for rxn in internal_rxns for met in rxn.metabolites)
    get_id = attrgetter("id")
    reactions = sorted(internal_rxns, key=get_id)
    metabolites = sorted(internal_mets, key=get_id)
    stoich, met_index, rxn_index = con_helpers.stoichiometry_matrix(
        metabolites, reactions)
    left_ns = con_helpers.nullspace(stoich.T)
    # Deal with numerical instabilities.
    left_ns[np.abs(left_ns) < atol] = 0.0
    LOGGER.info("nullspace has dimension %d", left_ns.shape[1])
    inc_minimal = set()
    (problem, indicators) = con_helpers.create_milp_problem(
        left_ns, metabolites, Model, Variable, Constraint, Objective)
    LOGGER.debug(str(problem))
    cuts = list()
    for met in unconserved_mets:
        row = met_index[met]
        if (left_ns[row] == 0.0).all():
            LOGGER.debug("%s: singleton minimal unconservable set", met.id)
            # Singleton set!
            inc_minimal.add((met,))
            continue
        # Expect a positive mass for the unconserved metabolite.
        problem.variables[met.id].lb = 1e-3
        status = problem.optimize()
        while status == "optimal":
            LOGGER.debug("%s: status %s", met.id, status)
            LOGGER.debug("sum of all primal values: %f",
                         sum(problem.primal_values.values()))
            LOGGER.debug("sum of binary indicators: %f",
                         sum(var.primal for var in indicators))
            solution = [model.metabolites.get_by_id(var.name[2:])
                        for var in indicators if var.primal > 0.2]
            LOGGER.debug("%s: set size %d", met.id, len(solution))
            inc_minimal.add(tuple(solution))
            if len(solution) == 1:
                break
            cuts.append(con_helpers.add_cut(
                problem, indicators, len(solution) - 1, Constraint))
            status = problem.optimize()
        LOGGER.debug("%s: last status %s", met.id, status)
        # Reset the problem.
        problem.variables[met.id].lb = 0.0
        problem.remove(cuts)
        cuts.clear()
    return inc_minimal
python
{ "resource": "" }
q2407
detect_energy_generating_cycles
train
def detect_energy_generating_cycles(model, metabolite_id):
    u"""
    Detect erroneous energy-generating cycles for a single metabolite.

    The function will first build a dissipation reaction corresponding to
    the input metabolite. This reaction is then set as the objective for
    optimization, after closing all exchanges. If the reaction was able to
    carry flux, an erroneous energy-generating cycle must be present. In
    this case a list of reactions with a flux greater than zero is returned.
    Otherwise, the function returns an empty list.

    Parameters
    ----------
    model : cobra.Model
        The metabolic model under investigation.
    metabolite_id : str
        The identifier of an energy metabolite.

    Notes
    -----
    "[...] energy generating cycles (EGC) [...] charge energy metabolites
    without a source of energy. [...] To efficiently identify the existence
    of diverse EGCs, we first add a dissipation reaction to the metabolic
    network for each metabolite used to transmit cellular energy; e.g., for
    ATP, the irreversible reaction ATP + H2O → ADP + P + H+ is added. These
    dissipation reactions close any existing energy-generating cycles,
    thereby converting them to type-III pathways. Fluxes through any of the
    dissipation reactions at steady state indicate the generation of energy
    through the metabolic network. Second, all uptake reactions are
    constrained to zero. The sum of the fluxes through the energy
    dissipation reactions is now maximized using FBA. For a model without
    EGCs, these reactions cannot carry any flux without the uptake of
    nutrients. [1]_."

    References
    ----------
    .. [1] Fritzemeier, C. J., Hartleb, D., Szappanos, B., Papp, B., &
           Lercher, M. J. (2017). Erroneous energy-generating cycles in
           published genome scale metabolic networks: Identification and
           removal. PLoS Computational Biology, 13(4), 1–14.
           http://doi.org/10.1371/journal.pcbi.1005494

    """
    main_comp = helpers.find_compartment_id_in_model(model, 'c')
    met = helpers.find_met_in_model(model, metabolite_id, main_comp)[0]
    dissipation_rxn = Reaction('Dissipation')
    if metabolite_id in ['MNXM3', 'MNXM63', 'MNXM51', 'MNXM121', 'MNXM423']:
        # Build a nucleotide-type dissipation reaction.
        dissipation_rxn.add_metabolites({
            helpers.find_met_in_model(model, 'MNXM2', main_comp)[0]: -1,
            helpers.find_met_in_model(model, 'MNXM1', main_comp)[0]: 1,
            helpers.find_met_in_model(model, 'MNXM9', main_comp)[0]: 1,
        })
    elif metabolite_id in ['MNXM6', 'MNXM10']:
        # Build a nicotinamide-type dissipation reaction.
        dissipation_rxn.add_metabolites({
            helpers.find_met_in_model(model, 'MNXM1', main_comp)[0]: 1
        })
    elif metabolite_id in ['MNXM38', 'MNXM208', 'MNXM191', 'MNXM223',
                           'MNXM7517', 'MNXM12233', 'MNXM558']:
        # Build a redox-partner-type dissipation reaction.
        dissipation_rxn.add_metabolites({
            helpers.find_met_in_model(model, 'MNXM1', main_comp)[0]: 2
        })
    elif metabolite_id == 'MNXM21':
        dissipation_rxn.add_metabolites({
            helpers.find_met_in_model(model, 'MNXM2', main_comp)[0]: -1,
            helpers.find_met_in_model(model, 'MNXM1', main_comp)[0]: 1,
            helpers.find_met_in_model(model, 'MNXM26', main_comp)[0]: 1,
        })
    elif metabolite_id == 'MNXM89557':
        dissipation_rxn.add_metabolites({
            helpers.find_met_in_model(model, 'MNXM2', main_comp)[0]: -1,
            helpers.find_met_in_model(model, 'MNXM1', main_comp)[0]: 2,
            helpers.find_met_in_model(model, 'MNXM15', main_comp)[0]: 1,
        })
    dissipation_product = helpers.find_met_in_model(
        model, ENERGY_COUPLES[metabolite_id], main_comp)[0]
    dissipation_rxn.add_metabolites({met: -1, dissipation_product: 1})
    helpers.close_boundaries_sensibly(model)
    model.add_reactions([dissipation_rxn])
    model.objective = dissipation_rxn
    solution = model.optimize(raise_error=True)
    if solution.objective_value > 0.0:
        return solution.fluxes[solution.fluxes.abs() > 0.0].index.drop(
            ["Dissipation"]).tolist()
    else:
        return []
python
{ "resource": "" }
q2408
find_orphans
train
def find_orphans(model):
    """
    Return metabolites that are only consumed in reactions.

    Metabolites that are involved in an exchange reaction are never
    considered to be orphaned.

    Parameters
    ----------
    model : cobra.Model
        The metabolic model under investigation.

    """
    exchange = frozenset(model.exchanges)
    return [
        met for met in model.metabolites
        if (len(met.reactions) > 0) and all(
            (not rxn.reversibility) and (rxn not in exchange) and
            (rxn.metabolites[met] < 0) for rxn in met.reactions
        )
    ]
python
{ "resource": "" }
q2409
find_metabolites_not_produced_with_open_bounds
train
def find_metabolites_not_produced_with_open_bounds(model):
    """
    Return metabolites that cannot be produced with open exchange reactions.

    A perfect model should be able to produce each and every metabolite when
    all medium components are available.

    Parameters
    ----------
    model : cobra.Model
        The metabolic model under investigation.

    Returns
    -------
    list
        Those metabolites that could not be produced.

    """
    mets_not_produced = list()
    helpers.open_exchanges(model)
    for met in model.metabolites:
        with model:
            exch = model.add_boundary(
                met, type="irrex", reaction_id="IRREX", lb=0, ub=1000)
            solution = helpers.run_fba(model, exch.id)
            if np.isnan(solution) or solution < TOLERANCE_THRESHOLD:
                mets_not_produced.append(met)
    return mets_not_produced
python
{ "resource": "" }
q2410
find_metabolites_not_consumed_with_open_bounds
train
def find_metabolites_not_consumed_with_open_bounds(model):
    """
    Return metabolites that cannot be consumed with open boundary reactions.

    When all metabolites can be secreted, it should be possible for each and
    every metabolite to be consumed in some form.

    Parameters
    ----------
    model : cobra.Model
        The metabolic model under investigation.

    Returns
    -------
    list
        Those metabolites that could not be consumed.

    """
    mets_not_consumed = list()
    helpers.open_exchanges(model)
    for met in model.metabolites:
        with model:
            exch = model.add_boundary(
                met, type="irrex", reaction_id="IRREX", lb=-1000, ub=0)
            solution = helpers.run_fba(model, exch.id, direction="min")
            if np.isnan(solution) or abs(solution) < TOLERANCE_THRESHOLD:
                mets_not_consumed.append(met)
    return mets_not_consumed
python
{ "resource": "" }
q2411
find_reactions_with_unbounded_flux_default_condition
train
def find_reactions_with_unbounded_flux_default_condition(model):
    """
    Return list of reactions whose flux is unbounded in the default
    condition.

    Parameters
    ----------
    model : cobra.Model
        The metabolic model under investigation.

    Returns
    -------
    tuple
        list
            A list of reactions that in default modeling conditions are able
            to carry flux as high/low as the system's maximal and minimal
            bounds.
        float
            The fraction of the amount of unbounded reactions to the amount
            of non-blocked reactions.
        list
            A list of reactions that in default modeling conditions are not
            able to carry flux at all.

    """
    try:
        fva_result = flux_variability_analysis(model, fraction_of_optimum=1.0)
    except Infeasible as err:
        LOGGER.error("Failed to find reactions with unbounded flux "
                     "because '{}'. This may be a bug.".format(err))
        raise Infeasible("It was not possible to run flux variability "
                         "analysis on the model. Make sure that the model "
                         "can be solved! Check if the constraints are not "
                         "too strict.")
    # Per reaction (row) the flux is below threshold (close to zero).
    conditionally_blocked = fva_result.loc[
        fva_result.abs().max(axis=1) < TOLERANCE_THRESHOLD
    ].index.tolist()
    small, large = helpers.find_bounds(model)
    # Find those reactions whose flux is close to or outside of the median
    # upper or lower bound, i.e., appears unconstrained.
    unlimited_flux = fva_result.loc[
        np.isclose(fva_result["maximum"], large, atol=TOLERANCE_THRESHOLD) |
        (fva_result["maximum"] > large) |
        np.isclose(fva_result["minimum"], small, atol=TOLERANCE_THRESHOLD) |
        (fva_result["minimum"] < small)
    ].index.tolist()
    try:
        fraction = len(unlimited_flux) / \
            (len(model.reactions) - len(conditionally_blocked))
    except ZeroDivisionError:
        LOGGER.error("Division by zero! Failed to calculate the fraction "
                     "of unbounded reactions. Does this model have any "
                     "reactions at all?")
        raise ZeroDivisionError("It was not possible to calculate the "
                                "fraction of unbounded reactions to "
                                "un-blocked reactions. This may be because "
                                "the model doesn't have any reactions at "
                                "all or that none of the reactions can "
                                "carry a flux larger than zero!")
    return unlimited_flux, fraction, conditionally_blocked
python
{ "resource": "" }
q2412
read_tabular
train
def read_tabular(filename, dtype_conversion=None):
    """
    Read a tabular data file which can be CSV, TSV, XLS or XLSX.

    Parameters
    ----------
    filename : str or pathlib.Path
        The full file path. May be a compressed file.
    dtype_conversion : dict
        Column names as keys and corresponding type for loading the data.
        Please take a look at the `pandas documentation
        <https://pandas.pydata.org/pandas-docs/stable/io.html#specifying-column-data-types>`__
        for detailed explanations.

    Returns
    -------
    pandas.DataFrame
        The data table.

    """
    if dtype_conversion is None:
        dtype_conversion = {}
    name, ext = filename.split(".", 1)
    ext = ext.lower()
    # Completely empty columns are interpreted as float by default.
    dtype_conversion["comment"] = str
    if "csv" in ext:
        df = pd.read_csv(filename, dtype=dtype_conversion, encoding="utf-8")
    elif "tsv" in ext:
        df = pd.read_table(filename, dtype=dtype_conversion, encoding="utf-8")
    elif "xls" in ext or "xlsx" in ext:
        df = pd.read_excel(filename, dtype=dtype_conversion, encoding="utf-8")
    # TODO: Add a function to parse ODS data into a pandas data frame.
    else:
        raise ValueError("Unknown file format '{}'.".format(ext))
    return df
python
{ "resource": "" }
q2413
snapshot
train
def snapshot(model, filename, pytest_args, exclusive, skip, solver,
             experimental, custom_tests, custom_config):
    """
    Take a snapshot of a model's state and generate a report.

    MODEL: Path to model file. Can also be supplied via the environment
    variable MEMOTE_MODEL or configured in 'setup.cfg' or 'memote.ini'.
    """
    model_obj, sbml_ver, notifications = api.validate_model(model)
    if model_obj is None:
        LOGGER.critical(
            "The model could not be loaded due to the following SBML "
            "errors.")
        utils.stdout_notifications(notifications)
        api.validation_report(model, notifications, filename)
        sys.exit(1)
    if not any(a.startswith("--tb") for a in pytest_args):
        pytest_args = ["--tb", "no"] + pytest_args
    # Add further directories to search for tests.
    pytest_args.extend(custom_tests)
    config = ReportConfiguration.load()
    # Update the default test configuration with custom ones (if any).
    for custom in custom_config:
        config.merge(ReportConfiguration.load(custom))
    model_obj.solver = solver
    _, results = api.test_model(
        model_obj, sbml_version=sbml_ver, results=True,
        pytest_args=pytest_args, skip=skip, exclusive=exclusive,
        experimental=experimental)
    with open(filename, "w", encoding="utf-8") as file_handle:
        LOGGER.info("Writing snapshot report to '%s'.", filename)
        file_handle.write(api.snapshot_report(results, config))
python
{ "resource": "" }
q2414
history
train
def history(location, model, filename, deployment, custom_config):
    """Generate a report over a model's git commit history."""
    callbacks.git_installed()
    LOGGER.info("Initialising history report generation.")
    if location is None:
        raise click.BadParameter("No 'location' given or configured.")
    try:
        repo = git.Repo()
    except git.InvalidGitRepositoryError:
        LOGGER.critical(
            "The history report requires a git repository in order to check "
            "the model's commit history.")
        sys.exit(1)
    LOGGER.info("Obtaining history of results from "
                "the deployment branch {}.".format(deployment))
    repo.git.checkout(deployment)
    try:
        manager = managers.SQLResultManager(repository=repo,
                                            location=location)
    except (AttributeError, ArgumentError):
        manager = managers.RepoResultManager(repository=repo,
                                             location=location)
    config = ReportConfiguration.load()
    # Update the default test configuration with custom ones (if any).
    for custom in custom_config:
        config.merge(ReportConfiguration.load(custom))
    LOGGER.info("Tracing the commit history.")
    history = managers.HistoryManager(repository=repo, manager=manager)
    history.load_history(model, skip={deployment})
    LOGGER.info("Composing the history report.")
    report = api.history_report(history, config=config)
    with open(filename, "w", encoding="utf-8") as file_handle:
        file_handle.write(report)
python
{ "resource": "" }
q2415
diff
train
def diff(models, filename, pytest_args, exclusive, skip, solver,
         experimental, custom_tests, custom_config):
    """
    Take a snapshot of all the supplied models and generate a diff report.

    MODELS: List of paths to two or more model files.
    """
    if not any(a.startswith("--tb") for a in pytest_args):
        pytest_args = ["--tb", "no"] + pytest_args
    # Add further directories to search for tests.
    pytest_args.extend(custom_tests)
    config = ReportConfiguration.load()
    # Update the default test configuration with custom ones (if any).
    for custom in custom_config:
        config.merge(ReportConfiguration.load(custom))
    # Build the diff report specific data structure.
    diff_results = dict()
    model_and_model_ver_tuple = list()
    for model_path in models:
        try:
            model_filename = os.path.basename(model_path)
            diff_results.setdefault(model_filename, dict())
            model, model_ver, notifications = api.validate_model(model_path)
            if model is None:
                head, tail = os.path.split(filename)
                report_path = os.path.join(
                    head, '{}_structural_report.html'.format(model_filename))
                api.validation_report(model_path, notifications, report_path)
                LOGGER.critical(
                    "The model {} could not be loaded due to SBML errors "
                    "reported in {}.".format(model_filename, report_path))
                continue
            model.solver = solver
            model_and_model_ver_tuple.append((model, model_ver))
        except (IOError, SBMLError):
            # `logging.debug` requires a message; the original call passed
            # only `exc_info=True`.
            LOGGER.debug("Exception details:", exc_info=True)
            LOGGER.warning("An error occurred while loading the model '%s'. "
                           "Skipping.", model_filename)
    # Abort the diff report unless at least two models can be loaded
    # successfully.
    if len(model_and_model_ver_tuple) < 2:
        LOGGER.critical(
            "Out of the %d provided models only %d could be loaded. Please, "
            "check if the models that could not be loaded are valid SBML. "
            "Aborting.", len(models), len(model_and_model_ver_tuple))
        sys.exit(1)
    # Run pytest in individual processes to avoid interference.
    partial_test_diff = partial(_test_diff, pytest_args=pytest_args,
                                skip=skip, exclusive=exclusive,
                                experimental=experimental)
    pool = Pool(min(len(models), cpu_count()))
    results = pool.map(partial_test_diff, model_and_model_ver_tuple)
    for model_path, result in zip(models, results):
        model_filename = os.path.basename(model_path)
        diff_results[model_filename] = result
    with open(filename, "w", encoding="utf-8") as file_handle:
        LOGGER.info("Writing diff report to '%s'.", filename)
        file_handle.write(api.diff_report(diff_results, config))
python
{ "resource": "" }
q2416
HistoryManager.build_branch_structure
train
def build_branch_structure(self, model, skip):
    """Inspect and record the repo's branches and their history."""
    self._history = dict()
    self._history["commits"] = commits = dict()
    self._history["branches"] = branches = dict()
    for branch in self._repo.refs:
        LOGGER.debug(branch.name)
        if branch.name in skip:
            continue
        branches[branch.name] = branch_history = list()
        latest = branch.commit
        history = [latest] + list(latest.iter_parents())
        for commit in history:
            # Find the model in the committed files.
            if not is_modified(model, commit):
                LOGGER.info(
                    "The model was not modified in commit '{}'. "
                    "Skipping.".format(commit))
                continue
            branch_history.append(commit.hexsha)
            if commit.hexsha not in commits:
                commits[commit.hexsha] = sub = dict()
                sub["timestamp"] = commit.authored_datetime.isoformat(" ")
                sub["author"] = commit.author.name
                sub["email"] = commit.author.email
    LOGGER.debug("%s", json.dumps(self._history, indent=2))
python
{ "resource": "" }
q2417
HistoryManager.load_history
train
def load_history(self, model, skip={"gh-pages"}):
    """
    Load the entire results history into memory.

    Could be a bad idea in a far future.

    """
    if self._history is None:
        self.build_branch_structure(model, skip)
    self._results = dict()
    all_commits = list(self._history["commits"])
    for commit in all_commits:
        try:
            self._results[commit] = self.manager.load(commit)
        except (IOError, NoResultFound) as err:
            LOGGER.error("Could not load result '%s'.", commit)
            LOGGER.debug("%s", str(err))
python
{ "resource": "" }
q2418
HistoryManager.get_result
train
def get_result(self, commit, default=MemoteResult()):
    """Return an individual result from the history if it exists."""
    assert self._results is not None, \
        "Please call the method `load_history` first."
    return self._results.get(commit, default)
python
{ "resource": "" }
q2419
absolute_extreme_coefficient_ratio
train
def absolute_extreme_coefficient_ratio(model):
    """
    Return the maximum and minimum absolute, non-zero coefficients.

    Parameters
    ----------
    model : cobra.Model
        The metabolic model under investigation.

    """
    s_matrix, _, _ = con_helpers.stoichiometry_matrix(
        model.metabolites, model.reactions
    )
    abs_matrix = np.abs(s_matrix)
    return abs_matrix.max(), abs_matrix[abs_matrix > 0].min()
python
{ "resource": "" }
q2420
number_independent_conservation_relations
train
def number_independent_conservation_relations(model):
    """
    Return the number of conserved metabolite pools.

    This number is given by the dimension of the left null space of the
    stoichiometric matrix.

    Parameters
    ----------
    model : cobra.Model
        The metabolic model under investigation.

    """
    s_matrix, _, _ = con_helpers.stoichiometry_matrix(
        model.metabolites, model.reactions
    )
    ln_matrix = con_helpers.nullspace(s_matrix.T)
    return ln_matrix.shape[1]
python
{ "resource": "" }
q2421
matrix_rank
train
def matrix_rank(model):
    """
    Return the rank of the model's stoichiometric matrix.

    Parameters
    ----------
    model : cobra.Model
        The metabolic model under investigation.

    """
    s_matrix, _, _ = con_helpers.stoichiometry_matrix(
        model.metabolites, model.reactions
    )
    return con_helpers.rank(s_matrix)
python
{ "resource": "" }
q2422
degrees_of_freedom
train
def degrees_of_freedom(model):
    """
    Return the degrees of freedom, i.e., the number of "free variables".

    Parameters
    ----------
    model : cobra.Model
        The metabolic model under investigation.

    Notes
    -----
    This specifically refers to the dimensionality of the (right) null space
    of the stoichiometric matrix, as dim(Null(S)) corresponds directly to
    the number of free variables in the system [1]_. The formula used
    calculates this using the rank-nullity theorem [2]_.

    References
    ----------
    .. [1] Fukuda, K. & Terlaky, T. Criss-cross methods: A fresh view on
           pivot algorithms. Mathematical Programming 79, 369-395 (1997).
    .. [2] Alama, J. The Rank+Nullity Theorem. Formalized Mathematics 15,
           (2007).

    """
    s_matrix, _, _ = con_helpers.stoichiometry_matrix(
        model.metabolites, model.reactions
    )
    return s_matrix.shape[1] - matrix_rank(model)
python
{ "resource": "" }
q2423
ExperimentConfiguration.load
train
def load(self, model):
    """
    Load all information from an experimental configuration file.

    Parameters
    ----------
    model : cobra.Model
        The metabolic model under investigation.

    """
    self.load_medium(model)
    self.load_essentiality(model)
    self.load_growth(model)
    # self.load_experiment(config.config.get("growth"), model)
    return self
python
{ "resource": "" }
q2424
ExperimentConfiguration.validate
train
def validate(self):
    """Validate the configuration file."""
    validator = Draft4Validator(self.SCHEMA)
    if not validator.is_valid(self.config):
        for err in validator.iter_errors(self.config):
            LOGGER.error(str(err.message))
        validator.validate(self.config)
python
{ "resource": "" }
q2425
ExperimentConfiguration.load_medium
train
def load_medium(self, model):
    """Load and validate all media."""
    media = self.config.get("medium")
    if media is None:
        return
    definitions = media.get("definitions")
    if definitions is None or len(definitions) == 0:
        return
    path = self.get_path(media, join("data", "experimental", "media"))
    for medium_id, medium in iteritems(definitions):
        if medium is None:
            medium = dict()
        filename = medium.get("filename")
        if filename is None:
            filename = join(path, "{}.csv".format(medium_id))
        elif not isabs(filename):
            filename = join(path, filename)
        tmp = Medium(identifier=medium_id, obj=medium, filename=filename)
        tmp.load()
        tmp.validate(model)
        self.media[medium_id] = tmp
python
{ "resource": "" }
q2426
ExperimentConfiguration.get_path
train
def get_path(self, obj, default):
    """Return a relative or absolute path to experimental data."""
    path = obj.get("path")
    if path is None:
        path = join(self._base, default)
    if not isabs(path):
        path = join(self._base, path)
    return path
python
{ "resource": "" }
q2427
find_components_without_annotation
train
def find_components_without_annotation(model, components):
    """
    Find model components with empty annotation attributes.

    Parameters
    ----------
    model : cobra.Model
        A cobrapy metabolic model.
    components : {"metabolites", "reactions", "genes"}
        A string denoting `cobra.Model` components.

    Returns
    -------
    list
        The components without any annotation.

    """
    return [elem for elem in getattr(model, components)
            if elem.annotation is None or len(elem.annotation) == 0]
python
{ "resource": "" }
q2428
generate_component_annotation_miriam_match
train
def generate_component_annotation_miriam_match(elements, component, db):
    """
    Tabulate which MIRIAM databases the elements' annotations match.

    If the relevant MIRIAM identifier is not in an element's annotation it
    is ignored.

    Parameters
    ----------
    elements : list
        Elements of a model, either metabolites or reactions.
    component : {"metabolites", "reactions"}
        A string denoting a type of ``cobra.Model`` component.
    db : str
        One of the MIRIAM database identifiers.

    Returns
    -------
    list
        The components whose annotation does not match the pattern for the
        MIRIAM database.

    """
    def is_faulty(annotation, key, pattern):
        # Ignore missing annotation for this database.
        if key not in annotation:
            return False
        test = annotation[key]
        if isinstance(test, native_str):
            return pattern.match(test) is None
        else:
            return any(pattern.match(elem) is None for elem in test)

    pattern = {
        "metabolites": METABOLITE_ANNOTATIONS,
        "reactions": REACTION_ANNOTATIONS,
        "genes": GENE_PRODUCT_ANNOTATIONS
    }[component][db]
    return [elem for elem in elements
            if is_faulty(elem.annotation, db, pattern)]
python
{ "resource": "" }
q2429
generate_component_id_namespace_overview
train
def generate_component_id_namespace_overview(model, components):
    """
    Tabulate which MIRIAM databases the component's identifier matches.

    Parameters
    ----------
    model : cobra.Model
        A cobrapy metabolic model.
    components : {"metabolites", "reactions", "genes"}
        A string denoting `cobra.Model` components.

    Returns
    -------
    pandas.DataFrame
        The index of the table is given by the component identifiers. Each
        column corresponds to one MIRIAM database and a Boolean entry
        determines whether the annotation matches.

    """
    patterns = {
        "metabolites": METABOLITE_ANNOTATIONS,
        "reactions": REACTION_ANNOTATIONS,
        "genes": GENE_PRODUCT_ANNOTATIONS
    }[components]
    databases = list(patterns)
    data = list()
    index = list()
    for elem in getattr(model, components):
        index.append(elem.id)
        data.append(tuple(patterns[db].match(elem.id) is not None
                          for db in databases))
    df = pd.DataFrame(data, index=index, columns=databases)
    if components != "genes":
        # Clean up the data frame. Unfortunately the BioCyc patterns match
        # broadly. Hence, whenever a metabolite or reaction ID matches any
        # other DB pattern AND the BioCyc pattern, we have to assume that
        # this is a false positive.
        # First determine all rows in which 'biocyc' and other entries are
        # True simultaneously and use this Boolean series to create another
        # column temporarily.
        df['duplicate'] = df[df['biocyc']].sum(axis=1) >= 2
        # Replace all NaN values with False.
        df['duplicate'].fillna(False, inplace=True)
        # Use the additional column to index the original data frame to
        # identify false positive BioCyc hits and set them to False.
        df.loc[df['duplicate'], 'biocyc'] = False
        # Delete the additional column.
        del df['duplicate']
    return df
python
{ "resource": "" }
q2430
confusion_matrix
train
def confusion_matrix(predicted_essential, expected_essential,
                     predicted_nonessential, expected_nonessential):
    """
    Compute a representation of the confusion matrix.

    Parameters
    ----------
    predicted_essential : set
    expected_essential : set
    predicted_nonessential : set
    expected_nonessential : set

    Returns
    -------
    dict
        Confusion matrix as different keys of a dictionary. The abbreviated
        keys correspond to the ones used in [1]_.

    References
    ----------
    .. [1] `Wikipedia entry for the Confusion matrix
           <https://en.wikipedia.org/wiki/Confusion_matrix>`_

    """
    true_positive = predicted_essential & expected_essential
    tp = len(true_positive)
    true_negative = predicted_nonessential & expected_nonessential
    tn = len(true_negative)
    false_positive = predicted_essential - expected_essential
    fp = len(false_positive)
    false_negative = predicted_nonessential - expected_nonessential
    fn = len(false_negative)
    # Sensitivity or true positive rate.
    try:
        tpr = tp / (tp + fn)
    except ZeroDivisionError:
        tpr = None
    # Specificity or true negative rate.
    try:
        tnr = tn / (tn + fp)
    except ZeroDivisionError:
        tnr = None
    # Precision or positive predictive value.
    try:
        ppv = tp / (tp + fp)
    except ZeroDivisionError:
        ppv = None
    # False discovery rate (undefined whenever the precision is undefined).
    fdr = None if ppv is None else 1 - ppv
    # Accuracy.
    try:
        acc = (tp + tn) / (tp + tn + fp + fn)
    except ZeroDivisionError:
        acc = None
    # Compute the Matthews correlation coefficient.
    try:
        mcc = (tp * tn - fp * fn) / \
            sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
    except ZeroDivisionError:
        mcc = None
    return {
        "TP": list(true_positive),
        "TN": list(true_negative),
        "FP": list(false_positive),
        "FN": list(false_negative),
        "TPR": tpr,
        "TNR": tnr,
        "PPV": ppv,
        "FDR": fdr,
        "ACC": acc,
        "MCC": mcc
    }
python
{ "resource": "" }
q2431
validate_model
train
def validate_model(path):
    """
    Validate a model structurally and optionally store results as JSON.

    Parameters
    ----------
    path : str
        Path to model file.

    Returns
    -------
    tuple
        cobra.Model
            The metabolic model under investigation.
        tuple
            A tuple reporting on the SBML level, version, and FBC package
            version used (if any) in the SBML document.
        dict
            A simple dictionary containing a list of errors and warnings.

    """
    notifications = {"warnings": [], "errors": []}
    model, sbml_ver = val.load_cobra_model(path, notifications)
    return model, sbml_ver, notifications
python
{ "resource": "" }
q2432
snapshot_report
train
def snapshot_report(result, config=None, html=True):
    """
    Generate a snapshot report from a result set and configuration.

    Parameters
    ----------
    result : memote.MemoteResult
        Nested dictionary structure as returned from the test suite.
    config : dict, optional
        The final test report configuration (default None).
    html : bool, optional
        Whether to render the report as full HTML or JSON (default True).

    """
    if config is None:
        config = ReportConfiguration.load()
    report = SnapshotReport(result=result, configuration=config)
    if html:
        return report.render_html()
    else:
        return report.render_json()
python
{ "resource": "" }
q2433
history_report
train
def history_report(history, config=None, html=True):
    """
    Generate a history report from previous results and a configuration.

    Parameters
    ----------
    history : memote.HistoryManager
        The manager grants access to previous results.
    config : dict, optional
        The final test report configuration.
    html : bool, optional
        Whether to render the report as full HTML or JSON (default True).

    """
    if config is None:
        config = ReportConfiguration.load()
    report = HistoryReport(history=history, configuration=config)
    if html:
        return report.render_html()
    else:
        return report.render_json()
python
{ "resource": "" }
q2434
diff_report
train
def diff_report(diff_results, config=None, html=True):
    """
    Generate a diff report from a result set and configuration.

    Parameters
    ----------
    diff_results : iterable of memote.MemoteResult
        Nested dictionary structure as returned from the test suite.
    config : dict, optional
        The final test report configuration (default None).
    html : bool, optional
        Whether to render the report as full HTML or JSON (default True).

    """
    if config is None:
        config = ReportConfiguration.load()
    report = DiffReport(diff_results=diff_results, configuration=config)
    if html:
        return report.render_html()
    else:
        return report.render_json()
python
{ "resource": "" }
q2435
validation_report
train
def validation_report(path, notifications, filename):
    """
    Generate a validation report from a notification object.

    Parameters
    ----------
    path : string
        Path to model file.
    notifications : dict
        A simple dictionary structure containing a list of errors and
        warnings.
    filename : string
        Path to the report output file.

    """
    env = Environment(
        loader=PackageLoader('memote.suite', 'templates'),
        autoescape=select_autoescape(['html', 'xml'])
    )
    template = env.get_template('validation_template.html')
    model = os.path.basename(path)
    with open(filename, "w") as file_h:
        file_h.write(template.render(model=model,
                                     notifications=notifications))
python
{ "resource": "" }
q2436
ReportConfiguration.load
train
def load(cls, filename=None):
    """Load a test report configuration."""
    if filename is None:
        LOGGER.debug("Loading default configuration.")
        with open_text(templates, "test_config.yml",
                       encoding="utf-8") as file_handle:
            content = yaml.load(file_handle)
    else:
        LOGGER.debug("Loading custom configuration '%s'.", filename)
        try:
            with open(filename, encoding="utf-8") as file_handle:
                content = yaml.load(file_handle)
        except IOError as err:
            LOGGER.error(
                "Failed to load the custom configuration '%s'. Skipping.",
                filename)
            LOGGER.debug(str(err))
            content = dict()
    return cls(content)
python
{ "resource": "" }
q2437
find_top_level_complex
train
def find_top_level_complex(gpr):
    """
    Find unique elements of both branches of the top level logical AND.

    Parameters
    ----------
    gpr : str
        The gene-protein-reaction association as a string.

    Returns
    -------
    int
        The size of the symmetric difference between the set of elements to
        the left of the top level logical AND and the right set.

    """
    logger.debug("%r", gpr)
    conform = logical_and.sub("and", gpr)
    conform = logical_or.sub("or", conform)
    conform = escape_chars.sub("_", conform)
    expression = ast.parse(conform)
    walker = GPRVisitor()
    walker.visit(expression)
    return len(walker.left ^ walker.right)
python
{ "resource": "" }
q2438
GPRVisitor.visit_BoolOp
train
def visit_BoolOp(self, node):
    """Set up recording of elements with this hook."""
    if self._is_top and isinstance(node.op, ast.And):
        self._is_top = False
        self._current = self.left
        self.visit(node.values[0])
        self._current = self.right
        for successor in node.values[1:]:
            self.visit(successor)
    else:
        self.generic_visit(node)
python
{ "resource": "" }
q2439
find_nonzero_constrained_reactions
train
def find_nonzero_constrained_reactions(model):
    """Return list of reactions with non-zero, non-maximal bounds."""
    lower_bound, upper_bound = helpers.find_bounds(model)
    return [rxn for rxn in model.reactions
            if 0 > rxn.lower_bound > lower_bound or
            0 < rxn.upper_bound < upper_bound]
python
{ "resource": "" }
q2440
find_zero_constrained_reactions
train
def find_zero_constrained_reactions(model):
    """Return list of reactions that are constrained to zero flux."""
    return [rxn for rxn in model.reactions
            if rxn.lower_bound == 0 and rxn.upper_bound == 0]
python
{ "resource": "" }
q2441
find_unconstrained_reactions
train
def find_unconstrained_reactions(model):
    """Return list of reactions that are not constrained at all."""
    lower_bound, upper_bound = helpers.find_bounds(model)
    return [rxn for rxn in model.reactions
            if rxn.lower_bound <= lower_bound and
            rxn.upper_bound >= upper_bound]
python
{ "resource": "" }
q2442
find_ngam
train
def find_ngam(model):
    u"""
    Return all potential non growth-associated maintenance reactions.

    From the list of all reactions that convert ATP to ADP select the
    reactions that match a defined reaction string and whose metabolites are
    situated within the main model compartment. The main model compartment
    is the cytosol, and if that cannot be identified, the compartment with
    the most metabolites.

    Parameters
    ----------
    model : cobra.Model
        The metabolic model under investigation.

    Returns
    -------
    list
        Reactions that qualify as non-growth associated maintenance
        reactions.

    Notes
    -----
    [1]_ define the non-growth associated maintenance (NGAM) as the energy
    required to maintain all constant processes such as turgor pressure and
    other housekeeping activities. In metabolic models this is expressed by
    requiring a simple ATP hydrolysis reaction to always have a fixed
    minimal amount of flux. This value can be measured as described by [1]_.

    References
    ----------
    .. [1] Thiele, I., & Palsson, B. Ø. (2010, January). A protocol for
           generating a high-quality genome-scale metabolic reconstruction.
           Nature protocols. Nature Publishing Group.
           http://doi.org/10.1038/nprot.2009.203

    """
    atp_adp_conv_rxns = helpers.find_converting_reactions(
        model, ("MNXM3", "MNXM7")
    )
    id_of_main_compartment = helpers.find_compartment_id_in_model(model, 'c')
    reactants = {
        helpers.find_met_in_model(model, "MNXM3", id_of_main_compartment)[0],
        helpers.find_met_in_model(model, "MNXM2", id_of_main_compartment)[0]
    }
    products = {
        helpers.find_met_in_model(model, "MNXM7", id_of_main_compartment)[0],
        helpers.find_met_in_model(model, "MNXM1", id_of_main_compartment)[0],
        helpers.find_met_in_model(model, "MNXM9", id_of_main_compartment)[0]
    }
    candidates = [rxn for rxn in atp_adp_conv_rxns
                  if rxn.reversibility is False and
                  set(rxn.reactants) == reactants and
                  set(rxn.products) == products]
    buzzwords = ['maintenance', 'atpm', 'requirement', 'ngam', 'non-growth',
                 'associated']
    refined_candidates = [
        rxn for rxn in candidates if any(
            string in filter_none(rxn.name, '').lower()
            for string in buzzwords
        )
    ]
    if refined_candidates:
        return refined_candidates
    else:
        return candidates
python
{ "resource": "" }
q2443
calculate_metabolic_coverage
train
def calculate_metabolic_coverage(model):
    u"""
    Return the ratio of reactions to genes included in the model.

    Check that the numbers of reactions and genes in the model are not equal
    to zero, then return the ratio.

    Parameters
    ----------
    model : cobra.Model
        The metabolic model under investigation.

    Returns
    -------
    float
        The ratio of reactions to genes, also called metabolic coverage.

    Raises
    ------
    ValueError
        If the model does not contain either reactions or genes.

    Notes
    -----
    According to [1]_ this is a good quality indicator expressing the degree
    of metabolic coverage, i.e. the modeling detail of a given
    reconstruction. The authors explain that models with a 'high level of
    modeling detail have ratios >1, and [models] with low level of detail
    have ratios <1'. They explain that 'this difference arises because
    [models] with basic or intermediate levels of detail often include many
    reactions in which several gene products and their enzymatic
    transformations are ‘lumped’'.

    References
    ----------
    .. [1] Monk, J., Nogales, J., & Palsson, B. O. (2014). Optimizing
           genome-scale network reconstructions. Nature Biotechnology,
           32(5), 447–452. http://doi.org/10.1038/nbt.2870

    """
    if len(model.reactions) == 0 or len(model.genes) == 0:
        raise ValueError("The model contains no reactions or genes.")
    return float(len(model.reactions)) / float(len(model.genes))
python
{ "resource": "" }
q2444
find_protein_complexes
train
def find_protein_complexes(model):
    """
    Find reactions that are catalyzed by at least a heterodimer.

    Parameters
    ----------
    model : cobra.Model
        The metabolic model under investigation.

    Returns
    -------
    list
        Reactions whose gene-protein-reaction association contains at least
        one logical AND combining different gene products (heterodimer).

    """
    complexes = []
    for rxn in model.reactions:
        if not rxn.gene_reaction_rule:
            continue
        size = find_top_level_complex(rxn.gene_reaction_rule)
        if size >= 2:
            complexes.append(rxn)
    return complexes
python
{ "resource": "" }
q2445
is_constrained_reaction
train
def is_constrained_reaction(model, rxn):
    """Return whether a reaction has fixed constraints."""
    lower_bound, upper_bound = helpers.find_bounds(model)
    if rxn.reversibility:
        return rxn.lower_bound > lower_bound or rxn.upper_bound < upper_bound
    else:
        return rxn.lower_bound > 0 or rxn.upper_bound < upper_bound
python
{ "resource": "" }
q2446
find_unique_metabolites
train
def find_unique_metabolites(model):
    """Return a set of metabolite IDs without duplicates from
    compartments."""
    unique = set()
    for met in model.metabolites:
        is_missing = True
        for comp in model.compartments:
            if met.id.endswith("_{}".format(comp)):
                unique.add(met.id[:-(len(comp) + 1)])
                is_missing = False
                break
        if is_missing:
            unique.add(met.id)
    return unique
python
{ "resource": "" }
q2447
find_duplicate_metabolites_in_compartments
train
def find_duplicate_metabolites_in_compartments(model):
    """
    Return a list of metabolites with duplicates in the same compartment.

    This function identifies duplicate metabolites in each compartment by
    determining if any two metabolites have identical InChI-key annotations.
    For instance, this function would find compounds with IDs ATP1 and ATP2
    in the cytosolic compartment, with both having the same InChI
    annotations.

    Parameters
    ----------
    model : cobra.Model
        The metabolic model under investigation.

    Returns
    -------
    list
        A list of tuples of duplicate metabolites.

    """
    unique_identifiers = ["inchikey", "inchi"]
    duplicates = []
    for met_1, met_2 in combinations(model.metabolites, 2):
        if met_1.compartment == met_2.compartment:
            for key in unique_identifiers:
                if key in met_1.annotation and key in met_2.annotation:
                    if met_1.annotation[key] == met_2.annotation[key]:
                        duplicates.append((met_1.id, met_2.id))
                        break
    return duplicates
python
{ "resource": "" }
q2448
find_reactions_with_partially_identical_annotations
train
def find_reactions_with_partially_identical_annotations(model):
    """
    Return duplicate reactions based on identical annotation.

    Identify duplicate reactions globally by checking if any two metabolic
    reactions have the same entries in their annotation attributes. This can
    be useful to identify one 'type' of reactions that occurs in several
    compartments, to curate merged models, or to clean up bulk model
    modifications. The heuristic looks at annotations with the keys
    "metanetx.reaction", "kegg.reaction", "brenda", "rhea", "biocyc", and
    "bigg.reaction" only.

    Parameters
    ----------
    model : cobra.Model
        The metabolic model under investigation.

    Returns
    -------
    dict
        A mapping from sets of annotations to groups of reactions with those
        annotations.
    int
        The total number of unique reactions that are duplicated.

    """
    duplicates = {}
    rxn_db_identifiers = ["metanetx.reaction", "kegg.reaction", "brenda",
                          "rhea", "biocyc", "bigg.reaction"]
    # Build a list that associates a reaction with a set of its annotations.
    ann_rxns = []
    for rxn in model.reactions:
        ann = []
        for key in rxn_db_identifiers:
            if key in rxn.annotation:
                if isinstance(rxn.annotation[key], list):
                    ann.extend([(key, elem) for elem in rxn.annotation[key]])
                else:
                    ann.append((key, rxn.annotation[key]))
        ann_rxns.append((rxn, frozenset(ann)))
    # Compute the intersection between annotations and record the matching
    # reaction identifiers.
    for (rxn_a, ann_a), (rxn_b, ann_b) in combinations(ann_rxns, 2):
        mutual_pair = tuple(ann_a & ann_b)
        if len(mutual_pair) > 0:
            duplicates.setdefault(mutual_pair, set()).update(
                [rxn_a.id, rxn_b.id])
    # Transform the object for JSON compatibility.
    num_duplicated = set()
    duplicated = {}
    for key in duplicates:
        # Object keys must be strings in JSON.
        new_key = ",".join(sorted("{}:{}".format(ns, term)
                                  for ns, term in key))
        duplicated[new_key] = rxns = list(duplicates[key])
        num_duplicated.update(rxns)
    return duplicated, len(num_duplicated)
python
{ "resource": "" }
q2449
map_metabolites_to_structures
train
def map_metabolites_to_structures(metabolites, compartments):
    """
    Map metabolites from the identifier namespace to structural space.

    Metabolites that lack structural annotation (InChI or InChIKey) are
    ignored.

    Parameters
    ----------
    metabolites : iterable
        The cobra.Metabolites to map.
    compartments : iterable
        The different compartments to consider. Structures are treated
        separately for each compartment.

    Returns
    -------
    dict
        A mapping from a cobra.Metabolite to its compartment specific
        structure index.

    """
    # TODO (Moritz Beber): Consider SMILES?
    unique_identifiers = ["inchikey", "inchi"]
    met2mol = {}
    molecules = {c: [] for c in compartments}
    for met in metabolites:
        ann = []
        for key in unique_identifiers:
            mol = met.annotation.get(key)
            if mol is not None:
                ann.append(mol)
        # Ignore metabolites without the required information.
        if len(ann) == 0:
            continue
        ann = set(ann)
        # Compare with other structures in the same compartment.
        mols = molecules[met.compartment]
        for i, mol_group in enumerate(mols):
            if len(ann & mol_group) > 0:
                mol_group.update(ann)
                # We map to the index of the group because it is hashable
                # and cheaper to compare later.
                met2mol[met] = "{}-{}".format(met.compartment, i)
                break
        if met not in met2mol:
            # The length of the list corresponds to the 0-index after
            # appending.
            met2mol[met] = "{}-{}".format(met.compartment, len(mols))
            mols.append(ann)
    return met2mol
python
{ "resource": "" }
q2450
find_duplicate_reactions
train
def find_duplicate_reactions(model):
    """
    Return a list with pairs of reactions that are functionally identical.

    Identify duplicate reactions globally by checking if any two reactions
    have the same metabolites, same directionality and are in the same
    compartment. This can be useful to curate merged models or to clean up
    bulk model modifications. The heuristic compares reactions in a pairwise
    manner. For each reaction, the metabolite annotations are checked for a
    description of the structure (via InChI and InChIKey). If they exist,
    substrates and products as well as the stoichiometries of any reaction
    pair are compared. Only reactions where the substrates, products,
    stoichiometry and reversibility are identical are considered to be
    duplicates. This test will not be able to identify duplicate reactions
    if there are no structure annotations. Further, it will report reactions
    with differing bounds as equal if they otherwise match the above
    conditions.

    Parameters
    ----------
    model : cobra.Model
        The metabolic model under investigation.

    Returns
    -------
    list
        A list of pairs of duplicate reactions based on metabolites.
    int
        The number of unique reactions that have duplicates.

    """
    met2mol = map_metabolites_to_structures(model.metabolites,
                                            model.compartments)
    # Build a list associating reactions with their stoichiometry in
    # molecular structure space.
    structural = []
    for rxn in model.reactions:
        # Ignore reactions that have metabolites without structures.
        if not all(met in met2mol for met in rxn.metabolites):
            continue
        # We consider substrates and products separately since, for example,
        # the InChI for H2O and OH is the same.
        substrates = {
            met2mol[met]: rxn.get_coefficient(met) for met in rxn.reactants
        }
        products = {
            met2mol[met]: rxn.get_coefficient(met) for met in rxn.products
        }
        structural.append((rxn, substrates, products))
    # Compare reactions using their structure-based stoichiometries.
    num_duplicated = set()
    duplicates = []
    for (rxn_a, sub_a, prod_a), (rxn_b, sub_b, prod_b) in combinations(
            structural, 2):
        # Compare the substrates.
        if sub_a != sub_b:
            continue
        # Compare the products.
        if prod_a != prod_b:
            continue
        # Compare whether they are both (ir-)reversible.
        if rxn_a.reversibility != rxn_b.reversibility:
            continue
        # TODO (Moritz Beber): We could compare bounds here but it might be
        #  worth knowing about the reactions even if their bounds differ?
        duplicates.append((rxn_a.id, rxn_b.id))
        num_duplicated.add(rxn_a.id)
        num_duplicated.add(rxn_b.id)
    return duplicates, len(num_duplicated)
python
{ "resource": "" }
q2451
find_reactions_with_identical_genes
train
def find_reactions_with_identical_genes(model):
    """
    Return reactions that have identical genes.

    Identify duplicate reactions globally by checking if any two reactions
    have the same genes. This can be useful to curate merged models or to
    clean up bulk model modifications, but also to identify promiscuous
    enzymes. The heuristic compares reactions in a pairwise manner and
    reports on reaction pairs whose genes are identical. Reactions with
    missing genes are skipped.

    Parameters
    ----------
    model : cobra.Model
        The metabolic model under investigation.

    Returns
    -------
    dict
        A mapping from sets of genes to all the reactions containing those
        genes.
    int
        The total number of unique reactions that appear to be duplicates
        based on their gene-protein-reaction associations.

    """
    duplicates = dict()
    for rxn_a, rxn_b in combinations(model.reactions, 2):
        if not (rxn_a.genes and rxn_b.genes):
            continue
        if rxn_a.genes == rxn_b.genes:
            # This works because the `genes` are frozen sets.
            identifiers = rxn_a.genes
            duplicates.setdefault(identifiers, set()).update(
                [rxn_a.id, rxn_b.id])
    # Transform the object for JSON compatibility.
    num_duplicated = set()
    duplicated = {}
    for key in duplicates:
        # Object keys must be strings in JSON.
        new_key = ",".join(sorted(g.id for g in key))
        duplicated[new_key] = rxns = list(duplicates[key])
        num_duplicated.update(rxns)
    return duplicated, len(num_duplicated)
python
{ "resource": "" }
q2452
find_external_metabolites
train
def find_external_metabolites(model): """Return all metabolites in the external compartment.""" ex_comp = find_external_compartment(model) return [met for met in model.metabolites if met.compartment == ex_comp]
python
{ "resource": "" }
q2453
ResultManager.store
train
def store(self, result, filename, pretty=True): """ Write a result to the given file. Parameters ---------- result : memote.MemoteResult The dictionary structure of results. filename : str or pathlib.Path Store results directly to the given filename. pretty : bool, optional Whether (default) or not to write JSON in a more legible format. """ LOGGER.info("Storing result in '%s'.", filename) if filename.endswith(".gz"): with gzip.open(filename, "wb") as file_handle: file_handle.write( jsonify(result, pretty=pretty).encode("utf-8") ) else: with open(filename, "w", encoding="utf-8") as file_handle: file_handle.write(jsonify(result, pretty=pretty))
python
{ "resource": "" }
q2454
ResultManager.load
train
def load(self, filename): """Load a result from the given JSON file.""" LOGGER.info("Loading result from '%s'.", filename) if filename.endswith(".gz"): with gzip.open(filename, "rb") as file_handle: result = MemoteResult( json.loads(file_handle.read().decode("utf-8")) ) else: with open(filename, "r", encoding="utf-8") as file_handle: result = MemoteResult(json.load(file_handle)) # TODO (Moritz Beber): Validate the read-in JSON maybe? Trade-off # between extra time taken and correctness. Maybe we re-visit this # issue when there was a new JSON format version needed. return result
python
{ "resource": "" }
q2455
load_cobra_model
train
def load_cobra_model(path, notifications): """Load a COBRA model with meta information from an SBML document.""" doc = libsbml.readSBML(path) fbc = doc.getPlugin("fbc") sbml_ver = doc.getLevel(), doc.getVersion(), fbc if fbc is None else \ fbc.getVersion() with catch_warnings(record=True) as warnings: simplefilter("always") try: model = read_sbml_model(path) except Exception as err: notifications['errors'].append(str(err)) model = None validate = True else: validate = False notifications['warnings'].extend([str(w.message) for w in warnings]) if validate: run_sbml_validation(doc, notifications) return model, sbml_ver
python
{ "resource": "" }
q2456
format_failure
train
def format_failure(failure): """Format how an error or warning should be displayed.""" return "Line {}, Column {} - #{}: {} - Category: {}, Severity: {}".format( failure.getLine(), failure.getColumn(), failure.getErrorId(), failure.getMessage(), failure.getCategoryAsString(), failure.getSeverity() )
python
{ "resource": "" }
q2457
run_sbml_validation
train
def run_sbml_validation(document, notifications): """Report errors and warnings found in an SBML document.""" validator = libsbml.SBMLValidator() validator.validate(document) for i in range(document.getNumErrors()): notifications['errors'].append(format_failure(document.getError(i))) for i in range(validator.getNumFailures()): failure = validator.getFailure(i) if failure.isWarning(): notifications['warnings'].append(format_failure(failure)) else: notifications['errors'].append(format_failure(failure))
python
{ "resource": "" }
q2458
SQLResultManager.load
train
def load(self, commit=None): """Load a result from the database.""" git_info = self.record_git_info(commit) LOGGER.info("Loading result from '%s'.", git_info.hexsha) result = MemoteResult( self.session.query(Result.memote_result). filter_by(hexsha=git_info.hexsha). one().memote_result) # Add git info so the object is equivalent to the one returned by the # RepoResultManager. self.add_git(result.meta, git_info) return result
python
{ "resource": "" }
q2459
HistoryReport.collect_history
train
def collect_history(self): """Build the structure of results in terms of a commit history.""" def format_data(data): """Format result data according to the user-defined type.""" # TODO Remove this failsafe once proper error handling is in place. if type == "percent" or data is None: # Return an empty list here to reduce the output file size. # The angular report will ignore the `data` and instead display # the `metric`. return [] if type == "count": return len(data) return data base = dict() tests = base.setdefault("tests", dict()) score = base.setdefault("score", dict()) score_collection = score.setdefault("total_score", dict()) for branch, commits in self._history.iter_branches(): for commit in reversed(commits): result = self.result = self._history.get_result(commit) # Calculate the score for each result and store all the total # scores for each commit in the base dictionary. self.compute_score() total_score = self.result["score"]["total_score"] score_collection.setdefault("history", list()) score_collection["format_type"] = "score" score_collection["history"].append({ "branch": branch, "commit": commit, "metric": total_score}) # Now arrange the results for each test into the appropriate # format. Specifically such that the Accordion and the Vega # Plot components can easily read them. for test in result.cases: tests.setdefault(test, dict()) if "title" not in tests[test]: tests[test]["title"] = result.cases[test]["title"] if "summary" not in tests[test]: tests[test]["summary"] = result.cases[test]["summary"] if "type" not in tests[test]: tests[test]["format_type"] = result.cases[test][ "format_type"] type = tests[test]["format_type"] metric = result.cases[test].get("metric") data = result.cases[test].get("data") res = result.cases[test].get("result") if isinstance(metric, dict): tests[test].setdefault("history", dict()) for param in metric: tests[test]["history"].setdefault(param, list()). \ append({ "branch": branch, "commit": commit, "metric": metric.get(param), "data": format_data(data.get(param)), "result": res.get(param)}) else: tests[test].setdefault("history", list()).append({ "branch": branch, "commit": commit, "metric": metric, "data": format_data(data), "result": res }) return base
python
{ "resource": "" }
q2460
DiffReport.format_and_score_diff_data
train
def format_and_score_diff_data(self, diff_results): """Reformat the api results to work with the front-end.""" base = dict() meta = base.setdefault('meta', dict()) tests = base.setdefault('tests', dict()) score = base.setdefault('score', dict()) for model_filename, result in iteritems(diff_results): if meta == dict(): meta = result["meta"] for test_id, test_results in iteritems(result["tests"]): tests.setdefault(test_id, dict()) if tests[test_id] == dict(): tests[test_id]["summary"] = test_results["summary"] tests[test_id]["title"] = test_results["title"] tests[test_id]["format_type"] = test_results["format_type"] if isinstance(test_results["metric"], dict): tests[test_id].setdefault("diff", dict()) for param in test_results["metric"]: tests[test_id]["diff"].setdefault(param, list()). \ append({ "model": model_filename, "data": test_results["data"].setdefault(param), "duration": test_results["duration"].setdefault(param), "message": test_results["message"].setdefault(param), "metric": test_results["metric"].setdefault(param), "result": test_results["result"].setdefault(param)}) else: tests[test_id].setdefault("diff", list()) tests[test_id]["diff"].append({ "model": model_filename, "data": test_results.setdefault("data"), "duration": test_results.setdefault("duration"), "message": test_results.setdefault("message"), "metric": test_results.setdefault("metric"), "result": test_results.setdefault("result")}) self.result = result self.compute_score() score.setdefault('total_score', dict()).setdefault('diff', list()) score.setdefault('sections', dict()).setdefault('diff', list()) score['total_score']['diff'].append({ "model": model_filename, "total_score": self.result['score']['total_score']}) for section in self.result['score']['sections']: section.update({"model": model_filename}) score['sections']['diff'].append(section) return base
python
{ "resource": "" }
q2461
generate_shortlist
train
def generate_shortlist(mnx_db, shortlist): """ Create a condensed cross-references format from data in long form. Both data frames must contain a column 'MNX_ID' and the dump is assumed to also have a column 'XREF'. Parameters ---------- mnx_db : pandas.DataFrame The entire MetaNetX dump as a data frame. shortlist : pandas.DataFrame The shortlist of targets as a data frame. Returns ------- pandas.DataFrame A condensed format with MetaNetX identifiers as the column index and database identifiers as the row index. Elements are lists and often have multiple entries. """ # Reduce the whole database to targets of interest. xref = mnx_db.loc[mnx_db["MNX_ID"].isin(shortlist["MNX_ID"]), :] # Drop deprecated MetaNetX identifiers. Disabled for now. # xref = xref.loc[~xref["XREF"].str.startswith("deprecated", na=False), :] # Drop self-references for now since they don't follow the format. xref = xref.loc[xref["XREF"] != xref["MNX_ID"], :] # Split namespaces from identifiers. xref[["XREF_ID", "XREF"]] = xref["XREF"].str.split(":", n=1, expand=True) # Group the data in the xref dataframe so that one MNX ID maps to all # corresponding cross-references from other databases. Then list all # identifiers that belong to these databases: # MNX_ID XREF_ID # MNXM0 chebi [23367, 59999] # metacyc [UNKNOWN] # Make a separate column for every XREF_ID: # MNX_ID chebi metacyc # MNXM0 [23367, 59999] [UNKNOWN] xref = xref.groupby(["MNX_ID", "XREF_ID"], as_index=False, sort=False)[ "XREF"].apply(list).unstack('XREF_ID') # Re-insert MetaNetX identifiers as lists. # FIXME: Shouldn't we use metanetx.chemical here instead of 'mnx'? xref["mnx"] = [[x] for x in xref.index] # Transpose the data frame such that the index are now xref databases and # the column names are MetaNetX identifiers. return xref.T
python
{ "resource": "" }
q2462
generate
train
def generate(mnx_dump): """ Annotate a shortlist of metabolites with cross-references using MetaNetX. MNX_DUMP : The chemicals dump from MetaNetX usually called 'chem_xref.tsv'. Will be downloaded if it doesn't exist. """ LOGGER.info("Read shortlist.") targets = pd.read_table(join(dirname(__file__), "shortlist.tsv")) if not exists(mnx_dump): # Download the MetaNetX chemicals dump if it doesn't exists. # Download done as per https://stackoverflow.com/a/16696317. LOGGER.info("MetaNetX dump '%s' does not exist. Downloading...", mnx_dump) with open(mnx_dump, "wb") as file_handle, \ get("https://www.metanetx.org/cgi-bin/mnxget/mnxref/chem_xref.tsv", stream=True) as stream: for chunk in stream.iter_content(chunk_size=1024): file_handle.write(chunk) LOGGER.info("Done.") LOGGER.info("Read the MetaNetX dump with cross-references.") db = pd.read_table(mnx_dump, comment='#', names=['XREF', 'MNX_ID', 'Evidence', 'Description']) LOGGER.info("Generate the shortlist cross-references.") res = generate_shortlist(db, targets) LOGGER.info("Save result.") res.to_json(join(dirname(__file__), pardir, "memote", "support", "data", "met_id_shortlist.json"), force_ascii=False)
python
{ "resource": "" }
q2463
EssentialityExperiment.evaluate
train
def evaluate(self, model): """Use the defined parameters to predict single gene essentiality.""" with model: if self.medium is not None: self.medium.apply(model) if self.objective is not None: model.objective = self.objective model.add_cons_vars(self.constraints) max_val = model.slim_optimize() essen = single_gene_deletion( model, gene_list=self.data["gene"], processes=1) essen["gene"] = [list(g)[0] for g in essen.index] essen.index = essen["gene"] essen["essential"] = (essen["growth"] < (max_val * 0.1)) \ | essen["growth"].isna() return essen
python
{ "resource": "" }
q2464
register_with
train
def register_with(registry): """ Register a passed in object. Intended to be used as a decorator on model building functions with a ``dict`` as a registry. Examples -------- .. code-block:: python REGISTRY = dict() @register_with(REGISTRY) def build_empty(base): return base """ def decorator(func): registry[func.__name__] = func return func return decorator
python
{ "resource": "" }
q2465
annotate
train
def annotate(title, format_type, message=None, data=None, metric=1.0): """ Annotate a test case with info that should be displayed in the reports. Parameters ---------- title : str A human-readable descriptive title of the test case. format_type : str A string that determines how the result data is formatted in the report. It is expected not to be None. * 'number' : 'data' is a single number which can be an integer or float and should be represented as such. * 'count' : 'data' is a list, set or tuple. Choosing 'count' will display the length of that list e.g. number of metabolites without formula. * 'percent' : Instead of 'data' the content of 'metric' ought to be displayed e.g. percentage of metabolites without charge. 'metric' is expected to be a floating point number. * 'raw' : 'data' is ought to be displayed "as is" without formatting. This option is appropriate for single strings or a boolean output. message : str A short written explanation that states and possibly explains the test result. data Raw data which the test case generates and assesses. Can be of the following types: list, set, tuple, string, float, integer, and boolean. metric: float A value x in the range of 0 <= x <= 1 which represents the fraction of 'data' to the total in the model. For example, if 'data' are all metabolites without formula, 'metric' should be the fraction of metabolites without formula from the total of metabolites in the model. Returns ------- function The decorated function, now extended by the attribute 'annotation'. Notes ----- Adds "annotation" attribute to the function object, which stores values for predefined keys as a dictionary. """ if format_type not in TYPES: raise ValueError( "Invalid type. Expected one of: {}.".format(", ".join(TYPES))) def decorator(func): func.annotation = dict( title=title, summary=extended_summary(func), message=message, data=data, format_type=format_type, metric=metric) return func return decorator
python
{ "resource": "" }
q2466
truncate
train
def truncate(sequence): """ Create a potentially shortened text display of a list. Parameters ---------- sequence : list An indexable sequence of elements. Returns ------- str The list as a formatted string. """ if len(sequence) > LIST_SLICE: return ", ".join(sequence[:LIST_SLICE] + ["..."]) else: return ", ".join(sequence)
python
{ "resource": "" }
q2467
log_json_incompatible_types
train
def log_json_incompatible_types(obj): """ Log types that are not JSON compatible. Explore a nested dictionary structure and log types that are not JSON compatible. Parameters ---------- obj : dict A potentially nested dictionary. """ keys_to_explore = list(obj) while len(keys_to_explore) > 0: key = keys_to_explore.pop() if not isinstance(key, str): LOGGER.info(type(key)) value = obj[key] if isinstance(value, dict): LOGGER.info("%s:", key) log_json_incompatible_types(value) elif not isinstance(value, JSON_TYPES): LOGGER.info("%s: %s", key, type(value)) elif isinstance(value, (int, float)) and not isfinite(value): LOGGER.info("%s: %f", key, value)
python
{ "resource": "" }
q2468
flatten
train
def flatten(list_of_lists): """Flatten a list of lists but maintain strings and ints as entries.""" flat_list = [] for sublist in list_of_lists: if isinstance(sublist, string_types) or isinstance(sublist, int): flat_list.append(sublist) elif sublist is None: continue elif not isinstance(sublist, string_types) and len(sublist) == 1: flat_list.append(sublist[0]) else: flat_list.append(tuple(sublist)) return flat_list
python
{ "resource": "" }
q2469
stdout_notifications
train
def stdout_notifications(notifications): """ Print each entry of errors and warnings to stdout. Parameters ---------- notifications: dict A simple dictionary structure containing a list of errors and warnings. """ for error in notifications["errors"]: LOGGER.error(error) for warn in notifications["warnings"]: LOGGER.warning(warn)
python
{ "resource": "" }
q2470
ExperimentalBase.validate
train
def validate(self, model, checks=[]): """Use a defined schema to validate the given table.""" records = self.data.to_dict("records") self.evaluate_report( validate(records, headers=list(records[0]), preset='table', schema=self.schema, order_fields=True, custom_checks=checks))
python
{ "resource": "" }
q2471
ExperimentalBase.evaluate_report
train
def evaluate_report(report): """Iterate over validation errors.""" if report["valid"]: return for warn in report["warnings"]: LOGGER.warning(warn) # We only ever test one table at a time. for err in report["tables"][0]["errors"]: LOGGER.error(err["message"]) raise ValueError("Invalid data file. Please see errors above.")
python
{ "resource": "" }
q2472
add_reaction_constraints
train
def add_reaction_constraints(model, reactions, Constraint): """ Add the stoichiometric coefficients as constraints. Parameters ---------- model : optlang.Model The transposed stoichiometric matrix representation. reactions : iterable Container of `cobra.Reaction` instances. Constraint : optlang.Constraint The constraint class for the specific interface. """ constraints = [] for rxn in reactions: expression = add( [c * model.variables[m.id] for m, c in rxn.metabolites.items()]) constraints.append(Constraint(expression, lb=0, ub=0, name=rxn.id)) model.add(constraints)
python
{ "resource": "" }
q2473
stoichiometry_matrix
train
def stoichiometry_matrix(metabolites, reactions): """ Return the stoichiometry matrix representation of a set of reactions. The reactions and metabolites order is respected. All metabolites are expected to be contained and complete in terms of the reactions. Parameters ---------- reactions : iterable A somehow ordered list of unique reactions. metabolites : iterable A somehow ordered list of unique metabolites. Returns ------- numpy.array The 2D array that represents the stoichiometry matrix. dict A dictionary mapping metabolites to row indexes. dict A dictionary mapping reactions to column indexes. """ matrix = np.zeros((len(metabolites), len(reactions))) met_index = dict((met, i) for i, met in enumerate(metabolites)) rxn_index = dict() for i, rxn in enumerate(reactions): rxn_index[rxn] = i for met, coef in iteritems(rxn.metabolites): j = met_index[met] matrix[j, i] = coef return matrix, met_index, rxn_index
python
{ "resource": "" }
q2474
rank
train
def rank(matrix, atol=1e-13, rtol=0): """ Estimate the rank, i.e., the dimension of the column space, of a matrix. The algorithm used by this function is based on the singular value decomposition of `stoichiometry_matrix`. Parameters ---------- matrix : ndarray The matrix should be at most 2-D. A 1-D array with length k will be treated as a 2-D with shape (1, k) atol : float The absolute tolerance for a zero singular value. Singular values smaller than ``atol`` are considered to be zero. rtol : float The relative tolerance for a zero singular value. Singular values less than the relative tolerance times the largest singular value are considered to be zero. Notes ----- If both `atol` and `rtol` are positive, the combined tolerance is the maximum of the two; that is:: tol = max(atol, rtol * smax) Singular values smaller than ``tol`` are considered to be zero. Returns ------- int The estimated rank of the matrix. See Also -------- numpy.linalg.matrix_rank matrix_rank is basically the same as this function, but it does not provide the option of the absolute tolerance. """ matrix = np.atleast_2d(matrix) sigma = svd(matrix, compute_uv=False) tol = max(atol, rtol * sigma[0]) return int((sigma >= tol).sum())
python
{ "resource": "" }
q2475
get_interface
train
def get_interface(model): """ Return the interface specific classes. Parameters ---------- model : cobra.Model The metabolic model under investigation. """ return ( model.solver.interface.Model, model.solver.interface.Constraint, model.solver.interface.Variable, model.solver.interface.Objective )
python
{ "resource": "" }
q2476
get_internals
train
def get_internals(model): """ Return non-boundary reactions and their metabolites. Boundary reactions are unbalanced by their nature. They are excluded here and only the metabolites of the others are considered. Parameters ---------- model : cobra.Model The metabolic model under investigation. """ biomass = set(find_biomass_reaction(model)) if len(biomass) == 0: LOGGER.warning("No biomass reaction detected. Consistency test results " "are unreliable if one exists.") return set(model.reactions) - (set(model.boundary) | biomass)
python
{ "resource": "" }
q2477
add_cut
train
def add_cut(problem, indicators, bound, Constraint): """ Add an integer cut to the problem. Ensure that the same solution involving these indicator variables cannot be found by enforcing their sum to be less than before. Parameters ---------- problem : optlang.Model Specific optlang interface Model instance. indicators : iterable Binary indicator `optlang.Variable`s. bound : int Should be one less than the sum of indicators. Corresponds to P - 1 in equation (14) in [1]_. Constraint : optlang.Constraint Constraint class for a specific optlang interface. References ---------- .. [1] Gevorgyan, A., M. G Poolman, and D. A Fell. "Detection of Stoichiometric Inconsistencies in Biomolecular Models." Bioinformatics 24, no. 19 (2008): 2245. """ cut = Constraint(sympy.Add(*indicators), ub=bound) problem.add(cut) return cut
python
{ "resource": "" }
q2478
is_mass_balanced
train
def is_mass_balanced(reaction): """Confirm that a reaction is mass balanced.""" balance = defaultdict(int) for metabolite, coefficient in iteritems(reaction.metabolites): if metabolite.elements is None or len(metabolite.elements) == 0: return False for element, amount in iteritems(metabolite.elements): balance[element] += coefficient * amount return all(amount == 0 for amount in itervalues(balance))
python
{ "resource": "" }
q2479
is_charge_balanced
train
def is_charge_balanced(reaction): """Confirm that a reaction is charge balanced.""" charge = 0 for metabolite, coefficient in iteritems(reaction.metabolites): if metabolite.charge is None: return False charge += coefficient * metabolite.charge return charge == 0
python
{ "resource": "" }
q2480
check_partial
train
def check_partial(func, *args, **kwargs): """Create a partial to be used by goodtables.""" new_func = partial(func, *args, **kwargs) new_func.check = func.check return new_func
python
{ "resource": "" }
q2481
gene_id_check
train
def gene_id_check(genes, errors, columns, row_number): """ Validate gene identifiers against a known set. Parameters ---------- genes : set The known set of gene identifiers. errors : Passed by goodtables. columns : Passed by goodtables. row_number : Passed by goodtables. """ message = ("Gene '{value}' in column {col} and row {row} does not " "appear in the metabolic model.") for column in columns: if "gene" in column['header'] and column['value'] not in genes: message = message.format( value=column['value'], row=row_number, col=column['number']) errors.append({ 'code': 'bad-value', 'message': message, 'row-number': row_number, 'column-number': column['number'], })
python
{ "resource": "" }
q2482
reaction_id_check
train
def reaction_id_check(reactions, errors, columns, row_number): """ Validate reactions identifiers against a known set. Parameters ---------- reactions : set The known set of reaction identifiers. errors : Passed by goodtables. columns : Passed by goodtables. row_number : Passed by goodtables. """ message = ("Reaction '{value}' in column {col} and row {row} does not " "appear in the metabolic model.") for column in columns: if "reaction" in column['header'] and column['value'] not in reactions: message = message.format( value=column['value'], row=row_number, col=column['number']) errors.append({ 'code': 'bad-value', 'message': message, 'row-number': row_number, 'column-number': column['number'], })
python
{ "resource": "" }
q2483
metabolite_id_check
train
def metabolite_id_check(metabolites, errors, columns, row_number): """ Validate metabolite identifiers against a known set. Parameters ---------- metabolites : set The known set of metabolite identifiers. errors : Passed by goodtables. columns : Passed by goodtables. row_number : Passed by goodtables. """ message = ("Metabolite '{value}' in column {col} and row {row} does not " "appear in the metabolic model.") for column in columns: if "metabolite" in column['header'] and \ column['value'] not in metabolites: message = message.format( value=column['value'], row=row_number, col=column['number']) errors.append({ 'code': 'bad-value', 'message': message, 'row-number': row_number, 'column-number': column['number'], })
python
{ "resource": "" }
q2484
run
train
def run(model, collect, filename, location, ignore_git, pytest_args, exclusive, skip, solver, experimental, custom_tests, deployment, skip_unchanged): """ Run the test suite on a single model and collect results. MODEL: Path to model file. Can also be supplied via the environment variable MEMOTE_MODEL or configured in 'setup.cfg' or 'memote.ini'. """ def is_verbose(arg): return (arg.startswith("--verbosity") or arg.startswith("-v") or arg.startswith("--verbose") or arg.startswith("-q") or arg.startswith("--quiet")) if ignore_git: repo = None else: callbacks.git_installed() repo = callbacks.probe_git() if collect: if repo is not None: if location is None: LOGGER.critical( "Working with a repository requires a storage location.") sys.exit(1) if not any(a.startswith("--tb") for a in pytest_args): pytest_args = ["--tb", "short"] + pytest_args if not any(is_verbose(a) for a in pytest_args): pytest_args.append("-vv") # Check if the model was changed in this commit. Exit `memote run` if this # was not the case. if skip_unchanged and repo is not None: commit = repo.head.commit if not is_modified(model, commit): LOGGER.info("The model was not modified in commit '%s'. Skipping.", commit.hexsha) sys.exit(0) # Add further directories to search for tests. pytest_args.extend(custom_tests) # Check if the model can be loaded at all. model, sbml_ver, notifications = api.validate_model(model) if model is None: LOGGER.critical( "The model could not be loaded due to the following SBML errors.") stdout_notifications(notifications) sys.exit(1) model.solver = solver # Load the experimental configuration using model information. if experimental is not None: experimental.load(model) code, result = api.test_model( model=model, sbml_version=sbml_ver, results=True, pytest_args=pytest_args, skip=skip, exclusive=exclusive, experimental=experimental) if collect: if repo is None: manager = ResultManager() manager.store(result, filename=filename) else: LOGGER.info("Checking out deployment branch.") # If the repo HEAD is pointing to the most recent branch then # GitPython's `repo.active_branch` works. Yet, if the repo is in # detached HEAD state, i.e., when a user has checked out a specific # commit as opposed to a branch, this won't work and throw a # `TypeError`, which we are circumventing below. try: previous = repo.active_branch previous_cmt = previous.commit is_branch = True except TypeError: previous_cmt = repo.head.commit is_branch = False repo.git.checkout(deployment) try: manager = SQLResultManager(repository=repo, location=location) except (AttributeError, ArgumentError): manager = RepoResultManager(repository=repo, location=location) LOGGER.info( "Committing result and changing back to working branch.") manager.store(result, commit=previous_cmt.hexsha) repo.git.add(".") check_call( ['git', 'commit', '-m', "chore: add result for {}".format(previous_cmt.hexsha)] ) if is_branch: previous.checkout() else: repo.commit(previous_cmt)
python
{ "resource": "" }
q2485
new
train
def new(directory, replay): """ Create a suitable model repository structure from a template. By using a cookiecutter template, memote will ask you a couple of questions and set up a new directory structure that will make your life easier. The new directory will be placed in the current directory or respect the given --directory option. """ callbacks.git_installed() if directory is None: directory = os.getcwd() cookiecutter("gh:opencobra/cookiecutter-memote", output_dir=directory, replay=replay)
python
{ "resource": "" }
q2486
online
train
def online(note, github_repository, github_username): """Upload the repository to GitHub and enable testing on Travis CI.""" callbacks.git_installed() try: repo = git.Repo() except git.InvalidGitRepositoryError: LOGGER.critical( "'memote online' requires a git repository in order to follow " "the current branch's commit history.") sys.exit(1) if note == "memote-ci access": note = "{} to {}".format(note, github_repository) # Github API calls # Set up the git repository on GitHub via API v3. gh_repo_name, auth_token, repo_access_token = _setup_gh_repo( github_repository, github_username, note ) # Travis API calls # Configure Travis CI to use Github auth token then return encrypted token. secret = _setup_travis_ci(gh_repo_name, auth_token, repo_access_token) # Save the encrypted token in the travis config then commit and push LOGGER.info("Storing GitHub token in '.travis.yml'.") config = te.load_travis_configuration(".travis.yml") global_env = config.setdefault("env", {}).get("global") if global_env is None: config["env"]["global"] = global_env = {} try: global_env["secure"] = secret except TypeError: global_env.append({"secure": secret}) te.dump_travis_configuration(config, ".travis.yml") LOGGER.info("Add, commit and push changes to '.travis.yml' to GitHub.") repo.index.add([".travis.yml"]) check_call( ['git', 'commit', '-m', "chore: add encrypted GitHub access token"] ) check_call( ['git', 'push', '--set-upstream', 'origin', repo.active_branch.name] )
python
{ "resource": "" }
q2487
update_mock_repo
train
def update_mock_repo(): """ Clone and gzip the memote-mock-repo used for CLI and integration tests. The repo is hosted at 'https://github.com/ChristianLieven/memote-mock-repo.git' and maintained separately from """ target_file = os.path.abspath( join("tests", "data", "memote-mock-repo.tar.gz") ) temp_dir = mkdtemp(prefix='tmp_mock') previous_wd = os.getcwd() try: LOGGER.info("Cloning repository.") os.chdir(temp_dir) check_output( ['git', 'clone', 'https://github.com/ChristianLieven/memote-mock-repo.git'] ) os.chdir('memote-mock-repo/') LOGGER.info("Setting git to ignore filemode changes.") call( ['git', 'config', 'core.fileMode', 'false'] ) call( ['git', 'config', 'user.email', '[email protected]'] ) call( ['git', 'config', 'user.name', 'memote-bot'] ) finally: LOGGER.info("Compressing to tarball.") tar = tarfile.open(target_file, "w:gz") tar.add( join(temp_dir, 'memote-mock-repo/'), arcname="memote-mock-repo" ) tar.close() LOGGER.info("Success!") LOGGER.info("Removing temporary directory.") rmtree(temp_dir) LOGGER.info("Success! The mock repo has been updated.") os.chdir(previous_wd)
python
{ "resource": "" }
q2488
sum_biomass_weight
train
def sum_biomass_weight(reaction): """ Compute the sum of all reaction compounds. This function expects all metabolites of the biomass reaction to have formula information assigned. Parameters ---------- reaction : cobra.core.reaction.Reaction The biomass reaction of the model under investigation. Returns ------- float The molecular weight of the biomass reaction in units of g/mmol. """ return sum(-coef * met.formula_weight for (met, coef) in iteritems(reaction.metabolites)) / 1000.0
python
{ "resource": "" }
q2489
find_biomass_precursors
train
def find_biomass_precursors(model, reaction): """ Return a list of all biomass precursors excluding ATP and H2O. Parameters ---------- reaction : cobra.core.reaction.Reaction The biomass reaction of the model under investigation. model : cobra.Model The metabolic model under investigation. Returns ------- list Metabolite objects that are reactants of the biomass reaction excluding ATP and H2O. """ id_of_main_compartment = helpers.find_compartment_id_in_model(model, 'c') gam_reactants = set() try: gam_reactants.update([ helpers.find_met_in_model( model, "MNXM3", id_of_main_compartment)[0]]) except RuntimeError: pass try: gam_reactants.update([ helpers.find_met_in_model( model, "MNXM2", id_of_main_compartment)[0]]) except RuntimeError: pass biomass_precursors = set(reaction.reactants) - gam_reactants return list(biomass_precursors)
python
{ "resource": "" }
q2490
find_blocked_biomass_precursors
train
def find_blocked_biomass_precursors(reaction, model): """ Return a list of all biomass precursors that cannot be produced. Parameters ---------- reaction : cobra.core.reaction.Reaction The biomass reaction of the model under investigation. model : cobra.Model The metabolic model under investigation. Returns ------- list Metabolite objects that are reactants of the biomass reaction excluding ATP and H2O that cannot be produced by flux balance analysis. """ LOGGER.debug("Finding blocked biomass precursors") precursors = find_biomass_precursors(model, reaction) blocked_precursors = list() _, ub = helpers.find_bounds(model) for precursor in precursors: with model: dm_rxn = model.add_boundary( precursor, type="safe-demand", reaction_id="safe_demand", lb=0, ub=ub ) flux = helpers.run_fba(model, dm_rxn.id, direction='max') if np.isnan(flux) or abs(flux) < 1E-08: blocked_precursors.append(precursor) return blocked_precursors
python
{ "resource": "" }
q2491
gam_in_biomass
train
def gam_in_biomass(model, reaction): """ Return boolean if biomass reaction includes growth-associated maintenance. Parameters ---------- model : cobra.Model The metabolic model under investigation. reaction : cobra.core.reaction.Reaction The biomass reaction of the model under investigation. Returns ------- boolean True if the biomass reaction includes ATP and H2O as reactants and ADP, Pi and H as products, False otherwise. """ id_of_main_compartment = helpers.find_compartment_id_in_model(model, 'c') try: left = { helpers.find_met_in_model( model, "MNXM3", id_of_main_compartment)[0], helpers.find_met_in_model( model, "MNXM2", id_of_main_compartment)[0] } right = { helpers.find_met_in_model( model, "MNXM7", id_of_main_compartment)[0], helpers.find_met_in_model( model, "MNXM1", id_of_main_compartment)[0], helpers.find_met_in_model( model, "MNXM9", id_of_main_compartment)[0] } except RuntimeError: return False return ( left.issubset(set(reaction.reactants)) and right.issubset(set(reaction.products)))
python
{ "resource": "" }
q2492
find_direct_metabolites
train
def find_direct_metabolites(model, reaction, tolerance=1E-06): """ Return list of possible direct biomass precursor metabolites. The term direct metabolites describes metabolites that are involved only in either transport and/or boundary reactions, AND the biomass reaction(s), but not in any purely metabolic reactions. Parameters ---------- model : cobra.Model The metabolic model under investigation. reaction : cobra.Reaction The biomass reaction of the model under investigation. tolerance : float, optional Tolerance below which values will be regarded as zero. Returns ------- list Metabolites that qualify as direct metabolites i.e. biomass precursors that are taken up to be consumed by the biomass reaction only. """ biomass_rxns = set(helpers.find_biomass_reaction(model)) tra_bou_bio_rxns = helpers.find_interchange_biomass_reactions( model, biomass_rxns) try: precursors = find_biomass_precursors(model, reaction) main_comp = helpers.find_compartment_id_in_model(model, 'c') ext_space = helpers.find_compartment_id_in_model(model, 'e') except KeyError: LOGGER.error("Failed to properly identify cytosolic and extracellular " "compartments.") raise_with_traceback(KeyError("The cytosolic and/or extracellular " "compartments could not be identified.")) except RuntimeError: LOGGER.error("Failed to properly identify cytosolic and extracellular " "compartments.") raise_with_traceback(RuntimeError("The cytosolic and/or extracellular " "compartments could not be " "identified.")) else: tra_bou_bio_mets = [met for met in precursors if met.reactions.issubset(tra_bou_bio_rxns)] rxns_of_interest = set([rxn for met in tra_bou_bio_mets for rxn in met.reactions if rxn not in biomass_rxns]) solution = model.optimize(raise_error=True) if np.isclose(solution.objective_value, 0, atol=tolerance): LOGGER.error("Failed to generate a non-zero objective value with " "flux balance analysis.") raise OptimizationError( "The flux balance analysis on this model returned an " "objective value of zero. Make sure the model can " "grow! Check if the constraints are not too strict!") tra_bou_bio_fluxes = {r: solution[r.id] for r in rxns_of_interest} met_flux_sum = {m: 0 for m in tra_bou_bio_mets} return detect_false_positive_direct_metabolites( tra_bou_bio_mets, biomass_rxns, main_comp, ext_space, tra_bou_bio_fluxes, met_flux_sum)
python
{ "resource": "" }
q2493
detect_false_positive_direct_metabolites
train
def detect_false_positive_direct_metabolites( candidates, biomass_reactions, cytosol, extra, reaction_fluxes, metabolite_fluxes): """ Weed out false positive direct metabolites. False positives exists in the extracellular compartment with flux from the cytosolic compartment and are part of the biomass reaction(s). It sums fluxes positively or negatively depending on if direct metabolites in the extracellular compartment are defined as reactants or products in various reactions. Parameters ---------- candidates : list of cobra.Metabolite Candidate direct metabolites. biomass_reactions : set of cobra.Reaction The biomass reactions. Usually one or two. cytosol : str The identifier of the cytosolic compartment. extra : str The identifier of the extracellular compartment. Returns ------- list Definitive list of direct metabolites, i.e., biomass precursors that are taken up to be consumed by the biomass reaction only. """ for met in candidates: is_internal = met.compartment != extra for rxn in met.reactions: if rxn in biomass_reactions: continue # Internal metabolites can not be false positives. if is_internal: metabolite_fluxes[met] += abs(reaction_fluxes[rxn]) continue # if the metabolite is in the "e" compartment and a reactant, # sum the fluxes accordingly (outward=negative, inward=positive) if met in rxn.reactants: product_comps = set([p.compartment for p in rxn.products]) # if the reaction has no product (outward flux) if len(product_comps) == 0: metabolite_fluxes[met] += -reaction_fluxes[rxn] # if the reaction has a product in "c" (inward flux) elif cytosol in product_comps: metabolite_fluxes[met] += reaction_fluxes[rxn] # if the metabolite is in the "e" compartment and a product, # sum the fluxes accordingly (outward=negative, inward=positive) elif met in rxn.products: reactant_comps = set([p.compartment for p in rxn.reactants]) # if the reaction has no reactant (inward flux) if len(reactant_comps) == 0: metabolite_fluxes[met] += reaction_fluxes[rxn] # if the reaction has a reactant in "c" (outward flux) elif cytosol in reactant_comps: metabolite_fluxes[met] += -reaction_fluxes[rxn] return [m for m, f in iteritems(metabolite_fluxes) if f > 0]
python
{ "resource": "" }
q2494
bundle_biomass_components
train
def bundle_biomass_components(model, reaction): """ Return bundle biomass component reactions if it is not one lumped reaction. There are two basic ways of specifying the biomass composition. The most common is a single lumped reaction containing all biomass precursors. Alternatively, the biomass equation can be split into several reactions each focusing on a different macromolecular component for instance a (1 gDW ash) + b (1 gDW phospholipids) + c (free fatty acids)+ d (1 gDW carbs) + e (1 gDW protein) + f (1 gDW RNA) + g (1 gDW DNA) + h (vitamins/cofactors) + xATP + xH2O-> 1 gDCW biomass + xADP + xH + xPi. This function aims to identify if the given biomass reaction 'reaction', is a lumped all-in-one reaction, or whether it is just the final composing reaction of all macromolecular components. It is important to identify which other reaction belong to a given biomass reaction to be able to identify universal biomass components or calculate detailed precursor stoichiometries. Parameters ---------- model : cobra.Model The metabolic model under investigation. reaction : cobra.core.reaction.Reaction The biomass reaction of the model under investigation. Returns ------- list One or more reactions that qualify as THE biomass equation together. Notes ----- Counting H2O, ADP, Pi, H, and ATP, the amount of metabolites in a split reaction is comparatively low: Any reaction with less or equal to 15 metabolites can probably be counted as a split reaction containing Ash, Phospholipids, Fatty Acids, Carbohydrates (i.e. cell wall components), Protein, RNA, DNA, Cofactors and Vitamins, and Small Molecules. Any reaction with more than or equal to 28 metabolites, however, (21 AA + 3 Nucleotides (4-ATP) + 4 Deoxy-Nucleotides) can be considered a lumped reaction. Anything in between will be treated conservatively as a lumped reaction. For split reactions, after removing any of the metabolites associated with growth-associated energy expenditure (H2O, ADP, Pi, H, and ATP), the only remaining metabolites should be generalized macromolecule precursors e.g. Protein, Phospholipids etc. Each of these have their own composing reactions. Hence we include the reactions of these metabolites in the set that ultimately makes up the returned list of reactions that together make up the biomass equation. """ if len(reaction.metabolites) >= 16: return [reaction] id_of_main_compartment = helpers.find_compartment_id_in_model(model, 'c') gam_mets = ["MNXM3", "MNXM2", "MNXM7", "MNXM1", 'MNXM9'] try: gam = set([helpers.find_met_in_model( model, met, id_of_main_compartment)[0] for met in gam_mets]) except RuntimeError: gam = set() regex = re.compile('^{}(_[a-zA-Z]+?)*?$'.format('biomass'), re.IGNORECASE) biomass_metabolite = set(model.metabolites.query(regex)) macromolecules = set(reaction.metabolites) - gam - biomass_metabolite bundled_reactions = set() for met in macromolecules: bundled_reactions = bundled_reactions | set(met.reactions) return list(bundled_reactions)
python
{ "resource": "" }
q2495
essential_precursors_not_in_biomass
train
def essential_precursors_not_in_biomass(model, reaction): u""" Return a list of essential precursors missing from the biomass reaction. There are universal components of life that make up the biomass of all known organisms. These include all proteinogenic amino acids, deoxy- and ribonucleotides, water and a range of metabolic cofactors. Parameters ---------- model : cobra.Model The metabolic model under investigation. reaction : cobra.core.reaction.Reaction The biomass reaction of the model under investigation. Returns ------- list IDs of essential metabolites missing from the biomass reaction. The IDS will appear in the models namespace if the metabolite exists, but will be using the MetaNetX namespace if the metabolite does not exist in the model. Notes ----- "Answering the question of what to include in the core of a biomass objective function is not always straightforward. One example is different nucleotide forms, which, although inter-convertible, are essential for cellular chemistry. We propose here that all essential and irreplaceable molecules for metabolism should be included in the biomass functions of genome scale metabolic models. In the special case of cofactors, when two forms of the same cofactor take part in the same reactions (such as NAD and NADH), only one form could be included for the sake of simplicity. When a class of cofactors includes active and non-active interconvertible forms, the active forms should be preferred. [1]_." Please note, that [1]_ also suggest to count C1 carriers (derivatives of tetrahydrofolate(B9) or tetrahydromethanopterin) as universal cofactors. We have omitted these from this check because there are many individual compounds that classify as C1 carriers, and it is not clear a priori which one should be preferred. In a future update, we may consider identifying these using a chemical ontology. References ---------- .. [1] Xavier, J. C., Patil, K. R., & Rocha, I. (2017). Integration of Biomass Formulations of Genome-Scale Metabolic Models with Experimental Data Reveals Universally Essential Cofactors in Prokaryotes. Metabolic Engineering, 39(October 2016), 200–208. http://doi.org/10.1016/j.ymben.2016.12.002 """ main_comp = helpers.find_compartment_id_in_model(model, 'c') biomass_eq = bundle_biomass_components(model, reaction) pooled_precursors = set( [met for rxn in biomass_eq for met in rxn.metabolites]) missing_essential_precursors = [] for mnx_id in ESSENTIAL_PRECURSOR_IDS: try: met = helpers.find_met_in_model(model, mnx_id, main_comp)[0] if met not in pooled_precursors: missing_essential_precursors.append(met.id) except RuntimeError: missing_essential_precursors.append(mnx_id) return missing_essential_precursors
python
{ "resource": "" }
q2496
validate_experimental
train
def validate_experimental(context, param, value): """Load and validate an experimental data configuration.""" if value is None: return config = ExperimentConfiguration(value) config.validate() return config
python
{ "resource": "" }
q2497
probe_git
train
def probe_git(): """Return a git repository instance if it exists.""" try: repo = git.Repo() except git.InvalidGitRepositoryError: LOGGER.warning( "We highly recommend keeping your model in a git repository." " It allows you to track changes and to easily collaborate with" " others via online platforms such as https://github.com.\n") return if repo.is_dirty(): LOGGER.critical( "Please git commit or git stash all changes before running" " the memote suite.") sys.exit(1) return repo
python
{ "resource": "" }
q2498
git_installed
train
def git_installed(): """Interrupt execution of memote if `git` has not been installed.""" LOGGER.info("Checking `git` installation.") try: check_output(['git', '--version']) except CalledProcessError as e: LOGGER.critical( "The execution of memote was interrupted since no installation of " "`git` could be detected. Please install git to use " "this functionality: " "https://git-scm.com/book/en/v2/Getting-Started-Installing-Git") LOGGER.debug("Underlying error:", exc_info=e) sys.exit(1)
python
{ "resource": "" }
q2499
RepoResultManager.record_git_info
train
def record_git_info(self, commit=None): """ Record git meta information. Parameters ---------- commit : str, optional Unique hexsha of the desired commit. Returns ------- GitInfo Git commit meta information. """ if commit is None: commit = self._repo.head.commit else: commit = self._repo.commit(commit) return GitInfo( hexsha=commit.hexsha, author=commit.author.name, email=commit.author.email, authored_on=commit.authored_datetime )
python
{ "resource": "" }