'''isotopomer metabolomics analysis class'''
from sbaas.analysis.analysis_base import *
from .stage02_isotopomer_query import *
from .stage02_isotopomer_io import *
# Dependencies
import operator, json, csv
from copy import copy
# Dependencies from 3rd party
import scipy.io
import numpy
from numpy import histogram, mean, std, loadtxt
import matplotlib as mpl
import matplotlib.pyplot as plt
import h5py
from sbaas.resources.molmass import Formula
# Dependencies from cobra
from cobra.io.sbml import create_cobra_model_from_sbml_file
from cobra.io.sbml import write_cobra_model_to_sbml_file
from cobra.io.mat import save_matlab_model
#load_json_model/save_json_model are used below; assuming cobra.io.json provides
#them (they may also come from the sbaas star imports above)
from cobra.io.json import load_json_model, save_json_model
from cobra.manipulation.modify import convert_to_irreversible, revert_to_reversible
from cobra.flux_analysis.objective import update_objective
from cobra.flux_analysis.variability import flux_variability_analysis
from cobra.flux_analysis.parsimonious import optimize_minimal_flux
from cobra.flux_analysis import single_deletion
from cobra.core.Reaction import Reaction
from cobra.core.Metabolite import Metabolite
class stage02_isotopomer_dependencies():
def __init__(self):
self.calculate = base_calculate();
#variables:
self.isotopomer_rxns_net_irreversible = {
'ptrc_to_4abut_1':{'reactions':['PTRCTA','ABUTD'],
'stoichiometry':[1,1]},
'ptrc_to_4abut_2':{'reactions':['GGPTRCS','GGPTRCO','GGGABADr','GGGABAH'],
'stoichiometry':[1,1,1,1]},
'glu_DASH_L_to_acg5p':{'reactions':['ACGS','ACGK'],
'stoichiometry':[1,1]},
'2obut_and_pyr_to_3mop':{'reactions':['ACHBS','KARA2','DHAD2'],
'stoichiometry':[1,1,1]},
'pyr_to_23dhmb':{'reactions':['ACLS','KARA1_reverse'],
'stoichiometry':[1,1]},
#'met_DASH_L_and_ptrc_to_spmd_and_5mta':{'reactions':['METAT','ADMDC','SPMS'],
# 'stoichiometry':[1,1,1]}, #cannot be lumped
'chor_and_prpp_to_3ig3p':{'reactions':['ANS','ANPRT','PRAIi','IGPS'],
'stoichiometry':[1,1,1,1]},
'hom_DASH_L_and_cyst_DASH_L_to_pyr_hcys_DASH_L':{'reactions':['HSST','SHSL1','CYSTL'],
'stoichiometry':[1,1,1]},
'e4p_and_pep_to_3dhq':{'reactions':['DDPA','DHQS'],
'stoichiometry':[1,1]},
'aspsa_to_sl2a6o':{'reactions':['DHDPS','DHDPRy','THDPS'],
'stoichiometry':[1,1,1]},
'glu_DASH_L_to_glu5sa':{'reactions':['GLU5K','G5SD'],
'stoichiometry':[1,1]},
'g1p_to_glycogen':{'reactions':['GLGC','GLCS1'],
'stoichiometry':[1,1]},
'thr_DASH_L_to_gly':{'reactions':['THRD','GLYAT_reverse'],
'stoichiometry':[1,1]}, #need to remove deadend mets: athr-L: ATHRDHr, ATHRDHr_reverse; aact: AACTOOR, AOBUTDs
'dhap_to_lac_DASH_D':{'reactions':['MGSA','LGTHL','GLYOX'],
'stoichiometry':[1,1,1]},
'hom_DASH_L_to_thr_DASH_L':{'reactions':['HSK','THRS'],
'stoichiometry':[1,1]},
'3pg_to_ser_DASH_L':{'reactions':['PGCD','PSERT','PSP_L'],
'stoichiometry':[1,1,1]},
'prpp_to_his_DASH_L':{'reactions':['ATPPRT','PRATPP','PRAMPC','PRMICI','IG3PS','IGPDH','HSTPT','HISTP','HISTD'],
'stoichiometry':[1,1,1,1,1,1,1,1,1]},
'UMPSYN_aerobic':{'reactions':['ASPCT','DHORTS_reverse','DHORD2','ORPT_reverse','OMPDC'],
'stoichiometry':[1,1,1,1,1]},
#'UMPSYN_anaerobic':{'reactions':['ASPCT','DHORTS_reverse','DHORD5','ORPT_reverse','OMPDC'],
# 'stoichiometry':[1,1,1,1,1]},
'IMPSYN_1':{'reactions':['GLUPRT','PRAGSr','PRFGS','PRAIS'],
'stoichiometry':[1,1,1,1]},
'IMPSYN_2':{'reactions':['AIRC2','AIRC3_reverse','PRASCSi','ADSL2r'],
'stoichiometry':[1,1,1,1]},
'IMPSYN_3':{'reactions':['AICART','IMPC_reverse'],
'stoichiometry':[1,1]},
'imp_to_gmp':{'reactions':['IMPD','GMPS2'],
'stoichiometry':[1,1]},
'imp_to_amp':{'reactions':['ADSS','ADSL1r'],
'stoichiometry':[1,1]},
#'utp_to_dump_anaerobic':{'reactions':['RNTR4c2','DUTPDP'],
# 'stoichiometry':[1,1]},
'udp_to_dump_aerobic':{'reactions':['RNDR4','NDPK6','DUTPDP'],
'stoichiometry':[1,1,1]},
#'dtmp_to_dttp':{'reactions':['DTMPK','NDPK4'],
# 'stoichiometry':[1,1]}, #cannot be lumped
'COASYN':{'reactions':['ASP1DC','MOHMT','DPR','PANTS','PNTK','PPNCL2','PPCDC','PTPATi','DPCOAK'],
'stoichiometry':[1,1,1,1,1,1,1,1,1]},
'FADSYN_1':{'reactions':['GTPCII2','DHPPDA2','APRAUR','PMDPHT','RBFSb'],
'stoichiometry':[1,1,1,1,1]},
'FADSYN_2':{'reactions':['RBFSa','DB4PS'],
'stoichiometry':[1,1]},
'FADSYN_3':{'reactions':['RBFK','FMNAT'],
'stoichiometry':[1,1]},
'NADSYN_aerobic':{'reactions':['ASPO6','QULNS','NNDPR','NNATr','NADS1','NADK'],
'stoichiometry':[1,1,1,1,1,1]},
#'NADSYN_anaerobic':{'reactions':['ASPO5','QULNS','NNDPR','NNATr','NADS1','NADK'],
# 'stoichiometry':[1,1,1,1,1,1]},
#'NADSALVAGE':{'reactions':['NADPPPS','NADN','NNAM','NAMNPP','NMNN','NMNDA','NMNAT','NADDP','ADPRDP'],
# 'stoichiometry':[1,1,1,1,1,1,1,1,1]}, #cannot be lumped
'THFSYN':{'reactions':['GTPCI','DNTPPA','DNMPPA','DHNPA2r','HPPK2','ADCS','ADCL','DHPS2','DHFS'],
'stoichiometry':[1,1,1,1,1,1,1,1,1]},
'GTHSYN':{'reactions':['GLUCYS','GTHS'],
'stoichiometry':[1,1]},
'GLYCPHOSPHOLIPID_1':{'reactions':['DASYN181','AGPAT181','G3PAT181'],'stoichiometry':[1,1,1]},
'GLYCPHOSPHOLIPID_2':{'reactions':['PSSA181','PSD181'],'stoichiometry':[1,1]},
'GLYCPHOSPHOLIPID_3':{'reactions':['PGSA160','PGPP160'],'stoichiometry':[1,1]},
'GLYCPHOSPHOLIPID_4':{'reactions':['DASYN161','AGPAT161','G3PAT161'],'stoichiometry':[1,1,1]},
'GLYCPHOSPHOLIPID_5':{'reactions':['PGSA181','PGPP181'],'stoichiometry':[1,1]},
'GLYCPHOSPHOLIPID_6':{'reactions':['PSD161','PSSA161'],'stoichiometry':[1,1]},
'GLYCPHOSPHOLIPID_7':{'reactions':['PSSA160','PSD160'],'stoichiometry':[1,1]},
'GLYCPHOSPHOLIPID_8':{'reactions':['DASYN160','AGPAT160','G3PAT160'],'stoichiometry':[1,1,1]},
'GLYCPHOSPHOLIPID_9':{'reactions':['PGSA161','PGPP161'],'stoichiometry':[1,1]},
'MOLYBDOPTERIN_1':{'reactions':['MPTAT','MPTS','CPMPS'],'stoichiometry':[1,1,1]},
'MOLYBDOPTERIN_2':{'reactions':['MOCDS','MOGDS'],'stoichiometry':[1,1]},
'MOLYBDOPTERIN_3':{'reactions':['MOADSUx','MPTSS'],'stoichiometry':[1,1]},
'COFACTOR_1':{'reactions':['GLUTRR','G1SAT','GLUTRS'],'stoichiometry':[1,1,1]},
'COFACTOR_2':{'reactions':['DHNAOT4','UPPDC1','DHNCOAT','DHNCOAS','SEPHCHCS','SUCBZS','SUCBZL','PPPGO3','FCLT','CPPPGO','SHCHCS3'],'stoichiometry':[1,1,1,1,1,1,1,1,1,1,1]},
'COFACTOR_3':{'reactions':['TYRL','AMMQLT8','HEMEOS','UPP3MT','SHCHD2','SHCHF','ENTCS','CBLAT'],'stoichiometry':[1,1,1,1,1,1,1,1]},
'VITB6':{'reactions':['E4PD','PERD','OHPBAT','PDX5PS','PDX5PO2'],'stoichiometry':[1,1,1,1,1]},
#'THIAMIN':{'reactions':['AMPMS2','PMPK','THZPSN3','TMPPP','TMPK'],'stoichiometry':[1,1,1,1,1]}, # original pathway without correction
'THIAMIN':{'reactions':['AMPMS3','PMPK','THZPSN3','TMPPP','TMPK'],'stoichiometry':[1,1,1,1,1]},
'COFACTOR_4':{'reactions':['I4FE4ST','I4FE4SR','I2FE2SS2'],'stoichiometry':[1,1,1]},
'COFACTOR_5':{'reactions':['BMOGDS1','BMOGDS2','BMOCOS'],'stoichiometry':[1,1,1]},
'COFACTOR_6':{'reactions':['DMPPS','GRTT','DMATT'],'stoichiometry':[1,1,1]},
'COFACTOR_7':{'reactions':['MECDPS','DXPRIi','MEPCT','CDPMEK','MECDPDH5'],'stoichiometry':[1,1,1,1,1]},
'COFACTOR_8':{'reactions':['LIPOS','LIPOCT'],'stoichiometry':[1,1]},
'COFACTOR_9':{'reactions':['OMMBLHX','OMPHHX','OPHHX','HBZOPT','DMQMT','CHRPL','OMBZLM','OPHBDC','OHPHM'],'stoichiometry':[1,1,1,1,1,1,1,1,1]},
'COFACTOR_10':{'reactions':['SERASr','DHBD','UPP3S','HMBS','ICHORT','DHBS'],'stoichiometry':[1,1,1,1,1,1]},
'COFACTOR_11':{'reactions':['PMEACPE','EGMEACPR','DBTS','AOXSr2','I2FE2SR','OPMEACPD','MALCOAMT','AMAOTr','OPMEACPS','OPMEACPR','OGMEACPD','OGMEACPR','OGMEACPS','EPMEACPR','BTS5'],'stoichiometry':[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]},
'CELLENV_1':{'reactions':['UAMAGS','UAPGR','UAGPT3','PAPPT3','GLUR_reverse','UAGCVT','UAMAS','UDCPDP','UGMDDS','UAAGDS'],'stoichiometry':[1,1,1,1,1,1,1,1,1,1]},
'CELLENV_2':{'reactions':['3HAD181','3OAR181','3OAS181','EAR181x'],'stoichiometry':[1,1,1,1]},
'CELLENV_3':{'reactions':['3HAD160','3OAR160','EAR160x','3OAS160'],'stoichiometry':[1,1,1,1]},
'CELLENV_4':{'reactions':['EAR120x','3OAR120','3HAD120','3OAS120','EAR100x'],'stoichiometry':[1,1,1,1,1]},
'CELLENV_5':{'reactions':['G1PACT','UAGDP','PGAMT_reverse','GF6PTA'],'stoichiometry':[1,1,1,1]},
'CELLENV_6':{'reactions':['3OAR40','EAR40x','3OAS60','3OAR60','3HAD80','3OAS80','3OAR80','EAR60x','3HAD60','EAR80x','3HAD40'],'stoichiometry':[1,1,1,1,1,1,1,1,1,1,1]},
'CELLENV_7':{'reactions':['3HAD161','EAR161x','3OAS161','3OAR161','3OAS141','3HAD141','3OAR121','EAR121x','3HAD121','EAR141x','T2DECAI','3OAR141','3OAS121'],'stoichiometry':[1,1,1,1,1,1,1,1,1,1,1,1,1]},
'CELLENV_8':{'reactions':['TDPGDH','TDPDRR','TDPDRE','G1PTT'],'stoichiometry':[1,1,1,1]},
'CELLENV_9':{'reactions':['3OAS140','3OAR140'],'stoichiometry':[1,1]},
'CELLENV_10':{'reactions':['3HAD140','EAR140x'],'stoichiometry':[1,1]},
'CELLENV_11':{'reactions':['3OAR100','3HAD100','3OAS100'],'stoichiometry':[1,1,1]},
'LIPOPOLYSACCHARIDE_1':{'reactions':['COLIPAabcpp','COLIPAabctex','EDTXS1','EDTXS2','GALT1','GLCTR1','GLCTR2','GLCTR3','HEPK1','HEPK2','HEPT1','HEPT2','HEPT3','HEPT4','LPADSS','MOAT','MOAT2','MOAT3C','RHAT1','TDSK','USHD'],'stoichiometry':[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]},
'LIPOPOLYSACCHARIDE_2':{'reactions':['AGMHE','GMHEPAT','GMHEPK','GMHEPPA','S7PI'],'stoichiometry':[1,1,1,1,1]},
'LIPOPOLYSACCHARIDE_3':{'reactions':['U23GAAT','UHGADA','UAGAAT'],'stoichiometry':[1,1,1]},
'LIPOPOLYSACCHARIDE_4':{'reactions':['KDOPP','KDOCT2','KDOPS'],'stoichiometry':[1,1,1]},
'ASTPathway':{'reactions':['AST','SADH','SGDS','SGSAD','SOTA'],'stoichiometry':[1,1,1,1,1]}
};
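        #each entry above maps a lumped (net) reaction id to its member
        #reaction ids and their relative stoichiometries; for example,
        #'g1p_to_glycogen':{'reactions':['GLGC','GLCS1'],'stoichiometry':[1,1]}
        #directs make_net_reaction/add_net_reaction below to sum 1*GLGC + 1*GLCS1
        #into a single reaction named 'g1p_to_glycogen'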
#model reduction functions
def load_ALEWt(self,anoxic = False, oxic = True, update_ampms2 = True, convert2irreversible = False):
'''load iJO1366 with the following changes:
1. update to AMPMS2 to account for carbon monoxide
2. changes to uptake bounds for glucose M9 media
3. constrain the model to use 'PFK' instead of 'F6PA', 'DHAPT' when grown on glucose
        4. constrain the model to use the physiologically preferred glutamate synthesis enzymes
5. depending on oxygen availability, constrain the model to use the correct RNR enzymes
6. depending on oxygen availability, constrain the model to use the correct Dihydroorotate dehydrogenase (PyrD) enzymes
7. constrain fatty acid biosynthesis to use the physiologically preferred enzymes'''
ijo1366_sbml = settings.workspace_data+"/models/iJO1366.xml"
# Read in the sbml file and define the model conditions
cobra_model = create_cobra_model_from_sbml_file(ijo1366_sbml, print_time=True)
if update_ampms2:
# Update AMPMS2
coc = Metabolite('co_c','CO','carbon monoxide','c');
cop = Metabolite('co_p','CO','carbon monoxide','p');
coe = Metabolite('co_e','CO','carbon monoxide','e');
cobra_model.add_metabolites([coc,cop,coe])
ampms2_mets = {};
ampms2_mets[cobra_model.metabolites.get_by_id('air_c')] = -1;
ampms2_mets[cobra_model.metabolites.get_by_id('amet_c')] = -1;
ampms2_mets[cobra_model.metabolites.get_by_id('dad_DASH_5_c')] = 1;
ampms2_mets[cobra_model.metabolites.get_by_id('met_DASH_L_c')] = 1;
ampms2_mets[cobra_model.metabolites.get_by_id('4ampm_c')] = 1;
ampms2_mets[cobra_model.metabolites.get_by_id('h_c')] = 3;
ampms2_mets[cobra_model.metabolites.get_by_id('for_c')] = 1;
ampms2_mets[cobra_model.metabolites.get_by_id('co_c')] = 1;
ampms2 = Reaction('AMPMS3');
ampms2.add_metabolites(ampms2_mets);
copp_mets = {};
copp_mets[cobra_model.metabolites.get_by_id('co_c')] = -1;
copp_mets[cobra_model.metabolites.get_by_id('co_p')] = 1;
copp = Reaction('COtpp');
copp.add_metabolites(copp_mets);
coex_mets = {};
coex_mets[cobra_model.metabolites.get_by_id('co_p')] = -1;
coex_mets[cobra_model.metabolites.get_by_id('co_e')] = 1;
coex = Reaction('COtex');
coex.add_metabolites(coex_mets);
cotrans_mets = {};
cotrans_mets[cobra_model.metabolites.get_by_id('co_e')] = -1;
cotrans = Reaction('EX_co_LPAREN_e_RPAREN_');
cotrans.add_metabolites(cotrans_mets);
cobra_model.add_reactions([ampms2,copp,coex,cotrans]);
cobra_model.remove_reactions(['AMPMS2']);
# Define the model conditions:
system_boundaries = [x.id for x in cobra_model.reactions if x.boundary == 'system_boundary'];
for b in system_boundaries:
cobra_model.reactions.get_by_id(b).lower_bound = 0.0;
cobra_model.reactions.get_by_id(b).upper_bound = 0.0;
# Reset demand reactions
demand = ['DM_4CRSOL',
'DM_5DRIB',
'DM_AACALD',
'DM_AMOB',
'DM_MTHTHF',
'DM_OXAM'];
for d in demand:
cobra_model.reactions.get_by_id(d).lower_bound = 0.0;
cobra_model.reactions.get_by_id(d).upper_bound = 1000.0;
# Change the objective
update_objective(cobra_model,{'Ec_biomass_iJO1366_WT_53p95M':1.0})
# Assign KOs
# Specify media composition (M9 glucose):
cobra_model.reactions.get_by_id('EX_glc_LPAREN_e_RPAREN_').lower_bound = -10.0;
cobra_model.reactions.get_by_id('EX_o2_LPAREN_e_RPAREN_').lower_bound = -18.0;
#uptake = ['EX_cl_LPAREN_e_RPAREN_',
# 'EX_so4_LPAREN_e_RPAREN_',
# 'EX_ca2_LPAREN_e_RPAREN_',
# 'EX_pi_LPAREN_e_RPAREN_',
# 'EX_fe2_LPAREN_e_RPAREN_',
# 'EX_cu2_LPAREN_e_RPAREN_',
# 'EX_zn2_LPAREN_e_RPAREN_',
# 'EX_cbl1_LPAREN_e_RPAREN_',
# 'EX_mobd_LPAREN_e_RPAREN_',
# 'EX_ni2_LPAREN_e_RPAREN_',
# 'EX_mn2_LPAREN_e_RPAREN_',
# 'EX_k_LPAREN_e_RPAREN_',
# 'EX_nh4_LPAREN_e_RPAREN_',
# 'EX_cobalt2_LPAREN_e_RPAREN_',
# 'EX_mg2_LPAREN_e_RPAREN_'];
uptake = ['EX_ca2_LPAREN_e_RPAREN_',
'EX_cbl1_LPAREN_e_RPAREN_',
'EX_cl_LPAREN_e_RPAREN_',
'EX_co2_LPAREN_e_RPAREN_',
'EX_cobalt2_LPAREN_e_RPAREN_',
'EX_cu2_LPAREN_e_RPAREN_',
'EX_fe2_LPAREN_e_RPAREN_',
'EX_fe3_LPAREN_e_RPAREN_',
'EX_h_LPAREN_e_RPAREN_',
'EX_h2o_LPAREN_e_RPAREN_',
'EX_k_LPAREN_e_RPAREN_',
'EX_mg2_LPAREN_e_RPAREN_',
'EX_mn2_LPAREN_e_RPAREN_',
'EX_mobd_LPAREN_e_RPAREN_',
'EX_na1_LPAREN_e_RPAREN_',
'EX_nh4_LPAREN_e_RPAREN_',
'EX_ni2_LPAREN_e_RPAREN_',
'EX_pi_LPAREN_e_RPAREN_',
'EX_sel_LPAREN_e_RPAREN_',
'EX_slnt_LPAREN_e_RPAREN_',
'EX_so4_LPAREN_e_RPAREN_',
'EX_tungs_LPAREN_e_RPAREN_',
'EX_zn2_LPAREN_e_RPAREN_'];
for u in uptake:
cobra_model.reactions.get_by_id(u).lower_bound = -1000.0;
# Specify allowed secretion products
secrete = ['EX_meoh_LPAREN_e_RPAREN_',
'EX_5mtr_LPAREN_e_RPAREN_',
'EX_h_LPAREN_e_RPAREN_',
'EX_co2_LPAREN_e_RPAREN_',
'EX_co_LPAREN_e_RPAREN_',
'EX_h2o_LPAREN_e_RPAREN_',
'EX_ac_LPAREN_e_RPAREN_',
'EX_fum_LPAREN_e_RPAREN_',
'EX_for_LPAREN_e_RPAREN_',
'EX_etoh_LPAREN_e_RPAREN_',
'EX_lac_DASH_L_LPAREN_e_RPAREN_',
'EX_pyr_LPAREN_e_RPAREN_',
'EX_succ_LPAREN_e_RPAREN_'];
for s in secrete:
cobra_model.reactions.get_by_id(s).upper_bound = 1000.0;
# Constrain specific reactions
noFlux = ['F6PA', 'DHAPT'];
ammoniaExcess = ['GLUDy']; # PMCID: 196288
# RNR control (DOI:10.1111/j.1365-2958.2006.05493.x)
# Dihydroorotate dehydrogenase (PyrD) (DOI:10.1016/S0076-6879(78)51010-0, PMID: 199252, DOI:S0969212602008316 [pii])
aerobic = ['RNDR1', 'RNDR2', 'RNDR3', 'RNDR4', 'DHORD2', 'ASPO6','LCARR','PFL','FRD2','FRD3']; # see DOI:10.1111/j.1365-2958.2011.07593.x; see DOI:10.1089/ars.2006.8.773 for a review
anaerobic = ['RNTR1c2', 'RNTR2c2', 'RNTR3c2', 'RNTR4c2', 'DHORD5', 'ASPO5','PDH','SUCDi']; # see DOI:10.1074/jbc.274.44.31291, DOI:10.1128/JB.00440-07
if anoxic:
rxnList = noFlux + ammoniaExcess + anaerobic;
for rxn in rxnList:
cobra_model.reactions.get_by_id(rxn).lower_bound = 0.0;
cobra_model.reactions.get_by_id(rxn).upper_bound = 0.0;
elif oxic:
rxnList = noFlux + ammoniaExcess + aerobic;
for rxn in rxnList:
cobra_model.reactions.get_by_id(rxn).lower_bound = 0.0;
cobra_model.reactions.get_by_id(rxn).upper_bound = 0.0;
else:
rxnList = noFlux + ammoniaExcess;
for rxn in rxnList:
cobra_model.reactions.get_by_id(rxn).lower_bound = 0.0;
cobra_model.reactions.get_by_id(rxn).upper_bound = 0.0;
# Set the direction for specific reactions
# Fatty acid biosynthesis: DOI: 10.1016/j.ymben.2010.10.007, PMCID: 372925
fattyAcidSynthesis = ['ACCOAC', 'ACOATA', 'HACD1', 'HACD2', 'HACD3', 'HACD4', 'HACD5', 'HACD6', 'HACD7', 'HACD8', 'KAS14', 'KAS15', 'MACPD', 'MCOATA', '3OAR100', '3OAR120', '3OAR121', '3OAR140', '3OAR141', '3OAR160', '3OAR161', '3OAR180', '3OAR181', '3OAR40', '3OAR60', '3OAR80']
fattyAcidOxidation = ['ACACT1r', 'ACACT2r', 'ACACT3r', 'ACACT4r', 'ACACT5r', 'ACACT6r', 'ACACT7r', 'ACACT8r', 'ACOAD1f', 'ACOAD2f', 'ACOAD3f', 'ACOAD4f', 'ACOAD5f', 'ACOAD6f', 'ACOAD7f', 'ACOAD8f', 'CTECOAI6', 'CTECOAI7', 'CTECOAI8', 'ECOAH1', 'ECOAH2', 'ECOAH3', 'ECOAH4', 'ECOAH5', 'ECOAH6', 'ECOAH7', 'ECOAH8']
ndpk = ['NDPK1','NDPK2','NDPK3','NDPK4','NDPK5','NDPK7','NDPK8'];
rxnList = fattyAcidSynthesis + fattyAcidOxidation;
for rxn in rxnList:
cobra_model.reactions.get_by_id(rxn).lower_bound = 0.0;
cobra_model.reactions.get_by_id(rxn).upper_bound = 1000.0;
# convert to irreversible
if convert2irreversible: convert_to_irreversible(cobra_model);
return cobra_model;
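    #minimal usage sketch (assumes settings.workspace_data points to a folder
    #containing models/iJO1366.xml, as referenced above):
    #   deps = stage02_isotopomer_dependencies();
    #   cobra_model = deps.load_ALEWt(anoxic=False,oxic=True,
    #       update_ampms2=True,convert2irreversible=True);
    #   cobra_model.optimize();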
def reduce_model(self,cobra_model,cobra_model_outFileName=None):
'''reduce model'''
# Input: cobra_model
# Output: cobra_model
# the lower and upper bounds have been set to 0.0
# for all reactions that cannot carry a flux
cobra_model.optimize()
sol_f = cobra_model.solution.f
fva_data = flux_variability_analysis(cobra_model, fraction_of_optimum=0.9,
objective_sense='maximize', the_reactions=None,
allow_loops=True, solver='gurobi',
the_problem='return', tolerance_optimality=1e-6,
tolerance_feasibility=1e-6, tolerance_barrier=1e-8,
lp_method=1, lp_parallel=0, new_objective=None,
relax_b=None, error_reporting=None,
number_of_processes=1, copy_model=False);
#with open("data/ijo1366_irrev_fva.json", 'w') as outfile:
# json.dump(data, outfile, indent=4);
#fva_data = json.load(open("data/ijo1366_irrev_fva.json"));
# Reduce model
rxns_noflux = [];
for k,v in fva_data.items():
if v['minimum'] == 0.0 and v['maximum'] == 0.0:
cobra_model.reactions.get_by_id(k).lower_bound = 0.0;
cobra_model.reactions.get_by_id(k).upper_bound = 0.0;
rxns_noflux.append(k);
if cobra_model_outFileName:
write_cobra_model_to_sbml_file(cobra_model,cobra_model_outFileName)
cobra_model.optimize()
sol_reduced_f = cobra_model.solution.f
# Check that the reduced model is consistent with the original model
if not sol_f == sol_reduced_f:
print('reduced model is inconsistent with the original model')
print('original model solution: ' + str(sol_f))
print('reduced model solution: ' + str(sol_reduced_f))
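    #usage sketch: reduce_model mutates cobra_model in place, zeroing the bounds
    #of every reaction whose FVA range is [0,0] at >=90% of the optimum;
    #   deps.reduce_model(cobra_model,'data/reduced_model.xml'); #hypothetical output path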
    def reduce_model_pfba(self,cobra_model,cobra_model_outFileName=None,pfba_outFileName=None,subs=[]):
'''reduce model using pfba'''
# Input: cobra_model
# cobra_model_outFileName
# subs = string of specific subsystems to reduce
# Output: cobra_model
# the lower and upper bounds have been set to 0.0
# for all reactions that cannot carry a flux
cobra_model.optimize()
sol_f = cobra_model.solution.f
# Find minimal flux solution:
pfba = optimize_minimal_flux(cobra_model,True,solver='gurobi');
# Reduce model
rxns_noflux = [];
# set lb and ub for all reactions with 0 flux to 0;
for k,v in cobra_model.solution.x_dict.items():
            if v <= 0.0 and cobra_model.reactions.get_by_id(k).subsystem in subs:
cobra_model.reactions.get_by_id(k).lower_bound = 0.0;
cobra_model.reactions.get_by_id(k).upper_bound = 0.0;
rxns_noflux.append(k);
if cobra_model_outFileName:
write_cobra_model_to_sbml_file(cobra_model,cobra_model_outFileName)
if pfba_outFileName:
# Write pfba solution to file
with open(pfba_outFileName,mode='wb') as outfile:
writer = csv.writer(outfile)
writer.writerow(['Reaction','Flux'])
for k,v in cobra_model.solution.x_dict.items():
writer.writerow([k,v]);
cobra_model.optimize()
sol_reduced_f = cobra_model.solution.f
# Check that the reduced model is consistent with the original model
if not sol_f == sol_reduced_f:
print('reduced model is inconsistent with the original model')
print('original model solution: ' + str(sol_f))
print('reduced model solution: ' + str(sol_reduced_f))
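    #usage sketch (subsystem names are matched against each reaction's
    #.subsystem attribute; the list below is illustrative):
    #   deps.reduce_model_pfba(cobra_model,subs=['tRNA Charging','Unassigned']);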
def add_net_reaction(self,cobra_model_IO, rxn_dict_I,remove_reverse=False):
'''add a net reaction to the model after removing
the individual reactions'''
# input: rxn_dict_I = dictionary of net reaction ids and
# corresponding list of individual reaction ids
# output: cobra_model_IO = individual reactions replaced with a
# net reaction
cobra_model_IO.optimize();
sol_orig = cobra_model_IO.solution.f;
print("original model solution", sol_orig)
        try:
            cobra_model_tmp = cobra_model_IO.copy2();
        except (KeyError, AttributeError) as e:
            #copy2 is not part of the standard cobra API; fall back to Model.copy()
            print(e);
            cobra_model_tmp = cobra_model_IO.copy();
# make net reactions:
rxn_dict_net = {};
        for k,v in rxn_dict_I.items():
            rxn_net = self.make_net_reaction(cobra_model_tmp, k, v['reactions'],v['stoichiometry']);
if rxn_net:
rxn_net.lower_bound = 0.0;
rxn_net.upper_bound = 1000.0;
rxn_net.objective_coefficient = 0.0;
else:
                print('an error occurred in add_net_reaction')
exit(-1)
#rxn_net.reversibility = False;
rxn_dict_net[k] = (v['reactions'],rxn_net);
# add replace individual reactions with net reaction
for k,v in rxn_dict_net.items():
cobra_model_IO.remove_reactions(v[0]);
# remove the reverse reaction if it exists for irreversible models
if remove_reverse:
for rxn in v[0]:
if '_reverse' in rxn:
rxn_rev = rxn.replace('_reverse','')
if cobra_model_IO.reactions.has_id(rxn_rev): cobra_model_IO.remove_reactions(rxn_rev);
else:
rxn_rev = rxn+'_reverse';
if cobra_model_IO.reactions.has_id(rxn_rev): cobra_model_IO.remove_reactions(rxn_rev);
cobra_model_IO.add_reaction(v[1]);
cobra_model_IO.optimize();
sol_new = cobra_model_IO.solution.f;
print(k, sol_new)
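    #usage sketch: lump the two glycogen-synthesis reactions into one net
    #reaction using the dictionary format defined in __init__ above;
    #   net = {'g1p_to_glycogen':{'reactions':['GLGC','GLCS1'],'stoichiometry':[1,1]}};
    #   deps.add_net_reaction(cobra_model,net,remove_reverse=True);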
def make_net_reaction(self,cobra_model_I, rxn_id_I, rxn_list_I,stoich_list_I):
'''generate a net reaction from a list of individual reactions'''
# input: rxn_list_I = list of reaction IDs
# output: rxn_net_O = net reaction (cobra Reaction object)
from cobra.core.Reaction import Reaction
#rxn_net_O = cobra_model_I.reactions.get_by_id(rxn_list_I[0]);
#for r in rxn_list_I[1:]:
# if cobra_model_I.reactions.get_by_id(r).reversibility:
# print r + " is reversible!";
# print "continue?"
# rxn_net_O += cobra_model_I.reactions.get_by_id(r);
# check input:
if not len(stoich_list_I) == len(rxn_list_I):
print("error in " + rxn_id_I + ": there are " + str(len(rxn_list_I)) + " rxn ids and " + str(len(stoich_list_I)) + " coefficients");
exit(-1);
rxn_net_O = Reaction(rxn_id_I);
for i,r in enumerate(rxn_list_I):
mets = {};
metlist = [];
metlist = cobra_model_I.reactions.get_by_id(r).products + cobra_model_I.reactions.get_by_id(r).reactants;
for met in metlist:
mets[met] = cobra_model_I.reactions.get_by_id(r).get_coefficient(met)*stoich_list_I[i];
rxn_net_O.add_metabolites(mets);
rxn_net_O.subsystem = cobra_model_I.reactions.get_by_id(r).subsystem; #copy over the subsystem
# check net reaction
#if not rxn_net_O.check_mass_balance():
#print "error: " + rxn_id_I + " is not elementally balanced";
#print rxn_net_O.id;
#print rxn_net_O.build_reaction_string();
return rxn_net_O;
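    #worked example (approximate equations): if GLGC is g1p + atp --> adpglc + ppi
    #and GLCS1 is adpglc --> adp + glycogen, then
    #   rxn = deps.make_net_reaction(cobra_model,'g1p_to_glycogen',['GLGC','GLCS1'],[1,1])
    #returns a Reaction whose stoichiometry is the coefficient-weighted sum of
    #the two, so the shared intermediate adpglc cancels (+1 - 1 = 0)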
def get_solBySub(self,cobra_model_I,sol_I,sub_I):
sol_O = {};
for k,v in sol_I.items():
try:
if cobra_model_I.reactions.get_by_id(k).subsystem == sub_I:
sol_O[k] = v;
except:
print(k + ' reaction not found')
return sol_O;
def groupBySameFlux(self,cobra_model_I,sol_I):
flux_list = [];
for r,f in sol_I.items():
            if f not in flux_list and float(f)>0.0:
flux_list.append(f)
sameFlux_O = {};
for f in flux_list:
rxn_list = [];
for r,v in sol_I.items():
if v==f:
rxn_list.append(r);
stoich = [1]*len(rxn_list)
rxnName = '';
for rxn in rxn_list:
rxnName = rxnName + rxn + '_';
rxnName = rxnName[:-1];
# check that the reaction name is less than 225 characters
if len(rxnName)>224:
rxnName = rxnName[:224];
sameFlux_O[rxnName] = {'reactions':rxn_list,
'stoichiometry':stoich,
'flux':f};
#netRxn = make_net_reaction(cobra_model_copy,rxnName,rxn_list,stoich)
#sameFlux_O[rxnName] = {'reactions':rxn_list,
# 'stoichiometry':stoich,
# 'flux':f,
# 'net':netRxn};
return sameFlux_O
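    #the returned dictionary groups reactions that carry an identical positive
    #pfba flux, keyed by the member ids joined with '_' (truncated to 224
    #characters), e.g. (illustrative):
    #   {'PGK_ENO':{'reactions':['PGK','ENO'],'stoichiometry':[1,1],'flux':9.5}}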
def add_net_reaction_subsystem(self,cobra_model_IO,sol_I,subs_I):
'''make net reactions for specific subsystems grouped
by reactions that have the same flux from pfba'''
#input: cobra_model
# sol_I = pfba solution
# sub_I = list of model subsystems
#output: cobra_model
# convert model to irreversible
# convert_to_irreversible(cobra_model_IO);
# Make net reactions for pathways outside of the scope
# of the isotopomer model
for s in subs_I:
            sol = self.get_solBySub(cobra_model_IO,sol_I,s)
            sameFlux = self.groupBySameFlux(cobra_model_IO,sol)
netRxns = {};
for k,v in sameFlux.items():
if len(v['reactions'])>1:
netRxns[k] = v;
            self.add_net_reaction(cobra_model_IO,netRxns);
# add subsystem information back in
for k in sameFlux.keys():
cobra_model_IO.reactions.get_by_id(k).subsystem = s
        self.remove_noflux_reactions(cobra_model_IO,sol_I,subs_I)
# convert model back to reversible
# revert_to_reversible(cobra_model_IO);
def remove_noflux_reactions(self,cobra_model,sol=None,subs=[]):
'''remove noflux reactions'''
# Input: cobra_model
# sol = pfba solution
# subs = string of specific subsystems to reduce
# Output: cobra_model
# if the lower and upper bounds are zero, the reactions
# are removed
cobra_model.optimize()
sol_f = cobra_model.solution.f
# Reduce model
rxns_noflux = [];
# set lb and ub for all reactions with 0 flux to 0;
if sol:
if subs:
for k,v in sol.items():
try:
                        if float(v) <= 0.0 and cobra_model.reactions.get_by_id(k).subsystem in subs:
cobra_model.reactions.get_by_id(k).lower_bound = 0.0;
cobra_model.reactions.get_by_id(k).upper_bound = 0.0;
cobra_model.remove_reactions(k)
rxns_noflux.append(k);
except:
print('reaction is not in model: ' + k)
else:
for k,v in sol.items():
try:
                        if float(v) <= 0.0:
cobra_model.reactions.get_by_id(k).lower_bound = 0.0;
cobra_model.reactions.get_by_id(k).upper_bound = 0.0;
cobra_model.remove_reactions(k)
rxns_noflux.append(k);
except:
print('reaction is not in model: ' + k)
else:
if subs:
for r in cobra_model.reactions:
if r.lower_bound == 0.0 and r.upper_bound == 0.0 and cobra_model.reactions.get_by_id(r.id).subsystem in subs:
cobra_model.remove_reactions(r.id)
else:
for r in cobra_model.reactions:
if r.lower_bound == 0.0 and r.upper_bound == 0.0:
cobra_model.remove_reactions(r.id)
cobra_model.optimize()
sol_reduced_f = cobra_model.solution.f
# Check that the reduced model is consistent with the original model
if not sol_f == sol_reduced_f:
print('reduced model is inconsistent with the original model')
print('original model solution: ' + str(sol_f))
print('reduced model solution: ' + str(sol_reduced_f))
def get_reactionsInfo(self,cobra_model):
'''return the number of reactions and the number of reactions
that cannot carry a flux (i.e. lb and ub of 0.0)'''
nrxn_O = len(cobra_model.reactions);
nrxn_noflux_O = 0;
for r in cobra_model.reactions:
if r.lower_bound == 0.0 and r.upper_bound == 0.0:
nrxn_noflux_O += 1;
return nrxn_O, nrxn_noflux_O
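    #usage sketch: iterate removal until no blocked reactions remain (this
    #mirrors the while-loop in makeIsotopomerModel_iteration02 below):
    #   nrxn,nrxn_noflux = deps.get_reactionsInfo(cobra_model);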
#model reduction iteration functions
def makeIsotopomerModel_iteration01(self,pfba_file,netrxn_irreversible_model_filename,fva_reduced_model_filename,reduced_lbub_filename):
'''iteration 1:
identification of reactions that can be lumped in pathways outside the model scope'''
cobra_model = self.load_ALEWt();
# Make the model irreversible for downstream manipulations:
convert_to_irreversible(cobra_model);
# Add lumped isotopomer reactions
        self.add_net_reaction(cobra_model,self.isotopomer_rxns_net_irreversible);
# Find minimal flux solution:
pfba = optimize_minimal_flux(cobra_model,True,solver='gurobi');
# Write pfba solution to file
with open(pfba_file,mode='wb') as outfile:
writer = csv.writer(outfile)
writer.writerow(['Reaction','Flux'])
for k,v in cobra_model.solution.x_dict.items():
writer.writerow([k,v]);
# Read in pfba solution
pfba_sol = {};
with open(pfba_file,mode='r') as infile:
dictreader = csv.DictReader(infile)
for r in dictreader:
pfba_sol[r['Reaction']] = r['Flux'];
# Make net reactions for pathways outside of the scope
# of the isotopomer model
subs = ['Cell Envelope Biosynthesis',
'Glycerophospholipid Metabolism',
'Lipopolysaccharide Biosynthesis / Recycling',
'Membrane Lipid Metabolism',
                'Murein Biosynthesis',
                'Murein Recycling',
'Cofactor and Prosthetic Group Biosynthesis',
#'Transport, Inner Membrane',
#'Transport, Outer Membrane',
#'Transport, Outer Membrane Porin',
'tRNA Charging',
'Unassigned',
'Exchange',
'Inorganic Ion Transport and Metabolism',
'Nitrogen Metabolism'];
self.add_net_reaction_subsystem(cobra_model,pfba_sol,subs);
self.remove_noflux_reactions(cobra_model,pfba_sol,['Transport, Outer Membrane Porin','Transport, Inner Membrane','Transport, Outer Membrane'])
revert_to_reversible(cobra_model);
# write model to sbml
write_cobra_model_to_sbml_file(cobra_model,netrxn_irreversible_model_filename)
# Reduce model using FVA:
self.reduce_model(cobra_model,fva_reduced_model_filename)
# Remove all reactions with 0 flux
self.remove_noflux_reactions(cobra_model);
with open(reduced_lbub_filename,mode='wb') as outfile:
writer = csv.writer(outfile)
writer.writerow(['Reaction','Formula','LB','UB','Subsystem'])
for r in cobra_model.reactions:
writer.writerow([r.id,
r.build_reaction_string(),
r.lower_bound,
r.upper_bound,
r.subsystem]);
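    #usage sketch (hypothetical output file names):
    #   deps.makeIsotopomerModel_iteration01('data/pfba.csv',
    #       'data/netrxn_irrev.xml','data/fva_reduced.xml','data/reduced_lbub.csv');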
def makeIsotopomerModel_iteration02(self,pfba_filename,fva_reduced_model_filename,netrxn_irreversible_model_filename,reduced_lbub_filename):
'''iteration 2:
addition of finalized lumped reactions that are in pathways that are within the scope of the model
and reduction by removing reactions with zero optimal minimal flux outside the scope of the model'''
        cobra_model = self.load_ALEWt();
# Make the model irreversible for downstream manipulations:
convert_to_irreversible(cobra_model);
cobra_model.optimize();
# Add lumped isotopomer reactions
        self.add_net_reaction(cobra_model,self.isotopomer_rxns_net_irreversible,True);
cobra_model.optimize();
# Find minimal flux solution:
pfba = optimize_minimal_flux(cobra_model,True,solver='gurobi');
# Write pfba solution to file
with open(pfba_filename,mode='wb') as outfile:
writer = csv.writer(outfile)
writer.writerow(['Reaction','Flux','Subsystem'])
for k,v in cobra_model.solution.x_dict.items():
writer.writerow([k,v,cobra_model.reactions.get_by_id(k).subsystem]);
# Read in pfba solution
pfba_sol = {};
with open(pfba_filename,mode='r') as infile:
dictreader = csv.DictReader(infile)
for r in dictreader:
pfba_sol[r['Reaction']] = r['Flux'];
# remove noflux reactions for pathways outside of the scope
# of the isotopomer model
subs = ['Cell Envelope Biosynthesis',
'Glycerophospholipid Metabolism',
'Lipopolysaccharide Biosynthesis / Recycling',
'Membrane Lipid Metabolism',
                'Murein Biosynthesis',
                'Murein Recycling',
'Cofactor and Prosthetic Group Biosynthesis',
'Transport, Inner Membrane',
'Transport, Outer Membrane',
'Transport, Outer Membrane Porin',
'tRNA Charging',
'Unassigned',
#'Exchange',
'Inorganic Ion Transport and Metabolism',
'Nitrogen Metabolism',
'Alternate Carbon Metabolism'];
self.remove_noflux_reactions(cobra_model,pfba_sol,subs)
# Reduce model using FVA:
self.reduce_model(cobra_model,fva_reduced_model_filename)
# Reset secretion products that may have been turned off
secrete = ['EX_meoh_LPAREN_e_RPAREN_',
'EX_5mtr_LPAREN_e_RPAREN_',
'EX_h_LPAREN_e_RPAREN_',
'EX_co2_LPAREN_e_RPAREN_',
'EX_co_LPAREN_e_RPAREN_',
'EX_h2o_LPAREN_e_RPAREN_',
'EX_ac_LPAREN_e_RPAREN_',
'EX_fum_LPAREN_e_RPAREN_',
'EX_for_LPAREN_e_RPAREN_',
'EX_etoh_LPAREN_e_RPAREN_',
'EX_lac_DASH_L_LPAREN_e_RPAREN_',
'EX_pyr_LPAREN_e_RPAREN_',
'EX_succ_LPAREN_e_RPAREN_'];
for s in secrete:
cobra_model.reactions.get_by_id(s).upper_bound = 1000.0;
# Remove all reactions with 0 flux
r1,r2 = self.get_reactionsInfo(cobra_model);
while r2 !=0:
self.remove_noflux_reactions(cobra_model);
r1,r2 = self.get_reactionsInfo(cobra_model);
print(r1,r2);
# write model to sbml
write_cobra_model_to_sbml_file(cobra_model,netrxn_irreversible_model_filename)
with open(reduced_lbub_filename,mode='wb') as outfile:
writer = csv.writer(outfile)
writer.writerow(['Reaction','Formula','LB','UB','Subsystem'])
for r in cobra_model.reactions:
writer.writerow([r.id,
r.build_reaction_string(),
r.lower_bound,
r.upper_bound,
r.subsystem]);
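    #usage sketch (hypothetical file names; note that the fva and netrxn
    #file arguments are ordered differently than in iteration01):
    #   deps.makeIsotopomerModel_iteration02('data/pfba2.csv',
    #       'data/fva_reduced2.xml','data/netrxn_irrev2.xml','data/reduced_lbub2.csv');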
def makeIsotopomerModel_cobraMAT(self,model_filename,xml_filename,mat_filename,csv_filename,isotopomer_mapping_filename,ko_list=[],flux_dict={},description=None):
'''iteration 3:
Remove reactions that are thermodynamically unfavorable and add isotopomer data'''
# Read in the sbml file and define the model conditions
cobra_model = create_cobra_model_from_sbml_file(model_filename, print_time=True)
# Modify glucose uptake:
if cobra_model.reactions.has_id('EX_glc_LPAREN_e_RPAREN__reverse'):
lb,ub = cobra_model.reactions.get_by_id('EX_glc_LPAREN_e_RPAREN__reverse').lower_bound,cobra_model.reactions.get_by_id('EX_glc_LPAREN_e_RPAREN__reverse').upper_bound;
EX_glc_mets = {};
EX_glc_mets[cobra_model.metabolites.get_by_id('glc_DASH_D_e')] = -1;
EX_glc = Reaction('EX_glc_LPAREN_e_RPAREN_');
EX_glc.add_metabolites(EX_glc_mets);
cobra_model.add_reaction(EX_glc)
cobra_model.reactions.get_by_id('EX_glc_LPAREN_e_RPAREN_').lower_bound = -ub;
cobra_model.reactions.get_by_id('EX_glc_LPAREN_e_RPAREN_').upper_bound = lb;
cobra_model.remove_reactions(['EX_glc_LPAREN_e_RPAREN__reverse'])
## Remove thermodynamically infeasible reactions:
#infeasible = [];
#loops = [];
#cobra_model.remove_reactions(infeasible + loops);
# Apply KOs, if any:
for ko in ko_list:
cobra_model.reactions.get_by_id(ko).lower_bound = 0.0;
cobra_model.reactions.get_by_id(ko).upper_bound = 0.0;
# Apply flux constraints, if any:
for rxn,flux in flux_dict.items():
cobra_model.reactions.get_by_id(rxn).lower_bound = flux['lb'];
cobra_model.reactions.get_by_id(rxn).upper_bound = flux['ub'];
# Change description, if any:
if description:
cobra_model.description = description;
# Read in isotopomer model
isotopomer_mapping = self.read_isotopomer_mapping_csv(isotopomer_mapping_filename); #broken
isotopomer_str = self.build_isotopomer_str(isotopomer_mapping);
# write model to sbml
write_cobra_model_to_sbml_file(cobra_model,xml_filename)
# Add isotopomer field to model
for r in cobra_model.reactions:
if r.id in isotopomer_str:
cobra_model.reactions.get_by_id(r.id).isotopomer = isotopomer_str[r.id];
else:
cobra_model.reactions.get_by_id(r.id).isotopomer = '';
# Add null basis:
cobra_model_array = cobra_model.to_array_based_model();
N = self.calculate.null(cobra_model_array.S.todense()) #convert S from sparse to full and compute the nullspace
cobra_model.N = N;
# solve and save pFBA for later use:
optimize_minimal_flux(cobra_model,True,solver='gurobi');
# add match field:
match = numpy.zeros(len(cobra_model.reactions));
cobra_model.match = match;
# write model to mat
save_matlab_model_isotopomer(cobra_model,mat_filename);
with open(csv_filename,mode='wb') as outfile:
writer = csv.writer(outfile)
writer.writerow(['Reaction','Formula','LB','UB','Genes','Subsystem','Isotopomer'])
for r in cobra_model.reactions:
writer.writerow([r.id,
r.build_reaction_string(),
r.lower_bound,
r.upper_bound,
r.gene_reaction_rule,
r.subsystem,
r.isotopomer]);
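    #usage sketch (hypothetical file names; ko_list and flux_dict follow the
    #shapes consumed above):
    #   deps.makeIsotopomerModel_cobraMAT('data/model.xml','data/model_out.xml',
    #       'data/model_out.mat','data/model_out.csv','data/isotopomer_mapping.csv',
    #       ko_list=['F6PA'],flux_dict={'EX_glc_LPAREN_e_RPAREN_':{'lb':-10.0,'ub':-10.0}});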
#ecoli_INCA modifications
def expand_ecoliINCA01(self,model_id_I,mapping_id_I,date_I,model_id_O,mapping_id_O):
'''expand the INCA Ecoli model to account for additional metabolites'''
query = stage02_isotopomer_query()
# get the xml model
cobra_model_sbml = ''
cobra_model_sbml = query.get_row_modelID_dataStage02IsotopomerModels(model_id_I);
# load the model
if cobra_model_sbml:
            if cobra_model_sbml['file_type'] == 'sbml':
                with open('data/cobra_model_tmp.xml','wb') as file:
                    file.write(cobra_model_sbml['model_file']);
                cobra_model = None;
                cobra_model = create_cobra_model_from_sbml_file('data/cobra_model_tmp.xml', print_time=True);
            elif cobra_model_sbml['file_type'] == 'json':
                with open('data/cobra_model_tmp.json','wb') as file:
                    file.write(cobra_model_sbml['model_file']);
                cobra_model = None;
                cobra_model = load_json_model('data/cobra_model_tmp.json');
else:
print('file_type not supported')
#get the atomMapping_reactions
atomMappingReactions = query.get_rows_mappingID_dataStage02IsotopomerAtomMappingReactions(mapping_id_I);
#change the mapping_id
for cnt,row in enumerate(atomMappingReactions):
atomMappingReactions[cnt]['mapping_id']=mapping_id_O;
#expand the model to include glyoxylate shunt:
#get metabolites not in the model
met_row = {}
met_row = query.get_row_modelIDAndMetID_dataStage02IsotopomerModelMetabolites('140407_iDM2014','glx_c');
glx = Metabolite(met_row['met_id'],met_row['formula'],met_row['met_name'],'c')
glx.charge = met_row['charge']
#get metabolites in the model
icit = cobra_model.metabolites.get_by_id('icit_c')
succ = cobra_model.metabolites.get_by_id('succ_c')
accoa = cobra_model.metabolites.get_by_id('accoa_c')
mal = cobra_model.metabolites.get_by_id('mal_DASH_L_c')
#make ICL
rxn_mets = {};
rxn_mets[icit] = -1;
rxn_mets[succ] = 1;
rxn_mets[glx] = 1;
rxn = Reaction('ICL');
rxn.add_metabolites(rxn_mets);
cobra_model.add_reactions([rxn]);
cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000;
cobra_model.repair();
#append the new atom mappings
row_tmp = {};
row_tmp['mapping_id']=mapping_id_O;
row_tmp['rxn_id']='ICL';
row_tmp['rxn_description']='';
row_tmp['rxn_equation']='';
row_tmp['reactants_stoichiometry_tracked']=[-1]
row_tmp['products_stoichiometry_tracked']=[1,1]
row_tmp['reactants_ids_tracked']=['icit_c']
row_tmp['products_ids_tracked']=['glx_c','succ_c']
row_tmp['reactants_elements_tracked']=[["C", "C", "C", "C", "C", "C"]]
row_tmp['products_elements_tracked']=[["C", "C"], ["C", "C", "C", "C"]]
row_tmp['reactants_positions_tracked']=[[0, 1, 2, 3, 4, 5]]
row_tmp['products_positions_tracked']=[[0, 1], [0, 1, 2, 3]]
row_tmp['reactants_mapping']=['abcdef']
row_tmp['products_mapping']=['ab','fcde']
row_tmp['used_']=True
row_tmp['comment_']='added'
atomMappingReactions.append(row_tmp);
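        #in the mapping strings each letter tracks one carbon: ICL maps icit
        #'abcdef' onto glx 'ab' and succ 'fcde', i.e. icit C1-C2 become
        #glyoxylate C1-C2 and icit C6,C3,C4,C5 become succinate C1-C4
        #(INCA-style atom-transition notation)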
#make MALS
rxn_mets = {};
rxn_mets[glx] = -1;
rxn_mets[accoa] = -1;
rxn_mets[mal] = 1;
rxn = Reaction('MALS');
rxn.add_metabolites(rxn_mets);
cobra_model.add_reactions([rxn]);
cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000;
cobra_model.repair();
#append the new atom mappings
row_tmp = {};
row_tmp['mapping_id']=mapping_id_O;
row_tmp['rxn_id']='MALS';
row_tmp['rxn_description']='';
row_tmp['rxn_equation']='';
row_tmp['reactants_stoichiometry_tracked']=[-1,-1]
row_tmp['products_stoichiometry_tracked']=[1]
row_tmp['reactants_ids_tracked']=['accoa_c','glx_c']
row_tmp['products_ids_tracked']=['mal_DASH_L_c']
row_tmp['reactants_elements_tracked']=[["C", "C"], ["C", "C"]]
row_tmp['products_elements_tracked']=[["C", "C", "C", "C"]]
row_tmp['reactants_positions_tracked']=[[0, 1], [0, 1]]
row_tmp['products_positions_tracked']=[[0, 1, 2, 3]]
row_tmp['reactants_mapping']=['ab','cd']
row_tmp['products_mapping']=['cdba']
row_tmp['used_']=True
row_tmp['comment_']='added'
atomMappingReactions.append(row_tmp);
#add in glucose transporters and intracellular glc
#get metabolites not in the model
met_row = {}
met_row = query.get_row_modelIDAndMetID_dataStage02IsotopomerModelMetabolites('140407_iDM2014',"glc_DASH_D_c");
glc_c = Metabolite(met_row['met_id'],met_row['formula'],met_row['met_name'],'c')
glc_c.charge = met_row['charge']
met_row = {}
met_row = query.get_row_modelIDAndMetID_dataStage02IsotopomerModelMetabolites('140407_iDM2014',"glc_DASH_D_e");
glc_e = Metabolite(met_row['met_id'],met_row['formula'],met_row['met_name'],'e')
glc_e.charge = met_row['charge']
glcext = Metabolite('glc_DASH_D_e.ext',met_row['formula'],met_row['met_name'],'e')
glcext.charge = met_row['charge']
glcpre = Metabolite('glc_DASH_D_e.pre',met_row['formula'],met_row['met_name'],'e')
glcpre.charge = met_row['charge']
#get metabolites in the model
pep = cobra_model.metabolites.get_by_id('pep_c')
pyr = cobra_model.metabolites.get_by_id('pyr_c')
g6p = cobra_model.metabolites.get_by_id('g6p_c')
#make EX_glc_LPAREN_e_RPAREN_
rxn_mets = {};
rxn_mets[glcext] = -1;
rxn_mets[glc_e] = 1;
rxn = Reaction('EX_glc_LPAREN_e_RPAREN_');
cobra_model.remove_reactions(['EX_glc_LPAREN_e_RPAREN_']);
rxn.add_metabolites(rxn_mets);
cobra_model.add_reactions([rxn]);
cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000;
cobra_model.repair();
#append the new atom mappings
row_tmp = {};
row_tmp['mapping_id']=mapping_id_O;
row_tmp['rxn_id']='EX_glc_LPAREN_e_RPAREN_';
row_tmp['rxn_description']='';
row_tmp['rxn_equation']='';
row_tmp['reactants_stoichiometry_tracked']=[-1]
row_tmp['products_stoichiometry_tracked']=[1]
row_tmp['reactants_ids_tracked']=['glc_DASH_D_e.ext']
row_tmp['products_ids_tracked']=['glc_DASH_D_e']
row_tmp['reactants_elements_tracked']=[["C", "C", "C", "C", "C", "C"]]
row_tmp['products_elements_tracked']=[["C", "C", "C", "C", "C", "C"]]
row_tmp['reactants_positions_tracked']=[[0, 1, 2, 3, 4, 5]]
row_tmp['products_positions_tracked']=[[0, 1, 2, 3, 4, 5]]
row_tmp['reactants_mapping']=['abcdef']
row_tmp['products_mapping']=['abcdef']
row_tmp['used_']=True
row_tmp['comment_']='added'
atomMappingReactions.append(row_tmp);
#make EX_glc_LPAREN_e_RPAREN__pre
rxn_mets = {};
rxn_mets[glcpre] = -1;
rxn_mets[glc_e] = 1;
rxn = Reaction('EX_glc_LPAREN_e_RPAREN__pre');
cobra_model.remove_reactions(['v60']);
rxn.add_metabolites(rxn_mets);
cobra_model.add_reactions([rxn]);
cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000;
cobra_model.repair();
#append the new atom mappings
row_tmp = {};
row_tmp['mapping_id']=mapping_id_O;
row_tmp['rxn_id']='EX_glc_LPAREN_e_RPAREN__pre';
row_tmp['rxn_description']='';
row_tmp['rxn_equation']='';
row_tmp['reactants_stoichiometry_tracked']=[-1]
row_tmp['products_stoichiometry_tracked']=[1]
row_tmp['reactants_ids_tracked']=['glc_DASH_D_e.pre']
row_tmp['products_ids_tracked']=['glc_DASH_D_e']
row_tmp['reactants_elements_tracked']=[["C", "C", "C", "C", "C", "C"]]
row_tmp['products_elements_tracked']=[["C", "C", "C", "C", "C", "C"]]
row_tmp['reactants_positions_tracked']=[[0, 1, 2, 3, 4, 5]]
row_tmp['products_positions_tracked']=[[0, 1, 2, 3, 4, 5]]
row_tmp['reactants_mapping']=['abcdef']
row_tmp['products_mapping']=['abcdef']
row_tmp['used_']=True
row_tmp['comment_']='added'
atomMappingReactions.append(row_tmp);
#make GLCptspp "glc_DASH_D_p + pep_c --> g6p_c + pyr_c"
rxn_mets = {};
rxn_mets[glc_e] = -1;
rxn_mets[pep] = -1;
rxn_mets[g6p] = 1;
rxn_mets[pyr] = 1;
rxn = Reaction('GLCptspp');
rxn.add_metabolites(rxn_mets);
cobra_model.add_reactions([rxn]);
cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000;
cobra_model.repair();
#append the new atom mappings
row_tmp = {};
row_tmp['mapping_id']=mapping_id_O;
row_tmp['rxn_id']='GLCptspp';
row_tmp['rxn_description']='';
row_tmp['rxn_equation']='';
row_tmp['reactants_stoichiometry_tracked']=[-1,-1]
row_tmp['products_stoichiometry_tracked']=[1,1]
row_tmp['reactants_ids_tracked']=['glc_DASH_D_e','pep_c']
row_tmp['products_ids_tracked']=['g6p_c','pyr_c']
row_tmp['reactants_elements_tracked']=[["C", "C", "C", "C", "C", "C"],["C", "C", "C"]]
row_tmp['products_elements_tracked']=[["C", "C", "C", "C", "C", "C"],["C", "C", "C"]]
row_tmp['reactants_positions_tracked']=[[0, 1, 2, 3, 4, 5],[0, 1, 2]]
row_tmp['products_positions_tracked']=[[0, 1, 2, 3, 4, 5],[0, 1, 2]]
row_tmp['reactants_mapping']=['abcdef','ghi']
row_tmp['products_mapping']=['abcdef','ghi']
row_tmp['used_']=True
row_tmp['comment_']='added'
atomMappingReactions.append(row_tmp);
#make GLCt2pp "glc_DASH_D_p + h_p --> glc_DASH_D_c + h_c"
rxn_mets = {};
rxn_mets[glc_e] = -1;
rxn_mets[glc_c] = 1;
rxn = Reaction('GLCt2pp');
rxn.add_metabolites(rxn_mets);
cobra_model.add_reactions([rxn]);
cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000.0;
cobra_model.repair();
#append the new atom mappings
row_tmp = {};
row_tmp['mapping_id']=mapping_id_O;
row_tmp['rxn_id']='GLCt2pp';
row_tmp['rxn_description']='';
row_tmp['rxn_equation']='';
row_tmp['reactants_stoichiometry_tracked']=[-1]
row_tmp['products_stoichiometry_tracked']=[1]
row_tmp['reactants_ids_tracked']=['glc_DASH_D_e']
row_tmp['products_ids_tracked']=['glc_DASH_D_c']
row_tmp['reactants_elements_tracked']=[["C", "C", "C", "C", "C", "C"]]
row_tmp['products_elements_tracked']=[["C", "C", "C", "C", "C", "C"]]
row_tmp['reactants_positions_tracked']=[[0, 1, 2, 3, 4, 5]]
row_tmp['products_positions_tracked']=[[0, 1, 2, 3, 4, 5]]
row_tmp['reactants_mapping']=['abcdef']
row_tmp['products_mapping']=['abcdef']
row_tmp['used_']=True
row_tmp['comment_']='added'
atomMappingReactions.append(row_tmp);
#make HEX1 "atp_c + glc_DASH_D_c --> g6p_c + h_c + adp_c"
rxn_mets = {};
rxn_mets[glc_c] = -1;
rxn_mets[g6p] = 1;
rxn = Reaction('HEX1');
rxn.add_metabolites(rxn_mets);
cobra_model.add_reactions([rxn]);
cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000.0;
cobra_model.repair();
#append the new atom mappings
row_tmp = {};
row_tmp['mapping_id']=mapping_id_O;
row_tmp['rxn_id']='HEX1';
row_tmp['rxn_description']='';
row_tmp['rxn_equation']='';
row_tmp['reactants_stoichiometry_tracked']=[-1]
row_tmp['products_stoichiometry_tracked']=[1]
row_tmp['reactants_ids_tracked']=['glc_DASH_D_c']
row_tmp['products_ids_tracked']=['g6p_c']
row_tmp['reactants_elements_tracked']=[["C", "C", "C", "C", "C", "C"]]
row_tmp['products_elements_tracked']=[["C", "C", "C", "C", "C", "C"]]
row_tmp['reactants_positions_tracked']=[[0, 1, 2, 3, 4, 5]]
row_tmp['products_positions_tracked']=[[0, 1, 2, 3, 4, 5]]
row_tmp['reactants_mapping']=['abcdef']
row_tmp['products_mapping']=['abcdef']
row_tmp['used_']=True
row_tmp['comment_']='added'
atomMappingReactions.append(row_tmp);
##expand the model
#acon = Metabolite('acon_DASH_C_c','C6H3O6','cis-Aconitate','c');
#cit = cobra_model.metabolites.get_by_id('cit_c')
#icit = cobra_model.metabolites.get_by_id('icit_c')
#e4p = cobra_model.metabolites.get_by_id('e4p_c')
#r5p = cobra_model.metabolites.get_by_id('r5p_c')
#phe = cobra_model.metabolites.get_by_id('phe_DASH_L_c')
#his = cobra_model.metabolites.get_by_id('his_DASH_L_c')
#phpyr = Metabolite('phpyr_c','C9H7O3','Phenylpyruvate','c');
#prpp = Metabolite('prpp_c','C5H8O14P3','5-Phospho-alpha-D-ribose 1-diphosphate','c');
## update selected reactions to account for new metabolites
#for rxn,row in enumerate(atomMappingReactions):
# if row['rxn_id'] == 'ACONTa_ACONTb':
# #split ACONTa_ACONTb
# aconta_mets = {};
# aconta_mets[cit] = -1;
# aconta_mets[acon] = 1;
# aconta = Reaction('ACONTa');
# aconta.add_metabolites(aconta_mets);
# cobra_model.remove_reactions(['ACONTa_ACONTb']);
# cobra_model.add_reactions([aconta]);
# cobra_model.repair();
# # Update the mapping ids
# atomMappingReactions[rxn]['products_ids_tracked']=['acon_DASH_C_c']
# atomMappingReactions[rxn]['comment_']='updated'
# elif row['rxn_id'] == 'PheSYN':
# #split PheSYN to add in phpyr
# # Update the mapping_ids
# atomMappingReactions[rxn]['mapping_id']=mapping_id_O;
# atomMappingReactions[rxn]['rxn_id']=rxn_ids[rxn];
# atomMappingReactions[rxn]['rxn_description']='';
# atomMappingReactions[rxn]['rxn_equation']='';
# atomMappingReactions[rxn]['reactants_stoichiometry_tracked']=[]
# atomMappingReactions[rxn]['products_stoichiometry_tracked']=[]
# atomMappingReactions[rxn]['reactants_ids_tracked']=[]
# atomMappingReactions[rxn]['products_ids_tracked']=[]
# atomMappingReactions[rxn]['reactants_elements_tracked']=[]
# atomMappingReactions[rxn]['products_elements_tracked']=[]
# atomMappingReactions[rxn]['reactants_positions_tracked']=[]
# atomMappingReactions[rxn]['products_positions_tracked']=[]
# atomMappingReactions[rxn]['reactants_mapping']=[]
# atomMappingReactions[rxn]['products_mapping']=[]
# atomMappingReactions[rxn]['used_']=True
# atomMappingReactions[rxn]['comment_']=None
# elif row['rxn_id'] == 'HisSYN':
# # split HisSYN to add in prpp
# #cobra_model.reactions.get_by_id(rxn_ids[rxn])
# #cobra_model.reactions.get_by_id(rxn_ids[rxn])
# # Update the mapping_ids
# atomMappingReactions[rxn]['reactants_ids_tracked']=[r.replace('r5p_c','prpp_c') for r in atomMappingReactions[rxn]['reactants_ids_tracked']]
# # combine TKT1a and TKT1b
# # combine TKT2a and TKT2b
# # split PPC_PPCK
# # split PTAr_ACKr_ACS
## add in ACONTb
#acontb_mets = {};
#acontb_mets[acon] = -1;
#acontb_mets[icit] = 1;
#acontb = Reaction('ACONTb');
#acontb.add_metabolites(acontb_mets);
#cobra_model.add_reactions([acontb]);
#cobra_model.repair();
## add in ACONTb mapping
#row={};
#row['mapping_id']=mapping_id_O;
#row['rxn_id']='ACONTb';
#row['rxn_description']='';
#row['rxn_equation']='';
#row['reactants_stoichiometry_tracked']=[-1]
#row['products_stoichiometry_tracked']=[1]
#row['reactants_ids_tracked']=['acon_DASH_C_c']
#row['products_ids_tracked']=['icit_c']
#row['reactants_elements_tracked']=[["C", "C", "C", "C", "C", "C"]]
#row['products_elements_tracked']=[["C", "C", "C", "C", "C", "C"]]
#row['reactants_positions_tracked']=[[0, 1, 2, 3, 4, 5]]
#row['products_positions_tracked']=[[0, 1, 2, 3, 4, 5]]
#row['reactants_mapping']=['abcdef']
#row['products_mapping']=['abcdef']
#row['used_']=True
#row['comment_']='added'
#atomMappingReactions.append(row)
## add in e4p_to_phpyr
## add in r5p_to_prp
#r5p_to_prpp_mets = {};
#r5p_to_prpp_mets[e4p] = -1;
#r5p_to_prpp_mets[prpp] = 1;
#r5p_to_prpp = Reaction('r5p_to_prpp');
#r5p_to_prpp.add_metabolites(r5p_to_prpp_mets);
#cobra_model.add_reactions([r5p_to_prpp]);
#cobra_model.repair();
## add in r5p_to_prpp mapping
#row={};
#row['mapping_id']=mapping_id_O;
#row['rxn_id']='r5p_to_prpp';
#row['rxn_description']='';
#row['rxn_equation']='';
#row['reactants_stoichiometry_tracked']=[-1]
#row['products_stoichiometry_tracked']=[1]
#row['reactants_ids_tracked']=['r5p_c']
#row['products_ids_tracked']=['prpp_c']
#row['reactants_elements_tracked']=[["C", "C", "C", "C", "C"]]
#row['products_elements_tracked']=[["C", "C", "C", "C", "C"]]
#row['reactants_positions_tracked']=[[0, 1, 2, 3, 4]]
#row['products_positions_tracked']=[[0, 1, 2, 3, 4]]
#row['reactants_mapping']=['abcde']
#row['products_mapping']=['abcde']
#row['used_']=True
#row['comment_']='added'
#atomMappingReactions.append(row)
# write the model to a temporary file
save_json_model(cobra_model,'data/cobra_model_tmp.json')
# add the model information to the database
io = stage02_isotopomer_io()
dataStage02IsotopomerModelRxns_data = [];
dataStage02IsotopomerModelMets_data = [];
dataStage02IsotopomerModels_data,\
dataStage02IsotopomerModelRxns_data,\
dataStage02IsotopomerModelMets_data = io._parse_model_json(model_id_O, date_I, 'data/cobra_model_tmp.json')
io.add_data_stage02_isotopomer_modelMetabolites(dataStage02IsotopomerModelMets_data);
io.add_data_stage02_isotopomer_modelReactions(dataStage02IsotopomerModelRxns_data);
io.add_data_stage02_isotopomer_models(dataStage02IsotopomerModels_data);
#add atomMappingReactions to the database
io.add_data_stage02_isotopomer_atomMappingReactions(atomMappingReactions);
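    #usage sketch (illustrative ids; '140407_iDM2014' appears above as a model id):
    #   deps.expand_ecoliINCA01('140407_iDM2014','140407_iDM2014_mapping',
    #       '2014-04-07','140407_iDM2014_glx','140407_iDM2014_glx');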
def expand_ecoliINCA02(self,experiment_id_I,model_id_I,mapping_id_I,date_I,model_id_O,mapping_id_O):
'''expand the INCA Ecoli model to account for additional metabolites'''
query = stage02_isotopomer_query()
# get the xml model
cobra_model_sbml = ''
cobra_model_sbml = query.get_row_modelID_dataStage02IsotopomerModels(model_id_I);
# load the model
if cobra_model_sbml:
            if cobra_model_sbml['file_type'] == 'sbml':
                with open('data/cobra_model_tmp.xml','wb') as file:
                    file.write(cobra_model_sbml['model_file']);
                cobra_model = None;
                cobra_model = create_cobra_model_from_sbml_file('data/cobra_model_tmp.xml', print_time=True);
            elif cobra_model_sbml['file_type'] == 'json':
                with open('data/cobra_model_tmp.json','wb') as file:
                    file.write(cobra_model_sbml['model_file']);
                cobra_model = None;
                cobra_model = load_json_model('data/cobra_model_tmp.json');
else:
print('file_type not supported')
#get the atomMapping_reactions
atomMappingReactions = query.get_rows_mappingID_dataStage02IsotopomerAtomMappingReactions(mapping_id_I);
#change the mapping_id
for cnt,row in enumerate(atomMappingReactions):
atomMappingReactions[cnt]['mapping_id']=mapping_id_O;
accoa = cobra_model.metabolites.get_by_id('accoa_c')
#expand the model to include ATPSYN:
#get metabolites not in the model
met_row = {}
met_row = query.get_row_modelIDAndMetID_dataStage02IsotopomerModelMetabolites('140407_iDM2014','atp_c');
atp = Metabolite(met_row['met_id'],met_row['formula'],met_row['met_name'],'c')
atp.charge = met_row['charge']
#get metabolites in the model
r5p = cobra_model.metabolites.get_by_id('r5p_c')
fthf = cobra_model.metabolites.get_by_id('10fthf_c')
gly = cobra_model.metabolites.get_by_id('gly_c')
co2 = cobra_model.metabolites.get_by_id('co2_c')
glu = cobra_model.metabolites.get_by_id('glu_DASH_L_c')
gln = cobra_model.metabolites.get_by_id('gln_DASH_L_c')
asp = cobra_model.metabolites.get_by_id('asp_DASH_L_c')
fum = cobra_model.metabolites.get_by_id('fum_c')
        #make ATPSYN (irreversible)
        #note: several metabolites were assigned twice in the original rxn_mets
        #dict; dict assignment overwrites, so the duplicates are summed here
        rxn_mets = {};
        rxn_mets[r5p] = -1;
        rxn_mets[fthf] = -2;
        rxn_mets[gly] = -1;
        rxn_mets[co2] = -1;
        rxn_mets[gln] = -1;
        rxn_mets[asp] = -2;
        rxn_mets[atp] = 1;
        rxn_mets[glu] = 1;
        rxn_mets[fum] = 2;
rxn = Reaction('ATPSYN');
rxn.add_metabolites(rxn_mets);
cobra_model.add_reactions([rxn]);
cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000;
cobra_model.repair();
#expand the model to include GTPSYN:
#get metabolites not in the model
met_row = {}
met_row = query.get_row_modelIDAndMetID_dataStage02IsotopomerModelMetabolites('140407_iDM2014','gtp_c');
gtp = Metabolite(met_row['met_id'],met_row['formula'],met_row['met_name'],'c')
gtp.charge = met_row['charge']
#get metabolites in the model
r5p = cobra_model.metabolites.get_by_id('r5p_c')
fthf = cobra_model.metabolites.get_by_id('10fthf_c')
gly = cobra_model.metabolites.get_by_id('gly_c')
co2 = cobra_model.metabolites.get_by_id('co2_c')
glu = cobra_model.metabolites.get_by_id('glu_DASH_L_c')
gln = cobra_model.metabolites.get_by_id('gln_DASH_L_c')
asp = cobra_model.metabolites.get_by_id('asp_DASH_L_c')
fum = cobra_model.metabolites.get_by_id('fum_c')
        #make GTPSYN (irreversible)
        #note: duplicated dict assignments in the original are summed here
        rxn_mets = {};
        rxn_mets[r5p] = -1;
        rxn_mets[fthf] = -2;
        rxn_mets[gly] = -1;
        rxn_mets[co2] = -1;
        rxn_mets[gln] = -2;
        rxn_mets[asp] = -1;
        rxn_mets[gtp] = 1;
        rxn_mets[glu] = 2;
        rxn_mets[fum] = 1;
rxn = Reaction('GTPSYN');
rxn.add_metabolites(rxn_mets);
cobra_model.add_reactions([rxn]);
cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000;
cobra_model.repair();
#expand the model to include VPMATr_reverse and VPMATr:
#get metabolites not in the model
met_row = {}
met_row = query.get_row_modelIDAndMetID_dataStage02IsotopomerModelMetabolites('140407_iDM2014','3mob_c');
mob3 = Metabolite(met_row['met_id'],met_row['formula'],met_row['met_name'],'c')
mob3.charge = met_row['charge']
#get metabolites in the model
val = cobra_model.metabolites.get_by_id('val_DASH_L_c')
ala = cobra_model.metabolites.get_by_id('ala_DASH_L_c')
pyr = cobra_model.metabolites.get_by_id('pyr_c')
#make VPAMTr_reverse (irreversible)
rxn_mets = {};
rxn_mets[val] = -1;
rxn_mets[pyr] = -1;
rxn_mets[mob3] = 1;
rxn_mets[ala] = 1;
rxn = Reaction('VPAMTr_reverse'); # previously 'VPMATr_reverse', which did not match the 'VPAMTr_reverse' atom mapping added below
rxn.add_metabolites(rxn_mets);
cobra_model.add_reactions([rxn]);
cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000;
cobra_model.repair();
#make VPAMTr (irreversible)
rxn_mets = {};
rxn_mets[mob3] = -1;
rxn_mets[ala] = -1;
rxn_mets[val] = 1;
rxn_mets[pyr] = 1;
rxn = Reaction('VPAMTr'); # previously 'VPMATr'
rxn.add_metabolites(rxn_mets);
cobra_model.add_reactions([rxn]);
cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000;
cobra_model.repair();
#expand the model to include COASYN:
#get metabolites not in the model
met_row = {}
met_row = query.get_row_modelIDAndMetID_dataStage02IsotopomerModelMetabolites('140407_iDM2014','coa_c');
coa = Metabolite(met_row['met_id'],met_row['formula'],met_row['met_name'],'c')
coa.charge = met_row['charge']
#get metabolites in the model
cys = cobra_model.metabolites.get_by_id('cys_DASH_L_c')
mlthf = cobra_model.metabolites.get_by_id('mlthf_c')
#make COASYN (irreversible)
rxn_mets = {};
rxn_mets[atp] = -1;
rxn_mets[mlthf] = -1;
rxn_mets[mob3] = -1;
rxn_mets[asp] = -1;
rxn_mets[cys] = -1;
rxn_mets[coa] = 1;
rxn_mets[co2] = 2; # produced twice; the original duplicate dict assignments collapsed to 1
rxn = Reaction('COASYN');
rxn.add_metabolites(rxn_mets);
cobra_model.add_reactions([rxn]);
cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000;
cobra_model.repair();
#expand the model to include FADSYN:
#get metabolites not in the model
met_row = {}
met_row = query.get_row_modelIDAndMetID_dataStage02IsotopomerModelMetabolites('140407_iDM2014','fad_c');
fad = Metabolite(met_row['met_id'],met_row['formula'],met_row['met_name'],'c')
fad.charge = met_row['charge']
#get metabolites in the model
ru5p = cobra_model.metabolites.get_by_id('ru5p_DASH_D_c')
#make FADSYN (irreversible)
rxn_mets = {};
rxn_mets[gtp] = -1;
rxn_mets[ru5p] = -2; # consumed twice; the original duplicate dict assignments collapsed to -1
rxn_mets[atp] = -1;
rxn_mets[fad] = 1;
rxn_mets[co2] = 3; # produced three times, matching the three co2_c products in the mapping below
rxn = Reaction('FADSYN');
rxn.add_metabolites(rxn_mets);
cobra_model.add_reactions([rxn]);
cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000;
cobra_model.repair();
#expand the model to include CBMKr and CBMKr_reverse:
#get metabolites not in the model
met_row = {}
met_row = query.get_row_modelIDAndMetID_dataStage02IsotopomerModelMetabolites('140407_iDM2014','cbp_c');
cbp = Metabolite(met_row['met_id'],met_row['formula'],met_row['met_name'],'c')
cbp.charge = met_row['charge']
#make CBMKr (irreversible)
rxn_mets = {};
rxn_mets[co2] = -1;
rxn_mets[cbp] = 1;
rxn = Reaction('CBMKr');
rxn.add_metabolites(rxn_mets);
cobra_model.add_reactions([rxn]);
cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000;
cobra_model.repair();
#make CBMKr_reverse (irreversible)
rxn_mets = {};
rxn_mets[cbp] = -1;
rxn_mets[co2] = 1;
rxn = Reaction('CBMKr_reverse');
rxn.add_metabolites(rxn_mets);
cobra_model.add_reactions([rxn]);
cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000;
cobra_model.repair();
#expand the model to include UTPSYN:
#get metabolites not in the model
met_row = {}
met_row = query.get_row_modelIDAndMetID_dataStage02IsotopomerModelMetabolites('140407_iDM2014','utp_c');
utp = Metabolite(met_row['met_id'],met_row['formula'],met_row['met_name'],'c')
utp.charge = met_row['charge']
#make UTPSYN (irreversible)
rxn_mets = {};
rxn_mets[r5p] = -1;
rxn_mets[cbp] = -1;
rxn_mets[asp] = -1;
rxn_mets[utp] = 1;
rxn_mets[co2] = 1;
rxn = Reaction('UTPSYN');
rxn.add_metabolites(rxn_mets);
cobra_model.add_reactions([rxn]);
cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000;
cobra_model.repair();
# update selected reactions to account for coa_c
cobra_model.reactions.get_by_id("ArgSYN").add_metabolites({coa:1});
cobra_model.reactions.get_by_id("CS").add_metabolites({coa:1});
cobra_model.reactions.get_by_id("LeuSYN").add_metabolites({coa:1});
cobra_model.reactions.get_by_id("PDH").add_metabolites({coa:-1});
cobra_model.reactions.get_by_id("PTAr_ACKr_ACS").add_metabolites({coa:1});
cobra_model.reactions.get_by_id("PTAr_ACKr_ACS_reverse").add_metabolites({coa:-1});
cobra_model.reactions.get_by_id("SERAT_CYSS").add_metabolites({coa:1});
cobra_model.reactions.get_by_id("THRD_GLYAT").add_metabolites({coa:-1});
cobra_model.reactions.get_by_id("MALS").add_metabolites({coa:1});
# update selected mappings to account for coa_c
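# the 21-character mapping string 'ABCDEFGHIJKLMNOPQRSTU' tracks the 21 carbons of CoA, which pass through each of these reactions unchanged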
for rxn,row in enumerate(atomMappingReactions):
if row['rxn_id'] == 'ArgSYN':
atomMappingReactions[rxn]['reactants_stoichiometry_tracked']=[-1,-1,-1,-1,-1]
atomMappingReactions[rxn]['products_stoichiometry_tracked']=[1,1,1,1,1]
atomMappingReactions[rxn]['reactants_ids_tracked']=['glu_DASH_L_c','co2_c','gln_DASH_L_c','asp_DASH_L_c','accoa_c']
atomMappingReactions[rxn]['products_ids_tracked']=['arg_DASH_L_c','akg_c','fum_c','ac_c','coa_c']
atomMappingReactions[rxn]['reactants_mapping']=['abcde','f','ghijk','lmno','ABCDEFGHIJKLMNOPQRSTUpq']
atomMappingReactions[rxn]['products_mapping']=['abcdef','ghijk','lmno','pq','ABCDEFGHIJKLMNOPQRSTU']
atomMappingReactions[rxn]['reactants_elements_tracked']=[]
atomMappingReactions[rxn]['products_elements_tracked']=[]
atomMappingReactions[rxn]['reactants_positions_tracked']=[]
atomMappingReactions[rxn]['products_positions_tracked']=[]
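# rebuild the tracked elements and positions from the mapping strings: each character is one tracked carbon
# and its index in the string is its atom position (the same rebuild is repeated for each updated mapping below)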
for cnt,mapping in enumerate(atomMappingReactions[rxn]['reactants_mapping']):
positions = []
elements = []
for pos,element in enumerate(mapping):
positions.append(pos);
elements.append('C');
atomMappingReactions[rxn]['reactants_elements_tracked'].append(elements)
atomMappingReactions[rxn]['reactants_positions_tracked'].append(positions)
for cnt,mapping in enumerate(atomMappingReactions[rxn]['products_mapping']):
positions = []
elements = []
for pos,element in enumerate(mapping):
positions.append(pos);
elements.append('C');
atomMappingReactions[rxn]['products_elements_tracked'].append(elements)
atomMappingReactions[rxn]['products_positions_tracked'].append(positions)
elif row['rxn_id'] == 'CS':
atomMappingReactions[rxn]['reactants_stoichiometry_tracked']=[-1,-1]
atomMappingReactions[rxn]['products_stoichiometry_tracked']=[1,1]
atomMappingReactions[rxn]['reactants_ids_tracked']=['oaa_c','accoa_c']
atomMappingReactions[rxn]['products_ids_tracked']=['cit_c','coa_c']
atomMappingReactions[rxn]['reactants_mapping']=['abcd','ABCDEFGHIJKLMNOPQRSTUef']
atomMappingReactions[rxn]['products_mapping']=['dcbfea','ABCDEFGHIJKLMNOPQRSTU']
atomMappingReactions[rxn]['reactants_elements_tracked']=[]
atomMappingReactions[rxn]['products_elements_tracked']=[]
atomMappingReactions[rxn]['reactants_positions_tracked']=[]
atomMappingReactions[rxn]['products_positions_tracked']=[]
for cnt,mapping in enumerate(atomMappingReactions[rxn]['reactants_mapping']):
positions = []
elements = []
for pos,element in enumerate(mapping):
positions.append(pos);
elements.append('C');
atomMappingReactions[rxn]['reactants_elements_tracked'].append(elements)
atomMappingReactions[rxn]['reactants_positions_tracked'].append(positions)
for cnt,mapping in enumerate(atomMappingReactions[rxn]['products_mapping']):
positions = []
elements = []
for pos,element in enumerate(mapping):
positions.append(pos);
elements.append('C');
atomMappingReactions[rxn]['products_elements_tracked'].append(elements)
atomMappingReactions[rxn]['products_positions_tracked'].append(positions)
elif row['rxn_id'] == 'LeuSYN':
atomMappingReactions[rxn]['reactants_stoichiometry_tracked']=[-1,-1,-1,-1]
atomMappingReactions[rxn]['products_stoichiometry_tracked']=[1,1,1,1,1]
atomMappingReactions[rxn]['reactants_ids_tracked']=['accoa_c','pyr_c','pyr_c','glu_DASH_L_c']
atomMappingReactions[rxn]['products_ids_tracked']=['leu_DASH_L_c','co2_c','co2_c','akg_c','coa_c']
atomMappingReactions[rxn]['reactants_mapping']=['ABCDEFGHIJKLMNOPQRSTUab','cde','fgh','ijklm']
atomMappingReactions[rxn]['products_mapping']=['abdghe','c','f','ijklm','ABCDEFGHIJKLMNOPQRSTU']
atomMappingReactions[rxn]['reactants_elements_tracked']=[]
atomMappingReactions[rxn]['products_elements_tracked']=[]
atomMappingReactions[rxn]['reactants_positions_tracked']=[]
atomMappingReactions[rxn]['products_positions_tracked']=[]
for cnt,mapping in enumerate(atomMappingReactions[rxn]['reactants_mapping']):
positions = []
elements = []
for pos,element in enumerate(mapping):
positions.append(pos);
elements.append('C');
atomMappingReactions[rxn]['reactants_elements_tracked'].append(elements)
atomMappingReactions[rxn]['reactants_positions_tracked'].append(positions)
for cnt,mapping in enumerate(atomMappingReactions[rxn]['products_mapping']):
positions = []
elements = []
for pos,element in enumerate(mapping):
positions.append(pos);
elements.append('C');
atomMappingReactions[rxn]['products_elements_tracked'].append(elements)
atomMappingReactions[rxn]['products_positions_tracked'].append(positions)
elif row['rxn_id'] == 'PDH':
atomMappingReactions[rxn]['reactants_stoichiometry_tracked']=[-1,-1]
atomMappingReactions[rxn]['products_stoichiometry_tracked']=[1,1]
atomMappingReactions[rxn]['reactants_ids_tracked']=['pyr_c','coa_c']
atomMappingReactions[rxn]['products_ids_tracked']=['accoa_c','co2_c']
atomMappingReactions[rxn]['reactants_mapping']=['abc','ABCDEFGHIJKLMNOPQRSTU']
atomMappingReactions[rxn]['products_mapping']=['ABCDEFGHIJKLMNOPQRSTUbc','a']
atomMappingReactions[rxn]['reactants_elements_tracked']=[]
atomMappingReactions[rxn]['products_elements_tracked']=[]
atomMappingReactions[rxn]['reactants_positions_tracked']=[]
atomMappingReactions[rxn]['products_positions_tracked']=[]
for cnt,mapping in enumerate(atomMappingReactions[rxn]['reactants_mapping']):
positions = []
elements = []
for pos,element in enumerate(mapping):
positions.append(pos);
elements.append('C');
atomMappingReactions[rxn]['reactants_elements_tracked'].append(elements)
atomMappingReactions[rxn]['reactants_positions_tracked'].append(positions)
for cnt,mapping in enumerate(atomMappingReactions[rxn]['products_mapping']):
positions = []
elements = []
for pos,element in enumerate(mapping):
positions.append(pos);
elements.append('C');
atomMappingReactions[rxn]['products_elements_tracked'].append(elements)
atomMappingReactions[rxn]['products_positions_tracked'].append(positions)
elif row['rxn_id'] == 'PTAr_ACKr_ACS':
atomMappingReactions[rxn]['reactants_stoichiometry_tracked']=[-1]
atomMappingReactions[rxn]['products_stoichiometry_tracked']=[1,1]
atomMappingReactions[rxn]['reactants_ids_tracked']=['accoa_c']
atomMappingReactions[rxn]['products_ids_tracked']=['ac_c','coa_c']
atomMappingReactions[rxn]['reactants_mapping']=['ABCDEFGHIJKLMNOPQRSTUab']
atomMappingReactions[rxn]['products_mapping']=['ab','ABCDEFGHIJKLMNOPQRSTU']
atomMappingReactions[rxn]['reactants_elements_tracked']=[]
atomMappingReactions[rxn]['products_elements_tracked']=[]
atomMappingReactions[rxn]['reactants_positions_tracked']=[]
atomMappingReactions[rxn]['products_positions_tracked']=[]
for cnt,mapping in enumerate(atomMappingReactions[rxn]['reactants_mapping']):
positions = []
elements = []
for pos,element in enumerate(mapping):
positions.append(pos);
elements.append('C');
atomMappingReactions[rxn]['reactants_elements_tracked'].append(elements)
atomMappingReactions[rxn]['reactants_positions_tracked'].append(positions)
for cnt,mapping in enumerate(atomMappingReactions[rxn]['products_mapping']):
positions = []
elements = []
for pos,element in enumerate(mapping):
positions.append(pos);
elements.append('C');
atomMappingReactions[rxn]['products_elements_tracked'].append(elements)
atomMappingReactions[rxn]['products_positions_tracked'].append(positions)
elif row['rxn_id'] == 'PTAr_ACKr_ACS_reverse':
atomMappingReactions[rxn]['reactants_stoichiometry_tracked']=[-1,-1]
atomMappingReactions[rxn]['products_stoichiometry_tracked']=[1]
atomMappingReactions[rxn]['reactants_ids_tracked']=['ac_c','coa_c']
atomMappingReactions[rxn]['products_ids_tracked']=['accoa_c']
atomMappingReactions[rxn]['reactants_mapping']=['ab','ABCDEFGHIJKLMNOPQRSTU']
atomMappingReactions[rxn]['products_mapping']=['ABCDEFGHIJKLMNOPQRSTUab']
atomMappingReactions[rxn]['reactants_elements_tracked']=[]
atomMappingReactions[rxn]['products_elements_tracked']=[]
atomMappingReactions[rxn]['reactants_positions_tracked']=[]
atomMappingReactions[rxn]['products_positions_tracked']=[]
for cnt,mapping in enumerate(atomMappingReactions[rxn]['reactants_mapping']):
positions = []
elements = []
for pos,element in enumerate(mapping):
positions.append(pos);
elements.append('C');
atomMappingReactions[rxn]['reactants_elements_tracked'].append(elements)
atomMappingReactions[rxn]['reactants_positions_tracked'].append(positions)
for cnt,mapping in enumerate(atomMappingReactions[rxn]['products_mapping']):
positions = []
elements = []
for pos,element in enumerate(mapping):
positions.append(pos);
elements.append('C');
atomMappingReactions[rxn]['products_elements_tracked'].append(elements)
atomMappingReactions[rxn]['products_positions_tracked'].append(positions)
elif row['rxn_id'] == 'SERAT_CYSS':
atomMappingReactions[rxn]['reactants_stoichiometry_tracked']=[-1,-1]
atomMappingReactions[rxn]['products_stoichiometry_tracked']=[1,1,1]
atomMappingReactions[rxn]['reactants_ids_tracked']=['ser_DASH_L_c','accoa_c']
atomMappingReactions[rxn]['products_ids_tracked']=['cys_DASH_L_c','ac_c','coa_c']
atomMappingReactions[rxn]['reactants_mapping']=['abc','ABCDEFGHIJKLMNOPQRSTUde']
atomMappingReactions[rxn]['products_mapping']=['abc','de','ABCDEFGHIJKLMNOPQRSTU']
atomMappingReactions[rxn]['reactants_elements_tracked']=[]
atomMappingReactions[rxn]['products_elements_tracked']=[]
atomMappingReactions[rxn]['reactants_positions_tracked']=[]
atomMappingReactions[rxn]['products_positions_tracked']=[]
for cnt,mapping in enumerate(atomMappingReactions[rxn]['reactants_mapping']):
positions = []
elements = []
for pos,element in enumerate(mapping):
positions.append(pos);
elements.append('C');
atomMappingReactions[rxn]['reactants_elements_tracked'].append(elements)
atomMappingReactions[rxn]['reactants_positions_tracked'].append(positions)
for cnt,mapping in enumerate(atomMappingReactions[rxn]['products_mapping']):
positions = []
elements = []
for pos,element in enumerate(mapping):
positions.append(pos);
elements.append('C');
atomMappingReactions[rxn]['products_elements_tracked'].append(elements)
atomMappingReactions[rxn]['products_positions_tracked'].append(positions)
elif row['rxn_id'] == 'THRD_GLYAT':
atomMappingReactions[rxn]['reactants_stoichiometry_tracked']=[-1,-1]
atomMappingReactions[rxn]['products_stoichiometry_tracked']=[1,1]
atomMappingReactions[rxn]['reactants_ids_tracked']=['thr_DASH_L_c','coa_c']
atomMappingReactions[rxn]['products_ids_tracked']=['gly_c','accoa_c']
atomMappingReactions[rxn]['reactants_mapping']=['abcd','ABCDEFGHIJKLMNOPQRSTU']
atomMappingReactions[rxn]['products_mapping']=['ab','ABCDEFGHIJKLMNOPQRSTUcd']
atomMappingReactions[rxn]['reactants_elements_tracked']=[]
atomMappingReactions[rxn]['products_elements_tracked']=[]
atomMappingReactions[rxn]['reactants_positions_tracked']=[]
atomMappingReactions[rxn]['products_positions_tracked']=[]
for cnt,mapping in enumerate(atomMappingReactions[rxn]['reactants_mapping']):
positions = []
elements = []
for pos,element in enumerate(mapping):
positions.append(pos);
elements.append('C');
atomMappingReactions[rxn]['reactants_elements_tracked'].append(elements)
atomMappingReactions[rxn]['reactants_positions_tracked'].append(positions)
for cnt,mapping in enumerate(atomMappingReactions[rxn]['products_mapping']):
positions = []
elements = []
for pos,element in enumerate(mapping):
positions.append(pos);
elements.append('C');
atomMappingReactions[rxn]['products_elements_tracked'].append(elements)
atomMappingReactions[rxn]['products_positions_tracked'].append(positions)
elif row['rxn_id'] == 'MALS':
atomMappingReactions[rxn]['reactants_stoichiometry_tracked']=[-1,-1]
atomMappingReactions[rxn]['products_stoichiometry_tracked']=[1,1]
atomMappingReactions[rxn]['reactants_ids_tracked']=['accoa_c','glx_c']
atomMappingReactions[rxn]['products_ids_tracked']=['mal_DASH_L_c','coa_c']
atomMappingReactions[rxn]['reactants_mapping']=['ABCDEFGHIJKLMNOPQRSTUab','cd']
atomMappingReactions[rxn]['products_mapping']=['cdba','ABCDEFGHIJKLMNOPQRSTU']
atomMappingReactions[rxn]['reactants_elements_tracked']=[]
atomMappingReactions[rxn]['products_elements_tracked']=[]
atomMappingReactions[rxn]['reactants_positions_tracked']=[]
atomMappingReactions[rxn]['products_positions_tracked']=[]
for cnt,mapping in enumerate(atomMappingReactions[rxn]['reactants_mapping']):
positions = []
elements = []
for pos,element in enumerate(mapping):
positions.append(pos);
elements.append('C');
atomMappingReactions[rxn]['reactants_elements_tracked'].append(elements)
atomMappingReactions[rxn]['reactants_positions_tracked'].append(positions)
for cnt,mapping in enumerate(atomMappingReactions[rxn]['products_mapping']):
positions = []
elements = []
for pos,element in enumerate(mapping):
positions.append(pos);
elements.append('C');
atomMappingReactions[rxn]['products_elements_tracked'].append(elements)
atomMappingReactions[rxn]['products_positions_tracked'].append(positions)
# update BOF
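# BOF = biomass objective function; the coefficients below fold the newly tracked cofactor species into Ec_Biomass_INCA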
met_row = {}
met_row = query.get_row_modelIDAndMetID_dataStage02IsotopomerModelMetabolites('140407_iDM2014','adp_c');
adp = Metabolite(met_row['met_id'],met_row['formula'],met_row['met_name'],'c')
adp.charge = met_row['charge']
cobra_model.reactions.get_by_id("Ec_Biomass_INCA").add_metabolites({coa:2.51,
atp:-53.95,gtp:-0.20912,fad:-0.000223,utp:-0.1401});
# write the model to a temporary file
from cobra.io.json import save_json_model # local import; save_json_model is not imported at module level (assumes this cobra version provides cobra.io.json)
save_json_model(cobra_model,'data/cobra_model_tmp.json')
# add the model information to the database
io = stage02_isotopomer_io()
dataStage02IsotopomerModelRxns_data = [];
dataStage02IsotopomerModelMets_data = [];
dataStage02IsotopomerModels_data,\
dataStage02IsotopomerModelRxns_data,\
dataStage02IsotopomerModelMets_data = io._parse_model_json(model_id_O, date_I, 'data/cobra_model_tmp.json')
io.add_data_stage02_isotopomer_modelMetabolites(dataStage02IsotopomerModelMets_data);
io.add_data_stage02_isotopomer_modelReactions(dataStage02IsotopomerModelRxns_data);
io.add_data_stage02_isotopomer_models(dataStage02IsotopomerModels_data);
#add atomMappingReactions to the database
io.add_data_stage02_isotopomer_atomMappingReactions(atomMappingReactions);
# expand atomMappingReactions
imm = stage02_isotopomer_metaboliteMapping()
irm = stage02_isotopomer_reactionMapping()
mappingUtilities = stage02_isotopomer_mappingUtilities()
# make atomMappingMetabolites
mappingUtilities.make_missingMetaboliteMappings(experiment_id_I,model_id_I=[model_id_O],
mapping_id_rxns_I=[mapping_id_O],
mapping_id_mets_I=[],#mapping_id_mets_I=[mapping_id_I],
mapping_id_new_I=mapping_id_O);
# update symmetric metabolites
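# succinate, fumarate, and meso-2,6-diaminopimelate are rotationally symmetric molecules, so equivalent atom positions must be treated as interchangeable in the mapping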
imm.get_metaboliteMapping(mapping_id_O,'succ_c')
imm.make_symmetric()
imm.update_metaboliteMapping()
imm.clear_metaboliteMapping()
imm.get_metaboliteMapping(mapping_id_O,'fum_c')
imm.make_symmetric()
imm.update_metaboliteMapping()
imm.clear_metaboliteMapping()
imm.get_metaboliteMapping(mapping_id_O,'26dap_DASH_M_c')
imm.make_symmetric()
imm.update_metaboliteMapping()
imm.clear_metaboliteMapping()
## update _elements_tracked and _positions_tracked
#irm.get_reactionMapping(mapping_id_O,'ArgSYN')
#irm.checkAndCorrect_elementsAndPositions();
#irm.update_reactionMapping()
#irm.clear_reactionMapping()
#irm.get_reactionMapping(mapping_id_O,'CS')
#irm.checkAndCorrect_elementsAndPositions();
#irm.update_reactionMapping()
#irm.clear_reactionMapping()
#irm.get_reactionMapping(mapping_id_O,'LeuSYN')
#irm.checkAndCorrect_elementsAndPositions();
#irm.update_reactionMapping()
#irm.clear_reactionMapping()
#irm.get_reactionMapping(mapping_id_O,'PDH')
#irm.checkAndCorrect_elementsAndPositions();
#irm.update_reactionMapping()
#irm.clear_reactionMapping()
#irm.get_reactionMapping(mapping_id_O,'PTAr_ACKr_ACS')
#irm.checkAndCorrect_elementsAndPositions();
#irm.update_reactionMapping()
#irm.clear_reactionMapping()
#irm.get_reactionMapping(mapping_id_O,'PTAr_ACKr_ACS_reverse')
#irm.checkAndCorrect_elementsAndPositions();
#irm.update_reactionMapping()
#irm.clear_reactionMapping()
#irm.get_reactionMapping(mapping_id_O,'SERAT_CYSS')
#irm.checkAndCorrect_elementsAndPositions();
#irm.update_reactionMapping()
#irm.clear_reactionMapping()
#irm.get_reactionMapping(mapping_id_O,'THRD_GLYAT')
#irm.checkAndCorrect_elementsAndPositions();
#irm.update_reactionMapping()
#irm.clear_reactionMapping()
#irm.get_reactionMapping(mapping_id_O,'MALS')
#irm.checkAndCorrect_elementsAndPositions();
#irm.update_reactionMapping()
#irm.clear_reactionMapping()
#make default base metabolites
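# base metabolites decompose each compound mapping into reusable building blocks (see stage02_isotopomer_metaboliteMapping below)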
imm.get_metaboliteMapping(mapping_id_O,'asp_DASH_L_c')
imm.make_defaultBaseMetabolites()
imm.update_metaboliteMapping()
imm.clear_metaboliteMapping()
imm.get_metaboliteMapping(mapping_id_O,'cys_DASH_L_c')
imm.make_defaultBaseMetabolites()
imm.update_metaboliteMapping()
imm.clear_metaboliteMapping()
imm.get_metaboliteMapping(mapping_id_O,'ru5p_DASH_D_c')
imm.make_defaultBaseMetabolites()
imm.update_metaboliteMapping()
imm.clear_metaboliteMapping()
#add in PRPPS to the network?
#if not, substitute r5p_c for prpp_c
#substitute co2_c for for_c
#substitute phe_DASH_L_c for phpyr_c
#ATPSYN
irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'ATPSYN',
[{'r5p_c':'C'},{'10fthf_c':'C'},{'gly_c':'C'},{'co2_c':'C'},{'10fthf_c':'C'}],
[],
[],
'atp_c',
[],
[])
irm.add_productMapping(['atp_c'])
irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'ATPSYN',
[{'gln_DASH_L_c':'C'}],
[],
[],
'glu_DASH_L_c',
[],
[])
irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'ATPSYN',
[{'asp_DASH_L_c':'C'}],
[],
[],
'fum_c',
[],
[])
irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'ATPSYN',
[{'asp_DASH_L_c':'C'}],
[],
[],
'fum_c',
[],
[])
irm.add_reactionMapping()
irm.clear_reactionMapping()
#GTPSYN
irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'GTPSYN',
[{'r5p_c':'C'},{'10fthf_c':'C'},{'gly_c':'C'},{'co2_c':'C'},{'10fthf_c':'C'}],
[],
[],
'gtp_c',
[],
[])
irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'GTPSYN',
[{'gln_DASH_L_c':'C'}],
[],
[],
'glu_DASH_L_c',
[],
[])
irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'GTPSYN',
[{'gln_DASH_L_c':'C'}],
[],
[],
'glu_DASH_L_c',
[],
[])
irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'GTPSYN',
[{'asp_DASH_L_c':'C'}],
[],
[],
'fum_c',
[],
[])
irm.add_productMapping(['gtp_c'])
irm.add_reactionMapping()
irm.clear_reactionMapping()
#VPAMTr_reverse
irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'VPAMTr_reverse',
[{'val_DASH_L_c':'C'}],
[],
[],
'3mob_c',
[],
[])
irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'VPAMTr_reverse',
[{'pyr_c':'C'}],
[],
[],
'ala_DASH_L_c',
[],
[])
irm.add_productMapping(['3mob_c'])
irm.add_reactionMapping()
irm.clear_reactionMapping()
#VPAMTr
irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'VPAMTr',
[{'3mob_c':'C'}],
[],
[],
'val_DASH_L_c',
[],
[])
irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'VPAMTr',
[{'ala_DASH_L_c':'C'}],
[],
[],
'pyr_c',
[],
[])
irm.add_reactionMapping()
irm.clear_reactionMapping()
#COASYN
irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'COASYN',
[{'atp_c':'C'},{'mlthf_c':'C'},{'3mob_c':'C'},{'asp_DASH_L_c':'C'},{'cys_DASH_L_c':'C'}],
[{'asp_DASH_L_c':3},{'cys_DASH_L_c':4}],
[{'co2_c':0},{'co2_c':0}],
'coa_c',
[{'co2_c':'C'},{'co2_c':'C'}],
['co2_c','co2_c'])
#reverse product mapping for 3mob_c in database!
irm.update_productMapping(['coa_c'])
irm.add_reactionMapping()
irm.clear_reactionMapping()
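# accoa_c is assembled as a pseudo-reaction joining its coa_c and ac_c base metabolites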
#ACCOA_pseudo
irm.make_trackedBinaryReaction(mapping_id_O,model_id_O,'accoa_c_base_met_ids', # ids were previously hardcoded as 'full04'/'140407_iDM2014'; the method parameters are used for consistency
[{'coa_c':'C'},{'ac_c':'C'}],
'accoa_c')
irm.update_productMapping(['accoa_c'])
irm.clear_reactionMapping()
#FADSYN
irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'FADSYN',
[{'gtp_c':'C'},{'ru5p_DASH_D_c':'C'},{'ru5p_DASH_D_c':'C'},{'atp_c':'C'}],
[{'gtp_c':0},{'ru5p_DASH_D_c':1},{'ru5p_DASH_D_c':2}],
[{'10fthf_c':0},{'co2_c':0},{'co2_c':0}],
'fad_c',
[{'10fthf_c':'C'},{'co2_c':'C'},{'co2_c':'C'}],
['co2_c','co2_c','co2_c'])
irm.add_productMapping(['fad_c'])
irm.add_reactionMapping()
irm.clear_reactionMapping()
#CBMKr
irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'CBMKr',
[{'co2_c':'C'}],
[],
[],
'cbp_c',
[],
[])
irm.add_productMapping(['cbp_c'])
irm.add_reactionMapping()
irm.clear_reactionMapping()
#CBMKr_reverse
irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'CBMKr_reverse',
[{'cbp_c':'C'}],
[],
[],
'co2_c',
[],
[])
irm.add_reactionMapping()
irm.clear_reactionMapping()
#UTPSYN
irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'UTPSYN',
[{'r5p_c':'C'},{'cbp_c':'C'},{'asp_DASH_L_c':'C'}],
[{'asp_DASH_L_c':2}],
[{'co2_c':0}],
'utp_c',
[{'co2_c':'C'}],
['co2_c'])
irm.add_productMapping(['utp_c'])
irm.add_reactionMapping()
irm.clear_reactionMapping()
#ecoli_RL2013 modifications (TODO)
def expand_ecoliRL2013_01(self,experiment_id_I,model_id_I,mapping_id_I,date_I,model_id_O,mapping_id_O):
'''expand the INCA Ecoli model to account for additional metabolites'''
query = stage02_isotopomer_query()
# get the xml model
cobra_model_sbml = ''
cobra_model_sbml = query.get_row_modelID_dataStage02IsotopomerModels(model_id_I);
# load the model
if cobra_model_sbml:
if cobra_model_sbml['file_type'] == 'sbml':
with open('data/cobra_model_tmp.xml','wb') as file:
file.write(cobra_model_sbml['model_file']);
cobra_model = None;
cobra_model = create_cobra_model_from_sbml_file('data/cobra_model_tmp.xml', print_time=True);
elif cobra_model_sbml['file_type'] == 'json':
with open('data/cobra_model_tmp.json','wb') as file:
file.write(cobra_model_sbml['model_file']);
cobra_model = None;
from cobra.io.json import load_json_model # local import; load_json_model is not imported at module level
cobra_model = load_json_model('data/cobra_model_tmp.json');
else:
print('file_type not supported')
#get the atomMapping_reactions
atomMappingReactions = query.get_rows_mappingID_dataStage02IsotopomerAtomMappingReactions(mapping_id_I);
#change the mapping_id
for cnt,row in enumerate(atomMappingReactions):
atomMappingReactions[cnt]['mapping_id']=mapping_id_O;
#add in glucose transporters and intracellular glc
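# extracellular glucose is split into an .ext (fresh tracer) pool and a .pre (pre-labeling) pool; the .pre exchange is currently disabled below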
#get metabolites not in the model
met_row = {}
met_row = query.get_row_modelIDAndMetID_dataStage02IsotopomerModelMetabolites('140407_iDM2014',"atp_c");
atp = Metabolite(met_row['met_id'],met_row['formula'],met_row['met_name'],'c')
atp.charge = met_row['charge']
met_row = {}
met_row = query.get_row_modelIDAndMetID_dataStage02IsotopomerModelMetabolites('140407_iDM2014',"glc_DASH_D_c");
glc_c = Metabolite(met_row['met_id'],met_row['formula'],met_row['met_name'],'c')
glc_c.charge = met_row['charge']
met_row = {}
met_row = query.get_row_modelIDAndMetID_dataStage02IsotopomerModelMetabolites('140407_iDM2014',"glc_DASH_D_e");
glc_e = Metabolite(met_row['met_id'],met_row['formula'],met_row['met_name'],'e')
glc_e.charge = met_row['charge']
glcext = Metabolite('glc_DASH_D_e.ext',met_row['formula'],met_row['met_name'],'e')
glcext.charge = met_row['charge']
glcpre = Metabolite('glc_DASH_D_e.pre',met_row['formula'],met_row['met_name'],'e')
glcpre.charge = met_row['charge']
#get metabolites in the model
pep = cobra_model.metabolites.get_by_id('pep_c')
pyr = cobra_model.metabolites.get_by_id('pyr_c')
g6p = cobra_model.metabolites.get_by_id('g6p_c')
#make EX_glc_LPAREN_e_RPAREN_
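# the existing exchange reaction is removed below and replaced with a version that draws from the .ext tracer pool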
rxn_mets = {};
rxn_mets[glcext] = -1;
rxn_mets[glc_e] = 1;
rxn = Reaction('EX_glc_LPAREN_e_RPAREN_');
cobra_model.remove_reactions(['EX_glc_LPAREN_e_RPAREN_']);
rxn.add_metabolites(rxn_mets);
cobra_model.add_reactions([rxn]);
cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000;
cobra_model.repair();
#append the new atom mappings
row_tmp = {};
row_tmp['mapping_id']=mapping_id_O;
row_tmp['rxn_id']='EX_glc_LPAREN_e_RPAREN_';
row_tmp['rxn_description']='';
row_tmp['rxn_equation']='';
row_tmp['reactants_stoichiometry_tracked']=[-1]
row_tmp['products_stoichiometry_tracked']=[1]
row_tmp['reactants_ids_tracked']=['glc_DASH_D_e.ext']
row_tmp['products_ids_tracked']=['glc_DASH_D_e']
row_tmp['reactants_elements_tracked']=[["C", "C", "C", "C", "C", "C"]]
row_tmp['products_elements_tracked']=[["C", "C", "C", "C", "C", "C"]]
row_tmp['reactants_positions_tracked']=[[0, 1, 2, 3, 4, 5]]
row_tmp['products_positions_tracked']=[[0, 1, 2, 3, 4, 5]]
row_tmp['reactants_mapping']=['abcdef']
row_tmp['products_mapping']=['abcdef']
row_tmp['used_']=True
row_tmp['comment_']='added'
atomMappingReactions.append(row_tmp);
##make EX_glc_LPAREN_e_RPAREN__pre
#rxn_mets = {};
#rxn_mets[glcpre] = -1;
#rxn_mets[glc_e] = 1;
#rxn = Reaction('EX_glc_LPAREN_e_RPAREN__pre');
#cobra_model.remove_reactions(['v60']);
#rxn.add_metabolites(rxn_mets);
#cobra_model.add_reactions([rxn]);
#cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
#cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000;
#cobra_model.repair();
##append the new atom mappings
#row_tmp = {};
#row_tmp['mapping_id']=mapping_id_O;
#row_tmp['rxn_id']='EX_glc_LPAREN_e_RPAREN__pre';
#row_tmp['rxn_description']='';
#row_tmp['rxn_equation']='';
#row_tmp['reactants_stoichiometry_tracked']=[-1]
#row_tmp['products_stoichiometry_tracked']=[1]
#row_tmp['reactants_ids_tracked']=['glc_DASH_D_e.pre']
#row_tmp['products_ids_tracked']=['glc_DASH_D_e']
#row_tmp['reactants_elements_tracked']=[["C", "C", "C", "C", "C", "C"]]
#row_tmp['products_elements_tracked']=[["C", "C", "C", "C", "C", "C"]]
#row_tmp['reactants_positions_tracked']=[[0, 1, 2, 3, 4, 5]]
#row_tmp['products_positions_tracked']=[[0, 1, 2, 3, 4, 5]]
#row_tmp['reactants_mapping']=['abcdef']
#row_tmp['products_mapping']=['abcdef']
#row_tmp['used_']=True
#row_tmp['comment_']='added'
#atomMappingReactions.append(row_tmp);
#make GLCptspp "glc_DASH_D_p + pep_c --> g6p_c + pyr_c" (periplasmic glucose is approximated here by glc_DASH_D_e)
rxn_mets = {};
rxn_mets[glc_e] = -1;
rxn_mets[pep] = -1;
rxn_mets[g6p] = 1;
rxn_mets[pyr] = 1;
rxn = Reaction('GLCptspp');
rxn.add_metabolites(rxn_mets);
cobra_model.add_reactions([rxn]);
cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000;
cobra_model.repair();
#append the new atom mappings
row_tmp = {};
row_tmp['mapping_id']=mapping_id_O;
row_tmp['rxn_id']='GLCptspp';
row_tmp['rxn_description']='';
row_tmp['rxn_equation']='';
row_tmp['reactants_stoichiometry_tracked']=[-1,-1]
row_tmp['products_stoichiometry_tracked']=[1,1]
row_tmp['reactants_ids_tracked']=['glc_DASH_D_e','pep_c']
row_tmp['products_ids_tracked']=['g6p_c','pyr_c']
row_tmp['reactants_elements_tracked']=[["C", "C", "C", "C", "C", "C"],["C", "C", "C"]]
row_tmp['products_elements_tracked']=[["C", "C", "C", "C", "C", "C"],["C", "C", "C"]]
row_tmp['reactants_positions_tracked']=[[0, 1, 2, 3, 4, 5],[0, 1, 2]]
row_tmp['products_positions_tracked']=[[0, 1, 2, 3, 4, 5],[0, 1, 2]]
row_tmp['reactants_mapping']=['abcdef','ghi']
row_tmp['products_mapping']=['abcdef','ghi']
row_tmp['used_']=True
row_tmp['comment_']='added'
atomMappingReactions.append(row_tmp);
#make GLCt2pp "glc_DASH_D_p + h_p --> glc_DASH_D_c + h_c" (modeled here as glc_DASH_D_e --> glc_DASH_D_c; protons are not tracked)
rxn_mets = {};
rxn_mets[glc_e] = -1;
rxn_mets[glc_c] = 1;
rxn = Reaction('GLCt2pp');
rxn.add_metabolites(rxn_mets);
cobra_model.add_reactions([rxn]);
cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000.0;
cobra_model.repair();
#append the new atom mappings
row_tmp = {};
row_tmp['mapping_id']=mapping_id_O;
row_tmp['rxn_id']='GLCt2pp';
row_tmp['rxn_description']='';
row_tmp['rxn_equation']='';
row_tmp['reactants_stoichiometry_tracked']=[-1]
row_tmp['products_stoichiometry_tracked']=[1]
row_tmp['reactants_ids_tracked']=['glc_DASH_D_e']
row_tmp['products_ids_tracked']=['glc_DASH_D_c']
row_tmp['reactants_elements_tracked']=[["C", "C", "C", "C", "C", "C"]]
row_tmp['products_elements_tracked']=[["C", "C", "C", "C", "C", "C"]]
row_tmp['reactants_positions_tracked']=[[0, 1, 2, 3, 4, 5]]
row_tmp['products_positions_tracked']=[[0, 1, 2, 3, 4, 5]]
row_tmp['reactants_mapping']=['abcdef']
row_tmp['products_mapping']=['abcdef']
row_tmp['used_']=True
row_tmp['comment_']='added'
atomMappingReactions.append(row_tmp);
#make HEX1 "atp_c + glc_DASH_D_c --> g6p_c + h_c + adp_c" (adp_c and h_c are omitted here; only the tracked carbons and atp_c are included)
rxn_mets = {};
rxn_mets[glc_c] = -1;
rxn_mets[atp] = -1;
rxn_mets[g6p] = 1;
rxn = Reaction('HEX1');
rxn.add_metabolites(rxn_mets);
cobra_model.add_reactions([rxn]);
cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000.0;
cobra_model.repair();
#append the new atom mappings
row_tmp = {};
row_tmp['mapping_id']=mapping_id_O;
row_tmp['rxn_id']='HEX1';
row_tmp['rxn_description']='';
row_tmp['rxn_equation']='';
row_tmp['reactants_stoichiometry_tracked']=[-1]
row_tmp['products_stoichiometry_tracked']=[1]
row_tmp['reactants_ids_tracked']=['glc_DASH_D_c']
row_tmp['products_ids_tracked']=['g6p_c']
row_tmp['reactants_elements_tracked']=[["C", "C", "C", "C", "C", "C"]]
row_tmp['products_elements_tracked']=[["C", "C", "C", "C", "C", "C"]]
row_tmp['reactants_positions_tracked']=[[0, 1, 2, 3, 4, 5]]
row_tmp['products_positions_tracked']=[[0, 1, 2, 3, 4, 5]]
row_tmp['reactants_mapping']=['abcdef']
row_tmp['products_mapping']=['abcdef']
row_tmp['used_']=True
row_tmp['comment_']='added'
atomMappingReactions.append(row_tmp);
# add in PRPPS phosphoribosylpyrophosphate synthetase atp[c] + r5p[c] <=> amp[c] + h[c] + prpp[c]
#get metabolites not in the model
met_row = {}
met_row = query.get_row_modelIDAndMetID_dataStage02IsotopomerModelMetabolites('140407_iDM2014',"prpp_c");
prpp = Metabolite(met_row['met_id'],met_row['formula'],met_row['met_name'],'c')
prpp.charge = met_row['charge']
r5p = cobra_model.metabolites.get_by_id('r5p_c')
# expand the model
rxn_mets = {};
rxn_mets[r5p] = -1;
rxn_mets[atp] = -1;
rxn_mets[prpp] = 1;
rxn = Reaction('PRPPS');
rxn.add_metabolites(rxn_mets);
cobra_model.add_reactions([rxn]);
cobra_model.repair();
# add in rxn mapping
row={};
row['mapping_id']=mapping_id_O;
row['rxn_id']='PRPPS';
row['rxn_description']='';
row['rxn_equation']='';
row['reactants_stoichiometry_tracked']=[-1]
row['products_stoichiometry_tracked']=[1]
row['reactants_ids_tracked']=['r5p_c']
row['products_ids_tracked']=['prpp_c']
row['reactants_elements_tracked']=[["C", "C", "C", "C", "C"]]
row['products_elements_tracked']=[["C", "C", "C", "C", "C"]]
row['reactants_positions_tracked']=[[0, 1, 2, 3, 4]]
row['products_positions_tracked']=[[0, 1, 2, 3, 4]]
row['reactants_mapping']=['abcde']
row['products_mapping']=['abcde']
row['used_']=True
row['comment_']='added'
atomMappingReactions.append(row)
##expand the model
#acon = Metabolite('acon_DASH_C_c','C6H3O6','cis-Aconitate','c');
#cit = cobra_model.metabolites.get_by_id('cit_c')
#icit = cobra_model.metabolites.get_by_id('icit_c')
#e4p = cobra_model.metabolites.get_by_id('e4p_c')
#phe = cobra_model.metabolites.get_by_id('phe_DASH_L_c')
his = cobra_model.metabolites.get_by_id('his_DASH_L_c')
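# note: his_DASH_L_c is retrieved here but not used in the expansions below (kept alongside the commented-out metabolites above)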
#phpyr = Metabolite('phpyr_c','C9H7O3','Phenylpyruvate','c');
# update selected reactions to account for new metabolites
for rxn,row in enumerate(atomMappingReactions):
if row['rxn_id'] == 'HisSYN':
# split HisSYN to add in prpp
cobra_model.reactions.get_by_id(row['rxn_id']).subtract_metabolites({atp:-1,r5p:-1})
cobra_model.reactions.get_by_id(row['rxn_id']).add_metabolites({prpp:-1})
# Update the mapping_ids
atomMappingReactions[rxn]['reactants_ids_tracked']=[r.replace('r5p_c','prpp_c') for r in atomMappingReactions[rxn]['reactants_ids_tracked']]
# write the model to a temporary file
from cobra.io.json import save_json_model # local import; not imported at module level
save_json_model(cobra_model,'data/cobra_model_tmp.json')
# add the model information to the database
io = stage02_isotopomer_io()
dataStage02IsotopomerModelRxns_data = [];
dataStage02IsotopomerModelMets_data = [];
dataStage02IsotopomerModels_data,\
dataStage02IsotopomerModelRxns_data,\
dataStage02IsotopomerModelMets_data = io._parse_model_json(model_id_O, date_I, 'data/cobra_model_tmp.json')
io.add_data_stage02_isotopomer_modelMetabolites(dataStage02IsotopomerModelMets_data);
io.add_data_stage02_isotopomer_modelReactions(dataStage02IsotopomerModelRxns_data);
io.add_data_stage02_isotopomer_models(dataStage02IsotopomerModels_data);
#add atomMappingReactions to the database
io.add_data_stage02_isotopomer_atomMappingReactions(atomMappingReactions);
# expand atomMappingReactions
imm = stage02_isotopomer_metaboliteMapping()
irm = stage02_isotopomer_reactionMapping()
mappingUtilities = stage02_isotopomer_mappingUtilities()
# make atomMappingMetabolites
mappingUtilities.make_missingMetaboliteMappings(experiment_id_I,model_id_I=[model_id_O],
mapping_id_rxns_I=[mapping_id_O],
mapping_id_mets_I=[],
mapping_id_new_I=mapping_id_O);
# update symmetric metabolites
imm.get_metaboliteMapping(mapping_id_O,'succ_c')
imm.make_symmetric()
imm.update_metaboliteMapping()
imm.clear_metaboliteMapping()
imm.get_metaboliteMapping(mapping_id_O,'fum_c')
imm.make_symmetric()
imm.update_metaboliteMapping()
imm.clear_metaboliteMapping()
imm.get_metaboliteMapping(mapping_id_O,'26dap_DASH_M_c')
imm.make_symmetric()
imm.update_metaboliteMapping()
imm.clear_metaboliteMapping()
#analysis functions
def load_isotopomer_matlab(self,matlab_data,isotopomer_data=None):
'''Load 13CFlux isotopomer simulation data from matlab file'''
import numpy # local import, in case the numpy module name is not provided by the module-level wildcard imports
# load names and calculated isotopomer statistics from the MATLAB file into numpy arrays
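# scipy.io.loadmat returns MATLAB structs as nested object arrays, hence the [0][0] indexing below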
names = scipy.io.loadmat(matlab_data)['output']['names'][0][0];
calculated_ave = scipy.io.loadmat(matlab_data)['output']['ave'][0][0];
calculated_stdev = scipy.io.loadmat(matlab_data)['output']['stdev'][0][0];
# load residuals from MATLAB file into numpy array
residuals = scipy.io.loadmat(matlab_data)['residuals'];
if isotopomer_data:
measured_dict = json.load(open(isotopomer_data,'r'));
measured_names = [];
measured_ave = [];
measured_stdev = [];
# extract data to lists
for frag,data in measured_dict['fragments'].items():
for name in data['data_names']:
measured_names.append(name);
for ave in data['data_ave']:
measured_ave.append(ave);
for stdev in data['data_stdev']:
measured_stdev.append(stdev);
# convert lists to dict
measured_dict = {};
for i,name in enumerate(measured_names):
measured_dict[name]={'measured_ave':measured_ave[i],
'measured_stdev':measured_stdev[i]};
# match measured names to calculated names
measured_ave = [];
measured_stdev = [];
residuals = [];
for i,name in enumerate(names):
if name[0][0] in measured_dict:
measured_ave.append(measured_dict[name[0][0]]['measured_ave']);
measured_stdev.append(measured_dict[name[0][0]]['measured_stdev']);
residuals.append(measured_dict[name[0][0]]['measured_ave']-calculated_ave[i][0]);
else:
measured_ave.append(None);
measured_stdev.append(None);
residuals.append(None);
else:
measured_ave_tmp = scipy.io.loadmat(matlab_data)['toCompare'];
measured_ave = [];
for d in measured_ave_tmp:
measured_ave.append(d[0]);
measured_stdev = numpy.zeros(len(measured_ave));
# combine into a dictionary
isotopomer = {};
for i in range(len(names)):
isotopomer[names[i][0][0]] = {'measured_ave':measured_ave[i], #TODO: extract out by fragment names
'measured_stdev':measured_stdev[i],
'calculated_ave':calculated_ave[i][0],
'calculated_stdev':calculated_stdev[i][0],
'residuals':residuals[i]};
return isotopomer;
def load_confidenceIntervals_matlab(self,matlab_data,cobra_model_matlab,cobra_model_name):
'''Load confidence intervals from matlab file'''
import numpy # local import, in case the numpy module name is not provided by the module-level wildcard imports
# load confidence intervals from MATLAB file into numpy array
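# MATLAB -v7.3 .mat files are HDF5 containers, so h5py is used here rather than scipy.io.loadmat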
cimin_h5py = h5py.File(matlab_data)['ci']['minv'][0];
cimax_h5py = h5py.File(matlab_data)['ci']['maxv'][0];
cimin = numpy.array(cimin_h5py);
cimax = numpy.array(cimax_h5py);
# load cobramodel
rxns = scipy.io.loadmat(cobra_model_matlab)[cobra_model_name]['rxns'][0][0]
# combine cimin, cimax, and rxns into dictionary
ci = {};
for i in range(len(cimin)):
ci[rxns[i][0][0]] = {'minv':cimin[i],'maxv':cimax[i]};
return ci;
def compare_isotopomers_calculated(self,isotopomer_1, isotopomer_2):
'''compare two calculated isotopomer distributions'''
import scipy.stats # local import: scipy.stats is not necessarily loaded by 'import scipy.io' alone
from math import fabs # fabs is used below and may not be provided by the module-level wildcard imports
# extract into lists
absDif_list = [];
ssr_1_list = [];
ssr_2_list = [];
bestFit_list = [];
frag_list = [];
ssr_1 = 0.0; # sum of squared residuals (threshold of 10e1, Antoniewicz poster, co-culture, Met Eng X)
ssr_2 = 0.0;
measured_1_list = [];
measured_2_list = [];
calculatedAve_1_list = [];
calculatedAve_2_list = [];
measuredStdev_1_list = [];
measuredStdev_2_list = [];
for frag,data in isotopomer_1.items():
absDif = 0.0;
sr_1 = 0.0;
sr_2 = 0.0;
bestFit = None;
absDif = fabs(isotopomer_1[frag]['calculated_ave'] - isotopomer_2[frag]['calculated_ave']);
sr_1 = pow(isotopomer_1[frag]['calculated_ave']-isotopomer_1[frag]['measured_ave'],2);
sr_2 = pow(isotopomer_2[frag]['calculated_ave']-isotopomer_2[frag]['measured_ave'],2);
if sr_1>sr_2: bestFit = '2';
elif sr_1<sr_2: bestFit = '1';
elif sr_1==sr_2: bestFit = None;
absDif_list.append(absDif);
ssr_1_list.append(sr_1);
ssr_2_list.append(sr_2);
bestFit_list.append(bestFit);
frag_list.append(frag);
ssr_1 += sr_1;
ssr_2 += sr_2;
measured_1_list.append(isotopomer_1[frag]['measured_ave'])
measured_2_list.append(isotopomer_2[frag]['measured_ave'])
calculatedAve_1_list.append(isotopomer_1[frag]['calculated_ave']);
calculatedAve_2_list.append(isotopomer_2[frag]['calculated_ave']);
measuredStdev_1_list.append(isotopomer_1[frag]['measured_stdev']);
measuredStdev_2_list.append(isotopomer_2[frag]['measured_stdev']);
# calculate the correlation coefficient
# 1. between measured vs. calculated (1 and 2)
# 2. between calculated 1 vs. calculated 2
r_measuredVsCalculated_1 = None;
r_measuredVsCalculated_2 = None;
r_measured1VsMeasured2 = None;
p_measuredVsCalculated_1 = None;
p_measuredVsCalculated_2 = None;
p_measured1VsMeasured2 = None;
r_measuredVsCalculated_1, p_measuredVsCalculated_1 = scipy.stats.pearsonr(measured_1_list,calculatedAve_1_list);
r_measuredVsCalculated_2, p_measuredVsCalculated_2 = scipy.stats.pearsonr(measured_2_list,calculatedAve_2_list);
r_measured1VsMeasured2, p_measured1VsMeasured2 = scipy.stats.pearsonr(calculatedAve_1_list,calculatedAve_2_list); # note: despite the variable names, this correlates calculated 1 vs. calculated 2 (comparison 2 above)
# wrap stats into a dictionary
isotopomer_comparison_stats = {};
isotopomer_comparison_stats = dict(list(zip(('r_measuredVsCalculated_1', 'p_measuredVsCalculated_1',
'r_measuredVsCalculated_2', 'p_measuredVsCalculated_2',
'r_measured1VsMeasured2', 'p_measured1VsMeasured2',
'ssr_1','ssr_2'), # previously a single 'ssr_1,ssr_2' key string, which silently dropped the last value in the zip
(r_measuredVsCalculated_1, p_measuredVsCalculated_1,
r_measuredVsCalculated_2, p_measuredVsCalculated_2,
r_measured1VsMeasured2, p_measured1VsMeasured2,
ssr_1,ssr_2))));
## zip, sort, unzip # does not appear to sort correctly!
#zipped = zip(absDif_list,ssr_1_list,ssr_2_list,bestFit_list,frag_list,
# measured_1_list,measured_2_list,calculatedAve_1_list,calculatedAve_2_list,
# measuredStdev_1_list,measuredStdev_2_list);
#zipped.sort();
#zipped.reverse();
#absDif_list,ssr_1_list,sst_2_list,bestFit_list,frag_list,\
# measured_1_list,measured_2_list,calculatedAve_1_list,calculatedAve_2_list,\
# measuredStdev_1_list,measuredStdev_2_list = zip(*zipped);
# restructure into a list of dictionaries for easy parsing or data base viewing
isotopomer_comparison = [];
for i in range(len(absDif_list)):
isotopomer_comparison.append({'isotopomer_absDif':absDif_list[i],
'isotopomer_1_sr':ssr_1_list[i],
'isotopomer_2_sr':ssr_2_list[i],
'bestFit':bestFit_list[i],
'frag':frag_list[i],
'measured_1_ave':measured_1_list[i],
'measured_2_ave':measured_2_list[i],
'measured_1_stdev':measuredStdev_1_list[i],
'measured_2_stdev':measuredStdev_2_list[i],
'calculated_1_ave':calculatedAve_1_list[i],
'calculated_2_ave':calculatedAve_2_list[i]});
return isotopomer_comparison,isotopomer_comparison_stats;
def compare_ci_calculated(self,ci_1,ci_2):
'''compare 2 calculated confidence intervals'''
from math import fabs # fabs is used below and may not be provided by the module-level wildcard imports
# extract into lists
rxns_1_list = [];
rxns_2_list = [];
ciminv_1_list = [];
ciminv_2_list = [];
cimaxv_1_list = [];
cimaxv_2_list = [];
cirange_1_list = [];
cirange_2_list = [];
cirange_1_sum = 0.0;
cirange_2_sum = 0.0;
# ci_1:
for k,v in ci_1.items():
rxns_1_list.append(k);
ciminv_1_list.append(v['minv']);
cimaxv_1_list.append(v['maxv']);
cirange_1_list.append(v['maxv']-v['minv']);
cirange_1_sum += v['maxv']-v['minv'];
## zip, sort, unzip
#zipped1 = zip(rxns_1_list,ciminv_1_list,cimaxv_1_list,cirange_1_list);
#zipped1.sort();
#rxns_1_list,ciminv_1_list,cimaxv_1_list,cirange_1_list = zip(*zipped1);
# ci_2:
for k,v in ci_2.items():
rxns_2_list.append(k);
ciminv_2_list.append(v['minv']);
cimaxv_2_list.append(v['maxv']);
cirange_2_list.append(v['maxv']-v['minv']);
cirange_2_sum += v['maxv']-v['minv'];
## zip, sort, unzip
#zipped2 = zip(rxns_2_list,ciminv_2_list,cimaxv_2_list,cirange_2_list);
#zipped2.sort();
#rxns_2_list,ciminv_2_list,cimaxv_2_list,cirange_2_list = zip(*zipped2);
# compare by rxn_id
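# reactions are matched by id with a pairwise scan over both lists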
cirange_absDev_list = [];
rxns_combined_list = [];
ciminv_1_combined_list = [];
ciminv_2_combined_list = [];
cimaxv_1_combined_list = [];
cimaxv_2_combined_list = [];
cirange_1_combined_list = [];
cirange_2_combined_list = [];
cirange_1_combined_sum = 0.0;
cirange_2_combined_sum = 0.0;
for i in range(len(rxns_1_list)):
for j in range(len(rxns_2_list)):
if rxns_1_list[i] == rxns_2_list[j]:
rxns_combined_list.append(rxns_1_list[i]);
cirange_absDev_list.append(fabs(cirange_1_list[i]-cirange_2_list[j]));
ciminv_1_combined_list.append(ciminv_1_list[i]);
ciminv_2_combined_list.append(ciminv_2_list[j]);
cimaxv_1_combined_list.append(cimaxv_1_list[i]);
cimaxv_2_combined_list.append(cimaxv_2_list[j]);
cirange_1_combined_list.append(cirange_1_list[i]);
cirange_2_combined_list.append(cirange_2_list[j]);
cirange_1_combined_sum += cirange_1_list[i]
cirange_2_combined_sum += cirange_2_list[j]
## zip, sort, unzip
#zippedCombined = zip(cirange_absDev_list,rxns_combined_list,ciminv_1_combined_list,ciminv_2_combined_list,cimaxv_1_combined_list,cimaxv_2_combined_list,cirange_1_combined_list,cirange_2_combined_list);
#zippedCombined.sort();
#zippedCombined.reverse();
#cirange_absDev_list,rxns_combined_list,ciminv_1_combined_list,ciminv_2_combined_list,cimaxv_1_combined_list,cimaxv_2_combined_list,cirange_1_combined_list,cirange_2_combined_list = zip(*zippedCombined);
# restructure into a list of dictionaries for easy parsing or data base viewing
ci_comparison = [];
for i in range(len(cirange_absDev_list)):
ci_comparison.append({'cirange_absDev_list':cirange_absDev_list[i],
'rxns_combined_list':rxns_combined_list[i],
'ciminv_1_combined_list':ciminv_1_combined_list[i],
'ciminv_2_combined_list':ciminv_2_combined_list[i],
'cimaxv_1_combined_list':cimaxv_1_combined_list[i],
'cimaxv_2_combined_list':cimaxv_2_combined_list[i],
'cirange_1_combined_list':cirange_1_combined_list[i],
'cirange_2_combined_list':cirange_2_combined_list[i]});
return ci_comparison,cirange_1_sum,cirange_2_sum,cirange_1_combined_sum,cirange_2_combined_sum;
def plot_compare_isotopomers_calculated(self,isotopomer_comparison,isotopomer_comparison_stats):
'''Plot 1: isotopomer fitting comparison
Plot 2: isotopomer residual comparison
NOTE: currently this exports the comparison to tsv for plotting externally'''
io = base_exportData(isotopomer_comparison);
# Plot 1 and Plot 2:
io.write_dict2tsv('data/data.tsv'); # previously 'data//data.tsv' (doubled path separator)
def plot_ci_calculated(self,ci):
'''plot confidence intervals from fluxomics experiment using escher'''
data = [];
flux1 = {};
flux2 = {};
for k,v in ci.items():
flux1[k] = v['minv'];
flux2[k] = v['maxv'];
data.append(flux1);
data.append(flux2);
io = base_exportData(data);
io.write_dict2json('visualization/escher/ci.json');
def export_modelWithFlux(self,cobra_model_xml_I,ci_list_I,cobra_model_xml_O):
'''update model lower_bound/upper_bound with calculated flux confidence intervals'''
cobra_model = create_cobra_model_from_sbml_file(cobra_model_xml_I);
rxns_add = [];
rxns_omitted = [];
rxns_break = [];
system_boundaries = [x.id for x in cobra_model.reactions if x.boundary == 'system_boundary'];
objectives = [x.id for x in cobra_model.reactions if x.objective_coefficient == 1];
for i,ci_I in enumerate(ci_list_I):
print('add flux from ci ' + str(i));
for rxn in cobra_model.reactions:
if rxn.id in list(ci_I.keys()) and not(rxn.id in system_boundaries)\
and not(rxn.id in objectives):
cobra_model_copy = cobra_model.copy();
# check for reactions that break the model:
if ci_I[rxn.id]['minv'] > 0:
cobra_model_copy.reactions.get_by_id(rxn.id).lower_bound = ci_I[rxn.id]['minv'];
if ci_I[rxn.id]['maxv'] > 0 and ci_I[rxn.id]['maxv'] > ci_I[rxn.id]['minv']:
cobra_model_copy.reactions.get_by_id(rxn.id).upper_bound = ci_I[rxn.id]['maxv'];
cobra_model_copy.optimize(solver='gurobi');
if not cobra_model_copy.solution.f:
print(rxn.id + ' broke the model!')
rxns_break.append(rxn.id);
else:
if ci_I[rxn.id]['minv'] > 0:
cobra_model.reactions.get_by_id(rxn.id).lower_bound = ci_I[rxn.id]['minv'];
if ci_I[rxn.id]['maxv'] > 0 and ci_I[rxn.id]['maxv'] > ci_I[rxn.id]['minv']:
cobra_model.reactions.get_by_id(rxn.id).upper_bound = ci_I[rxn.id]['maxv'];
rxns_add.append(rxn.id);
else:
rxns_omitted.append(rxn.id);
write_cobra_model_to_sbml_file(cobra_model,cobra_model_xml_O)
class stage02_isotopomer_metaboliteMapping():
"""Class to standardize metabolite mapping:
A mapped metabolite takes the following form:
'met_id' + 'nMet_id' + '_' + 'element' + nElement
Input:
met_ids_elements_I = [{met_id:element},...]
[{'f6p_c':'C'},{'f6p_c':'C'},{'f6p_c':'H'},{'f6p_c':'H'},{'ac_c':'C'},{'utp_c':'C'}]
NOTE: the order matters when using multiple elements; this will need further testing in future versions
Base metabolites: default base metabolite is co2 for carbon and oh for hydrogen
Base reaction: co2 + oh- + h+ = ch2o + o2"""
def __init__(self,
mapping_id_I=None,
#met_name_I=None,
met_id_I=None,
#formula_I=None,
met_elements_I=[],
met_atompositions_I=[],
met_symmetry_elements_I=[],
met_symmetry_atompositions_I=[],
used__I=True,
comment__I=None,
met_mapping_I=[],
base_met_ids_I=[],
base_met_elements_I=[],
base_met_atompositions_I=[],
base_met_symmetry_elements_I=[],
base_met_symmetry_atompositions_I=[],
base_met_indices_I=[]):
#self.session = Session();
self.stage02_isotopomer_query = stage02_isotopomer_query();
self.calculate = base_calculate();
self.metaboliteMapping={};
self.metaboliteMapping['mapping_id']=mapping_id_I;
#self.metaboliteMapping['met_name']=met_name_I;
self.metaboliteMapping['met_id']=met_id_I;
#self.metaboliteMapping['formula']=formula_I;
self.metaboliteMapping['met_elements']=met_elements_I;
self.metaboliteMapping['met_atompositions']=met_atompositions_I;
self.metaboliteMapping['met_symmetry_elements']=met_symmetry_elements_I;
self.metaboliteMapping['met_symmetry_atompositions']=met_symmetry_atompositions_I;
self.metaboliteMapping['used_']=used__I;
self.metaboliteMapping['comment_']=comment__I;
self.metaboliteMapping['met_mapping']=met_mapping_I;
self.metaboliteMapping['base_met_ids']=base_met_ids_I;
self.metaboliteMapping['base_met_elements']=base_met_elements_I;
self.metaboliteMapping['base_met_atompositions']=base_met_atompositions_I;
self.metaboliteMapping['base_met_symmetry_elements']=base_met_symmetry_elements_I;
self.metaboliteMapping['base_met_symmetry_atompositions']=base_met_symmetry_atompositions_I;
self.metaboliteMapping['base_met_indices']=base_met_indices_I;
def make_elementsAndPositionsTracked(self,met_id_I,element_I,n_elements_I):
#Input: met_id_I, element_I, n_elements_I
#Output: mapping_O, positions_O, elements_O
#E.g.: make_elementsAndPositionsTracked('fdp','C',6)
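# returns (['[fdp_C0]','[fdp_C1]','[fdp_C2]','[fdp_C3]','[fdp_C4]','[fdp_C5]'], [0,1,2,3,4,5], ['C','C','C','C','C','C'])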
mapping_O = [];
positions_O = [];
elements_O = [];
for elements_cnt in range(n_elements_I):
mapping = '[' + met_id_I.replace('.','_') + '_' + element_I + str(elements_cnt) + ']';
mapping_O.append(mapping);
positions_O.append(elements_cnt);
elements_O.append(element_I);
return mapping_O,positions_O,elements_O;
def make_trackedMetabolite(self,mapping_id_I,model_id_I,met_id_element_I,met_index_I=None):
'''Make a unique atom mapping for the given metabolite and element'''
currentElementPos = 0;
mapping_O = [];
positions_O = [];
elements_O = [];
base_met_ids_O = [];
base_met_elements_O = [];
base_met_atompositions_O = [];
base_met_symmetry_elements_O = [];
base_met_symmetry_atompositions_O = [];
base_met_indices_O = [];
for k,v in met_id_element_I.items():
# check if the metabolite is already in the database
met_data = {}
met_data = self.stage02_isotopomer_query.get_rows_mappingIDAndMetID_dataStage02IsotopomerAtomMappingMetabolites(mapping_id_I,k)
#NOTE: need to add in a constraint to make sure that the elements in the database and the elements in the input match!
if met_data and 'met_elements' in met_data and v==met_data['met_elements'][0]:
nElements = len(met_data['met_elements']);
else:
# get the formula for the met_id
formula_I = self.stage02_isotopomer_query.get_formula_modelIDAndMetID_dataStage02IsotopomerModelMetabolites(model_id_I,k);
# get the number of elements
if v not in Formula(formula_I)._elements: break; #check if the element is even contained in the formula
if 0 in Formula(formula_I)._elements[v]:
nElements = Formula(formula_I)._elements[v][0]; #get the # of the elements
# make the tracking
nMet = 0;
if met_index_I: nMet = met_index_I
mapping,positions,elements = self.make_elementsAndPositionsTracked(k+str(nMet),v,nElements);
positions_corrected = [currentElementPos+pos for pos in positions];
currentElementPos += max(positions)+1;
mapping_O.append(mapping);
positions_O.extend(positions_corrected);
elements_O.extend(elements);
base_met_ids_O.append(k)
base_met_elements_O.append(elements)
base_met_atompositions_O.append(positions)
base_met_indices_O.append(nMet)
self.metaboliteMapping['mapping_id']=mapping_id_I
self.metaboliteMapping['met_id']=k
self.metaboliteMapping['met_elements']=elements_O
self.metaboliteMapping['met_atompositions']=positions_O
self.metaboliteMapping['met_mapping']=mapping_O
self.metaboliteMapping['base_met_ids']=base_met_ids_O
self.metaboliteMapping['base_met_elements']=base_met_elements_O
self.metaboliteMapping['base_met_atompositions']=base_met_atompositions_O
self.metaboliteMapping['base_met_indices']=base_met_indices_O
def make_compoundTrackedMetabolite(self,mapping_id_I,model_id_I,met_ids_elements_I,met_id_O,met_ids_indices_I = []):
'''Make a unique atom mapping for the given metabolite based on base metabolites and elements'''
#Input:
# met_ids_elements_I = [{met_id:element},...]
# e.g. met_ids_elements_I = [{'f6p_c':'C'},{'ac_c':'C'},{'utp_c':'C'}]
# (an extended format with explicit stoichiometry, i.e. [{met_id:{'elements':[...],'stoichiometry':float}},...], was sketched here but is not consumed by this method)
# make_compoundTrackedMetabolite('full04','140407_iDM2014',met_ids_elements_I,'uacgam_c')
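# each repeated met_id receives an incremented index (via met_ids_cnt) so that every atom gets a unique mapping symbol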
currentElementPos = 0;
mapping_O = [];
positions_O = [];
elements_O = [];
base_met_ids_O = [];
base_met_elements_O = [];
base_met_atompositions_O = [];
base_met_symmetry_elements_O = [];
base_met_symmetry_atompositions_O = [];
base_met_indices_O = [];
# get unique met_ids
met_ids_all = [];
for row in met_ids_elements_I:
for k,v in row.items():
met_ids_all.append(k);
met_ids_unique = list(set(met_ids_all))
met_ids_cnt = {};
met_ids_elements = {};
for met_id in met_ids_unique:
met_ids_cnt[met_id] = 0;
met_ids_elements[met_id] = [];
# make the compound mapping
for row_cnt,row in enumerate(met_ids_elements_I):
for k,v in row.items():
# check if the metabolite is already in the database
met_data = {}
met_data = self.stage02_isotopomer_query.get_rows_mappingIDAndMetID_dataStage02IsotopomerAtomMappingMetabolites(mapping_id_I,k)
#NOTE: need to add in a constraint to make sure that the elements in the database and the elements in the input match!
if met_data and 'met_elements' in met_data and v==met_data['met_elements'][0]:
nElements = len(met_data['met_elements']);
else:
# get the formula for the met_id
formula_I = self.stage02_isotopomer_query.get_formula_modelIDAndMetID_dataStage02IsotopomerModelMetabolites(model_id_I,k);
# get the number of elements
if v not in Formula(formula_I)._elements: break; #check if the element is even contained in the formula
if 0 in Formula(formula_I)._elements[v]:
nElements = Formula(formula_I)._elements[v][0]; #get the # of the elements
# determine the metabolite index
nMets = met_ids_cnt[k];
if met_ids_indices_I: nMets = met_ids_indices_I[row_cnt]
# make the tracking
mapping,positions,elements = self.make_elementsAndPositionsTracked(k+str(nMets),v,nElements);
positions_corrected = [currentElementPos+pos for pos in positions];
currentElementPos += max(positions)+1;
# add to the compound tracking
mapping_O.append(mapping);
positions_O.extend(positions_corrected);
elements_O.extend(elements);
base_met_ids_O.append(k)
base_met_elements_O.append(elements)
base_met_atompositions_O.append(positions)
base_met_indices_O.append(nMets)
met_ids_cnt[k] += 1; # needed to ensure a unique metabolite mapping if the same met_id is used multiple times
self.metaboliteMapping['mapping_id']=mapping_id_I
self.metaboliteMapping['met_id']=met_id_O
self.metaboliteMapping['met_elements']=elements_O
self.metaboliteMapping['met_atompositions']=positions_O
self.metaboliteMapping['met_mapping']=mapping_O
self.metaboliteMapping['base_met_ids']=base_met_ids_O
self.metaboliteMapping['base_met_elements']=base_met_elements_O
self.metaboliteMapping['base_met_atompositions']=base_met_atompositions_O
self.metaboliteMapping['base_met_indices']=base_met_indices_O
def append_baseMetabolites_toMetabolite(self,model_id_I,met_ids_elements_I,met_id_O=None):
'''Append a base metabolite to the current metabolite'''
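        # A hedged usage sketch (met ids and the mapping id are illustrative, not from the source):
        #   imm.make_trackedMetabolite('full04','140407_iDM2014',{'f6p_c':'C'},0)
        #   imm.append_baseMetabolites_toMetabolite('140407_iDM2014',[{'ac_c':'C'}],met_id_O='f6p_ac_c')
        # the tracked carbons of 'ac_c' are appended after the last atom position of the current metabolite.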
#get the currentElementPos
currentElementPos = max(self.metaboliteMapping['met_atompositions'])+1;
# get unique met_ids
met_ids_unique = list(set(self.metaboliteMapping['base_met_ids']))
met_ids_cnt = {};
met_ids_elements = {};
for met_id in met_ids_unique:
met_ids_cnt[met_id] = 0;
met_ids_elements[met_id] = [];
for met_id_cnt,met_id in enumerate(self.metaboliteMapping['base_met_ids']):
# determine the number of met_ids
met_ids_cnt[met_id]+=1
# determine the unique elements
            if not self.metaboliteMapping['base_met_elements'][met_id_cnt][0] in met_ids_elements[met_id]:
                met_ids_elements[met_id].append(self.metaboliteMapping['base_met_elements'][met_id_cnt][0]); # use the element of each base metabolite (met_elements is the flat per-atom list)
# add the mapping for the new metabolites
for row in met_ids_elements_I:
for k,v in row.items():
# check if the metabolite is already in the database
met_data = {}
met_data = self.stage02_isotopomer_query.get_rows_mappingIDAndMetID_dataStage02IsotopomerAtomMappingMetabolites(self.metaboliteMapping['mapping_id'],k)
                #NOTE: need to add a constraint to make sure that the elements in the database and the elements in the input match!
if met_data and 'met_elements' in met_data and v==met_data['met_elements'][0]:
nElements = len(met_data['met_elements']);
else:
# get the formula for the met_id
formula_I = self.stage02_isotopomer_query.get_formula_modelIDAndMetID_dataStage02IsotopomerModelMetabolites(model_id_I,k);
# get the number of elements
if v not in Formula(formula_I)._elements: break; #check if the element is even contained in the formula
if 0 in Formula(formula_I)._elements[v]:
nElements = Formula(formula_I)._elements[v][0]; #get the # of the elements
                # adjust the metabolite number if the same metabolite already exists
                if k not in met_ids_cnt: met_ids_cnt[k] = 0; # guard against metabolites not yet counted
                nMets = met_ids_cnt[k];
                met_id_mapping = k+str(nMets); # cast to str (k+nMets would raise a TypeError)
# make the tracking
mapping,positions,elements = self.make_elementsAndPositionsTracked(met_id_mapping,v,nElements);
positions_corrected = [currentElementPos+pos for pos in positions];
currentElementPos += max(positions)+1;
# add to the compound tracking
self.metaboliteMapping['met_mapping'].append(mapping);
self.metaboliteMapping['met_atompositions'].extend(positions_corrected);
self.metaboliteMapping['met_elements'].extend(elements);
self.metaboliteMapping['base_met_ids'].append(k)
self.metaboliteMapping['base_met_elements'].append(elements)
self.metaboliteMapping['base_met_atompositions'].append(positions)
self.metaboliteMapping['base_met_indices'].append(met_ids_cnt[k]);
                met_ids_cnt[k]+=1; # increment the count for the metabolite just appended (was met_ids_cnt[met_id], a stale loop variable)
if met_id_O: self.metaboliteMapping['met_id']=met_id_O
def pop_baseMetabolite_fromMetabolite(self,model_id_I,met_id_element_I,met_id_O=None):
'''Remove a base metabolite from the current metabolite:
metabolites are removed FILO;
NOTE: this can lead to problems downstream when the mapping
is reconstructed from the base metabolites if multiple elements are used'''
#Input:
# met_id_element_I = {met_id:element}
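        # A hedged example (ids are illustrative): if the metabolite was assembled from
        # [{'f6p_c':'C'},{'ac_c':'C'},{'ac_c':'C'}], then
        #   self.pop_baseMetabolite_fromMetabolite(model_id_I,{'ac_c':'C'})
        # drops the most recently added 'ac_c' carbon skeleton and rebuilds the mapping
        # via update_trackedMetabolite_fromBaseMetabolites.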
met_mapping = self.metaboliteMapping['met_mapping'];
base_met_ids = self.metaboliteMapping['base_met_ids'];
base_met_elements = self.metaboliteMapping['base_met_elements'];
base_met_atompositions = self.metaboliteMapping['base_met_atompositions'];
base_met_indices = self.metaboliteMapping['base_met_indices'];
#base_met_symmetry_elements=self.metaboliteMapping['base_met_symmetry_elements'];
#base_met_symmetry_atompositions=self.metaboliteMapping['base_met_symmetry_atompositions'];
met_mapping.reverse();
base_met_ids.reverse();
base_met_elements.reverse();
base_met_atompositions.reverse();
base_met_indices.reverse();
#base_met_symmetry_elements.reverse();
#base_met_symmetry_atompositions.reverse();
self.metaboliteMapping['met_mapping']=[]
self.metaboliteMapping['base_met_ids']=[]
self.metaboliteMapping['base_met_elements']=[]
self.metaboliteMapping['base_met_atompositions']=[]
self.metaboliteMapping['base_met_indices']=[]
#self.metaboliteMapping['base_met_symmetry_elements']=[]
#self.metaboliteMapping['base_met_symmetry_atompositions']=[]
for met_id_remove,v in met_id_element_I.items():
removed = False
for met_cnt,met_id in enumerate(base_met_ids):
if met_id_remove == met_id and v==base_met_elements[met_cnt][0] and not removed:
removed = True;
else:
self.metaboliteMapping['met_mapping'].insert(0,met_mapping[met_cnt]);
self.metaboliteMapping['base_met_ids'].insert(0,base_met_ids[met_cnt]);
self.metaboliteMapping['base_met_elements'].insert(0,base_met_elements[met_cnt]);
self.metaboliteMapping['base_met_atompositions'].insert(0,base_met_atompositions[met_cnt]);
self.metaboliteMapping['base_met_indices'].insert(0,base_met_indices[met_cnt])
#self.metaboliteMapping['base_met_symmetry_elements'].insert(0,base_met_symmetry_elements[met_cnt]);
#self.metaboliteMapping['base_met_symmetry_atompositions'].insert(0,base_met_symmetry_atompositions[met_cnt]);
if met_id_O: self.metaboliteMapping['met_id']=met_id_O
self.update_trackedMetabolite_fromBaseMetabolites(model_id_I);
def remove_baseMetabolite_fromMetabolite(self,model_id_I,met_id_element_I,met_id_O=None,met_index_I=None):
        '''Remove a base metabolite from the current metabolite:
        metabolites are removed FIFO if the index is not specified'''
        #Input:
        #   met_id_element_I = {met_id:element}
met_mapping = self.metaboliteMapping['met_mapping'];
base_met_ids = self.metaboliteMapping['base_met_ids'];
base_met_elements = self.metaboliteMapping['base_met_elements'];
base_met_atompositions = self.metaboliteMapping['base_met_atompositions'];
base_met_indices = self.metaboliteMapping['base_met_indices'];
#base_met_symmetry_elements=self.metaboliteMapping['base_met_symmetry_elements'];
#base_met_symmetry_atompositions=self.metaboliteMapping['base_met_symmetry_atompositions'];
self.metaboliteMapping['met_mapping']=[]
self.metaboliteMapping['base_met_ids']=[]
self.metaboliteMapping['base_met_elements']=[]
self.metaboliteMapping['base_met_atompositions']=[]
self.metaboliteMapping['base_met_indices']=[]
#self.metaboliteMapping['base_met_symmetry_elements']=[]
#self.metaboliteMapping['base_met_symmetry_atompositions']=[]
for met_id_remove,v in met_id_element_I.items():
removed = False
for met_cnt,met_id in enumerate(base_met_ids):
if met_index_I:
if met_index_I == base_met_indices[met_cnt] and met_id_remove == met_id and v==base_met_elements[met_cnt][0] and not removed:
removed = True
else:
self.metaboliteMapping['met_mapping'].append(met_mapping[met_cnt]);
self.metaboliteMapping['base_met_ids'].append(base_met_ids[met_cnt]);
self.metaboliteMapping['base_met_elements'].append(base_met_elements[met_cnt]);
self.metaboliteMapping['base_met_atompositions'].append(base_met_atompositions[met_cnt]);
self.metaboliteMapping['base_met_indices'].append(base_met_indices[met_cnt]);
#self.metaboliteMapping['base_met_symmetry_elements'].append(base_met_symmetry_elements[met_cnt]);
#self.metaboliteMapping['base_met_symmetry_atompositions'].append(base_met_symmetry_atompositions[met_cnt]);
else:
if met_id_remove == met_id and v==base_met_elements[met_cnt][0] and not removed:
removed = True
else:
self.metaboliteMapping['met_mapping'].append(met_mapping[met_cnt]);
self.metaboliteMapping['base_met_ids'].append(base_met_ids[met_cnt]);
self.metaboliteMapping['base_met_elements'].append(base_met_elements[met_cnt]);
self.metaboliteMapping['base_met_atompositions'].append(base_met_atompositions[met_cnt]);
self.metaboliteMapping['base_met_indices'].append(base_met_indices[met_cnt]);
#self.metaboliteMapping['base_met_symmetry_elements'].append(base_met_symmetry_elements[met_cnt]);
#self.metaboliteMapping['base_met_symmetry_atompositions'].append(base_met_symmetry_atompositions[met_cnt]);
if met_id_O: self.metaboliteMapping['met_id']=met_id_O
self.update_trackedMetabolite_fromBaseMetabolites(model_id_I);
def extract_baseMetabolite_fromMetabolite(self,model_id_I,met_id_element_I,met_index_I=None):
        '''Return a new metaboliteMapping for one base metabolite of the current metabolite:
        base metabolites are matched FIFO if the index is not specified'''
base_metaboliteMapping = stage02_isotopomer_metaboliteMapping();
base_met_ids = self.metaboliteMapping['base_met_ids'];
met_id_remove = {};
met_index = None
for k,v in met_id_element_I.items():
for met_cnt,met_id in enumerate(base_met_ids):
if met_index_I:
if met_index_I == self.metaboliteMapping['base_met_indices'][met_cnt] and k == met_id and v==self.metaboliteMapping['base_met_elements'][met_cnt][0]:
met_id_remove = {k:self.metaboliteMapping['base_met_elements'][met_cnt][0]};
met_index = met_index_I;
break;
else:
if k == met_id and v==self.metaboliteMapping['base_met_elements'][met_cnt][0]:
met_id_remove = {k:self.metaboliteMapping['base_met_elements'][met_cnt][0]};
met_index = self.metaboliteMapping['base_met_indices'][met_cnt]
break;
base_metaboliteMapping.make_trackedMetabolite(self.metaboliteMapping['mapping_id'],model_id_I,met_id_remove,met_index);
return base_metaboliteMapping
def update_trackedMetabolite_fromBaseMetabolites(self,model_id_I):
'''update mapping, elements, and atompositions from base metabolites;
NOTE: issues may arise in the number assigned to each metabolite if multiple elements are used'''
# get unique met_ids
met_ids_unique = list(set(self.metaboliteMapping['base_met_ids']))
met_ids_cnt = {};
met_ids_elements = {};
for met_id in met_ids_unique:
met_ids_cnt[met_id] = 0;
met_ids_elements[met_id] = [];
# make the input structure
met_ids_elements_I = [];
for met_id_cnt,met_id in enumerate(self.metaboliteMapping['base_met_ids']):
met_ids_elements_I.append({met_id:self.metaboliteMapping['base_met_elements'][met_id_cnt][0]})
self.make_compoundTrackedMetabolite(self.metaboliteMapping['mapping_id'],model_id_I,met_ids_elements_I,self.metaboliteMapping['met_id'],self.metaboliteMapping['base_met_indices'])
def make_newMetaboliteMapping(self):
'''Make a new mapping for the metabolite that switches out the names of the base metabolites
for the current metabolite'''
mapping_O= [];
elements = list(set(self.metaboliteMapping['met_elements']))
element_cnt = {};
for element in elements:
element_cnt[element] = 0;
for met_element in self.metaboliteMapping['met_elements']:
mapping = '[' + self.metaboliteMapping['met_id'].replace('.','_') + '_' + met_element + str(element_cnt[met_element]) + ']';
mapping_O.append(mapping);
element_cnt[met_element]+=1
return mapping_O
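    # A minimal sketch of the relabeling above, assuming a two-carbon metabolite 'ac_c'
    # (illustrative): make_newMetaboliteMapping() returns ['[ac_c_C0]','[ac_c_C1]'],
    # i.e. the mapping is rewritten in terms of the current met_id instead of the base met_ids.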
def make_defaultBaseMetabolites(self):
        '''Add a default base metabolite (e.g. co2) for each tracked atom of the metabolite'''
self.metaboliteMapping['base_met_ids']=[];
self.metaboliteMapping['base_met_elements']=[];
self.metaboliteMapping['base_met_atompositions']=[];
self.metaboliteMapping['base_met_symmetry_elements']=[];
self.metaboliteMapping['base_met_symmetry_atompositions']=[];
self.metaboliteMapping['base_met_indices']=[];
compartment = self.metaboliteMapping['met_id'].split('_')[-1]
for cnt,element in enumerate(self.metaboliteMapping['met_elements']):
if element == 'C':
self.metaboliteMapping['base_met_ids'].append('co2'+'_'+compartment);
self.metaboliteMapping['base_met_elements'].append([element]);
self.metaboliteMapping['base_met_atompositions'].append([0]);
self.metaboliteMapping['base_met_indices'].append(cnt);
elif element == 'H':
                self.metaboliteMapping['base_met_ids'].append('h'+'_'+compartment); # use the compartment (was 'h'+'_'+element, which produced e.g. 'h_H')
self.metaboliteMapping['base_met_elements'].append([element]);
self.metaboliteMapping['base_met_atompositions'].append([0]);
self.metaboliteMapping['base_met_indices'].append(cnt);
else: print("element not yet supported")
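    # Hedged example: for a tracked metabolite with met_elements ['C','C','C','C'] in
    # compartment 'c' (illustrative), the defaults above assign one 'co2_c' base
    # metabolite per carbon, with base_met_indices [0,1,2,3].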
def convert_arrayMapping2StringMapping(self):
'''Convert an array representation of a mapping to a string representation'''
        stringMapping = ''
for mapping in self.metaboliteMapping['met_mapping']:
stringMapping+=''.join(mapping)
return stringMapping;
def convert_stringMapping2ArrayMapping(self):
'''Convert a string representation of a mapping to an array representation'''
stringMapping = self.metaboliteMapping['met_mapping']
if '[' in self.metaboliteMapping['met_mapping']:
stringMapping = self.metaboliteMapping['met_mapping'].split('][');
stringMapping = [m.replace('[','') for m in stringMapping];
stringMapping = [m.replace(']','') for m in stringMapping];
else:
stringMapping = [m for m in stringMapping];
# add in '[]'
arrayMapping = [];
for m in stringMapping:
arrayMapping.append('['+m+']')
return arrayMapping;
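    # A round-trip sketch of the two converters above (labels are illustrative):
    #   ['[ac_c0_C0]','[ac_c0_C1]'] <-> '[ac_c0_C0][ac_c0_C1]'
    # convert_stringMapping2ArrayMapping also accepts unbracketed strings such as 'abc',
    # which are expanded one character at a time to ['[a]','[b]','[c]'].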
def add_metaboliteMapping(self,
mapping_id_I=None,
met_id_I=None,
met_elements_I=None,
met_atompositions_I=None,
met_symmetry_elements_I=None,
met_symmetry_atompositions_I=None,
used__I=True,
comment__I=None):
'''Add tracked metabolite to the database'''
if mapping_id_I: self.metaboliteMapping['mapping_id']=mapping_id_I;
if met_id_I: self.metaboliteMapping['met_id']=met_id_I;
if met_elements_I: self.metaboliteMapping['met_elements']=met_elements_I;
if met_atompositions_I: self.metaboliteMapping['met_atompositions']=met_atompositions_I;
if met_symmetry_elements_I: self.metaboliteMapping['met_symmetry_elements']=met_symmetry_elements_I;
if met_symmetry_atompositions_I: self.metaboliteMapping['met_symmetry_atompositions']=met_symmetry_atompositions_I;
if used__I: self.metaboliteMapping['used_']=used__I;
if comment__I: self.metaboliteMapping['comment_']=comment__I;
#add data to the database
data = self.metaboliteMapping;
data['met_mapping'] = self.make_newMetaboliteMapping();
self.stage02_isotopomer_query.add_data_dataStage02IsotopomerAtomMappingMetabolites([data]);
def update_metaboliteMapping(self,
mapping_id_I=None,
met_id_I=None,
met_elements_I=None,
met_atompositions_I=None,
met_symmetry_elements_I=None,
met_symmetry_atompositions_I=None,
used__I=True,
comment__I=None):
        '''Update the tracked metabolite in the database'''
if mapping_id_I: self.metaboliteMapping['mapping_id']=mapping_id_I;
if met_id_I: self.metaboliteMapping['met_id']=met_id_I;
if met_elements_I: self.metaboliteMapping['met_elements']=met_elements_I;
if met_atompositions_I: self.metaboliteMapping['met_atompositions']=met_atompositions_I;
if met_symmetry_elements_I: self.metaboliteMapping['met_symmetry_elements']=met_symmetry_elements_I;
if met_symmetry_atompositions_I: self.metaboliteMapping['met_symmetry_atompositions']=met_symmetry_atompositions_I;
if used__I: self.metaboliteMapping['used_']=used__I;
if comment__I: self.metaboliteMapping['comment_']=comment__I;
self.metaboliteMapping['met_mapping']=self.make_newMetaboliteMapping()
        #update the data in the database
self.stage02_isotopomer_query.update_rows_dataStage02IsotopomerAtomMappingMetabolites([self.metaboliteMapping]);
def get_metaboliteMapping(self,mapping_id_I,met_id_I):
'''Get tracked metabolite from the database'''
row = {}
row = self.stage02_isotopomer_query.get_rows_mappingIDAndMetID_dataStage02IsotopomerAtomMappingMetabolites(mapping_id_I,met_id_I);
self.metaboliteMapping=row;
def get_baseMetabolites(self):
        '''Get the base metabolites from the database for the current metabolite'''
row = {}
row = self.stage02_isotopomer_query.get_rows_mappingIDAndMetID_dataStage02IsotopomerAtomMappingMetabolites(self.metaboliteMapping['mapping_id'],self.metaboliteMapping['met_id']);
self.metaboliteMapping['base_met_ids']=row['base_met_ids'];
self.metaboliteMapping['base_met_elements']=row['base_met_elements']
self.metaboliteMapping['base_met_atompositions']=row['base_met_atompositions']
self.metaboliteMapping['base_met_symmetry_elements']=row['base_met_symmetry_elements']
self.metaboliteMapping['base_met_symmetry_atompositions']=row['base_met_symmetry_atompositions']
## if the current base_met_indices are already set, add to them
## NOTE: works only if the base metabolite is also the current metabolite
#if len(self.metaboliteMapping['base_met_indices'])==1:
# currentIndex = self.metaboliteMapping['base_met_indices'][0]
# self.metaboliteMapping['base_met_indices'] = [currentIndex + i for i in row['base_met_indices']];
## else ensure that all met_id/base_met_index pairs are unique
#else:
# self.metaboliteMapping['base_met_indices']=row['base_met_indices']
self.metaboliteMapping['base_met_indices']=row['base_met_indices']
def clear_metaboliteMapping(self):
self.metaboliteMapping={};
self.metaboliteMapping['mapping_id']=None;
#self.metaboliteMapping['met_name']=None;
self.metaboliteMapping['met_id']=None;
#self.metaboliteMapping['formula']=None;
self.metaboliteMapping['met_elements']=None;
self.metaboliteMapping['met_atompositions']=None;
self.metaboliteMapping['met_symmetry_elements']=None;
self.metaboliteMapping['met_symmetry_atompositions']=None;
self.metaboliteMapping['used_']=True;
self.metaboliteMapping['comment_']=None;
self.metaboliteMapping['met_mapping']=None;
self.metaboliteMapping['base_met_ids']=None;
self.metaboliteMapping['base_met_elements']=None;
self.metaboliteMapping['base_met_atompositions']=None;
self.metaboliteMapping['base_met_symmetry_elements']=None;
self.metaboliteMapping['base_met_symmetry_atompositions']=None;
self.metaboliteMapping['base_met_indices']=None;
def make_symmetric(self,met_symmetry_elements_I=[],met_symmetry_atompositions_I=[]):
        '''Make the current metabolite symmetric;
        default = 180-degree rotational symmetry (elements and atom positions reversed)'''
if met_symmetry_elements_I and met_symmetry_atompositions_I:
self.metaboliteMapping['met_symmetry_elements']=met_symmetry_elements_I;
self.metaboliteMapping['met_symmetry_atompositions']=met_symmetry_atompositions_I;
else:
self.metaboliteMapping['met_symmetry_elements']=[m for m in reversed(self.metaboliteMapping['met_elements'])];
self.metaboliteMapping['met_symmetry_atompositions']=[m for m in reversed(self.metaboliteMapping['met_atompositions'])];
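    # Hedged example: for a rotationally symmetric metabolite such as fumarate
    # ('fum_c', illustrative) with met_atompositions [0,1,2,3], the default call
    #   imm.make_symmetric()
    # sets met_symmetry_atompositions to [3,2,1,0], i.e. the 180-degree rotation.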
def copy_metaboliteMappingDict(self):
'''Copy the current metabolite mapping'''
copy_metaboliteMapping = {};
copy_metaboliteMapping['mapping_id']=self.metaboliteMapping['mapping_id']
#copy_metaboliteMapping['met_name']=self.metaboliteMapping['met_name']
copy_metaboliteMapping['met_id']=self.metaboliteMapping['met_id']
#copy_metaboliteMapping['formula']=self.metaboliteMapping['formula']
copy_metaboliteMapping['met_elements']=self.metaboliteMapping['met_elements']
copy_metaboliteMapping['met_atompositions']=self.metaboliteMapping['met_atompositions']
copy_metaboliteMapping['met_symmetry_elements']=self.metaboliteMapping['met_symmetry_elements']
copy_metaboliteMapping['met_symmetry_atompositions']=self.metaboliteMapping['met_symmetry_atompositions']
copy_metaboliteMapping['used_']=self.metaboliteMapping['used_']
copy_metaboliteMapping['comment_']=self.metaboliteMapping['comment_']
copy_metaboliteMapping['met_mapping']=self.metaboliteMapping['met_mapping']
copy_metaboliteMapping['base_met_ids']=self.metaboliteMapping['base_met_ids']
copy_metaboliteMapping['base_met_elements']=self.metaboliteMapping['base_met_elements']
copy_metaboliteMapping['base_met_atompositions']=self.metaboliteMapping['base_met_atompositions']
copy_metaboliteMapping['base_met_symmetry_elements']=self.metaboliteMapping['base_met_symmetry_elements']
copy_metaboliteMapping['base_met_symmetry_atompositions']=self.metaboliteMapping['base_met_symmetry_atompositions']
copy_metaboliteMapping['base_met_indices']=self.metaboliteMapping['base_met_indices'];
return copy_metaboliteMapping
def copy_metaboliteMapping(self):
        '''Return self so that callers can take a copy via copy(self.copy_metaboliteMapping());
        NOTE: this method does not itself copy the object (use copy_metaboliteMappingDict for a field-level copy)'''
        return self;
class stage02_isotopomer_reactionMapping():
def __init__(self,
mapping_id_I=None,
rxn_id_I=None,
rxn_description_I=None,
reactants_stoichiometry_tracked_I=[],
products_stoichiometry_tracked_I=[],
reactants_ids_tracked_I=[],
products_ids_tracked_I=[],
reactants_elements_tracked_I=[],
products_elements_tracked_I=[],
reactants_positions_tracked_I=[],
products_positions_tracked_I=[],
reactants_mapping_I=[],
products_mapping_I=[],
rxn_equation_I=None,
used__I=None,
comment__I=None,
reactants_metaboliteMappings_I=[],
products_metaboliteMappings_I=[]):
#self.session = Session();
self.stage02_isotopomer_query = stage02_isotopomer_query();
self.calculate = base_calculate();
self.reactionMapping={}
self.reactionMapping['mapping_id']=mapping_id_I
self.reactionMapping['rxn_id']=rxn_id_I
self.reactionMapping['rxn_description']=rxn_description_I
self.reactionMapping['reactants_stoichiometry_tracked']=reactants_stoichiometry_tracked_I
self.reactionMapping['products_stoichiometry_tracked']=products_stoichiometry_tracked_I
self.reactionMapping['reactants_ids_tracked']=reactants_ids_tracked_I
self.reactionMapping['products_ids_tracked']=products_ids_tracked_I
self.reactionMapping['reactants_elements_tracked']=reactants_elements_tracked_I
self.reactionMapping['products_elements_tracked']=products_elements_tracked_I
self.reactionMapping['reactants_positions_tracked']=reactants_positions_tracked_I
self.reactionMapping['products_positions_tracked']=products_positions_tracked_I
self.reactionMapping['reactants_mapping']=reactants_mapping_I
self.reactionMapping['products_mapping']=products_mapping_I
self.reactionMapping['rxn_equation']=rxn_equation_I
self.reactionMapping['used_']=used__I
self.reactionMapping['comment_']=comment__I
self.reactionMapping['reactants_metaboliteMappings']=reactants_metaboliteMappings_I
self.reactionMapping['products_metaboliteMappings']=products_metaboliteMappings_I
self.reactants_base_met_ids=[];
self.reactants_base_met_elements=[];
self.reactants_base_met_atompositions=[];
self.reactants_base_met_symmetry_elements=[];
self.reactants_base_met_symmetry_atompositions=[];
self.reactants_base_met_indices=[];
self.products_base_met_ids=[];
self.products_base_met_elements=[];
self.products_base_met_atompositions=[];
self.products_base_met_symmetry_elements=[];
self.products_base_met_symmetry_atompositions=[];
self.products_base_met_indices=[];
def make_trackedCompoundReaction_fromRow(self,mapping_id_I,model_id_I,rxn_id_I,
rxn_description_I=None,
reactants_stoichiometry_tracked_I=[],
products_stoichiometry_tracked_I=[],
reactants_ids_tracked_I=[],
products_ids_tracked_I=[],
reactants_mapping_I=[],
products_mapping_I=[],
rxn_equation_I=None,
used__I=True,
comment__I=None):
irm = stage02_isotopomer_reactionMapping(
mapping_id_I=mapping_id_I,
rxn_id_I=rxn_id_I,
            rxn_description_I=rxn_description_I,
reactants_stoichiometry_tracked_I=reactants_stoichiometry_tracked_I,
products_stoichiometry_tracked_I=products_stoichiometry_tracked_I,
reactants_ids_tracked_I=reactants_ids_tracked_I,
products_ids_tracked_I=products_ids_tracked_I,
reactants_mapping_I=reactants_mapping_I,
products_mapping_I=products_mapping_I,
rxn_equation_I=rxn_equation_I,
used__I=used__I,
comment__I=comment__I);
irm.reactionMapping['reactants_elements_tracked']=None;
irm.reactionMapping['reactants_positions_tracked']=None;
irm.reactionMapping['products_elements_tracked']=None;
irm.reactionMapping['products_positions_tracked']=None;
irm.checkAndCorrect_elementsAndPositions();
self.reactionMapping['mapping_id']=irm.reactionMapping['mapping_id']
self.reactionMapping['rxn_id']=irm.reactionMapping['rxn_id']
self.reactionMapping['rxn_description']=irm.reactionMapping['rxn_description']
self.reactionMapping['rxn_equation']=irm.reactionMapping['rxn_equation']
self.reactionMapping['used_']=irm.reactionMapping['used_']
self.reactionMapping['comment_']=irm.reactionMapping['comment_']
for reactant_id_cnt,reactant_id in enumerate(irm.reactionMapping['reactants_ids_tracked']):
self.reactionMapping['reactants_stoichiometry_tracked'].append(irm.reactionMapping['reactants_stoichiometry_tracked'][reactant_id_cnt])
self.reactionMapping['reactants_ids_tracked'].append(irm.reactionMapping['reactants_ids_tracked'][reactant_id_cnt])
self.reactionMapping['reactants_elements_tracked'].append(irm.reactionMapping['reactants_elements_tracked'][reactant_id_cnt])
self.reactionMapping['reactants_positions_tracked'].append(irm.reactionMapping['reactants_positions_tracked'][reactant_id_cnt])
self.reactionMapping['reactants_mapping'].append(irm.reactionMapping['reactants_mapping'][reactant_id_cnt])
for product_id_cnt,product_id in enumerate(irm.reactionMapping['products_ids_tracked']):
self.reactionMapping['products_stoichiometry_tracked'].append(irm.reactionMapping['products_stoichiometry_tracked'][product_id_cnt])
self.reactionMapping['products_ids_tracked'].append(irm.reactionMapping['products_ids_tracked'][product_id_cnt])
self.reactionMapping['products_elements_tracked'].append(irm.reactionMapping['products_elements_tracked'][product_id_cnt])
self.reactionMapping['products_positions_tracked'].append(irm.reactionMapping['products_positions_tracked'][product_id_cnt])
self.reactionMapping['products_mapping'].append(irm.reactionMapping['products_mapping'][product_id_cnt])
self.make_reactantsAndProductsMetaboliteMappings(reactionMapping_I=irm.reactionMapping);
def make_trackedBinaryReaction(self,mapping_id_I,model_id_I,rxn_id_I,reactant_ids_elements_I,product_id_I):
'''Make a binary reaction of the form A + B + ... = C'''
#Input
        #   reactant_ids_elements_I = [{met_id:element},...]
        #   product_id_I = met_id of the single condensation product
        # e.g. reactant_ids_elements_I = [{'f6p_c':'C'},{'ac_c':'C'},{'utp_c':'C'}]
        # e.g. irm.make_trackedBinaryReaction('full04','140407_iDM2014','rxn01',reactant_ids_elements_I,'uacgam_c')
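        # A hedged sketch of the result: each reactant row contributes its tracked atoms with
        # stoichiometry -1.0, and the single product receives the concatenated mapping of all
        # reactant atoms with stoichiometry 1.0 (via make_compoundTrackedMetabolite).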
imm = stage02_isotopomer_metaboliteMapping();
# get unique met_ids
reactant_ids_all = [];
for row in reactant_ids_elements_I:
for k,v in row.items():
reactant_ids_all.append(k);
reactant_ids_unique = list(set(reactant_ids_all))
reactant_ids_cnt = {};
for reactant_id in reactant_ids_unique:
reactant_ids_cnt[reactant_id] = 0;
# make the reactants mapping
reactants_stoichiometry_tracked_O = [];
reactants_ids_tracked_O = [];
reactants_elements_tracked_O = [];
reactants_positions_tracked_O = [];
reactants_mapping_O = [];
reactants_metaboliteMappings_O = [];
for row in reactant_ids_elements_I:
for k,v in row.items():
imm.make_trackedMetabolite(mapping_id_I,model_id_I,{k:v},reactant_ids_cnt[k]);
reactants_elements_tracked_O.append(imm.metaboliteMapping['met_elements']);
reactants_positions_tracked_O.append(imm.metaboliteMapping['met_atompositions']);
reactants_mapping_O.append(imm.convert_arrayMapping2StringMapping());
reactants_stoichiometry_tracked_O.append(-1.0);
reactants_ids_tracked_O.append(k);
reactants_metaboliteMappings_O.append(copy(imm.copy_metaboliteMapping()));
imm.clear_metaboliteMapping()
reactant_ids_cnt[k]+=1
# make the products mapping
products_stoichiometry_tracked_O = [];
products_ids_tracked_O = [];
products_elements_tracked_O = [];
products_positions_tracked_O = [];
products_mapping_O = [];
products_metaboliteMappings_O = [];
if product_id_I:
imm.make_compoundTrackedMetabolite(mapping_id_I,model_id_I,reactant_ids_elements_I,product_id_I);
products_elements_tracked_O.append(imm.metaboliteMapping['met_elements']);
products_positions_tracked_O.append(imm.metaboliteMapping['met_atompositions']);
products_mapping_O.append(imm.convert_arrayMapping2StringMapping());
products_stoichiometry_tracked_O.append(1.0);
products_ids_tracked_O.append(product_id_I);
products_metaboliteMappings_O.append(copy(imm.copy_metaboliteMapping()));
# save the reaction
self.reactionMapping['mapping_id']=mapping_id_I
self.reactionMapping['rxn_id']=rxn_id_I
self.reactionMapping['rxn_description']=None
self.reactionMapping['reactants_stoichiometry_tracked']=reactants_stoichiometry_tracked_O
self.reactionMapping['products_stoichiometry_tracked']=products_stoichiometry_tracked_O
self.reactionMapping['reactants_ids_tracked']=reactants_ids_tracked_O
self.reactionMapping['products_ids_tracked']=products_ids_tracked_O
self.reactionMapping['reactants_elements_tracked']=reactants_elements_tracked_O
self.reactionMapping['products_elements_tracked']=products_elements_tracked_O
self.reactionMapping['reactants_positions_tracked']=reactants_positions_tracked_O
self.reactionMapping['products_positions_tracked']=products_positions_tracked_O
self.reactionMapping['reactants_mapping']=reactants_mapping_O
self.reactionMapping['products_mapping']=products_mapping_O
self.reactionMapping['rxn_equation']=None
self.reactionMapping['used_']=True
self.reactionMapping['comment_']=None
self.reactionMapping['reactants_metaboliteMappings']=reactants_metaboliteMappings_O
self.reactionMapping['products_metaboliteMappings']=products_metaboliteMappings_O
def make_trackedCompoundReaction(self,mapping_id_I,model_id_I,rxn_id_I,reactant_ids_elements_I,base_reactant_positions_I,base_reactant_indices_I,compound_product_id_I,base_product_ids_elements_I,base_product_ids_O):
'''Make a compound tracked reaction
1. make compound product
2. remove specified base products from compound product
3. update the compound product
4. rename the base products
5. append base products to products list'''
#Input
# reactant_ids_elements_I = [{met_id:elements},...]
# base_reactant_positions_I = [{met_id_reactant:position},...] #Note: must be listed in order (positions of the reactant to be partitioned)
        #   base_reactant_indices_I = [{met_id_product:index in base_met_ids},...] #Note: must be listed in order (positions of the reactant to be partitioned)
        #       the index refers to the position of the base met_id in the reactant to be partitioned
        #   compound_product_id_I = met_id of the compound product (None to omit it from the products)
# base_product_ids_elements_I = [{met_id:elements},...] #Note: must be listed in order
# base_product_ids_O = [met_id_new,...] #Note: must be listed in order
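        # A hedged usage sketch (reaction and ids are illustrative, not from the source): if
        # 'uacgam_c' was previously assembled from 'f6p_c', 'ac_c' and 'utp_c', then
        #   irm.make_trackedCompoundReaction('full04','140407_iDM2014','rxn02',
        #       [{'uacgam_c':'C'}],[{'uacgam_c':0}],[{'utp_c':0}],
        #       'acgam1p_c',[{'utp_c':'C'}],['udp_c'])
        # keeps the remaining carbons as 'acgam1p_c' and emits the utp carbon skeleton as 'udp_c'.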
imm = stage02_isotopomer_metaboliteMapping();
imm_product = stage02_isotopomer_metaboliteMapping();
# initialize the structure to track the base_met_ids
reactant_ids_all = [];
for k in self.reactionMapping['reactants_ids_tracked']:
reactant_ids_all.append(k);
reactant_ids_unique = list(set(reactant_ids_all))
reactant_ids_cnt = {};
for reactant_id in reactant_ids_unique:
reactant_ids_cnt[reactant_id] = 0;
for reactant_id in reactant_ids_all:
reactant_ids_cnt[reactant_id]+=1;
# initialize the count for unique base_met_ids
reactants_base_met_ids = [];
reactants_base_indices = [];
for cnt,mm in enumerate(self.reactionMapping['reactants_metaboliteMappings']):
reactants_base_met_ids.extend(mm.metaboliteMapping['base_met_ids'])
reactants_base_indices.extend(self.reactionMapping['reactants_metaboliteMappings'][cnt].metaboliteMapping['base_met_indices'])
reactants_base_met_ids_I = [];
# get unique reactants_base_met_ids
reactants_base_met_ids_unique = list(set(reactants_base_met_ids));
reactants_base_met_ids_cnt = {};
for base_met_id in reactants_base_met_ids_unique:
reactants_base_met_ids_cnt[base_met_id]=0;
for cnt,base_met_id in enumerate(reactants_base_met_ids):
reactants_base_met_ids_cnt[base_met_id]=reactants_base_indices[cnt]+1
# make the reactants mapping
imm_product.metaboliteMapping['mapping_id'] = mapping_id_I
imm_product.metaboliteMapping['base_met_ids']=[];
imm_product.metaboliteMapping['base_met_elements']=[];
imm_product.metaboliteMapping['base_met_atompositions']=[];
imm_product.metaboliteMapping['base_met_symmetry_elements']=[];
imm_product.metaboliteMapping['base_met_symmetry_atompositions']=[];
imm_product.metaboliteMapping['base_met_indices']=[];
        # initialize the counter for matched input rows
matched_cnt = 0;
for row_cnt,row in enumerate(reactant_ids_elements_I):
for k,v in row.items():
# initialize new metabolites
if not k in list(reactant_ids_cnt.keys()):
reactant_ids_cnt[k]=0
# make the metabolite mapping
imm.make_trackedMetabolite(mapping_id_I,model_id_I,{k:v},reactant_ids_cnt[k]);
#update the counter for unique met_ids
reactant_ids_cnt[k]+=1
# update base_metabolites from the database for reactant that will be partitioned
base_found = False;
if matched_cnt < len(base_reactant_positions_I):
for k1,v1 in base_reactant_positions_I[matched_cnt].items(): #there will be only 1 key-value pair
if k1 == k and row_cnt == v1:
imm.get_baseMetabolites();
imm.update_trackedMetabolite_fromBaseMetabolites(model_id_I);
base_found = True;
break;
# assign new indices for each base metabolite based on the current indices in the reactants
base_met_indices_tmp = copy(imm.metaboliteMapping['base_met_indices']);
for cnt1,met_id1 in enumerate(imm.metaboliteMapping['base_met_ids']):
# initialize new base metabolites
if not met_id1 in list(reactants_base_met_ids_cnt.keys()):
reactants_base_met_ids_cnt[met_id1]=0;
# assign the next current base_metabolite_index
imm.metaboliteMapping['base_met_indices'][cnt1]=reactants_base_met_ids_cnt[met_id1]
# update the base_reactant_indices_I if the corresponding base_met_index was changed
if matched_cnt < len(base_reactant_positions_I):
for k1,v1 in base_reactant_positions_I[matched_cnt].items(): #there will be only 1 key-value pair
if k1 == k and row_cnt == v1: # does the met_id and position in the reactant list match?
for k2,v2 in base_reactant_indices_I[matched_cnt].items():
if k2==met_id1 and v2==base_met_indices_tmp[cnt1]: # does the base_met_id and previous index match?
base_reactant_indices_I[matched_cnt][k2]=imm.metaboliteMapping['base_met_indices'][cnt1];
reactants_base_met_ids_cnt[met_id1]+=1;
# update counter for matched input
if base_found: matched_cnt+=1;
# update met_mapping
imm.update_trackedMetabolite_fromBaseMetabolites(model_id_I);
# add in the new metaboliteMapping information
self.reactionMapping['reactants_elements_tracked'].append(imm.metaboliteMapping['met_elements']);
self.reactionMapping['reactants_positions_tracked'].append(imm.metaboliteMapping['met_atompositions']);
self.reactionMapping['reactants_mapping'].append(imm.convert_arrayMapping2StringMapping());
self.reactionMapping['reactants_stoichiometry_tracked'].append(-1.0);
self.reactionMapping['reactants_ids_tracked'].append(k);
self.reactionMapping['reactants_metaboliteMappings'].append(copy(imm.copy_metaboliteMapping()));
self.reactants_base_met_ids.extend(imm.metaboliteMapping['base_met_ids']);
self.reactants_base_met_elements.extend(imm.metaboliteMapping['base_met_elements']);
self.reactants_base_met_atompositions.extend(imm.metaboliteMapping['base_met_atompositions']);
#self.reactants_base_met_symmetry_elements.extend(imm.metaboliteMapping['base_met_symmetry_elements']);
#self.reactants_base_met_symmetry_atompositions.extend(imm.metaboliteMapping['base_met_symmetry_atompositions']);
self.reactants_base_met_indices.extend(imm.metaboliteMapping['base_met_indices']);
# copy out all of the base information for the product
imm_product.metaboliteMapping['base_met_ids'].extend(imm.metaboliteMapping['base_met_ids']);
imm_product.metaboliteMapping['base_met_elements'].extend(imm.metaboliteMapping['base_met_elements']);
imm_product.metaboliteMapping['base_met_atompositions'].extend(imm.metaboliteMapping['base_met_atompositions']);
#imm_product.metaboliteMapping['base_met_symmetry_elements'].extend(imm.metaboliteMapping['base_met_symmetry_elements']);
#imm_product.metaboliteMapping['base_met_symmetry_atompositions'].extend(imm.metaboliteMapping['base_met_symmetry_atompositions']);
imm_product.metaboliteMapping['base_met_indices'].extend(imm.metaboliteMapping['base_met_indices']);
#
imm.clear_metaboliteMapping()
# make the initial compound product mapping
imm_product.update_trackedMetabolite_fromBaseMetabolites(model_id_I)
imm_product.metaboliteMapping['met_id']=compound_product_id_I;
# extract out the products from the compound product
base_products = [];
for cnt,row in enumerate(base_product_ids_elements_I):
for k,v in row.items():
base_products.append(imm_product.extract_baseMetabolite_fromMetabolite(model_id_I,{k:v},base_reactant_indices_I[cnt][k]));
# remove the base_products from the compound product
for cnt,row in enumerate(base_product_ids_elements_I):
for k,v in row.items():
imm_product.remove_baseMetabolite_fromMetabolite(model_id_I,{k:v},met_id_O=compound_product_id_I,met_index_I=base_reactant_indices_I[cnt][k]);
# make the final products
if compound_product_id_I: imm_final_products = [imm_product];
else: imm_final_products = [];
for d in base_products:
imm_final_products.append(d);
if compound_product_id_I: imm_final_products_ids = [compound_product_id_I];
else: imm_final_products_ids = [];
for id in base_product_ids_O:
imm_final_products_ids.append(id);
for cnt,d in enumerate(imm_final_products):
self.reactionMapping['products_elements_tracked'].append(d.metaboliteMapping['met_elements']);
self.reactionMapping['products_positions_tracked'].append(d.metaboliteMapping['met_atompositions']);
self.reactionMapping['products_mapping'].append(d.convert_arrayMapping2StringMapping());
self.reactionMapping['products_stoichiometry_tracked'].append(1.0);
self.reactionMapping['products_ids_tracked'].append(imm_final_products_ids[cnt]);
self.reactionMapping['products_metaboliteMappings'].append(copy(d.copy_metaboliteMapping()));
# save the reaction
self.reactionMapping['mapping_id']=mapping_id_I
self.reactionMapping['rxn_id']=rxn_id_I
self.reactionMapping['rxn_description']=None
self.reactionMapping['rxn_equation']=None
self.reactionMapping['used_']=True
self.reactionMapping['comment_']=None
def make_trackedCompoundReaction_fromMetaboliteMappings(self,mapping_id_I,model_id_I,rxn_id_I,reactant_metaboliteMappings_I,base_reactant_positions_I,base_reactant_indices_I,compound_product_id_I,base_product_ids_elements_I,base_product_ids_O):
'''Make a compound tracked reaction
1. make compound product
2. remove specified base products from compound product
3. update the compound product
4. rename the base products
5. append base products to products list'''
#Input
# reactant_metaboliteMappings_I = [mm_1,mm_2,...]
# base_reactant_positions_I = [{met_id_reactant:position},...] #Note: must be listed in order (positions of the reactant to be partitioned)
        #   base_reactant_indices_I = [{met_id_product:index in base_met_ids},...] #Note: must be listed in order (positions of the reactant to be partitioned)
        #       the index refers to the position of the base met_id in the reactant to be partitioned
        #   compound_product_id_I = met_id of the compound product (None to omit it from the products)
# base_product_ids_elements_I = [{met_id:elements},...] #Note: must be listed in order
# base_product_ids_O = [met_id_new,...] #Note: must be listed in order
imm_product = stage02_isotopomer_metaboliteMapping();
# initialize the structure to track the base_met_ids
reactant_ids_all = [];
for k in self.reactionMapping['reactants_ids_tracked']:
reactant_ids_all.append(k);
reactant_ids_unique = list(set(reactant_ids_all))
reactant_ids_cnt = {};
for reactant_id in reactant_ids_unique:
reactant_ids_cnt[reactant_id] = 0;
for reactant_id in reactant_ids_all:
reactant_ids_cnt[reactant_id]+=1;
# initialize the count for unique base_met_ids
reactants_base_met_ids = [];
reactants_base_indices = [];
for cnt,mm in enumerate(self.reactionMapping['reactants_metaboliteMappings']):
reactants_base_met_ids.extend(mm.metaboliteMapping['base_met_ids'])
reactants_base_indices.extend(self.reactionMapping['reactants_metaboliteMappings'][cnt].metaboliteMapping['base_met_indices'])
reactants_base_met_ids_I = [];
# get unique reactants_base_met_ids
reactants_base_met_ids_unique = list(set(reactants_base_met_ids));
reactants_base_met_ids_cnt = {};
for base_met_id in reactants_base_met_ids_unique:
reactants_base_met_ids_cnt[base_met_id]=0;
for cnt,base_met_id in enumerate(reactants_base_met_ids):
reactants_base_met_ids_cnt[base_met_id]=reactants_base_indices[cnt]+1
# make the reactants mapping
imm_product.metaboliteMapping['mapping_id'] = mapping_id_I
imm_product.metaboliteMapping['base_met_ids']=[];
imm_product.metaboliteMapping['base_met_elements']=[];
imm_product.metaboliteMapping['base_met_atompositions']=[];
imm_product.metaboliteMapping['base_met_symmetry_elements']=[];
imm_product.metaboliteMapping['base_met_symmetry_atompositions']=[];
imm_product.metaboliteMapping['base_met_indices']=[];
        # initialize the counter for matched input rows
matched_cnt = 0;
for row_cnt,imm in enumerate(reactant_metaboliteMappings_I):
# initialize new metabolites
if not imm.metaboliteMapping['met_id'] in list(reactant_ids_cnt.keys()):
reactant_ids_cnt[imm.metaboliteMapping['met_id']]=0
            # the metabolite mapping is provided directly, so it does not need to be remade
            #update the counter for unique met_ids
reactant_ids_cnt[imm.metaboliteMapping['met_id']]+=1
# update base_metabolites from the database for reactant that will be partitioned
base_found = False;
if matched_cnt < len(base_reactant_positions_I):
for k1,v1 in base_reactant_positions_I[matched_cnt].items(): #there will be only 1 key-value pair
if k1 == imm.metaboliteMapping['met_id'] and row_cnt == v1:
base_found = True;
break;
# assign new indices for each base metabolite based on the current indices in the reactants
base_met_indices_tmp = copy(imm.metaboliteMapping['base_met_indices']);
for cnt1,met_id1 in enumerate(imm.metaboliteMapping['base_met_ids']):
# initialize new base metabolites
if not met_id1 in list(reactants_base_met_ids_cnt.keys()):
reactants_base_met_ids_cnt[met_id1]=0;
# assign the next current base_metabolite_index
imm.metaboliteMapping['base_met_indices'][cnt1]=reactants_base_met_ids_cnt[met_id1]
# update the base_reactant_indices_I if the corresponding base_met_index was changed
if matched_cnt < len(base_reactant_positions_I):
for k1,v1 in base_reactant_positions_I[matched_cnt].items(): #there will be only 1 key-value pair
if k1 == imm.metaboliteMapping['met_id'] and row_cnt == v1: # does the met_id and position in the reactant list match?
for k2,v2 in base_reactant_indices_I[matched_cnt].items():
if k2==met_id1 and v2==base_met_indices_tmp[cnt1]: # does the base_met_id and previous index match?
base_reactant_indices_I[matched_cnt][k2]=imm.metaboliteMapping['base_met_indices'][cnt1];
reactants_base_met_ids_cnt[met_id1]+=1;
# update counter for matched input
if base_found: matched_cnt+=1;
# update met_mapping
imm.update_trackedMetabolite_fromBaseMetabolites(model_id_I);
# add in the new metaboliteMapping information
self.reactionMapping['reactants_elements_tracked'].append(imm.metaboliteMapping['met_elements']);
self.reactionMapping['reactants_positions_tracked'].append(imm.metaboliteMapping['met_atompositions']);
self.reactionMapping['reactants_mapping'].append(imm.convert_arrayMapping2StringMapping());
self.reactionMapping['reactants_stoichiometry_tracked'].append(-1.0);
self.reactionMapping['reactants_ids_tracked'].append(imm.metaboliteMapping['met_id']);
self.reactionMapping['reactants_metaboliteMappings'].append(copy(imm.copy_metaboliteMapping()));
self.reactants_base_met_ids.extend(imm.metaboliteMapping['base_met_ids']);
self.reactants_base_met_elements.extend(imm.metaboliteMapping['base_met_elements']);
self.reactants_base_met_atompositions.extend(imm.metaboliteMapping['base_met_atompositions']);
#self.reactants_base_met_symmetry_elements.extend(imm.metaboliteMapping['base_met_symmetry_elements']);
#self.reactants_base_met_symmetry_atompositions.extend(imm.metaboliteMapping['base_met_symmetry_atompositions']);
self.reactants_base_met_indices.extend(imm.metaboliteMapping['base_met_indices']);
# copy out all of the base information for the product
imm_product.metaboliteMapping['base_met_ids'].extend(imm.metaboliteMapping['base_met_ids']);
imm_product.metaboliteMapping['base_met_elements'].extend(imm.metaboliteMapping['base_met_elements']);
imm_product.metaboliteMapping['base_met_atompositions'].extend(imm.metaboliteMapping['base_met_atompositions']);
#imm_product.metaboliteMapping['base_met_symmetry_elements'].extend(imm.metaboliteMapping['base_met_symmetry_elements']);
#imm_product.metaboliteMapping['base_met_symmetry_atompositions'].extend(imm.metaboliteMapping['base_met_symmetry_atompositions']);
imm_product.metaboliteMapping['base_met_indices'].extend(imm.metaboliteMapping['base_met_indices']);
# make the initial compound product mapping
imm_product.update_trackedMetabolite_fromBaseMetabolites(model_id_I)
imm_product.metaboliteMapping['met_id']=compound_product_id_I;
# extract out the products from the compound product
base_products = [];
for cnt,row in enumerate(base_product_ids_elements_I):
for k,v in row.items():
base_products.append(imm_product.extract_baseMetabolite_fromMetabolite(model_id_I,{k:v},base_reactant_indices_I[cnt][k]));
# remove the base_products from the compound product
for cnt,row in enumerate(base_product_ids_elements_I):
for k,v in row.items():
imm_product.remove_baseMetabolite_fromMetabolite(model_id_I,{k:v},met_id_O=compound_product_id_I,met_index_I=base_reactant_indices_I[cnt][k]);
# make the final products
if compound_product_id_I: imm_final_products = [imm_product];
else: imm_final_products = [];
for d in base_products:
imm_final_products.append(d);
if compound_product_id_I: imm_final_products_ids = [compound_product_id_I];
else: imm_final_products_ids = [];
for id in base_product_ids_O:
imm_final_products_ids.append(id);
for cnt,d in enumerate(imm_final_products):
self.reactionMapping['products_elements_tracked'].append(d.metaboliteMapping['met_elements']);
self.reactionMapping['products_positions_tracked'].append(d.metaboliteMapping['met_atompositions']);
self.reactionMapping['products_mapping'].append(d.convert_arrayMapping2StringMapping());
self.reactionMapping['products_stoichiometry_tracked'].append(1.0);
self.reactionMapping['products_ids_tracked'].append(imm_final_products_ids[cnt]);
self.reactionMapping['products_metaboliteMappings'].append(copy(d.copy_metaboliteMapping()));
# save the reaction
self.reactionMapping['mapping_id']=mapping_id_I
self.reactionMapping['rxn_id']=rxn_id_I
self.reactionMapping['rxn_description']=None
self.reactionMapping['rxn_equation']=None
self.reactionMapping['used_']=True
self.reactionMapping['comment_']=None
def make_trackedUnitaryReactions(self,mapping_id_I,model_id_I,rxn_id_I,reactant_ids_elements_I,product_ids_I):
'''Make a unitary reaction of the form aA = bB where the coefficient a = b'''
#Input
        #   reactant_ids_elements_I = [{met_id:element},...]
        #   product_ids_I = [met_id,...] #Note: must be listed in the same order as the reactants
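        # A hedged example (ids are illustrative): a transport step can be written as
        #   irm.make_trackedUnitaryReactions('full04','140407_iDM2014','GLCt',[{'glc_DASH_D_e':'C'}],['glc_DASH_D_c'])
        # each product inherits its reactant's mapping unchanged; only the met_id is renamed.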
# check input
if len(reactant_ids_elements_I)!=len(product_ids_I):
print("length of reactants_ids does not match the length of products_ids");
return;
imm = stage02_isotopomer_metaboliteMapping();
# get unique met_ids
reactant_ids_all = [];
for row in reactant_ids_elements_I:
for k,v in row.items():
reactant_ids_all.append(k);
reactant_ids_unique = list(set(reactant_ids_all))
reactant_ids_cnt = {};
for reactant_id in reactant_ids_unique:
reactant_ids_cnt[reactant_id] = 0;
# make the reactants mapping
reactants_stoichiometry_tracked_O = [];
reactants_ids_tracked_O = [];
reactants_elements_tracked_O = [];
reactants_positions_tracked_O = [];
reactants_mapping_O = [];
reactants_metaboliteMappings_O = [];
for row in reactant_ids_elements_I:
for k,v in row.items():
imm.make_trackedMetabolite(mapping_id_I,model_id_I,{k:v},reactant_ids_cnt[k]);
reactants_elements_tracked_O.append(imm.metaboliteMapping['met_elements']);
reactants_positions_tracked_O.append(imm.metaboliteMapping['met_atompositions']);
reactants_mapping_O.append(imm.convert_arrayMapping2StringMapping());
                reactants_stoichiometry_tracked_O.append(-1.0);
reactants_ids_tracked_O.append(k);
reactants_metaboliteMappings_O.append(copy(imm.copy_metaboliteMapping()));
imm.clear_metaboliteMapping()
reactant_ids_cnt[k]+=1
# make the products mapping
products_stoichiometry_tracked_O = [];
products_ids_tracked_O = [];
products_elements_tracked_O = [];
products_positions_tracked_O = [];
products_mapping_O = [];
products_metaboliteMappings_O = [];
for product_cnt,product in enumerate(product_ids_I):
products_elements_tracked_O.append(reactants_elements_tracked_O[product_cnt]);
products_positions_tracked_O.append(reactants_positions_tracked_O[product_cnt]);
products_mapping_O.append(reactants_mapping_O[product_cnt]);
products_stoichiometry_tracked_O.append(abs(reactants_stoichiometry_tracked_O[product_cnt]));
products_ids_tracked_O.append(product);
imm_tmp = copy(reactants_metaboliteMappings_O[product_cnt].copy_metaboliteMapping());
imm_tmp.metaboliteMapping['met_id']=product; # change the name
products_metaboliteMappings_O.append(imm_tmp);
# save the reaction
self.reactionMapping['mapping_id']=mapping_id_I
self.reactionMapping['rxn_id']=rxn_id_I
self.reactionMapping['rxn_description']=None
self.reactionMapping['reactants_stoichiometry_tracked']=reactants_stoichiometry_tracked_O
self.reactionMapping['products_stoichiometry_tracked']=products_stoichiometry_tracked_O
self.reactionMapping['reactants_ids_tracked']=reactants_ids_tracked_O
self.reactionMapping['products_ids_tracked']=products_ids_tracked_O
self.reactionMapping['reactants_elements_tracked']=reactants_elements_tracked_O
self.reactionMapping['products_elements_tracked']=products_elements_tracked_O
self.reactionMapping['reactants_positions_tracked']=reactants_positions_tracked_O
self.reactionMapping['products_positions_tracked']=products_positions_tracked_O
self.reactionMapping['reactants_mapping']=reactants_mapping_O
self.reactionMapping['products_mapping']=products_mapping_O
self.reactionMapping['rxn_equation']=None
self.reactionMapping['used_']=True
self.reactionMapping['comment_']=None
self.reactionMapping['reactants_metaboliteMappings']=reactants_metaboliteMappings_O
self.reactionMapping['products_metaboliteMappings']=products_metaboliteMappings_O
def make_reverseReaction(self,rxn_id_I=None):
'''Make the reverse of the current reaction'''
forward_reactionMapping = {}
forward_reactionMapping['mapping_id']=self.reactionMapping['mapping_id']
forward_reactionMapping['rxn_id']=self.reactionMapping['rxn_id']
forward_reactionMapping['rxn_description']=self.reactionMapping['rxn_description']
forward_reactionMapping['reactants_stoichiometry_tracked']=self.reactionMapping['reactants_stoichiometry_tracked']
forward_reactionMapping['products_stoichiometry_tracked']=self.reactionMapping['products_stoichiometry_tracked']
forward_reactionMapping['reactants_ids_tracked']=self.reactionMapping['reactants_ids_tracked']
forward_reactionMapping['products_ids_tracked']=self.reactionMapping['products_ids_tracked']
forward_reactionMapping['reactants_elements_tracked']=self.reactionMapping['reactants_elements_tracked']
forward_reactionMapping['products_elements_tracked']=self.reactionMapping['products_elements_tracked']
forward_reactionMapping['reactants_positions_tracked']=self.reactionMapping['reactants_positions_tracked']
forward_reactionMapping['products_positions_tracked']=self.reactionMapping['products_positions_tracked']
forward_reactionMapping['reactants_mapping']=self.reactionMapping['reactants_mapping']
forward_reactionMapping['products_mapping']=self.reactionMapping['products_mapping']
forward_reactionMapping['rxn_equation']=self.reactionMapping['rxn_equation']
forward_reactionMapping['used_']=self.reactionMapping['used_']
forward_reactionMapping['comment_']=self.reactionMapping['comment_']
forward_reactionMapping['reactants_metaboliteMappings']=self.reactionMapping['reactants_metaboliteMappings']
forward_reactionMapping['products_metaboliteMappings']=self.reactionMapping['products_metaboliteMappings']
reverse_reactionMapping = {}
reverse_reactionMapping['mapping_id']=self.reactionMapping['mapping_id']
if rxn_id_I: reverse_reactionMapping['rxn_id']=rxn_id_I
else: reverse_reactionMapping['rxn_id']=self.reactionMapping['rxn_id']
reverse_reactionMapping['rxn_description']=self.reactionMapping['rxn_description']
reverse_reactionMapping['reactants_stoichiometry_tracked']=[-s for s in self.reactionMapping['products_stoichiometry_tracked']]
reverse_reactionMapping['products_stoichiometry_tracked']=[-s for s in self.reactionMapping['reactants_stoichiometry_tracked']]
reverse_reactionMapping['reactants_ids_tracked']=self.reactionMapping['products_ids_tracked']
reverse_reactionMapping['products_ids_tracked']=self.reactionMapping['reactants_ids_tracked']
reverse_reactionMapping['reactants_elements_tracked']=self.reactionMapping['products_elements_tracked']
reverse_reactionMapping['products_elements_tracked']=self.reactionMapping['reactants_elements_tracked']
reverse_reactionMapping['reactants_positions_tracked']=self.reactionMapping['products_positions_tracked']
reverse_reactionMapping['products_positions_tracked']=self.reactionMapping['reactants_positions_tracked']
reverse_reactionMapping['reactants_mapping']=self.reactionMapping['products_mapping']
reverse_reactionMapping['products_mapping']=self.reactionMapping['reactants_mapping']
reverse_reactionMapping['rxn_equation']=self.reactionMapping['rxn_equation']
reverse_reactionMapping['used_']=self.reactionMapping['used_']
reverse_reactionMapping['comment_']=self.reactionMapping['comment_']
reverse_reactionMapping['reactants_metaboliteMappings']=self.reactionMapping['products_metaboliteMappings']
reverse_reactionMapping['products_metaboliteMappings']=self.reactionMapping['reactants_metaboliteMappings']
self.reactionMapping = reverse_reactionMapping;
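    # Hedged usage sketch: after get_reactionMapping(mapping_id,'ENO') (illustrative ids),
    #   irm.make_reverseReaction(rxn_id_I='ENO_reverse')
    # swaps the reactant and product lists and negates the tracked stoichiometries, so the
    # reverse direction can be stored as its own atom-mapping row via add_reactionMapping.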
def add_reactionMapping(self,
mapping_id_I=None,
rxn_id_I=None,
rxn_description_I=None,
reactants_stoichiometry_tracked_I=[],
products_stoichiometry_tracked_I=[],
reactants_ids_tracked_I=[],
products_ids_tracked_I=[],
reactants_elements_tracked_I=[],
products_elements_tracked_I=[],
reactants_positions_tracked_I=[],
products_positions_tracked_I=[],
reactants_mapping_I=[],
products_mapping_I=[],
rxn_equation_I=None,
used__I=None,
comment__I=None):
if mapping_id_I: self.reactionMapping['mapping_id']=mapping_id_I
if rxn_id_I: self.reactionMapping['rxn_id']=rxn_id_I
if rxn_description_I: self.reactionMapping['rxn_description']=rxn_description_I
if reactants_stoichiometry_tracked_I: self.reactionMapping['reactants_stoichiometry_tracked']=reactants_stoichiometry_tracked_I
if products_stoichiometry_tracked_I: self.reactionMapping['products_stoichiometry_tracked']=products_stoichiometry_tracked_I
if reactants_ids_tracked_I: self.reactionMapping['reactants_ids_tracked']=reactants_ids_tracked_I
if products_ids_tracked_I: self.reactionMapping['products_ids_tracked']=products_ids_tracked_I
if reactants_elements_tracked_I: self.reactionMapping['reactants_elements_tracked']=reactants_elements_tracked_I
if products_elements_tracked_I: self.reactionMapping['products_elements_tracked']=products_elements_tracked_I
if reactants_positions_tracked_I: self.reactionMapping['reactants_positions_tracked']=reactants_positions_tracked_I
if products_positions_tracked_I: self.reactionMapping['products_positions_tracked']=products_positions_tracked_I
if reactants_mapping_I: self.reactionMapping['reactants_mapping']=reactants_mapping_I
if products_mapping_I: self.reactionMapping['products_mapping']=products_mapping_I
if rxn_equation_I: self.reactionMapping['rxn_equation']=rxn_equation_I
if used__I: self.reactionMapping['used_']=used__I
if comment__I: self.reactionMapping['comment_']=comment__I
# add data to the database
self.stage02_isotopomer_query.add_data_dataStage02IsotopomerAtomMappingReactions([self.reactionMapping])
def add_productMapping(self,product_ids_I):
'''Add newly made products to the atomMappingMetabolite table for future use'''
for product in self.reactionMapping['products_metaboliteMappings']:
if product.metaboliteMapping['met_id'] in product_ids_I:
product.add_metaboliteMapping();
def update_productMapping(self,product_ids_I):
        '''Update newly made products in the atomMappingMetabolites table for future use'''
for product in self.reactionMapping['products_metaboliteMappings']:
if product.metaboliteMapping['met_id'] in product_ids_I:
product.update_metaboliteMapping();
def update_reactionMapping(self,
mapping_id_I=None,
rxn_id_I=None,
rxn_description_I=None,
reactants_stoichiometry_tracked_I=[],
products_stoichiometry_tracked_I=[],
reactants_ids_tracked_I=[],
products_ids_tracked_I=[],
reactants_elements_tracked_I=[],
products_elements_tracked_I=[],
reactants_positions_tracked_I=[],
products_positions_tracked_I=[],
reactants_mapping_I=[],
products_mapping_I=[],
rxn_equation_I=None,
used__I=None,
comment__I=None):
if mapping_id_I: self.reactionMapping['mapping_id']=mapping_id_I
if rxn_id_I: self.reactionMapping['rxn_id']=rxn_id_I
if rxn_description_I: self.reactionMapping['rxn_description']=rxn_description_I
if reactants_stoichiometry_tracked_I: self.reactionMapping['reactants_stoichiometry_tracked']=reactants_stoichiometry_tracked_I
if products_stoichiometry_tracked_I: self.reactionMapping['products_stoichiometry_tracked']=products_stoichiometry_tracked_I
if reactants_ids_tracked_I: self.reactionMapping['reactants_ids_tracked']=reactants_ids_tracked_I
if products_ids_tracked_I: self.reactionMapping['products_ids_tracked']=products_ids_tracked_I
if reactants_elements_tracked_I: self.reactionMapping['reactants_elements_tracked']=reactants_elements_tracked_I
if products_elements_tracked_I: self.reactionMapping['products_elements_tracked']=products_elements_tracked_I
if reactants_positions_tracked_I: self.reactionMapping['reactants_positions_tracked']=reactants_positions_tracked_I
if products_positions_tracked_I: self.reactionMapping['products_positions_tracked']=products_positions_tracked_I
if reactants_mapping_I: self.reactionMapping['reactants_mapping']=reactants_mapping_I
if products_mapping_I: self.reactionMapping['products_mapping']=products_mapping_I
if rxn_equation_I: self.reactionMapping['rxn_equation']=rxn_equation_I
if used__I: self.reactionMapping['used_']=used__I
if comment__I: self.reactionMapping['comment_']=comment__I
self.stage02_isotopomer_query.update_rows_dataStage02IsotopomerAtomMappingReactions([self.reactionMapping]);
def get_reactionMapping(self,mapping_id_I,rxn_id_I):
row = {};
row = self.stage02_isotopomer_query.get_row_mappingIDAndRxnID_dataStage02IsotopomerAtomMappingReactions(mapping_id_I,rxn_id_I);
self.reactionMapping = row;
self.reactionMapping['reactants_metaboliteMappings']=[]
self.reactionMapping['products_metaboliteMappings']=[]
self.make_reactantsAndProductsMetaboliteMappings();
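    # Usage sketch (hypothetical ids; assumes the row exists in atomMappingReactions):
    # irm = stage02_isotopomer_reactionMapping();
    # irm.get_reactionMapping('ecoli_mapping01','PGI');
    # # irm.reactionMapping now holds the database row plus freshly constructed
    # # reactants_metaboliteMappings/products_metaboliteMappings objects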
def make_reactantsAndProductsMetaboliteMappings(self,reactionMapping_I=None):
'''Make reactants and products metabolite mapping from atomMappingReaction information'''
#Input:
# reactionMapping_I = row of atomMappingReactions
        #       default: None, use the current self.reactionMapping
if reactionMapping_I: reactionMapping_tmp = reactionMapping_I;
else: reactionMapping_tmp = self.reactionMapping;
for cnt,met in enumerate(reactionMapping_tmp['reactants_ids_tracked']):
imm = stage02_isotopomer_metaboliteMapping(mapping_id_I=reactionMapping_tmp['mapping_id'],
met_id_I=met,
met_elements_I=reactionMapping_tmp['reactants_elements_tracked'][cnt],
met_atompositions_I=reactionMapping_tmp['reactants_positions_tracked'][cnt],
met_symmetry_elements_I=[],
met_symmetry_atompositions_I=[],
used__I=True,
comment__I=None,
met_mapping_I=reactionMapping_tmp['reactants_mapping'][cnt],
base_met_ids_I=[],
base_met_elements_I=[],
base_met_atompositions_I=[],
base_met_symmetry_elements_I=[],
base_met_symmetry_atompositions_I=[],
base_met_indices_I=[]);
self.reactionMapping['reactants_metaboliteMappings'].append(copy(imm.copy_metaboliteMapping()));
for cnt,met in enumerate(reactionMapping_tmp['products_ids_tracked']):
imm = stage02_isotopomer_metaboliteMapping(mapping_id_I=reactionMapping_tmp['mapping_id'],
met_id_I=met,
met_elements_I=reactionMapping_tmp['products_elements_tracked'][cnt],
met_atompositions_I=reactionMapping_tmp['products_positions_tracked'][cnt],
met_symmetry_elements_I=[],
met_symmetry_atompositions_I=[],
used__I=True,
comment__I=None,
met_mapping_I=reactionMapping_tmp['products_mapping'][cnt],
base_met_ids_I=[],
base_met_elements_I=[],
base_met_atompositions_I=[],
base_met_symmetry_elements_I=[],
base_met_symmetry_atompositions_I=[],
base_met_indices_I=[]);
self.reactionMapping['products_metaboliteMappings'].append(copy(imm.copy_metaboliteMapping()));
def clear_reactionMapping(self):
self.reactionMapping={}
self.reactionMapping['mapping_id']=None
self.reactionMapping['rxn_id']=None
self.reactionMapping['rxn_description']=None
self.reactionMapping['reactants_stoichiometry_tracked']=[]
self.reactionMapping['products_stoichiometry_tracked']=[]
self.reactionMapping['reactants_ids_tracked']=[]
self.reactionMapping['products_ids_tracked']=[]
self.reactionMapping['reactants_elements_tracked']=[]
self.reactionMapping['products_elements_tracked']=[]
self.reactionMapping['reactants_positions_tracked']=[]
self.reactionMapping['products_positions_tracked']=[]
self.reactionMapping['reactants_mapping']=[]
self.reactionMapping['products_mapping']=[]
self.reactionMapping['rxn_equation']=None
self.reactionMapping['used_']=True
self.reactionMapping['comment_']=None
self.reactionMapping['reactants_metaboliteMappings']=[]
self.reactionMapping['products_metaboliteMappings']=[]
self.reactants_base_met_ids=[];
self.reactants_base_met_elements=[];
self.reactants_base_met_atompositions=[];
self.reactants_base_met_symmetry_elements=[];
self.reactants_base_met_symmetry_atompositions=[];
self.reactants_base_met_indices=[];
self.products_base_met_ids=[];
self.products_base_met_elements=[];
self.products_base_met_atompositions=[];
self.products_base_met_symmetry_elements=[];
self.products_base_met_symmetry_atompositions=[];
self.products_base_met_indices=[];
def checkAndCorrect_elementsAndPositions(self):
'''Check that the reactant/product elements/positions are consistent with the
reactants/products ids_tracked; if they are not, correct them'''
# check that elements/positions are initialized
if not self.reactionMapping['reactants_elements_tracked']:
self.reactionMapping['reactants_elements_tracked']=[];
for cnt,reactant_id in enumerate(self.reactionMapping['reactants_ids_tracked']):
self.reactionMapping['reactants_elements_tracked'].append([]);
if not self.reactionMapping['reactants_positions_tracked']:
self.reactionMapping['reactants_positions_tracked']=[];
for cnt,reactant_id in enumerate(self.reactionMapping['reactants_ids_tracked']):
self.reactionMapping['reactants_positions_tracked'].append([]);
# check that the length of the elements/positions match the length of the ids_tracked
#TODO...
# check each elements/positions
for cnt,reactant_id in enumerate(self.reactionMapping['reactants_ids_tracked']):
# get the metabolite data from the database
met_data = {}
            met_data = self.stage02_isotopomer_query.get_rows_mappingIDAndMetID_dataStage02IsotopomerAtomMappingMetabolites(self.reactionMapping['mapping_id'],reactant_id);
            if not met_data:
                print('metabolite mapping not found for reactant ' + reactant_id);
                continue;
if len(met_data['met_elements'])!=len(self.reactionMapping['reactants_elements_tracked'][cnt]):
self.reactionMapping['reactants_elements_tracked'][cnt]=met_data['met_elements'];
if len(met_data['met_atompositions'])!=len(self.reactionMapping['reactants_positions_tracked'][cnt]):
self.reactionMapping['reactants_positions_tracked'][cnt]=met_data['met_atompositions'];
# check that elements/positions are initialized
if not self.reactionMapping['products_elements_tracked']:
self.reactionMapping['products_elements_tracked']=[];
for cnt,product_id in enumerate(self.reactionMapping['products_ids_tracked']):
self.reactionMapping['products_elements_tracked'].append([]);
if not self.reactionMapping['products_positions_tracked']:
self.reactionMapping['products_positions_tracked']=[];
for cnt,product_id in enumerate(self.reactionMapping['products_ids_tracked']):
self.reactionMapping['products_positions_tracked'].append([]);
# check that the length of the elements/positions match the length of the ids_tracked
#TODO...
# check each elements/positions
for cnt,product_id in enumerate(self.reactionMapping['products_ids_tracked']):
# get the metabolite data from the database
met_data = {}
            met_data = self.stage02_isotopomer_query.get_rows_mappingIDAndMetID_dataStage02IsotopomerAtomMappingMetabolites(self.reactionMapping['mapping_id'],product_id);
            if not met_data:
                print('metabolite mapping not found for product ' + product_id);
                continue;
if len(met_data['met_elements'])!=len(self.reactionMapping['products_elements_tracked'][cnt]):
self.reactionMapping['products_elements_tracked'][cnt]=met_data['met_elements'];
if len(met_data['met_atompositions'])!=len(self.reactionMapping['products_positions_tracked'][cnt]):
self.reactionMapping['products_positions_tracked'][cnt]=met_data['met_atompositions'];
def add_balanceProducts(self,unbalanced_met_I=None,unbalanced_met_position_I=None,unbalanced_met_positions_tracked_I=[],make_lumped_unbalanced_met_I=False,make_unique_unbalanced_mets_I=True):
        '''Add pseudo metabolites to the products in order to elementally balance the tracked reaction'''
#Input:
# unbalanced_met_I = reactant_id that is not elementally balanced
# unbalanced_met_position_I = position of the reactant_id in the reactants_list
# unbalanced_met_positions_tracked_I = positions of the elements that are not elementally balanced
# make_lumped_unbalanced_met_I = boolean,
# automatically detect mappings that are not elementally balanced and make an unbalanced product metabolite to balance all elementally unbalanced reactants
        #       NOTE: only valid when every unbalanced reactant has a stoichiometry of 1
# make_unique_unbalanced_mets_I = boolean,
# automatically detect mappings/metabolites that are not elementally balanced and makes unbalanced product mappings/metabolites to balance each elementally unbalanced reactant mapping/metabolite
if make_lumped_unbalanced_met_I:
#TODO: check that all unbalanced reactants have a stoichiometry of 1
balance_met = self.reactionMapping['rxn_id'] + '_' + 'balance_c' + '.balance';
reactants_mappings = []; #list of a list
products_mappings = []; #list
# extract out reactants and products mappings
for imm in self.reactionMapping['reactants_metaboliteMappings']:
reactant_mapping=[];
reactant_mapping = imm.convert_stringMapping2ArrayMapping();
reactants_mappings.append(reactant_mapping);
for imm in self.reactionMapping['products_metaboliteMappings']:
product_mapping=[];
product_mapping = imm.convert_stringMapping2ArrayMapping();
products_mappings.extend(product_mapping);
# find unbalanced reactant_mappings and
# make the product mapping, positions, and elements
product_mapping = [];
product_positions_tracked = [];
product_elements_tracked = [];
product_cnt = 0;
for reactant_cnt,reactants_mapping in enumerate(reactants_mappings):
for element_cnt,reactant_mapping in enumerate(reactants_mapping):
if not reactant_mapping in products_mappings:
product_mapping.append(reactant_mapping);
product_elements_tracked.append(self.reactionMapping['reactants_elements_tracked'][reactant_cnt][element_cnt]);
product_positions_tracked.append(product_cnt);
product_cnt += 1;
imm = stage02_isotopomer_metaboliteMapping(mapping_id_I=self.reactionMapping['mapping_id'],
met_id_I=balance_met,
met_elements_I=product_elements_tracked,
met_atompositions_I=product_positions_tracked,
met_symmetry_elements_I=[],
met_symmetry_atompositions_I=[],
used__I=True,
comment__I=None,
met_mapping_I=product_mapping,
base_met_ids_I=[],
base_met_elements_I=[],
base_met_atompositions_I=[],
base_met_symmetry_elements_I=[],
base_met_symmetry_atompositions_I=[],
base_met_indices_I=[]);
# add balance metabolite to the products
self.reactionMapping['products_ids_tracked'].append(balance_met);
self.reactionMapping['products_mapping'].append(imm.convert_arrayMapping2StringMapping());
self.reactionMapping['products_positions_tracked'].append(product_positions_tracked);
self.reactionMapping['products_stoichiometry_tracked'].append(1);
self.reactionMapping['products_elements_tracked'].append(product_elements_tracked);
self.reactionMapping['products_metaboliteMappings'].append(copy(imm.copy_metaboliteMapping()));
elif make_unique_unbalanced_mets_I:
products_mappings = []; #list
# extract out products mappings
for imm in self.reactionMapping['products_metaboliteMappings']:
product_mapping=[];
product_mapping = imm.convert_stringMapping2ArrayMapping();
products_mappings.extend(product_mapping);
# check each reactant mapping/metabolite
for reactant_pos,imm in enumerate(self.reactionMapping['reactants_metaboliteMappings']):
reactant_mapping=[];
reactant_mapping = imm.convert_stringMapping2ArrayMapping();
# find missing mappings
product_mapping = [];
product_positions_tracked = [];
product_elements_tracked = [];
balance_met = None;
product_cnt = 0;
for mapping_pos,mapping in enumerate(reactant_mapping):
if mapping not in products_mappings:
balance_met = self.reactionMapping['rxn_id'] + '_' + self.reactionMapping['reactants_ids_tracked'][reactant_pos] + '_' + str(reactant_pos) + '.balance';
product_mapping.append(mapping);
#product_positions_tracked.append(self.reactionMapping['reactants_positions_tracked'][reactant_pos][mapping_pos]);
product_positions_tracked.append(product_cnt);
product_elements_tracked.append(self.reactionMapping['reactants_elements_tracked'][reactant_pos][mapping_pos]);
product_cnt += 1;
if balance_met:
imm = stage02_isotopomer_metaboliteMapping(mapping_id_I=self.reactionMapping['mapping_id'],
met_id_I=balance_met,
met_elements_I=product_elements_tracked,
met_atompositions_I=product_positions_tracked,
met_symmetry_elements_I=[],
met_symmetry_atompositions_I=[],
used__I=True,
comment__I=None,
met_mapping_I=product_mapping,
base_met_ids_I=[],
base_met_elements_I=[],
base_met_atompositions_I=[],
base_met_symmetry_elements_I=[],
base_met_symmetry_atompositions_I=[],
base_met_indices_I=[]);
# add balance metabolite to the products
self.reactionMapping['products_ids_tracked'].append(balance_met);
self.reactionMapping['products_mapping'].append(imm.convert_arrayMapping2StringMapping());
self.reactionMapping['products_positions_tracked'].append(product_positions_tracked);
self.reactionMapping['products_elements_tracked'].append(product_elements_tracked);
self.reactionMapping['products_metaboliteMappings'].append(copy(imm.copy_metaboliteMapping()));
self.reactionMapping['products_stoichiometry_tracked'].append(abs(self.reactionMapping['reactants_stoichiometry_tracked'][reactant_pos]));
# use user specifications
else:
# find the position of the tracked metabolite
            if unbalanced_met_I in self.reactionMapping['reactants_ids_tracked']:
if unbalanced_met_position_I: unbalanced_met_pos = unbalanced_met_position_I;
else: unbalanced_met_pos = self.reactionMapping['reactants_ids_tracked'].index(unbalanced_met_I);
balance_met = self.reactionMapping['rxn_id'] + '_' + unbalanced_met_I + '_' + str(unbalanced_met_pos) + '.balance';
# extract out mapping, positions, and elements
reactant_mapping = self.reactionMapping['reactants_metaboliteMappings'][unbalanced_met_pos].convert_stringMapping2ArrayMapping();
reactant_positions_tracked = self.reactionMapping['reactants_positions_tracked'][unbalanced_met_pos];
reactant_elements_tracked = self.reactionMapping['reactants_elements_tracked'][unbalanced_met_pos];
# make the product mapping, positions, and elements
product_mapping = [];
product_positions_tracked = [];
product_elements_tracked = [];
if unbalanced_met_positions_tracked_I:
for pos_cnt,pos in enumerate(unbalanced_met_positions_tracked_I):
product_mapping.append(reactant_mapping[pos]);
product_positions_tracked.append(pos_cnt);
product_elements_tracked.append(reactant_elements_tracked[pos]);
else:
product_mapping=reactant_mapping
product_positions_tracked=reactant_positions_tracked
product_elements_tracked=reactant_elements_tracked
imm = stage02_isotopomer_metaboliteMapping(mapping_id_I=self.reactionMapping['mapping_id'],
met_id_I=balance_met,
met_elements_I=product_elements_tracked,
met_atompositions_I=product_positions_tracked,
met_symmetry_elements_I=[],
met_symmetry_atompositions_I=[],
used__I=True,
comment__I=None,
met_mapping_I=product_mapping,
base_met_ids_I=[],
base_met_elements_I=[],
base_met_atompositions_I=[],
base_met_symmetry_elements_I=[],
base_met_symmetry_atompositions_I=[],
base_met_indices_I=[]);
# add balance metabolite to the products
self.reactionMapping['products_ids_tracked'].append(balance_met);
self.reactionMapping['products_mapping'].append(imm.convert_arrayMapping2StringMapping());
self.reactionMapping['products_positions_tracked'].append(product_positions_tracked);
self.reactionMapping['products_elements_tracked'].append(product_elements_tracked);
self.reactionMapping['products_metaboliteMappings'].append(copy(imm.copy_metaboliteMapping()));
self.reactionMapping['products_stoichiometry_tracked'].append(1);
else:
print('unbalanced metabolite not found!')
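    # Usage sketches for the three balancing modes above (hypothetical ids):
    # one lumped pseudo-product for all unbalanced reactants:
    #   irm.add_balanceProducts(make_lumped_unbalanced_met_I=True,make_unique_unbalanced_mets_I=False);
    # one pseudo-product per unbalanced reactant mapping (the default):
    #   irm.add_balanceProducts();
    # balance a single reactant explicitly, keeping only its first three tracked atoms:
    #   irm.add_balanceProducts(unbalanced_met_I='accoa_c',unbalanced_met_positions_tracked_I=[0,1,2],
    #       make_unique_unbalanced_mets_I=False);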
def check_elementalBalance(self):
'''
        1. Check that the number of elements tracked in the reactants matches the number of elements tracked
in the products
2. Check that the reactant positions tracked match the reactant elements tracked'''
#Output:
# reactants_positions_tracked_cnt
# products_positions_tracked_cnt
#check reactants
reactants_positions_tracked_cnt = 0;
for reactant_cnt,reactant in enumerate(self.reactionMapping['reactants_ids_tracked']):
print('checking reactant ' + reactant);
# check that the reactant positions == reactant elements
if len(self.reactionMapping['reactants_positions_tracked'][reactant_cnt])!=len(self.reactionMapping['reactants_elements_tracked'][reactant_cnt]):
print('inconsistent reactants_positions and reactants_elements');
continue;
reactants_positions_tracked_cnt += len(self.reactionMapping['reactants_positions_tracked'][reactant_cnt]);
#check products
products_positions_tracked_cnt = 0;
for product_cnt,product in enumerate(self.reactionMapping['products_ids_tracked']):
print('checking product ' + product);
# check that the product positions == product elements
if len(self.reactionMapping['products_positions_tracked'][product_cnt])!=len(self.reactionMapping['products_elements_tracked'][product_cnt]):
print('inconsistent products_positions and products_elements');
continue;
products_positions_tracked_cnt += len(self.reactionMapping['products_positions_tracked'][product_cnt]);
        #record
        if reactants_positions_tracked_cnt!=products_positions_tracked_cnt:
            print('the tracked reaction is not elementally balanced');
        return reactants_positions_tracked_cnt,products_positions_tracked_cnt;
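    # Usage sketch: the two returned counts are equal iff the tracked reaction is
    # elementally balanced:
    # n_reactants,n_products = irm.check_elementalBalance();
    # if n_reactants!=n_products: print('consider adding a pseudo-metabolite via add_balanceProducts');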
def check_reactionMapping(self):
'''
        1. Check that the number of elements tracked in the reactants matches the number of elements tracked
in the products
2. Check that the reactant positions tracked match the reactant elements tracked
3. Check that the mappings are 1-to-1
4. Check that the elements/positions/mappings are of the same length
5. Check that the stoichiometry and ids tracked are of the same length'''
#Output:
# reactants_positions_tracked_cnt
# products_positions_tracked_cnt
#checks:
reactants_ids_stoichiometry_check = True;
reactants_elements_positions_check = True;
reactants_elements_mapping_check = True;
reactants_positions_mapping_check = True;
products_ids_stoichiometry_check = True;
products_elements_positions_check = True;
products_elements_mapping_check = True;
products_positions_mapping_check = True;
element_balance_check = True;
mapping_check = True;
#check reactants
reactants_positions_tracked_cnt = 0;
reactants_elements_tracked_cnt = 0;
reactants_mappings_cnt = 0;
reactants_stoichiometry_cnt = 0;
reactants_ids_cnt = 0;
reactants_mappings = [];
# check that the reactant stoichiometry == reactant ids
if len(self.reactionMapping['reactants_ids_tracked'])!=len(self.reactionMapping['reactants_stoichiometry_tracked']):
print('inconsistent reactants_stoichiometry_tracked and reactants_ids_tracked');
reactants_ids_stoichiometry_check = False;
reactants_ids_cnt += len(self.reactionMapping['reactants_ids_tracked']);
reactants_stoichiometry_cnt += len(self.reactionMapping['reactants_stoichiometry_tracked']);
# check elemental balance
for reactant_cnt,reactant in enumerate(self.reactionMapping['reactants_ids_tracked']):
print('checking reactant elemental balance ' + reactant);
reactant_mapping=[];
reactant_mapping = self.reactionMapping['reactants_metaboliteMappings'][reactant_cnt].convert_stringMapping2ArrayMapping();
# check that the reactant positions == reactant elements
if len(self.reactionMapping['reactants_positions_tracked'][reactant_cnt])!=len(self.reactionMapping['reactants_elements_tracked'][reactant_cnt]):
print('inconsistent reactants_positions and reactants_elements');
reactants_elements_positions_check = False;
# check that the reactant positions == reactant mapping
if len(self.reactionMapping['reactants_positions_tracked'][reactant_cnt])!=len(reactant_mapping):
print('inconsistent reactants_positions and reactants_mapping');
                reactants_positions_mapping_check = False;
# check that the reactant elements == reactant mapping
if len(self.reactionMapping['reactants_elements_tracked'][reactant_cnt])!=len(reactant_mapping):
print('inconsistent reactants_elements and reactants_mapping');
                reactants_elements_mapping_check = False;
reactants_positions_tracked_cnt += len(self.reactionMapping['reactants_positions_tracked'][reactant_cnt]);
reactants_elements_tracked_cnt += len(self.reactionMapping['reactants_elements_tracked'][reactant_cnt]);
reactants_mappings_cnt += len(reactant_mapping);
reactants_mappings.append(reactant_mapping);
#check products
products_positions_tracked_cnt = 0;
products_elements_tracked_cnt = 0;
products_mappings_cnt = 0;
products_stoichiometry_cnt = 0;
products_ids_cnt = 0;
products_mappings = [];
# check that the product stoichiometry == product ids
if len(self.reactionMapping['products_ids_tracked'])!=len(self.reactionMapping['products_stoichiometry_tracked']):
print('inconsistent products_stoichiometry_tracked and products_ids_tracked');
products_ids_stoichiometry_check = False;
products_ids_cnt += len(self.reactionMapping['products_ids_tracked']);
products_stoichiometry_cnt += len(self.reactionMapping['products_stoichiometry_tracked']);
# check elemental balance
for product_cnt,product in enumerate(self.reactionMapping['products_ids_tracked']):
print('checking product elemental balance ' + product);
product_mapping=[];
product_mapping = self.reactionMapping['products_metaboliteMappings'][product_cnt].convert_stringMapping2ArrayMapping();
# check that the product positions == product elements
if len(self.reactionMapping['products_positions_tracked'][product_cnt])!=len(self.reactionMapping['products_elements_tracked'][product_cnt]):
print('inconsistent products_positions and products_elements');
products_elements_positions_check = False;
# check that the product positions == product mapping
if len(self.reactionMapping['products_positions_tracked'][product_cnt])!=len(product_mapping):
print('inconsistent products_positions and products_mapping');
                products_positions_mapping_check = False;
# check that the product elements == product mapping
if len(self.reactionMapping['products_elements_tracked'][product_cnt])!=len(product_mapping):
print('inconsistent products_elements and products_mapping');
                products_elements_mapping_check = False;
products_positions_tracked_cnt += len(self.reactionMapping['products_positions_tracked'][product_cnt]);
products_elements_tracked_cnt += len(self.reactionMapping['products_elements_tracked'][product_cnt]);
products_mappings_cnt += len(product_mapping);
products_mappings.append(product_mapping);
#check elemental balance
if reactants_positions_tracked_cnt != products_positions_tracked_cnt:
print('the length of reactants_positions_tracked does not match the length of products_positions_tracked');
element_balance_check = False;
if reactants_elements_tracked_cnt != products_elements_tracked_cnt:
            print('the length of reactants_elements_tracked does not match the length of products_elements_tracked');
element_balance_check = False;
if reactants_mappings_cnt != products_mappings_cnt:
print('the length of reactants_mapping does not match the length of products_mapping');
element_balance_check = False;
#check 1-to-1 mapping
reactants_mappings_list = [];
for reactants_mapping in reactants_mappings:
reactants_mappings_list.extend(reactants_mapping);
# check for duplicate reactant mappings
reactants_mappings_unique = list(set(reactants_mappings_list));
if len(reactants_mappings_list)!=len(reactants_mappings_unique):
print('duplicate reactants_mappings found');
mapping_check = False;
products_mappings_list = [];
for products_mapping in products_mappings:
products_mappings_list.extend(products_mapping);
# check for duplicate product mappings
products_mappings_unique = list(set(products_mappings_list));
if len(products_mappings_list)!=len(products_mappings_unique):
print('duplicate products_mappings found');
mapping_check = False;
# check that each product mapping has a matching reactant mapping, and vice versa
for reactant_cnt,reactant in enumerate(reactants_mappings):
print('checking reactant mapping ' + self.reactionMapping['reactants_ids_tracked'][reactant_cnt]);
for mapping_cnt,mapping in enumerate(reactant):
if not mapping in products_mappings_list:
print('no mapping found for reactant mapping ' + mapping + ' and position ' + str(mapping_cnt));
mapping_check = False;
for product_cnt,product in enumerate(products_mappings):
print('checking product mapping ' + self.reactionMapping['products_ids_tracked'][product_cnt]);
for mapping_cnt,mapping in enumerate(product):
if not mapping in reactants_mappings_list:
print('no mapping found for product mapping ' + mapping + ' and position ' + str(mapping_cnt));
mapping_check = False;
if not element_balance_check or not mapping_check:
print('check reaction mapping');
return reactants_ids_stoichiometry_check,reactants_elements_positions_check,reactants_elements_mapping_check,reactants_positions_mapping_check,\
products_ids_stoichiometry_check,products_elements_positions_check,products_elements_mapping_check,products_positions_mapping_check,\
element_balance_check,mapping_check;
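    # Usage sketch: all ten returned flags must be True for the mapping to pass QC:
    # checks = irm.check_reactionMapping();
    # if not all(checks): print('reaction mapping failed QC');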
def clear_elementsAndPositions(self):
'''Clear the reactants/products elements/positions'''
self.reactionMapping['reactants_elements_tracked']=None;
self.reactionMapping['reactants_positions_tracked']=None;
self.reactionMapping['products_elements_tracked']=None;
self.reactionMapping['products_positions_tracked']=None;
class stage02_isotopomer_mappingUtilities():
def __init__(self):
self.stage02_isotopomer_query = stage02_isotopomer_query();
def make_missingMetaboliteMappings(self,experiment_id_I,model_id_I=[],mapping_id_rxns_I=[],mapping_id_mets_I=[],mapping_id_new_I=None):
        '''Make atom mapping metabolites from atom mapping reactions and QC the atom mapping reactions;
        optionally create a new set of metabolite mappings that correspond to the current reaction mappings that still need to be QC/QA'd'''
#Input:
# experiment_id_I = experiment_id
# model_id_I = model_id
        #   mapping_id_rxns_I = reaction mapping ids (by default, also the atomMappingMetabolites mapping id that new metabolites are added to)
# mapping_id_mets_I = existing metabolite mappings to use when making the new metabolite mappings
# mapping_id_new_I = name of mapping id for the new metabolite mappings
#Output:
# default: new metabolite mappings will be added for the mapping id of the reactions
# existing metabolite mappings will not be added
# mapping_id_new_I != None: new metabolite mappings will be added for the mapping id specified
#get model ids:
if model_id_I:
model_ids = model_id_I;
else:
model_ids = [];
model_ids = self.stage02_isotopomer_query.get_modelID_experimentID_dataStage02IsotopomerSimulation(experiment_id_I);
for model_id in model_ids:
#get mapping ids
if mapping_id_rxns_I and mapping_id_mets_I:
mapping_ids_rxns=mapping_id_rxns_I;
mapping_ids_mets=mapping_id_mets_I;
elif mapping_id_rxns_I:
mapping_ids_rxns=mapping_id_rxns_I;
else:
mapping_ids_rxns=[];
mapping_ids_rxns=self.stage02_isotopomer_query.get_mappingID_experimentIDAndModelID_dataStage02IsotopomerSimulation(experiment_id_I,model_id);
for mapping_cnt,mapping_id_rxns in enumerate(mapping_ids_rxns):
# get the metabolite mappings
if mapping_id_rxns_I and mapping_id_mets_I:
mappings=self.stage02_isotopomer_query.get_atomMappingMetabolites_mappingID_dataStage02IsotopomerAtomMappingReactionsAndAtomMappingMetabolites(mapping_id_rxns,mapping_ids_mets[mapping_cnt]);
else:
mappings = self.stage02_isotopomer_query.get_atomMappingMetabolites_mappingID_dataStage02IsotopomerAtomMappingReactions(mapping_id_rxns);
# remove duplicates
duplicate_ind = [];
for d1_cnt,d1 in enumerate(mappings):
for d2_cnt in range(d1_cnt+1,len(mappings)):
if d1['mapping_id'] == mappings[d2_cnt]['mapping_id'] and \
d1['met_id'] == mappings[d2_cnt]['met_id'] and \
d1['met_elements'] == mappings[d2_cnt]['met_elements'] and \
d1['met_atompositions'] == mappings[d2_cnt]['met_atompositions'] and \
d1['met_symmetry_elements'] == mappings[d2_cnt]['met_symmetry_elements'] and \
d1['met_symmetry_atompositions'] == mappings[d2_cnt]['met_symmetry_atompositions']:
duplicate_ind.append(d2_cnt);
duplicate_ind_unique=list(set(duplicate_ind));
# copy out unique metabolites
data_O = [];
for d1_cnt,d1 in enumerate(mappings):
if d1_cnt in duplicate_ind_unique:
continue;
else:
if mapping_id_new_I: d1['mapping_id']=mapping_id_new_I; # change to the new mapping
data_O.append(d1);
met_ids = [x['met_id'] for x in data_O];
met_ids_unique = list(set(met_ids));
data_mets_cnt = {};
for met in met_ids_unique:
data_mets_cnt[met] = 0;
for d in data_O:
data_mets_cnt[d['met_id']] += 1;
# add data to the database
if mapping_id_new_I:
self.stage02_isotopomer_query.add_data_dataStage02IsotopomerAtomMappingMetabolites(data_O);
else:
data_add_O = [];
for d in data_O:
# check to see if the metabolite is already in the database
mapping_row = {};
mapping_row = self.stage02_isotopomer_query.get_rows_mappingIDAndMetID_dataStage02IsotopomerAtomMappingMetabolites(mapping_id_rxns,d['met_id']);
if not mapping_row: data_add_O.append(d);
self.stage02_isotopomer_query.add_data_dataStage02IsotopomerAtomMappingMetabolites(data_add_O);
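    # Usage sketch (hypothetical experiment/model/mapping ids; assumes simulation
    # rows exist in the database):
    # imu = stage02_isotopomer_mappingUtilities();
    # imu.make_missingMetaboliteMappings('chemostat01',model_id_I=['iJO1366'],
    #     mapping_id_rxns_I=['ecoli_mapping01']);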
def make_missingReactionMappings(self,experiment_id_I,model_id_I=[],mapping_id_rxns_I=[],mapping_id_mets_I=[],mapping_id_new_I=None):
'''Update missing or incomplete reaction mappings for the current mapping from the matching metabolite mappings,
and optionally, from the previous reaction mappings'''
#Note: prior to running, remove all reaction mappings that are not used.
imm = stage02_isotopomer_metaboliteMapping();
data_O = [];
#get model ids:
if model_id_I:
model_ids = model_id_I;
else:
model_ids = [];
model_ids = self.stage02_isotopomer_query.get_modelID_experimentID_dataStage02IsotopomerSimulation(experiment_id_I);
for model_id in model_ids:
#get all reactions in the model:
reactions = [];
reactions = self.stage02_isotopomer_query.get_rows_modelID_dataStage02IsotopomerModelReactions(model_id);
#get mapping ids
if mapping_id_rxns_I and mapping_id_mets_I:
mapping_ids_rxns=mapping_id_rxns_I;
mapping_ids_mets=mapping_id_mets_I;
elif mapping_id_rxns_I:
mapping_ids_rxns=mapping_id_rxns_I;
else:
                mapping_ids_rxns=[];
                mapping_ids_rxns=self.stage02_isotopomer_query.get_mappingID_experimentIDAndModelID_dataStage02IsotopomerSimulation(experiment_id_I,model_id);
for mapping_cnt,mapping_id_rxns in enumerate(mapping_ids_rxns):
missing_reactions_O = [];
missing_metabolites_O = [];
for reaction_cnt,reaction in enumerate(reactions):
#get the current reaction mappings
mapping_rxns = [];
mapping_rxns = self.stage02_isotopomer_query.get_row_mappingIDAndRxnID_dataStage02IsotopomerAtomMappingReactions(mapping_id_rxns,reaction['rxn_id']);
#if mapping_rxns: # atom mapping for the reaction already exists and is used
# continue;
if mapping_id_new_I:
mapping_id_current = mapping_id_new_I;
else:
mapping_id_current = mapping_id_rxns;
data_tmp={'mapping_id':mapping_id_current,
'rxn_id':reaction['rxn_id'],
'rxn_description':None,
'reactants_stoichiometry_tracked':[],
'products_stoichiometry_tracked':[],
'reactants_ids_tracked':[],
'products_ids_tracked':[],
'reactants_mapping':[],
'products_mapping':[],
'rxn_equation':reaction['equation'],
'products_elements_tracked':[],
'products_positions_tracked':[],
'reactants_elements_tracked':[],
'reactants_positions_tracked':[],
'used_':True,
'comment_':''};
#check if the reactants or products are tracked
tracked_reactants = [];
for reactant in reaction['reactants_ids']:
tracked_reactant = {};
if mapping_id_mets_I:
tracked_reactant = self.stage02_isotopomer_query.get_rows_mappingIDAndMetID_dataStage02IsotopomerAtomMappingMetabolites(mapping_ids_mets[mapping_cnt],reactant);
else:
tracked_reactant = self.stage02_isotopomer_query.get_rows_mappingIDAndMetID_dataStage02IsotopomerAtomMappingMetabolites(mapping_id_rxns,reactant);
if tracked_reactant:
tracked_reactants.append(tracked_reactant);
tracked_products = [];
for product in reaction['products_ids']:
tracked_product = {};
if mapping_id_mets_I:
tracked_product = self.stage02_isotopomer_query.get_rows_mappingIDAndMetID_dataStage02IsotopomerAtomMappingMetabolites(mapping_ids_mets[mapping_cnt],product);
else:
tracked_product = self.stage02_isotopomer_query.get_rows_mappingIDAndMetID_dataStage02IsotopomerAtomMappingMetabolites(mapping_id_rxns,product);
if tracked_product:
tracked_products.append(tracked_product);
if tracked_reactants or tracked_products:
#check if the reaction is missing or is missing a tracked metabolite
tracked_reaction = {};
tracked_reaction = self.stage02_isotopomer_query.get_row_mappingIDAndRxnID_dataStage02IsotopomerAtomMappingReactions(mapping_id_rxns,reaction['rxn_id']);
if tracked_reaction:
missing_reactants = [];
# get the stoichiometry for each reactant
tracked_reaction_reactant_ids_stoich = {};
for tracked_reactant_id_cnt,tracked_reactant_id in enumerate(tracked_reaction['reactants_ids_tracked']):
tracked_reaction_reactant_ids_stoich[tracked_reactant_id] = 0;
for tracked_reactant_id_cnt,tracked_reactant_id in enumerate(tracked_reaction['reactants_ids_tracked']):
tracked_reaction_reactant_ids_stoich[tracked_reactant_id] += abs(tracked_reaction['reactants_stoichiometry_tracked'][tracked_reactant_id_cnt]);
#copy existing data
data_tmp['reactants_ids_tracked'].extend(tracked_reaction['reactants_ids_tracked']);
data_tmp['reactants_stoichiometry_tracked'].extend(tracked_reaction['reactants_stoichiometry_tracked']);
data_tmp['reactants_mapping'].extend(tracked_reaction['reactants_mapping']);
data_tmp['reactants_elements_tracked'].extend(tracked_reaction['reactants_elements_tracked']);
data_tmp['reactants_positions_tracked'].extend(tracked_reaction['reactants_positions_tracked']);
data_tmp['rxn_description']=tracked_reaction['rxn_description'];
for tracked_reactant in tracked_reactants:
if tracked_reactant['met_id'] in tracked_reaction['reactants_ids_tracked']:
# check for matching stoichiometry
reaction_stoich = 0;
for met_id_cnt,met_id in enumerate(reaction['reactants_ids']):
if met_id == tracked_reactant['met_id']:
reaction_stoich = abs(reaction['reactants_stoichiometry'][met_id_cnt]);
break;
unbalanced_stoich = reaction_stoich - tracked_reaction_reactant_ids_stoich[tracked_reactant['met_id']];
if tracked_reaction_reactant_ids_stoich[tracked_reactant['met_id']] != reaction_stoich:
for stoich_cnt in range(int(unbalanced_stoich)):
missing_reactants.append(tracked_reactant);
#add missing data
data_tmp['reactants_ids_tracked'].append(tracked_reactant['met_id']);
data_tmp['reactants_stoichiometry_tracked'].append(0);
imm.make_trackedMetabolite(mapping_id_rxns,model_id,{tracked_reactant['met_id']:tracked_reactant['met_elements'][0]},stoich_cnt)
new_mapping = imm.convert_arrayMapping2StringMapping();
imm.clear_metaboliteMapping();
data_tmp['reactants_mapping'].append(new_mapping);
#data_tmp['reactants_mapping'].append('');
data_tmp['reactants_elements_tracked'].append(tracked_reactant['met_elements']);
data_tmp['reactants_positions_tracked'].append(tracked_reactant['met_atompositions']);
data_tmp['rxn_description']=tracked_reaction['rxn_description'];
data_tmp['used_']=False;
data_tmp['comment_']+=tracked_reactant['met_id']+',';
else:
missing_reactants.append(tracked_reactant);
reaction_stoich = 0;
for met_id_cnt,met_id in enumerate(reaction['reactants_ids']):
if met_id == tracked_reactant['met_id']:
reaction_stoich = reaction['reactants_stoichiometry'][met_id_cnt];
break;
#add missing data
data_tmp['reactants_ids_tracked'].append(tracked_reactant['met_id']);
data_tmp['reactants_stoichiometry_tracked'].append(reaction_stoich);
imm.make_trackedMetabolite(mapping_id_rxns,model_id,{tracked_reactant['met_id']:tracked_reactant['met_elements'][0]},0)
new_mapping = imm.convert_arrayMapping2StringMapping();
imm.clear_metaboliteMapping();
data_tmp['reactants_mapping'].append(new_mapping);
#data_tmp['reactants_mapping'].append('');
data_tmp['reactants_elements_tracked'].append(tracked_reactant['met_elements']);
data_tmp['reactants_positions_tracked'].append(tracked_reactant['met_atompositions']);
data_tmp['rxn_description']=tracked_reaction['rxn_description'];
data_tmp['used_']=False;
data_tmp['comment_']+=tracked_reactant['met_id']+',';
missing_products = [];
# get the stoichiometry for each product
tracked_reaction_product_ids_stoich = {};
for tracked_product_id_cnt,tracked_product_id in enumerate(tracked_reaction['products_ids_tracked']):
tracked_reaction_product_ids_stoich[tracked_product_id] = 0;
for tracked_product_id_cnt,tracked_product_id in enumerate(tracked_reaction['products_ids_tracked']):
tracked_reaction_product_ids_stoich[tracked_product_id] += abs(tracked_reaction['products_stoichiometry_tracked'][tracked_product_id_cnt]);
#copy existing data
data_tmp['products_ids_tracked'].extend(tracked_reaction['products_ids_tracked']);
data_tmp['products_stoichiometry_tracked'].extend(tracked_reaction['products_stoichiometry_tracked']);
data_tmp['products_mapping'].extend(tracked_reaction['products_mapping']);
data_tmp['products_elements_tracked'].extend(tracked_reaction['products_elements_tracked']);
data_tmp['products_positions_tracked'].extend(tracked_reaction['products_positions_tracked']);
data_tmp['rxn_description']=tracked_reaction['rxn_description'];
for tracked_product in tracked_products:
if tracked_product['met_id'] in tracked_reaction['products_ids_tracked']:
# check for matching stoichiometry
reaction_stoich = 0;
for met_id_cnt,met_id in enumerate(reaction['products_ids']):
if met_id == tracked_product['met_id']:
reaction_stoich = abs(reaction['products_stoichiometry'][met_id_cnt]);
break;
unbalanced_stoich = reaction_stoich - tracked_reaction_product_ids_stoich[tracked_product['met_id']];
if tracked_reaction_product_ids_stoich[tracked_product['met_id']] != reaction_stoich:
for stoich_cnt in range(int(unbalanced_stoich)):
missing_products.append(tracked_product);
#add missing data
data_tmp['products_ids_tracked'].append(tracked_product['met_id']);
data_tmp['products_stoichiometry_tracked'].append(0);
imm.make_trackedMetabolite(mapping_id_rxns,model_id,{tracked_product['met_id']:tracked_product['met_elements'][0]},stoich_cnt)
new_mapping = imm.convert_arrayMapping2StringMapping();
imm.clear_metaboliteMapping();
data_tmp['products_mapping'].append(new_mapping);
#data_tmp['products_mapping'].append('');
data_tmp['products_elements_tracked'].append(tracked_product['met_elements']);
data_tmp['products_positions_tracked'].append(tracked_product['met_atompositions']);
data_tmp['rxn_description']=tracked_reaction['rxn_description'];
data_tmp['used_']=False;
data_tmp['comment_']+=tracked_product['met_id']+',';
else:
missing_products.append(tracked_product);
reaction_stoich = 0;
for met_id_cnt,met_id in enumerate(reaction['products_ids']):
if met_id == tracked_product['met_id']:
reaction_stoich = abs(reaction['products_stoichiometry'][met_id_cnt]);
break;
#add missing data
data_tmp['products_ids_tracked'].append(tracked_product['met_id']);
data_tmp['products_stoichiometry_tracked'].append(reaction_stoich);
imm.make_trackedMetabolite(mapping_id_rxns,model_id,{tracked_product['met_id']:tracked_product['met_elements'][0]},0)
new_mapping = imm.convert_arrayMapping2StringMapping();
imm.clear_metaboliteMapping();
data_tmp['products_mapping'].append(new_mapping);
#data_tmp['products_mapping'].append('');
data_tmp['products_elements_tracked'].append(tracked_product['met_elements']);
data_tmp['products_positions_tracked'].append(tracked_product['met_atompositions']);
data_tmp['rxn_description']=tracked_reaction['rxn_description'];
data_tmp['used_']=False;
data_tmp['comment_']+=tracked_product['met_id']+',';
if missing_reactants or missing_products:
tmp = {};
tmp = tracked_reaction;
tmp.update({'missing_reactants':missing_reactants});
tmp.update({'missing_products':missing_products});
tmp.update({'equation':reaction['equation']})
missing_metabolites_O.append(tmp);
else:
tmp = {};
tmp = reaction;
tmp.update({'tracked_reactants':tracked_reactants});
tmp.update({'tracked_products':tracked_products});
                            missing_reactions_O.append(tmp);
for tracked_reactant in tracked_reactants:
reaction_stoich = 0;
for met_id_cnt,met_id in enumerate(reaction['reactants_ids']):
if met_id == tracked_reactant['met_id']:
reaction_stoich = reaction['reactants_stoichiometry'][met_id_cnt];
break;
#add missing data
data_tmp['reactants_ids_tracked'].append(tracked_reactant['met_id']);
data_tmp['reactants_stoichiometry_tracked'].append(reaction_stoich);
imm.make_trackedMetabolite(mapping_id_rxns,model_id,{tracked_reactant['met_id']:tracked_reactant['met_elements'][0]},0)
new_mapping = imm.convert_arrayMapping2StringMapping();
imm.clear_metaboliteMapping();
data_tmp['reactants_mapping'].append(new_mapping);
#data_tmp['reactants_mapping'].append('');
data_tmp['reactants_elements_tracked'].append(tracked_reactant['met_elements']);
data_tmp['reactants_positions_tracked'].append(tracked_reactant['met_atompositions']);
data_tmp['rxn_description']=None;
data_tmp['used_']=False;
data_tmp['comment_']=reaction['rxn_id'];
for tracked_product in tracked_products:
reaction_stoich = 0;
for met_id_cnt,met_id in enumerate(reaction['products_ids']):
if met_id == tracked_product['met_id']:
reaction_stoich = abs(reaction['products_stoichiometry'][met_id_cnt]);
break;
#add missing data
data_tmp['products_ids_tracked'].append(tracked_product['met_id']);
data_tmp['products_stoichiometry_tracked'].append(reaction_stoich);
imm.make_trackedMetabolite(mapping_id_rxns,model_id,{tracked_product['met_id']:tracked_product['met_elements'][0]},0)
new_mapping = imm.convert_arrayMapping2StringMapping();
imm.clear_metaboliteMapping();
data_tmp['products_mapping'].append(new_mapping);
#data_tmp['products_mapping'].append('');
data_tmp['products_elements_tracked'].append(tracked_product['met_elements']);
data_tmp['products_positions_tracked'].append(tracked_product['met_atompositions']);
data_tmp['rxn_description']=None;
data_tmp['used_']=False;
data_tmp['comment_']=reaction['rxn_id'];
data_O.append(data_tmp);
            #self.print_missingReactionMappings(missing_reactions_O,missing_metabolites_O);
            #add data to the database:
            self.stage02_isotopomer_query.add_data_dataStage02IsotopomerAtomMappingReactions(data_O);
            return missing_reactions_O,missing_metabolites_O;
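    # Usage sketch (hypothetical ids): review what is still unmapped before using
    # the generated placeholder mappings:
    # missing_rxns,missing_mets = imu.make_missingReactionMappings('chemostat01',
    #     model_id_I=['iJO1366'],mapping_id_rxns_I=['ecoli_mapping01']);
    # imu.print_missingReactionMappings(missing_rxns,missing_mets);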
def print_missingReactionMappings(self,missing_reactions_I,missing_metabolites_I):
'''print missing reaction mappings to the screen'''
#missing reactions
script = '';
for missing_reaction in missing_reactions_I:
script+= missing_reaction['rxn_id']+'\t'+missing_reaction['equation']+'\t'+str(missing_reaction['reactants_ids'])+'\t'+str(missing_reaction['products_ids'])+'\t';
for tracked_reactant in missing_reaction['tracked_reactants']:
script+= tracked_reactant['met_id']+',';
script+= '\t'
for tracked_product in missing_reaction['tracked_products']:
script+= tracked_product['met_id']+',';
script+='\n'
print(script)
#missing metabolites
script = '';
for missing_metabolite in missing_metabolites_I:
script+= missing_metabolite['rxn_id']+'\t'+missing_metabolite['equation']+'\t'+str(missing_metabolite['reactants_ids_tracked'])+'\t'+str(missing_metabolite['products_ids_tracked'])+'\t';
for tracked_reactant in missing_metabolite['missing_reactants']:
script+= tracked_reactant['met_id']+',';
script+= '\t'
for tracked_product in missing_metabolite['missing_products']:
script+= tracked_product['met_id']+',';
script+='\n'
print(script)
def find_inconsistentMetaboliteMappings(self,experiment_id_I,model_id_I=[],mapping_id_I=[]):
'''Find inconsistencies in the atom mapping by comparing the metabolite information in
atomMappingMetabolites table to the atom mapping in the atomMappingReactions table'''
#Output:
# data_O = row of atomMappingReactions filled only with the inconsistent metabolite mapping information
# missing_mets_O = metabolites that are tracked in atomMappingReactions, but are not present in atomMappingMetabolites
data_O = [];
missing_mets_O = [];
#get model ids:
if model_id_I:
model_ids = model_id_I;
else:
model_ids = [];
model_ids = self.stage02_isotopomer_query.get_modelID_experimentID_dataStage02IsotopomerSimulation(experiment_id_I);
for model_id in model_ids:
print('checking model_id ' + model_id);
#get mapping ids
if mapping_id_I:
mapping_ids=mapping_id_I;
else:
mapping_ids=[];
mapping_ids=self.stage02_isotopomer_query.get_mappingID_experimentIDAndModelID_dataStage02IsotopomerSimulation(experiment_id_I,model_id);
for mapping_cnt,mapping_id in enumerate(mapping_ids):
print('checking mapping_id ' + mapping_id);
# get the reaction mapping
reaction_mappings = [];
reaction_mappings = self.stage02_isotopomer_query.get_rows_mappingID_dataStage02IsotopomerAtomMappingReactions(mapping_id);
for reaction_cnt,reaction_mapping in enumerate(reaction_mappings):
print('checking reaction ' + reaction_mapping['rxn_id']);
#check reactants
rxn_tmp = {};
rxn_tmp['mapping_id']=mapping_id
rxn_tmp['rxn_id']=reaction_mapping['rxn_id']
rxn_tmp['rxn_description']=reaction_mapping['rxn_description']
rxn_tmp['reactants_stoichiometry_tracked']=[]
rxn_tmp['products_stoichiometry_tracked']=[]
rxn_tmp['reactants_ids_tracked']=[]
rxn_tmp['products_ids_tracked']=[]
rxn_tmp['reactants_elements_tracked']=[]
rxn_tmp['products_elements_tracked']=[]
rxn_tmp['reactants_positions_tracked']=[]
rxn_tmp['products_positions_tracked']=[]
rxn_tmp['reactants_mapping']=[]
rxn_tmp['products_mapping']=[]
rxn_tmp['rxn_equation']=None
rxn_tmp['used_']=True
rxn_tmp['comment_']='Inconsistent metabolites found';
rxn_tmp['reactants_metaboliteMappings']=[]
rxn_tmp['products_metaboliteMappings']=[]
bad_reactant = False;
for reactant_cnt,reactant in enumerate(reaction_mapping['reactants_ids_tracked']):
print('checking reactant ' + reactant);
# get the metabolite mapping
metabolite_mapping = {};
metabolite_mapping = self.stage02_isotopomer_query.get_rows_mappingIDAndMetID_dataStage02IsotopomerAtomMappingMetabolites(mapping_id,reactant);
if not metabolite_mapping:
print('metabolite mapping not found')
missing_mets_O.append(reactant);
continue;
# check the reaction mapping
reactants_mapping = reaction_mapping['reactants_mapping'][reactant_cnt];
if '[' in reaction_mapping['reactants_mapping'][reactant_cnt]:
reactants_mapping = reaction_mapping['reactants_mapping'][reactant_cnt].split('][');
reactants_mapping = [m.replace('[','') for m in reactants_mapping];
reactants_mapping = [m.replace(']','') for m in reactants_mapping];
if len(metabolite_mapping['met_atompositions']) != len(reactants_mapping):
rxn_tmp['reactants_metaboliteMappings'].append(reaction_mapping['reactants_mapping'][reactant_cnt]);
print('bad reactants_metaboliteMappings');
bad_reactant = True;
                        # check the reaction positions tracked
if metabolite_mapping['met_atompositions'] != reaction_mapping['reactants_positions_tracked'][reactant_cnt]:
rxn_tmp['reactants_positions_tracked'].append(reaction_mapping['reactants_positions_tracked'][reactant_cnt]);
print('bad reactants_positions_tracked');
bad_reactant = True;
                        # check the reaction elements tracked
if metabolite_mapping['met_elements'] != reaction_mapping['reactants_elements_tracked'][reactant_cnt]:
rxn_tmp['reactants_elements_tracked'].append(reaction_mapping['reactants_elements_tracked'][reactant_cnt]);
print('bad reactants_elements_tracked');
bad_reactant = True;
if bad_reactant:
rxn_tmp['reactants_ids_tracked'].append(reactant);
rxn_tmp['reactants_stoichiometry_tracked'].append(reaction_mapping['reactants_stoichiometry_tracked'][reactant_cnt]);
#check products
bad_product = False;
for product_cnt,product in enumerate(reaction_mapping['products_ids_tracked']):
print('checking product ' + product);
# get the metabolite mapping
metabolite_mapping = {};
metabolite_mapping = self.stage02_isotopomer_query.get_rows_mappingIDAndMetID_dataStage02IsotopomerAtomMappingMetabolites(mapping_id,product);
if not metabolite_mapping:
print('metabolite mapping not found')
missing_mets_O.append(product);
continue;
# check the reaction mapping
products_mapping = reaction_mapping['products_mapping'][product_cnt];
if '[' in reaction_mapping['products_mapping'][product_cnt]:
products_mapping = reaction_mapping['products_mapping'][product_cnt].split('][');
products_mapping = [m.replace('[','') for m in products_mapping];
products_mapping = [m.replace(']','') for m in products_mapping];
if len(metabolite_mapping['met_atompositions']) != len(products_mapping):
rxn_tmp['products_metaboliteMappings'].append(reaction_mapping['products_mapping'][product_cnt]);
print('bad products_metaboliteMappings');
bad_product = True;
                        # check the reaction positions tracked
if metabolite_mapping['met_atompositions'] != reaction_mapping['products_positions_tracked'][product_cnt]:
rxn_tmp['products_positions_tracked'].append(reaction_mapping['products_positions_tracked'][product_cnt]);
print('bad products_positions_tracked');
bad_product = True;
                        # check the reaction elements tracked
if metabolite_mapping['met_elements'] != reaction_mapping['products_elements_tracked'][product_cnt]:
rxn_tmp['products_elements_tracked'].append(reaction_mapping['products_elements_tracked'][product_cnt]);
print('bad products_elements_tracked');
bad_product = True;
if bad_product:
rxn_tmp['products_ids_tracked'].append(product);
rxn_tmp['products_stoichiometry_tracked'].append(reaction_mapping['products_stoichiometry_tracked'][product_cnt]);
#record
if bad_reactant or bad_product:
data_O.append(rxn_tmp);
return data_O,missing_mets_O;
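    # Usage sketch (hypothetical ids):
    # bad_rows,missing_mets = imu.find_inconsistentMetaboliteMappings('chemostat01',
    #     model_id_I=['iJO1366'],mapping_id_I=['ecoli_mapping01']);
    # # bad_rows carries only the inconsistent fields of each offending reaction;
    # # missing_mets lists metabolites tracked in atomMappingReactions but absent
    # # from atomMappingMetabolites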
def find_unbalancedReactionMappings(self,experiment_id_I,model_id_I=[],mapping_id_I=[]):
'''Find reactions mappings that are not elementally balanced'''
#Output:
# unbalanced_rxns_O = {rxn_id:{'n_products_elements_tracked':products_positions_tracked_cnt,
# 'n_reactants_elements_tracked':reactants_positions_tracked_cnt},...}
unbalanced_rxns_O = {};
#get model ids:
if model_id_I:
model_ids = model_id_I;
else:
model_ids = [];
model_ids = self.stage02_isotopomer_query.get_modelID_experimentID_dataStage02IsotopomerSimulation(experiment_id_I);
for model_id in model_ids:
print('checking model_id ' + model_id);
#get mapping ids
if mapping_id_I:
mapping_ids=mapping_id_I;
else:
mapping_ids=[];
mapping_ids=self.stage02_isotopomer_query.get_mappingID_experimentIDAndModelID_dataStage02IsotopomerSimulation(experiment_id_I,model_id);
for mapping_cnt,mapping_id in enumerate(mapping_ids):
print('checking mapping_id ' + mapping_id);
# get the reaction mapping
reaction_mappings = [];
reaction_mappings = self.stage02_isotopomer_query.get_rows_mappingID_dataStage02IsotopomerAtomMappingReactions(mapping_id);
for reaction_cnt,reaction_mapping in enumerate(reaction_mappings):
print('checking reaction ' + reaction_mapping['rxn_id']);
#check reactants
reactants_positions_tracked_cnt = 0;
for reactant_cnt,reactant in enumerate(reaction_mapping['reactants_ids_tracked']):
print('checking reactant ' + reactant);
# check that the reactant positions == reactant elements
if len(reaction_mapping['reactants_positions_tracked'][reactant_cnt])!=len(reaction_mapping['reactants_elements_tracked'][reactant_cnt]):
print('inconsistent reactants_positions and reactants_elements');
continue;
reactants_positions_tracked_cnt += len(reaction_mapping['reactants_positions_tracked'][reactant_cnt]);
#check products
products_positions_tracked_cnt = 0;
for product_cnt,product in enumerate(reaction_mapping['products_ids_tracked']):
print('checking product ' + product);
# check that the product positions == product elements
if len(reaction_mapping['products_positions_tracked'][product_cnt])!=len(reaction_mapping['products_elements_tracked'][product_cnt]):
print('inconsistent products_positions and products_elements');
continue;
products_positions_tracked_cnt += len(reaction_mapping['products_positions_tracked'][product_cnt]);
#record
if reactants_positions_tracked_cnt!=products_positions_tracked_cnt:
unbalanced_rxns_O[reaction_mapping['rxn_id']] = {'n_products_elements_tracked':products_positions_tracked_cnt,
'n_reactants_elements_tracked':reactants_positions_tracked_cnt};
#unbalanced_rxns_O.append(reaction_mapping);
return unbalanced_rxns_O;
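    # Usage sketch (hypothetical ids):
    # unbalanced = imu.find_unbalancedReactionMappings('chemostat01',mapping_id_I=['ecoli_mapping01']);
    # for rxn_id,counts in unbalanced.items():
    #     print(rxn_id,counts['n_reactants_elements_tracked'],counts['n_products_elements_tracked']);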
def find_inconsistentReactionMappings(self,experiment_id_I,model_id_I=[],mapping_id_I=[]):
'''Find inconsistencies in the reaction mapping'''
        #Output:
        #   inconsistent_rxns_O = [rxn_id,...] of reactions that failed at least one QC check
        irm = stage02_isotopomer_reactionMapping();
        inconsistent_rxns_O = [];
#get model ids:
if model_id_I:
model_ids = model_id_I;
else:
model_ids = [];
model_ids = self.stage02_isotopomer_query.get_modelID_experimentID_dataStage02IsotopomerSimulation(experiment_id_I);
for model_id in model_ids:
print('checking model_id ' + model_id);
#get mapping ids
if mapping_id_I:
mapping_ids=mapping_id_I;
else:
mapping_ids=[];
mapping_ids=self.stage02_isotopomer_query.get_mappingID_experimentIDAndModelID_dataStage02IsotopomerSimulation(experiment_id_I,model_id);
for mapping_cnt,mapping_id in enumerate(mapping_ids):
print('checking mapping_id ' + mapping_id);
# get the reaction ids
reaction_ids = [];
reaction_ids = self.stage02_isotopomer_query.get_rxnIDs_mappingID_dataStage02IsotopomerAtomMappingReactions(mapping_id);
for reaction_cnt,reaction_id in enumerate(reaction_ids):
print('checking reaction ' + reaction_id);
#check each reaction
irm.get_reactionMapping(mapping_id,reaction_id);
reactants_ids_stoichiometry_check,reactants_elements_positions_check,reactants_elements_mapping_check,reactants_positions_mapping_check,\
products_ids_stoichiometry_check,products_elements_positions_check,products_elements_mapping_check,products_positions_mapping_check,\
                    element_balance_check,mapping_check = irm.check_reactionMapping();
                    #record reactions that fail any of the QC checks
                    if not (reactants_ids_stoichiometry_check and reactants_elements_positions_check and reactants_elements_mapping_check and reactants_positions_mapping_check and \
                        products_ids_stoichiometry_check and products_elements_positions_check and products_elements_mapping_check and products_positions_mapping_check and \
                        element_balance_check and mapping_check):
                        inconsistent_rxns_O.append(reaction_id);
                    #clear reaction
                    irm.clear_reactionMapping();
        return inconsistent_rxns_O;
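    # Usage sketch (hypothetical ids):
    # inconsistent = imu.find_inconsistentReactionMappings('chemostat01',mapping_id_I=['ecoli_mapping01']);
    # # each entry is a rxn_id that failed at least one of the ten QC checks above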
class isotopomer_netRxns():
def __init__(self):
self.isotopomer_rxns_net = {};
self.isotopomer_rxns_net = self.define_netRxns();
def define_netRxns(self):
isotopomer_rxns_net = {};
isotopomer_rxns_net.update(self.define_netRxns_iDM2014_reversible());
isotopomer_rxns_net.update(self.define_netRxns_RL2013_reversible());
return isotopomer_rxns_net
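    # Each net reaction lumps a linear pathway into a single tracked reaction:
    # 'reactions' lists the member rxn_ids in pathway order and 'stoichiometry'
    # gives the direction (+1 forward, -1 reverse) of each member, e.g.
    # 'pyr_to_23dhmb' = {'reactions':['ACLS','KARA1'],'stoichiometry':[1,-1]}
    # runs KARA1 in the reverse direction.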
def define_netRxns_iDM2014_reversible(self):
isotopomer_rxns_net = {
'ptrc_to_4abut_1':{'reactions':['PTRCTA','ABUTD'],
'stoichiometry':[1,1]},
'ptrc_to_4abut_2':{'reactions':['GGPTRCS','GGPTRCO','GGGABADr','GGGABAH'],
'stoichiometry':[1,1,1,1]},
'glu_DASH_L_to_acg5p':{'reactions':['ACGS','ACGK'],
'stoichiometry':[1,1]},
'2obut_and_pyr_to_3mop':{'reactions':['ACHBS','KARA2','DHAD2'],
'stoichiometry':[1,1,1]},
'pyr_to_23dhmb':{'reactions':['ACLS','KARA1'],
'stoichiometry':[1,-1]},
#'met_DASH_L_and_ptrc_to_spmd_and_5mta':{'reactions':['METAT','ADMDC','SPMS'],
# 'stoichiometry':[1,1,1]}, #cannot be lumped
'chor_and_prpp_to_3ig3p':{'reactions':['ANS','ANPRT','PRAIi','IGPS'],
'stoichiometry':[1,1,1,1]},
'hom_DASH_L_and_cyst_DASH_L_to_pyr_hcys_DASH_L':{'reactions':['HSST','SHSL1','CYSTL'],
'stoichiometry':[1,1,1]},
'e4p_and_pep_to_3dhq':{'reactions':['DDPA','DHQS'],
'stoichiometry':[1,1]},
'aspsa_to_sl2a6o':{'reactions':['DHDPS','DHDPRy','THDPS'],
'stoichiometry':[1,1,1]},
'glu_DASH_L_to_glu5sa':{'reactions':['GLU5K','G5SD'],
'stoichiometry':[1,1]},
'g1p_to_glycogen':{'reactions':['GLGC','GLCS1'],
'stoichiometry':[1,1]},
'thr_DASH_L_to_gly':{'reactions':['THRD','GLYAT'],
'stoichiometry':[1,-1]}, #need to remove deadend mets: athr-L: ATHRDHr, ATHRDHr_reverse; aact: AACTOOR, AOBUTDs
'dhap_to_lac_DASH_D':{'reactions':['MGSA','LGTHL','GLYOX'],
'stoichiometry':[1,1,1]},
'hom_DASH_L_to_thr_DASH_L':{'reactions':['HSK','THRS'],
'stoichiometry':[1,1]},
'3pg_to_ser_DASH_L':{'reactions':['PGCD','PSERT','PSP_L'],
'stoichiometry':[1,1,1]},
'prpp_to_his_DASH_L':{'reactions':['ATPPRT','PRATPP','PRAMPC','PRMICI','IG3PS','IGPDH','HSTPT','HISTP','HISTD'],
'stoichiometry':[1,1,1,1,1,1,1,1,1]},
'UMPSYN_aerobic':{'reactions':['ASPCT','DHORTS','DHORD2','ORPT','OMPDC'],
'stoichiometry':[1,-1,1,-1,1]},
#'UMPSYN_anaerobic':{'reactions':['ASPCT','DHORTS','DHORD5','ORPT','OMPDC'],
# 'stoichiometry':[1,-1,1,-1,1]},
'IMPSYN_1':{'reactions':['GLUPRT','PRAGSr','PRFGS','PRAIS'],
'stoichiometry':[1,1,1,1]},
'IMPSYN_2':{'reactions':['AIRC2','AIRC3','PRASCSi','ADSL2r'],
'stoichiometry':[1,-1,1,1]},
'IMPSYN_3':{'reactions':['AICART','IMPC'],
'stoichiometry':[1,-1]},
'imp_to_gmp':{'reactions':['IMPD','GMPS2'],
'stoichiometry':[1,1]},
'imp_to_amp':{'reactions':['ADSS','ADSL1r'],
'stoichiometry':[1,1]},
#'utp_to_dump_anaerobic':{'reactions':['RNTR4c2','DUTPDP'],
# 'stoichiometry':[1,1]},
'udp_to_dump_aerobic':{'reactions':['RNDR4','NDPK6','DUTPDP'],
'stoichiometry':[1,1,1]},
#'dtmp_to_dttp':{'reactions':['DTMPK','NDPK4'],
# 'stoichiometry':[1,1]}, #cannot be lumped
'COASYN':{'reactions':['ASP1DC','MOHMT','DPR','PANTS','PNTK','PPNCL2','PPCDC','PTPATi','DPCOAK'],
'stoichiometry':[1,1,1,1,1,1,1,1,1]},
'FADSYN_1':{'reactions':['GTPCII2','DHPPDA2','APRAUR','PMDPHT','RBFSb'],
'stoichiometry':[1,1,1,1,1]},
'FADSYN_2':{'reactions':['RBFSa','DB4PS'],
'stoichiometry':[1,1]},
'FADSYN_3':{'reactions':['RBFK','FMNAT'],
'stoichiometry':[1,1]},
'NADSYN_aerobic':{'reactions':['ASPO6','QULNS','NNDPR','NNATr','NADS1','NADK'],
'stoichiometry':[1,1,1,1,1,1]},
'NADSYN_anaerobic':{'reactions':['ASPO5','QULNS','NNDPR','NNATr','NADS1','NADK'],
'stoichiometry':[1,1,1,1,1,1]},
#'NADSALVAGE':{'reactions':['NADPPPS','NADN','NNAM','NAMNPP','NMNN','NMNDA','NMNAT','NADDP','ADPRDP'],
# 'stoichiometry':[1,1,1,1,1,1,1,1,1]}, #cannot be lumped
'THFSYN':{'reactions':['GTPCI','DNTPPA','DNMPPA','DHNPA2r','HPPK2','ADCS','ADCL','DHPS2','DHFS'],
'stoichiometry':[1,1,1,1,1,1,1,1,1]},
'GTHSYN':{'reactions':['GLUCYS','GTHS'],
'stoichiometry':[1,1]},
'GLYCPHOSPHOLIPID_1':{'reactions':['DASYN181','AGPAT181','G3PAT181'],'stoichiometry':[1,1,1]},
'GLYCPHOSPHOLIPID_2':{'reactions':['PSSA181','PSD181'],'stoichiometry':[1,1]},
'GLYCPHOSPHOLIPID_3':{'reactions':['PGSA160','PGPP160'],'stoichiometry':[1,1]},
'GLYCPHOSPHOLIPID_4':{'reactions':['DASYN161','AGPAT161','G3PAT161'],'stoichiometry':[1,1,1]},
'GLYCPHOSPHOLIPID_5':{'reactions':['PGSA181','PGPP181'],'stoichiometry':[1,1]},
'GLYCPHOSPHOLIPID_6':{'reactions':['PSD161','PSSA161'],'stoichiometry':[1,1]},
'GLYCPHOSPHOLIPID_7':{'reactions':['PSSA160','PSD160'],'stoichiometry':[1,1]},
'GLYCPHOSPHOLIPID_8':{'reactions':['DASYN160','AGPAT160','G3PAT160'],'stoichiometry':[1,1,1]},
'GLYCPHOSPHOLIPID_9':{'reactions':['PGSA161','PGPP161'],'stoichiometry':[1,1]},
'MOLYBDOPTERIN_1':{'reactions':['MPTAT','MPTS','CPMPS'],'stoichiometry':[1,1,1]},
'MOLYBDOPTERIN_2':{'reactions':['MOCDS','MOGDS'],'stoichiometry':[1,1]},
'MOLYBDOPTERIN_3':{'reactions':['MOADSUx','MPTSS'],'stoichiometry':[1,1]},
'COFACTOR_1':{'reactions':['GLUTRR','G1SAT','GLUTRS'],'stoichiometry':[1,1,1]},
'COFACTOR_2':{'reactions':['DHNAOT4','UPPDC1','DHNCOAT','DHNCOAS','SEPHCHCS','SUCBZS','SUCBZL','PPPGO3','FCLT','CPPPGO','SHCHCS3'],'stoichiometry':[1,1,1,1,1,1,1,1,1,1,1]},
'COFACTOR_3':{'reactions':['TYRL','AMMQLT8','HEMEOS','UPP3MT','SHCHD2','SHCHF','ENTCS','CBLAT'],'stoichiometry':[1,1,1,1,1,1,1,1]},
'VITB6':{'reactions':['E4PD','PERD','OHPBAT','PDX5PS','PDX5PO2'],'stoichiometry':[1,1,1,1,1]},
#'THIAMIN':{'reactions':['AMPMS2','PMPK','THZPSN3','TMPPP','TMPK'],'stoichiometry':[1,1,1,1,1]}, # original pathway without correction
'THIAMIN':{'reactions':['AMPMS3','PMPK','THZPSN3','TMPPP','TMPK'],'stoichiometry':[1,1,1,1,1]},
'COFACTOR_4':{'reactions':['I4FE4ST','I4FE4SR','I2FE2SS2'],'stoichiometry':[1,1,1]},
'COFACTOR_5':{'reactions':['BMOGDS1','BMOGDS2','BMOCOS'],'stoichiometry':[1,1,1]},
'COFACTOR_6':{'reactions':['DMPPS','GRTT','DMATT'],'stoichiometry':[1,1,1]},
'COFACTOR_7':{'reactions':['MECDPS','DXPRIi','MEPCT','CDPMEK','MECDPDH5'],'stoichiometry':[1,1,1,1,1]},
'COFACTOR_8':{'reactions':['LIPOS','LIPOCT'],'stoichiometry':[1,1]},
'COFACTOR_9':{'reactions':['OMMBLHX','OMPHHX','OPHHX','HBZOPT','DMQMT','CHRPL','OMBZLM','OPHBDC','OHPHM'],'stoichiometry':[1,1,1,1,1,1,1,1,1]},
'COFACTOR_10':{'reactions':['SERASr','DHBD','UPP3S','HMBS','ICHORT','DHBS'],'stoichiometry':[1,1,1,1,1,1]},
'COFACTOR_11':{'reactions':['PMEACPE','EGMEACPR','DBTS','AOXSr2','I2FE2SR','OPMEACPD','MALCOAMT','AMAOTr','OPMEACPS','OPMEACPR','OGMEACPD','OGMEACPR','OGMEACPS','EPMEACPR','BTS5'],'stoichiometry':[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]},
'CELLENV_1':{'reactions':['UAMAGS','UAPGR','UAGPT3','PAPPT3','GLUR','UAGCVT','UAMAS','UDCPDP','UGMDDS','UAAGDS'],'stoichiometry':[1,1,1,1,-1,1,1,1,1,1]},
'CELLENV_2':{'reactions':['3HAD181','3OAR181','3OAS181','EAR181x'],'stoichiometry':[1,1,1,1]},
'CELLENV_3':{'reactions':['3HAD160','3OAR160','EAR160x','3OAS160'],'stoichiometry':[1,1,1,1]},
'CELLENV_4':{'reactions':['EAR120x','3OAR120','3HAD120','3OAS120','EAR100x'],'stoichiometry':[1,1,1,1,1]},
'CELLENV_5':{'reactions':['G1PACT','UAGDP','PGAMT','GF6PTA'],'stoichiometry':[1,1,-1,1]},
'CELLENV_6':{'reactions':['3OAR40','EAR40x','3OAS60','3OAR60','3HAD80','3OAS80','3OAR80','EAR60x','3HAD60','EAR80x','3HAD40'],'stoichiometry':[1,1,1,1,1,1,1,1,1,1,1]},
'CELLENV_7':{'reactions':['3HAD161','EAR161x','3OAS161','3OAR161','3OAS141','3HAD141','3OAR121','EAR121x','3HAD121','EAR141x','T2DECAI','3OAR141','3OAS121'],'stoichiometry':[1,1,1,1,1,1,1,1,1,1,1,1,1]},
'CELLENV_8':{'reactions':['TDPGDH','TDPDRR','TDPDRE','G1PTT'],'stoichiometry':[1,1,1,1]},
'CELLENV_9':{'reactions':['3OAS140','3OAR140'],'stoichiometry':[1,1]},
'CELLENV_10':{'reactions':['3HAD140','EAR140x'],'stoichiometry':[1,1]},
'CELLENV_11':{'reactions':['3OAR100','3HAD100','3OAS100'],'stoichiometry':[1,1,1]},
'LIPOPOLYSACCHARIDE_1':{'reactions':['COLIPAabcpp','COLIPAabctex','EDTXS1','EDTXS2','GALT1','GLCTR1','GLCTR2','GLCTR3','HEPK1','HEPK2','HEPT1','HEPT2','HEPT3','HEPT4','LPADSS','MOAT','MOAT2','MOAT3C','RHAT1','TDSK','USHD'],'stoichiometry':[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]},
'LIPOPOLYSACCHARIDE_2':{'reactions':['AGMHE','GMHEPAT','GMHEPK','GMHEPPA','S7PI'],'stoichiometry':[1,1,1,1,1]},
'LIPOPOLYSACCHARIDE_3':{'reactions':['U23GAAT','UHGADA','UAGAAT'],'stoichiometry':[1,1,1]},
'LIPOPOLYSACCHARIDE_4':{'reactions':['KDOPP','KDOCT2','KDOPS'],'stoichiometry':[1,1,1]},
'ASTPathway':{'reactions':['AST','SADH','SGDS','SGSAD','SOTA'],'stoichiometry':[1,1,1,1,1]}
};
return isotopomer_rxns_net
def define_netRxns_RL2013_reversible(self):
isotopomer_rxns_net = {
'PTAr_ACKr_ACS':{'reactions':['PTAr','ACKr','ACS'],
'stoichiometry':[1,-1,-1]}, #acetate secretion
'ACONTa_ACONTb':{'reactions':['ACONTa','ACONTb'],
'stoichiometry':[1,1]},
'G6PDH2r_PGL':{'reactions':['G6PDH2r','PGL'],
'stoichiometry':[1,1]},
'GAPD_PGK':{'reactions':['GAPD','PGK'], #glycolysis
'stoichiometry':[1,-1]},
'PGM':{'reactions':['PGM','ENO'], #glycolysis
'stoichiometry':[-1,1]},
            'SUCCOAS':{'reactions':['SUCOAS'], #misspelling: the net id doubles the C; the model reaction is 'SUCOAS'
'stoichiometry':[1]}
#TODO: amino acid synthesis reactions
};
return isotopomer_rxns_net;
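# A note on the sign convention above (an illustrative sketch, not part of the
# original module): a negative stoichiometry marks a reaction that runs in
# reverse within the lumped net reaction, e.g. 'GAPD_PGK' combines GAPD
# forward with PGK reversed.
#
#     net = isotopomer_netRxns().isotopomer_rxns_net
#     for rxn, coeff in zip(net['GAPD_PGK']['reactions'],
#                           net['GAPD_PGK']['stoichiometry']):
#         print(rxn, 'reverse' if coeff < 0 else 'forward')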
class isotopomer_fluxSplits():
def __init__(self):
self.isotopomer_splits = {};
self.isotopomer_splits = self.define_fluxSplits();
def define_fluxSplits(self):
isotopomer_splits = {};
isotopomer_splits['g6p_2_f6p_or_6pgc']=['PGI','G6PDH2r'];
isotopomer_splits['6pgc_2_2ddg6p_or_ru5p-D']=['EDD','GND'];
isotopomer_splits['pep_2_oaa_or_pyr']=['PPC','PYK','GLCptspp'];
isotopomer_splits['accoa_2_ac_or_cit']=['PTAr','CS'];
isotopomer_splits['icit_2_akg_or_glx']=['ICDHyr','ICL'];
isotopomer_splits['glc-D_2_g6p']=['HEX1','GLCptspp'];
isotopomer_splits['mal-L_2_oaa_or_pyr']=['ME1','ME2','MDH'];
        return isotopomer_splits

# ---- file: /test_utilities/src/d1_test/mock_api/tests/test_get.py | repo: DataONEorg/d1_python | license: Apache-2.0 ----

# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
import responses
import d1_test.d1_test_case
import d1_test.mock_api.get
class TestMockGet(d1_test.d1_test_case.D1TestCase):
@responses.activate
def test_1000(self, mn_client_v1_v2):
"""mock_api.get() returns a Requests Response object."""
d1_test.mock_api.get.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL)
assert isinstance(mn_client_v1_v2.get("test_pid_1"), requests.Response)
@responses.activate
def test_1010(self, mn_client_v1_v2):
"""mock_api.get() returns the same content each time for a given PID."""
d1_test.mock_api.get.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL)
obj_1a_str = mn_client_v1_v2.get("test_pid_1").content
obj_2a_str = mn_client_v1_v2.get("test_pid_2").content
obj_1b_str = mn_client_v1_v2.get("test_pid_1").content
obj_2b_str = mn_client_v1_v2.get("test_pid_2").content
assert obj_1a_str == obj_1b_str
assert obj_2a_str == obj_2b_str
@responses.activate
def test_1020(self, mn_client_v1_v2):
"""mock_api.get(): Redirects."""
d1_test.mock_api.get.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL)
direct_sciobj_bytes = mn_client_v1_v2.get("test_pid_1").content
redirect_sciobj_bytes = mn_client_v1_v2.get(
"<REDIRECT:303:3>test_pid_1"
).content
assert direct_sciobj_bytes == redirect_sciobj_bytes
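    # Pattern used throughout this file: add_callback() registers the mock
    # handler with the `responses` library for MOCK_MN_BASE_URL, so the
    # client's real HTTP calls are intercepted inside each test decorated
    # with @responses.activate.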
# @responses.activate
# def test_0012(self):
# """mock_api.get() returns 1024 bytes"""
# obj_str = self.client.get('test_pid_1').content
# self.assertEqual(len(obj_str), 1024)
# @responses.activate
# def test_0013(self):
# """mock_api.get(): Passing a trigger header triggers a DataONEException"""
# self.assertRaises(
# d1_common.types.exceptions.NotAuthorized, self.client.get, 'test_pid',
# vendorSpecific={'trigger': '401'}
# )

# ---- file: /sdk/sql/azure-mgmt-sql/generated_samples/transparent_data_encryption_list.py | repo: Azure/azure-sdk-for-python | licenses: MIT, LicenseRef-scancode-generic-cla, LGPL-2.1-or-later ----

# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.sql import SqlManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-sql
# USAGE
python transparent_data_encryption_list.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = SqlManagementClient(
credential=DefaultAzureCredential(),
subscription_id="00000000-1111-2222-3333-444444444444",
)
response = client.transparent_data_encryptions.list_by_database(
resource_group_name="security-tde-resourcegroup",
server_name="securitytde",
database_name="testdb",
)
for item in response:
print(item)
# x-ms-original-file: specification/sql/resource-manager/Microsoft.Sql/preview/2022-08-01-preview/examples/TransparentDataEncryptionList.json
if __name__ == "__main__":
main()

# ---- file: /otp/src/level/ModelEntity.py | repo: satire6/Anesidora | license: none stated ----

from toontown.toonbase.ToontownGlobals import *
from direct.directnotify import DirectNotifyGlobal
import BasicEntities
class ModelEntity(BasicEntities.NodePathEntity):
LoadFuncs = {
'loadModelCopy': loader.loadModelCopy,
'loadModel': loader.loadModel,
'loadModelOnce': loader.loadModelOnce,
}
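    # Rough Panda3D semantics for the three modes above: 'loadModel' reads the
    # model in, 'loadModelOnce' returns the shared cached instance, and
    # 'loadModelCopy' returns a copy of the cached model, which is safe to
    # reparent per entity (hence the default loadType below).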
def __init__(self, level, entId):
# TODO: fill in default values automatically for missing attribs
self.collisionsOnly = False
self.loadType = 'loadModelCopy'
self.flattenType = 'light'
self.goonHatType = 'none'
self.entInitialized = False
BasicEntities.NodePathEntity.__init__(self, level, entId)
self.entInitialized = True
self.model = None
self.loadModel()
def destroy(self):
if self.model:
self.model.removeNode()
del self.model
BasicEntities.NodePathEntity.destroy(self)
def loadModel(self):
if self.model:
self.model.removeNode()
self.model = None
if self.modelPath is None:
return
self.model = ModelEntity.LoadFuncs[self.loadType](self.modelPath)
if self.model:
self.model.reparentTo(self)
# hide/show as appropriate
if self.collisionsOnly:
if __dev__:
self.model.setTransparency(1)
self.model.setColorScale(1,1,1,.1)
else:
self.model.hide()
else:
self.model.show()
# HACK SDN: special code for moving crate wall collisions down
if self.modelPath in ("phase_9/models/cogHQ/woodCrateB.bam",
"phase_9/models/cogHQ/metal_crateB.bam",
"phase_10/models/cashbotHQ/CBMetalCrate.bam",
"phase_10/models/cogHQ/CBMetalCrate2.bam",
"phase_10/models/cashbotHQ/CBWoodCrate.bam",
"phase_11/models/lawbotHQ/LB_metal_crate.bam",
"phase_11/models/lawbotHQ/LB_metal_crate2.bam",
):
# get rid of any scales
#self.model.flattenLight()
# move walls down
cNode = self.find("**/wall")
cNode.setZ(cNode, -.75)
# duplicate the floor and move it down to crate a
# catch effect for low-hopped toons
colNode = self.find("**/collision")
floor = colNode.find("**/floor")
floor2 = floor.copyTo(colNode)
floor2.setZ(floor2, -.75)
"""
# incorporate the entity's overall scale
self.model.setScale(self.getScale())
self.setScale(1)
self.model.flattenLight()
"""
            if self.goonHatType != 'none':  # 'is not' compared string identity, not equality
self.goonType = {'hardhat':'pg','security':'sg'}[self.goonHatType]
self.hat = self.model
### this was copied from Goon.createHead
if self.goonType == "pg":
self.hat.find("**/security_hat").hide()
elif self.goonType == "sg":
self.hat.find("**/hard_hat").hide()
###
del self.hat
del self.goonType
if self.flattenType == 'light':
self.model.flattenLight()
elif self.flattenType == 'medium':
self.model.flattenMedium()
elif self.flattenType == 'strong':
self.model.flattenStrong()
def setModelPath(self, path):
self.modelPath = path
self.loadModel()
def setCollisionsOnly(self, collisionsOnly):
self.collisionsOnly = collisionsOnly
self.loadModel()
def setGoonHatType(self, goonHatType):
self.goonHatType = goonHatType
self.loadModel()

# ---- file: /QA-System-master/SpeechToText_test/google-cloud-sdk/lib/googlecloudsdk/third_party/apis/firestore/v1/firestore_v1_client.py | repo: iofh/QA-System | licenses: LicenseRef-scancode-unknown-license-reference, Apache-2.0 ----

"""Generated client library for firestore version v1."""
# NOTE: This file is autogenerated and should not be edited by hand.
from apitools.base.py import base_api
from googlecloudsdk.third_party.apis.firestore.v1 import firestore_v1_messages as messages
class FirestoreV1(base_api.BaseApiClient):
"""Generated client library for service firestore version v1."""
MESSAGES_MODULE = messages
BASE_URL = 'https://firestore.googleapis.com/'
MTLS_BASE_URL = 'https://firestore.mtls.googleapis.com/'
_PACKAGE = 'firestore'
_SCOPES = ['https://www.googleapis.com/auth/cloud-platform', 'https://www.googleapis.com/auth/datastore']
_VERSION = 'v1'
_CLIENT_ID = '1042881264118.apps.googleusercontent.com'
_CLIENT_SECRET = 'x_Tw5K8nnjoRAqULM9PFAC2b'
_USER_AGENT = 'google-cloud-sdk'
_CLIENT_CLASS_NAME = 'FirestoreV1'
_URL_VERSION = 'v1'
_API_KEY = None
def __init__(self, url='', credentials=None,
get_credentials=True, http=None, model=None,
log_request=False, log_response=False,
credentials_args=None, default_global_params=None,
additional_http_headers=None, response_encoding=None):
"""Create a new firestore handle."""
url = url or self.BASE_URL
super(FirestoreV1, self).__init__(
url, credentials=credentials,
get_credentials=get_credentials, http=http, model=model,
log_request=log_request, log_response=log_response,
credentials_args=credentials_args,
default_global_params=default_global_params,
additional_http_headers=additional_http_headers,
response_encoding=response_encoding)
self.projects_databases_collectionGroups_fields = self.ProjectsDatabasesCollectionGroupsFieldsService(self)
self.projects_databases_collectionGroups_indexes = self.ProjectsDatabasesCollectionGroupsIndexesService(self)
self.projects_databases_collectionGroups = self.ProjectsDatabasesCollectionGroupsService(self)
self.projects_databases_documents = self.ProjectsDatabasesDocumentsService(self)
self.projects_databases_operations = self.ProjectsDatabasesOperationsService(self)
self.projects_databases = self.ProjectsDatabasesService(self)
self.projects_locations = self.ProjectsLocationsService(self)
self.projects = self.ProjectsService(self)
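    # Minimal usage sketch (hypothetical project/database/document names; the
    # request message classes come from firestore_v1_messages):
    #
    #     client = FirestoreV1()
    #     req = messages.FirestoreProjectsDatabasesDocumentsGetRequest(
    #         name='projects/my-project/databases/(default)/documents/users/alice')
    #     doc = client.projects_databases_documents.Get(req)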
class ProjectsDatabasesCollectionGroupsFieldsService(base_api.BaseApiService):
"""Service class for the projects_databases_collectionGroups_fields resource."""
_NAME = 'projects_databases_collectionGroups_fields'
def __init__(self, client):
super(FirestoreV1.ProjectsDatabasesCollectionGroupsFieldsService, self).__init__(client)
self._upload_configs = {
}
def Get(self, request, global_params=None):
r"""Gets the metadata and configuration for a Field.
Args:
request: (FirestoreProjectsDatabasesCollectionGroupsFieldsGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleFirestoreAdminV1Field) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/databases/{databasesId}/collectionGroups/{collectionGroupsId}/fields/{fieldsId}',
http_method='GET',
method_id='firestore.projects.databases.collectionGroups.fields.get',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}',
request_field='',
request_type_name='FirestoreProjectsDatabasesCollectionGroupsFieldsGetRequest',
response_type_name='GoogleFirestoreAdminV1Field',
supports_download=False,
)
def List(self, request, global_params=None):
r"""Lists the field configuration and metadata for this database.
Currently, FirestoreAdmin.ListFields only supports listing fields
that have been explicitly overridden. To issue this query, call
FirestoreAdmin.ListFields with the filter set to
`indexConfig.usesAncestorConfig:false`.
Args:
request: (FirestoreProjectsDatabasesCollectionGroupsFieldsListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleFirestoreAdminV1ListFieldsResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/databases/{databasesId}/collectionGroups/{collectionGroupsId}/fields',
http_method='GET',
method_id='firestore.projects.databases.collectionGroups.fields.list',
ordered_params=['parent'],
path_params=['parent'],
query_params=['filter', 'pageSize', 'pageToken'],
relative_path='v1/{+parent}/fields',
request_field='',
request_type_name='FirestoreProjectsDatabasesCollectionGroupsFieldsListRequest',
response_type_name='GoogleFirestoreAdminV1ListFieldsResponse',
supports_download=False,
)
def Patch(self, request, global_params=None):
r"""Updates a field configuration. Currently, field updates apply only to.
single field index configuration. However, calls to
FirestoreAdmin.UpdateField should provide a field mask to avoid
changing any configuration that the caller isn't aware of. The field mask
should be specified as: `{ paths: "index_config" }`.
This call returns a google.longrunning.Operation which may be used to
track the status of the field update. The metadata for
the operation will be the type FieldOperationMetadata.
To configure the default field settings for the database, use
the special `Field` with resource name:
`projects/{project_id}/databases/{database_id}/collectionGroups/__default__/fields/*`.
Args:
request: (FirestoreProjectsDatabasesCollectionGroupsFieldsPatchRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleLongrunningOperation) The response message.
"""
config = self.GetMethodConfig('Patch')
return self._RunMethod(
config, request, global_params=global_params)
Patch.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/databases/{databasesId}/collectionGroups/{collectionGroupsId}/fields/{fieldsId}',
http_method='PATCH',
method_id='firestore.projects.databases.collectionGroups.fields.patch',
ordered_params=['name'],
path_params=['name'],
query_params=['updateMask'],
relative_path='v1/{+name}',
request_field='googleFirestoreAdminV1Field',
request_type_name='FirestoreProjectsDatabasesCollectionGroupsFieldsPatchRequest',
response_type_name='GoogleLongrunningOperation',
supports_download=False,
)
class ProjectsDatabasesCollectionGroupsIndexesService(base_api.BaseApiService):
"""Service class for the projects_databases_collectionGroups_indexes resource."""
_NAME = 'projects_databases_collectionGroups_indexes'
def __init__(self, client):
super(FirestoreV1.ProjectsDatabasesCollectionGroupsIndexesService, self).__init__(client)
self._upload_configs = {
}
def Create(self, request, global_params=None):
r"""Creates a composite index. This returns a google.longrunning.Operation.
which may be used to track the status of the creation. The metadata for
the operation will be the type IndexOperationMetadata.
Args:
request: (FirestoreProjectsDatabasesCollectionGroupsIndexesCreateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleLongrunningOperation) The response message.
"""
config = self.GetMethodConfig('Create')
return self._RunMethod(
config, request, global_params=global_params)
Create.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/databases/{databasesId}/collectionGroups/{collectionGroupsId}/indexes',
http_method='POST',
method_id='firestore.projects.databases.collectionGroups.indexes.create',
ordered_params=['parent'],
path_params=['parent'],
query_params=[],
relative_path='v1/{+parent}/indexes',
request_field='googleFirestoreAdminV1Index',
request_type_name='FirestoreProjectsDatabasesCollectionGroupsIndexesCreateRequest',
response_type_name='GoogleLongrunningOperation',
supports_download=False,
)
def Delete(self, request, global_params=None):
r"""Deletes a composite index.
Args:
request: (FirestoreProjectsDatabasesCollectionGroupsIndexesDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Empty) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
Delete.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/databases/{databasesId}/collectionGroups/{collectionGroupsId}/indexes/{indexesId}',
http_method='DELETE',
method_id='firestore.projects.databases.collectionGroups.indexes.delete',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}',
request_field='',
request_type_name='FirestoreProjectsDatabasesCollectionGroupsIndexesDeleteRequest',
response_type_name='Empty',
supports_download=False,
)
def Get(self, request, global_params=None):
r"""Gets a composite index.
Args:
request: (FirestoreProjectsDatabasesCollectionGroupsIndexesGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleFirestoreAdminV1Index) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/databases/{databasesId}/collectionGroups/{collectionGroupsId}/indexes/{indexesId}',
http_method='GET',
method_id='firestore.projects.databases.collectionGroups.indexes.get',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}',
request_field='',
request_type_name='FirestoreProjectsDatabasesCollectionGroupsIndexesGetRequest',
response_type_name='GoogleFirestoreAdminV1Index',
supports_download=False,
)
def List(self, request, global_params=None):
r"""Lists composite indexes.
Args:
request: (FirestoreProjectsDatabasesCollectionGroupsIndexesListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleFirestoreAdminV1ListIndexesResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/databases/{databasesId}/collectionGroups/{collectionGroupsId}/indexes',
http_method='GET',
method_id='firestore.projects.databases.collectionGroups.indexes.list',
ordered_params=['parent'],
path_params=['parent'],
query_params=['filter', 'pageSize', 'pageToken'],
relative_path='v1/{+parent}/indexes',
request_field='',
request_type_name='FirestoreProjectsDatabasesCollectionGroupsIndexesListRequest',
response_type_name='GoogleFirestoreAdminV1ListIndexesResponse',
supports_download=False,
)
class ProjectsDatabasesCollectionGroupsService(base_api.BaseApiService):
"""Service class for the projects_databases_collectionGroups resource."""
_NAME = 'projects_databases_collectionGroups'
def __init__(self, client):
super(FirestoreV1.ProjectsDatabasesCollectionGroupsService, self).__init__(client)
self._upload_configs = {
}
class ProjectsDatabasesDocumentsService(base_api.BaseApiService):
"""Service class for the projects_databases_documents resource."""
_NAME = 'projects_databases_documents'
def __init__(self, client):
super(FirestoreV1.ProjectsDatabasesDocumentsService, self).__init__(client)
self._upload_configs = {
}
def BatchGet(self, request, global_params=None):
r"""Gets multiple documents.
Documents returned by this method are not guaranteed to be returned in the
same order that they were requested.
Args:
request: (FirestoreProjectsDatabasesDocumentsBatchGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(BatchGetDocumentsResponse) The response message.
"""
config = self.GetMethodConfig('BatchGet')
return self._RunMethod(
config, request, global_params=global_params)
BatchGet.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/databases/{databasesId}/documents:batchGet',
http_method='POST',
method_id='firestore.projects.databases.documents.batchGet',
ordered_params=['database'],
path_params=['database'],
query_params=[],
relative_path='v1/{+database}/documents:batchGet',
request_field='batchGetDocumentsRequest',
request_type_name='FirestoreProjectsDatabasesDocumentsBatchGetRequest',
response_type_name='BatchGetDocumentsResponse',
supports_download=False,
)
def BeginTransaction(self, request, global_params=None):
r"""Starts a new transaction.
Args:
request: (FirestoreProjectsDatabasesDocumentsBeginTransactionRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(BeginTransactionResponse) The response message.
"""
config = self.GetMethodConfig('BeginTransaction')
return self._RunMethod(
config, request, global_params=global_params)
BeginTransaction.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/databases/{databasesId}/documents:beginTransaction',
http_method='POST',
method_id='firestore.projects.databases.documents.beginTransaction',
ordered_params=['database'],
path_params=['database'],
query_params=[],
relative_path='v1/{+database}/documents:beginTransaction',
request_field='beginTransactionRequest',
request_type_name='FirestoreProjectsDatabasesDocumentsBeginTransactionRequest',
response_type_name='BeginTransactionResponse',
supports_download=False,
)
def Commit(self, request, global_params=None):
r"""Commits a transaction, while optionally updating documents.
Args:
request: (FirestoreProjectsDatabasesDocumentsCommitRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(CommitResponse) The response message.
"""
config = self.GetMethodConfig('Commit')
return self._RunMethod(
config, request, global_params=global_params)
Commit.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/databases/{databasesId}/documents:commit',
http_method='POST',
method_id='firestore.projects.databases.documents.commit',
ordered_params=['database'],
path_params=['database'],
query_params=[],
relative_path='v1/{+database}/documents:commit',
request_field='commitRequest',
request_type_name='FirestoreProjectsDatabasesDocumentsCommitRequest',
response_type_name='CommitResponse',
supports_download=False,
)
def CreateDocument(self, request, global_params=None):
r"""Creates a new document.
Args:
request: (FirestoreProjectsDatabasesDocumentsCreateDocumentRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Document) The response message.
"""
config = self.GetMethodConfig('CreateDocument')
return self._RunMethod(
config, request, global_params=global_params)
CreateDocument.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/databases/{databasesId}/documents/{documentsId}/{collectionId}',
http_method='POST',
method_id='firestore.projects.databases.documents.createDocument',
ordered_params=['parent', 'collectionId'],
path_params=['collectionId', 'parent'],
query_params=['documentId', 'mask_fieldPaths'],
relative_path='v1/{+parent}/{collectionId}',
request_field='document',
request_type_name='FirestoreProjectsDatabasesDocumentsCreateDocumentRequest',
response_type_name='Document',
supports_download=False,
)
def Delete(self, request, global_params=None):
r"""Deletes a document.
Args:
request: (FirestoreProjectsDatabasesDocumentsDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Empty) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
Delete.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/databases/{databasesId}/documents/{documentsId}/{documentsId1}',
http_method='DELETE',
method_id='firestore.projects.databases.documents.delete',
ordered_params=['name'],
path_params=['name'],
query_params=['currentDocument_exists', 'currentDocument_updateTime'],
relative_path='v1/{+name}',
request_field='',
request_type_name='FirestoreProjectsDatabasesDocumentsDeleteRequest',
response_type_name='Empty',
supports_download=False,
)
def Get(self, request, global_params=None):
r"""Gets a single document.
Args:
request: (FirestoreProjectsDatabasesDocumentsGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Document) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/databases/{databasesId}/documents/{documentsId}/{documentsId1}',
http_method='GET',
method_id='firestore.projects.databases.documents.get',
ordered_params=['name'],
path_params=['name'],
query_params=['mask_fieldPaths', 'readTime', 'transaction'],
relative_path='v1/{+name}',
request_field='',
request_type_name='FirestoreProjectsDatabasesDocumentsGetRequest',
response_type_name='Document',
supports_download=False,
)
def List(self, request, global_params=None):
r"""Lists documents.
Args:
request: (FirestoreProjectsDatabasesDocumentsListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListDocumentsResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/databases/{databasesId}/documents/{documentsId}/{documentsId1}/{collectionId}',
http_method='GET',
method_id='firestore.projects.databases.documents.list',
ordered_params=['parent', 'collectionId'],
path_params=['collectionId', 'parent'],
query_params=['mask_fieldPaths', 'orderBy', 'pageSize', 'pageToken', 'readTime', 'showMissing', 'transaction'],
relative_path='v1/{+parent}/{collectionId}',
request_field='',
request_type_name='FirestoreProjectsDatabasesDocumentsListRequest',
response_type_name='ListDocumentsResponse',
supports_download=False,
)
def ListCollectionIds(self, request, global_params=None):
r"""Lists all the collection IDs underneath a document.
Args:
request: (FirestoreProjectsDatabasesDocumentsListCollectionIdsRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListCollectionIdsResponse) The response message.
"""
config = self.GetMethodConfig('ListCollectionIds')
return self._RunMethod(
config, request, global_params=global_params)
ListCollectionIds.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/databases/{databasesId}/documents/{documentsId}/{documentsId1}:listCollectionIds',
http_method='POST',
method_id='firestore.projects.databases.documents.listCollectionIds',
ordered_params=['parent'],
path_params=['parent'],
query_params=[],
relative_path='v1/{+parent}:listCollectionIds',
request_field='listCollectionIdsRequest',
request_type_name='FirestoreProjectsDatabasesDocumentsListCollectionIdsRequest',
response_type_name='ListCollectionIdsResponse',
supports_download=False,
)
def Listen(self, request, global_params=None):
r"""Listens to changes.
Args:
request: (FirestoreProjectsDatabasesDocumentsListenRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListenResponse) The response message.
"""
config = self.GetMethodConfig('Listen')
return self._RunMethod(
config, request, global_params=global_params)
Listen.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/databases/{databasesId}/documents:listen',
http_method='POST',
method_id='firestore.projects.databases.documents.listen',
ordered_params=['database'],
path_params=['database'],
query_params=[],
relative_path='v1/{+database}/documents:listen',
request_field='listenRequest',
request_type_name='FirestoreProjectsDatabasesDocumentsListenRequest',
response_type_name='ListenResponse',
supports_download=False,
)
def Patch(self, request, global_params=None):
r"""Updates or inserts a document.
Args:
request: (FirestoreProjectsDatabasesDocumentsPatchRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Document) The response message.
"""
config = self.GetMethodConfig('Patch')
return self._RunMethod(
config, request, global_params=global_params)
Patch.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/databases/{databasesId}/documents/{documentsId}/{documentsId1}',
http_method='PATCH',
method_id='firestore.projects.databases.documents.patch',
ordered_params=['name'],
path_params=['name'],
query_params=['currentDocument_exists', 'currentDocument_updateTime', 'mask_fieldPaths', 'updateMask_fieldPaths'],
relative_path='v1/{+name}',
request_field='document',
request_type_name='FirestoreProjectsDatabasesDocumentsPatchRequest',
response_type_name='Document',
supports_download=False,
)
def Rollback(self, request, global_params=None):
r"""Rolls back a transaction.
Args:
request: (FirestoreProjectsDatabasesDocumentsRollbackRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Empty) The response message.
"""
config = self.GetMethodConfig('Rollback')
return self._RunMethod(
config, request, global_params=global_params)
Rollback.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/databases/{databasesId}/documents:rollback',
http_method='POST',
method_id='firestore.projects.databases.documents.rollback',
ordered_params=['database'],
path_params=['database'],
query_params=[],
relative_path='v1/{+database}/documents:rollback',
request_field='rollbackRequest',
request_type_name='FirestoreProjectsDatabasesDocumentsRollbackRequest',
response_type_name='Empty',
supports_download=False,
)
def RunQuery(self, request, global_params=None):
r"""Runs a query.
Args:
request: (FirestoreProjectsDatabasesDocumentsRunQueryRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(RunQueryResponse) The response message.
"""
config = self.GetMethodConfig('RunQuery')
return self._RunMethod(
config, request, global_params=global_params)
RunQuery.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/databases/{databasesId}/documents/{documentsId}/{documentsId1}:runQuery',
http_method='POST',
method_id='firestore.projects.databases.documents.runQuery',
ordered_params=['parent'],
path_params=['parent'],
query_params=[],
relative_path='v1/{+parent}:runQuery',
request_field='runQueryRequest',
request_type_name='FirestoreProjectsDatabasesDocumentsRunQueryRequest',
response_type_name='RunQueryResponse',
supports_download=False,
)
def Write(self, request, global_params=None):
r"""Streams batches of document updates and deletes, in order.
Args:
request: (FirestoreProjectsDatabasesDocumentsWriteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(WriteResponse) The response message.
"""
config = self.GetMethodConfig('Write')
return self._RunMethod(
config, request, global_params=global_params)
Write.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/databases/{databasesId}/documents:write',
http_method='POST',
method_id='firestore.projects.databases.documents.write',
ordered_params=['database'],
path_params=['database'],
query_params=[],
relative_path='v1/{+database}/documents:write',
request_field='writeRequest',
request_type_name='FirestoreProjectsDatabasesDocumentsWriteRequest',
response_type_name='WriteResponse',
supports_download=False,
)
class ProjectsDatabasesOperationsService(base_api.BaseApiService):
"""Service class for the projects_databases_operations resource."""
_NAME = 'projects_databases_operations'
def __init__(self, client):
super(FirestoreV1.ProjectsDatabasesOperationsService, self).__init__(client)
self._upload_configs = {
}
def Cancel(self, request, global_params=None):
r"""Starts asynchronous cancellation on a long-running operation. The server.
makes a best effort to cancel the operation, but success is not
guaranteed. If the server doesn't support this method, it returns
`google.rpc.Code.UNIMPLEMENTED`. Clients can use
Operations.GetOperation or
other methods to check whether the cancellation succeeded or whether the
operation completed despite cancellation. On successful cancellation,
the operation is not deleted; instead, it becomes an operation with
an Operation.error value with a google.rpc.Status.code of 1,
corresponding to `Code.CANCELLED`.
Args:
request: (FirestoreProjectsDatabasesOperationsCancelRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Empty) The response message.
"""
config = self.GetMethodConfig('Cancel')
return self._RunMethod(
config, request, global_params=global_params)
Cancel.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/databases/{databasesId}/operations/{operationsId}:cancel',
http_method='POST',
method_id='firestore.projects.databases.operations.cancel',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}:cancel',
request_field='googleLongrunningCancelOperationRequest',
request_type_name='FirestoreProjectsDatabasesOperationsCancelRequest',
response_type_name='Empty',
supports_download=False,
)
def Delete(self, request, global_params=None):
r"""Deletes a long-running operation. This method indicates that the client is.
no longer interested in the operation result. It does not cancel the
operation. If the server doesn't support this method, it returns
`google.rpc.Code.UNIMPLEMENTED`.
Args:
request: (FirestoreProjectsDatabasesOperationsDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Empty) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
Delete.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/databases/{databasesId}/operations/{operationsId}',
http_method='DELETE',
method_id='firestore.projects.databases.operations.delete',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}',
request_field='',
request_type_name='FirestoreProjectsDatabasesOperationsDeleteRequest',
response_type_name='Empty',
supports_download=False,
)
def Get(self, request, global_params=None):
r"""Gets the latest state of a long-running operation. Clients can use this.
method to poll the operation result at intervals as recommended by the API
service.
Args:
request: (FirestoreProjectsDatabasesOperationsGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleLongrunningOperation) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/databases/{databasesId}/operations/{operationsId}',
http_method='GET',
method_id='firestore.projects.databases.operations.get',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}',
request_field='',
request_type_name='FirestoreProjectsDatabasesOperationsGetRequest',
response_type_name='GoogleLongrunningOperation',
supports_download=False,
)
def List(self, request, global_params=None):
r"""Lists operations that match the specified filter in the request. If the.
server doesn't support this method, it returns `UNIMPLEMENTED`.
NOTE: the `name` binding allows API services to override the binding
to use different resource name schemes, such as `users/*/operations`. To
override the binding, API services can add a binding such as
`"/v1/{name=users/*}/operations"` to their service configuration.
For backwards compatibility, the default name includes the operations
collection id, however overriding users must ensure the name binding
is the parent resource, without the operations collection id.
Args:
request: (FirestoreProjectsDatabasesOperationsListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleLongrunningListOperationsResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/databases/{databasesId}/operations',
http_method='GET',
method_id='firestore.projects.databases.operations.list',
ordered_params=['name'],
path_params=['name'],
query_params=['filter', 'pageSize', 'pageToken'],
relative_path='v1/{+name}/operations',
request_field='',
request_type_name='FirestoreProjectsDatabasesOperationsListRequest',
response_type_name='GoogleLongrunningListOperationsResponse',
supports_download=False,
)
class ProjectsDatabasesService(base_api.BaseApiService):
"""Service class for the projects_databases resource."""
_NAME = 'projects_databases'
def __init__(self, client):
super(FirestoreV1.ProjectsDatabasesService, self).__init__(client)
self._upload_configs = {
}
def ExportDocuments(self, request, global_params=None):
r"""Exports a copy of all or a subset of documents from Google Cloud Firestore.
to another storage system, such as Google Cloud Storage. Recent updates to
documents may not be reflected in the export. The export occurs in the
background and its progress can be monitored and managed via the
Operation resource that is created. The output of an export may only be
used once the associated operation is done. If an export operation is
cancelled before completion it may leave partial data behind in Google
Cloud Storage.
Args:
request: (FirestoreProjectsDatabasesExportDocumentsRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleLongrunningOperation) The response message.
"""
config = self.GetMethodConfig('ExportDocuments')
return self._RunMethod(
config, request, global_params=global_params)
ExportDocuments.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/databases/{databasesId}:exportDocuments',
http_method='POST',
method_id='firestore.projects.databases.exportDocuments',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}:exportDocuments',
request_field='googleFirestoreAdminV1ExportDocumentsRequest',
request_type_name='FirestoreProjectsDatabasesExportDocumentsRequest',
response_type_name='GoogleLongrunningOperation',
supports_download=False,
)
def ImportDocuments(self, request, global_params=None):
r"""Imports documents into Google Cloud Firestore. Existing documents with the.
same name are overwritten. The import occurs in the background and its
progress can be monitored and managed via the Operation resource that is
created. If an ImportDocuments operation is cancelled, it is possible
that a subset of the data has already been imported to Cloud Firestore.
Args:
request: (FirestoreProjectsDatabasesImportDocumentsRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleLongrunningOperation) The response message.
"""
config = self.GetMethodConfig('ImportDocuments')
return self._RunMethod(
config, request, global_params=global_params)
ImportDocuments.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/databases/{databasesId}:importDocuments',
http_method='POST',
method_id='firestore.projects.databases.importDocuments',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}:importDocuments',
request_field='googleFirestoreAdminV1ImportDocumentsRequest',
request_type_name='FirestoreProjectsDatabasesImportDocumentsRequest',
response_type_name='GoogleLongrunningOperation',
supports_download=False,
)
class ProjectsLocationsService(base_api.BaseApiService):
"""Service class for the projects_locations resource."""
_NAME = 'projects_locations'
def __init__(self, client):
super(FirestoreV1.ProjectsLocationsService, self).__init__(client)
self._upload_configs = {
}
def Get(self, request, global_params=None):
r"""Gets information about a location.
Args:
request: (FirestoreProjectsLocationsGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Location) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}',
http_method='GET',
method_id='firestore.projects.locations.get',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}',
request_field='',
request_type_name='FirestoreProjectsLocationsGetRequest',
response_type_name='Location',
supports_download=False,
)
def List(self, request, global_params=None):
r"""Lists information about the supported locations for this service.
Args:
request: (FirestoreProjectsLocationsListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListLocationsResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations',
http_method='GET',
method_id='firestore.projects.locations.list',
ordered_params=['name'],
path_params=['name'],
query_params=['filter', 'pageSize', 'pageToken'],
relative_path='v1/{+name}/locations',
request_field='',
request_type_name='FirestoreProjectsLocationsListRequest',
response_type_name='ListLocationsResponse',
supports_download=False,
)
class ProjectsService(base_api.BaseApiService):
"""Service class for the projects resource."""
_NAME = 'projects'
def __init__(self, client):
super(FirestoreV1.ProjectsService, self).__init__(client)
self._upload_configs = {
}

# ---- file: /iblrig/check_sync_pulses.py | repo: k1o0/iblrig | license: MIT ----

#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Author: Niccolò Bonacchi
# @Date: Monday, February 25th 2019, 2:10:38 pm
import logging
import sys
from pathlib import Path
import ibllib.io.raw_data_loaders as raw
import matplotlib.pyplot as plt
import numpy as np
from iblrig.misc import get_port_events
log = logging.getLogger("iblrig")
def sync_check(tph):
events = tph.behavior_data["Events timestamps"]
ev_bnc1 = get_port_events(events, name="BNC1")
ev_bnc2 = get_port_events(events, name="BNC2")
ev_port1 = get_port_events(events, name="Port1")
NOT_FOUND = "COULD NOT FIND DATA ON {}"
bnc1_msg = NOT_FOUND.format("BNC1") if not ev_bnc1 else "OK"
bnc2_msg = NOT_FOUND.format("BNC2") if not ev_bnc2 else "OK"
port1_msg = NOT_FOUND.format("Port1") if not ev_port1 else "OK"
warn_msg = f"""
##########################################
NOT FOUND: SYNC PULSES
##########################################
VISUAL STIMULUS SYNC: {bnc1_msg}
SOUND SYNC: {bnc2_msg}
CAMERA SYNC: {port1_msg}
##########################################"""
if not ev_bnc1 or not ev_bnc2 or not ev_port1:
log.warning(warn_msg)
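# Usage sketch (assumes `tph` is a loaded trial handler exposing Bpod-style
# 'behavior_data', as used elsewhere in this rig's code):
#
#     sync_check(tph)  # logs the warning block if BNC1/BNC2/Port1 lack events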
if __name__ == "__main__":
    if len(sys.argv) == 1:
        print("I need a file name...")
        sys.exit(1)
session_data_file = Path(sys.argv[1])
if not session_data_file.exists():
raise FileNotFoundError(f"{session_data_file}")
if session_data_file.name.endswith(".jsonable"):
data = raw.load_data(session_data_file.parent.parent)
else:
try:
data = raw.load_data(session_data_file)
except Exception:
print("Not a file or a valid session folder")
unsynced_trial_count = 0
frame2ttl = []
sound = []
camera = []
trial_end = []
for trial_data in data:
tevents = trial_data["behavior_data"]["Events timestamps"]
ev_bnc1 = get_port_events(tevents, name="BNC1")
ev_bnc2 = get_port_events(tevents, name="BNC2")
ev_port1 = get_port_events(tevents, name="Port1")
if not ev_bnc1 or not ev_bnc2 or not ev_port1:
unsynced_trial_count += 1
frame2ttl.extend(ev_bnc1)
sound.extend(ev_bnc2)
camera.extend(ev_port1)
trial_end.append(trial_data["behavior_data"]["Trial end timestamp"])
print(f"Found {unsynced_trial_count} trials with bad sync data")
f = plt.figure() # figsize=(19.2, 10.8), dpi=100)
ax = plt.subplot2grid((1, 1), (0, 0), rowspan=1, colspan=1)
ax.plot(camera, np.ones(len(camera)) * 1, "|")
ax.plot(sound, np.ones(len(sound)) * 2, "|")
ax.plot(frame2ttl, np.ones(len(frame2ttl)) * 3, "|")
[ax.axvline(t, alpha=0.5) for t in trial_end]
ax.set_ylim([0, 4])
ax.set_yticks(range(4))
ax.set_yticklabels(["", "camera", "sound", "frame2ttl"])
plt.show()

# ---- file: /build/view_environments/post_create_/create_post/create_post.py | repo: bharatmudragada/fb_post | license: none stated ----

from django_swagger_utils.drf_server.decorators.request_response import request_response
from django_swagger_utils.drf_server.default.parser_mapping import PARSER_MAPPING
from django_swagger_utils.drf_server.default.renderer_mapping import RENDERER_MAPPING
from fb_post.build.serializers.definitions.PostContent.PostContentSerializer import PostContentSerializer
from fb_post.build.serializers.definitions.PostId.PostIdSerializer import PostIdSerializer
options = {
'METHOD': 'POST',
'REQUEST_WRAPPING_REQUIRED': True,
'REQUEST_ENCRYPTION_REQUIRED': False,
'REQUEST_IS_PARTIAL': False,
'PARSER_CLASSES': [
PARSER_MAPPING["application/json"]
],
'RENDERER_CLASSES': [
RENDERER_MAPPING["application/json"]
],
'REQUEST_QUERY_PARAMS_SERIALIZER': None,
'REQUEST_HEADERS_SERIALIZER': None,
'REQUEST_SERIALIZER': PostContentSerializer,
'REQUEST_SERIALIZER_MANY_ITEMS': False,
'RESPONSE': {
'201' : {
'RESPONSE_SERIALIZER': PostIdSerializer,
'RESPONSE_SERIALIZER_MANY_ITEMS': False,
'HEADERS_SERIALIZER': None,
}
,
'400' : {
'RESPONSE_SERIALIZER': None,
'RESPONSE_SERIALIZER_MANY_ITEMS': False,
'HEADERS_SERIALIZER': None,
}
},
"SECURITY":{
"oauth" : [
"write"
]
}
}
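# Roughly, the options above drive the request_response decorator: the JSON
# body is parsed and validated with PostContentSerializer, and the view's
# return value is serialized per status code (e.g. PostIdSerializer on 201).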
app_name = "fb_post"
operation_id = "create_post"
group_name = ""
@request_response(options=options, app_name=app_name, operation_id=operation_id, group_name=group_name)
def create_post(request, *args, **kwargs):
args = (request,) + args
from django_swagger_utils.drf_server.wrappers.view_env_wrapper import view_env_wrapper
return view_env_wrapper(app_name, "create_post", group_name, *args, **kwargs)

# ---- file: /src/data_loader.py | repo: tiffany70072/TokenPositioning | license: none stated ----

import numpy as np
import os
from sklearn.model_selection import train_test_split
def load_data(task, data_name, data_type):
if task == "autoenc-last" or task == 'token-posi':
        assert data_type in ("train", "valid"), "unknown data type."
data_path = os.path.join("../data", data_name)
encoder_data = np.load(os.path.join(data_path, "encoder_%s.npy" % data_type))
decoder_data = np.load(os.path.join(data_path, "decoder_%s.npy" % data_type))
assert encoder_data.shape[0] == decoder_data.shape[0], "data size not match."
decoder_output = set_decoder_output_data(decoder_data)
return encoder_data, decoder_data, decoder_output
    else:
        raise ValueError("No such task for load_data: %s" % task)
def set_decoder_output_data(decoder_input):
    # Reshape the 2D array into a 3D array for Keras training.
    # Shift by one time step, because the decoder output leads the decoder input by one token.
decoder_output = decoder_input.copy()
for i in range(len(decoder_output)):
decoder_output[i, :-1] = decoder_input[i, 1:] # Remove the first token in decoder output.
decoder_output[i, -1] *= 0
decoder_output = np.reshape(decoder_output, [decoder_output.shape[0], decoder_output.shape[1], 1])
return decoder_output
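# Shape sketch (hypothetical tokens): for a decoder_input row [GO, a, b, EOS],
# the returned target row is [a, b, EOS, 0], i.e. the same sequence shifted
# one step left with the last slot zeroed, reshaped to (n_samples, seq_len, 1).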
"""
def cut_validation(self):
# TODO: cut training, validation and testing
split_result = data_reader.data_split(self.encoder_in, self.decoder_in, self.decoder_out)
self.encoder_in = split_result[0]
self.decoder_in = split_result[1]
self.decoder_out = split_result[2]
self.encoder_in_valid = split_result[3][:50000] # TODO: Deal with too many data.
self.decoder_in_valid = split_result[4][:50000]
self.decoder_out_valid = split_result[5][:50000]
self.encoder_in_test = split_result[6]
self.decoder_in_test = split_result[7]
self.decoder_out_test = split_result[8]
self.encoder_in = split_result[0]#[:3000]
self.decoder_in = split_result[1]#[:3000]
self.decoder_out = split_result[2]#[:3000]
print("(Cut validation) training size:", self.encoder_in.shape)
print("(Cut validation) validation size:", self.encoder_in_valid.shape)
print("(Cut validation) testing size:", self.encoder_in_test.shape)
""" | [
"[email protected]"
] | |
84fdc9040b3bcc55c94270233da3cce4c9b669d5 | babc56e88a3b5f5038be70ad676d5bd8f1bbf0d2 | /wind_direction_byo.py | 94bc6600dd5986d16cb2cf6d96ba20ac2a7f7738 | [] | no_license | VicenteYago/CustomWeatherStation | 873405ca16aa0b6f4f291cbc0068a6ea10aef745 | c655f947cca2cd0f8827c18f6f7a7c4c11ef4d43 | refs/heads/master | 2022-11-13T06:48:05.736830 | 2020-06-30T00:43:07 | 2020-06-30T00:43:07 | 269,812,727 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,408 | py | from gpiozero import MCP3008
import time
import math
adc = MCP3008(channel=0)
count = 0
values = []
volts = [0.4, 1.4, 1.2, 2.8,
2.9, 2.2, 2.5, 1.8,
2.0, 0.7, 0.8, 0.1,
0.3, 0.2, 0.6, 2.7]
volts_dic = {
0.4: 0.0,
1.4: 22.5,
1.2: 45.0,
2.8: 67.5,
2.7: 90.5,
2.9: 112.5,
2.2: 135.0,
2.5: 157.5,
1.8: 180.0,
2.0: 202.5,
0.7: 225.0,
0.8: 247.5,
0.1: 270.0,
0.3: 292.5,
0.2: 315.0,
0.6: 337.5
}
def get_average(angles):
sin_sum = 0.0
cos_sum = 0.0
for angle in angles:
r = math.radians(angle)
sin_sum += math.sin(r)
cos_sum += math.cos(r)
flen = float(len(angles))
s = sin_sum / flen
c = cos_sum / flen
arc = math.degrees(math.atan(s / c))
average = 0.0
if s > 0 and c > 0:
average = arc
elif c < 0:
average = arc + 180
elif s < 0 and c > 0:
average = arc + 360
return 0.0 if average == 360 else average
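# Equivalent formulation (sketch): math.atan2 folds the quadrant fix-ups and
# the c == 0 edge case into a single call:
#   average = math.degrees(math.atan2(s, c)) % 360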
def get_value(length=5):
    data = []
    print("Measuring wind direction for %d seconds..." % length)
    start_time = time.time()
    while time.time() - start_time <= length:
        # the MCP3008 reading is a 0..1 fraction; scale it to volts on the 3.3 V reference
        wind = round(adc.value * 3.3, 1)
        if wind not in volts_dic:
            print("Unknown value:", str(wind))
        else:
            data.append(volts_dic[wind])
    return get_average(data)
while True:
print(get_value())
| [
"="
] | = |
b676c5cba48c2e1efd64286543f5f6aadfef51fd | ec0b8bfe19b03e9c3bb13d9cfa9bd328fb9ca3f1 | /res/packages/scripts/scripts/common/wotdecorators.py | 1554469a75cbd2eab8d57565f8457da484b5051a | [] | no_license | webiumsk/WOT-0.9.20.0 | de3d7441c5d442f085c47a89fa58a83f1cd783f2 | 811cb4e1bca271372a1d837a268b6e0e915368bc | refs/heads/master | 2021-01-20T22:11:45.505844 | 2017-08-29T20:11:38 | 2017-08-29T20:11:38 | 101,803,045 | 0 | 1 | null | null | null | null | WINDOWS-1250 | Python | false | false | 2,832 | py | # 2017.08.29 21:52:48 Střední Evropa (letní čas)
# Embedded file name: scripts/common/wotdecorators.py
import inspect
from functools import update_wrapper
from debug_utils import LOG_WRAPPED_CURRENT_EXCEPTION, CRITICAL_ERROR
from time_tracking import LOG_TIME_WARNING
import time
import time_tracking
def noexcept(func):
def wrapper(*args, **kwArgs):
try:
return func(*args, **kwArgs)
except:
LOG_WRAPPED_CURRENT_EXCEPTION(wrapper.__name__, func.__name__, func.func_code.co_filename, func.func_code.co_firstlineno + 1)
return wrapper
def nofail(func):
def wrapper(*args, **kwArgs):
try:
return func(*args, **kwArgs)
except:
LOG_WRAPPED_CURRENT_EXCEPTION(wrapper.__name__, func.__name__, func.func_code.co_filename, func.func_code.co_firstlineno + 1)
CRITICAL_ERROR('Exception in no-fail code')
return wrapper
def exposedtoclient(func):
def wrapper(*args, **kwArgs):
try:
lastTick = time.time()
result = func(*args, **kwArgs)
timeSinceLastTick = time.time() - lastTick
if timeSinceLastTick > time_tracking.DEFAULT_TIME_LIMIT:
LOG_TIME_WARNING(timeSinceLastTick, context=(getattr(args[0], 'id', 0),
func.__name__,
args,
kwArgs))
return result
except:
LOG_WRAPPED_CURRENT_EXCEPTION(wrapper.__name__, func.__name__, func.func_code.co_filename, func.func_code.co_firstlineno + 1)
return wrapper
def singleton(cls):
return cls()
def decorate(func, dec):
argspec = inspect.getargspec(func)
name = func.__name__
signature = inspect.formatargspec(*argspec)
params = inspect.formatargspec(formatvalue=(lambda value: ''), *argspec)
source = 'def %s%s: return __dec%s\n' % (name, signature, params)
code = compile(source, '<decorator-gen>', 'single')
env = {'__dec': dec}
eval(code, env)
return update_wrapper(env[name], func)
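# Unlike a plain functools.wraps wrapper, decorate() compiles a shim whose
# signature matches the wrapped function exactly (the same trick the
# third-party `decorator` library uses), so signature-based introspection
# still sees the real argument list.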
def decorator(dec):
def wrapper(func):
return decorate(func, dec(func))
return wrapper
def condition(attributeName, logFunc = None, logStack = True):
def decorator(func):
def wrapper(*args, **kwargs):
attribute = getattr(args[0], attributeName)
if not bool(attribute):
if logFunc:
logFunc('Method condition failed', args, kwargs, stack=logStack)
return
return func(*args, **kwargs)
return decorate(func, wrapper)
return decorator
# okay decompyling c:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\common\wotdecorators.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.08.29 21:52:48 Střední Evropa (letní čas)
| [
"[email protected]"
] | |
c177f0da14bb7731c15a9e25ad35b2bb78f5ca63 | 3d2192385e65889d20b74742755f5369d0d09161 | /stock_colis/models/__init__.py | da8dece232489928427446f10dfd1d1af8ea259d | [] | no_license | FIDINGSARL/audoune | 9ba746a9d7424a41f8775a6e30f42f2a97224edf | 39cecd44497d5fa227cc594a6bf5807eb14976d3 | refs/heads/main | 2023-06-18T09:49:13.778878 | 2021-06-30T15:06:51 | 2021-06-30T15:06:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 72 | py | # -*- coding: utf-8 -*-
from . import stock_colis, stock_colis_request
| [
"[email protected]"
] | |
218046a18f59c8cc6a566f6a16807e74d5250298 | a4e502e9487cf17c53f9f931ec0dbc12168fea52 | /packages/pyre/platforms/PackageManager.py | 0877270914d7a2f1326787f57abfbb1ac0125b31 | [
"BSD-3-Clause"
] | permissive | bryanvriel/pyre | bdc5dd59c46d53ff81f2ece532b9073ac3b65be1 | 179359634a7091979cced427b6133dd0ec4726ea | refs/heads/master | 2021-09-28T00:10:26.454282 | 2018-11-11T16:42:07 | 2018-11-11T16:42:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,373 | py | # -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2018 all rights reserved
#
# the framework
import pyre
# declaration
class PackageManager(pyre.protocol, family='pyre.platforms.packagers'):
"""
Encapsulation of host specific information
"""
# requirements
@pyre.provides
def prefix(self):
"""
The package manager install location
"""
@pyre.provides
def installed(self):
"""
Retrieve available information for all installed packages
"""
@pyre.provides
def packages(self, category):
"""
Provide a sequence of package names that provide compatible installations for the given
package {category}. If the package manager provides a way for the user to select a
specific installation as the default, care should be taken to rank the sequence
appropriately.
"""
@pyre.provides
def info(self, package):
"""
Return information about the given {package}
The type of information returned is determined by the package manager. This method
should return success if and only if {package} is actually fully installed.
"""
@pyre.provides
def contents(self, package):
"""
Generate a sequence of the contents of {package}
The type of information returned is determined by the package manager. Typically, it
contains the list of files that are installed by this package, but it may contain other
filesystem entities as well. This method should return a non-empty sequence if and only
if {pakage} is actually fully installed
"""
@pyre.provides
def configure(self, packageInstance):
"""
Dispatch to the {packageInstance} configuration procedure that is specific to the
particular implementation of this protocol
"""
# framework obligations
@classmethod
def pyre_default(cls, **kwds):
"""
Build the preferred host implementation
"""
# the host should specify a sensible default; if there is nothing there, this is an
# unmanaged system that relies on environment variables and standard locations
from .Bare import Bare
# return the support for unmanaged systems
return Bare
# end of file
| [
"[email protected]"
] | |
68caed12611a8b789a1964a22fb49575eca70c7f | 76d388b5d2e74ff0eda748c7868fadf0704cf700 | /tensorpack/utils/develop.py | 496de1dd245db766c3e4ba256ddb638d5e621b48 | [
"Apache-2.0"
] | permissive | jooyounghun/tensorpack | eebf0867e5a82ffd52660dccfbd34879b8d0f5af | 90cdae380c40a1e91f627520c4a739bd6ee3f18b | refs/heads/master | 2020-03-23T23:24:41.651089 | 2018-07-27T02:57:19 | 2018-07-27T02:57:19 | 142,232,523 | 1 | 0 | Apache-2.0 | 2018-07-25T01:45:06 | 2018-07-25T01:45:05 | null | UTF-8 | Python | false | false | 4,773 | py | # -*- coding: utf-8 -*-
# File: develop.py
# Author: tensorpack contributors
""" Utilities for developers only.
These are not visible to users (not automatically imported). And should not
appeared in docs."""
import os
import functools
from datetime import datetime
import importlib
import types
import six
from . import logger
def create_dummy_class(klass, dependency):
"""
When a dependency of a class is not available, create a dummy class which throws ImportError when used.
Args:
klass (str): name of the class.
dependency (str): name of the dependency.
Returns:
class: a class object
"""
class _DummyMetaClass(type):
# throw error on class attribute access
def __getattr__(_, __):
raise ImportError("Cannot import '{}', therefore '{}' is not available".format(dependency, klass))
@six.add_metaclass(_DummyMetaClass)
class _Dummy(object):
# throw error on constructor
def __init__(self, *args, **kwargs):
raise ImportError("Cannot import '{}', therefore '{}' is not available".format(dependency, klass))
return _Dummy
def create_dummy_func(func, dependency):
"""
When a dependency of a function is not available, create a dummy function which throws ImportError when used.
Args:
func (str): name of the function.
dependency (str or list[str]): name(s) of the dependency.
Returns:
function: a function object
"""
if isinstance(dependency, (list, tuple)):
dependency = ','.join(dependency)
def _dummy(*args, **kwargs):
raise ImportError("Cannot import '{}', therefore '{}' is not available".format(dependency, func))
return _dummy
def building_rtfd():
"""
Returns:
bool: if tensorpack is being imported to generate docs now.
"""
return os.environ.get('READTHEDOCS') == 'True' \
or os.environ.get('DOC_BUILDING')
def log_deprecated(name="", text="", eos=""):
"""
Log deprecation warning.
Args:
name (str): name of the deprecated item.
text (str, optional): information about the deprecation.
eos (str, optional): end of service date such as "YYYY-MM-DD".
"""
assert name or text
if eos:
eos = "after " + datetime(*map(int, eos.split("-"))).strftime("%d %b")
if name:
if eos:
warn_msg = "%s will be deprecated %s. %s" % (name, eos, text)
else:
warn_msg = "%s was deprecated. %s" % (name, text)
else:
warn_msg = text
if eos:
warn_msg += " Legacy period ends %s" % eos
logger.warn("[Deprecated] " + warn_msg)
def deprecated(text="", eos=""):
"""
Args:
text, eos: same as :func:`log_deprecated`.
Returns:
a decorator which deprecates the function.
Example:
.. code-block:: python
@deprecated("Explanation of what to do instead.", "2017-11-4")
def foo(...):
pass
"""
def get_location():
import inspect
frame = inspect.currentframe()
if frame:
callstack = inspect.getouterframes(frame)[-1]
return '%s:%i' % (callstack[1], callstack[2])
else:
stack = inspect.stack(0)
entry = stack[2]
return '%s:%i' % (entry[1], entry[2])
def deprecated_inner(func):
@functools.wraps(func)
def new_func(*args, **kwargs):
name = "{} [{}]".format(func.__name__, get_location())
log_deprecated(name, text, eos)
return func(*args, **kwargs)
return new_func
return deprecated_inner
def HIDE_DOC(func):
func.__HIDE_SPHINX_DOC__ = True
return func
# Copied from https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/util/lazy_loader.py
class LazyLoader(types.ModuleType):
def __init__(self, local_name, parent_module_globals, name):
self._local_name = local_name
self._parent_module_globals = parent_module_globals
super(LazyLoader, self).__init__(name)
def _load(self):
# Import the target module and insert it into the parent's namespace
module = importlib.import_module(self.__name__)
self._parent_module_globals[self._local_name] = module
# Update this object's dict so that if someone keeps a reference to the
# LazyLoader, lookups are efficient (__getattr__ is only called on lookups
# that fail).
self.__dict__.update(module.__dict__)
return module
def __getattr__(self, item):
module = self._load()
return getattr(module, item)
def __dir__(self):
module = self._load()
return dir(module)
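# Usage sketch (the module path here is illustrative):
#   contrib = LazyLoader('contrib', globals(), 'tensorpack.contrib')
# The real import is deferred until the first attribute access on `contrib`.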
| [
"[email protected]"
] | |
820708161506216faa57b389f2f0890d60afef5d | 64bf39b96a014b5d3f69b3311430185c64a7ff0e | /intro-ansible/venv3/lib/python3.8/site-packages/ansible/modules/cron.py | 2424f5c065543ddd96be359b69a92e58495389fd | [
"MIT"
] | permissive | SimonFangCisco/dne-dna-code | 7072eba7da0389e37507b7a2aa5f7d0c0735a220 | 2ea7d4f00212f502bc684ac257371ada73da1ca9 | refs/heads/master | 2023-03-10T23:10:31.392558 | 2021-02-25T15:04:36 | 2021-02-25T15:04:36 | 342,274,373 | 0 | 0 | MIT | 2021-02-25T14:39:22 | 2021-02-25T14:39:22 | null | UTF-8 | Python | false | false | 26,537 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Dane Summers <[email protected]>
# Copyright: (c) 2013, Mike Grozak <[email protected]>
# Copyright: (c) 2013, Patrick Callahan <[email protected]>
# Copyright: (c) 2015, Evan Kaufman <[email protected]>
# Copyright: (c) 2015, Luca Berruti <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: cron
short_description: Manage cron.d and crontab entries
description:
- Use this module to manage crontab and environment variables entries. This module allows
you to create environment variables and named crontab entries, update, or delete them.
- 'When crontab jobs are managed: the module includes one line with the description of the
crontab entry C("#Ansible: <name>") corresponding to the "name" passed to the module,
which is used by future ansible/module calls to find/check the state. The "name"
parameter should be unique, and changing the "name" value will result in a new cron
task being created (or a different one being removed).'
- When environment variables are managed, no comment line is added, but, when the module
needs to find/check the state, it uses the "name" parameter to find the environment
variable definition line.
- When using symbols such as %, they must be properly escaped.
version_added: "0.9"
options:
name:
description:
- Description of a crontab entry or, if env is set, the name of environment variable.
- Required if I(state=absent).
- Note that if name is not set and I(state=present), then a
new crontab entry will always be created, regardless of existing ones.
- This parameter will always be required in future releases.
type: str
user:
description:
- The specific user whose crontab should be modified.
- When unset, this parameter defaults to the current user.
type: str
job:
description:
- The command to execute or, if env is set, the value of environment variable.
- The command should not contain line breaks.
- Required if I(state=present).
type: str
aliases: [ value ]
state:
description:
- Whether to ensure the job or environment variable is present or absent.
type: str
choices: [ absent, present ]
default: present
cron_file:
description:
- If specified, uses this file instead of an individual user's crontab.
- If this is a relative path, it is interpreted with respect to I(/etc/cron.d).
- If it is absolute, it will typically be C(/etc/crontab).
- Many linux distros expect (and some require) the filename portion to consist solely
of upper- and lower-case letters, digits, underscores, and hyphens.
- To use the I(cron_file) parameter you must specify the I(user) as well.
type: str
backup:
description:
- If set, create a backup of the crontab before it is modified.
The location of the backup is returned in the C(backup_file) variable by this module.
type: bool
default: no
minute:
description:
- Minute when the job should run (C(0-59), C(*), C(*/2), and so on).
type: str
default: "*"
hour:
description:
- Hour when the job should run (C(0-23), C(*), C(*/2), and so on).
type: str
default: "*"
day:
description:
- Day of the month the job should run (C(1-31), C(*), C(*/2), and so on).
type: str
default: "*"
aliases: [ dom ]
month:
description:
- Month of the year the job should run (C(1-12), C(*), C(*/2), and so on).
type: str
default: "*"
weekday:
description:
- Day of the week that the job should run (C(0-6) for Sunday-Saturday, C(*), and so on).
type: str
default: "*"
aliases: [ dow ]
reboot:
description:
- If the job should be run at reboot. This option is deprecated. Users should use I(special_time).
version_added: "1.0"
type: bool
default: no
special_time:
description:
- Special time specification nickname.
type: str
choices: [ annually, daily, hourly, monthly, reboot, weekly, yearly ]
version_added: "1.3"
disabled:
description:
- If the job should be disabled (commented out) in the crontab.
- Only has effect if I(state=present).
type: bool
default: no
version_added: "2.0"
env:
description:
- If set, manages a crontab's environment variable.
- New variables are added on top of crontab.
- I(name) and I(value) parameters are the name and the value of environment variable.
type: bool
default: false
version_added: "2.1"
insertafter:
description:
- Used with I(state=present) and I(env).
- If specified, the environment variable will be inserted after the declaration of specified environment variable.
type: str
version_added: "2.1"
insertbefore:
description:
- Used with I(state=present) and I(env).
- If specified, the environment variable will be inserted before the declaration of specified environment variable.
type: str
version_added: "2.1"
requirements:
- cron (or cronie on CentOS)
author:
- Dane Summers (@dsummersl)
- Mike Grozak (@rhaido)
- Patrick Callahan (@dirtyharrycallahan)
- Evan Kaufman (@EvanK)
- Luca Berruti (@lberruti)
notes:
- Supports C(check_mode).
'''
EXAMPLES = r'''
- name: Ensure a job that runs at 2 and 5 exists. Creates an entry like "0 5,2 * * ls -alh > /dev/null"
ansible.builtin.cron:
name: "check dirs"
minute: "0"
hour: "5,2"
job: "ls -alh > /dev/null"
- name: 'Ensure an old job is no longer present. Removes any job that is prefixed by "#Ansible: an old job" from the crontab'
ansible.builtin.cron:
name: "an old job"
state: absent
- name: Creates an entry like "@reboot /some/job.sh"
ansible.builtin.cron:
name: "a job for reboot"
special_time: reboot
job: "/some/job.sh"
- name: Creates an entry like "PATH=/opt/bin" on top of crontab
ansible.builtin.cron:
name: PATH
env: yes
job: /opt/bin
- name: Creates an entry like "APP_HOME=/srv/app" and insert it after PATH declaration
ansible.builtin.cron:
name: APP_HOME
env: yes
job: /srv/app
insertafter: PATH
- name: Creates a cron file under /etc/cron.d
ansible.builtin.cron:
name: yum autoupdate
weekday: "2"
minute: "0"
hour: "12"
user: root
job: "YUMINTERACTIVE=0 /usr/sbin/yum-autoupdate"
cron_file: ansible_yum-autoupdate
- name: Removes a cron file from under /etc/cron.d
ansible.builtin.cron:
name: "yum autoupdate"
cron_file: ansible_yum-autoupdate
state: absent
- name: Removes "APP_HOME" environment variable from crontab
ansible.builtin.cron:
name: APP_HOME
env: yes
state: absent
'''
RETURN = r'''#'''
import os
import platform
import pwd
import re
import sys
import tempfile
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_bytes, to_native
from ansible.module_utils.six.moves import shlex_quote
class CronTabError(Exception):
pass
class CronTab(object):
"""
CronTab object to write time based crontab file
user - the user of the crontab (defaults to current user)
cron_file - a cron file under /etc/cron.d, or an absolute path
"""
def __init__(self, module, user=None, cron_file=None):
self.module = module
self.user = user
self.root = (os.getuid() == 0)
self.lines = None
self.ansible = "#Ansible: "
self.n_existing = ''
self.cron_cmd = self.module.get_bin_path('crontab', required=True)
if cron_file:
if os.path.isabs(cron_file):
self.cron_file = cron_file
self.b_cron_file = to_bytes(cron_file, errors='surrogate_or_strict')
else:
self.cron_file = os.path.join('/etc/cron.d', cron_file)
self.b_cron_file = os.path.join(b'/etc/cron.d', to_bytes(cron_file, errors='surrogate_or_strict'))
else:
self.cron_file = None
self.read()
def read(self):
# Read in the crontab from the system
self.lines = []
if self.cron_file:
# read the cronfile
try:
f = open(self.b_cron_file, 'rb')
self.n_existing = to_native(f.read(), errors='surrogate_or_strict')
self.lines = self.n_existing.splitlines()
f.close()
except IOError:
# cron file does not exist
return
except Exception:
raise CronTabError("Unexpected error:", sys.exc_info()[0])
else:
# using safely quoted shell for now, but this really should be two non-shell calls instead. FIXME
(rc, out, err) = self.module.run_command(self._read_user_execute(), use_unsafe_shell=True)
if rc != 0 and rc != 1: # 1 can mean that there are no jobs.
raise CronTabError("Unable to read crontab")
self.n_existing = out
lines = out.splitlines()
count = 0
for l in lines:
if count > 2 or (not re.match(r'# DO NOT EDIT THIS FILE - edit the master and reinstall.', l) and
not re.match(r'# \(/tmp/.*installed on.*\)', l) and
not re.match(r'# \(.*version.*\)', l)):
self.lines.append(l)
else:
pattern = re.escape(l) + '[\r\n]?'
self.n_existing = re.sub(pattern, '', self.n_existing, 1)
count += 1
def is_empty(self):
if len(self.lines) == 0:
return True
else:
return False
def write(self, backup_file=None):
"""
Write the crontab to the system. Saves all information.
"""
if backup_file:
fileh = open(backup_file, 'wb')
elif self.cron_file:
fileh = open(self.b_cron_file, 'wb')
else:
filed, path = tempfile.mkstemp(prefix='crontab')
os.chmod(path, int('0644', 8))
fileh = os.fdopen(filed, 'wb')
fileh.write(to_bytes(self.render()))
fileh.close()
# return if making a backup
if backup_file:
return
# Add the entire crontab back to the user crontab
if not self.cron_file:
# quoting shell args for now but really this should be two non-shell calls. FIXME
(rc, out, err) = self.module.run_command(self._write_execute(path), use_unsafe_shell=True)
os.unlink(path)
if rc != 0:
self.module.fail_json(msg=err)
# set SELinux permissions
if self.module.selinux_enabled() and self.cron_file:
self.module.set_default_selinux_context(self.cron_file, False)
def do_comment(self, name):
return "%s%s" % (self.ansible, name)
def add_job(self, name, job):
# Add the comment
self.lines.append(self.do_comment(name))
# Add the job
self.lines.append("%s" % (job))
def update_job(self, name, job):
return self._update_job(name, job, self.do_add_job)
def do_add_job(self, lines, comment, job):
lines.append(comment)
lines.append("%s" % (job))
def remove_job(self, name):
return self._update_job(name, "", self.do_remove_job)
def do_remove_job(self, lines, comment, job):
return None
def add_env(self, decl, insertafter=None, insertbefore=None):
if not (insertafter or insertbefore):
self.lines.insert(0, decl)
return
if insertafter:
other_name = insertafter
elif insertbefore:
other_name = insertbefore
other_decl = self.find_env(other_name)
if len(other_decl) > 0:
if insertafter:
index = other_decl[0] + 1
elif insertbefore:
index = other_decl[0]
self.lines.insert(index, decl)
return
self.module.fail_json(msg="Variable named '%s' not found." % other_name)
def update_env(self, name, decl):
return self._update_env(name, decl, self.do_add_env)
def do_add_env(self, lines, decl):
lines.append(decl)
def remove_env(self, name):
return self._update_env(name, '', self.do_remove_env)
def do_remove_env(self, lines, decl):
return None
def remove_job_file(self):
try:
os.unlink(self.cron_file)
return True
except OSError:
# cron file does not exist
return False
except Exception:
raise CronTabError("Unexpected error:", sys.exc_info()[0])
def find_job(self, name, job=None):
# attempt to find job by 'Ansible:' header comment
comment = None
for l in self.lines:
if comment is not None:
if comment == name:
return [comment, l]
else:
comment = None
elif re.match(r'%s' % self.ansible, l):
comment = re.sub(r'%s' % self.ansible, '', l)
# failing that, attempt to find job by exact match
if job:
for i, l in enumerate(self.lines):
if l == job:
# if no leading ansible header, insert one
if not re.match(r'%s' % self.ansible, self.lines[i - 1]):
self.lines.insert(i, self.do_comment(name))
return [self.lines[i], l, True]
# if a leading blank ansible header AND job has a name, update header
elif name and self.lines[i - 1] == self.do_comment(None):
self.lines[i - 1] = self.do_comment(name)
return [self.lines[i - 1], l, True]
return []
def find_env(self, name):
for index, l in enumerate(self.lines):
if re.match(r'^%s=' % name, l):
return [index, l]
return []
def get_cron_job(self, minute, hour, day, month, weekday, job, special, disabled):
# normalize any leading/trailing newlines (ansible/ansible-modules-core#3791)
job = job.strip('\r\n')
if disabled:
disable_prefix = '#'
else:
disable_prefix = ''
if special:
if self.cron_file:
return "%s@%s %s %s" % (disable_prefix, special, self.user, job)
else:
return "%s@%s %s" % (disable_prefix, special, job)
else:
if self.cron_file:
return "%s%s %s %s %s %s %s %s" % (disable_prefix, minute, hour, day, month, weekday, self.user, job)
else:
return "%s%s %s %s %s %s %s" % (disable_prefix, minute, hour, day, month, weekday, job)
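    # Rendered forms (job path is illustrative): a user crontab entry looks like
    #   "*/5 * * * * /usr/bin/backup.sh"
    # while a cron_file under /etc/cron.d carries the user column:
    #   "*/5 * * * * root /usr/bin/backup.sh"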
def get_jobnames(self):
jobnames = []
for l in self.lines:
if re.match(r'%s' % self.ansible, l):
jobnames.append(re.sub(r'%s' % self.ansible, '', l))
return jobnames
def get_envnames(self):
envnames = []
for l in self.lines:
if re.match(r'^\S+=', l):
envnames.append(l.split('=')[0])
return envnames
def _update_job(self, name, job, addlinesfunction):
ansiblename = self.do_comment(name)
newlines = []
comment = None
for l in self.lines:
if comment is not None:
addlinesfunction(newlines, comment, job)
comment = None
elif l == ansiblename:
comment = l
else:
newlines.append(l)
self.lines = newlines
if len(newlines) == 0:
return True
else:
return False # TODO add some more error testing
def _update_env(self, name, decl, addenvfunction):
newlines = []
for l in self.lines:
if re.match(r'^%s=' % name, l):
addenvfunction(newlines, decl)
else:
newlines.append(l)
self.lines = newlines
def render(self):
"""
Render this crontab as it would be in the crontab.
"""
crons = []
for cron in self.lines:
crons.append(cron)
result = '\n'.join(crons)
if result:
result = result.rstrip('\r\n') + '\n'
return result
def _read_user_execute(self):
"""
Returns the command line for reading a crontab
"""
user = ''
if self.user:
if platform.system() == 'SunOS':
return "su %s -c '%s -l'" % (shlex_quote(self.user), shlex_quote(self.cron_cmd))
elif platform.system() == 'AIX':
return "%s -l %s" % (shlex_quote(self.cron_cmd), shlex_quote(self.user))
elif platform.system() == 'HP-UX':
return "%s %s %s" % (self.cron_cmd, '-l', shlex_quote(self.user))
elif pwd.getpwuid(os.getuid())[0] != self.user:
user = '-u %s' % shlex_quote(self.user)
return "%s %s %s" % (self.cron_cmd, user, '-l')
def _write_execute(self, path):
"""
Return the command line for writing a crontab
"""
user = ''
if self.user:
if platform.system() in ['SunOS', 'HP-UX', 'AIX']:
return "chown %s %s ; su '%s' -c '%s %s'" % (
shlex_quote(self.user), shlex_quote(path), shlex_quote(self.user), self.cron_cmd, shlex_quote(path))
elif pwd.getpwuid(os.getuid())[0] != self.user:
user = '-u %s' % shlex_quote(self.user)
return "%s %s %s" % (self.cron_cmd, user, shlex_quote(path))
def main():
# The following example playbooks:
#
# - cron: name="check dirs" hour="5,2" job="ls -alh > /dev/null"
#
# - name: do the job
# cron: name="do the job" hour="5,2" job="/some/dir/job.sh"
#
# - name: no job
# cron: name="an old job" state=absent
#
# - name: sets env
# cron: name="PATH" env=yes value="/bin:/usr/bin"
#
# Would produce:
# PATH=/bin:/usr/bin
# # Ansible: check dirs
# * * 5,2 * * ls -alh > /dev/null
# # Ansible: do the job
# * * 5,2 * * /some/dir/job.sh
module = AnsibleModule(
argument_spec=dict(
name=dict(type='str'),
user=dict(type='str'),
job=dict(type='str', aliases=['value']),
cron_file=dict(type='str'),
state=dict(type='str', default='present', choices=['present', 'absent']),
backup=dict(type='bool', default=False),
minute=dict(type='str', default='*'),
hour=dict(type='str', default='*'),
day=dict(type='str', default='*', aliases=['dom']),
month=dict(type='str', default='*'),
weekday=dict(type='str', default='*', aliases=['dow']),
reboot=dict(type='bool', default=False),
special_time=dict(type='str', choices=["reboot", "yearly", "annually", "monthly", "weekly", "daily", "hourly"]),
disabled=dict(type='bool', default=False),
env=dict(type='bool', default=False),
insertafter=dict(type='str'),
insertbefore=dict(type='str'),
),
supports_check_mode=True,
mutually_exclusive=[
['reboot', 'special_time'],
['insertafter', 'insertbefore'],
],
)
name = module.params['name']
user = module.params['user']
job = module.params['job']
cron_file = module.params['cron_file']
state = module.params['state']
backup = module.params['backup']
minute = module.params['minute']
hour = module.params['hour']
day = module.params['day']
month = module.params['month']
weekday = module.params['weekday']
reboot = module.params['reboot']
special_time = module.params['special_time']
disabled = module.params['disabled']
env = module.params['env']
insertafter = module.params['insertafter']
insertbefore = module.params['insertbefore']
do_install = state == 'present'
changed = False
res_args = dict()
warnings = list()
if cron_file:
cron_file_basename = os.path.basename(cron_file)
if not re.search(r'^[A-Z0-9_-]+$', cron_file_basename, re.I):
warnings.append('Filename portion of cron_file ("%s") should consist' % cron_file_basename +
' solely of upper- and lower-case letters, digits, underscores, and hyphens')
# Ensure all files generated are only writable by the owning user. Primarily relevant for the cron_file option.
os.umask(int('022', 8))
crontab = CronTab(module, user, cron_file)
module.debug('cron instantiated - name: "%s"' % name)
if not name:
module.deprecate(
msg="The 'name' parameter will be required in future releases.",
version='2.12', collection_name='ansible.builtin'
)
if reboot:
module.deprecate(
msg="The 'reboot' parameter will be removed in future releases. Use 'special_time' option instead.",
version='2.12', collection_name='ansible.builtin'
)
if module._diff:
diff = dict()
diff['before'] = crontab.n_existing
if crontab.cron_file:
diff['before_header'] = crontab.cron_file
else:
if crontab.user:
diff['before_header'] = 'crontab for user "%s"' % crontab.user
else:
diff['before_header'] = 'crontab'
# --- user input validation ---
if env and not name:
module.fail_json(msg="You must specify 'name' while working with environment variables (env=yes)")
    if (special_time or reboot) and \
            any(x != '*' for x in (minute, hour, day, month, weekday)):
module.fail_json(msg="You must specify time and date fields or special time.")
# cannot support special_time on solaris
if (special_time or reboot) and platform.system() == 'SunOS':
module.fail_json(msg="Solaris does not support special_time=... or @reboot")
if cron_file and do_install:
if not user:
module.fail_json(msg="To use cron_file=... parameter you must specify user=... as well")
if job is None and do_install:
module.fail_json(msg="You must specify 'job' to install a new cron job or variable")
if (insertafter or insertbefore) and not env and do_install:
module.fail_json(msg="Insertafter and insertbefore parameters are valid only with env=yes")
if reboot:
special_time = "reboot"
# if requested make a backup before making a change
if backup and not module.check_mode:
(backuph, backup_file) = tempfile.mkstemp(prefix='crontab')
crontab.write(backup_file)
if crontab.cron_file and not do_install:
if module._diff:
diff['after'] = ''
diff['after_header'] = '/dev/null'
else:
diff = dict()
if module.check_mode:
changed = os.path.isfile(crontab.cron_file)
else:
changed = crontab.remove_job_file()
module.exit_json(changed=changed, cron_file=cron_file, state=state, diff=diff)
if env:
if ' ' in name:
module.fail_json(msg="Invalid name for environment variable")
decl = '%s="%s"' % (name, job)
old_decl = crontab.find_env(name)
if do_install:
if len(old_decl) == 0:
crontab.add_env(decl, insertafter, insertbefore)
changed = True
if len(old_decl) > 0 and old_decl[1] != decl:
crontab.update_env(name, decl)
changed = True
else:
if len(old_decl) > 0:
crontab.remove_env(name)
changed = True
else:
if do_install:
for char in ['\r', '\n']:
if char in job.strip('\r\n'):
warnings.append('Job should not contain line breaks')
break
job = crontab.get_cron_job(minute, hour, day, month, weekday, job, special_time, disabled)
old_job = crontab.find_job(name, job)
if len(old_job) == 0:
crontab.add_job(name, job)
changed = True
if len(old_job) > 0 and old_job[1] != job:
crontab.update_job(name, job)
changed = True
if len(old_job) > 2:
crontab.update_job(name, job)
changed = True
else:
old_job = crontab.find_job(name)
if len(old_job) > 0:
crontab.remove_job(name)
changed = True
# no changes to env/job, but existing crontab needs a terminating newline
if not changed and crontab.n_existing != '':
if not (crontab.n_existing.endswith('\r') or crontab.n_existing.endswith('\n')):
changed = True
res_args = dict(
jobs=crontab.get_jobnames(),
envs=crontab.get_envnames(),
warnings=warnings,
changed=changed
)
if changed:
if not module.check_mode:
crontab.write()
if module._diff:
diff['after'] = crontab.render()
if crontab.cron_file:
diff['after_header'] = crontab.cron_file
else:
if crontab.user:
diff['after_header'] = 'crontab for user "%s"' % crontab.user
else:
diff['after_header'] = 'crontab'
res_args['diff'] = diff
# retain the backup only if crontab or cron file have changed
if backup and not module.check_mode:
if changed:
res_args['backup_file'] = backup_file
else:
os.unlink(backup_file)
if cron_file:
res_args['cron_file'] = cron_file
module.exit_json(**res_args)
# --- should never get here
module.exit_json(msg="Unable to execute cron task.")
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
bcded7ca3347b631cb06ccb49aa49c5ef2291909 | 6cb18c62758bfbf783d3fabe851d1c4d9f323483 | /setup.py | 9319f44e05f51de89cc40224949e07be98a9e018 | [
"MIT"
] | permissive | bruinxiong/performer-pytorch | 68e505ff5e59d35e339b23661feef377795fd2df | c368b5e4efd46f72e2abaa655dc813021f911014 | refs/heads/main | 2023-01-04T02:25:42.898296 | 2020-10-26T22:41:09 | 2020-10-26T22:41:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 815 | py | from setuptools import setup, find_packages
setup(
name = 'performer-pytorch',
packages = find_packages(exclude=['examples']),
version = '0.1.4',
license='MIT',
description = 'Performer - Pytorch',
author = 'Phil Wang',
author_email = '[email protected]',
url = 'https://github.com/lucidrains/performer-pytorch',
keywords = [
'artificial intelligence',
'attention mechanism',
'efficient attention',
'transformers'
],
install_requires=[
'pytorch-fast-transformers>=0.3.0',
'torch>=1.6',
'einops>=0.3'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
) | [
"[email protected]"
] | |
a4c71809c35378bb39dbbce97d55d2a122ab4dcd | f51c6d0cebb27c377ce9830deec4b727b9b2ee90 | /AI/05_tictactoe/02grid_plot.py | b2fb6cbc7f65ddac4fc048c6664f6bdd82dfb227 | [] | no_license | dbbudd/Python-Experiments | 1c3c1322583aaaf2016a2f2f3061e6d034c5d1c8 | b6d294bf11a5c92b8578d16aa2f63cc27fc47b07 | refs/heads/master | 2020-04-17T02:21:36.693593 | 2019-01-17T00:18:34 | 2019-01-17T00:18:34 | 166,130,283 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,098 | py | #!/usr/bin/env python
import numpy as np
import itertools
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
class gameboard(object):
def __init__(self):
        # cell codes: 0 = empty, 1 = player 1 ("X", red), 2 = player 2 ("O", blue)
        self.g = [[1, 0, 1], [0, 0, 2], [0, 2, 0]]
self.grid = np.array(self.g)
print(self.grid)
    def drawGrid(self):
        fig = plt.figure()
        ax = fig.add_subplot(111, xlim=(0, 3), ylim=(0, 3))
        self.myCells = [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2), (2, 0), (2, 1), (2, 2)]
        # each array index (row, col) is reused as the patch's (x, y) anchor,
        # so the rendered layout is transposed relative to the printed array
        for i in self.myCells:
            if self.grid[i] == 1:
                cell = mpatches.Rectangle(i, 1, 1, alpha=1, facecolor="red")
            elif self.grid[i] == 2:
                cell = mpatches.Rectangle(i, 1, 1, alpha=1, facecolor="blue")
            else:
                cell = mpatches.Rectangle(i, 1, 1, alpha=1, facecolor="none")
            ax.add_patch(cell)
        plt.show()
board = gameboard()
board.drawGrid() | [
"[email protected]"
] | |
1697ff12097d074fe9a08b7e8cfbf1ecd1348016 | cca89a7bbe2da907a38eb00e9a083f57597273f0 | /162. 寻找峰值/pythonCode.py | ecfc5d414241c3d0b4d2b4aac3531e9ced628696 | [] | no_license | xerprobe/LeetCodeAnswer | cc87941ef2a25c6aa1366e7a64480dbd72750670 | ea1822870f15bdb1a828a63569368b7cd10c6ab8 | refs/heads/master | 2022-09-23T09:15:42.628793 | 2020-06-06T16:29:59 | 2020-06-06T16:29:59 | 270,215,362 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,154 | py | from typing import List
class Solution:
def findPeakElement(self, nums: List[int]) -> int:
        def binarySearch(l: int, r: int) -> int:
            if l == r:
                return l
            mid = (l + r) // 2
            if nums[mid] > nums[mid + 1]:
                # descending at mid: some peak lies in [l, mid]
                return binarySearch(l, mid)
            else:
                # ascending at mid: some peak lies in [mid + 1, r]
                return binarySearch(mid + 1, r)
        return binarySearch(0, len(nums) - 1)
# A peak element is an element whose value is greater than its left and right neighbors.
# Given an input array nums, where nums[i] != nums[i+1], find a peak element and return its index.
# The array may contain multiple peaks; in that case, returning the index of any one of them is fine.
# You may imagine that nums[-1] = nums[n] = -infinity.
# Example 1:
# Input: nums = [1,2,3,1]
# Output: 2
# Explanation: 3 is a peak element and your function should return its index, 2.
# Example 2:
# Input: nums = [1,2,1,3,5,6,4]
# Output: 1 or 5
# Explanation: your function may return index 1, where the peak element is 2;
# or index 5, where the peak element is 6.
# Note:
# Your solution should run in O(log N) time.
# Link: https://leetcode-cn.com/problems/find-peak-element/
"[email protected]"
] | |
96eb58da2807780f7f78eb49453cd03e2e4a57bb | 33f30925224a7db3e3bf6948c6c569ad850e9c76 | /Server/bin/rst2xml.py | 6a7fab179644d60c2959331900cdea30a7350337 | [] | no_license | duelle/CTT | 2bc64fffaf4b2eb3976fedd7aea231a51da8fbe9 | e2da2ab9c599833cc8409728b456a9e37825986b | refs/heads/master | 2022-04-06T15:25:06.747919 | 2020-02-19T14:04:37 | 2020-02-19T14:04:37 | 237,939,126 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 642 | py | #!/home/duelle/Repositories/git/RadonCTT/Server/bin/python
# $Id: rst2xml.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing Docutils XML.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
description = ('Generates Docutils-native XML from standalone '
'reStructuredText sources. ' + default_description)
publish_cmdline(writer_name='xml', description=description)
| [
"[email protected]"
] | |
55c5e4126f52501d3ab1f9cd4f9c49c47dc30d18 | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp/ZXR10-MACPING-MIB.py | 805cbd59b0fb3a90dcafa3b37ef03e6abdf405d0 | [
"Apache-2.0"
] | permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 12,798 | py | #
# PySNMP MIB module ZXR10-MACPING-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ZXR10-MACPING-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 21:42:08 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ValueRangeConstraint, ConstraintsIntersection, ConstraintsUnion, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsIntersection", "ConstraintsUnion", "ValueSizeConstraint")
ModuleCompliance, ObjectGroup, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "ObjectGroup", "NotificationGroup")
iso, Bits, ModuleIdentity, Gauge32, Unsigned32, enterprises, IpAddress, Counter32, experimental, ObjectIdentity, MibIdentifier, NotificationType, TimeTicks, Integer32, MibScalar, MibTable, MibTableRow, MibTableColumn, mgmt, Counter64 = mibBuilder.importSymbols("SNMPv2-SMI", "iso", "Bits", "ModuleIdentity", "Gauge32", "Unsigned32", "enterprises", "IpAddress", "Counter32", "experimental", "ObjectIdentity", "MibIdentifier", "NotificationType", "TimeTicks", "Integer32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "mgmt", "Counter64")
TruthValue, DisplayString, RowStatus, MacAddress, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "TruthValue", "DisplayString", "RowStatus", "MacAddress", "TextualConvention")
zxr10L2vpn, = mibBuilder.importSymbols("ZXR10-SMI", "zxr10L2vpn")
zxr10MacPingMIB = MibIdentifier((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4))
class DisplayString(OctetString):
pass
class OptionType(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1))
namedValues = NamedValues(("ce", 0), ("pe", 1))
zxr10MacPingTable = MibTable((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 1), )
if mibBuilder.loadTexts: zxr10MacPingTable.setStatus('current')
zxr10MacPingEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 1, 1), ).setIndexNames((0, "ZXR10-MACPING-MIB", "zxr10PingMacSerial"))
if mibBuilder.loadTexts: zxr10MacPingEntry.setStatus('current')
zxr10PingMacSerial = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacSerial.setStatus('current')
zxr10PingMacDestMac = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 1, 1, 2), MacAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zxr10PingMacDestMac.setStatus('current')
zxr10PingMacControlOutEtherIf = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 1, 1, 3), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zxr10PingMacControlOutEtherIf.setStatus('current')
zxr10PingMacIfOption = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("none", 0), ("option", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zxr10PingMacIfOption.setStatus('current')
zxr10PingMacPacketCount = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 1, 1, 5), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zxr10PingMacPacketCount.setStatus('current')
zxr10PingMacTimeOut = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 1, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 60))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zxr10PingMacTimeOut.setStatus('current')
zxr10PingMacHops = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 1, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 10))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zxr10PingMacHops.setStatus('current')
zxr10PingMacControlResultType = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 1, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("summary", 0), ("detail", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zxr10PingMacControlResultType.setStatus('current')
zxr10PingMacTrapOncompletion = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 1, 1, 9), TruthValue().clone('true')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zxr10PingMacTrapOncompletion.setStatus('current')
zxr10PingMacRosStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 1, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("not-active", 1), ("start-ping", 2), ("ping-processing", 3), ("ping-completed", 4))).clone(1)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zxr10PingMacRosStatus.setStatus('current')
zxr10PingMacEntryOwner = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 1, 1, 11), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zxr10PingMacEntryOwner.setStatus('current')
zxr10PingMacIfPeOption = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 1, 1, 12), OptionType()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zxr10PingMacIfPeOption.setStatus('current')
zxr10PingMacVfiName = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 1, 1, 13), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zxr10PingMacVfiName.setStatus('current')
zxr10PingMacPeerAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 1, 1, 14), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zxr10PingMacPeerAddress.setStatus('current')
zxr10PingMacResultTable = MibTable((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2), )
if mibBuilder.loadTexts: zxr10PingMacResultTable.setStatus('current')
zxr10pingMacResultEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1), ).setIndexNames((0, "ZXR10-MACPING-MIB", "zxr10PingMacResultSerial"))
if mibBuilder.loadTexts: zxr10pingMacResultEntry.setStatus('current')
zxr10PingMacResultSerial = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacResultSerial.setStatus('current')
zxr10PingMacResultSentPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacResultSentPkts.setStatus('current')
zxr10PingMacResultRcvPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacResultRcvPkts.setStatus('current')
zxr10PingMacResultRoundTripMinTime = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacResultRoundTripMinTime.setStatus('current')
zxr10PingMacResultRoundTripMaxTime = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacResultRoundTripMaxTime.setStatus('current')
zxr10PingMacResultRoundTripAvgTime = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacResultRoundTripAvgTime.setStatus('current')
zxr10PingMacResultType = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("summary", 0), ("detail", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacResultType.setStatus('current')
zxr10PingMacExtResultDestIfName = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 8), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacExtResultDestIfName.setStatus('current')
zxr10PingMacExtResultDestHostName = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 9), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 17))).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacExtResultDestHostName.setStatus('current')
zxr10PingMacExtResultSourceIfName = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 10), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacExtResultSourceIfName.setStatus('current')
zxr10PingMacExtResultSourceHostName = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 11), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 17))).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacExtResultSourceHostName.setStatus('current')
zxr10PingMacExtResultOutVlanId = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 12), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 4096))).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacExtResultOutVlanId.setStatus('current')
zxr10PingMacExtResultInVlanId = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 13), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 4096))).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacExtResultInVlanId.setStatus('current')
zxr10PingMacResultEntryOwner = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 14), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacResultEntryOwner.setStatus('current')
zxr10PingMacResultRoundWobbleMinTime = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 15), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacResultRoundWobbleMinTime.setStatus('current')
zxr10PingMacResultRoundWobbleMaxTime = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 16), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacResultRoundWobbleMaxTime.setStatus('current')
zxr10PingMacResultRoundWobbleAvgTime = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 17), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacResultRoundWobbleAvgTime.setStatus('current')
macpingNotifications = MibIdentifier((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 3))
macpingTrapResult = NotificationType((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 3, 1)).setObjects(("ZXR10-MACPING-MIB", "zxr10PingMacResultSerial"), ("ZXR10-MACPING-MIB", "zxr10PingMacResultSentPkts"), ("ZXR10-MACPING-MIB", "zxr10PingMacResultRcvPkts"), ("ZXR10-MACPING-MIB", "zxr10PingMacResultRoundTripMinTime"), ("ZXR10-MACPING-MIB", "zxr10PingMacResultRoundTripMaxTime"), ("ZXR10-MACPING-MIB", "zxr10PingMacResultRoundTripAvgTime"))
if mibBuilder.loadTexts: macpingTrapResult.setStatus('current')
mibBuilder.exportSymbols("ZXR10-MACPING-MIB", zxr10PingMacResultRoundTripAvgTime=zxr10PingMacResultRoundTripAvgTime, zxr10MacPingMIB=zxr10MacPingMIB, zxr10PingMacPeerAddress=zxr10PingMacPeerAddress, zxr10PingMacTimeOut=zxr10PingMacTimeOut, macpingNotifications=macpingNotifications, zxr10PingMacEntryOwner=zxr10PingMacEntryOwner, zxr10PingMacRosStatus=zxr10PingMacRosStatus, zxr10PingMacIfOption=zxr10PingMacIfOption, zxr10PingMacResultRoundWobbleAvgTime=zxr10PingMacResultRoundWobbleAvgTime, zxr10PingMacResultTable=zxr10PingMacResultTable, OptionType=OptionType, zxr10MacPingTable=zxr10MacPingTable, zxr10PingMacPacketCount=zxr10PingMacPacketCount, zxr10PingMacResultRcvPkts=zxr10PingMacResultRcvPkts, zxr10PingMacSerial=zxr10PingMacSerial, zxr10pingMacResultEntry=zxr10pingMacResultEntry, zxr10PingMacResultRoundWobbleMinTime=zxr10PingMacResultRoundWobbleMinTime, zxr10PingMacResultRoundTripMinTime=zxr10PingMacResultRoundTripMinTime, zxr10MacPingEntry=zxr10MacPingEntry, zxr10PingMacHops=zxr10PingMacHops, zxr10PingMacIfPeOption=zxr10PingMacIfPeOption, zxr10PingMacResultSerial=zxr10PingMacResultSerial, DisplayString=DisplayString, zxr10PingMacExtResultSourceHostName=zxr10PingMacExtResultSourceHostName, zxr10PingMacResultEntryOwner=zxr10PingMacResultEntryOwner, zxr10PingMacControlOutEtherIf=zxr10PingMacControlOutEtherIf, zxr10PingMacResultSentPkts=zxr10PingMacResultSentPkts, zxr10PingMacResultType=zxr10PingMacResultType, zxr10PingMacResultRoundWobbleMaxTime=zxr10PingMacResultRoundWobbleMaxTime, zxr10PingMacResultRoundTripMaxTime=zxr10PingMacResultRoundTripMaxTime, zxr10PingMacExtResultDestIfName=zxr10PingMacExtResultDestIfName, zxr10PingMacExtResultDestHostName=zxr10PingMacExtResultDestHostName, macpingTrapResult=macpingTrapResult, zxr10PingMacVfiName=zxr10PingMacVfiName, zxr10PingMacExtResultOutVlanId=zxr10PingMacExtResultOutVlanId, zxr10PingMacExtResultSourceIfName=zxr10PingMacExtResultSourceIfName, zxr10PingMacControlResultType=zxr10PingMacControlResultType, zxr10PingMacExtResultInVlanId=zxr10PingMacExtResultInVlanId, zxr10PingMacDestMac=zxr10PingMacDestMac, zxr10PingMacTrapOncompletion=zxr10PingMacTrapOncompletion)
| [
"[email protected]"
] | |
eba5e24cb7ae539f05831d88b27d99b2346a8f0a | ec9129d3eb1880df9f0b54c76510352a7e004b0c | /tools/make_vps_tarball.py | b03537feaa59ec1a6a93c522cfd621963bf12eba | [] | no_license | eugen-don/vps | 4057e6ddb1db274dbd8d78fa926376cfc3a40aa7 | 6a16569868241b35d8137b7f2b2f8db0cf67ff55 | refs/heads/master | 2021-01-11T16:29:53.109075 | 2014-05-14T09:20:33 | 2014-05-14T09:20:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 771 | py | #!/usr/bin/env python
import sys
import os
import _env
import ops.os_init as os_init
import conf
assert conf.OS_IMAGE_DIR and os.path.isdir(conf.OS_IMAGE_DIR)
def usage():
    print """usage: \n%s [image_path/partition_path] [tarball_dir]
""" % (sys.argv[0])
def main():
if len(sys.argv) < 3:
usage()
os._exit(0)
img_path = sys.argv[1]
tarball_dir = sys.argv[2]
if not os.path.exists(img_path):
print "%s not exists" % (img_path)
os._exit(1)
if not os.path.isdir(tarball_dir):
print '%s is not a directory' % (tarball_dir)
os._exit(1)
tarball_path = os_init.pack_vps_fs_tarball(img_path, tarball_dir)
print "%s packed in %s" % (img_path, tarball_path)
if "__main__" == __name__:
main()
| [
"[email protected]"
] | |
f716de44a80a10f01bfaa8b3a8d58b4ec092c945 | dbe1f4110921a08cb13e22ea325d503bd5627195 | /chuhuo_2.71/bluedon/monitor/sbin/checkproc.py | cd3521785adb14ce48baf65ec961b05655ab0e50 | [] | no_license | Hehouhua/waf_branches | 92dc1b1cbecba20f24ef6c7372dde7caa43f9158 | ca76f3a1ed8150b423474c9e37aee37841a5ee35 | refs/heads/main | 2023-01-07T11:33:31.667688 | 2020-11-03T06:58:33 | 2020-11-03T06:58:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,048 | py | import os, re, sys
rexplogstart = re.compile(r'grep logstart.pl')
rexpwebvisit = re.compile(r'grep webvisit.pl')
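# "ps ax | grep <name>" always matches its own grep process; if every
# returned line is that grep (flag >= len(lines)), the daemon is not
# running, so the checks below (re)start it.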
def checklogstart():
if not os.path.exists("/usr/local/bdwaf/logs_bridge/data"):
os.popen("mkdir -p /usr/local/bdwaf/logs_bridge/data")
if not os.path.exists("/usr/local/bdwaf/logs_proxy/data"):
os.popen("mkdir -p /usr/local/bdwaf/logs_proxy/data")
flag = 0
pfp = os.popen('ps ax | grep logstart.pl')
lines = pfp.readlines()
for line in lines:
match = rexplogstart.search(line)
if match:
flag += 1
if flag >= len(lines):
os.system('/usr/local/bluedon/monitor/sbin/logstart.pl')
def checkwebvisit():
flag = 0
pfp = os.popen('ps ax | grep webvisit.pl')
lines = pfp.readlines()
for line in lines:
        match = rexpwebvisit.search(line)
if match:
flag += 1
if flag >= len(lines):
os.system('/usr/local/bluedon/monitor/sbin/webvisit.pl')
if __name__ == '__main__':
checklogstart()
checkwebvisit()
| [
"[email protected]"
] | |
dc95cfc1d53773ef74245ed5c8a5b6bbbf3ce933 | 65e076e4fcc00a67faa0932b3f3a3d3a3a11e2aa | /sdk/python/pulumi_google_native/datastore/v1/_enums.py | 15df09472641b2ebbeb23bd87aeab08fb357fbf9 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | TheJaySmith-Google/pulumi-google-native | 816babe5c7316724e02d5b8b9d789df00262bb8e | 566c295a39fe8c3dd16e4a7894ff6de72423e5da | refs/heads/master | 2023-06-05T06:45:19.979837 | 2021-06-23T11:42:27 | 2021-06-23T11:42:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 801 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from enum import Enum
__all__ = [
'GoogleDatastoreAdminV1IndexedPropertyDirection',
'IndexAncestor',
]
class GoogleDatastoreAdminV1IndexedPropertyDirection(str, Enum):
"""
Required. The indexed property's direction. Must not be DIRECTION_UNSPECIFIED.
"""
DIRECTION_UNSPECIFIED = "DIRECTION_UNSPECIFIED"
ASCENDING = "ASCENDING"
DESCENDING = "DESCENDING"
class IndexAncestor(str, Enum):
"""
Required. The index's ancestor mode. Must not be ANCESTOR_MODE_UNSPECIFIED.
"""
ANCESTOR_MODE_UNSPECIFIED = "ANCESTOR_MODE_UNSPECIFIED"
NONE = "NONE"
ALL_ANCESTORS = "ALL_ANCESTORS"
| [
"[email protected]"
] | |
cef9a68afdddd61d9d2c7d5510d7a38174bc8f1c | 4b68243d9db908945ee500174a8a12be27d150f9 | /pogoprotos/networking/requests/messages/update_fitness_metrics_message_pb2.py | 522382d168f4fe3adab53afbb40fe730c7070bd9 | [] | no_license | ykram/pogoprotos-py | 7285c86498f57dcbbec8e6c947597e82b2518d80 | a045b0140740625d9a19ded53ece385a16c4ad4a | refs/heads/master | 2020-04-20T10:19:51.628964 | 2019-02-02T02:58:03 | 2019-02-02T02:58:03 | 168,787,721 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | true | 2,937 | py | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pogoprotos/networking/requests/messages/update_fitness_metrics_message.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from pogoprotos.data.fitness import fitness_sample_pb2 as pogoprotos_dot_data_dot_fitness_dot_fitness__sample__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='pogoprotos/networking/requests/messages/update_fitness_metrics_message.proto',
package='pogoprotos.networking.requests.messages',
syntax='proto3',
serialized_pb=_b('\nLpogoprotos/networking/requests/messages/update_fitness_metrics_message.proto\x12\'pogoprotos.networking.requests.messages\x1a,pogoprotos/data/fitness/fitness_sample.proto\"^\n\x1bUpdateFitnessMetricsMessage\x12?\n\x0f\x66itness_samples\x18\x01 \x03(\x0b\x32&.pogoprotos.data.fitness.FitnessSampleb\x06proto3')
,
dependencies=[pogoprotos_dot_data_dot_fitness_dot_fitness__sample__pb2.DESCRIPTOR,])
_UPDATEFITNESSMETRICSMESSAGE = _descriptor.Descriptor(
name='UpdateFitnessMetricsMessage',
full_name='pogoprotos.networking.requests.messages.UpdateFitnessMetricsMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='fitness_samples', full_name='pogoprotos.networking.requests.messages.UpdateFitnessMetricsMessage.fitness_samples', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=167,
serialized_end=261,
)
_UPDATEFITNESSMETRICSMESSAGE.fields_by_name['fitness_samples'].message_type = pogoprotos_dot_data_dot_fitness_dot_fitness__sample__pb2._FITNESSSAMPLE
DESCRIPTOR.message_types_by_name['UpdateFitnessMetricsMessage'] = _UPDATEFITNESSMETRICSMESSAGE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
UpdateFitnessMetricsMessage = _reflection.GeneratedProtocolMessageType('UpdateFitnessMetricsMessage', (_message.Message,), dict(
DESCRIPTOR = _UPDATEFITNESSMETRICSMESSAGE,
__module__ = 'pogoprotos.networking.requests.messages.update_fitness_metrics_message_pb2'
# @@protoc_insertion_point(class_scope:pogoprotos.networking.requests.messages.UpdateFitnessMetricsMessage)
))
_sym_db.RegisterMessage(UpdateFitnessMetricsMessage)
# @@protoc_insertion_point(module_scope)
| [
"[email protected]"
] | |
22ffc7c4ae1f6b16b2ece3c70722f0a2d0ec48c5 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2480/59018/262642.py | 80da0919c460c290863470859367203af1d15933 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 281 | py | def even_odd(N,a):
    b = []
    for j in a[:]:  # iterate over a copy so removals don't skip elements
        if j % 2 == 0:
            b.append(j)
            a.remove(j)  # remove the even value itself, not the item at index j
    c = b + a
    return c
T=int(input())
for i in range(T):
N=int(input())
info=input().split(' ')
a=[int(y) for y in info]
print(even_odd(N,a))
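# An equivalent alternative (sketch; even_odd_stable is a hypothetical helper):
# partition the list into evens followed by odds without mutating the input
# while scanning it.
def even_odd_stable(N, a):
    evens = [x for x in a if x % 2 == 0]
    odds = [x for x in a if x % 2 != 0]
    return evens + odds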
| [
"[email protected]"
] | |
66e5e2cd1dd250b00922b3b3211b1c0c1c510d35 | 53565e19de1d345552f5f469f4e4ea311a421bb8 | /app/artist/models/artist.py | de30a6078bcfde1cf589a711184a2c568c8bfd52 | [] | no_license | standbyme227/fc-melon | 18e17aa8b85906a62e1631e54a70ff85d72ea435 | 8f0f4d40021f75a025e91fa6aebea143bccb6ce3 | refs/heads/master | 2021-05-03T18:59:13.495171 | 2018-03-20T02:32:02 | 2018-03-20T02:32:02 | 120,418,135 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,632 | py | from django.conf import settings
from django.db import models
from django.forms import model_to_dict
from django.http import JsonResponse, HttpResponse
from .artist_youtube import ArtistYouTube
from .managers import ArtistManager
__all__ = (
'Artist',
)
class Artist(models.Model):
BLOOD_TYPE_A = 'a'
BLOOD_TYPE_B = 'b'
BLOOD_TYPE_O = 'o'
BLOOD_TYPE_AB = 'c'
BLOOD_TYPE_OTHER = 'x'
CHOICES_BLOOD_TYPE = (
(BLOOD_TYPE_A, 'A형'),
(BLOOD_TYPE_B, 'B형'),
(BLOOD_TYPE_O, 'O형'),
(BLOOD_TYPE_AB, 'AB형'),
(BLOOD_TYPE_OTHER, '기타'),
)
melon_id = models.CharField('멜론 Artist ID', max_length=20, blank=True, null=True, unique=True)
image = models.ImageField('프로필 이미지', upload_to='artist', blank=True)
    # upload_to is a path relative to the media folder
name = models.CharField('이름', max_length=50, )
real_name = models.CharField('본명', max_length=30, blank=True, default='')
nationality = models.CharField('국적', max_length=50, blank=True, )
birth_date = models.DateField(max_length=50, blank=True, null=True, )
constellation = models.CharField('별자리', max_length=30, blank=True, null=True)
blood_type = models.CharField('혈액형', max_length=50, blank=True, choices=CHOICES_BLOOD_TYPE)
    # choices must be set for the field to offer the options above
intro = models.TextField('소개', blank=True)
# likes = models.IntegerField(default=0)
like_users = models.ManyToManyField(
settings.AUTH_USER_MODEL,
through='ArtistLike',
related_name='like_artists',
blank=True,
)
youtube_videos = models.ManyToManyField(
ArtistYouTube,
related_name='artists',
blank=True,
)
objects = ArtistManager()
def __str__(self):
return self.name
def toggle_like_user(self, user):
        # Fetch the ArtistLike whose artist is self and whose user is the given user, or create it if missing
like, like_created = self.like_user_info_list.get_or_create(user=user)
        # If it already existed (nothing newly created)
if not like_created:
            # Delete the existing like
like.delete()
        # Return whether a new like was created
return like_created
# if self.like_users.filter(user=user).exists():
# self.like_users.filter(user).delete()
# else:
# self.like_users.create(user=user)
        # # QuerySet of ArtistLike objects for this artist and the given user
# query = ArtistLike.objects.filter(artist=self, user=user)
        # # If the QuerySet is non-empty
# if query.exists():
# query.delete()
# return False
        # # If the QuerySet is empty
# else:
# ArtistLike.objects.create(artist=self, user=user)
# return True
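        # Usage sketch (hypothetical view code, not part of this model):
        #   created = artist.toggle_like_user(request.user)
        #   # created is True when a like was added, False when it was removed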
def to_json(self):
from django.db.models.fields.files import FieldFile
from django.contrib.auth import get_user_model
user_class = get_user_model()
ret = model_to_dict(self)
        # model_to_dict returns a dict; walk its items and, for any value
        # whose type would break JSON serialization, convert it to a
        # serializable form and assign it back in place
def convert_value(value):
if isinstance(value, FieldFile):
return value.url if value else None
elif isinstance(value, user_class):
return value.pk
elif isinstance(value, ArtistYouTube):
return value.pk
return value
def convert_obj(obj):
"""
            Convert an object -- or the objects nested inside a container
            object -- that cannot be serialized into a serializable form
            :param obj:
            :return: the object after passing through convert_value()
"""
if isinstance(obj, list):
            # For a list, walk the items and convert the value at each index
for index, item in enumerate(obj):
obj[index] = convert_obj(item)
elif isinstance(obj, dict):
            # For a dict, walk the items and convert the value at each key
for key, value in obj.items():
obj[key] = convert_obj(value)
            # If it is neither a list nor a dict, return the converted object itself
return convert_value(obj)
convert_obj(ret)
        return ret
| [
"[email protected]"
] | |
ac071ec7c195c0c7838f31cdd9f41fe37a46ad9c | a44a9279258ace54be0ea6d410e6ddb5a2d72bcb | /project-addons/custom_reports/models/product.py | 719faf154fd24aa8c981b08a03877ad3b5b456aa | [] | no_license | athlontado/PXGO_00064_2014_PHA | 346f33185a07c2e1766a7cc79cd300252d9b2480 | 3086baba490e47a5dcc7942c7c5fee9fc047ddcd | refs/heads/master | 2020-04-06T03:56:15.828784 | 2016-04-18T12:24:53 | 2016-04-18T12:24:53 | 59,216,028 | 0 | 0 | null | 2016-05-19T14:50:54 | 2016-05-19T14:50:54 | null | UTF-8 | Python | false | false | 1,240 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2016 Pharmadus. All Rights Reserved
# $Óscar Salvador <[email protected]>$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import fields, models
class ProductCategory(models.Model):
_inherit = 'product.category'
commissions_parent_category = fields.Boolean('Commissions parent category',
default=False)
| [
"[email protected]"
] | |
0e029895d75465efd99006fba963cce56d4204ed | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/pandas-dev_pandas/pandas-master/pandas/tests/test_nanops.py | 937c20d009b6bfb2143c62b9aa96a110e0d6c71f | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 43,023 | py | # -*- coding: utf-8 -*-
from __future__ import division, print_function
from functools import partial
import warnings
import numpy as np
from pandas import Series, isnull
from pandas.types.common import is_integer_dtype
import pandas.core.nanops as nanops
import pandas.util.testing as tm
use_bn = nanops._USE_BOTTLENECK
class TestnanopsDataFrame(tm.TestCase):
def setUp(self):
np.random.seed(11235)
nanops._USE_BOTTLENECK = False
self.arr_shape = (11, 7, 5)
self.arr_float = np.random.randn(*self.arr_shape)
self.arr_float1 = np.random.randn(*self.arr_shape)
self.arr_complex = self.arr_float + self.arr_float1 * 1j
self.arr_int = np.random.randint(-10, 10, self.arr_shape)
self.arr_bool = np.random.randint(0, 2, self.arr_shape) == 0
self.arr_str = np.abs(self.arr_float).astype('S')
self.arr_utf = np.abs(self.arr_float).astype('U')
self.arr_date = np.random.randint(0, 20000,
self.arr_shape).astype('M8[ns]')
self.arr_tdelta = np.random.randint(0, 20000,
self.arr_shape).astype('m8[ns]')
self.arr_nan = np.tile(np.nan, self.arr_shape)
self.arr_float_nan = np.vstack([self.arr_float, self.arr_nan])
self.arr_float1_nan = np.vstack([self.arr_float1, self.arr_nan])
self.arr_nan_float1 = np.vstack([self.arr_nan, self.arr_float1])
self.arr_nan_nan = np.vstack([self.arr_nan, self.arr_nan])
self.arr_inf = self.arr_float * np.inf
self.arr_float_inf = np.vstack([self.arr_float, self.arr_inf])
self.arr_float1_inf = np.vstack([self.arr_float1, self.arr_inf])
self.arr_inf_float1 = np.vstack([self.arr_inf, self.arr_float1])
self.arr_inf_inf = np.vstack([self.arr_inf, self.arr_inf])
self.arr_nan_inf = np.vstack([self.arr_nan, self.arr_inf])
self.arr_float_nan_inf = np.vstack([self.arr_float, self.arr_nan,
self.arr_inf])
self.arr_nan_float1_inf = np.vstack([self.arr_float, self.arr_inf,
self.arr_nan])
self.arr_nan_nan_inf = np.vstack([self.arr_nan, self.arr_nan,
self.arr_inf])
self.arr_obj = np.vstack([self.arr_float.astype(
'O'), self.arr_int.astype('O'), self.arr_bool.astype(
'O'), self.arr_complex.astype('O'), self.arr_str.astype(
'O'), self.arr_utf.astype('O'), self.arr_date.astype('O'),
self.arr_tdelta.astype('O')])
with np.errstate(invalid='ignore'):
self.arr_nan_nanj = self.arr_nan + self.arr_nan * 1j
self.arr_complex_nan = np.vstack([self.arr_complex,
self.arr_nan_nanj])
self.arr_nan_infj = self.arr_inf * 1j
self.arr_complex_nan_infj = np.vstack([self.arr_complex,
self.arr_nan_infj])
self.arr_float_2d = self.arr_float[:, :, 0]
self.arr_float1_2d = self.arr_float1[:, :, 0]
self.arr_complex_2d = self.arr_complex[:, :, 0]
self.arr_int_2d = self.arr_int[:, :, 0]
self.arr_bool_2d = self.arr_bool[:, :, 0]
self.arr_str_2d = self.arr_str[:, :, 0]
self.arr_utf_2d = self.arr_utf[:, :, 0]
self.arr_date_2d = self.arr_date[:, :, 0]
self.arr_tdelta_2d = self.arr_tdelta[:, :, 0]
self.arr_nan_2d = self.arr_nan[:, :, 0]
self.arr_float_nan_2d = self.arr_float_nan[:, :, 0]
self.arr_float1_nan_2d = self.arr_float1_nan[:, :, 0]
self.arr_nan_float1_2d = self.arr_nan_float1[:, :, 0]
self.arr_nan_nan_2d = self.arr_nan_nan[:, :, 0]
self.arr_nan_nanj_2d = self.arr_nan_nanj[:, :, 0]
self.arr_complex_nan_2d = self.arr_complex_nan[:, :, 0]
self.arr_inf_2d = self.arr_inf[:, :, 0]
self.arr_float_inf_2d = self.arr_float_inf[:, :, 0]
self.arr_nan_inf_2d = self.arr_nan_inf[:, :, 0]
self.arr_float_nan_inf_2d = self.arr_float_nan_inf[:, :, 0]
self.arr_nan_nan_inf_2d = self.arr_nan_nan_inf[:, :, 0]
self.arr_float_1d = self.arr_float[:, 0, 0]
self.arr_float1_1d = self.arr_float1[:, 0, 0]
self.arr_complex_1d = self.arr_complex[:, 0, 0]
self.arr_int_1d = self.arr_int[:, 0, 0]
self.arr_bool_1d = self.arr_bool[:, 0, 0]
self.arr_str_1d = self.arr_str[:, 0, 0]
self.arr_utf_1d = self.arr_utf[:, 0, 0]
self.arr_date_1d = self.arr_date[:, 0, 0]
self.arr_tdelta_1d = self.arr_tdelta[:, 0, 0]
self.arr_nan_1d = self.arr_nan[:, 0, 0]
self.arr_float_nan_1d = self.arr_float_nan[:, 0, 0]
self.arr_float1_nan_1d = self.arr_float1_nan[:, 0, 0]
self.arr_nan_float1_1d = self.arr_nan_float1[:, 0, 0]
self.arr_nan_nan_1d = self.arr_nan_nan[:, 0, 0]
self.arr_nan_nanj_1d = self.arr_nan_nanj[:, 0, 0]
self.arr_complex_nan_1d = self.arr_complex_nan[:, 0, 0]
self.arr_inf_1d = self.arr_inf.ravel()
self.arr_float_inf_1d = self.arr_float_inf[:, 0, 0]
self.arr_nan_inf_1d = self.arr_nan_inf[:, 0, 0]
self.arr_float_nan_inf_1d = self.arr_float_nan_inf[:, 0, 0]
self.arr_nan_nan_inf_1d = self.arr_nan_nan_inf[:, 0, 0]
def tearDown(self):
nanops._USE_BOTTLENECK = use_bn
def check_results(self, targ, res, axis, check_dtype=True):
res = getattr(res, 'asm8', res)
res = getattr(res, 'values', res)
# timedeltas are a beast here
def _coerce_tds(targ, res):
if hasattr(targ, 'dtype') and targ.dtype == 'm8[ns]':
if len(targ) == 1:
targ = targ[0].item()
res = res.item()
else:
targ = targ.view('i8')
return targ, res
try:
if axis != 0 and hasattr(
targ, 'shape') and targ.ndim and targ.shape != res.shape:
res = np.split(res, [targ.shape[0]], axis=0)[0]
except:
targ, res = _coerce_tds(targ, res)
try:
tm.assert_almost_equal(targ, res, check_dtype=check_dtype)
except:
# handle timedelta dtypes
if hasattr(targ, 'dtype') and targ.dtype == 'm8[ns]':
targ, res = _coerce_tds(targ, res)
tm.assert_almost_equal(targ, res, check_dtype=check_dtype)
return
# There are sometimes rounding errors with
# complex and object dtypes.
# If it isn't one of those, re-raise the error.
if not hasattr(res, 'dtype') or res.dtype.kind not in ['c', 'O']:
raise
# convert object dtypes to something that can be split into
# real and imaginary parts
if res.dtype.kind == 'O':
if targ.dtype.kind != 'O':
res = res.astype(targ.dtype)
else:
try:
res = res.astype('c16')
except:
res = res.astype('f8')
try:
targ = targ.astype('c16')
except:
targ = targ.astype('f8')
# there should never be a case where numpy returns an object
# but nanops doesn't, so make that an exception
elif targ.dtype.kind == 'O':
raise
tm.assert_almost_equal(targ.real, res.real,
check_dtype=check_dtype)
tm.assert_almost_equal(targ.imag, res.imag,
check_dtype=check_dtype)
def check_fun_data(self, testfunc, targfunc, testarval, targarval,
targarnanval, check_dtype=True, **kwargs):
for axis in list(range(targarval.ndim)) + [None]:
for skipna in [False, True]:
targartempval = targarval if skipna else targarnanval
try:
targ = targfunc(targartempval, axis=axis, **kwargs)
res = testfunc(testarval, axis=axis, skipna=skipna,
**kwargs)
self.check_results(targ, res, axis,
check_dtype=check_dtype)
if skipna:
res = testfunc(testarval, axis=axis, **kwargs)
self.check_results(targ, res, axis,
check_dtype=check_dtype)
if axis is None:
res = testfunc(testarval, skipna=skipna, **kwargs)
self.check_results(targ, res, axis,
check_dtype=check_dtype)
if skipna and axis is None:
res = testfunc(testarval, **kwargs)
self.check_results(targ, res, axis,
check_dtype=check_dtype)
except BaseException as exc:
exc.args += ('axis: %s of %s' % (axis, testarval.ndim - 1),
'skipna: %s' % skipna, 'kwargs: %s' % kwargs)
raise
if testarval.ndim <= 1:
return
try:
testarval2 = np.take(testarval, 0, axis=-1)
targarval2 = np.take(targarval, 0, axis=-1)
targarnanval2 = np.take(targarnanval, 0, axis=-1)
except ValueError:
return
self.check_fun_data(testfunc, targfunc, testarval2, targarval2,
targarnanval2, check_dtype=check_dtype, **kwargs)
def check_fun(self, testfunc, targfunc, testar, targar=None,
targarnan=None, **kwargs):
if targar is None:
targar = testar
if targarnan is None:
targarnan = testar
testarval = getattr(self, testar)
targarval = getattr(self, targar)
targarnanval = getattr(self, targarnan)
try:
self.check_fun_data(testfunc, targfunc, testarval, targarval,
targarnanval, **kwargs)
except BaseException as exc:
exc.args += ('testar: %s' % testar, 'targar: %s' % targar,
'targarnan: %s' % targarnan)
raise
def check_funs(self, testfunc, targfunc, allow_complex=True,
allow_all_nan=True, allow_str=True, allow_date=True,
allow_tdelta=True, allow_obj=True, **kwargs):
self.check_fun(testfunc, targfunc, 'arr_float', **kwargs)
self.check_fun(testfunc, targfunc, 'arr_float_nan', 'arr_float',
**kwargs)
self.check_fun(testfunc, targfunc, 'arr_int', **kwargs)
self.check_fun(testfunc, targfunc, 'arr_bool', **kwargs)
objs = [self.arr_float.astype('O'), self.arr_int.astype('O'),
self.arr_bool.astype('O')]
if allow_all_nan:
self.check_fun(testfunc, targfunc, 'arr_nan', **kwargs)
if allow_complex:
self.check_fun(testfunc, targfunc, 'arr_complex', **kwargs)
self.check_fun(testfunc, targfunc, 'arr_complex_nan',
'arr_complex', **kwargs)
if allow_all_nan:
self.check_fun(testfunc, targfunc, 'arr_nan_nanj', **kwargs)
objs += [self.arr_complex.astype('O')]
if allow_str:
self.check_fun(testfunc, targfunc, 'arr_str', **kwargs)
self.check_fun(testfunc, targfunc, 'arr_utf', **kwargs)
objs += [self.arr_str.astype('O'), self.arr_utf.astype('O')]
if allow_date:
try:
targfunc(self.arr_date)
except TypeError:
pass
else:
self.check_fun(testfunc, targfunc, 'arr_date', **kwargs)
objs += [self.arr_date.astype('O')]
if allow_tdelta:
try:
targfunc(self.arr_tdelta)
except TypeError:
pass
else:
self.check_fun(testfunc, targfunc, 'arr_tdelta', **kwargs)
objs += [self.arr_tdelta.astype('O')]
if allow_obj:
self.arr_obj = np.vstack(objs)
# some nanops handle object dtypes better than their numpy
# counterparts, so the numpy functions need to be given something
# else
if allow_obj == 'convert':
targfunc = partial(self._badobj_wrap, func=targfunc,
allow_complex=allow_complex)
self.check_fun(testfunc, targfunc, 'arr_obj', **kwargs)
def check_funs_ddof(self,
testfunc,
targfunc,
allow_complex=True,
allow_all_nan=True,
allow_str=True,
allow_date=False,
allow_tdelta=False,
allow_obj=True, ):
for ddof in range(3):
try:
self.check_funs(testfunc, targfunc, allow_complex,
allow_all_nan, allow_str, allow_date,
allow_tdelta, allow_obj, ddof=ddof)
except BaseException as exc:
exc.args += ('ddof %s' % ddof, )
raise
def _badobj_wrap(self, value, func, allow_complex=True, **kwargs):
if value.dtype.kind == 'O':
if allow_complex:
value = value.astype('c16')
else:
value = value.astype('f8')
return func(value, **kwargs)
def test_nanany(self):
self.check_funs(nanops.nanany, np.any, allow_all_nan=False,
allow_str=False, allow_date=False, allow_tdelta=False)
def test_nanall(self):
self.check_funs(nanops.nanall, np.all, allow_all_nan=False,
allow_str=False, allow_date=False, allow_tdelta=False)
def test_nansum(self):
self.check_funs(nanops.nansum, np.sum, allow_str=False,
allow_date=False, allow_tdelta=True, check_dtype=False)
def test_nanmean(self):
self.check_funs(nanops.nanmean, np.mean, allow_complex=False,
allow_obj=False, allow_str=False, allow_date=False,
allow_tdelta=True)
def test_nanmean_overflow(self):
# GH 10155
# In the previous implementation mean can overflow for int dtypes, it
# is now consistent with numpy
# numpy < 1.9.0 is not computing this correctly
from distutils.version import LooseVersion
if LooseVersion(np.__version__) >= '1.9.0':
for a in [2 ** 55, -2 ** 55, 20150515061816532]:
s = Series(a, index=range(500), dtype=np.int64)
result = s.mean()
np_result = s.values.mean()
self.assertEqual(result, a)
self.assertEqual(result, np_result)
self.assertTrue(result.dtype == np.float64)
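            # Rationale (sketch): 500 * 2**55 ~ 1.8e19 exceeds the int64
            # maximum (~9.22e18), so a naive integer accumulator would wrap;
            # a float64 sum stays exact here because 500 * 2**55 = 125 * 2**57
            # fits in 53 bits of mantissa.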
def test_returned_dtype(self):
dtypes = [np.int16, np.int32, np.int64, np.float32, np.float64]
if hasattr(np, 'float128'):
dtypes.append(np.float128)
for dtype in dtypes:
s = Series(range(10), dtype=dtype)
group_a = ['mean', 'std', 'var', 'skew', 'kurt']
group_b = ['min', 'max']
for method in group_a + group_b:
result = getattr(s, method)()
if is_integer_dtype(dtype) and method in group_a:
self.assertTrue(
result.dtype == np.float64,
"return dtype expected from %s is np.float64, "
"got %s instead" % (method, result.dtype))
else:
self.assertTrue(
result.dtype == dtype,
"return dtype expected from %s is %s, "
"got %s instead" % (method, dtype, result.dtype))
def test_nanmedian(self):
with warnings.catch_warnings(record=True):
self.check_funs(nanops.nanmedian, np.median, allow_complex=False,
allow_str=False, allow_date=False,
allow_tdelta=True, allow_obj='convert')
def test_nanvar(self):
self.check_funs_ddof(nanops.nanvar, np.var, allow_complex=False,
allow_str=False, allow_date=False,
allow_tdelta=True, allow_obj='convert')
def test_nanstd(self):
self.check_funs_ddof(nanops.nanstd, np.std, allow_complex=False,
allow_str=False, allow_date=False,
allow_tdelta=True, allow_obj='convert')
def test_nansem(self):
tm.skip_if_no_package('scipy.stats')
tm._skip_if_scipy_0_17()
from scipy.stats import sem
self.check_funs_ddof(nanops.nansem, sem, allow_complex=False,
allow_str=False, allow_date=False,
allow_tdelta=True, allow_obj='convert')
def _minmax_wrap(self, value, axis=None, func=None):
res = func(value, axis)
if res.dtype.kind == 'm':
res = np.atleast_1d(res)
return res
def test_nanmin(self):
func = partial(self._minmax_wrap, func=np.min)
self.check_funs(nanops.nanmin, func, allow_str=False, allow_obj=False)
def test_nanmax(self):
func = partial(self._minmax_wrap, func=np.max)
self.check_funs(nanops.nanmax, func, allow_str=False, allow_obj=False)
def _argminmax_wrap(self, value, axis=None, func=None):
res = func(value, axis)
nans = np.min(value, axis)
nullnan = isnull(nans)
if res.ndim:
res[nullnan] = -1
elif (hasattr(nullnan, 'all') and nullnan.all() or
not hasattr(nullnan, 'all') and nullnan):
res = -1
return res
def test_nanargmax(self):
func = partial(self._argminmax_wrap, func=np.argmax)
self.check_funs(nanops.nanargmax, func, allow_str=False,
allow_obj=False, allow_date=True, allow_tdelta=True)
def test_nanargmin(self):
func = partial(self._argminmax_wrap, func=np.argmin)
if tm.sys.version_info[0:2] == (2, 6):
self.check_funs(nanops.nanargmin, func, allow_date=True,
allow_tdelta=True, allow_str=False,
allow_obj=False)
else:
self.check_funs(nanops.nanargmin, func, allow_str=False,
allow_obj=False)
def _skew_kurt_wrap(self, values, axis=None, func=None):
if not isinstance(values.dtype.type, np.floating):
values = values.astype('f8')
result = func(values, axis=axis, bias=False)
# fix for handling cases where all elements in an axis are the same
if isinstance(result, np.ndarray):
result[np.max(values, axis=axis) == np.min(values, axis=axis)] = 0
return result
elif np.max(values) == np.min(values):
return 0.
return result
def test_nanskew(self):
tm.skip_if_no_package('scipy.stats')
tm._skip_if_scipy_0_17()
from scipy.stats import skew
func = partial(self._skew_kurt_wrap, func=skew)
self.check_funs(nanops.nanskew, func, allow_complex=False,
allow_str=False, allow_date=False, allow_tdelta=False)
def test_nankurt(self):
tm.skip_if_no_package('scipy.stats')
tm._skip_if_scipy_0_17()
from scipy.stats import kurtosis
func1 = partial(kurtosis, fisher=True)
func = partial(self._skew_kurt_wrap, func=func1)
self.check_funs(nanops.nankurt, func, allow_complex=False,
allow_str=False, allow_date=False, allow_tdelta=False)
def test_nanprod(self):
self.check_funs(nanops.nanprod, np.prod, allow_str=False,
allow_date=False, allow_tdelta=False)
def check_nancorr_nancov_2d(self, checkfun, targ0, targ1, **kwargs):
res00 = checkfun(self.arr_float_2d, self.arr_float1_2d, **kwargs)
res01 = checkfun(self.arr_float_2d, self.arr_float1_2d,
min_periods=len(self.arr_float_2d) - 1, **kwargs)
tm.assert_almost_equal(targ0, res00)
tm.assert_almost_equal(targ0, res01)
res10 = checkfun(self.arr_float_nan_2d, self.arr_float1_nan_2d,
**kwargs)
res11 = checkfun(self.arr_float_nan_2d, self.arr_float1_nan_2d,
min_periods=len(self.arr_float_2d) - 1, **kwargs)
tm.assert_almost_equal(targ1, res10)
tm.assert_almost_equal(targ1, res11)
targ2 = np.nan
res20 = checkfun(self.arr_nan_2d, self.arr_float1_2d, **kwargs)
res21 = checkfun(self.arr_float_2d, self.arr_nan_2d, **kwargs)
res22 = checkfun(self.arr_nan_2d, self.arr_nan_2d, **kwargs)
res23 = checkfun(self.arr_float_nan_2d, self.arr_nan_float1_2d,
**kwargs)
res24 = checkfun(self.arr_float_nan_2d, self.arr_nan_float1_2d,
min_periods=len(self.arr_float_2d) - 1, **kwargs)
res25 = checkfun(self.arr_float_2d, self.arr_float1_2d,
min_periods=len(self.arr_float_2d) + 1, **kwargs)
tm.assert_almost_equal(targ2, res20)
tm.assert_almost_equal(targ2, res21)
tm.assert_almost_equal(targ2, res22)
tm.assert_almost_equal(targ2, res23)
tm.assert_almost_equal(targ2, res24)
tm.assert_almost_equal(targ2, res25)
def check_nancorr_nancov_1d(self, checkfun, targ0, targ1, **kwargs):
res00 = checkfun(self.arr_float_1d, self.arr_float1_1d, **kwargs)
res01 = checkfun(self.arr_float_1d, self.arr_float1_1d,
min_periods=len(self.arr_float_1d) - 1, **kwargs)
tm.assert_almost_equal(targ0, res00)
tm.assert_almost_equal(targ0, res01)
res10 = checkfun(self.arr_float_nan_1d, self.arr_float1_nan_1d,
**kwargs)
res11 = checkfun(self.arr_float_nan_1d, self.arr_float1_nan_1d,
min_periods=len(self.arr_float_1d) - 1, **kwargs)
tm.assert_almost_equal(targ1, res10)
tm.assert_almost_equal(targ1, res11)
targ2 = np.nan
res20 = checkfun(self.arr_nan_1d, self.arr_float1_1d, **kwargs)
res21 = checkfun(self.arr_float_1d, self.arr_nan_1d, **kwargs)
res22 = checkfun(self.arr_nan_1d, self.arr_nan_1d, **kwargs)
res23 = checkfun(self.arr_float_nan_1d, self.arr_nan_float1_1d,
**kwargs)
res24 = checkfun(self.arr_float_nan_1d, self.arr_nan_float1_1d,
min_periods=len(self.arr_float_1d) - 1, **kwargs)
res25 = checkfun(self.arr_float_1d, self.arr_float1_1d,
min_periods=len(self.arr_float_1d) + 1, **kwargs)
tm.assert_almost_equal(targ2, res20)
tm.assert_almost_equal(targ2, res21)
tm.assert_almost_equal(targ2, res22)
tm.assert_almost_equal(targ2, res23)
tm.assert_almost_equal(targ2, res24)
tm.assert_almost_equal(targ2, res25)
def test_nancorr(self):
targ0 = np.corrcoef(self.arr_float_2d, self.arr_float1_2d)[0, 1]
targ1 = np.corrcoef(self.arr_float_2d.flat,
self.arr_float1_2d.flat)[0, 1]
self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1)
targ0 = np.corrcoef(self.arr_float_1d, self.arr_float1_1d)[0, 1]
targ1 = np.corrcoef(self.arr_float_1d.flat,
self.arr_float1_1d.flat)[0, 1]
self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1,
method='pearson')
def test_nancorr_pearson(self):
targ0 = np.corrcoef(self.arr_float_2d, self.arr_float1_2d)[0, 1]
targ1 = np.corrcoef(self.arr_float_2d.flat,
self.arr_float1_2d.flat)[0, 1]
self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1,
method='pearson')
targ0 = np.corrcoef(self.arr_float_1d, self.arr_float1_1d)[0, 1]
targ1 = np.corrcoef(self.arr_float_1d.flat,
self.arr_float1_1d.flat)[0, 1]
self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1,
method='pearson')
def test_nancorr_kendall(self):
tm.skip_if_no_package('scipy.stats')
from scipy.stats import kendalltau
targ0 = kendalltau(self.arr_float_2d, self.arr_float1_2d)[0]
targ1 = kendalltau(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0]
self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1,
method='kendall')
targ0 = kendalltau(self.arr_float_1d, self.arr_float1_1d)[0]
targ1 = kendalltau(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0]
self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1,
method='kendall')
def test_nancorr_spearman(self):
tm.skip_if_no_package('scipy.stats')
from scipy.stats import spearmanr
targ0 = spearmanr(self.arr_float_2d, self.arr_float1_2d)[0]
targ1 = spearmanr(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0]
self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1,
method='spearman')
targ0 = spearmanr(self.arr_float_1d, self.arr_float1_1d)[0]
targ1 = spearmanr(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0]
self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1,
method='spearman')
def test_nancov(self):
targ0 = np.cov(self.arr_float_2d, self.arr_float1_2d)[0, 1]
targ1 = np.cov(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0, 1]
self.check_nancorr_nancov_2d(nanops.nancov, targ0, targ1)
targ0 = np.cov(self.arr_float_1d, self.arr_float1_1d)[0, 1]
targ1 = np.cov(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0, 1]
self.check_nancorr_nancov_1d(nanops.nancov, targ0, targ1)
def check_nancomp(self, checkfun, targ0):
arr_float = self.arr_float
arr_float1 = self.arr_float1
arr_nan = self.arr_nan
arr_nan_nan = self.arr_nan_nan
arr_float_nan = self.arr_float_nan
arr_float1_nan = self.arr_float1_nan
arr_nan_float1 = self.arr_nan_float1
while targ0.ndim:
try:
res0 = checkfun(arr_float, arr_float1)
tm.assert_almost_equal(targ0, res0)
if targ0.ndim > 1:
targ1 = np.vstack([targ0, arr_nan])
else:
targ1 = np.hstack([targ0, arr_nan])
res1 = checkfun(arr_float_nan, arr_float1_nan)
tm.assert_numpy_array_equal(targ1, res1, check_dtype=False)
targ2 = arr_nan_nan
res2 = checkfun(arr_float_nan, arr_nan_float1)
tm.assert_numpy_array_equal(targ2, res2, check_dtype=False)
except Exception as exc:
exc.args += ('ndim: %s' % arr_float.ndim, )
raise
try:
arr_float = np.take(arr_float, 0, axis=-1)
arr_float1 = np.take(arr_float1, 0, axis=-1)
arr_nan = np.take(arr_nan, 0, axis=-1)
arr_nan_nan = np.take(arr_nan_nan, 0, axis=-1)
arr_float_nan = np.take(arr_float_nan, 0, axis=-1)
arr_float1_nan = np.take(arr_float1_nan, 0, axis=-1)
arr_nan_float1 = np.take(arr_nan_float1, 0, axis=-1)
targ0 = np.take(targ0, 0, axis=-1)
except ValueError:
break
def test_nangt(self):
targ0 = self.arr_float > self.arr_float1
self.check_nancomp(nanops.nangt, targ0)
def test_nange(self):
targ0 = self.arr_float >= self.arr_float1
self.check_nancomp(nanops.nange, targ0)
def test_nanlt(self):
targ0 = self.arr_float < self.arr_float1
self.check_nancomp(nanops.nanlt, targ0)
def test_nanle(self):
targ0 = self.arr_float <= self.arr_float1
self.check_nancomp(nanops.nanle, targ0)
def test_naneq(self):
targ0 = self.arr_float == self.arr_float1
self.check_nancomp(nanops.naneq, targ0)
def test_nanne(self):
targ0 = self.arr_float != self.arr_float1
self.check_nancomp(nanops.nanne, targ0)
def check_bool(self, func, value, correct, *args, **kwargs):
while getattr(value, 'ndim', True):
try:
res0 = func(value, *args, **kwargs)
if correct:
self.assertTrue(res0)
else:
self.assertFalse(res0)
except BaseException as exc:
exc.args += ('dim: %s' % getattr(value, 'ndim', value), )
raise
if not hasattr(value, 'ndim'):
break
try:
value = np.take(value, 0, axis=-1)
except ValueError:
break
def test__has_infs(self):
pairs = [('arr_complex', False), ('arr_int', False),
('arr_bool', False), ('arr_str', False), ('arr_utf', False),
('arr_complex', False), ('arr_complex_nan', False),
('arr_nan_nanj', False), ('arr_nan_infj', True),
('arr_complex_nan_infj', True)]
pairs_float = [('arr_float', False), ('arr_nan', False),
('arr_float_nan', False), ('arr_nan_nan', False),
('arr_float_inf', True), ('arr_inf', True),
('arr_nan_inf', True), ('arr_float_nan_inf', True),
('arr_nan_nan_inf', True)]
for arr, correct in pairs:
val = getattr(self, arr)
try:
self.check_bool(nanops._has_infs, val, correct)
except BaseException as exc:
exc.args += (arr, )
raise
for arr, correct in pairs_float:
val = getattr(self, arr)
try:
self.check_bool(nanops._has_infs, val, correct)
self.check_bool(nanops._has_infs, val.astype('f4'), correct)
self.check_bool(nanops._has_infs, val.astype('f2'), correct)
except BaseException as exc:
exc.args += (arr, )
raise
def test__isfinite(self):
pairs = [('arr_complex', False), ('arr_int', False),
('arr_bool', False), ('arr_str', False), ('arr_utf', False),
('arr_complex', False), ('arr_complex_nan', True),
('arr_nan_nanj', True), ('arr_nan_infj', True),
('arr_complex_nan_infj', True)]
pairs_float = [('arr_float', False), ('arr_nan', True),
('arr_float_nan', True), ('arr_nan_nan', True),
('arr_float_inf', True), ('arr_inf', True),
('arr_nan_inf', True), ('arr_float_nan_inf', True),
('arr_nan_nan_inf', True)]
func1 = lambda x: np.any(nanops._isfinite(x).ravel())
# TODO: unused?
# func2 = lambda x: np.any(nanops._isfinite(x).values.ravel())
for arr, correct in pairs:
val = getattr(self, arr)
try:
self.check_bool(func1, val, correct)
except BaseException as exc:
exc.args += (arr, )
raise
for arr, correct in pairs_float:
val = getattr(self, arr)
try:
self.check_bool(func1, val, correct)
self.check_bool(func1, val.astype('f4'), correct)
self.check_bool(func1, val.astype('f2'), correct)
except BaseException as exc:
exc.args += (arr, )
raise
def test__bn_ok_dtype(self):
self.assertTrue(nanops._bn_ok_dtype(self.arr_float.dtype, 'test'))
self.assertTrue(nanops._bn_ok_dtype(self.arr_complex.dtype, 'test'))
self.assertTrue(nanops._bn_ok_dtype(self.arr_int.dtype, 'test'))
self.assertTrue(nanops._bn_ok_dtype(self.arr_bool.dtype, 'test'))
self.assertTrue(nanops._bn_ok_dtype(self.arr_str.dtype, 'test'))
self.assertTrue(nanops._bn_ok_dtype(self.arr_utf.dtype, 'test'))
self.assertFalse(nanops._bn_ok_dtype(self.arr_date.dtype, 'test'))
self.assertFalse(nanops._bn_ok_dtype(self.arr_tdelta.dtype, 'test'))
self.assertFalse(nanops._bn_ok_dtype(self.arr_obj.dtype, 'test'))
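        # Rationale (sketch): these asserts mirror _bn_ok_dtype's contract --
        # bottleneck kernels are only used for numeric/boolean/string dtypes,
        # while datetime64, timedelta64 and object arrays take the numpy paths.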
class TestEnsureNumeric(tm.TestCase):
def test_numeric_values(self):
# Test integer
self.assertEqual(nanops._ensure_numeric(1), 1, 'Failed for int')
# Test float
self.assertEqual(nanops._ensure_numeric(1.1), 1.1, 'Failed for float')
# Test complex
self.assertEqual(nanops._ensure_numeric(1 + 2j), 1 + 2j,
'Failed for complex')
def test_ndarray(self):
# Test numeric ndarray
values = np.array([1, 2, 3])
self.assertTrue(np.allclose(nanops._ensure_numeric(values), values),
'Failed for numeric ndarray')
# Test object ndarray
o_values = values.astype(object)
self.assertTrue(np.allclose(nanops._ensure_numeric(o_values), values),
'Failed for object ndarray')
# Test convertible string ndarray
s_values = np.array(['1', '2', '3'], dtype=object)
self.assertTrue(np.allclose(nanops._ensure_numeric(s_values), values),
'Failed for convertible string ndarray')
# Test non-convertible string ndarray
s_values = np.array(['foo', 'bar', 'baz'], dtype=object)
self.assertRaises(ValueError, lambda: nanops._ensure_numeric(s_values))
def test_convertable_values(self):
self.assertTrue(np.allclose(nanops._ensure_numeric('1'), 1.0),
'Failed for convertible integer string')
self.assertTrue(np.allclose(nanops._ensure_numeric('1.1'), 1.1),
'Failed for convertible float string')
self.assertTrue(np.allclose(nanops._ensure_numeric('1+1j'), 1 + 1j),
'Failed for convertible complex string')
def test_non_convertable_values(self):
self.assertRaises(TypeError, lambda: nanops._ensure_numeric('foo'))
self.assertRaises(TypeError, lambda: nanops._ensure_numeric({}))
self.assertRaises(TypeError, lambda: nanops._ensure_numeric([]))
class TestNanvarFixedValues(tm.TestCase):
# xref GH10242
def setUp(self):
# Samples from a normal distribution.
self.variance = variance = 3.0
self.samples = self.prng.normal(scale=variance ** 0.5, size=100000)
def test_nanvar_all_finite(self):
samples = self.samples
actual_variance = nanops.nanvar(samples)
tm.assert_almost_equal(actual_variance, self.variance,
check_less_precise=2)
def test_nanvar_nans(self):
samples = np.nan * np.ones(2 * self.samples.shape[0])
samples[::2] = self.samples
actual_variance = nanops.nanvar(samples, skipna=True)
tm.assert_almost_equal(actual_variance, self.variance,
check_less_precise=2)
actual_variance = nanops.nanvar(samples, skipna=False)
tm.assert_almost_equal(actual_variance, np.nan, check_less_precise=2)
def test_nanstd_nans(self):
samples = np.nan * np.ones(2 * self.samples.shape[0])
samples[::2] = self.samples
actual_std = nanops.nanstd(samples, skipna=True)
tm.assert_almost_equal(actual_std, self.variance ** 0.5,
check_less_precise=2)
actual_std = nanops.nanvar(samples, skipna=False)
tm.assert_almost_equal(actual_std, np.nan,
check_less_precise=2)
def test_nanvar_axis(self):
# Generate some sample data.
samples_norm = self.samples
samples_unif = self.prng.uniform(size=samples_norm.shape[0])
samples = np.vstack([samples_norm, samples_unif])
actual_variance = nanops.nanvar(samples, axis=1)
tm.assert_almost_equal(actual_variance, np.array(
[self.variance, 1.0 / 12]), check_less_precise=2)
def test_nanvar_ddof(self):
n = 5
samples = self.prng.uniform(size=(10000, n + 1))
samples[:, -1] = np.nan # Force use of our own algorithm.
variance_0 = nanops.nanvar(samples, axis=1, skipna=True, ddof=0).mean()
variance_1 = nanops.nanvar(samples, axis=1, skipna=True, ddof=1).mean()
variance_2 = nanops.nanvar(samples, axis=1, skipna=True, ddof=2).mean()
# The unbiased estimate.
var = 1.0 / 12
tm.assert_almost_equal(variance_1, var,
check_less_precise=2)
# The underestimated variance.
tm.assert_almost_equal(variance_0, (n - 1.0) / n * var,
check_less_precise=2)
# The overestimated variance.
tm.assert_almost_equal(variance_2, (n - 1.0) / (n - 2.0) * var,
check_less_precise=2)
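        # Why these factors (sketch): each estimator divides the same sum of
        # squared deviations by (n - ddof), so relative to the unbiased ddof=1
        # estimate, ddof=0 scales by (n - 1) / n and ddof=2 by (n - 1) / (n - 2).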
def test_ground_truth(self):
# Test against values that were precomputed with Numpy.
samples = np.empty((4, 4))
samples[:3, :3] = np.array([[0.97303362, 0.21869576, 0.55560287
], [0.72980153, 0.03109364, 0.99155171],
[0.09317602, 0.60078248, 0.15871292]])
samples[3] = samples[:, 3] = np.nan
# Actual variances along axis=0, 1 for ddof=0, 1, 2
variance = np.array([[[0.13762259, 0.05619224, 0.11568816
], [0.20643388, 0.08428837, 0.17353224],
[0.41286776, 0.16857673, 0.34706449]],
[[0.09519783, 0.16435395, 0.05082054
], [0.14279674, 0.24653093, 0.07623082],
[0.28559348, 0.49306186, 0.15246163]]])
# Test nanvar.
for axis in range(2):
for ddof in range(3):
var = nanops.nanvar(samples, skipna=True, axis=axis, ddof=ddof)
tm.assert_almost_equal(var[:3], variance[axis, ddof])
self.assertTrue(np.isnan(var[3]))
# Test nanstd.
for axis in range(2):
for ddof in range(3):
std = nanops.nanstd(samples, skipna=True, axis=axis, ddof=ddof)
tm.assert_almost_equal(std[:3], variance[axis, ddof] ** 0.5)
self.assertTrue(np.isnan(std[3]))
def test_nanstd_roundoff(self):
# Regression test for GH 10242 (test data taken from GH 10489). Ensure
# that variance is stable.
data = Series(766897346 * np.ones(10))
for ddof in range(3):
result = data.std(ddof=ddof)
self.assertEqual(result, 0.0)
@property
def prng(self):
return np.random.RandomState(1234)
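    # Note: exposing prng as a property hands back a freshly seeded
    # RandomState on each access, so every test draws the same
    # reproducible sample.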
class TestNanskewFixedValues(tm.TestCase):
# xref GH 11974
def setUp(self):
# Test data + skewness value (computed with scipy.stats.skew)
self.samples = np.sin(np.linspace(0, 1, 200))
self.actual_skew = -0.1875895205961754
def test_constant_series(self):
# xref GH 11974
for val in [3075.2, 3075.3, 3075.5]:
data = val * np.ones(300)
skew = nanops.nanskew(data)
self.assertEqual(skew, 0.0)
def test_all_finite(self):
alpha, beta = 0.3, 0.1
left_tailed = self.prng.beta(alpha, beta, size=100)
self.assertLess(nanops.nanskew(left_tailed), 0)
alpha, beta = 0.1, 0.3
right_tailed = self.prng.beta(alpha, beta, size=100)
self.assertGreater(nanops.nanskew(right_tailed), 0)
def test_ground_truth(self):
skew = nanops.nanskew(self.samples)
self.assertAlmostEqual(skew, self.actual_skew)
def test_axis(self):
samples = np.vstack([self.samples,
np.nan * np.ones(len(self.samples))])
skew = nanops.nanskew(samples, axis=1)
tm.assert_almost_equal(skew, np.array([self.actual_skew, np.nan]))
def test_nans(self):
samples = np.hstack([self.samples, np.nan])
skew = nanops.nanskew(samples, skipna=False)
self.assertTrue(np.isnan(skew))
def test_nans_skipna(self):
samples = np.hstack([self.samples, np.nan])
skew = nanops.nanskew(samples, skipna=True)
tm.assert_almost_equal(skew, self.actual_skew)
@property
def prng(self):
return np.random.RandomState(1234)
class TestNankurtFixedValues(tm.TestCase):
# xref GH 11974
def setUp(self):
# Test data + kurtosis value (computed with scipy.stats.kurtosis)
self.samples = np.sin(np.linspace(0, 1, 200))
self.actual_kurt = -1.2058303433799713
def test_constant_series(self):
# xref GH 11974
for val in [3075.2, 3075.3, 3075.5]:
data = val * np.ones(300)
kurt = nanops.nankurt(data)
self.assertEqual(kurt, 0.0)
def test_all_finite(self):
alpha, beta = 0.3, 0.1
left_tailed = self.prng.beta(alpha, beta, size=100)
self.assertLess(nanops.nankurt(left_tailed), 0)
alpha, beta = 0.1, 0.3
right_tailed = self.prng.beta(alpha, beta, size=100)
self.assertGreater(nanops.nankurt(right_tailed), 0)
def test_ground_truth(self):
kurt = nanops.nankurt(self.samples)
self.assertAlmostEqual(kurt, self.actual_kurt)
def test_axis(self):
samples = np.vstack([self.samples,
np.nan * np.ones(len(self.samples))])
kurt = nanops.nankurt(samples, axis=1)
tm.assert_almost_equal(kurt, np.array([self.actual_kurt, np.nan]))
def test_nans(self):
samples = np.hstack([self.samples, np.nan])
kurt = nanops.nankurt(samples, skipna=False)
self.assertTrue(np.isnan(kurt))
def test_nans_skipna(self):
samples = np.hstack([self.samples, np.nan])
kurt = nanops.nankurt(samples, skipna=True)
tm.assert_almost_equal(kurt, self.actual_kurt)
@property
def prng(self):
return np.random.RandomState(1234)
| [
"[email protected]"
] | |
2b2a54641d5f56d801a5a0f1798713935087ef28 | 09e5cfe06e437989a2ccf2aeecb9c73eb998a36c | /modules/cctbx_project/simtbx/run_tests.py | 5c3244e65192c78f2e1b57410133b5e40024a0a5 | [
"BSD-3-Clause",
"BSD-3-Clause-LBNL"
] | permissive | jorgediazjr/dials-dev20191018 | b81b19653624cee39207b7cefb8dfcb2e99b79eb | 77d66c719b5746f37af51ad593e2941ed6fbba17 | refs/heads/master | 2020-08-21T02:48:54.719532 | 2020-01-25T01:41:37 | 2020-01-25T01:41:37 | 216,089,955 | 0 | 1 | BSD-3-Clause | 2020-01-25T01:41:39 | 2019-10-18T19:03:17 | Python | UTF-8 | Python | false | false | 468 | py | from __future__ import absolute_import, division, print_function
from libtbx import test_utils
import libtbx.load_env
tst_list = (
"$D/nanoBragg/tst_nanoBragg_minimal.py",
"$D/nanoBragg/tst_nanoBragg_mosaic.py",
"$D/nanoBragg/tst_gaussian_mosaicity.py",
)
def run():
build_dir = libtbx.env.under_build("simtbx")
dist_dir = libtbx.env.dist_path("simtbx")
test_utils.run_tests(build_dir, dist_dir, tst_list)
if (__name__ == "__main__"):
run()
| [
"[email protected]"
] | |
80d457fe0e0df539d494873fa3d8e41ce774ae0b | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_1/palemale/a.py | f78d73ef5adea50522114802f390513ce3e2cfff | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 810 | py | import os, sys
with open(sys.argv[1], 'r') as infile:
N = int(infile.readline().strip())
for x in xrange(1, N+1):
T = infile.readline().strip()
cases = set(list(T))
intT = int(T)
current = intT
count = 2
stablecount = 0
while len(cases) < 10:
current = count*intT
count += 1
cur_num = len(cases)
cases.update(list(str(current)))
if cur_num == len(cases):
stablecount += 1
else:
stablecount = 0
if stablecount > 100:
current = 'INSOMNIA'
break
if isinstance(current, int):
current = str(current)
print "Case #%s: %s" % (x, current) | [
"[[email protected]]"
] | |
0bf9f14a7d8f3b313cb14ebe38a4ae36709d9164 | 92237641f61e9b35ff6af6294153a75074757bec | /Algorithm/programmers/lv2/lv2_짝지어 제거하기.py | dc49c17ce25e718214f85eb4831fb672b343a239 | [] | no_license | taepd/study | 8ded115765c4f804813e255d9272b727bf41ec80 | 846d3f2a5a4100225b750f00f992a640e9287d9c | refs/heads/master | 2023-03-08T13:56:57.366577 | 2022-05-08T15:24:35 | 2022-05-08T15:24:35 | 245,838,600 | 0 | 1 | null | 2023-03-05T23:54:41 | 2020-03-08T15:25:15 | JavaScript | UTF-8 | Python | false | false | 278 | py | def solution(s):
stack = []
for e in s:
if not stack:
stack.append(e)
else:
if stack[-1] == e:
stack.pop()
else:
stack.append(e)
if stack:
return 0
else:
        return 1
| [
"[email protected]"
] | |
69b384952afa18b41fb769869d637c21f4a61bbb | 2075052d028ed31a30bdb9acb0a2022c2634f52b | /chat/consumers.py | 761dd8369a35c0e33e7d8ef65e1ce163904ade18 | [] | no_license | igoo-Y/live_chat_app | b67704caa2e5944b131a4299716e501b555985b5 | d65c87a35d3f3a120da35290addb798e412dad72 | refs/heads/main | 2023-06-30T13:21:49.860265 | 2021-08-03T09:11:29 | 2021-08-03T09:11:29 | 392,256,262 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,140 | py | import json
from channels.generic.websocket import AsyncWebsocketConsumer
class ChatConsumer(AsyncWebsocketConsumer):
async def connect(self):
self.room_name = self.scope["url_route"]["kwargs"]["room_name"]
self.room_group_name = "chat_%s" % self.room_name
# Join room group
await self.channel_layer.group_add(self.room_group_name, self.channel_name)
await self.accept()
async def disconnect(self, close_code):
# Leave room group
await self.channel_layer.group_discard(self.room_group_name, self.channel_name)
# Receive message from WebSocket
async def receive(self, text_data):
text_data_json = json.loads(text_data)
message = text_data_json["message"]
# Send message to room group
await self.channel_layer.group_send(
self.room_group_name, {"type": "chat_message", "message": message}
)
# Receive message from room group
async def chat_message(self, event):
message = event["message"]
# Send message to WebSocket
await self.send(text_data=json.dumps({"message": message}))
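# Routing sketch (assumptions: a chat/routing.py module and the Channels
# version -- Channels 3 needs .as_asgi(), Channels 2 takes the class itself);
# the room_name kwarg matches what connect() reads from scope["url_route"]:
# from django.urls import re_path
# from . import consumers
# websocket_urlpatterns = [
#     re_path(r"ws/chat/(?P<room_name>\w+)/$", consumers.ChatConsumer.as_asgi()),
# ]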
| [
"[email protected]"
] | |
aa41fbd83ac1923d6fda08de4cc8f3ebd55904e0 | 90390ddcc21d2f2c0dd5ee3c0e7a3d8d61be9638 | /wsgi/app/forms.py | 4141cbb7183fc430344eb1bf806ca44a244d8598 | [
"MIT"
] | permissive | pjamesjoyce/lcoptview_legacy | b27926e31c16f1fca07c6294e66d706fcb600682 | e0ebeb155d6f62d8619d33cf48db98bab8b7a4cd | refs/heads/master | 2021-07-16T11:38:58.451239 | 2017-09-26T10:43:50 | 2017-09-26T10:43:50 | 107,691,179 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 615 | py | from flask_wtf import FlaskForm
from wtforms import TextField, PasswordField
from wtforms.validators import DataRequired
class LoginForm(FlaskForm):
login_data = TextField('username or email', validators=[DataRequired()])
password = PasswordField('password', validators=[DataRequired()])
class RegistrationForm(FlaskForm):
username = TextField('username', validators=[DataRequired()])
email = TextField('email', validators=[DataRequired()])
password = PasswordField('password', validators=[DataRequired()])
password_repeat = PasswordField('repeat password', validators=[DataRequired()])
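    # Note (sketch): as written, password_repeat is never compared against
    # password; wtforms ships an EqualTo validator for that, e.g.
    #   from wtforms.validators import EqualTo
    #   password_repeat = PasswordField('repeat password',
    #                                   validators=[DataRequired(), EqualTo('password')])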
| [
"[email protected]"
] | |
3823340ea644b2feec0858721dad3a7c2d67d330 | 1b597dd7630f9a3023faf557e383b0fae703e72b | /test_autogalaxy/unit/aggregator/test_aggregator.py | 40b7acd97191da8084e06012b80ef34395849c57 | [
"MIT"
] | permissive | knut0815/PyAutoGalaxy | 96e9dfc558182169c41e19d3297cdf46b42d5f77 | cc2bc0db5080a278ba7519f94d2a8b2468141e2d | refs/heads/master | 2023-03-05T00:59:51.594715 | 2021-02-09T18:21:30 | 2021-02-09T18:21:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,428 | py | from os import path
import pytest
import autofit as af
import autogalaxy as ag
from autogalaxy.mock import mock
directory = path.dirname(path.realpath(__file__))
@pytest.fixture(name="path")
def make_path():
return path.join("{}".format(path.dirname(path.realpath(__file__))), "files")
@pytest.fixture(name="samples")
def make_samples():
galaxy_0 = ag.Galaxy(redshift=0.5, light=ag.lp.EllipticalSersic(centre=(0.0, 1.0)))
galaxy_1 = ag.Galaxy(redshift=1.0, light=ag.lp.EllipticalSersic())
plane = ag.Plane(galaxies=[galaxy_0, galaxy_1])
return mock.MockSamples(max_log_likelihood_instance=plane)
def test__dataset_generator_from_aggregator(imaging_7x7, mask_7x7, samples):
phase_imaging_7x7 = ag.PhaseImaging(
galaxies=dict(
galaxy=ag.GalaxyModel(redshift=0.5, light=ag.lp.EllipticalSersic),
source=ag.GalaxyModel(redshift=1.0, light=ag.lp.EllipticalSersic),
),
search=mock.MockSearch(samples=samples, name="test_phase_aggregator"),
)
imaging_7x7.positions = ag.Grid2DIrregular([[1.0, 1.0], [2.0, 2.0]])
phase_imaging_7x7.run(
dataset=imaging_7x7, mask=mask_7x7, results=mock.MockResults(samples=samples)
)
agg = af.Aggregator(directory=phase_imaging_7x7.paths.output_path)
dataset = list(agg.values("dataset"))
print(dataset)
def test__plane_generator_from_aggregator(imaging_7x7, mask_7x7, samples):
phase_imaging_7x7 = ag.PhaseImaging(
galaxies=dict(
galaxy=ag.GalaxyModel(redshift=0.5, light=ag.lp.EllipticalSersic),
source=ag.GalaxyModel(redshift=1.0, light=ag.lp.EllipticalSersic),
),
search=mock.MockSearch(samples=samples, name="test_phase_aggregator"),
)
phase_imaging_7x7.run(
dataset=imaging_7x7, mask=mask_7x7, results=mock.MockResults(samples=samples)
)
agg = af.Aggregator(directory=phase_imaging_7x7.paths.output_path)
plane_gen = ag.agg.Plane(aggregator=agg)
for plane in plane_gen:
assert plane.galaxies[0].redshift == 0.5
assert plane.galaxies[0].light.centre == (0.0, 1.0)
assert plane.galaxies[1].redshift == 1.0
def test__masked_imaging_generator_from_aggregator(imaging_7x7, mask_7x7, samples):
phase_imaging_7x7 = ag.PhaseImaging(
galaxies=dict(
galaxy=ag.GalaxyModel(redshift=0.5, light=ag.lp.EllipticalSersic),
source=ag.GalaxyModel(redshift=1.0, light=ag.lp.EllipticalSersic),
),
settings=ag.SettingsPhaseImaging(
settings_masked_imaging=ag.SettingsMaskedImaging(
grid_class=ag.Grid2DIterate,
grid_inversion_class=ag.Grid2DIterate,
fractional_accuracy=0.5,
sub_steps=[2],
)
),
search=mock.MockSearch(samples=samples, name="test_phase_aggregator"),
)
phase_imaging_7x7.run(
dataset=imaging_7x7, mask=mask_7x7, results=mock.MockResults(samples=samples)
)
agg = af.Aggregator(directory=phase_imaging_7x7.paths.output_path)
masked_imaging_gen = ag.agg.MaskedImaging(aggregator=agg)
for masked_imaging in masked_imaging_gen:
assert (masked_imaging.imaging.image == imaging_7x7.image).all()
assert isinstance(masked_imaging.grid, ag.Grid2DIterate)
assert isinstance(masked_imaging.grid_inversion, ag.Grid2DIterate)
assert masked_imaging.grid.sub_steps == [2]
assert masked_imaging.grid.fractional_accuracy == 0.5
def test__fit_imaging_generator_from_aggregator(imaging_7x7, mask_7x7, samples):
phase_imaging_7x7 = ag.PhaseImaging(
galaxies=dict(
galaxy=ag.GalaxyModel(redshift=0.5, light=ag.lp.EllipticalSersic),
source=ag.GalaxyModel(redshift=1.0, light=ag.lp.EllipticalSersic),
),
search=mock.MockSearch(samples=samples, name="test_phase_aggregator"),
)
phase_imaging_7x7.run(
dataset=imaging_7x7, mask=mask_7x7, results=mock.MockResults(samples=samples)
)
agg = af.Aggregator(directory=phase_imaging_7x7.paths.output_path)
fit_imaging_gen = ag.agg.FitImaging(aggregator=agg)
for fit_imaging in fit_imaging_gen:
assert (fit_imaging.masked_imaging.imaging.image == imaging_7x7.image).all()
def test__masked_interferometer_generator_from_aggregator(
interferometer_7, visibilities_mask_7, mask_7x7, samples
):
phase_interferometer_7x7 = ag.PhaseInterferometer(
galaxies=dict(
galaxy=ag.GalaxyModel(redshift=0.5, light=ag.lp.EllipticalSersic),
source=ag.GalaxyModel(redshift=1.0, light=ag.lp.EllipticalSersic),
),
settings=ag.SettingsPhaseInterferometer(
settings_masked_interferometer=ag.SettingsMaskedInterferometer(
grid_class=ag.Grid2DIterate,
grid_inversion_class=ag.Grid2DIterate,
fractional_accuracy=0.5,
sub_steps=[2],
transformer_class=ag.TransformerDFT,
)
),
search=mock.MockSearch(samples=samples, name="test_phase_aggregator"),
real_space_mask=mask_7x7,
)
phase_interferometer_7x7.run(
dataset=interferometer_7,
mask=visibilities_mask_7,
results=mock.MockResults(samples=samples),
)
agg = af.Aggregator(directory=phase_interferometer_7x7.paths.output_path)
masked_interferometer_gen = ag.agg.MaskedInterferometer(aggregator=agg)
for masked_interferometer in masked_interferometer_gen:
assert (
masked_interferometer.interferometer.visibilities
== interferometer_7.visibilities
).all()
assert (masked_interferometer.real_space_mask == mask_7x7).all()
assert isinstance(masked_interferometer.grid, ag.Grid2DIterate)
assert isinstance(masked_interferometer.grid_inversion, ag.Grid2DIterate)
assert masked_interferometer.grid.sub_steps == [2]
assert masked_interferometer.grid.fractional_accuracy == 0.5
assert isinstance(masked_interferometer.transformer, ag.TransformerDFT)
def test__fit_interferometer_generator_from_aggregator(
interferometer_7, visibilities_mask_7, mask_7x7, samples
):
phase_interferometer_7x7 = ag.PhaseInterferometer(
galaxies=dict(
galaxy=ag.GalaxyModel(redshift=0.5, light=ag.lp.EllipticalSersic),
source=ag.GalaxyModel(redshift=1.0, light=ag.lp.EllipticalSersic),
),
search=mock.MockSearch(samples=samples, name="test_phase_aggregator"),
real_space_mask=mask_7x7,
)
phase_interferometer_7x7.run(
dataset=interferometer_7,
mask=visibilities_mask_7,
results=mock.MockResults(samples=samples),
)
agg = af.Aggregator(directory=phase_interferometer_7x7.paths.output_path)
fit_interferometer_gen = ag.agg.FitInterferometer(aggregator=agg)
for fit_interferometer in fit_interferometer_gen:
assert (
fit_interferometer.masked_interferometer.interferometer.visibilities
== interferometer_7.visibilities
).all()
assert (
fit_interferometer.masked_interferometer.real_space_mask == mask_7x7
).all()
| [
"[email protected]"
] | |
b708ef0ba29cc97092ba45507823ff4dd82a5350 | 97062249c6eb04069c6fb01e71d06bc334c828e1 | /desktop/core/ext-py/Django-1.6.10/tests/decorators/tests.py | 05016be231c1703dbabc6a7a8f688f91e33ceaf2 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | Albertsss/hue | 1c8b31c64cc420a029f5b5b80712fb3d0c6cbd6e | 454d320dd09b6f7946f3cc05bc97c3e2ca6cd485 | refs/heads/master | 2021-07-08T17:21:13.237871 | 2018-05-30T06:03:21 | 2018-05-30T06:03:21 | 135,386,450 | 0 | 1 | Apache-2.0 | 2020-07-25T13:36:58 | 2018-05-30T04:06:18 | Python | UTF-8 | Python | false | false | 8,243 | py | from functools import wraps
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth.decorators import login_required, permission_required, user_passes_test
from django.http import HttpResponse, HttpRequest, HttpResponseNotAllowed
from django.middleware.clickjacking import XFrameOptionsMiddleware
from django.utils.decorators import method_decorator
from django.utils.functional import allow_lazy, lazy, memoize
from django.utils.unittest import TestCase
from django.views.decorators.cache import cache_page, never_cache, cache_control
from django.views.decorators.clickjacking import xframe_options_deny, xframe_options_sameorigin, xframe_options_exempt
from django.views.decorators.http import require_http_methods, require_GET, require_POST, require_safe, condition
from django.views.decorators.vary import vary_on_headers, vary_on_cookie
def fully_decorated(request):
"""Expected __doc__"""
return HttpResponse('<html><body>dummy</body></html>')
fully_decorated.anything = "Expected __dict__"
def compose(*functions):
# compose(f, g)(*args, **kwargs) == f(g(*args, **kwargs))
functions = list(reversed(functions))
def _inner(*args, **kwargs):
result = functions[0](*args, **kwargs)
for f in functions[1:]:
result = f(result)
return result
return _inner
full_decorator = compose(
# django.views.decorators.http
require_http_methods(["GET"]),
require_GET,
require_POST,
require_safe,
condition(lambda r: None, lambda r: None),
# django.views.decorators.vary
vary_on_headers('Accept-language'),
vary_on_cookie,
# django.views.decorators.cache
cache_page(60*15),
cache_control(private=True),
never_cache,
# django.contrib.auth.decorators
# Apply user_passes_test twice to check #9474
user_passes_test(lambda u:True),
login_required,
permission_required('change_world'),
# django.contrib.admin.views.decorators
staff_member_required,
# django.utils.functional
lambda f: memoize(f, {}, 1),
allow_lazy,
lazy,
)
fully_decorated = full_decorator(fully_decorated)
class DecoratorsTest(TestCase):
def test_attributes(self):
"""
Tests that django decorators set certain attributes of the wrapped
function.
"""
self.assertEqual(fully_decorated.__name__, 'fully_decorated')
self.assertEqual(fully_decorated.__doc__, 'Expected __doc__')
self.assertEqual(fully_decorated.__dict__['anything'], 'Expected __dict__')
def test_user_passes_test_composition(self):
"""
Test that the user_passes_test decorator can be applied multiple times
(#9474).
"""
def test1(user):
user.decorators_applied.append('test1')
return True
def test2(user):
user.decorators_applied.append('test2')
return True
def callback(request):
return request.user.decorators_applied
callback = user_passes_test(test1)(callback)
callback = user_passes_test(test2)(callback)
class DummyUser(object): pass
class DummyRequest(object): pass
request = DummyRequest()
request.user = DummyUser()
request.user.decorators_applied = []
response = callback(request)
self.assertEqual(response, ['test2', 'test1'])
def test_cache_page_new_style(self):
"""
Test that we can call cache_page the new way
"""
def my_view(request):
return "response"
my_view_cached = cache_page(123)(my_view)
self.assertEqual(my_view_cached(HttpRequest()), "response")
my_view_cached2 = cache_page(123, key_prefix="test")(my_view)
self.assertEqual(my_view_cached2(HttpRequest()), "response")
def test_require_safe_accepts_only_safe_methods(self):
"""
Test for the require_safe decorator.
A view returns either a response or an exception.
Refs #15637.
"""
def my_view(request):
return HttpResponse("OK")
my_safe_view = require_safe(my_view)
request = HttpRequest()
request.method = 'GET'
self.assertIsInstance(my_safe_view(request), HttpResponse)
request.method = 'HEAD'
self.assertIsInstance(my_safe_view(request), HttpResponse)
request.method = 'POST'
self.assertIsInstance(my_safe_view(request), HttpResponseNotAllowed)
request.method = 'PUT'
self.assertIsInstance(my_safe_view(request), HttpResponseNotAllowed)
request.method = 'DELETE'
self.assertIsInstance(my_safe_view(request), HttpResponseNotAllowed)
# For testing method_decorator, a decorator that assumes a single argument.
# We will get type arguments if there is a mismatch in the number of arguments.
def simple_dec(func):
def wrapper(arg):
return func("test:" + arg)
return wraps(func)(wrapper)
simple_dec_m = method_decorator(simple_dec)
# For testing method_decorator, two decorators that add an attribute to the function
def myattr_dec(func):
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
wrapper.myattr = True
return wraps(func)(wrapper)
myattr_dec_m = method_decorator(myattr_dec)
def myattr2_dec(func):
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
wrapper.myattr2 = True
return wraps(func)(wrapper)
myattr2_dec_m = method_decorator(myattr2_dec)
class MethodDecoratorTests(TestCase):
"""
Tests for method_decorator
"""
def test_preserve_signature(self):
class Test(object):
@simple_dec_m
def say(self, arg):
return arg
self.assertEqual("test:hello", Test().say("hello"))
def test_preserve_attributes(self):
# Sanity check myattr_dec and myattr2_dec
@myattr_dec
@myattr2_dec
def func():
pass
self.assertEqual(getattr(func, 'myattr', False), True)
self.assertEqual(getattr(func, 'myattr2', False), True)
# Now check method_decorator
class Test(object):
@myattr_dec_m
@myattr2_dec_m
def method(self):
"A method"
pass
self.assertEqual(getattr(Test().method, 'myattr', False), True)
self.assertEqual(getattr(Test().method, 'myattr2', False), True)
self.assertEqual(getattr(Test.method, 'myattr', False), True)
self.assertEqual(getattr(Test.method, 'myattr2', False), True)
self.assertEqual(Test.method.__doc__, 'A method')
self.assertEqual(Test.method.__name__, 'method')
class XFrameOptionsDecoratorsTests(TestCase):
"""
Tests for the X-Frame-Options decorators.
"""
def test_deny_decorator(self):
"""
Ensures @xframe_options_deny properly sets the X-Frame-Options header.
"""
@xframe_options_deny
def a_view(request):
return HttpResponse()
r = a_view(HttpRequest())
self.assertEqual(r['X-Frame-Options'], 'DENY')
def test_sameorigin_decorator(self):
"""
Ensures @xframe_options_sameorigin properly sets the X-Frame-Options
header.
"""
@xframe_options_sameorigin
def a_view(request):
return HttpResponse()
r = a_view(HttpRequest())
self.assertEqual(r['X-Frame-Options'], 'SAMEORIGIN')
def test_exempt_decorator(self):
"""
Ensures @xframe_options_exempt properly instructs the
XFrameOptionsMiddleware to NOT set the header.
"""
@xframe_options_exempt
def a_view(request):
return HttpResponse()
req = HttpRequest()
resp = a_view(req)
self.assertEqual(resp.get('X-Frame-Options', None), None)
self.assertTrue(resp.xframe_options_exempt)
# Since the real purpose of the exempt decorator is to suppress
# the middleware's functionality, let's make sure it actually works...
r = XFrameOptionsMiddleware().process_response(req, resp)
self.assertEqual(r.get('X-Frame-Options', None), None)
| [
"[email protected]"
] | |
12148e25092c6f6329984c046651dba85edfb209 | 3c2e75d3563053dd186dcff324fd84eba561f2a7 | /python/onos/rsm/__init__.py | b94966701cbd5b870c5f829c3de02c8bd040a16c | [
"Apache-2.0"
] | permissive | stjordanis/onos-api | 00c2434090b9f51d7eacf00f082abd7f2146c1fc | 13fca9dc160a23bc9d89e4ef33ee2da9b2a8ee48 | refs/heads/master | 2023-09-02T11:07:58.824154 | 2021-11-01T17:40:27 | 2021-11-01T17:40:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | true | 10,002 | py | # Generated by the protocol buffer compiler. DO NOT EDIT!
# sources: onos/rsm/rsm.proto
# plugin: python-betterproto
from dataclasses import dataclass
from typing import Dict, List, Optional
import betterproto
from betterproto.grpc.grpclib_server import ServiceBase
import grpclib
class SliceType(betterproto.Enum):
SLICE_TYPE_DL_SLICE = 0
SLICE_TYPE_UL_SLICE = 1
class SchedulerType(betterproto.Enum):
SCHEDULER_TYPE_ROUND_ROBIN = 0
SCHEDULER_TYPE_PROPORTIONALLY_FAIR = 1
SCHEDULER_TYPE_QOS_BASED = 2
class UeIdType(betterproto.Enum):
UE_ID_TYPE_CU_UE_F1_AP_ID = 0
UE_ID_TYPE_DU_UE_F1_AP_ID = 1
UE_ID_TYPE_RAN_UE_NGAP_ID = 2
UE_ID_TYPE_AMF_UE_NGAP_ID = 3
UE_ID_TYPE_ENB_UE_S1_AP_ID = 4
@dataclass(eq=False, repr=False)
class SliceItem(betterproto.Message):
e2_node_id: str = betterproto.string_field(1)
slice_ids: List[str] = betterproto.string_field(2)
@dataclass(eq=False, repr=False)
class Ack(betterproto.Message):
success: bool = betterproto.bool_field(1)
cause: str = betterproto.string_field(2)
@dataclass(eq=False, repr=False)
class CreateSliceRequest(betterproto.Message):
e2_node_id: str = betterproto.string_field(1)
slice_id: str = betterproto.string_field(2)
scheduler_type: "SchedulerType" = betterproto.enum_field(3)
weight: str = betterproto.string_field(4)
slice_type: "SliceType" = betterproto.enum_field(5)
@dataclass(eq=False, repr=False)
class CreateSliceResponse(betterproto.Message):
ack: "Ack" = betterproto.message_field(1)
@dataclass(eq=False, repr=False)
class UpdateSliceRequest(betterproto.Message):
e2_node_id: str = betterproto.string_field(1)
slice_id: str = betterproto.string_field(2)
scheduler_type: "SchedulerType" = betterproto.enum_field(3)
weight: str = betterproto.string_field(4)
slice_type: "SliceType" = betterproto.enum_field(5)
@dataclass(eq=False, repr=False)
class UpdateSliceResponse(betterproto.Message):
ack: "Ack" = betterproto.message_field(1)
@dataclass(eq=False, repr=False)
class DeleteSliceRequest(betterproto.Message):
e2_node_id: str = betterproto.string_field(1)
slice_id: str = betterproto.string_field(2)
slice_type: "SliceType" = betterproto.enum_field(3)
@dataclass(eq=False, repr=False)
class DeleteSliceResponse(betterproto.Message):
ack: "Ack" = betterproto.message_field(1)
@dataclass(eq=False, repr=False)
class SliceAssocItem(betterproto.Message):
ue_slice_assoc_id: str = betterproto.string_field(1)
e2_node_id: str = betterproto.string_field(2)
ue_id: List["UeIdType"] = betterproto.enum_field(3)
slice_id: str = betterproto.string_field(4)
@dataclass(eq=False, repr=False)
class UeId(betterproto.Message):
ue_id: str = betterproto.string_field(1)
type: "UeIdType" = betterproto.enum_field(2)
@dataclass(eq=False, repr=False)
class SetUeSliceAssociationRequest(betterproto.Message):
e2_node_id: str = betterproto.string_field(1)
ue_id: List["UeId"] = betterproto.message_field(2)
dl_slice_id: str = betterproto.string_field(3)
ul_slice_id: str = betterproto.string_field(4)
drb_id: str = betterproto.string_field(5)
@dataclass(eq=False, repr=False)
class SetUeSliceAssociationResponse(betterproto.Message):
ack: "Ack" = betterproto.message_field(1)
assigned_ue_slice_assoc_id: str = betterproto.string_field(2)
class RsmStub(betterproto.ServiceStub):
async def create_slice(
self,
*,
e2_node_id: str = "",
slice_id: str = "",
scheduler_type: "SchedulerType" = None,
weight: str = "",
slice_type: "SliceType" = None,
) -> "CreateSliceResponse":
request = CreateSliceRequest()
request.e2_node_id = e2_node_id
request.slice_id = slice_id
request.scheduler_type = scheduler_type
request.weight = weight
request.slice_type = slice_type
return await self._unary_unary(
"/onos.rsm.Rsm/CreateSlice", request, CreateSliceResponse
)
async def update_slice(
self,
*,
e2_node_id: str = "",
slice_id: str = "",
scheduler_type: "SchedulerType" = None,
weight: str = "",
slice_type: "SliceType" = None,
) -> "UpdateSliceResponse":
request = UpdateSliceRequest()
request.e2_node_id = e2_node_id
request.slice_id = slice_id
request.scheduler_type = scheduler_type
request.weight = weight
request.slice_type = slice_type
return await self._unary_unary(
"/onos.rsm.Rsm/UpdateSlice", request, UpdateSliceResponse
)
async def delete_slice(
self,
*,
e2_node_id: str = "",
slice_id: str = "",
slice_type: "SliceType" = None,
) -> "DeleteSliceResponse":
request = DeleteSliceRequest()
request.e2_node_id = e2_node_id
request.slice_id = slice_id
request.slice_type = slice_type
return await self._unary_unary(
"/onos.rsm.Rsm/DeleteSlice", request, DeleteSliceResponse
)
async def set_ue_slice_association(
self,
*,
e2_node_id: str = "",
ue_id: Optional[List["UeId"]] = None,
dl_slice_id: str = "",
ul_slice_id: str = "",
drb_id: str = "",
) -> "SetUeSliceAssociationResponse":
ue_id = ue_id or []
request = SetUeSliceAssociationRequest()
request.e2_node_id = e2_node_id
if ue_id is not None:
request.ue_id = ue_id
request.dl_slice_id = dl_slice_id
request.ul_slice_id = ul_slice_id
request.drb_id = drb_id
return await self._unary_unary(
"/onos.rsm.Rsm/SetUeSliceAssociation",
request,
SetUeSliceAssociationResponse,
)
class RsmBase(ServiceBase):
async def create_slice(
self,
e2_node_id: str,
slice_id: str,
scheduler_type: "SchedulerType",
weight: str,
slice_type: "SliceType",
) -> "CreateSliceResponse":
raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)
async def update_slice(
self,
e2_node_id: str,
slice_id: str,
scheduler_type: "SchedulerType",
weight: str,
slice_type: "SliceType",
) -> "UpdateSliceResponse":
raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)
async def delete_slice(
self, e2_node_id: str, slice_id: str, slice_type: "SliceType"
) -> "DeleteSliceResponse":
raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)
async def set_ue_slice_association(
self,
e2_node_id: str,
ue_id: Optional[List["UeId"]],
dl_slice_id: str,
ul_slice_id: str,
drb_id: str,
) -> "SetUeSliceAssociationResponse":
raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)
async def __rpc_create_slice(self, stream: grpclib.server.Stream) -> None:
request = await stream.recv_message()
request_kwargs = {
"e2_node_id": request.e2_node_id,
"slice_id": request.slice_id,
"scheduler_type": request.scheduler_type,
"weight": request.weight,
"slice_type": request.slice_type,
}
response = await self.create_slice(**request_kwargs)
await stream.send_message(response)
async def __rpc_update_slice(self, stream: grpclib.server.Stream) -> None:
request = await stream.recv_message()
request_kwargs = {
"e2_node_id": request.e2_node_id,
"slice_id": request.slice_id,
"scheduler_type": request.scheduler_type,
"weight": request.weight,
"slice_type": request.slice_type,
}
response = await self.update_slice(**request_kwargs)
await stream.send_message(response)
async def __rpc_delete_slice(self, stream: grpclib.server.Stream) -> None:
request = await stream.recv_message()
request_kwargs = {
"e2_node_id": request.e2_node_id,
"slice_id": request.slice_id,
"slice_type": request.slice_type,
}
response = await self.delete_slice(**request_kwargs)
await stream.send_message(response)
async def __rpc_set_ue_slice_association(
self, stream: grpclib.server.Stream
) -> None:
request = await stream.recv_message()
request_kwargs = {
"e2_node_id": request.e2_node_id,
"ue_id": request.ue_id,
"dl_slice_id": request.dl_slice_id,
"ul_slice_id": request.ul_slice_id,
"drb_id": request.drb_id,
}
response = await self.set_ue_slice_association(**request_kwargs)
await stream.send_message(response)
def __mapping__(self) -> Dict[str, grpclib.const.Handler]:
return {
"/onos.rsm.Rsm/CreateSlice": grpclib.const.Handler(
self.__rpc_create_slice,
grpclib.const.Cardinality.UNARY_UNARY,
CreateSliceRequest,
CreateSliceResponse,
),
"/onos.rsm.Rsm/UpdateSlice": grpclib.const.Handler(
self.__rpc_update_slice,
grpclib.const.Cardinality.UNARY_UNARY,
UpdateSliceRequest,
UpdateSliceResponse,
),
"/onos.rsm.Rsm/DeleteSlice": grpclib.const.Handler(
self.__rpc_delete_slice,
grpclib.const.Cardinality.UNARY_UNARY,
DeleteSliceRequest,
DeleteSliceResponse,
),
"/onos.rsm.Rsm/SetUeSliceAssociation": grpclib.const.Handler(
self.__rpc_set_ue_slice_association,
grpclib.const.Cardinality.UNARY_UNARY,
SetUeSliceAssociationRequest,
SetUeSliceAssociationResponse,
),
}
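# Example client usage (illustrative sketch; the endpoint address, port and all
# field values below are assumptions, not part of the generated API):
#
#   import asyncio
#   from grpclib.client import Channel
#
#   async def demo():
#       channel = Channel(host="127.0.0.1", port=5150)
#       stub = RsmStub(channel)
#       response = await stub.create_slice(
#           e2_node_id="e2node-1",
#           slice_id="slice-1",
#           scheduler_type=SchedulerType.SCHEDULER_TYPE_ROUND_ROBIN,
#           weight="30",
#           slice_type=SliceType.SLICE_TYPE_DL_SLICE,
#       )
#       print(response.ack.success)
#       channel.close()
#
#   asyncio.run(demo())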
| [
"[email protected]"
] | |
a11962ae95b28d1923e23d0a5c514d53c454524e | 7889f7f0532db6a7f81e6f8630e399c90438b2b9 | /3.7.1/_downloads/a54f19823bde998a456571636498aa98/auto_subplots_adjust.py | bd6326b8291f4b1a16db182e1f642d2279a8f0b0 | [] | no_license | matplotlib/matplotlib.github.com | ef5d23a5bf77cb5af675f1a8273d641e410b2560 | 2a60d39490941a524e5385670d488c86083a032c | refs/heads/main | 2023-08-16T18:46:58.934777 | 2023-08-10T05:07:57 | 2023-08-10T05:08:30 | 1,385,150 | 25 | 59 | null | 2023-08-30T15:59:50 | 2011-02-19T03:27:35 | null | UTF-8 | Python | false | false | 3,366 | py | """
===============================================
Programmatically controlling subplot adjustment
===============================================
.. note::
This example is primarily intended to show some advanced concepts in
Matplotlib.
If you are only looking for having enough space for your labels, it is
almost always simpler and good enough to either set the subplot parameters
manually using `.Figure.subplots_adjust`, or use one of the automatic
layout mechanisms
(:doc:`/tutorials/intermediate/constrainedlayout_guide` or
:doc:`/tutorials/intermediate/tight_layout_guide`).
This example describes a user-defined way to read out Artist sizes and
set the subplot parameters accordingly. Its main purpose is to illustrate
some advanced concepts like reading out text positions, working with
bounding boxes and transforms and using
:ref:`events <event-handling-tutorial>`. But it can also serve as a starting
point if you want to automate the layouting and need more flexibility than
tight layout and constrained layout.
Below, we collect the bounding boxes of all y-labels and move the left border
of the subplot to the right so that it leaves enough room for the union of all
the bounding boxes.
There's one catch with calculating text bounding boxes:
Querying the text bounding boxes (`.Text.get_window_extent`) needs a
renderer (`.RendererBase` instance), to calculate the text size. This renderer
is only available after the figure has been drawn (`.Figure.draw`).
A solution to this is putting the adjustment logic in a draw callback.
This function is executed after the figure has been drawn. It can now check
if the subplot leaves enough room for the text. If not, the subplot parameters
are updated and a second draw is triggered.
.. redirect-from:: /gallery/pyplots/auto_subplots_adjust
"""
import matplotlib.pyplot as plt
import matplotlib.transforms as mtransforms
fig, ax = plt.subplots()
ax.plot(range(10))
ax.set_yticks([2, 5, 7], labels=['really, really, really', 'long', 'labels'])
def on_draw(event):
bboxes = []
for label in ax.get_yticklabels():
# Bounding box in pixels
bbox_px = label.get_window_extent()
# Transform to relative figure coordinates. This is the inverse of
# transFigure.
bbox_fig = bbox_px.transformed(fig.transFigure.inverted())
bboxes.append(bbox_fig)
# the bbox that bounds all the bboxes, again in relative figure coords
bbox = mtransforms.Bbox.union(bboxes)
if fig.subplotpars.left < bbox.width:
# Move the subplot left edge more to the right
fig.subplots_adjust(left=1.1*bbox.width) # pad a little
fig.canvas.draw()
fig.canvas.mpl_connect('draw_event', on_draw)
plt.show()
#############################################################################
#
# .. admonition:: References
#
# The use of the following functions, methods, classes and modules is shown
# in this example:
#
# - `matplotlib.artist.Artist.get_window_extent`
# - `matplotlib.transforms.Bbox`
# - `matplotlib.transforms.BboxBase.transformed`
# - `matplotlib.transforms.BboxBase.union`
# - `matplotlib.transforms.Transform.inverted`
# - `matplotlib.figure.Figure.subplots_adjust`
# - `matplotlib.figure.SubplotParams`
# - `matplotlib.backend_bases.FigureCanvasBase.mpl_connect`
| [
"[email protected]"
] | |
b28bbc203b60e128307f6f9d8d309793f3dc1e1a | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /yXZhG7zq6dWhWhirt_24.py | 1b4a59876a63f5dfdb4b9e7de1d41c308c735314 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 215 | py |
def is_prime(n):
  # 0, 1 and negative values are not prime (this also keeps n**0.5 real).
  if n < 2:
    return False
  # Trial division up to the integer square root of n.
  for i in range(2, round(n**0.5) + 1):
    if n % i == 0:
      return False
  return True
def filter_primes(num):
return [n for n in num if is_prime(n)]
| [
"[email protected]"
] | |
bbda84923f2c455dc60051aa1e126bf4dd187233 | 4a88ec266b64521fcaef88d92cb2b57776d3192b | /powerUsageNotification/powerUsageNotification.py | 132c11551e75d59adb52856ce265d156f20d6af7 | [
"MIT"
] | permissive | johntdyer/appdaemon-scripts | 4e5ea345d27d54d8133be212e5f7af57b8dfd57f | ce7e32a919be5a835d0bdf95e6650ff34b699220 | refs/heads/master | 2020-03-31T18:01:49.517418 | 2018-10-07T14:06:38 | 2018-10-07T14:06:38 | 152,443,705 | 1 | 0 | null | 2018-10-10T15:10:00 | 2018-10-10T15:09:59 | null | UTF-8 | Python | false | false | 4,100 | py | import appdaemon.plugins.hass.hassapi as hass
import globals
#
# App which notifies you when a power usage sensor indicates a device is on/off
#
#
# Args:
#
# app_switch: on/off switch for this app. example: input_boolean.turn_fan_on_when_hot
# sensor: power sensor. example: sensor.dishwasher_power_usage
# notify_name: Who to notify. example: group_notifications
# delay: seconds to wait until a the device is considered "off". example: 60
# threshold: amount of "usage" which indicates the device is on. example: 2
# alternative_name: Name to use in notification. example: Waschmaschine
#
# Release Notes
#
# Version 1.3:
# use Notify App
#
# Version 1.2:
# message now directly in own yaml instead of message module
#
# Version 1.1:
# Added app_switch
#
# Version 1.0:
# Initial Version
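#
# Example apps.yaml entry (illustrative sketch; the entity ids and message
# texts are assumptions, not taken from a real configuration — message_DE and
# message_off_DE are read by the code below):
#
# dishwasher_power_notification:
#   module: powerUsageNotification
#   class: PowerUsageNotification
#   app_switch: input_boolean.power_usage_notification
#   sensor: sensor.dishwasher_power_usage
#   notify_name: group_notifications
#   delay: 60
#   threshold: 2
#   alternative_name: Waschmaschine
#   message_DE: "{} wurde eingeschaltet"
#   message_off_DE: "{} ist fertig"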
class PowerUsageNotification(hass.Hass):
def initialize(self):
self.timer_handle_list = []
self.listen_event_handle_list = []
self.listen_state_handle_list = []
self.app_switch = globals.get_arg(self.args,"app_switch")
self.sensor = globals.get_arg(self.args,"sensor")
self.alternative_name = globals.get_arg(self.args,"alternative_name")
self.notify_name = globals.get_arg(self.args,"notify_name")
self.delay = globals.get_arg(self.args,"delay")
self.threshold = globals.get_arg(self.args,"threshold")
self.message = globals.get_arg(self.args,"message_DE")
self.message_off = globals.get_arg(self.args,"message_off_DE")
self.triggered = False
self.isWaitingHandle = None
self.notifier = self.get_app('Notifier')
# Subscribe to sensors
self.listen_state_handle_list.append(self.listen_state(self.state_change, self.sensor))
def state_change(self, entity, attribute, old, new, kwargs):
if self.get_state(self.app_switch) == "on":
# Initial: power usage goes up
if ( new != None and new != "" and not self.triggered and float(new) > self.threshold ):
self.triggered = True
self.log("Power Usage is: {}".format(float(new)))
self.log("Setting triggered to: {}".format(self.triggered))
self.notifier.notify(self.notify_name, self.message.format(self.alternative_name))
# Power usage goes down below threshold
elif ( new != None and new != "" and self.triggered and self.isWaitingHandle == None and float(new) <= self.threshold):
self.log("Waiting: {} seconds to notify.".format(self.delay))
self.isWaitingHandle = self.run_in(self.notify_device_off,self.delay)
self.log("Setting isWaitingHandle to: {}".format(self.isWaitingHandle))
self.timer_handle_list.append(self.isWaitingHandle)
# Power usage goes up before delay
elif( new != None and new != "" and self.triggered and self.isWaitingHandle != None and float(new) > self.threshold):
self.log("Cancelling timer")
self.cancel_timer(self.isWaitingHandle)
self.isWaitingHandle = None
self.log("Setting isWaitingHandle to: {}".format(self.isWaitingHandle))
def notify_device_off(self, kwargs):
"""Notify User that device is off. This may get cancelled if it turns on again in the meantime"""
self.triggered = False
self.log("Setting triggered to: {}".format(self.triggered))
self.isWaitingHandle = None
self.log("Setting isWaitingHandle to: {}".format(self.isWaitingHandle))
self.log("Notifying user")
self.notifier.notify(self.notify_name, self.message_off.format(self.alternative_name))
def terminate(self):
for timer_handle in self.timer_handle_list:
self.cancel_timer(timer_handle)
for listen_event_handle in self.listen_event_handle_list:
self.cancel_listen_event(listen_event_handle)
for listen_state_handle in self.listen_state_handle_list:
self.cancel_listen_state(listen_state_handle) | [
"[email protected]"
] | |
c4281a41c161ba65c8915083ae81b981745630ca | 9775ab319e5c1f2270a132b0244f0847db42589b | /nilai/migrations/0008_auto_20210117_1010.py | d2abe2075f8f24b61525b7b5c136dcc1bf54b97d | [] | no_license | nabaman/SPK-SAW | 9aa8dfaf1bf5162bae1dc5c97e2b3e033a08294b | 5c0b8d491f23939615aa968cd52f081072fe2230 | refs/heads/master | 2023-02-18T17:38:21.028901 | 2021-01-22T15:37:06 | 2021-01-22T15:37:06 | 331,987,703 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 511 | py | # Generated by Django 3.1.5 on 2021-01-17 10:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('nilai', '0007_auto_20210117_1004'),
]
operations = [
migrations.RemoveField(
model_name='data_krips',
name='kriteria',
),
migrations.AddField(
model_name='data_kriteria',
name='krips',
field=models.ManyToManyField(to='nilai.Data_Krips'),
),
]
| [
"[email protected]"
] | |
5db6fa1f99c5b7ac65079c7fd585ce8c7915f235 | 817085c4009e48db05e4a30815fdd92ee27513f9 | /venv/Scripts/pip-script.py | 87639661c7805bc7bbf60fa04e4b53e33d5922f8 | [] | no_license | bluesnie/novel | 7e3a2f403def8fe3e1d9c8c1ba4e2a80344c39e0 | c11076ca61c619a2b7c1423d742d3f4c63dc1fed | refs/heads/master | 2020-04-24T02:07:07.516575 | 2019-02-20T07:44:19 | 2019-02-20T07:44:19 | 171,486,867 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 410 | py | #!C:\Users\lenovo\PycharmProjects\novel\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip')()
)
| [
"l"
] | l |
a643d38e90646191463eca1bc229387c66c1a11f | 65e0c11d690b32c832b943fb43a4206739ddf733 | /bsdradius/trunk/bsdradius/configDefaults.py | 244f2e4ef1d3488677ee5ad1c6d9c71ef18e43ac | [
"BSD-3-Clause"
] | permissive | Cloudxtreme/bsdradius | b5100062ed75c3201d179e190fd89770d8934aee | 69dba67e27215dce49875e94a7eedbbdf77bc784 | refs/heads/master | 2021-05-28T16:50:14.711056 | 2015-04-30T11:54:17 | 2015-04-30T11:54:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,442 | py | ## BSDRadius is released under BSD license.
## Copyright (c) 2006, DATA TECH LABS
## All rights reserved.
##
## Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
## * Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright notice,
## this list of conditions and the following disclaimer in the documentation
## and/or other materials provided with the distribution.
## * Neither the name of the DATA TECH LABS nor the names of its contributors
## may be used to endorse or promote products derived from this software without
## specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
## ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
## WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
## DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
## ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
## (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
## LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
## ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
## SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Define configuration defaults here
"""
# HeadURL $HeadURL: file:///Z:/backup/svn/bsdradius/trunk/bsdradius/configDefaults.py $
# Author: $Author: valts $
# File version: $Revision: 278 $
# Last changes: $Date: 2006-11-26 15:45:52 +0200 (Sv, 26 Nov 2006) $
prefix = '/usr/local'
# define default values
# format: {'section' : {'option' : value}}
defaultOptions = {
'PATHS' : {
'prefix' : prefix,
'conf_dir' : '%(prefix)s/etc/bsdradius',
'run_dir' : '%(prefix)s/var/run',
'log_dir' : '%(prefix)s/var/log/bsdradius',
'user_module_dir' : '%(conf_dir)s/user_modules',
'dictionary_dir' : '%(prefix)s/share/bsdradius/dictionaries',
'dictionary_file' : '%(dictionary_dir)s/dictionary',
'server_log_file' : '%(log_dir)s/bsdradiusd.log',
'pid_file' : '%(run_dir)s/bsdradiusd.pid',
'clients_file' : '%(conf_dir)s/clients.conf',
'modules_file' : '%(conf_dir)s/modules.conf',
'user_modules_file' : '%(conf_dir)s/user_modules.conf',
'config_file' : '%(conf_dir)s/bsdradiusd.conf'
},
'SERVER' : {
'home' : '',
'user' : '',
'group' : '',
'auth_port' : '1812',
'acct_port' : '1813',
'number_of_threads' : '10',
'foreground' : 'no',
'no_threads' : 'no',
'log_to_screen': 'no',
'log_to_file' : 'no',
'debug_mode' : 'no',
'log_client' : '',
'fast_accounting': 'no',
},
'DATABASE' : {
'enable' : 'no',
'type' : 'postgresql',
'host' : 'localhost',
'user' : 'bsdradius',
'pass' : '',
'name' : 'bsdradius',
'refresh_rate' : '60',
'clients_query' : 'select address, name, secret from radiusClients',
},
'AUTHORIZATION' : {
'packet_timeout' : '5',
'auth_queue_maxlength' : '300',
'modules' : '',
},
'ACCOUNTING' : {
'acct_queue_maxlength' : '300',
'modules' : '',
},
}
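# Example: with the defaults above, '%(conf_dir)s/clients.conf' resolves to
# '/usr/local/etc/bsdradius/clients.conf' through ConfigParser-style
# '%(name)s' interpolation.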
# Define option types.
# It is only necessary to define types other
# than string because the Config parser converts everything
# to string by default.
# Format: {'section' : {'option' : 'type'}}
defaultTypes = {
'SERVER' : {
'auth_port' : 'int',
'acct_port' : 'int',
'number_of_threads' : 'int',
'foreground' : 'bool',
'no_threads' : 'bool',
'log_to_screen': 'bool',
'log_to_file': 'bool',
'debug_mode' : 'bool',
'fast_accounting': 'bool',
},
'DATABASE' : {
'enable' : 'bool',
'refresh_rate' : 'int',
},
'AUTHORIZATION' : {
'packet_timeout' : 'int',
'auth_queue_maxlength' : 'int',
},
'ACCOUNTING' : {
'acct_queue_maxlength' : 'int',
},
}
# configuration defaults for one BSD Radius module
moduleConfigDefaults = {
'enable': 'yes',
'configfile': '',
'startup_module': '',
'startup_function': '',
'authorization_module': '',
'authorization_function': '',
'authentication_module': '',
'authentication_function': '',
'accounting_module': '',
'accounting_function': '',
'shutdown_module': '',
'shutdown_function': '',
'pythonpath' : '',
}
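# Example modules.conf section built from the defaults above (illustrative;
# the module name and file paths are assumptions):
#
# [example_module]
# enable = yes
# configfile = %(conf_dir)s/example_module.conf
# authorization_module = %(user_module_dir)s/example_module.py
# authorization_function = authorization
# accounting_module = %(user_module_dir)s/example_module.py
# accounting_function = accounting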
| [
"valdiic@72071c86-a5be-11dd-a5cd-697bfd0a0cef"
] | valdiic@72071c86-a5be-11dd-a5cd-697bfd0a0cef |
02247885ad972b4756997bffe5c07f2ebd394d4a | 908b6ee862375003eac9af6c8ea5b32533be4709 | /collective/phantasy/atphantasy/content/phantasyschema.py | 1842cf48d3c9871472abac28e8375078dccb4886 | [] | no_license | collective/collective.phantasy | c7199995d98be183eb163f5b73c3e89147f2e8a6 | 86cb66b0905f6bb284cfc03201dd5bbfa8f9010e | refs/heads/master | 2023-08-19T13:18:13.507960 | 2015-01-19T15:32:07 | 2015-01-19T15:32:07 | 29,517,296 | 0 | 0 | null | 2015-01-20T07:24:00 | 2015-01-20T07:24:00 | null | UTF-8 | Python | false | false | 37,998 | py | from Products.Archetypes.public import *
from collective.phantasy.config import I18N_DOMAIN
from Products.SmartColorWidget.Widget import SmartColorWidget
from Products.ATContentTypes.configuration import zconf
from Products.Archetypes.atapi import AnnotationStorage
from Products.validation.config import validation
from Products.validation.validators.SupplValidators import MaxSizeValidator
from Products.validation import V_REQUIRED
validation.register(MaxSizeValidator('checkImageMaxSize',
maxsize=zconf.ATImage.max_file_size))
from collective.phantasy import phantasyMessageFactory as _
try:
from iw.fss.FileSystemStorage import FileSystemStorage
HAS_FSS = True
except ImportError:
HAS_FSS = False
try:
from Products.FCKeditor.FckWidget import FckWidget
HAS_FCKWIDGET = True
except ImportError:
HAS_FCKWIDGET = False
PRESERVED_SCHEMATAS = ['default', 'images', 'dimensions', 'colors', 'fonts', 'borders', 'plone-overloads', 'viewlets', 'dynamic-viewlets']
CUSTOM_TOOL_BAR = """[
['Source','Preview','-','Templates'],
['Cut','Copy','Paste','PasteText','RemoveFormat'],
['Bold','Italic','Underline','StrikeThrough','-','Subscript','Superscript'],
['OrderedList','UnorderedList','-','Outdent','Indent'],
['Link','Unlink','Anchor','Image','imgmapPopup','Flash'],
['Style','FontFormat'],
['FitWindow']
]"""
def finalizePhantasySchema(schema):
"""Finalizes schema to alter some fields
"""
# Id must be valid and make description invisible
schema['id'].validators = ('isValidId',)
schema['description'].widget.visible = {'view':'invisible', 'edit':'invisible'}
# FSS Storage for skin screenshot if iw.fss is available
if HAS_FSS :
schema['screenshot'].storage = FileSystemStorage()
for fieldName in schema.keys() :
if schema[fieldName].schemata not in PRESERVED_SCHEMATAS :
            # hide ATCTFolder metadata fields that are not useful for skins
schema[fieldName].widget.visible = {'view':'invisible', 'edit':'invisible'}
# FCKWidget for viewlet fields if FCK is available
if HAS_FCKWIDGET and schema[fieldName].schemata == 'viewlets' :
schema[fieldName].widget = FckWidget (
description = schema[fieldName].widget.description,
label = schema[fieldName].widget.label,
rows=12,
width = '100%',
height ='150px',
fck_toolbar = 'Custom',
fck_custom_toolbar = CUSTOM_TOOL_BAR,
file_portal_type = 'PhantasySkinFile',
image_portal_type = 'PhantasySkinImage',
browse_images_portal_types = ['PhantasySkinImage', 'Image'],
fck_force_other_path_method = 'get_phantasy_relative_path',
fck_force_other_root_method = 'get_phantasy_relative_path',
# force no paragraphs in viewlets
keyboard_entermode = 'div',
allow_link_byuid = False,
start_expanded = True,
allow_file_upload = False)
if fieldName == 'logoViewlet' :
css_id = 'portal-logo'
elif fieldName == 'footerViewlet' :
css_id = 'portal-footer'
elif fieldName == 'colophonViewlet' :
css_id = 'portal-colophon'
schema[fieldName].widget.fck_area_css_id = css_id
schema[fieldName].widget.fck_area_css_class = ''
# Make a copy to reinitialize all layers
new_schema = schema.copy()
return new_schema
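# Typical usage sketch (illustrative; the concrete base schema lives with the
# content type definition elsewhere in this package):
#
#   PhantasySkinSchema = finalizePhantasySchema(
#       ATFolderSchema.copy() + PhantasyFieldsSchema.copy())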
# in skin schema fields with same name as standard plone base_properties must always be required
PhantasyFieldsSchema = Schema((
StringField(
'cssfile',
schemata ='default',
widget=StringWidget(
description = _(u'description_css_file', u"""Enter a stylesheet file name, don't forget to upload the file in this skin.
This css will be applied at the end (after all properties). Use './myimage.jpg' in this css
to reference an image called 'myimage.jpg' from this skin."""),
label = _(u'label_css_file', u'Css File Name'),
),
),
ImageField(
'screenshot',
required=False,
primary=False,
languageIndependent=True,
storage = AnnotationStorage(migrate=True),
swallowResizeExceptions = zconf.swallowImageResizeExceptions.enable,
pil_quality = zconf.pil_config.quality,
pil_resize_algo = zconf.pil_config.resize_algo,
max_size = zconf.ATImage.max_image_dimension,
sizes= {'large' : (768, 768),
'preview' : (400, 400),
'mini' : (200, 200),
'thumb' : (128, 128),
'tile' : (64, 64),
'icon' : (32, 32),
'listing' : (16, 16),
},
validators = (('checkImageMaxSize', V_REQUIRED)),
widget = ImageWidget(
description = _(u'description_phantasy_screenshot',
default=u'Upload a screen Shot for this skin, used to help users to select a skin'),
label= _(u'label_phantasy_screenshot', default=u'Screen Shot'),
show_content_type = False,
preview_scale = 'mini',
),
),
# fields for viewlets overrides
TextField('logoViewlet',
schemata ='viewlets',
required=False,
searchable=False,
validators = ('isTidyHtmlWithCleanup',),
allowable_content_types = ('text/html',),
default_content_type = 'text/html',
default_output_type = 'text/x-html-safe',
widget = RichWidget(
            description = _(u'description_logo_viewlet', u"""Override the logo viewlet;
            you can add images or links with the rich editor"""),
label = _(u'label_logo_viewlet', u'Logo Viewlet'),
rows = 25,
allow_file_upload = False),
),
TextField('footerViewlet',
schemata ='viewlets',
required=False,
searchable=False,
validators = ('isTidyHtmlWithCleanup',),
allowable_content_types = ('text/html',),
default_content_type = 'text/html',
default_output_type = 'text/x-html-safe',
widget = RichWidget(
            description = _(u'description_footer_viewlet', u"""Override the footer viewlet;
            you can add images or links with the rich editor"""),
label = _(u'label_footer_viewlet', u'Footer Viewlet'),
rows = 25,
allow_file_upload = False),
),
TextField('colophonViewlet',
schemata ='viewlets',
required=False,
searchable=False,
validators = ('isTidyHtmlWithCleanup',),
allowable_content_types = ('text/html',),
default_content_type = 'text/html',
default_output_type = 'text/x-html-safe',
widget = RichWidget(
            description = _(u'description_colophon_viewlet', u"""Override the colophon viewlet;
            you can add images or links with the rich editor"""),
label = _(u'label_colophon_viewlet', u'Colophon Viewlet'),
i18n_domain = I18N_DOMAIN,
rows = 25,
allow_file_upload = False),
),
BooleanField(
'displaySearchBoxViewlet',
schemata ='dynamic-viewlets',
default = True,
widget=BooleanWidget(
description = _(u'description_display_searchbox_viewlet',
u"""Do you want to display the searchbox viewlet with live search in header ?"""),
label = _(u'label_display_searchbox_viewlet', u'Display Searchbox ?'),
),
),
BooleanField(
'displayBreadCrumbsViewlet',
schemata ='dynamic-viewlets',
default = True,
widget=BooleanWidget(
description = _(u'description_display_breadcrumbs_viewlet',
u"""Do you want to display the breadcrumbs viewlet in top of content ?"""),
label = _(u'label_display_breadcrumbs_viewlet', u'Display Bread Crumbs ?'),
),
),
BooleanField(
'displayGlobalSectionsViewlet',
schemata ='dynamic-viewlets',
default = True,
widget=BooleanWidget(
description = _(u'description_display_globalsections_viewlet',
u"""Do you want to display the global sections viewlet (horizontal navigation at top) ?"""),
label = _(u'label_display_globalsections_viewlet', u'Display Global Sections ?'),
),
),
BooleanField(
'displayPersonalBarViewlet',
schemata ='dynamic-viewlets',
default = True,
widget=BooleanWidget(
description = _(u'description_display_personalbar_viewlet',
u"""Do you want to display the personal bar viewlet (links : login, preferences ...) ?"""),
label = _(u'label_display_personalbar_viewlet', u'Display Personal Bar ?'),
),
),
BooleanField(
'displaySiteActionsViewlet',
schemata ='dynamic-viewlets',
default = True,
widget=BooleanWidget(
description = _(u'description_display_siteactions_viewlet',
u"""Do you want to display the site actions viewlet (links : site map, contact ...) ?"""),
label = _(u'label_display_siteactions_viewlet', u'Display Site Actions ?'),
),
),
BooleanField(
'displayDocumentActionsViewlet',
schemata ='dynamic-viewlets',
default = True,
widget=BooleanWidget(
description = _(u'description_display_documentactions_viewlet',
u"""Do you want to display the document actions viewlet (link: print, send this page ...) ?"""),
label = _(u'label_display_documentactions_viewlet', u'Display Document Actions ?'),
),
),
BooleanField(
'displayDocumentBylineViewlet',
schemata ='dynamic-viewlets',
default = True,
widget=BooleanWidget(
description = _(u'description_display_documentbyline_viewlet',
u"""Do you want to display the document by line viewlet for each content (author, date and keywords) ?"""),
label = _(u'label_display_documentbyline_viewlet', u'Display Document By Line ?'),
),
),
# fields for images
    # logoName property is no longer used in standard plone css
# so we make it invisible
StringField(
'logoName',
schemata ='images',
required=1,
widget=StringWidget(
label='Logo Name',
visible = {'view':'invisible', 'edit':'invisible'},
description = "Choose the logo file name, upload the image in the skin to overload it",
i18n_domain = I18N_DOMAIN,
),
),
StringField(
'backgroundImageName',
schemata ='images',
widget=StringWidget(
description = _(u'description_background_image_name', u"""Enter the background image name for the page, upload the image in this skin"""),
label = _(u'label_background_image_name', u'Background Image Name'),
),
),
StringField(
'backgroundImagePosition',
schemata ='images',
default="top left",
vocabulary = [("top left", _(u"Top Left")),
("top right", _(u"Top Right")),
("top center", _(u"Top Center")),
("center left", _(u"Center Left")),
("center right", _(u"Center Right")),
("center center", _(u"Center Center")),
("bottom left", _(u"Bottom Left")),
("bottom right", _(u"Bottom Right")),
("bottom center", _(u"Bottom Center"))],
widget=SelectionWidget(
description = _(u'description_background_image_position', u"""Choose the background image position for the page"""),
label = _(u'label_background_image_position', u'Background Image Position'),
format='select',
),
),
StringField(
'backgroundImageRepeat',
schemata ='images',
default="no-repeat",
vocabulary = [("no-repeat", "No repeat"),
("repeat-x", "Horizontal Repeat"),
("repeat-y", "Vertical Repeat"),
("repeat", "mosaic repeat")],
widget=SelectionWidget(
description = _(u'description_background_image_repeat', u"""Choose the background image repeat for the page"""),
label = _(u'label_background_image_repeat', u'Background Image Repeat'),
format='select',
),
),
StringField(
'portalBackgroundImageName',
schemata ='images',
widget=StringWidget(
description = _(u'description_portal_background_image_name', u"""Enter the background image name for the portal, upload the image in this skin"""),
label = _(u'label_portal_background_image_name', u'Portal Background Image Name'),
),
),
StringField(
'contentBackgroundImageName',
schemata ='images',
widget=StringWidget(
description = _(u'description_content_background_image_name', u"""Choose the background image name for the content, upload the image in this skin"""),
label = _(u'label_contentl_background_image_name', u'Content Background Image Name'),
),
),
StringField(
'headerBackgroundImageName',
schemata ='images',
widget=StringWidget(
description = _(u'description_header_background_image_name', u"""Choose the background image name for the header, upload the image in this skin"""),
label = _(u'label_header_background_image_name', u'Header Background Image Name'),
),
),
    # this property is never used in standard plone css
# so we make it invisible
StringField(
'portalMinWidth',
schemata ='dimensions',
widget=StringWidget(
label='Portal min width',
visible = {'view':'invisible', 'edit':'invisible'},
description = "Choose the portal min width in px em or %",
),
),
StringField(
'portalWidth',
schemata ='dimensions',
default = '100%',
widget=StringWidget(
description = _(u'description_portal_width', u"""Choose the portal min width in px em or %"""),
label = _(u'label_portal_width', u'Portal width'),
),
),
StringField(
'portalHorizontalPosition',
schemata ='dimensions',
default="",
vocabulary = [("0", _(u"undefined")),
("0 auto 0 auto", _(u"centered")),
("0 auto 0 0", _(u"on left")),
("0 0 0 auto", _(u"on right"))],
widget=SelectionWidget(
description = _(u'description_portal_horizontal_position', u"""Choose the position for portal"""),
label = _(u'label_portal_horizontal_position', u'Portal Horizontal Position'),
format='select',
),
),
StringField(
'columnOneWidth',
schemata ='dimensions',
required=1,
widget=StringWidget(
description = _(u'description_column_one_width', u"""Choose the column one width in px em or %"""),
label = _(u'label_column_one_width', u'Column One width'),
),
),
StringField(
'columnTwoWidth',
schemata ='dimensions',
required=1,
widget=StringWidget(
description = _(u'description_column_two_width', u"""Choose the column two width in px em or %"""),
label = _(u'label_column_two_width', u'Column Two width'),
),
),
StringField(
'fontFamily',
schemata ='fonts',
required=1,
widget=StringWidget(
description = _(u'description_font_family',
u"""Choose the font family"""),
label = _(u'label_font_family', u'Font Family'),
),
),
StringField(
'fontMainSize',
schemata ='fonts',
required=0,
widget=StringWidget(
description = _(u'description_font_main_size',
u"Choose the main font size in % (better) em px pt "
u"or using a keyword (xx-small, small, ...)"),
label = _(u'label_font_main_size', u'Font Main Size'),
),
),
StringField(
'fontSmallSize',
schemata ='fonts',
required=1,
widget=StringWidget(
description = _(u'description_font_small_size',
u"Choose the small font size in % (better) em px pt "
u"or using a keyword (xx-small, small, ...)"""),
label = _(u'label_font_small_size', u'Font Small Size'),
),
),
StringField(
'headingFontFamily',
schemata ='fonts',
required=1,
widget=StringWidget(
description = _(u'description_heading_font_family',
u"""Choose the font family for titles"""),
label = _(u'label_heading_font_family', u'Heading Font Family'),
),
),
StringField(
'textTransform',
schemata ='fonts',
required=1,
vocabulary = [("none", _(u"none")),
("uppercase", _(u"uppercase")),
("lowercase", _(u"lowercase")),
("capitalize", _(u"capitalize"))],
widget=SelectionWidget(
description = _(u'description_text_transform',
u"""Choose the text transformation for tabs and some headings"""),
label = _(u'label_text_transform', u'Text Transform'),
format='select',
),
),
StringField(
'fontColor',
schemata ='colors',
required=1,
widget=SmartColorWidget(
description = _(u'description_font_color',
u"""Choose the font color"""),
label = _(u'label_font_color', u'Font Color'),
),
),
StringField(
'backgroundColor',
schemata ='colors',
required=1,
widget=SmartColorWidget(
description = _(u'description_background_color',
u"""Choose the background color of the page"""),
label = _(u'label_background_color', u'Background Color'),
),
),
StringField(
'discreetColor',
schemata ='colors',
required=1,
widget=SmartColorWidget(
description = _(u'description_discreet_color',
u"""Choose the discreet color (can be used in content) """),
label = _(u'label_discreet_color', u'Discreet Color'),
),
),
StringField(
'portalBackgroundColor',
schemata ='colors',
default="transparent",
widget=SmartColorWidget(
description = _(u'description_portal_background_color',
u"""Choose the portal background color"""),
label = _(u'label_portal_background_color', u'Portal Background Color'),
),
),
StringField(
'contentBackgroundColor',
schemata ='colors',
default="transparent",
widget=SmartColorWidget(
description = _(u'description_content_background_color',
u"""Choose background color for content part of the page"""),
label = _(u'label_content_background_color', u'Content Background Color'),
),
),
StringField(
'personaltoolsBackgroundColor',
schemata ='colors',
default="#E3E3E3",
widget=SmartColorWidget(
description = _(u'description_personaltools_background_color',
u"""Choose background color for personal tools - language choice and user menu"""),
label = _(u'label_personaltools_background_color',
u"Personal tools Background Color"),
),
),
StringField(
'personaltoolsFontColor',
schemata ='colors',
default="#205C90",
widget=SmartColorWidget(
description = _(u'description_personaltools_font_color',
u"""Choose font color for personal tools - language choice and user menu"""),
label = _(u'label_personaltools_font_color',
u"Personal tools Font Color"),
),
),
StringField(
'headerBackgroundColor',
schemata ='colors',
default="transparent",
widget=SmartColorWidget(
description = _(u'description_header_background_color',
u"""Choose background color for the header"""),
label = _(u'label_header_background_color', u"Header Background Color"),
),
),
StringField(
'globalNavBackgroundColor',
schemata ='colors',
default="#dee7ec",
widget=SmartColorWidget(
description = _(u'description_global_nav_background_color',
u"""Choose the background color of global navigation"""),
label = _(u'label_global_nav_background_color', u'Global navigation Background Color'),
),
),
StringField(
'globalNavLinkColor',
schemata ='colors',
default="#205c90",
widget=SmartColorWidget(
description = _(u'description_global_nav_font_color',
u"""Choose the color of font and selected element background in global navigation"""),
label = _(u'label_global_nav_font_color', u'Global navigation Font Color'),
),
),
StringField(
'inputFontColor',
schemata ='colors',
required=1,
widget=SmartColorWidget(
description = _(u'description_input_font_color',
u"""Choose the input fields font color"""),
label = _(u'label_input_font_color', u'Input Font Color'),
),
),
StringField(
'linkColor',
schemata ='colors',
required=1,
widget=SmartColorWidget(
description = _(u'description_link_color',
u"""Choose the color for links"""),
label = _(u'label_link_color', u'Link Color'),
),
),
StringField(
'linkVisitedColor',
schemata ='colors',
required=1,
widget=SmartColorWidget(
description = _(u'description_link_visited_color',
u"""Choose the color for visited links"""),
label = _(u'label_link_visited_color', u'Link Visited Color'),
),
),
StringField(
'linkActiveColor',
schemata ='colors',
required=1,
widget=SmartColorWidget(
description = _(u'description_link_active_color',
u"""Choose the color for active links"""),
label = _(u'label_link_active_color', u'Link Active/Hover Color'),
),
),
StringField(
'notifyBackgroundColor',
schemata ='colors',
required=1,
widget=SmartColorWidget(
description = _(u'description_notify_background_color',
u"""Choose the notify background color (for portal messages)"""),
label = _(u'label_notify_background_color', u'Notify Background Color'),
),
),
StringField(
'notifyBorderColor',
schemata ='colors',
required=1,
widget=SmartColorWidget(
description = _(u'description_notify_border_color',
u"""Choose the notify border color"""),
label = _(u'label_notify_border_color', u'Notify Border Color'),
),
),
StringField(
'helpBackgroundColor',
schemata ='colors',
required=1,
widget=SmartColorWidget(
description = _(u'description_help_background_color',
u"""Choose the bg color for help in forms"""),
label = _(u'label_help_background_color', u'Help Background Color'),
),
),
StringField(
'oddRowBackgroundColor',
schemata ='colors',
required=1,
default="#EEEEEE",
widget=SmartColorWidget(
description = _(u'description_odd_row_background_color',
u"""Choose the bg color for odd rows (tables, portlets)"""),
label = _(u'label__odd_row_background_color', u'Odd Row Background Color'),
),
),
StringField(
'evenRowBackgroundColor',
schemata ='colors',
required=1,
widget=SmartColorWidget(
description = _(u'description_even_row_background_color',
u"""Choose the bg color for even rows (tables, portlets)"""),
label = _(u'label__even_row_background_color', u'Even Row Background Color'),
),
),
StringField(
'globalBackgroundColor',
schemata ='colors',
required=1,
widget=SmartColorWidget(
description = _(u'description_global_background_color',
u"""Choose the global background color (used in tabs and portlets headers)"""),
label = _(u'label_global_background_color', u'Global Background Color'),
),
),
StringField(
'globalFontColor',
schemata ='colors',
required=1,
widget=SmartColorWidget(
description = _(u'description_global_font_color',
u"""Choose the global font color"""),
label = _(u'label_global_font_color', u'Global Font Color'),
),
),
StringField(
'globalBorderColor',
schemata ='colors',
required=1,
widget=SmartColorWidget(
description = _(u'description_global_border_color',
u"""Choose the color for global borders"""),
label = _(u'label_global_border_color', u'Global Border Color'),
),
),
StringField(
'contentViewBackgroundColor',
schemata ='colors',
required=1,
widget=SmartColorWidget(
description = _(u'description_content_views_background_color',
u"""Choose the background color for content views tabs"""),
label = _(u'label_content_views_background_color', u'Content View Background Color'),
),
),
StringField(
'contentViewBorderColor',
schemata ='colors',
required=1,
widget=SmartColorWidget(
description = _(u'description_content_views_border_color',
u"""Choose the border color for content views tabs"""),
label = _(u'label_content_views_border_color', u'Content View Border Color'),
),
),
StringField(
'contentViewFontColor',
schemata ='colors',
required=1,
widget=SmartColorWidget(
description = _(u'description_content_views_font_color',
u"""Choose the font color for content views tabs"""),
label = _(u'label_content_views_font_color', u'Content View Font Color'),
),
),
StringField(
'listingHeadersFontColor',
schemata ='colors',
required=1,
default="#666666",
widget=SmartColorWidget(
description = _(u'description_listing_headers_font_color',
u"""Choose the font color for the text of listing headers"""),
label = _(u'label_listing_headers_font_color', u'Listing Headers Font Color'),
),
),
StringField(
'portletHeadersFontColor',
schemata ='colors',
required=1,
default="#000000",
widget=SmartColorWidget(
description = _(u'description_portlet_headers_font_color',
u"""Choose the font color for the text of portlet headers"""),
label = _(u'label_portlet_headers_font_color', u'Portlet Headers Font Color'),
),
),
StringField(
'borderStyle',
schemata ='borders',
required=1,
vocabulary = [("none", "no border"),
("hidden", "hidden when none is impossible (tables)"),
("solid", "solid"),
("dotted", "dotted"),
("dashed", "dashed"),
("groove","3D groove"),
("double", "double borders"),
("inset", "3D inset"),
("outset","3D outset"),
("ridge","3D ridge")],
widget=SelectionWidget(
description = _(u'description_border_style',
u"""Choose the global border style"""),
label = _(u'label_border_style', u'Border Style'),
format='select',
),
),
StringField(
'borderStyleAnnotations',
schemata ='borders',
required=1,
vocabulary = [("none", "no border"),
("hidden", "hidden when none is impossible (tables)"),
("solid", "solid"),
("dotted", "dotted"),
("dashed", "dashed"),
("groove","3D groove"),
("double", "double borders"),
("inset", "3D inset"),
("outset","3D outset"),
("ridge","3D ridge")],
widget=SelectionWidget(
description = _(u'description_border_style_annotations',
u"""Choose the border style for annotations """),
label = _(u'label_border_style_annotations', u'Border Style for Annotations'),
format='select',
),
),
StringField(
'borderWidth',
schemata ='borders',
required=1,
widget=StringWidget(
description = _(u'description_border_width',
u"""Choose the border width in px"""),
label = _(u'label_border_width', u'Border Width'),
),
),
BooleanField(
'overloadBody',
schemata ='plone-overloads',
default = True,
widget=BooleanWidget(
description = _(u'description_overload_body',
u"""Do you want to overload the body style ?"""),
label = _(u'label_overload_body', u'Overload Body Style'),
),
),
BooleanField(
'overloadHTMLTags',
schemata ='plone-overloads',
default = True,
widget=BooleanWidget(
description = _(u'description_overload_html_tags',
u"""Do you want to overload content styles (classic html tags) ?"""),
label = _(u'label_overload_html_tags', u'Overload HTML Tags Styles'),
),
),
BooleanField(
'overloadContent',
schemata ='plone-overloads',
default = True,
widget=BooleanWidget(
description = _(u'description_overload_content',
u"""Do you want to overload standard plone styles used for content ?"""),
label = _(u'label_overload_content', u'Overload Various Content Styles'),
),
),
BooleanField(
'overloadSiteActions',
schemata ='plone-overloads',
default = True,
widget=BooleanWidget(
description = _(u'description_overload_site_actions',
u"""Do you want to overload site actions styles ?"""),
label = _(u'label_overload_site_actions', u'Overload Site Actions Styles'),
),
),
BooleanField(
'overloadSearchBox',
schemata ='plone-overloads',
default = True,
widget=BooleanWidget(
description = _(u'description_overload_search_box',
u"""Do you want to overload search box styles ?"""),
label = _(u'label_overload_search_box', u'Overload Search Box Styles'),
),
),
BooleanField(
'overloadGlobalSections',
schemata ='plone-overloads',
default = True,
widget=BooleanWidget(
description = _(u'description_overload_global_sections',
u"""Do you want to overload global sections buttons styles ?"""),
label = _(u'label_overload_global_sections', u'Overload Global Sections Styles'),
),
),
BooleanField(
'overloadPersonalTools',
schemata ='plone-overloads',
default = True,
widget=BooleanWidget(
description = _(u'description_overload_personal_tools',
u"""Do you want to overload personal tools buttons styles (login, preferences ...) ?"""),
label = _(u'label_overload_personal_tools', u'Overload Personals Tools Styles'),
),
),
BooleanField(
'overloadBreadcrumbs',
schemata ='plone-overloads',
default = True,
widget=BooleanWidget(
description = _(u'description_overload_breadcrumbs',
u"""Do you want to overload breadcrumbs styles ?"""),
label = _(u'label_overload_breadcrumbs', u'Overload Breadcrumbs Styles'),
),
),
BooleanField(
'overloadFooter',
schemata ='plone-overloads',
default = True,
widget=BooleanWidget(
description = _(u'description_overload_footer',
u"""Do you want to overload footer styles ?"""),
label = _(u'label_overload_footer', u'Overload Footer Styles'),
),
),
BooleanField(
'overloadSiteMap',
schemata ='plone-overloads',
default = True,
widget=BooleanWidget(
description = _(u'description_overload_site_map',
u"""Do you want to overload site map styles ?"""),
label = _(u'label_overload_site_map', u'Overload Site Map Styles'),
),
),
BooleanField(
'overloadColumns',
schemata ='plone-overloads',
default = True,
widget=BooleanWidget(
description = _(u'description_overload_columns',
u"""Do you want to overload columns styles ?"""),
label = _(u'label_overload_columns', u'Overload Columns Styles'),
),
),
BooleanField(
'overloadForms',
schemata ='plone-overloads',
default = True,
widget=BooleanWidget(
description = _(u'description_overload_forms',
u"""Do you want to overload forms styles ?"""),
label = _(u'label_overload_forms', u'Overload Forms Styles'),
),
),
BooleanField(
'overloadPortlets',
schemata ='plone-overloads',
default = True,
widget=BooleanWidget(
description = _(u'description_overload_portlets',
u"""Do you want to overload portlets styles ?"""),
label = _(u'label_overload_portlets', u'Overload Portlets Styles'),
),
),
BooleanField(
'overloadCalendar',
schemata ='plone-overloads',
default = True,
widget=BooleanWidget(
description = _(u'description_overload_calendar',
u"""Do you want to overload calendar styles ?"""),
label = _(u'label_overload_calendar', u'Overload Calendar Styles'),
),
),
BooleanField(
'overloadNavtree',
schemata ='plone-overloads',
default = True,
widget=BooleanWidget(
description = _(u'description_overload_navtree',
u"""Do you want to overload navigation tree styles (impact sitemap + navtree portlet) ?"""),
label = _(u'label_overload_navtree', u'Overload Navigation Tree Styles'),
),
),
BooleanField(
'overloadAuthoring',
schemata ='plone-overloads',
default = True,
widget=BooleanWidget(
description = _(u'description_overload_authoring',
u"""Do you want to overload authoring styles (content views, actions etc ...) ?"""),
label = _(u'label_overload_authoring', u'Overload Authoring Styles'),
),
),
), marshall=RFC822Marshaller())
| [
"[email protected]"
] | |
a842ae5ed2fa9404270a2b872f3c9f04a42ac434 | 2652fd6261631794535589427a384693365a585e | /trunk/workspace/Squish/src/TestScript/UI/suite_UI_51/tst_UI_51_Pref_BufferAutoView/test.py | e1a9d9fe2212331ae4697f3a3269cdded8842a9c | [] | no_license | ptqatester1/ptqa | 88c652380167f64a953bfd7a65041e7d8ac48c90 | 5b5997ea459e9aac17db8da2041e2af331927104 | refs/heads/master | 2021-01-21T19:06:49.275364 | 2017-06-19T03:15:00 | 2017-06-19T03:15:00 | 92,115,462 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,735 | py | from API.Utility.Util import Util
from API.Utility import UtilConst
from API.MenuBar.Options.Options import Options
from API.MenuBar.Options.OptionsConst import OptionsConst
from API.MenuBar.Options.Preferences.Miscellaneous.MiscellaneousConst import MiscellaneousConst
from API.SimulationPanel.EventList.EventListConst import EventListConst
from API.SimulationPanel.EventListFilters.EventListFilters import EventListFilters
from API.SimulationPanel.PlayControls.PlayControlsConst import PlayControlsConst
from API.MenuBar.Options.Preferences.PreferencesConst import PreferencesConst
util = Util()
options = Options()
eventListFilters = EventListFilters()
def main():
util.init()
util.open("UI13.pkt", UtilConst.UI_TEST )
util.speedUpConvergence()
editOptionsSetting()
checkpoint1()
resetOptionsSetting()
def editOptionsSetting():
options.selectOptionsItem(OptionsConst.PREFERENCES)
util.clickTab(PreferencesConst.TAB_BAR, PreferencesConst.MISCELLANEOUS)
util.clickButton(MiscellaneousConst.AUTO_VIEW_PREVIOUS_EVENTS)
util.close(OptionsConst.OPTIONS_DIALOG)
def checkpoint1():
util.clickOnSimulation()
util.clickButton(EventListConst.RESET_SIMULATION)
for i in range(0, 8):
util.clickButton(PlayControlsConst.CAPTURE_FORWARD)
snooze(10)
if (object.exists(PlayControlsConst.BUFFER_FULL_DIALOG_LABEL)):
test.fail("Buffer window found")
else:
test.passes("Buffer window not found")
def resetOptionsSetting():
options.selectOptionsItem(OptionsConst.PREFERENCES)
util.clickTab(PreferencesConst.TAB_BAR, PreferencesConst.MISCELLANEOUS)
util.clickButton(MiscellaneousConst.PROMPT)
util.close(OptionsConst.OPTIONS_DIALOG) | [
"[email protected]"
] | |
509c23e3bf72658ffd093ae405cf9de4958fb78f | 102d09ef1d6effe166ad703ba4472c45dfb03263 | /py/Maximum_Depth_of_Binary_Tree.py | 199982277744c0985b39cfc2326fc115a739fec4 | [] | no_license | bitcsdby/Codes-for-leetcode | 5693100d4b66de65d7f135bbdd81b32650aed7d0 | 9e24e621cfb9e7fd46f9f02dfc40a18a702d4990 | refs/heads/master | 2016-09-05T08:43:31.656437 | 2014-08-02T15:14:53 | 2014-08-02T15:14:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 444 | py | # Definition for a binary tree node
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
# @param root, a tree node
# @return an integer
def maxDepth(self, root):
        if root is None:
            return 0
        left = self.maxDepth(root.left) + 1
        right = self.maxDepth(root.right) + 1
        return max(left, right)
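
# Example usage (illustrative sketch; TreeNode is the commented-out class above):
#
#   root = TreeNode(1)
#   root.left = TreeNode(2)
#   root.left.left = TreeNode(3)
#   print(Solution().maxDepth(root))  # -> 3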
| [
"[email protected]"
] | |
d0089bd15b2c1ffac1e167de02e3ee215da07c7b | 74698be74d244ebbabcb0b3cf17ebed26adfa37c | /orbit/utils/epoch_helper.py | 6eb110768887e95055c34f7fc3857f08a6b9c276 | [
"Apache-2.0"
] | permissive | lfads/models | aa75616fee2476641aa98ca1cbdce7e5d27a9aff | fd700f0cb2e104544c445d9fbf3991d8388ff18a | refs/heads/master | 2021-01-25T13:50:55.423010 | 2021-01-05T18:27:01 | 2021-01-05T18:27:01 | 123,619,512 | 16 | 9 | Apache-2.0 | 2021-01-05T18:27:02 | 2018-03-02T19:07:50 | Python | UTF-8 | Python | false | false | 2,136 | py | # Copyright 2020 The Orbit Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides a utility class for training in epochs."""
import tensorflow as tf
class EpochHelper:
"""A helper class handle bookkeeping of epochs in custom training loops."""
def __init__(self, epoch_steps: int, global_step: tf.Variable):
"""Initializes the `EpochHelper` instance.
Args:
epoch_steps: An integer indicating how many steps are in an epoch.
global_step: A `tf.Variable` providing the current global step.
"""
self._epoch_steps = epoch_steps
self._global_step = global_step
self._current_epoch = None
self._epoch_start_step = None
self._in_epoch = False
def epoch_begin(self):
"""Returns whether a new epoch should begin."""
if self._in_epoch:
return False
current_step = self._global_step.numpy()
self._epoch_start_step = current_step
self._current_epoch = current_step // self._epoch_steps
self._in_epoch = True
return True
def epoch_end(self):
"""Returns whether the current epoch should end."""
if not self._in_epoch:
raise ValueError("`epoch_end` can only be called inside an epoch.")
current_step = self._global_step.numpy()
epoch = current_step // self._epoch_steps
if epoch > self._current_epoch:
self._in_epoch = False
return True
return False
@property
def batch_index(self):
"""Index of the next batch within the current epoch."""
return self._global_step.numpy() - self._epoch_start_step
@property
def current_epoch(self):
return self._current_epoch
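
# Example usage (a minimal sketch; `dataset` and `train_step` are assumed to be
# defined elsewhere, and `train_step` is assumed to increment `global_step`):
#
#   global_step = tf.Variable(0, dtype=tf.int64)
#   helper = EpochHelper(epoch_steps=100, global_step=global_step)
#   for batch in dataset:
#       if helper.epoch_begin():
#           print("starting epoch", helper.current_epoch)
#       train_step(batch)
#       if helper.epoch_end():
#           print("finished epoch", helper.current_epoch)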
| [
"[email protected]"
] | |
7df058d88c766a9b978227fca727c0f1587d9861 | 2e1322f72f730fdb019c25eb533424bfb411c7dc | /backend/garpix_page/contexts/default.py | 635931bdc671efeb46a46e470ae8ab5dc4d06058 | [
"MIT"
] | permissive | tempuku/garpix_page | 750d3ef78e1698d93564ae510a9514dfb815853f | d24fa3d8c7b0b4134e66795965596f3cdb61c8db | refs/heads/master | 2023-03-17T03:33:07.528207 | 2021-03-11T15:15:13 | 2021-03-11T15:15:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 53 | py | def context(request, *args, **kwargs):
return {}
| [
"[email protected]"
] | |
ad9a3b50ae05c454484d9697933ee5e00f730b4a | 5dd7c4ec44b76180040badc67849ad44f81690f9 | /unittests/test_stockitem.py | 751eb41a7c209613f1a6e803ac526f15a85a3c77 | [] | no_license | myluco/Phoenix | 68f9abe15a673fe56da6ef4375849ba6a642622d | 2de746beda35b8b5db547658cae1c65cfe164039 | refs/heads/master | 2021-01-18T15:59:05.001240 | 2016-12-04T00:08:36 | 2016-12-04T00:08:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 455 | py | import unittest
from unittests import wtc
import wx
#---------------------------------------------------------------------------
class stockitem_Tests(wtc.WidgetTestCase):
# TODO: Remove this test and add real ones.
def test_stockitem1(self):
self.fail("Unit tests for stockitem not implemented yet.")
#---------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
6c10278bce7d441831f59503418233abcba5dee8 | 17c14b758959cdceec0dce8f783346fdeee8e111 | /chap05_nlp/automl/train.py | bca8b1fd41ce03b243523430bdc8d09621f7daa4 | [] | no_license | yurimkoo/tensormsa_jupyter | b0a340119339936d347d12fbd88fb017599a0029 | 0e75784114ec6dc8ee7eff8094aef9cf37131a5c | refs/heads/master | 2021-07-18T12:22:31.396433 | 2017-10-25T01:42:24 | 2017-10-25T01:42:24 | 109,469,220 | 1 | 0 | null | 2017-11-04T05:20:15 | 2017-11-04T05:20:15 | null | UTF-8 | Python | false | false | 3,650 | py | """
Utility used by the Network class to actually train.
Based on:
https://github.com/fchollet/keras/blob/master/examples/mnist_mlp.py
"""
from keras.datasets import mnist, cifar10
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.utils.np_utils import to_categorical
from keras.callbacks import EarlyStopping
# Helper: Early stopping.
early_stopper = EarlyStopping(patience=5)
def get_cifar10():
"""Retrieve the CIFAR dataset and process the data."""
# Set defaults.
nb_classes = 10
batch_size = 64
input_shape = (3072,)
# Get the data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train = x_train.reshape(50000, 3072)
x_test = x_test.reshape(10000, 3072)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
# convert class vectors to binary class matrices
y_train = to_categorical(y_train, nb_classes)
y_test = to_categorical(y_test, nb_classes)
return (nb_classes, batch_size, input_shape, x_train, x_test, y_train, y_test)
def get_mnist():
"""Retrieve the MNIST dataset and process the data."""
# Set defaults.
nb_classes = 10
batch_size = 128
input_shape = (784,)
# Get the data.
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(60000, 784)
x_test = x_test.reshape(10000, 784)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
# convert class vectors to binary class matrices
y_train = to_categorical(y_train, nb_classes)
y_test = to_categorical(y_test, nb_classes)
return (nb_classes, batch_size, input_shape, x_train, x_test, y_train, y_test)
def compile_model(network, nb_classes, input_shape):
"""Compile a sequential model.
Args:
network (dict): the parameters of the network
Returns:
a compiled network.
"""
# Get our network parameters.
nb_layers = network['nb_layers']
nb_neurons = network['nb_neurons']
activation = network['activation']
optimizer = network['optimizer']
model = Sequential()
# Add each layer.
for i in range(nb_layers):
# Need input shape for first layer.
if i == 0:
model.add(Dense(nb_neurons, activation=activation, input_shape=input_shape))
else:
model.add(Dense(nb_neurons, activation=activation))
model.add(Dropout(0.2)) # hard-coded dropout
# Output layer.
model.add(Dense(nb_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer=optimizer,
metrics=['accuracy'])
return model
def train_and_score(network, dataset):
"""Train the model, return test loss.
Args:
network (dict): the parameters of the network
dataset (str): Dataset to use for training/evaluating
"""
if dataset == 'cifar10':
nb_classes, batch_size, input_shape, x_train, \
x_test, y_train, y_test = get_cifar10()
elif dataset == 'mnist':
nb_classes, batch_size, input_shape, x_train, \
x_test, y_train, y_test = get_mnist()
model = compile_model(network, nb_classes, input_shape)
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=10000, # using early stopping, so no real limit
verbose=0,
validation_data=(x_test, y_test),
callbacks=[early_stopper])
score = model.evaluate(x_test, y_test, verbose=0)
return score[1] # 1 is accuracy. 0 is loss. | [
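
# Example invocation (illustrative sketch; the dict keys mirror what
# compile_model() reads above):
#
#   network = {'nb_layers': 2, 'nb_neurons': 64,
#              'activation': 'relu', 'optimizer': 'adam'}
#   accuracy = train_and_score(network, 'mnist')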
"[email protected]"
] | |
95e2a602cdea202da5cba6e81d040adac387cb68 | ea3048858939a8162f82a1d0b0ec43171530ea8d | /apps/search/models.py | 62ec89db3a73a35a853d885c234a3453ffbb6a68 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | kknet/NewsBlur | d229b12c39f7ca3eab1e28922171f87ea37b8df1 | fa78b434f980d2814dd05fedb70d9e87259ee998 | refs/heads/master | 2021-01-17T22:36:29.651729 | 2016-09-20T20:05:25 | 2016-09-20T20:05:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,270 | py | import re
import time
import datetime
import pymongo
import pyes
import redis
import celery
import mongoengine as mongo
from django.conf import settings
from django.contrib.auth.models import User
from apps.search.tasks import IndexSubscriptionsForSearch
from apps.search.tasks import IndexSubscriptionsChunkForSearch
from apps.search.tasks import IndexFeedsForSearch
from utils import log as logging
from utils.feed_functions import chunks
class MUserSearch(mongo.Document):
'''Search index state of a user's subscriptions.'''
user_id = mongo.IntField(unique=True)
last_search_date = mongo.DateTimeField()
subscriptions_indexed = mongo.BooleanField()
subscriptions_indexing = mongo.BooleanField()
meta = {
'collection': 'user_search',
'indexes': ['user_id'],
'index_drop_dups': True,
'allow_inheritance': False,
}
@classmethod
def get_user(cls, user_id, create=True):
try:
user_search = cls.objects.read_preference(pymongo.ReadPreference.PRIMARY)\
.get(user_id=user_id)
except cls.DoesNotExist:
if create:
user_search = cls.objects.create(user_id=user_id)
else:
user_search = None
return user_search
def touch_search_date(self):
if not self.subscriptions_indexed and not self.subscriptions_indexing:
self.schedule_index_subscriptions_for_search()
self.subscriptions_indexing = True
self.last_search_date = datetime.datetime.now()
self.save()
def schedule_index_subscriptions_for_search(self):
IndexSubscriptionsForSearch.apply_async(kwargs=dict(user_id=self.user_id),
queue='search_indexer_tasker')
# Should be run as a background task
def index_subscriptions_for_search(self):
from apps.rss_feeds.models import Feed
from apps.reader.models import UserSubscription
SearchStory.create_elasticsearch_mapping()
start = time.time()
user = User.objects.get(pk=self.user_id)
r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
r.publish(user.username, 'search_index_complete:start')
subscriptions = UserSubscription.objects.filter(user=user).only('feed')
total = subscriptions.count()
feed_ids = []
for sub in subscriptions:
try:
feed_ids.append(sub.feed.pk)
except Feed.DoesNotExist:
continue
feed_id_chunks = [c for c in chunks(feed_ids, 6)]
logging.user(user, "~FCIndexing ~SB%s feeds~SN in %s chunks..." %
(total, len(feed_id_chunks)))
tasks = [IndexSubscriptionsChunkForSearch().s(feed_ids=feed_id_chunk,
user_id=self.user_id
).set(queue='search_indexer')
for feed_id_chunk in feed_id_chunks]
group = celery.group(*tasks)
res = group.apply_async(queue='search_indexer')
res.join_native()
duration = time.time() - start
logging.user(user, "~FCIndexed ~SB%s feeds~SN in ~FM~SB%s~FC~SN sec." %
(total, round(duration, 2)))
r.publish(user.username, 'search_index_complete:done')
self.subscriptions_indexed = True
self.subscriptions_indexing = False
self.save()
def index_subscriptions_chunk_for_search(self, feed_ids):
from apps.rss_feeds.models import Feed
r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
user = User.objects.get(pk=self.user_id)
logging.user(user, "~FCIndexing %s feeds..." % len(feed_ids))
for feed_id in feed_ids:
feed = Feed.get_by_id(feed_id)
if not feed: continue
feed.index_stories_for_search()
r.publish(user.username, 'search_index_complete:feeds:%s' %
','.join([str(f) for f in feed_ids]))
@classmethod
def schedule_index_feeds_for_search(cls, feed_ids, user_id):
user_search = cls.get_user(user_id, create=False)
if (not user_search or
not user_search.subscriptions_indexed or
user_search.subscriptions_indexing):
# User hasn't searched before.
return
if not isinstance(feed_ids, list):
feed_ids = [feed_ids]
IndexFeedsForSearch.apply_async(kwargs=dict(feed_ids=feed_ids, user_id=user_id),
queue='search_indexer')
@classmethod
def index_feeds_for_search(cls, feed_ids, user_id):
from apps.rss_feeds.models import Feed
user = User.objects.get(pk=user_id)
logging.user(user, "~SB~FCIndexing %s~FC by request..." % feed_ids)
for feed_id in feed_ids:
feed = Feed.get_by_id(feed_id)
if not feed: continue
feed.index_stories_for_search()
@classmethod
def remove_all(cls, drop_index=False):
# You only need to drop the index if there is data you want to clear.
# A new search server won't need this, as there isn't anything to drop.
if drop_index:
logging.info(" ---> ~FRRemoving stories search index...")
SearchStory.drop()
user_searches = cls.objects.all()
logging.info(" ---> ~SN~FRRemoving ~SB%s~SN user searches..." % user_searches.count())
for user_search in user_searches:
try:
user_search.remove()
except Exception, e:
print " ****> Error on search removal: %s" % e
def remove(self):
from apps.rss_feeds.models import Feed
from apps.reader.models import UserSubscription
user = User.objects.get(pk=self.user_id)
subscriptions = UserSubscription.objects.filter(user=self.user_id)
total = subscriptions.count()
removed = 0
for sub in subscriptions:
try:
feed = sub.feed
except Feed.DoesNotExist:
continue
if not feed.search_indexed:
continue
feed.search_indexed = False
feed.save()
removed += 1
logging.user(user, "~FCRemoved ~SB%s/%s feed's search indexes~SN for ~SB~FB%s~FC~SN." %
(removed, total, user.username))
self.delete()
class SearchStory:
ES = pyes.ES(settings.ELASTICSEARCH_STORY_HOSTS)
name = "stories"
@classmethod
def index_name(cls):
return "%s-index" % cls.name
@classmethod
def type_name(cls):
return "%s-type" % cls.name
@classmethod
def create_elasticsearch_mapping(cls, delete=False):
if delete:
cls.ES.indices.delete_index_if_exists("%s-index" % cls.name)
cls.ES.indices.create_index_if_missing("%s-index" % cls.name)
mapping = {
'title': {
'boost': 3.0,
'index': 'analyzed',
'store': 'no',
'type': 'string',
'analyzer': 'standard',
},
'content': {
'boost': 1.0,
'index': 'analyzed',
'store': 'no',
'type': 'string',
'analyzer': 'simple',
},
'tags': {
'boost': 2.0,
'index': 'analyzed',
'store': 'no',
'type': 'string',
'analyzer': 'standard',
},
'author': {
'boost': 1.0,
'index': 'analyzed',
'store': 'no',
'type': 'string',
'analyzer': 'simple',
},
'feed_id': {
'store': 'no',
'type': 'integer'
},
'date': {
'store': 'no',
'type': 'date',
}
}
cls.ES.indices.put_mapping("%s-type" % cls.name, {
'properties': mapping,
'_source': {'enabled': False},
}, ["%s-index" % cls.name])
@classmethod
def index(cls, story_hash, story_title, story_content, story_tags, story_author, story_feed_id,
story_date):
doc = {
"content" : story_content,
"title" : story_title,
"tags" : ', '.join(story_tags),
"author" : story_author,
"feed_id" : story_feed_id,
"date" : story_date,
}
try:
cls.ES.index(doc, "%s-index" % cls.name, "%s-type" % cls.name, story_hash)
except pyes.exceptions.NoServerAvailable:
logging.debug(" ***> ~FRNo search server available.")
@classmethod
def remove(cls, story_hash):
try:
cls.ES.delete("%s-index" % cls.name, "%s-type" % cls.name, story_hash)
except pyes.exceptions.NoServerAvailable:
logging.debug(" ***> ~FRNo search server available.")
@classmethod
def drop(cls):
cls.ES.indices.delete_index_if_exists("%s-index" % cls.name)
@classmethod
def query(cls, feed_ids, query, order, offset, limit):
cls.create_elasticsearch_mapping()
cls.ES.indices.refresh()
query = re.sub(r'([^\s\w_\-])+', ' ', query) # Strip non-alphanumeric
sort = "date:desc" if order == "newest" else "date:asc"
string_q = pyes.query.QueryStringQuery(query, default_operator="AND")
feed_q = pyes.query.TermsQuery('feed_id', feed_ids[:1000])
q = pyes.query.BoolQuery(must=[string_q, feed_q])
try:
results = cls.ES.search(q, indices=cls.index_name(), doc_types=[cls.type_name()],
partial_fields={}, sort=sort, start=offset, size=limit)
except pyes.exceptions.NoServerAvailable:
logging.debug(" ***> ~FRNo search server available.")
return []
logging.info(" ---> ~FG~SNSearch ~FCstories~FG for: ~SB%s~SN (across %s feed%s)" %
(query, len(feed_ids), 's' if len(feed_ids) != 1 else ''))
try:
result_ids = [r.get_id() for r in results]
        except pyes.InvalidQuery, e:
logging.info(" ---> ~FRInvalid search query \"%s\": %s" % (query, e))
return []
return result_ids
class SearchFeed:
_es_client = None
name = "feeds"
@classmethod
def ES(cls):
if cls._es_client is None:
cls._es_client = pyes.ES(settings.ELASTICSEARCH_FEED_HOSTS)
if not cls._es_client.indices.exists_index(cls.index_name()):
cls.create_elasticsearch_mapping()
return cls._es_client
@classmethod
def index_name(cls):
return "%s-index" % cls.name
@classmethod
def type_name(cls):
return "%s-type" % cls.name
@classmethod
def create_elasticsearch_mapping(cls, delete=False):
if delete:
cls.ES().indices.delete_index_if_exists(cls.index_name())
        index_settings = {  # local name; avoids shadowing django.conf.settings imported above
"index" : {
"analysis": {
"analyzer": {
"edgengram_analyzer": {
"filter": ["edgengram"],
"tokenizer": "lowercase",
"type": "custom"
},
},
"filter": {
"edgengram": {
"max_gram": "15",
"min_gram": "1",
"type": "edgeNGram"
},
}
}
}
}
        cls.ES().indices.create_index_if_missing(cls.index_name(), index_settings)
mapping = {
"address": {
"analyzer": "edgengram_analyzer",
"store": False,
"term_vector": "with_positions_offsets",
"type": "string"
},
"feed_id": {
"store": True,
"type": "string"
},
"num_subscribers": {
"index": "analyzed",
"store": True,
"type": "long"
},
"title": {
"analyzer": "edgengram_analyzer",
"store": False,
"term_vector": "with_positions_offsets",
"type": "string"
},
"link": {
"analyzer": "edgengram_analyzer",
"store": False,
"term_vector": "with_positions_offsets",
"type": "string"
}
}
cls.ES().indices.put_mapping(cls.type_name(), {
'properties': mapping,
}, [cls.index_name()])
cls.ES().indices.flush()
@classmethod
def index(cls, feed_id, title, address, link, num_subscribers):
doc = {
"feed_id" : feed_id,
"title" : title,
"address" : address,
"link" : link,
"num_subscribers" : num_subscribers,
}
try:
cls.ES().index(doc, cls.index_name(), cls.type_name(), feed_id)
except pyes.exceptions.NoServerAvailable:
logging.debug(" ***> ~FRNo search server available.")
@classmethod
def query(cls, text, max_subscribers=5):
try:
cls.ES().default_indices = cls.index_name()
cls.ES().indices.refresh()
except pyes.exceptions.NoServerAvailable:
logging.debug(" ***> ~FRNo search server available.")
return []
if settings.DEBUG:
max_subscribers = 1
logging.info("~FGSearch ~FCfeeds~FG: ~SB%s" % text)
q = pyes.query.BoolQuery()
q.add_should(pyes.query.MatchQuery('address', text, analyzer="simple", cutoff_frequency=0.0005, minimum_should_match="75%"))
q.add_should(pyes.query.MatchQuery('link', text, analyzer="simple", cutoff_frequency=0.0005, minimum_should_match="75%"))
q.add_should(pyes.query.MatchQuery('title', text, analyzer="simple", cutoff_frequency=0.0005, minimum_should_match="75%"))
q = pyes.Search(q, min_score=1)
results = cls.ES().search(query=q, size=max_subscribers, doc_types=[cls.type_name()], sort="num_subscribers:desc")
return results
@classmethod
def export_csv(cls):
import djqscsv
qs = Feed.objects.filter(num_subscribers__gte=20).values('id', 'feed_title', 'feed_address', 'feed_link', 'num_subscribers')
csv = djqscsv.render_to_csv_response(qs).content
f = open('feeds.csv', 'w+')
f.write(csv)
f.close()
| [
"[email protected]"
] | |
00c9949db590246f66d2bb3310ffbfe39a1fee79 | 9b24eb3a15e9acd4aaf7af00d88488f5a056438f | /backend/home/api/v1/viewsets.py | c7c28c17f806e899fca335a7c524c6cb75b776a2 | [] | no_license | crowdbotics-apps/dashboard-app-18025 | b8fb28008d42371c7d74102b78ae380725b3221a | 202f33b00e14f65adfc9dbf84f748ad5cc051652 | refs/heads/master | 2022-11-15T12:16:12.733390 | 2020-06-15T17:24:52 | 2020-06-15T17:24:52 | 271,619,959 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,485 | py | from rest_framework import viewsets
from rest_framework import authentication
from .serializers import (
AddressSerializer,
CustomTextSerializer,
HomePageSerializer,
XYSerializer,
)
from rest_framework.authentication import SessionAuthentication, TokenAuthentication
from rest_framework.authtoken.serializers import AuthTokenSerializer
from rest_framework.permissions import IsAdminUser
from rest_framework.viewsets import ModelViewSet, ViewSet
from rest_framework.authtoken.models import Token
from rest_framework.response import Response
from home.api.v1.serializers import (
    SignupSerializer,
    UserSerializer,
)
from home.models import Address, CustomText, HomePage, XY
class SignupViewSet(ModelViewSet):
serializer_class = SignupSerializer
http_method_names = ["post"]
class LoginViewSet(ViewSet):
"""Based on rest_framework.authtoken.views.ObtainAuthToken"""
serializer_class = AuthTokenSerializer
def create(self, request):
serializer = self.serializer_class(
data=request.data, context={"request": request}
)
serializer.is_valid(raise_exception=True)
user = serializer.validated_data["user"]
token, created = Token.objects.get_or_create(user=user)
user_serializer = UserSerializer(user)
return Response({"token": token.key, "user": user_serializer.data})
class CustomTextViewSet(ModelViewSet):
serializer_class = CustomTextSerializer
queryset = CustomText.objects.all()
authentication_classes = (SessionAuthentication, TokenAuthentication)
permission_classes = [IsAdminUser]
http_method_names = ["get", "put", "patch"]
class HomePageViewSet(ModelViewSet):
serializer_class = HomePageSerializer
queryset = HomePage.objects.all()
authentication_classes = (SessionAuthentication, TokenAuthentication)
permission_classes = [IsAdminUser]
http_method_names = ["get", "put", "patch"]
class XYViewSet(viewsets.ModelViewSet):
serializer_class = XYSerializer
authentication_classes = (
authentication.SessionAuthentication,
authentication.TokenAuthentication,
)
queryset = XY.objects.all()
class AddressViewSet(viewsets.ModelViewSet):
serializer_class = AddressSerializer
authentication_classes = (
authentication.SessionAuthentication,
authentication.TokenAuthentication,
)
queryset = Address.objects.all()
| [
"[email protected]"
] | |
005b11fedd1241560633f3f19ce4ab82b6cf9068 | 43dabf77afd5c44d55b465c1b88bf9a5e7c4c9be | /resize.py | 306400848b45f96d2ec9be96bbc1dbae1a9871f7 | [] | no_license | geegatomar/OpenCV-Computer-Vision-Adrian-Rosebrock | cc81a990a481b5e4347dd97369b38479b46e55bc | daa579309010e6e7fefb004b878ffb26374401d0 | refs/heads/master | 2022-11-18T13:07:08.040483 | 2020-07-20T01:55:39 | 2020-07-20T01:55:39 | 280,987,262 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 506 | py | import cv2
import argparse
import numpy as np
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True, help="Path of image")
ap.add_argument("-w", "--width", default=100, help="Width of resized img")
args = vars(ap.parse_args())
image = cv2.imread(args["image"])
width = int(args["width"])
ratio = width / image.shape[1]
dim = (int(ratio * image.shape[0]), width)
resized = cv2.resize(image, dim, interpolation = cv2.INTER_AREA)
cv2.imshow("Resized img", resized)
cv2.waitKey(0)
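
# Usage (assumed invocation):
#   python resize.py --image path/to/picture.jpg --width 300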
| [
"[email protected]"
] | |
2534efd7cf1a472d4c24db7e37fb628ef53a3a0f | 9adda6cef38c05c0d6bc4f5d0be25e75500f3406 | /ques 2 sol.py | 00f2329450eb86ff204e44c7f8653fbee1abdcff | [] | no_license | GLAU-TND/python-programming-assignment4-upadhyay8844 | 09255dd1ef340f7af3ee57e4eee3c671c010d5c4 | bc5c31d40f03cceebb2c842bdd933e0e73a998a1 | refs/heads/master | 2021-05-19T05:26:14.857261 | 2020-04-01T11:43:27 | 2020-04-01T11:43:27 | 251,547,215 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 414 | py | def is_dict(var):
return str(type(var)) == "<class 'dict'>"
def flatten_helper(d, flat_d, path):
if not is_dict(d):
flat_d[path] = d
return
for key in d:
new_keypath = "{}.{}".format(path, key) if path else key
flatten_helper(d[key], flat_d, new_keypath)
def flatten(d):
flat_d = dict()
flatten_helper(d, flat_d, "")
return flat_d
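
# Example (illustrative): nested keys are joined with "." into a flat dict.
#
#   d = {"a": {"b": 1, "c": {"d": 2}}, "e": 3}
#   print(flatten(d))  # -> {'a.b': 1, 'a.c.d': 2, 'e': 3}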
| [
"[email protected]"
] | |
4c61a7aae73fa64897e0df01720f5f1eed93b6dd | 16de2efcba33961633c1e63e493986bad54c99bd | /test.py | 73b7e8d90f6b8b0378a1486d70f70ac2af704483 | [] | no_license | thakur-nishant/Algorithms | a0cc45de5393d4cbb428cccdbf81b6937cdf97d7 | 1a0306ca9a9fc68f59e28ea26c24822c15350294 | refs/heads/master | 2022-01-07T22:22:09.764193 | 2019-05-17T20:10:24 | 2019-05-17T20:10:24 | 109,093,687 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 424 | py | from math import log
from random import random
import matplotlib.pyplot as plt
import numpy as np
l = 2
T = 24
curr = -1/l * log(random())
arrival = [curr]
while curr < T:
curr = curr -1/l * log(random())
arrival.append(curr)
arrival = arrival[1:]
t = np.arange(0.0, T, 0.01)
N = len(t)
X = np.zeros(N)
for i in range(N):
X[i] = np.sum(arrival <= t[i])
plt.plot(t, X)
plt.xlabel('time(hrs)')
plt.show()
| [
"[email protected]"
] | |
a3dc231f3dbd0e2e1ef4dbdd546e09d37e950ff2 | f224fad50dbc182cda86291c83954607bbb60901 | /inference.py | ce98cbf4d15f6bc1e05363be1db9afeb1e519de5 | [] | no_license | Hongpeng1992/pytorch-commands | 7fd26202b7cf7d46a0ac8e1241336e8ca5dad30e | 5853625d9852e948c1ac337547f8078d048699a0 | refs/heads/master | 2020-05-04T15:38:26.704013 | 2019-02-07T07:04:01 | 2019-02-07T07:04:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,644 | py | import argparse
import io
import os
import csv
import time
import numpy as np
import pandas as pd
from collections import OrderedDict
from datetime import datetime
from dataset import CommandsDataset, get_labels
from models import model_factory
from utils import AverageMeter, get_outdir
import torch
import torch.autograd as autograd
import torch.nn
import torch.nn.functional as F
import torch.utils.data as data
import torchvision.utils
parser = argparse.ArgumentParser(description='Inference')
parser.add_argument('data', metavar='DIR',
help='path to dataset')
parser.add_argument('--model', default='resnet101', type=str, metavar='MODEL',
                    help='Name of model to train (default: "resnet101")')
parser.add_argument('--gp', default='avg', type=str, metavar='POOL',
help='Type of global pool, "avg", "max", "avgmax", "avgmaxc" (default: "avg")')
parser.add_argument('--tta', type=int, default=0, metavar='N',
help='Test/inference time augmentation (oversampling) factor. 0=None (default: 0)')
parser.add_argument('--pretrained', action='store_true', default=False,
help='Start with pretrained version of specified network (if avail)')
parser.add_argument('-b', '--batch-size', type=int, default=512, metavar='N',
help='input batch size for training (default: 512)')
parser.add_argument('-j', '--workers', type=int, default=2, metavar='N',
                    help='how many training processes to use (default: 2)')
parser.add_argument('--num-gpu', type=int, default=1,
help='Number of GPUS to use')
parser.add_argument('--checkpoint', default='', type=str, metavar='PATH',
help='path to restore checkpoint (default: none)')
parser.add_argument('--print-freq', '-p', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--save-batches', action='store_true', default=False,
help='save images of batch inputs and targets every log interval for debugging/verification')
parser.add_argument('--output', default='', type=str, metavar='PATH',
help='path to output folder (default: none, current dir)')
def main():
args = parser.parse_args()
num_classes = len(get_labels())
test_time_pool = 0 #5 if 'dpn' in args.model else 0
model = model_factory.create_model(
args.model,
in_chs=1,
num_classes=num_classes,
global_pool=args.gp,
test_time_pool=test_time_pool)
#model.reset_classifier(num_classes=num_classes)
if args.num_gpu > 1:
model = torch.nn.DataParallel(model, device_ids=list(range(args.num_gpu))).cuda()
else:
model.cuda()
if not os.path.exists(args.checkpoint):
print("=> no checkpoint found at '{}'".format(args.checkpoint))
exit(1)
print("=> loading checkpoint '{}'".format(args.checkpoint))
checkpoint = torch.load(args.checkpoint)
if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
model.load_state_dict(checkpoint['state_dict'])
print("=> loaded checkpoint '{}' (epoch {})".format(args.checkpoint, checkpoint['epoch']))
else:
model.load_state_dict(checkpoint)
csplit = os.path.normpath(args.checkpoint).split(sep=os.path.sep)
if len(csplit) > 1:
exp_name = csplit[-2] + '-' + csplit[-1].split('.')[0]
else:
exp_name = ''
if args.output:
output_base = args.output
else:
output_base = './output'
output_dir = get_outdir(output_base, 'predictions', exp_name)
dataset = CommandsDataset(
root=args.data,
mode='test',
format='spectrogram'
)
loader = data.DataLoader(
dataset,
batch_size=args.batch_size,
pin_memory=True,
shuffle=False,
num_workers=args.workers
)
model.eval()
batch_time_m = AverageMeter()
data_time_m = AverageMeter()
try:
# open CSV for writing predictions
cf = open(os.path.join(output_dir, 'results.csv'), mode='w')
res_writer = csv.writer(cf)
res_writer.writerow(['fname'] + dataset.id_to_label)
# open CSV for writing submission
cf = open(os.path.join(output_dir, 'submission.csv'), mode='w')
sub_writer = csv.writer(cf)
sub_writer.writerow(['fname', 'label', 'prob'])
end = time.time()
batch_sample_idx = 0
for batch_idx, (input, target) in enumerate(loader):
data_time_m.update(time.time() - end)
input = input.cuda()
output = model(input)
# augmentation reduction
#reduce_factor = loader.dataset.get_aug_factor()
#if reduce_factor > 1:
# output = output.unfold(0, reduce_factor, reduce_factor).mean(dim=2).squeeze(dim=2)
# index = index[0:index.size(0):reduce_factor]
            # move data to CPU and collect
output_logprob = F.log_softmax(output, dim=1).cpu().numpy()
output = F.softmax(output, dim=1)
output_prob, output_idx = output.max(1)
output_prob = output_prob.cpu().numpy()
output_idx = output_idx.cpu().numpy()
for i in range(output_logprob.shape[0]):
index = batch_sample_idx + i
pred_label = dataset.id_to_label[output_idx[i]]
pred_prob = output_prob[i]
filename = dataset.filename(index)
res_writer.writerow([filename] + list(output_logprob[i]))
sub_writer.writerow([filename] + [pred_label, pred_prob])
batch_sample_idx += input.size(0)
batch_time_m.update(time.time() - end)
if batch_idx % args.print_freq == 0:
print('Inference: [{}/{} ({:.0f}%)] '
'Time: {batch_time.val:.3f}s, {rate:.3f}/s '
'({batch_time.avg:.3f}s, {rate_avg:.3f}/s) '
'Data: {data_time.val:.3f} ({data_time.avg:.3f})'.format(
batch_sample_idx, len(loader.sampler),
100. * batch_idx / len(loader),
batch_time=batch_time_m,
rate=input.size(0) / batch_time_m.val,
rate_avg=input.size(0) / batch_time_m.avg,
data_time=data_time_m))
end = time.time()
# end iterating through dataset
except KeyboardInterrupt:
pass
except Exception as e:
print(str(e))
if __name__ == '__main__':
main()
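
# Example invocation (assumed paths and checkpoint name):
#   python inference.py /data/commands --model resnet101 \
#       --checkpoint output/train/model_best.pth.tar --output ./output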
| [
"[email protected]"
] | |
14117448fe850d69ae5fcf1bd41049c19247b557 | 91d1a6968b90d9d461e9a2ece12b465486e3ccc2 | /appmesh_write_2/virtual-router_delete.py | db6df7702ffc69ca7d3bbf5c3eda2b1680913ce2 | [] | no_license | lxtxl/aws_cli | c31fc994c9a4296d6bac851e680d5adbf7e93481 | aaf35df1b7509abf5601d3f09ff1fece482facda | refs/heads/master | 2023-02-06T09:00:33.088379 | 2020-12-27T13:38:45 | 2020-12-27T13:38:45 | 318,686,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,343 | py | #!/usr/bin/python
# -*- codding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_two_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/appmesh/delete-virtual-router.html
if __name__ == '__main__':
"""
create-virtual-router : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/appmesh/create-virtual-router.html
describe-virtual-router : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/appmesh/describe-virtual-router.html
list-virtual-routers : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/appmesh/list-virtual-routers.html
update-virtual-router : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/appmesh/update-virtual-router.html
"""
parameter_display_string = """
# mesh-name : The name of the service mesh to delete the virtual router in.
# virtual-router-name : The name of the virtual router to delete.
"""
add_option_dict = {}
add_option_dict["parameter_display_string"] = parameter_display_string
# ex: add_option_dict["no_value_parameter_list"] = "--single-parameter"
write_two_parameter("appmesh", "delete-virtual-router", "mesh-name", "virtual-router-name", add_option_dict)
| [
"[email protected]"
] | |
53fa6c563e9983afb729af1af3be08c9c03dd4a1 | 8792e3449fbc6c8dec99f6af1d9f1b4caddad1f7 | /51player.py | 470f81860462904d56f98294142a2c26cd476828 | [] | no_license | aarthisandhiya/aarthisandhiya1 | c19c1951c9ba01cd97eeddd44614953088718357 | e6f10247b6a84d6eaf371a23f2f9c3bebbc73e5b | refs/heads/master | 2020-04-15T17:17:07.151242 | 2019-05-20T05:24:19 | 2019-05-20T05:24:19 | 164,868,494 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 202 | py | a=int(input())
s=[int(a) for a in input().split()]
s=list(s)
z=[]
for i in range(0,len(s)):
val=s[i]
i=i-1
while i>=0:
if val<s[i]:
s[i+1]=s[i]
s[i]=val
i=i-1
else:
break
print(s[1])
| [
"[email protected]"
] | |
98244b23e0ce113db9acb33b85781abda3504fab | 82115f52db1783a2ce963e2621bf185c61ceb419 | /Teoría/03 Widgets para formularios/3-1 Etiquetas/programa.py | f824e536fbaa1c1de04e3356c2ce610ec1b992ff | [] | no_license | lesclaz/curso-qt-pyside-udemy | ce227df451a7cff40d90543ee6c892ea1a6b131c | 8b9bbf5d45e916f1d7db9411728b2759b30d2fd9 | refs/heads/master | 2023-07-01T18:11:47.959668 | 2021-08-03T09:38:12 | 2021-08-03T09:38:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,020 | py | from PySide6.QtWidgets import QApplication, QMainWindow, QLabel
from PySide6.QtCore import QSize, Qt
from PySide6.QtGui import QFont, QPixmap
from pathlib import Path
import sys
def absPath(file):
    # Return the absolute path to a file relative to this script
return str(Path(__file__).parent.absolute() / file)
class MainWindow(QMainWindow):
def __init__(self):
super().__init__()
self.setMinimumSize(QSize(480, 320))
etiqueta = QLabel("Soy una etiqueta")
self.setCentralWidget(etiqueta)
        # Load the image
        imagen = QPixmap(absPath("naturaleza.jpg"))
        # Assign it to the label
        etiqueta.setPixmap(imagen)
        # Make it scale with the window
        etiqueta.setScaledContents(True)
        # Set alignment flags
        etiqueta.setAlignment(Qt.AlignHCenter | Qt.AlignVCenter)
if __name__ == "__main__":
app = QApplication()
window = MainWindow()
window.show()
sys.exit(app.exec_())
| [
"[email protected]"
] | |
6576a596822baf4eb435a1fe47e11d479398497b | fd878bcdaa9489883894c942aae5e316a15c2085 | /tests/dataset_readers/sst_test.py | 477e1a51ec7a5efbd55ddd0006bc58ee474d6ddc | [] | no_license | Shuailong/SPM | a12d18baa39a72a9243ad9cd4238168ab42b96d1 | 0105dae90a4acdebfc875001efab7439b3eb8259 | refs/heads/master | 2020-04-26T04:51:14.279859 | 2019-06-24T03:55:11 | 2019-06-24T03:55:11 | 173,315,858 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,551 | py | # pylint: disable=no-self-use,invalid-name
import pytest
import pathlib
import random
import os
from allennlp.common import Params
from allennlp.common.util import ensure_list
from allennlp.common.testing import ModelTestCase
from allennlp.data.dataset import Batch
from allennlp.data.fields import TextField
from allennlp.data.instance import Instance
from allennlp.data.token_indexers.wordpiece_indexer import PretrainedBertIndexer
from allennlp.data.tokenizers import WordTokenizer, Token
from allennlp.data.tokenizers.word_splitter import BertBasicWordSplitter
from allennlp.data.vocabulary import Vocabulary
from allennlp.modules.token_embedders.bert_token_embedder import PretrainedBertEmbedder
from spm.data.dataset_readers import GLUESST2DatasetReader
from spm import DATA_DIR as DATA_ROOT
class TestSSTReader:
FIXTURES_ROOT = (pathlib.Path(__file__).parent /
".." / ".." / "tests" / "fixtures").resolve()
BERT_VOCAB_PATH = os.path.join(
DATA_ROOT, 'bert/bert-base-uncased-vocab.txt')
@pytest.mark.parametrize("lazy", (True, False))
def test_read(self, lazy):
reader = GLUESST2DatasetReader(
tokenizer=WordTokenizer(word_splitter=BertBasicWordSplitter()),
token_indexers={'bert': PretrainedBertIndexer(
pretrained_model=self.BERT_VOCAB_PATH)},
skip_label_indexing=False
)
instances = reader.read(
str(self.FIXTURES_ROOT / 'dev.tsv'))
instances = ensure_list(instances)
example = instances[0]
tokens = [t.text for t in example.fields['tokens']]
label = example.fields['label'].label
print(label)
print(tokens)
batch = Batch(instances)
vocab = Vocabulary.from_instances(instances)
batch.index_instances(vocab)
padding_lengths = batch.get_padding_lengths()
tensor_dict = batch.as_tensor_dict(padding_lengths)
tokens = tensor_dict["tokens"]
print(tokens['mask'].tolist()[0])
print(tokens["bert"].tolist()[0])
print([vocab.get_token_from_index(i, "bert")
for i in tokens["bert"].tolist()[0]])
print(len(tokens['bert'][0]))
print(tokens["bert-offsets"].tolist()[0])
print(tokens['bert-type-ids'].tolist()[0])
def test_can_build_from_params(self):
reader = GLUESST2DatasetReader.from_params(Params({}))
# pylint: disable=protected-access
assert reader._token_indexers['tokens'].__class__.__name__ == 'SingleIdTokenIndexer'
| [
"[email protected]"
] | |
25eaf0a29411821417765885863acfd5166a02e3 | 7298d1692c6948f0880e550d6100c63a64ce3ea1 | /deriva-annotations/catalog99/catalog-configs/Vocab/ihm_residues_not_modeled_reason.py | 9a62ee7fbe832a6a342ee44c46b17d4607a9f500 | [] | no_license | informatics-isi-edu/protein-database | b7684b3d08dbf22c1e7c4a4b8460248c6f0d2c6d | ce4be1bf13e6b1c22f3fccbb513824782609991f | refs/heads/master | 2023-08-16T10:24:10.206574 | 2023-07-25T23:10:42 | 2023-07-25T23:10:42 | 174,095,941 | 2 | 0 | null | 2023-06-16T19:44:43 | 2019-03-06T07:39:14 | Python | UTF-8 | Python | false | false | 5,585 | py | import argparse
from deriva.core import ErmrestCatalog, AttrDict, get_credential
import deriva.core.ermrest_model as em
from deriva.core.ermrest_config import tag as chaise_tags
from deriva.utils.catalog.manage.update_catalog import CatalogUpdater, parse_args
groups = {
'pdb-reader': 'https://auth.globus.org/8875a770-3c40-11e9-a8c8-0ee7d80087ee',
'pdb-writer': 'https://auth.globus.org/c94a1e5c-3c40-11e9-a5d1-0aacc65bfe9a',
'pdb-admin': 'https://auth.globus.org/0b98092c-3c41-11e9-a8c8-0ee7d80087ee',
'pdb-curator': 'https://auth.globus.org/eef3e02a-3c40-11e9-9276-0edc9bdd56a6',
'isrd-staff': 'https://auth.globus.org/176baec4-ed26-11e5-8e88-22000ab4b42b',
'pdb-submitter': 'https://auth.globus.org/99da042e-64a6-11ea-ad5f-0ef992ed7ca1'
}
table_name = 'ihm_residues_not_modeled_reason'
schema_name = 'Vocab'
column_annotations = {
'ID': {},
'URI': {},
'Name': {},
'Description': {},
'Synonyms': {},
'Owner': {}
}
column_comment = {
'ID': 'The preferred Compact URI (CURIE) for this term.',
'URI': 'The preferred URI for this term.',
'Name': 'None',
'Description': 'None',
'Synonyms': 'Alternate human-readable names for this term.',
'Owner': 'Group that can update the record.'
}
column_acls = {}
column_acl_bindings = {}
column_defs = [
em.Column.define(
'ID',
em.builtin_types['ermrest_curie'],
nullok=False,
default='PDB:{RID}',
comment=column_comment['ID'],
),
em.Column.define(
'URI',
em.builtin_types['ermrest_uri'],
nullok=False,
default='/id/{RID}',
comment=column_comment['URI'],
),
em.Column.define(
'Name', em.builtin_types['text'], nullok=False, comment=column_comment['Name'],
),
em.Column.define(
'Description',
em.builtin_types['markdown'],
nullok=False,
comment=column_comment['Description'],
),
em.Column.define('Synonyms', em.builtin_types['text[]'], comment=column_comment['Synonyms'],
),
em.Column.define('Owner', em.builtin_types['text'], comment=column_comment['Owner'],
),
]
visible_columns = {
'*': [
'RID', 'Name', 'Description', 'ID', 'URI',
['Vocab', 'ihm_residues_not_modeled_reason_term_RCB_fkey'],
['Vocab', 'ihm_residues_not_modeled_reason_term_RMB_fkey'], 'RCT', 'RMT',
['Vocab', 'ihm_residues_not_modeled_reason_term_Owner_fkey']
]
}
table_display = {'row_name': {'row_markdown_pattern': '{{{Name}}}'}}
table_annotations = {
chaise_tags.table_display: table_display,
chaise_tags.visible_columns: visible_columns,
}
table_comment = 'A set of controlled vocabular terms.'
table_acls = {}
table_acl_bindings = {
'released_reader': {
'types': ['select'],
'scope_acl': [groups['pdb-submitter']],
'projection': ['RID'],
'projection_type': 'nonnull'
},
'self_service_group': {
'types': ['update', 'delete'],
'scope_acl': ['*'],
'projection': ['Owner'],
'projection_type': 'acl'
},
'self_service_creator': {
'types': ['update', 'delete'],
'scope_acl': ['*'],
'projection': ['RCB'],
'projection_type': 'acl'
}
}
key_defs = [
em.Key.define(
['Name'], constraint_names=[['Vocab', 'ihm_residues_not_modeled_reason_Namekey1']],
),
em.Key.define(
['RID'], constraint_names=[['Vocab', 'ihm_residues_not_modeled_reason_term_RIDkey1']],
),
em.Key.define(
['ID'], constraint_names=[['Vocab', 'ihm_residues_not_modeled_reason_term_IDkey1']],
),
em.Key.define(
['URI'], constraint_names=[['Vocab', 'ihm_residues_not_modeled_reason_term_URIkey1']],
),
]
fkey_defs = [
em.ForeignKey.define(
['RCB'],
'public',
'ERMrest_Client', ['ID'],
constraint_names=[['Vocab', 'ihm_residues_not_modeled_reason_term_RCB_fkey']],
),
em.ForeignKey.define(
['RMB'],
'public',
'ERMrest_Client', ['ID'],
constraint_names=[['Vocab', 'ihm_residues_not_modeled_reason_term_RMB_fkey']],
),
em.ForeignKey.define(
['Owner'],
'public',
'Catalog_Group', ['ID'],
constraint_names=[['Vocab', 'ihm_residues_not_modeled_reason_term_Owner_fkey']],
acls={
'insert': [groups['pdb-curator']],
'update': [groups['pdb-curator']]
},
acl_bindings={
'set_owner': {
'types': ['update', 'insert'],
'scope_acl': ['*'],
'projection': ['ID'],
'projection_type': 'acl'
}
},
),
]
table_def = em.Table.define(
table_name,
column_defs=column_defs,
key_defs=key_defs,
fkey_defs=fkey_defs,
annotations=table_annotations,
acls=table_acls,
acl_bindings=table_acl_bindings,
comment=table_comment,
provide_system=True
)
def main(catalog, mode, replace=False, really=False):
updater = CatalogUpdater(catalog)
table_def['column_annotations'] = column_annotations
table_def['column_comment'] = column_comment
updater.update_table(mode, schema_name, table_def, replace=replace, really=really)
if __name__ == "__main__":
host = 'pdb.isrd.isi.edu'
catalog_id = 99
mode, replace, host, catalog_id = parse_args(host, catalog_id, is_table=True)
catalog = ErmrestCatalog('https', host, catalog_id=catalog_id, credentials=get_credential(host))
main(catalog, mode, replace)
| [
"[email protected]"
] | |
26604b1e653b586dcc138356474bf5459ea54e2e | 604fdb2c4fa24237d206e7c8835bb2c21b0a2fb7 | /ari/v1/client.py | 0f438dfd979c9fed793cc6fef8f04f0b37e2bc6d | [
"Apache-2.0"
] | permissive | SibghatullahSheikh/python-ari | d8d87d213c1a52b0ed46a8ea50362b93c772325b | f4a6f870513bc74bf96606168e0d2173ed2f2ebb | refs/heads/master | 2021-01-22T00:13:37.707863 | 2014-01-29T21:06:52 | 2014-01-29T21:06:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,344 | py | # -*- coding: utf-8 -*-
# Copyright 2012 OpenStack LLC.
# Copyright (c) 2013 PolyBeacon, Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ari.common import http
from ari.v1 import application
from ari.v1 import bridge
from ari.v1 import channel
from ari.v1 import devicestate
from ari.v1 import endpoint
from ari.v1 import sound
class Client(http.HTTPClient):
"""Client for the ARI v1 API.
"""
def __init__(self, *args, **kwargs):
super(Client, self).__init__(*args, **kwargs)
self.applications = application.ApplicationManager(self)
self.bridges = bridge.BridgeManager(self)
self.channels = channel.ChannelManager(self)
self.devicestates = devicestate.DeviceStateManager(self)
self.endpoints = endpoint.EndpointManager(self)
self.sounds = sound.SoundManager(self)
| [
"[email protected]"
] | |
a74af5013611c1d1945d2e4250a4c532a725e0bd | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_6404600001200128_0/Python/kawasaki/solve.py | 13028d9808e7f822dbd94054186f11d1384f2212 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 371 | py | T = int(raw_input())
for test_index in xrange(T):
N = int(raw_input())
m = map(int, raw_input().split())
y = 0
for i in xrange(N - 1):
y += max(m[i] - m[i + 1], 0)
d = max(max(m[i] - m[i + 1], 0) for i in xrange(N - 1))
z = 0
for i in xrange(N - 1):
z += min(d, m[i])
print 'Case #{}: {} {}'.format(test_index + 1, y, z)
| [
"[email protected]"
] | |
5d64b3ec43f8f8706fbb5bc2f4c1dea3573739ee | d6d87140d929262b5228659f89a69571c8669ec1 | /airbyte-connector-builder-server/connector_builder/generated/models/stream_slicer.py | 56c37db2c82d4d65076de8f3b5e19e85d772378d | [
"MIT",
"Elastic-2.0"
] | permissive | gasparakos/airbyte | b2bb2246ec6a10e1f86293da9d86c61fc4a4ac65 | 17c77fc819ef3732fb1b20fa4c1932be258f0ee9 | refs/heads/master | 2023-02-22T20:42:45.400851 | 2023-02-09T07:43:24 | 2023-02-09T07:43:24 | 303,604,219 | 0 | 0 | MIT | 2020-10-13T06:18:04 | 2020-10-13T06:06:17 | null | UTF-8 | Python | false | false | 527 | py | # coding: utf-8
from __future__ import annotations
from datetime import date, datetime # noqa: F401
import re # noqa: F401
from typing import Any, Dict, List, Optional # noqa: F401
from pydantic import AnyUrl, BaseModel, EmailStr, Field, validator # noqa: F401
class StreamSlicer(BaseModel):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
StreamSlicer - a model defined in OpenAPI
"""
StreamSlicer.update_forward_refs() | [
"[email protected]"
] | |
e5d021f764bf2a500658c2a962784f56ffc0f864 | 2b167e29ba07e9f577c20c54cb943861d0ccfa69 | /numerical_analysis_backup/small-scale-multiobj/pod100_sa/pareto_arch2/pareto_ff/pareto8.py | c934f4633c223ab3a0093473ad66da38262a6453 | [] | no_license | LiYan1988/kthOld_OFC | 17aeeed21e195d1a9a3262ec2e67d6b1d3f9ff0f | b1237577ea68ad735a65981bf29584ebd889132b | refs/heads/master | 2021-01-11T17:27:25.574431 | 2017-01-23T05:32:35 | 2017-01-23T05:32:35 | 79,773,237 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,048 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Aug 4 15:15:10 2016
@author: li
optimize both throughput and connections
"""
#import sys
#sys.path.insert(0, '/home/li/Dropbox/KTH/numerical_analysis/ILPs')
import csv
from gurobipy import *
import numpy as np
from arch2_decomposition_new import Arch2_decompose
np.random.seed(2010)
num_cores=3
num_slots=80
i = 8
time_limit_routing = 1200 # 1000
time_limit_sa = 108 # 10800
filename = 'traffic_matrix__matrix_'+str(i)+'.csv'
# print filename
tm = []
with open(filename) as f:
reader = csv.reader(f)
for idx, row in enumerate(reader):
if idx>11:
row.pop()
row = [int(u) for u in row]
tm.append(row)
tm = np.array(tm)*25
#%% arch2
betav1 = np.arange(0,0.105,0.005)
betav2 = np.arange(0.15,1.05,0.05)
betav3 = np.arange(10, 110, 10)
betav = np.concatenate((betav1, betav2, betav3))
connection_ub = []
throughput_ub = []
connection_lb = []
throughput_lb = []
obj_ub = []
obj_lb = []
for beta in betav:
m = Arch2_decompose(tm, num_slots=num_slots, num_cores=num_cores,alpha=1,beta=beta)
m.create_model_routing(mipfocus=1,timelimit=time_limit_routing,mipgap=0.01, method=2)
m.sa_heuristic(ascending1=False,ascending2=False)
connection_ub.append(m.connections_ub)
throughput_ub.append(m.throughput_ub)
obj_ub.append(m.alpha*m.connections_ub+m.beta*m.throughput_ub)
connection_lb.append(m.obj_sah_connection_)
throughput_lb.append(m.obj_sah_throughput_)
obj_lb.append(m.alpha*m.obj_sah_connection_+m.beta*m.obj_sah_throughput_)
# print m.obj_sah_/float(m.alpha*m.connections_ub+m.beta*m.throughput_ub)
result = np.array([betav,connection_ub,throughput_ub,obj_ub,
connection_lb,throughput_lb,obj_lb]).T
file_name = "result_pareto{}.csv".format(i)
with open(file_name, 'w') as f:
writer = csv.writer(f, delimiter=',')
writer.writerow(['beta', 'connection_ub', 'throughput_ub',
'obj_ub', 'connection_lb', 'throughput_lb', 'obj_lb'])
writer.writerows(result) | [
"[email protected]"
] | |
94f78ff7515cedf224519e07f552630acac3127a | a857d1911a118b8aa62ffeaa8f154c8325cdc939 | /toontown/estate/DistributedFireworksCannon.py | d5691917f5a2a6d4d53e4cdd97782a58257a8ec5 | [
"MIT"
] | permissive | DioExtreme/TT-CL-Edition | 761d3463c829ec51f6bd2818a28b667c670c44b6 | 6b85ca8352a57e11f89337e1c381754d45af02ea | refs/heads/main | 2023-06-01T16:37:49.924935 | 2021-06-24T02:25:22 | 2021-06-24T02:25:22 | 379,310,849 | 0 | 0 | MIT | 2021-06-22T15:07:31 | 2021-06-22T15:07:30 | null | UTF-8 | Python | false | false | 4,308 | py | from toontown.toonbase.ToontownGlobals import *
from direct.interval.IntervalGlobal import *
from direct.distributed.ClockDelta import *
from HouseGlobals import *
from toontown.effects import DistributedFireworkShow
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
from panda3d.core import CollisionSphere, CollisionNode
import FireworksGui
class DistributedFireworksCannon(DistributedFireworkShow.DistributedFireworkShow):
notify = directNotify.newCategory('DistributedFireworksCannon')
def __init__(self, cr):
DistributedFireworkShow.DistributedFireworkShow.__init__(self, cr)
self.fireworksGui = None
self.load()
return
def generateInit(self):
DistributedFireworkShow.DistributedFireworkShow.generateInit(self)
self.fireworksSphereEvent = self.uniqueName('fireworksSphere')
self.fireworksSphereEnterEvent = 'enter' + self.fireworksSphereEvent
self.fireworksGuiDoneEvent = 'fireworksGuiDone'
self.shootEvent = 'fireworkShootEvent'
self.collSphere = CollisionSphere(0, 0, 0, 2.5)
self.collSphere.setTangible(1)
self.collNode = CollisionNode(self.fireworksSphereEvent)
self.collNode.setIntoCollideMask(ToontownGlobals.WallBitmask)
self.collNode.addSolid(self.collSphere)
self.collNodePath = self.geom.attachNewNode(self.collNode)
def generate(self):
DistributedFireworkShow.DistributedFireworkShow.generate(self)
def announceGenerate(self):
self.notify.debug('announceGenerate')
self.accept(self.fireworksSphereEnterEvent, self.__handleEnterSphere)
def disable(self):
self.notify.debug('disable')
self.ignore(self.fireworksSphereEnterEvent)
self.ignore(self.shootEvent)
self.ignore(self.fireworksGuiDoneEvent)
if self.fireworksGui:
self.fireworksGui.destroy()
self.fireworksGui = None
DistributedFireworkShow.DistributedFireworkShow.disable(self)
return
def delete(self):
self.notify.debug('delete')
self.geom.removeNode()
DistributedFireworkShow.DistributedFireworkShow.delete(self)
def load(self):
self.geom = loader.loadModel('phase_5/models/props/trashcan_TT.bam')
self.geom.reparentTo(base.cr.playGame.hood.loader.geom)
self.geom.setScale(0.5)
def __handleEnterSphere(self, collEntry):
self.notify.debug('handleEnterSphere()')
self.ignore(self.fireworksSphereEnterEvent)
self.sendUpdate('avatarEnter', [])
def __handleFireworksDone(self):
self.ignore(self.fireworksGuiDoneEvent)
self.ignore(self.shootEvent)
self.sendUpdate('avatarExit')
self.fireworksGui.destroy()
self.fireworksGui = None
return
def freeAvatar(self):
base.localAvatar.posCamera(0, 0)
base.cr.playGame.getPlace().setState('walk')
self.accept(self.fireworksSphereEnterEvent, self.__handleEnterSphere)
def setMovie(self, mode, avId, timestamp):
timeStamp = globalClockDelta.localElapsedTime(timestamp)
isLocalToon = avId == base.localAvatar.doId
if mode == FIREWORKS_MOVIE_CLEAR:
self.notify.debug('setMovie: clear')
return
elif mode == FIREWORKS_MOVIE_GUI:
self.notify.debug('setMovie: gui')
if isLocalToon:
self.fireworksGui = FireworksGui.FireworksGui(self.fireworksGuiDoneEvent, self.shootEvent)
self.accept(self.fireworksGuiDoneEvent, self.__handleFireworksDone)
self.accept(self.shootEvent, self.localShootFirework)
return
else:
self.notify.warning('unknown mode in setMovie: %s' % mode)
def setPosition(self, x, y, z):
self.pos = [x, y, z]
self.geom.setPos(x, y, z)
def localShootFirework(self, index):
style = index
col1, col2 = self.fireworksGui.getCurColor()
amp = 30
dummy = base.localAvatar.attachNewNode('dummy')
dummy.setPos(0, 100, 60)
pos = dummy.getPos(render)
dummy.removeNode()
        print 'localShootFirework: %s, col=%s' % (index, col1)
self.d_requestFirework(pos[0], pos[1], pos[2], style, col1, col2)
| [
"[email protected]"
] | |
6b97a43edfe028b659923528eaadd406c208290f | 71501709864eff17c873abbb97ffabbeba4cb5e3 | /llvm13.0.0/lldb/test/API/functionalities/completion/TestCompletion.py | 11f0e387245e0fc659032f8b1323c5e4f7f230db | [
"NCSA",
"Apache-2.0",
"LLVM-exception"
] | permissive | LEA0317/LLVM-VideoCore4 | d08ba6e6f26f7893709d3285bdbd67442b3e1651 | 7ae2304339760685e8b5556aacc7e9eee91de05c | refs/heads/master | 2022-06-22T15:15:52.112867 | 2022-06-09T08:45:24 | 2022-06-09T08:45:24 | 189,765,789 | 1 | 0 | NOASSERTION | 2019-06-01T18:31:29 | 2019-06-01T18:31:29 | null | UTF-8 | Python | false | false | 33,655 | py | """
Test the lldb command line completion mechanism.
"""
import os
from multiprocessing import Process
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbplatform
from lldbsuite.test import lldbutil
class CommandLineCompletionTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
NO_DEBUG_INFO_TESTCASE = True
@classmethod
def classCleanup(cls):
"""Cleanup the test byproducts."""
try:
os.remove("child_send.txt")
os.remove("child_read.txt")
except:
pass
def test_at(self):
"""Test that 'at' completes to 'attach '."""
self.complete_from_to('at', 'attach ')
def test_de(self):
"""Test that 'de' completes to 'detach '."""
self.complete_from_to('de', 'detach ')
def test_frame_variable(self):
self.build()
self.main_source = "main.cpp"
self.main_source_spec = lldb.SBFileSpec(self.main_source)
(target, process, thread, bkpt) = lldbutil.run_to_source_breakpoint(self,
'// Break here', self.main_source_spec)
self.assertEquals(process.GetState(), lldb.eStateStopped)
# Since CommandInterpreter has been corrected to update the current execution
# context at the beginning of HandleCompletion, we're here explicitly testing
# the scenario where "frame var" is completed without any preceding commands.
self.complete_from_to('frame variable fo',
'frame variable fooo')
self.complete_from_to('frame variable fooo.',
'frame variable fooo.')
self.complete_from_to('frame variable fooo.dd',
'frame variable fooo.dd')
self.complete_from_to('frame variable ptr_fooo->',
'frame variable ptr_fooo->')
self.complete_from_to('frame variable ptr_fooo->dd',
'frame variable ptr_fooo->dd')
self.complete_from_to('frame variable cont',
'frame variable container')
self.complete_from_to('frame variable container.',
'frame variable container.MemberVar')
self.complete_from_to('frame variable container.Mem',
'frame variable container.MemberVar')
self.complete_from_to('frame variable ptr_cont',
'frame variable ptr_container')
self.complete_from_to('frame variable ptr_container->',
'frame variable ptr_container->MemberVar')
self.complete_from_to('frame variable ptr_container->Mem',
'frame variable ptr_container->MemberVar')
def test_process_attach_dash_dash_con(self):
"""Test that 'process attach --con' completes to 'process attach --continue '."""
self.complete_from_to(
'process attach --con',
'process attach --continue ')
def test_process_launch_arch(self):
self.complete_from_to('process launch --arch ',
['mips',
'arm64'])
def test_process_load(self):
self.build()
lldbutil.run_to_source_breakpoint(self, '// Break here', lldb.SBFileSpec("main.cpp"))
self.complete_from_to('process load Makef', 'process load Makefile')
@skipUnlessPlatform(["linux"])
def test_process_unload(self):
"""Test the completion for "process unload <index>" """
# This tab completion should not work without a running process.
self.complete_from_to('process unload ',
'process unload ')
self.build()
lldbutil.run_to_source_breakpoint(self, '// Break here', lldb.SBFileSpec("main.cpp"))
err = lldb.SBError()
self.process().LoadImage(lldb.SBFileSpec(self.getBuildArtifact("libshared.so")), err)
self.assertSuccess(err)
self.complete_from_to('process unload ',
'process unload 0')
self.process().UnloadImage(0)
self.complete_from_to('process unload ',
'process unload ')
def test_process_plugin_completion(self):
subcommands = ['attach -P', 'connect -p', 'launch -p']
for subcommand in subcommands:
self.complete_from_to('process ' + subcommand + ' mac',
'process ' + subcommand + ' mach-o-core')
def completions_contain_str(self, input, needle):
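        """Assert that at least one returned completion for 'input' contains 'needle'."""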
interp = self.dbg.GetCommandInterpreter()
match_strings = lldb.SBStringList()
num_matches = interp.HandleCompletion(input, len(input), 0, -1, match_strings)
found_needle = False
for match in match_strings:
if needle in match:
found_needle = True
break
self.assertTrue(found_needle, "Returned completions: " + "\n".join(match_strings))
@skipIfRemote
@skipIfReproducer
def test_common_completion_process_pid_and_name(self):
# The LLDB process itself and the process already attached to are both
# ignored by the process discovery mechanism, thus we need a process known
# to us here.
self.build()
server = self.spawnSubprocess(
self.getBuildArtifact("a.out"),
["-x"], # Arg "-x" makes the subprocess wait for input thus it won't be terminated too early
install_remote=False)
self.assertIsNotNone(server)
pid = server.pid
self.completions_contain('process attach -p ', [str(pid)])
self.completions_contain('platform process attach -p ', [str(pid)])
self.completions_contain('platform process info ', [str(pid)])
self.completions_contain_str('process attach -n ', "a.out")
self.completions_contain_str('platform process attach -n ', "a.out")
def test_process_signal(self):
# The tab completion for "process signal" won't work without a running process.
self.complete_from_to('process signal ',
'process signal ')
# Test with a running process.
self.build()
self.main_source = "main.cpp"
self.main_source_spec = lldb.SBFileSpec(self.main_source)
lldbutil.run_to_source_breakpoint(self, '// Break here', self.main_source_spec)
self.complete_from_to('process signal ',
'process signal SIG')
self.complete_from_to('process signal SIGPIP',
'process signal SIGPIPE')
self.complete_from_to('process signal SIGA',
['SIGABRT',
'SIGALRM'])
def test_ambiguous_long_opt(self):
self.completions_match('breakpoint modify --th',
['--thread-id',
'--thread-index',
'--thread-name'])
def test_disassemble_dash_f(self):
self.completions_match('disassemble -F ',
['default',
'intel',
'att'])
def test_plugin_load(self):
self.complete_from_to('plugin load ', [])
def test_log_enable(self):
self.complete_from_to('log enable ll', ['lldb'])
self.complete_from_to('log enable dw', ['dwarf'])
self.complete_from_to('log enable lldb al', ['all'])
self.complete_from_to('log enable lldb sym', ['symbol'])
    def test_log_disable(self):
self.complete_from_to('log disable ll', ['lldb'])
self.complete_from_to('log disable dw', ['dwarf'])
self.complete_from_to('log disable lldb al', ['all'])
self.complete_from_to('log disable lldb sym', ['symbol'])
def test_log_list(self):
self.complete_from_to('log list ll', ['lldb'])
self.complete_from_to('log list dw', ['dwarf'])
self.complete_from_to('log list ll', ['lldb'])
self.complete_from_to('log list lldb dwa', ['dwarf'])
def test_quoted_command(self):
self.complete_from_to('"set',
['"settings" '])
def test_quoted_arg_with_quoted_command(self):
self.complete_from_to('"settings" "repl',
['"replace" '])
def test_quoted_arg_without_quoted_command(self):
self.complete_from_to('settings "repl',
['"replace" '])
def test_single_quote_command(self):
self.complete_from_to("'set",
["'settings' "])
def test_terminated_quote_command(self):
# This should not crash, but we don't get any
# reasonable completions from this.
self.complete_from_to("'settings'", [])
def test_process_launch_arch_arm(self):
self.complete_from_to('process launch --arch arm',
['arm64'])
def test_target_symbols_add_shlib(self):
# Doesn't seem to work, but at least it shouldn't crash.
self.complete_from_to('target symbols add --shlib ', [])
def test_log_file(self):
# Complete in our source directory which contains a 'main.cpp' file.
src_dir = os.path.dirname(os.path.realpath(__file__)) + '/'
self.complete_from_to('log enable lldb expr -f ' + src_dir,
['main.cpp'])
def test_log_dir(self):
# Complete our source directory.
src_dir = os.path.dirname(os.path.realpath(__file__))
self.complete_from_to('log enable lldb expr -f ' + src_dir,
[src_dir + os.sep], turn_off_re_match=True)
# <rdar://problem/11052829>
def test_infinite_loop_while_completing(self):
"""Test that 'process print hello\' completes to itself and does not infinite loop."""
self.complete_from_to('process print hello\\', 'process print hello\\',
turn_off_re_match=True)
def test_watchpoint_co(self):
"""Test that 'watchpoint co' completes to 'watchpoint command '."""
self.complete_from_to('watchpoint co', 'watchpoint command ')
def test_watchpoint_command_space(self):
"""Test that 'watchpoint command ' completes to ['add', 'delete', 'list']."""
self.complete_from_to(
'watchpoint command ', [
'add', 'delete', 'list'])
def test_watchpoint_command_a(self):
"""Test that 'watchpoint command a' completes to 'watchpoint command add '."""
self.complete_from_to(
'watchpoint command a',
'watchpoint command add ')
def test_watchpoint_set_ex(self):
"""Test that 'watchpoint set ex' completes to 'watchpoint set expression '."""
self.complete_from_to(
'watchpoint set ex',
'watchpoint set expression ')
def test_watchpoint_set_var(self):
"""Test that 'watchpoint set var' completes to 'watchpoint set variable '."""
self.complete_from_to('watchpoint set var', 'watchpoint set variable ')
def test_watchpoint_set_variable_foo(self):
self.build()
lldbutil.run_to_source_breakpoint(self, '// Break here', lldb.SBFileSpec("main.cpp"))
self.complete_from_to('watchpoint set variable fo', 'watchpoint set variable fooo')
# Only complete the first argument.
self.complete_from_to('watchpoint set variable fooo ', 'watchpoint set variable fooo ')
def test_help_fi(self):
"""Test that 'help fi' completes to ['file', 'finish']."""
self.complete_from_to(
'help fi', [
'file', 'finish'])
def test_help_watchpoint_s(self):
"""Test that 'help watchpoint s' completes to 'help watchpoint set '."""
self.complete_from_to('help watchpoint s', 'help watchpoint set ')
@expectedFailureNetBSD
def test_common_complete_watchpoint_ids(self):
subcommands = ['enable', 'disable', 'delete', 'modify', 'ignore']
# Completion should not work without a target.
for subcommand in subcommands:
self.complete_from_to('watchpoint ' + subcommand + ' ',
'watchpoint ' + subcommand + ' ')
# Create a process to provide a target and enable watchpoint setting.
self.build()
lldbutil.run_to_source_breakpoint(self, '// Break here', lldb.SBFileSpec("main.cpp"))
self.runCmd('watchpoint set variable ptr_fooo')
for subcommand in subcommands:
self.complete_from_to('watchpoint ' + subcommand + ' ', ['1'])
def test_settings_append_target_er(self):
"""Test that 'settings append target.er' completes to 'settings append target.error-path'."""
self.complete_from_to(
'settings append target.er',
'settings append target.error-path')
def test_settings_insert_after_target_en(self):
"""Test that 'settings insert-after target.env' completes to 'settings insert-after target.env-vars'."""
self.complete_from_to(
'settings insert-after target.env',
'settings insert-after target.env-vars')
def test_settings_insert_before_target_en(self):
"""Test that 'settings insert-before target.env' completes to 'settings insert-before target.env-vars'."""
self.complete_from_to(
'settings insert-before target.env',
'settings insert-before target.env-vars')
def test_settings_replace_target_ru(self):
"""Test that 'settings replace target.ru' completes to 'settings replace target.run-args'."""
self.complete_from_to(
'settings replace target.ru',
'settings replace target.run-args')
def test_settings_show_term(self):
self.complete_from_to(
'settings show term-',
'settings show term-width')
def test_settings_list_term(self):
self.complete_from_to(
'settings list term-',
'settings list term-width')
def test_settings_remove_term(self):
self.complete_from_to(
'settings remove term-',
'settings remove term-width')
def test_settings_s(self):
"""Test that 'settings s' completes to ['set', 'show']."""
self.complete_from_to(
'settings s', [
'set', 'show'])
def test_settings_set_th(self):
"""Test that 'settings set thread-f' completes to 'settings set thread-format'."""
self.complete_from_to('settings set thread-f', 'settings set thread-format')
def test_settings_s_dash(self):
"""Test that 'settings set --g' completes to 'settings set --global'."""
self.complete_from_to('settings set --g', 'settings set --global')
def test_settings_clear_th(self):
"""Test that 'settings clear thread-f' completes to 'settings clear thread-format'."""
self.complete_from_to(
'settings clear thread-f',
'settings clear thread-format')
def test_settings_set_ta(self):
"""Test that 'settings set ta' completes to 'settings set target.'."""
self.complete_from_to(
'settings set target.ma',
'settings set target.max-')
def test_settings_set_target_exec(self):
"""Test that 'settings set target.exec' completes to 'settings set target.exec-search-paths '."""
self.complete_from_to(
'settings set target.exec',
'settings set target.exec-search-paths')
def test_settings_set_target_pr(self):
"""Test that 'settings set target.pr' completes to [
'target.prefer-dynamic-value', 'target.process.']."""
self.complete_from_to('settings set target.pr',
['target.prefer-dynamic-value',
'target.process.'])
def test_settings_set_target_process(self):
"""Test that 'settings set target.process' completes to 'settings set target.process.'."""
self.complete_from_to(
'settings set target.process',
'settings set target.process.')
def test_settings_set_target_process_dot(self):
"""Test that 'settings set target.process.t' completes to 'settings set target.process.thread.'."""
self.complete_from_to(
'settings set target.process.t',
'settings set target.process.thread.')
def test_settings_set_target_process_thread_dot(self):
"""Test that 'settings set target.process.thread.' completes to [
'target.process.thread.step-avoid-regexp', 'target.process.thread.trace-thread']."""
self.complete_from_to('settings set target.process.thread.',
['target.process.thread.step-avoid-regexp',
'target.process.thread.trace-thread'])
def test_thread_plan_discard(self):
self.build()
(_, _, thread, _) = lldbutil.run_to_source_breakpoint(self,
'ptr_foo', lldb.SBFileSpec("main.cpp"))
self.assertTrue(thread)
self.complete_from_to('thread plan discard ', 'thread plan discard ')
source_path = os.path.join(self.getSourceDir(), "thread_plan_script.py")
self.runCmd("command script import '%s'"%(source_path))
self.runCmd("thread step-scripted -C thread_plan_script.PushPlanStack")
self.complete_from_to('thread plan discard ', 'thread plan discard 1')
self.runCmd('thread plan discard 1')
def test_target_space(self):
"""Test that 'target ' completes to ['create', 'delete', 'list',
'modules', 'select', 'stop-hook', 'variable']."""
self.complete_from_to('target ',
['create',
'delete',
'list',
'modules',
'select',
'stop-hook',
'variable'])
def test_target_modules_dump_line_table(self):
"""Tests source file completion by completing the line-table argument."""
self.build()
self.dbg.CreateTarget(self.getBuildArtifact("a.out"))
self.complete_from_to('target modules dump line-table main.cp',
['main.cpp'])
def test_target_modules_load_aout(self):
"""Tests modules completion by completing the target modules load argument."""
self.build()
self.dbg.CreateTarget(self.getBuildArtifact("a.out"))
self.complete_from_to('target modules load a.ou',
['a.out'])
def test_target_modules_search_paths_insert(self):
# Completion won't work without a valid target.
self.complete_from_to("target modules search-paths insert ", "target modules search-paths insert ")
self.build()
target = self.dbg.CreateTarget(self.getBuildArtifact('a.out'))
self.assertTrue(target, VALID_TARGET)
self.complete_from_to("target modules search-paths insert ", "target modules search-paths insert ")
self.runCmd("target modules search-paths add a b")
self.complete_from_to("target modules search-paths insert ", "target modules search-paths insert 0")
# Completion only works for the first arg.
self.complete_from_to("target modules search-paths insert 0 ", "target modules search-paths insert 0 ")
def test_target_create_dash_co(self):
"""Test that 'target create --co' completes to 'target variable --core '."""
self.complete_from_to('target create --co', 'target create --core ')
def test_target_va(self):
"""Test that 'target va' completes to 'target variable '."""
self.complete_from_to('target va', 'target variable ')
def test_common_completion_thread_index(self):
subcommands = ['continue', 'info', 'exception', 'select',
'step-in', 'step-inst', 'step-inst-over', 'step-out', 'step-over', 'step-script']
# Completion should do nothing without threads.
for subcommand in subcommands:
self.complete_from_to('thread ' + subcommand + ' ',
'thread ' + subcommand + ' ')
self.build()
lldbutil.run_to_source_breakpoint(self, '// Break here', lldb.SBFileSpec("main.cpp"))
# At least we have the thread at the index of 1 now.
for subcommand in subcommands:
self.complete_from_to('thread ' + subcommand + ' ', ['1'])
def test_common_completion_type_category_name(self):
subcommands = ['delete', 'list', 'enable', 'disable', 'define']
for subcommand in subcommands:
self.complete_from_to('type category ' + subcommand + ' ', ['default'])
self.complete_from_to('type filter add -w ', ['default'])
def test_command_argument_completion(self):
"""Test completion of command arguments"""
self.complete_from_to("watchpoint set variable -", ["-w", "-s"])
self.complete_from_to('watchpoint set variable -w', 'watchpoint set variable -w ')
self.complete_from_to("watchpoint set variable --", ["--watch", "--size"])
self.complete_from_to("watchpoint set variable --w", "watchpoint set variable --watch")
self.complete_from_to('watchpoint set variable -w ', ['read', 'write', 'read_write'])
self.complete_from_to("watchpoint set variable --watch ", ["read", "write", "read_write"])
self.complete_from_to("watchpoint set variable --watch w", "watchpoint set variable --watch write")
self.complete_from_to('watchpoint set variable -w read_', 'watchpoint set variable -w read_write')
# Now try the same thing with a variable name (non-option argument) to
# test that getopts arg reshuffling doesn't confuse us.
self.complete_from_to("watchpoint set variable foo -", ["-w", "-s"])
self.complete_from_to('watchpoint set variable foo -w', 'watchpoint set variable foo -w ')
self.complete_from_to("watchpoint set variable foo --", ["--watch", "--size"])
self.complete_from_to("watchpoint set variable foo --w", "watchpoint set variable foo --watch")
self.complete_from_to('watchpoint set variable foo -w ', ['read', 'write', 'read_write'])
self.complete_from_to("watchpoint set variable foo --watch ", ["read", "write", "read_write"])
self.complete_from_to("watchpoint set variable foo --watch w", "watchpoint set variable foo --watch write")
self.complete_from_to('watchpoint set variable foo -w read_', 'watchpoint set variable foo -w read_write')
def test_command_script_delete(self):
self.runCmd("command script add -h test_desc -f none -s current usercmd1")
self.check_completion_with_desc('command script delete ', [['usercmd1', 'test_desc']])
def test_command_delete(self):
self.runCmd(r"command regex test_command s/^$/finish/ 's/([0-9]+)/frame select %1/'")
self.complete_from_to('command delete test_c', 'command delete test_command')
def test_command_unalias(self):
self.complete_from_to('command unalias ima', 'command unalias image')
def test_completion_description_commands(self):
"""Test descriptions of top-level command completions"""
self.check_completion_with_desc("", [
["command", "Commands for managing custom LLDB commands."],
["breakpoint", "Commands for operating on breakpoints (see 'help b' for shorthand.)"]
])
self.check_completion_with_desc("pl", [
["platform", "Commands to manage and create platforms."],
["plugin", "Commands for managing LLDB plugins."]
])
# Just check that this doesn't crash.
self.check_completion_with_desc("comman", [])
self.check_completion_with_desc("non-existent-command", [])
def test_completion_description_command_options(self):
"""Test descriptions of command options"""
# Short options
self.check_completion_with_desc("breakpoint set -", [
["-h", "Set the breakpoint on exception catcH."],
["-w", "Set the breakpoint on exception throW."]
])
# Long options.
self.check_completion_with_desc("breakpoint set --", [
["--on-catch", "Set the breakpoint on exception catcH."],
["--on-throw", "Set the breakpoint on exception throW."]
])
# Ambiguous long options.
self.check_completion_with_desc("breakpoint set --on-", [
["--on-catch", "Set the breakpoint on exception catcH."],
["--on-throw", "Set the breakpoint on exception throW."]
])
# Unknown long option.
self.check_completion_with_desc("breakpoint set --Z", [
])
def test_common_completion_frame_index(self):
self.build()
lldbutil.run_to_source_breakpoint(self, '// Break here', lldb.SBFileSpec("main.cpp"))
self.complete_from_to('frame select ', ['0'])
self.complete_from_to('thread backtrace -s ', ['0'])
def test_frame_recognizer_delete(self):
self.runCmd("frame recognizer add -l py_class -s module_name -n recognizer_name")
self.check_completion_with_desc('frame recognizer delete ', [['0', 'py_class, module module_name, symbol recognizer_name']])
def test_platform_install_local_file(self):
self.complete_from_to('platform target-install main.cp', 'platform target-install main.cpp')
@expectedFailureAll(oslist=["windows"], bugnumber="llvm.org/pr24489")
def test_symbol_name(self):
self.build()
self.dbg.CreateTarget(self.getBuildArtifact("a.out"))
self.complete_from_to('breakpoint set -n Fo',
'breakpoint set -n Foo::Bar(int,\\ int)',
turn_off_re_match=True)
# No completion for Qu because the candidate is
# (anonymous namespace)::Quux().
self.complete_from_to('breakpoint set -n Qu', '')
def test_completion_type_formatter_delete(self):
self.runCmd('type filter add --child a Aoo')
self.complete_from_to('type filter delete ', ['Aoo'])
self.runCmd('type filter add --child b -x Boo')
self.complete_from_to('type filter delete ', ['Boo'])
self.runCmd('type format add -f hex Coo')
self.complete_from_to('type format delete ', ['Coo'])
self.runCmd('type format add -f hex -x Doo')
self.complete_from_to('type format delete ', ['Doo'])
self.runCmd('type summary add -c Eoo')
self.complete_from_to('type summary delete ', ['Eoo'])
self.runCmd('type summary add -x -c Foo')
self.complete_from_to('type summary delete ', ['Foo'])
self.runCmd('type synthetic add Goo -l test')
self.complete_from_to('type synthetic delete ', ['Goo'])
self.runCmd('type synthetic add -x Hoo -l test')
self.complete_from_to('type synthetic delete ', ['Hoo'])
@skipIf(archs=no_match(['x86_64']))
def test_register_read_and_write_on_x86(self):
"""Test the completion of the commands register read and write on x86"""
# The tab completion for "register read/write" won't work without a running process.
self.complete_from_to('register read ',
'register read ')
self.complete_from_to('register write ',
'register write ')
self.build()
self.main_source_spec = lldb.SBFileSpec("main.cpp")
lldbutil.run_to_source_breakpoint(self, '// Break here', self.main_source_spec)
# test cases for register read
self.complete_from_to('register read ',
['rax',
'rbx',
'rcx'])
self.complete_from_to('register read r',
['rax',
'rbx',
'rcx'])
self.complete_from_to('register read ra',
'register read rax')
# register read can take multiple register names as arguments
self.complete_from_to('register read rax ',
['rax',
'rbx',
'rcx'])
# complete with prefix '$'
self.completions_match('register read $rb',
['$rbx',
'$rbp'])
self.completions_match('register read $ra',
['$rax'])
self.complete_from_to('register read rax $',
['\$rax',
'\$rbx',
'\$rcx'])
self.complete_from_to('register read $rax ',
['rax',
'rbx',
'rcx'])
# test cases for register write
self.complete_from_to('register write ',
['rax',
'rbx',
'rcx'])
self.complete_from_to('register write r',
['rax',
'rbx',
'rcx'])
self.complete_from_to('register write ra',
'register write rax')
self.complete_from_to('register write rb',
['rbx',
'rbp'])
# register write can only take exact one register name as argument
self.complete_from_to('register write rbx ',
[])
def test_common_completion_target_stophook_ids(self):
subcommands = ['delete', 'enable', 'disable']
for subcommand in subcommands:
self.complete_from_to('target stop-hook ' + subcommand + ' ',
'target stop-hook ' + subcommand + ' ')
self.build()
self.dbg.CreateTarget(self.getBuildArtifact("a.out"))
self.runCmd('target stop-hook add test DONE')
for subcommand in subcommands:
self.complete_from_to('target stop-hook ' + subcommand + ' ',
'target stop-hook ' + subcommand + ' 1')
# Completion should work only on the first argument.
for subcommand in subcommands:
self.complete_from_to('target stop-hook ' + subcommand + ' 1 ',
'target stop-hook ' + subcommand + ' 1 ')
def test_common_completion_type_language(self):
self.complete_from_to('type category -l ', ['c'])
def test_target_modules_load_dash_u(self):
self.build()
target = self.dbg.CreateTarget(self.getBuildArtifact("a.out"))
self.complete_from_to('target modules load -u ', [target.GetModuleAtIndex(0).GetUUIDString()])
def test_complete_breakpoint_with_ids(self):
"""These breakpoint subcommands should be completed with a list of breakpoint ids"""
subcommands = ['enable', 'disable', 'delete', 'modify', 'name add', 'name delete', 'write']
# The tab completion here is unavailable without a target
for subcommand in subcommands:
self.complete_from_to('breakpoint ' + subcommand + ' ',
'breakpoint ' + subcommand + ' ')
self.build()
target = self.dbg.CreateTarget(self.getBuildArtifact('a.out'))
self.assertTrue(target, VALID_TARGET)
bp = target.BreakpointCreateByName('main', 'a.out')
self.assertTrue(bp)
self.assertEqual(bp.GetNumLocations(), 1)
for subcommand in subcommands:
self.complete_from_to('breakpoint ' + subcommand + ' ',
['1'])
bp2 = target.BreakpointCreateByName('Bar', 'a.out')
self.assertTrue(bp2)
self.assertEqual(bp2.GetNumLocations(), 1)
for subcommand in subcommands:
self.complete_from_to('breakpoint ' + subcommand + ' ',
['1',
'2'])
for subcommand in subcommands:
self.complete_from_to('breakpoint ' + subcommand + ' 1 ',
['1',
'2'])
def test_complete_breakpoint_with_names(self):
self.build()
target = self.dbg.CreateTarget(self.getBuildArtifact('a.out'))
self.assertTrue(target, VALID_TARGET)
# test breakpoint read dedicated
self.complete_from_to('breakpoint read -N ', 'breakpoint read -N ')
self.complete_from_to('breakpoint read -f breakpoints.json -N ', ['mm'])
self.complete_from_to('breakpoint read -f breakpoints.json -N n', 'breakpoint read -f breakpoints.json -N n')
self.complete_from_to('breakpoint read -f breakpoints_invalid.json -N ', 'breakpoint read -f breakpoints_invalid.json -N ')
        # test common breakpoint name completion
bp1 = target.BreakpointCreateByName('main', 'a.out')
self.assertTrue(bp1)
self.assertEqual(bp1.GetNumLocations(), 1)
self.complete_from_to('breakpoint set -N n', 'breakpoint set -N n')
self.assertTrue(bp1.AddNameWithErrorHandling("nn"))
self.complete_from_to('breakpoint set -N ', 'breakpoint set -N nn')
| [
"[email protected]"
] | |
d6d0d58f05ad22c9474ef9804ec088549a68f841 | 5b6b2018ab45cc4710cc5146040bb917fbce985f | /200_longest-palindromic-substring/longest-palindromic-substring.py | 60710ba54ef2ad0d3d20d4f30fd1db4aec65a148 | [] | no_license | ultimate010/codes_and_notes | 6d7c7d42dcfd84354e6fcb5a2c65c6029353a328 | 30aaa34cb1c840f7cf4e0f1345240ac88b8cb45c | refs/heads/master | 2021-01-11T06:56:11.401869 | 2016-10-30T13:46:39 | 2016-10-30T13:46:39 | 72,351,982 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,016 | py | # coding:utf-8
'''
@Copyright:LintCode
@Author: ultimate010
@Problem: http://www.lintcode.com/problem/longest-palindromic-substring
@Language: Python
@Datetime: 16-06-28 14:08
'''
class Solution:
# @param {string} s input string
# @return {string} the longest palindromic substring
def longestPalindrome(self, s):
# Write your code here
n = len(s)
if n <= 1:
return s
        m = 0  # start at 0 so a single-character palindrome can be recorded
        ret = ''
for i in range(1, 2*n): # at least 2 char
if i & 1 == 1: # odd
t = i / 2
j = t
else: # even
t = i / 2 - 1
j = t + 1
while t >= 0 and j < n and s[t] == s[j]:
t -= 1
j += 1
# print t, j
if t == i:
pass # one char
else:
if j - t - 1 > m:
m = j - t - 1
ret = s[t + 1: j]
return ret | [
"[email protected]"
] | |
3fa5ddad1d1612a8b0d4168c59f4f0549f95f6ff | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02937/s033330652.py | 6b68eb0299616b86752097386250b1b8f9320039 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 473 | py | import bisect
s = input()
t = input()
n = len(s)
m = len(t)
indices = [[] for _ in range(26)]
for i in range(n):
indices[ord(s[i]) - ord('a')].append(i)
for i in range(n):
indices[ord(s[i]) - ord('a')].append(i + n)
ans = 0
p = 0
for i in range(m):
c = ord(t[i]) - ord('a')
if len(indices[c]) == 0:
print(-1)
exit()
p = indices[c][bisect.bisect_left(indices[c], p)] + 1
if p >= n:
p -= n
ans += n
ans += p
print(ans) | [
"[email protected]"
] | |
171918eacf53dc68cdf837f4e9b33d81ba426350 | a533010ba7e74422c5c7c0193ea2d880e427cb9d | /Python_auto_operation/bin/mlogvis | 69b36f81947475e66d1dde10eb0cb5c3e1d112c6 | [] | no_license | gateray/learning_python | 727b3effe4875f27c86c3e5e66655905f3d5d681 | bc08a58f3a5c1f1db884398efa9d27834514199f | refs/heads/master | 2021-01-19T06:31:01.616421 | 2016-06-30T07:39:23 | 2016-06-30T07:39:23 | 62,290,273 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 345 | #!/home/gateray/PycharmProjects/Python_auto_operation/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'mtools==1.1.9','console_scripts','mlogvis'
__requires__ = 'mtools==1.1.9'
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.exit(
load_entry_point('mtools==1.1.9', 'console_scripts', 'mlogvis')()
)
| [
"gateray.example.com"
] | gateray.example.com |
|
32f6b9fff9acce58bdc331e9f0ec63770932d681 | 0a639bda0058ac76cca97d6123f6c39229f202f1 | /companies/models.py | 9092279495371d6cf60c4e5d5b187922621a9bb7 | [] | no_license | sanchitbareja/occuhunt-web | bb86e630c2caff5815b164435464424b5cf83375 | fab152e2ebae3f4dd5c8357696893065bdd30504 | refs/heads/master | 2020-05-21T01:15:48.973953 | 2015-01-13T04:03:18 | 2015-01-13T04:03:18 | 12,552,735 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,089 | py | from django.db import models
import datetime
# Create your models here.
class CompanyType(models.Model):
type = models.CharField(max_length = 256)
def __unicode__(self):
return self.type
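# (stored value, human-readable label) pairs for Company.organization_type below.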
ORGANIZATION_TYPES_LIST = (
('Accounting Services', 'Accounting Services'),
('Aerospace/Defense', 'Aerospace/Defense'),
('Agriculture', 'Agriculture'),
('Architecture/Planning', 'Architecture/Planning'),
('Arts and Entertainment', 'Arts and Entertainment'),
('Automotive/Transportation Manufacturing', 'Automotive/Transportation Manufacturing'),
('Biotech/Pharmaceuticals','Biotech/Pharmaceuticals'),
('Chemicals','Chemicals'),
('Computer Hardware','Computer Hardware'),
('Computer Software', 'Computer Software'),
('Consumer Products', 'Consumer Products'),
('Diversified Services', 'Diversified Services'),
('Education/Higher Education', 'Education/Higher Education'),
('Electronics and Misc. Tech', 'Electronics and Misc. Tech'),
('Energy', 'Energy'),
('Engineering', 'Engineering'),
('Financial Services', 'Financial Services'),
('Food, Beverage and Tobacco', 'Food, Beverage and Tobacco'),
('Government', 'Government'),
('Health Products and Services', 'Health Products and Services'),
('Hospital/Healthcare', 'Hospital/Healthcare'),
('Insurance', 'Insurance'),
('Law/Law Related', 'Law/Law Related'),
('Leisure and Travel', 'Leisure and Travel'),
('Materials and Construction', 'Materials and Construction'),
('Media', 'Media'),
('Metals and Mining', 'Metals and Mining'),
('Non-Profit and Social Services', 'Non-Profit and Social Services'),
('Other Manufacturing', 'Other Manufacturing'),
('Professional, Technical, and Administrative Services', 'Professional, Technical, and Administrative Services'),
('Real Estate', 'Real Estate'),
('Retail and Wholesale Trade', 'Retail and Wholesale Trade'),
('Telecommunications', 'Telecommunications'),
('Transportation Services', 'Transportation Services'),
('Utilities', 'Utilities'),
('Other', 'Other'),
)
class Company(models.Model):
name = models.CharField(max_length=512)
founded = models.CharField(max_length=64, null=True, blank=True)
funding = models.CharField(max_length=64, null=True, blank=True)
website = models.URLField(max_length=512, null=True, blank=True)
careers_website = models.URLField(max_length=512, null=True, blank=True)
logo = models.URLField(max_length=512, null=True, blank=True)
banner_image = models.URLField(max_length=512, null=True, blank=True)
number_employees = models.CharField(max_length=48, null=True, blank=True)
organization_type = models.CharField(max_length=512, null=True, blank=True, choices=ORGANIZATION_TYPES_LIST)
company_description = models.TextField(null=True, blank=True)
competitors = models.CharField(max_length=512, null=True, blank=True)
avg_salary = models.CharField(max_length=64, null=True, blank=True)
location = models.CharField(max_length=512, null=True, blank=True)
intro_video = models.TextField(null=True, blank=True)
timestamp = models.DateTimeField(auto_now=False, auto_now_add=True)
def __unicode__(self):
return self.name | [
"[email protected]"
] | |
1c165f1ccad36c215f56ef5613cea7ee2101c812 | 1facfd9d94b0f08ddde2834c717bda55359c2e35 | /Python programming for the absolute beginner - Michael Dawson/Chapter 8 - OOP beginning/8.3.py | 58bad4a7400168c26ceaa2894583a1b3bd76ba4e | [] | no_license | echpochmak/ppftab | 9160383c1d34a559b039af5cd1451a18f2584549 | f5747d87051d837eca431f782491ec9ba3b44626 | refs/heads/master | 2021-09-15T07:23:06.581750 | 2018-05-28T14:33:13 | 2018-05-28T14:33:13 | 261,880,781 | 1 | 0 | null | 2020-05-06T21:18:13 | 2020-05-06T21:18:13 | null | UTF-8 | Python | false | false | 3,000 | py | # The material form the book in polish
# Opiekun zwierzaka
# Wirtualny pupil, którym należy się opiekować
class Critter(object):
"""Wirtualny pupil"""
def __init__(self, name, hunger = 0, boredom = 0):
self.name = name
self.hunger = hunger
self.boredom = boredom
def __str__(self):
rep = "The valuse of your pet:\n"
rep += "The hunger = " + str(self.hunger)
rep += "\nThe boredom = " + str(self.boredom)
return rep
def __pass_time(self):
self.hunger += 1
self.boredom += 1
@property
def mood(self):
unhappiness = self.hunger + self.boredom
if unhappiness < 5:
m = "szczęśliwy"
elif 5 <= unhappiness <= 10:
m = "zadowolony"
elif 11 <= unhappiness <= 15:
m = "podenerwowany"
else:
m = "wściekły"
return m
def talk(self):
print("Nazywam się", self.name, "i jestem", self.mood, "teraz.\n")
self.__pass_time()
def eat(self, food = 4):
print("""
How much food would you like to serve to you pet?
\n 1. Type 1 for one snack.
\n 2. Type 2 for 2 snacks.
\n 3. Type 3 for 3 snacks.
\n 4. Type 4 for 4 snacks.
\n 5. Type 5 for 5 snacks.
""", end = " ")
food = int(input("Wybierasz: "))
print("Mniam, mniam. Dziękuję.")
self.hunger -= food
if self.hunger < 0:
self.hunger = 0
self.__pass_time()
def play(self, fun = 4):
print("""
How log would you like to play with your pet?
\n 1. Type 1 for one minute.
\n 2. Type 2 for 2 minutes.
\n 3. Type 3 for 3 minutes.
\n 4. Type 4 for 4 minutes.
\n 5. Type 5 for 5 minutes.
""", end = " ")
fun = int(input("Wybierasz: "))
print("Hura!")
self.boredom -= fun
if self.boredom < 0:
self.boredom = 0
self.__pass_time()
def main():
crit_name = input("Jak chcesz nazwać swojego zwierzaka?: ")
crit = Critter(crit_name)
choice = None
while choice != "0":
print \
("""
Opiekun zwierzaka
0 - zakończ
1 - słuchaj swojego zwierzaka
2 - nakarm swojego zwierzaka
3 - pobaw się ze swoim zwierzakiem
4 - show the values of your pet
""")
choice = input("Wybierasz: ")
print()
# wyjdź z pętli
if choice == "0":
print("Do widzenia.")
# słuchaj swojego zwierzaka
elif choice == "1":
crit.talk()
# nakarm swojego zwierzaka
elif choice == "2":
crit.eat()
# pobaw się ze swoim zwierzakiem
elif choice == "3":
crit.play()
elif choice == "4":
print(crit)
# nieznany wybór
else:
print("\nNiestety,", choice, "nie jest prawidłowym wyborem.")
main()
input("\n\nAby zakończyć program, naciśnij klawisz Enter.")
| [
"[email protected]"
] | |
26aa690ec62cb1867208234cd0c19ab8f7a9663a | 3d19e1a316de4d6d96471c64332fff7acfaf1308 | /Users/S/stefanw/kommunalverwaltung_nrw.py | aef5bdefc3a1b54035f7bc556bd8da592cd801c6 | [] | no_license | BerilBBJ/scraperwiki-scraper-vault | 4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc | 65ea6a943cc348a9caf3782b900b36446f7e137d | refs/heads/master | 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,508 | py | import scraperwiki
import lxml.html as lh
from lxml import etree
LIST_URL = 'http://www3.chamaeleon.de/komnav/kundensuchergebnis.php?Ort=&PLZ=%s&OBGM=&Bundesland=Nordrhein-Westfalen&anfrage=imnrw'
DETAIL_URL = 'http://www3.chamaeleon.de/komnav/kundensuchedetail.php?schluessel=%s&anfrage=imnrw&PLZ=%s&Ort=&Bundesland=Nordrhein-Westfalen&OBGM=&single_search='
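# NRW postcodes begin with 3, 4 or 5; yield every two-digit prefix from 30 to 59
# so the search form can be queried in chunks.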
def plz_generator():
for i in (3,4,5):
for j in range(10):
yield "%s%s" % (i, j)
kommune = []
for plz in plz_generator():
print plz
content = scraperwiki.scrape(LIST_URL % plz)
content = content.decode('latin1')
if 'Leider keinen Datensatz gefunden' in content:
continue
doc = lh.fromstring(content)
for row in doc.cssselect('tr'):
td = row.cssselect('td')
if not td:
continue
kommune.append({
'name': td[0].text_content().strip(),
'plz': td[1].text_content().strip(),
'head': td[3].text_content().strip(),
'key': td[4].cssselect('a')[0].attrib['href'].split('schluessel=')[1].split('&anfrage=')[0],
'source': td[4].cssselect('a')[0].attrib['href']
})
wanted = {
u'': None,
u'Stadt-/Gemeinde-/Kreisname': None,
u'PLZ': None,
u'Bundesland': None,
u'Bev\xf6lkerungsdichte Einwohner pro km\xb2': None,
u'(Ober-)b\xfcrgermeisterin/Landr\xe4tin/Oberkreisdirektorinbzw.(Ober-)b\xfcrgermeister/Landrat/Oberkreisdirektor': None,
u'EMail': 'email',
u'Postanschrift': 'address',
u'Regierungsbezirk': 'gov_area',
u'Fax': 'fax',
u'Telefonzentrale': 'phone',
u'Hausanschrift (Verwaltungssitz)': 'address2',
u'PLZ-Hausanschrift': 'plz2',
u'Ausl\xe4nderanteil (in %)': 'immigrant_percentage',
u'EinwohnerInnen': 'population',
u'davon weiblich/m\xe4nnlich (in %)': 'female_male_percentage',
u'Fl\xe4che (in km\xb2)': 'area',
u'Anzahl Besch\xe4ftigte': 'employees',
u'Homepage der Kommune': 'url'
}
print repr(wanted.keys())
for kom in kommune:
for v in wanted.values():
if v is not None:
kom[v] = None
content = scraperwiki.scrape(DETAIL_URL % (kom['key'], kom['plz']))
content = content.decode('latin1')
doc = lh.fromstring(content)
for row in doc.cssselect('tr'):
td = row.cssselect('td')
if not td:
continue
key = td[0].text_content().split(':')[0].strip()
if wanted.get(key, None) is not None:
kom[wanted[key]] = td[1].text_content().strip()
elif key not in wanted:
print repr(key)
print repr(kom)
    scraperwiki.sqlite.save(['key'], kom, table_name='nrw_kommune')
"[email protected]"
] | |
323a4c7eddab68f041ff6fe4f9828b26f769b0ca | 525c6a69bcf924f0309b69f1d3aff341b06feb8e | /sunyata/backend/chainer/core/map/power.py | f883795e8d4b0bb7d086f28a97f498406b350ed9 | [] | no_license | knighton/sunyata_2017 | ba3af4f17184d92f6277d428a81802ac12ef50a4 | 4e9d8e7d5666d02f9bb0aa9dfbd16b7a8e97c1c8 | refs/heads/master | 2021-09-06T13:19:06.341771 | 2018-02-07T00:28:07 | 2018-02-07T00:28:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 406 | py | from chainer import functions as F
from ....base.core.map.power import BasePowerAPI
class ChainerPowerAPI(BasePowerAPI):
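    """Elementwise power-related ops (pow, sqrt, rsqrt, square) backed by chainer.functions."""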
def __init__(self):
BasePowerAPI.__init__(self)
def pow(self, x, a):
return F.math.basic_math.pow(x, a)
def rsqrt(self, x):
return F.rsqrt(x)
def sqrt(self, x):
return F.sqrt(x)
def square(self, x):
return F.square(x)
| [
"[email protected]"
] | |
d9bec0f6c57a3c8732298f0a3c6a95177cd043cd | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/tree-big-6378.py | 52a0e4f72cbae1d4e1737577e4ff7f5cd9ab764f | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,288 | py | # Binary-search trees
class TreeNode(object):
value:int = 0
left:"TreeNode" = None
right:"TreeNode" = None
def insert(self:"TreeNode", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode(x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode(x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode2(object):
value:int = 0
value2:int = 0
left:"TreeNode2" = None
left2:"TreeNode2" = None
right:"TreeNode2" = None
right2:"TreeNode2" = None
def insert(self:"TreeNode2", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode2(x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode2(x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode2", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode2(x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode2(x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode2", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode2", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode3(object):
value:int = 0
value2:int = 0
value3:int = 0
left:"TreeNode3" = None
left2:"TreeNode3" = None
left3:"TreeNode3" = None
right:"TreeNode3" = None
right2:"TreeNode3" = None
right3:"TreeNode3" = None
def insert(self:"TreeNode3", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode3(x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode3(x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode3", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode3(x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode3(x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert3(self:"TreeNode3", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode3(x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode3(x, x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode3", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode3", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains3(self:"TreeNode3", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode4(object):
value:int = 0
value2:int = 0
value3:int = 0
value4:int = 0
left:"TreeNode4" = None
left2:"TreeNode4" = None
left3:"TreeNode4" = None
left4:"TreeNode4" = None
right:"TreeNode4" = None
right2:"TreeNode4" = None
right3:"TreeNode4" = None
right4:"TreeNode4" = None
def insert(self:"TreeNode4", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode4", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert3(self:"TreeNode4", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert4(self:"TreeNode4", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode4", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode4", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains3(self:"TreeNode4", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains4(self:"TreeNode4", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode5(object):
value:int = 0
value2:int = 0
value3:int = 0
value4:int = 0
value5:int = 0
left:"TreeNode5" = None
left2:"TreeNode5" = None
left3:"TreeNode5" = None
left4:"TreeNode5" = None
left5:"TreeNode5" = None
right:"TreeNode5" = None
right2:"TreeNode5" = None
right3:"TreeNode5" = None
right4:"TreeNode5" = None
right5:"TreeNode5" = None
def insert(self:"TreeNode5", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode5", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert3(self:"TreeNode5", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert4(self:"TreeNode5", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert5(self:"TreeNode5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode5", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode5", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains3(self:"TreeNode5", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains4(self:"TreeNode5", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains5(self:"TreeNode5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class Tree(object):
root:TreeNode = None
size:int = 0
def insert(self:"Tree", x:int) -> object:
if self.root is None:
self.root = makeNode(x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree2(object):
root:TreeNode2 = None
root2:TreeNode2 = None
size:int = 0
size2:int = 0
def insert(self:"Tree2", x:int) -> object:
if self.root is None:
self.root = makeNode2(x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree2", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode2(x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree2", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree2", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree3(object):
root:TreeNode3 = None
root2:TreeNode3 = None
root3:TreeNode3 = None
size:int = 0
size2:int = 0
size3:int = 0
def insert(self:"Tree3", x:int) -> object:
if self.root is None:
self.root = makeNode3(x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree3", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode3(x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert3(self:"Tree3", x:int, x2:int, x3:int) -> object:
if self.root is None:
self.root = makeNode3(x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree3", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree3", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains3(self:"Tree3", x:int, x2:int, x3:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree4(object):
root:TreeNode4 = None
root2:TreeNode4 = None
root3:TreeNode4 = None
root4:TreeNode4 = None
size:int = 0
size2:int = 0
size3:int = 0
size4:int = 0
def insert(self:"Tree4", x:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree4", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert3(self:"Tree4", x:int, x2:int, x3:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
    def insert4(self:"Tree4", x:int, x2:int, x3:int, x4:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree4", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree4", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains3(self:"Tree4", x:int, x2:int, x3:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains4(self:"Tree4", x:int, x2:int, x3:int, x4:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree5(object):
root:TreeNode5 = None
root2:TreeNode5 = None
root3:TreeNode5 = None
root4:TreeNode5 = None
root5:TreeNode5 = None
size:int = 0
size2:int = 0
size3:int = 0
size4:int = 0
size5:int = 0
def insert(self:"Tree5", x:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree5", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert3(self:"Tree5", x:int, x2:int, x3:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert4(self:"Tree5", x:int, x2:int, x3:int, x4:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert5(self:"Tree5", x:int, x2:int, x3:int, x4:int, x5:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree5", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree5", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains3(self:"Tree5", x:int, x2:int, x3:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains4(self:"Tree5", x:int, x2:int, x3:int, x4:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains5(self:"Tree5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def makeNode(x: int) -> TreeNode:
b:TreeNode = None
b = TreeNode()
b.value = x
return b
def makeNode2(x: int, x2: int) -> TreeNode2:
b:TreeNode2 = None
b2:TreeNode2 = None
b = TreeNode2()
b.value = x
return b
def makeNode3(x: int, x2: int, x3: int) -> TreeNode3:
b:TreeNode3 = None
b2:TreeNode3 = None
b3:TreeNode3 = None
b = TreeNode3()
b.value = x
return b
def makeNode4(x: int, x2: int, x3: int, x4: int) -> TreeNode4:
b:TreeNode4 = None
b2:TreeNode4 = None
b3:TreeNode4 = None
b4:TreeNode4 = None
b = TreeNode4()
b.value = x
return b
def makeNode5(x: int, x2: int, x3: int, x4: int, x5: int) -> TreeNode5:
b:TreeNode5 = None
b2:TreeNode5 = None
b3:TreeNode5 = None
b4:TreeNode5 = None
b5:TreeNode5 = None
b = TreeNode5()
b.value = x
return b
# Input parameters
n:int = 100
n2:int = 100
n3:int = 100
n4:int = 100
n5:int = 100
c:int = 4
c2:int = 4
c3:int = 4
c4:int = 4
c5:int = 4
# Data
t:Tree = None
t2:Tree = None
t3:Tree = None
t4:Tree = None
t5:Tree = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
k:int = 37813
k2:int = 37813
k3:int = 37813
k4:int = 37813
k5:int = 37813
# Crunch
t = Tree()
while i < n:
t.insert(k)
k = (k * 37813) % 37831
if i % c != 0:
t.insert(i)
i = i + 1
print(t.size)
for i in [4, 8, 15, 16, 23, 42]:
if t.contains(i):
print(i)
| [
"[email protected]"
] | |
5f578f05e9eeca2e7a85d76fb6cb42a0d606f54b | 8ec910de801b424540abb4e6e955838a287663b6 | /Bucles/ManoMoneda.py | 7ea1f29e4f99d1de6ef426446f1d5ad4a59e551a | [] | no_license | hector81/Aprendiendo_Python | f4f211ace32d334fb6b495b1b8b449d83a7f0bf8 | 9c73f32b0c82f08e964472af1923f66c0fbb4c22 | refs/heads/master | 2022-12-28T03:41:20.378415 | 2020-09-28T09:15:03 | 2020-09-28T09:15:03 | 265,689,885 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,520 | py | # El programa simulará el juego de adivinar en qué mano está la moneda.
# It asks the player how many attempts they want to play,
# then tells them whether they guessed which hand holds the coin.
import random
def introducirNumero():
while True:
try:
            numeroVecesPartida = int(input("Please enter a number: "))
if numeroVecesPartida > 0:
                return numeroVecesPartida
except ValueError:
print("Oops! No era válido. Intente nuevamente...")
print("Por favor ingrese un número de partidas: ")
numeroVecesPartida = introducirNumero()
while numeroVecesPartida > 0:
    print('Which hand is the coin in? Press 1 if you think it is the right hand or 2 for the left hand')
    numeroEleccion = int(input("Choose a hand: "))
if numeroEleccion > 2 or numeroEleccion < 1:
        print('You must enter 1 for right or 2 for left. Other numbers are not valid')
else:
numeroAleatorio = random.randint(1, 2)
if numeroAleatorio == numeroEleccion:
            print('You guessed it')
numeroVecesPartida = 0
else:
if (numeroVecesPartida - 1) == 0:
                print('You did not guess it and there are no attempts left')
numeroVecesPartida = 0
else:
                print('You did not guess it. Try again. You have ' + str(numeroVecesPartida - 1) + ' attempts left')
numeroVecesPartida = numeroVecesPartida - 1
| [
"[email protected]"
] | |
6b57aa51eb80cb2ba879e3fe19dc47e190d2b60e | 65cefe621c2444d9b793d7969d2e2c3ff54373d1 | /analyze/api.py | c831cbc8c9d0c07781dd86cede87b9b4f346509d | [
"Apache-2.0"
] | permissive | leekangsan/HanziSRS | 01585e694fbf81428085b8a162acf101f7f5bec1 | 50f84a9171b61df3305e8922b645b553e895a509 | refs/heads/master | 2020-03-29T05:19:19.583429 | 2018-04-28T04:37:35 | 2018-04-28T04:37:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import requests
from bs4 import BeautifulSoup
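# Scrape jukuu.com for bilingual example sentences: table rows with class 'c'
# hold the Chinese sentence and rows with class 'e' its English translation.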
def jukuu(word):
params = {
'q': word
}
res = requests.get('http://www.jukuu.com/search.php', params=params)
soup = BeautifulSoup(res.text, 'html.parser')
for c, e in zip(soup.find_all('tr', {'class':'c'}), soup.find_all('tr', {'class':'e'})):
yield {
'Chinese': c.text.strip(),
'English': e.text.strip()
}
if __name__ == '__main__':
print(list(jukuu('寒假')))
| [
"[email protected]"
] | |
296283618e61a02a6f6c8c8516a5ae54f984803f | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02766/s873846892.py | 1b5165713fefb3469a152f09d2a75411c31b81ee | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 241 | py |
def main():
N, K = map(int, input().split())
ans = base10to(N, K)
print(len(ans))
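# base10to renders n as a digit string in base b, so len(ans) above is the
# number of digits of N written in base K.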
def base10to(n, b):
if (int(n/b)):
return base10to(int(n/b), b) + str(n%b)
return str(n%b)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
068d8dce5daa9ac6705c8b77bd447240a513c227 | a38bf459ae380f67e0de22f7106a8df4385a7076 | /tests/integration/goldens/logging/samples/generated_samples/logging_v2_generated_config_service_v2_list_views_sync.py | b273c465d3ec976b018c54a7b83e2a4218b81327 | [
"Apache-2.0"
] | permissive | googleapis/gapic-generator-python | 73ce9d52f6f5bb2652d49b237b24263d6637b1da | 4eee26181e8db9fb5144eef5a76f178c1594e48a | refs/heads/main | 2023-09-04T11:12:14.728757 | 2023-09-02T10:34:44 | 2023-09-02T10:34:44 | 129,809,857 | 116 | 65 | Apache-2.0 | 2023-09-12T18:57:01 | 2018-04-16T21:47:04 | Python | UTF-8 | Python | false | false | 1,852 | py | # -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ListViews
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-logging
# [START logging_v2_generated_ConfigServiceV2_ListViews_sync]
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import logging_v2
def sample_list_views():
# Create a client
client = logging_v2.ConfigServiceV2Client()
# Initialize request argument(s)
request = logging_v2.ListViewsRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_views(request=request)
# Handle the response
for response in page_result:
print(response)
# [END logging_v2_generated_ConfigServiceV2_ListViews_sync]
| [
"[email protected]"
] | |
73f8dacb0a6fd98a99721e8a113c12641950a990 | 175522feb262e7311fde714de45006609f7e5a07 | /code/nprd/visualize_PTB.py | aab1cc504cd8b84b3cb1e24016f16bf6ca0b9dde | [] | no_license | m-hahn/predictive-rate-distortion | a048927dbc692000211df09da09ad1ed702525df | 1ff573500a2313e0a79d68399cbd83970bf05e4d | refs/heads/master | 2020-04-17T13:49:36.961798 | 2019-06-20T12:37:28 | 2019-06-20T12:37:28 | 166,631,865 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,051 | py | # Was called runMemoryManyConfigs_NeuralFlow_Words_English.py
from matplotlib.ticker import MaxNLocator
import math
import subprocess
import random
import os
from paths import LOG_PATH_WORDS
import numpy as np
ids = [x.split("_")[-1][:-4] for x in os.listdir("/home/user/CS_SCR/CODEBOOKS/") if "nprd" in x]
print(ids)
language = "PTB"
model = "REAL"
ress = []
times = []
epochs_nums = []
for idn in ids:
with open("/home/user/CS_SCR/CODE/predictive-rate-distortion/results/outputs-nprd-words/test-estimates-"+language+"_"+"nprd_words_PTB_saveCodebook.py"+"_model_"+idn+"_"+model+".txt", "r") as inFile:
args = next(inFile).strip().split(" ")
epochs = len(next(inFile).strip().split(" "))
epochs_nums.append(epochs)
next(inFile)
next(inFile)
next(inFile)
time = float(next(inFile).strip())
times.append(time)
print(args)
beta = args[-3]
beta = -math.log(float(beta))
if abs(beta - round(beta)) > 0.001:
continue
if round(beta) not in [1.0, 3.0, 5.0]:
continue
dat = []
with open("/home/user/CS_SCR/CODE/predictive-rate-distortion/results/nprd-samples/samples_"+idn+".txt", "r") as inFile:
data = [x.split("\t") for x in inFile.read().strip().split("\n")]
for i in range(0, len(data), 30):
dat.append(data[i:i+30])
assert data[i][0] == '0', data[i]
# print(len(dat))
ress.append((idn, round(beta), dat))
print(epochs_nums)
print(times)
#quit()
ress = sorted(ress, key=lambda x:x[1])
#print(ress)
import matplotlib
import matplotlib.pyplot as plt
numsOfTexts = [len(x[2]) for x in ress]
print(numsOfTexts)
variations = []
for j in range(min(numsOfTexts)): #len(ress[0][2])):
data = ress[0][2][j]
print(data)
pos = np.asarray([int(x[0]) for x in data])
char = [x[1] for x in data]
ys = []
for i in range(len(ress)):
ys.append([float(x[2]) for x in ress[i][2][j]])
ys[-1] = np.asarray(ys[-1])
print(ys[-1])
print(ys[0])
fig, ax = plt.subplots()
for y, color, style in zip(ys, ["red", "green", "blue"], ["dotted", "dashdot", "solid"]):
ax.plot(pos[16:], y[16:], color=color, linestyle=style)
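    # Record the spread of the three settings' cross-entropies at the first
    # plotted position (index 16).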
variation = [y[16] for y in ys]
variation = max(variation) - min(variation)
variations.append((j, variation))
plt.subplots_adjust(left=0.03, right=0.99, top=0.99, bottom=0.17)
fig.set_size_inches(9, 1.7)
#ax.grid(False)
plt.xticks(pos[16:], [x.decode("utf-8") for x in char][16:])
# plt.axvline(x=15.5, color="green")
ax.grid(False)
ax.set_ylabel("Cross-Entropy", fontsize=12)
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
# figure(figsize=(25,10))
fileName = "sample_"+str(j)
fig.savefig("figures/"+fileName+".png", bbox_inches='tight')
# plt.show()
plt.gcf().clear()
print("figures/"+fileName+".png")
with open("figures/"+fileName+".txt", "w") as outFile:
print >> outFile, (" ".join(char[:16]))
print(sorted(variations, key=lambda x:x[1]))
| [
"[email protected]"
] | |
8043bf4f0fcdecc59ee5421189b23a4884fc8599 | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-sblp/sblp_ut=3.5_rd=0.65_rw=0.04_rn=4_u=0.075-0.325_p=harmonic-2/sched=RUN_trial=30/params.py | d0a52d0abffd2812cb65883935448f5d49c0ec13 | [] | no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 251 | py | {'cpus': 4,
'duration': 30,
'final_util': '3.528333',
'max_util': '3.5',
'periods': 'harmonic-2',
'release_master': False,
'res_distr': '0.65',
'res_nmb': '4',
'res_weight': '0.04',
'scheduler': 'RUN',
'trial': 30,
'utils': 'uni-medium-3'}
| [
"[email protected]"
] | |
7e3a97d42210041be00fe78eac7fdc797d8027a2 | e74e89592d8a3b1a0b465a7b1595708b224362d2 | /pset_pandas1_wine_reviews/data_cleaning/solutions/p8.py | 3d5b21f733b7e5400eea8fc4cc02f3214b41120a | [
"MIT"
] | permissive | mottaquikarim/pydev-psets | 016f60f1e9d9a534bd9a66ecde8eb412beee37d1 | 9749e0d216ee0a5c586d0d3013ef481cc21dee27 | refs/heads/master | 2023-01-10T11:15:57.041287 | 2021-06-07T23:38:34 | 2021-06-07T23:38:34 | 178,547,933 | 5 | 2 | MIT | 2023-01-03T22:28:27 | 2019-03-30T11:09:08 | Jupyter Notebook | UTF-8 | Python | false | false | 564 | py | """
Cleaning Data VIII - Find Null Values
"""
import numpy as np
import pandas as pd
wine_reviews = pd.read_csv('../../winemag-data-130k.csv')
wine_reviews.rename(columns={'points': 'rating'}, inplace=True)
# Use the below df for these problems:
wine_ratings = wine_reviews[['title', 'country', 'rating', 'price']]
# Return a count of the null values in wine_ratings.
print(wine_ratings.isnull().sum())
"""
title 0
country 63
rating 0
price 8996
"""
# Print out the number of rows in wine_ratings.
print(len(wine_ratings)) # 129971
| [
"[email protected]"
] | |
5b421ca138ad52f140295d34f8ee1fdfc8559474 | bc01e1d158e7d8f28451a7e108afb8ec4cb7d5d4 | /sage/src/sage/modular/cusps_nf.py | ea7befba4c0d9a2601835cf1167a18323322bd32 | [] | no_license | bopopescu/geosci | 28792bda1ec1f06e23ba8dcb313769b98f793dad | 0d9eacbf74e2acffefde93e39f8bcbec745cdaba | refs/heads/master | 2021-09-22T17:47:20.194233 | 2018-09-12T22:19:36 | 2018-09-12T22:19:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 42,980 | py | r"""
The set `\mathbb{P}^1(K)` of cusps of a number field K
AUTHORS:
- Maite Aranes (2009): Initial version
EXAMPLES:
The space of cusps over a number field k:
::
sage: k.<a> = NumberField(x^2 + 5)
sage: kCusps = NFCusps(k); kCusps
Set of all cusps of Number Field in a with defining polynomial x^2 + 5
sage: kCusps is NFCusps(k)
True
Define a cusp over a number field:
::
sage: NFCusp(k, a, 2/(a+1))
Cusp [a - 5: 2] of Number Field in a with defining polynomial x^2 + 5
sage: kCusps((a,2))
Cusp [a: 2] of Number Field in a with defining polynomial x^2 + 5
sage: NFCusp(k,oo)
Cusp Infinity of Number Field in a with defining polynomial x^2 + 5
Different operations with cusps over a number field:
::
sage: alpha = NFCusp(k, 3, 1/a + 2); alpha
Cusp [a + 10: 7] of Number Field in a with defining polynomial x^2 + 5
sage: alpha.numerator()
a + 10
sage: alpha.denominator()
7
sage: alpha.ideal()
Fractional ideal (7, a + 3)
sage: alpha.ABmatrix()
[a + 10, -3*a + 1, 7, -2*a]
sage: alpha.apply([0, 1, -1,0])
Cusp [7: -a - 10] of Number Field in a with defining polynomial x^2 + 5
Check Gamma0(N)-equivalence of cusps:
::
sage: N = k.ideal(3)
sage: alpha = NFCusp(k, 3, a + 1)
sage: beta = kCusps((2, a - 3))
sage: alpha.is_Gamma0_equivalent(beta, N)
True
Obtain transformation matrix for equivalent cusps:
::
sage: t, M = alpha.is_Gamma0_equivalent(beta, N, Transformation=True)
sage: M[2] in N
True
sage: M[0]*M[3] - M[1]*M[2] == 1
True
sage: alpha.apply(M) == beta
True
List representatives for Gamma_0(N) - equivalence classes of cusps:
::
sage: Gamma0_NFCusps(N)
[Cusp [0: 1] of Number Field in a with defining polynomial x^2 + 5,
Cusp [1: 3] of Number Field in a with defining polynomial x^2 + 5,
...]
"""
#*****************************************************************************
# Copyright (C) 2009, Maite Aranes <[email protected]>
#
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
#*****************************************************************************
from sage.structure.parent_base import ParentWithBase
from sage.structure.element import Element, is_InfinityElement
from sage.misc.cachefunc import cached_method
from sage.misc.superseded import deprecated_function_alias
_nfcusps_cache = {}
_list_reprs_cache = {}
def NFCusps_clear_list_reprs_cache():
"""
Clear the global cache of lists of representatives for ideal classes.
EXAMPLES::
sage: sage.modular.cusps_nf.NFCusps_clear_list_reprs_cache()
sage: k.<a> = NumberField(x^3 + 11)
sage: N = k.ideal(a+1)
sage: sage.modular.cusps_nf.list_of_representatives(N)
(Fractional ideal (1), Fractional ideal (17, a - 5))
sage: sage.modular.cusps_nf._list_reprs_cache.keys()
[Fractional ideal (a + 1)]
sage: sage.modular.cusps_nf.NFCusps_clear_list_reprs_cache()
sage: sage.modular.cusps_nf._list_reprs_cache.keys()
[]
"""
global _list_reprs_cache
_list_reprs_cache = {}
def list_of_representatives(N):
"""
Returns a list of ideals, coprime to the ideal ``N``, representatives of
the ideal classes of the corresponding number field.
Note: This list, used every time we check `\\Gamma_0(N)` - equivalence of
cusps, is cached.
INPUT:
- ``N`` -- an ideal of a number field.
OUTPUT:
A list of ideals coprime to the ideal ``N``, such that they are
representatives of all the ideal classes of the number field.
EXAMPLES::
sage: sage.modular.cusps_nf.NFCusps_clear_list_reprs_cache()
sage: sage.modular.cusps_nf._list_reprs_cache.keys()
[]
::
sage: from sage.modular.cusps_nf import list_of_representatives
sage: k.<a> = NumberField(x^4 + 13*x^3 - 11)
sage: N = k.ideal(713, a + 208)
sage: L = list_of_representatives(N); L
(Fractional ideal (1),
Fractional ideal (37, a + 12),
Fractional ideal (47, a - 9))
The output of ``list_of_representatives`` has been cached:
::
sage: sage.modular.cusps_nf._list_reprs_cache.keys()
[Fractional ideal (713, a + 208)]
sage: sage.modular.cusps_nf._list_reprs_cache[N]
(Fractional ideal (1),
Fractional ideal (37, a + 12),
Fractional ideal (47, a - 9))
"""
if N in _list_reprs_cache:
lreps = _list_reprs_cache[N]
if not (lreps is None): return lreps
lreps = NFCusps_ideal_reps_for_levelN(N)[0]
_list_reprs_cache[N] = lreps
return lreps
def NFCusps_clear_cache():
"""
Clear the global cache of sets of cusps over number fields.
EXAMPLES::
sage: sage.modular.cusps_nf.NFCusps_clear_cache()
sage: k.<a> = NumberField(x^3 + 51)
sage: kCusps = NFCusps(k); kCusps
Set of all cusps of Number Field in a with defining polynomial x^3 + 51
sage: sage.modular.cusps_nf._nfcusps_cache.keys()
[Number Field in a with defining polynomial x^3 + 51]
sage: NFCusps_clear_cache()
sage: sage.modular.cusps_nf._nfcusps_cache.keys()
[]
"""
global _nfcusps_cache
_nfcusps_cache = {}
def NFCusps(number_field, use_cache=True):
r"""
The set of cusps of a number field `K`, i.e. `\mathbb{P}^1(K)`.
INPUT:
- ``number_field`` -- a number field
- ``use_cache`` -- bool (default=True) - to set a cache of number fields
and their associated sets of cusps
OUTPUT:
The set of cusps over the given number field.
EXAMPLES::
sage: k.<a> = NumberField(x^2 + 5)
sage: kCusps = NFCusps(k); kCusps
Set of all cusps of Number Field in a with defining polynomial x^2 + 5
sage: kCusps is NFCusps(k)
True
Saving and loading works:
::
sage: loads(kCusps.dumps()) == kCusps
True
We test use_cache:
::
sage: NFCusps_clear_cache()
sage: k.<a> = NumberField(x^2 + 11)
sage: kCusps = NFCusps(k, use_cache=False)
sage: sage.modular.cusps_nf._nfcusps_cache
{}
sage: kCusps = NFCusps(k, use_cache=True)
sage: sage.modular.cusps_nf._nfcusps_cache
{Number Field in a with defining polynomial x^2 + 11: ...}
sage: kCusps is NFCusps(k, use_cache=False)
False
sage: kCusps is NFCusps(k, use_cache=True)
True
"""
if use_cache:
key = number_field
if key in _nfcusps_cache:
C = _nfcusps_cache[key]
if not (C is None): return C
C = NFCuspsSpace(number_field)
if use_cache:
_nfcusps_cache[key] = C
return C
#**************************************************************************
#* NFCuspsSpace class *
#**************************************************************************
class NFCuspsSpace(ParentWithBase):
"""
The set of cusps of a number field. See ``NFCusps`` for full documentation.
EXAMPLES::
sage: k.<a> = NumberField(x^2 + 5)
sage: kCusps = NFCusps(k); kCusps
Set of all cusps of Number Field in a with defining polynomial x^2 + 5
"""
def __init__(self, number_field):
"""
See ``NFCusps`` for full documentation.
EXAMPLES::
sage: k.<a> = NumberField(x^3 + x^2 + 13)
sage: kCusps = NFCusps(k); kCusps
Set of all cusps of Number Field in a with defining polynomial x^3 + x^2 + 13
"""
self.__number_field = number_field
ParentWithBase.__init__(self, self)
def __cmp__(self, right):
"""
Return equality only if right is the set of cusps for the same field.
Comparing sets of cusps for two different fields gives the same
result as comparing the two fields.
EXAMPLES::
sage: k.<a> = NumberField(x^2 + 5)
sage: L.<a> = NumberField(x^2 + 23)
sage: kCusps = NFCusps(k); kCusps
Set of all cusps of Number Field in a with defining polynomial x^2 + 5
sage: LCusps = NFCusps(L); LCusps
Set of all cusps of Number Field in a with defining polynomial x^2 + 23
sage: kCusps == NFCusps(k)
True
sage: LCusps == NFCusps(L)
True
sage: LCusps == kCusps
False
"""
t = cmp(type(self), type(right))
if t:
return t
else:
return cmp(self.number_field(), right.number_field())
def _repr_(self):
"""
String representation of the set of cusps of a number field.
EXAMPLES::
sage: k.<a> = NumberField(x^2 + 2)
sage: kCusps = NFCusps(k)
sage: kCusps
Set of all cusps of Number Field in a with defining polynomial x^2 + 2
sage: kCusps._repr_()
'Set of all cusps of Number Field in a with defining polynomial x^2 + 2'
sage: kCusps.rename('Number Field Cusps'); kCusps
Number Field Cusps
sage: kCusps.rename(); kCusps
Set of all cusps of Number Field in a with defining polynomial x^2 + 2
"""
return "Set of all cusps of %s" %(self.number_field())
def _latex_(self):
"""
Return latex representation of self.
EXAMPLES::
sage: k.<a> = NumberField(x^2 + 5)
sage: kCusps = NFCusps(k)
sage: latex(kCusps) # indirect doctest
\mathbf{P}^1(\Bold{Q}[a]/(a^{2} + 5))
"""
return "\\mathbf{P}^1(%s)" %(self.number_field()._latex_())
def __call__(self, x):
"""
Convert x into the set of cusps of a number field.
EXAMPLES::
sage: k.<a> = NumberField(x^2 + 5)
sage: kCusps = NFCusps(k)
sage: c = kCusps(a,2)
Traceback (most recent call last):
...
TypeError: __call__() takes exactly 2 arguments (3 given)
::
sage: c = kCusps((a,2)); c
Cusp [a: 2] of Number Field in a with defining polynomial x^2 + 5
sage: kCusps(2/a)
Cusp [-2*a: 5] of Number Field in a with defining polynomial x^2 + 5
sage: kCusps(oo)
Cusp Infinity of Number Field in a with defining polynomial x^2 + 5
"""
return NFCusp(self.number_field(), x, parent=self)
@cached_method
def zero(self):
"""
Return the zero cusp.
NOTE:
This method just exists to make some general algorithms work.
It is not intended that the returned cusp is an additive
neutral element.
EXAMPLE::
sage: k.<a> = NumberField(x^2 + 5)
sage: kCusps = NFCusps(k)
sage: kCusps.zero()
Cusp [0: 1] of Number Field in a with defining polynomial x^2 + 5
"""
return self(0)
zero_element = deprecated_function_alias(17694, zero)
def number_field(self):
"""
Return the number field that this set of cusps is attached to.
EXAMPLES::
sage: k.<a> = NumberField(x^2 + 1)
sage: kCusps = NFCusps(k)
sage: kCusps.number_field()
Number Field in a with defining polynomial x^2 + 1
"""
return self.__number_field
#**************************************************************************
#* NFCusp class *
#**************************************************************************
class NFCusp(Element):
r"""
Creates a number field cusp, i.e., an element of `\mathbb{P}^1(k)`.
A cusp on a number field is either an element of the field or infinity,
i.e., an element of the projective line over the number field. It is
stored as a pair (a,b), where a, b are integral elements of the number
field.
INPUT:
- ``number_field`` -- the number field over which the cusp is defined.
- ``a`` -- it can be a number field element (integral or not), or
a number field cusp.
- ``b`` -- (optional) when present, it must be either Infinity or
coercible to an element of the number field.
- ``lreps`` -- (optional) a list of chosen representatives for all the
ideal classes of the field. When given, the representative of the cusp
will be changed so its associated ideal is one of the ideals in the list.
OUTPUT:
``[a: b]`` -- a number field cusp.
EXAMPLES::
sage: k.<a> = NumberField(x^2 + 5)
sage: NFCusp(k, a, 2)
Cusp [a: 2] of Number Field in a with defining polynomial x^2 + 5
sage: NFCusp(k, (a,2))
Cusp [a: 2] of Number Field in a with defining polynomial x^2 + 5
sage: NFCusp(k, a, 2/(a+1))
Cusp [a - 5: 2] of Number Field in a with defining polynomial x^2 + 5
Cusp Infinity:
::
sage: NFCusp(k, 0)
Cusp [0: 1] of Number Field in a with defining polynomial x^2 + 5
sage: NFCusp(k, oo)
Cusp Infinity of Number Field in a with defining polynomial x^2 + 5
sage: NFCusp(k, 3*a, oo)
Cusp [0: 1] of Number Field in a with defining polynomial x^2 + 5
sage: NFCusp(k, a + 5, 0)
Cusp Infinity of Number Field in a with defining polynomial x^2 + 5
Saving and loading works:
::
sage: alpha = NFCusp(k, a, 2/(a+1))
sage: loads(dumps(alpha))==alpha
True
Some tests:
::
sage: I*I
-1
sage: NFCusp(k, I)
Traceback (most recent call last):
...
TypeError: unable to convert I to a cusp of the number field
::
sage: NFCusp(k, oo, oo)
Traceback (most recent call last):
...
TypeError: unable to convert (+Infinity, +Infinity) to a cusp of the number field
::
sage: NFCusp(k, 0, 0)
Traceback (most recent call last):
...
TypeError: unable to convert (0, 0) to a cusp of the number field
::
sage: NFCusp(k, "a + 2", a)
Cusp [-2*a + 5: 5] of Number Field in a with defining polynomial x^2 + 5
::
sage: NFCusp(k, NFCusp(k, oo))
Cusp Infinity of Number Field in a with defining polynomial x^2 + 5
sage: c = NFCusp(k, 3, 2*a)
sage: NFCusp(k, c, a + 1)
Cusp [-a - 5: 20] of Number Field in a with defining polynomial x^2 + 5
sage: L.<b> = NumberField(x^2 + 2)
sage: NFCusp(L, c)
Traceback (most recent call last):
...
ValueError: Cannot coerce cusps from one field to another
"""
def __init__(self, number_field, a, b=None, parent=None, lreps=None):
"""
Constructor of number field cusps. See ``NFCusp`` for full
documentation.
EXAMPLES::
sage: k.<a> = NumberField(x^2 + 1)
sage: c = NFCusp(k, 3, a+1); c
Cusp [3: a + 1] of Number Field in a with defining polynomial x^2 + 1
sage: c.parent()
Set of all cusps of Number Field in a with defining polynomial x^2 + 1
sage: kCusps = NFCusps(k)
sage: c.parent() is kCusps
True
"""
if parent is None:
parent = NFCusps(number_field)
Element.__init__(self, parent)
R = number_field.maximal_order()
if b is None:
if not a:#that is cusp "0"
self.__a = R(0)
self.__b = R(1)
return
if isinstance(a, NFCusp):
if a.parent() == parent:
self.__a = R(a.__a)
self.__b = R(a.__b)
else:
raise ValueError("Cannot coerce cusps from one field to another")
elif a in R:
self.__a = R(a)
self.__b = R(1)
elif a in number_field:
self.__b = R(a.denominator())
self.__a = R(a * self.__b)
elif is_InfinityElement(a):
self.__a = R(1)
self.__b = R(0)
elif isinstance(a, (int, long)):
self.__a = R(a)
self.__b = R(1)
elif isinstance(a, (tuple, list)):
if len(a) != 2:
raise TypeError("unable to convert %r to a cusp \
of the number field"%a)
if a[1].is_zero():
self.__a = R(1)
self.__b = R(0)
elif a[0] in R and a[1] in R:
self.__a = R(a[0])
self.__b = R(a[1])
elif isinstance(a[0], NFCusp):#we know that a[1] is not zero
if a[1] == 1:
self.__a = a[0].__a
self.__b = a[0].__b
else:
r = a[0].__a / (a[0].__b * a[1])
self.__b = R(r.denominator())
self.__a = R(r*self.__b)
else:
try:
r = number_field(a[0]/a[1])
self.__b = R(r.denominator())
self.__a = R(r * self.__b)
except (ValueError, TypeError):
raise TypeError("unable to convert %r to a cusp \
of the number field"%a)
else:
try:
r = number_field(a)
self.__b = R(r.denominator())
self.__a = R(r * self.__b)
except (ValueError, TypeError):
raise TypeError("unable to convert %r to a cusp \
of the number field"%a)
else:#'b' is given
if is_InfinityElement(b):
if is_InfinityElement(a) or (isinstance(a, NFCusp) and a.is_infinity()):
raise TypeError("unable to convert (%r, %r) \
to a cusp of the number field"%(a, b))
self.__a = R(0)
self.__b = R(1)
return
elif not b:
if not a:
raise TypeError("unable to convert (%r, %r) \
to a cusp of the number field"%(a, b))
self.__a = R(1)
self.__b = R(0)
return
if not a:
self.__a = R(0)
self.__b = R(1)
return
if (b in R or isinstance(b, (int, long))) and (a in R or isinstance(a, (int, long))):
self.__a = R(a)
self.__b = R(b)
else:
if a in R or a in number_field:
r = a / b
elif is_InfinityElement(a):
self.__a = R(1)
self.__b = R(0)
return
elif isinstance(a, NFCusp):
if a.is_infinity():
self.__a = R(1)
self.__b = R(0)
return
r = a.__a / (a.__b * b)
elif isinstance(a, (int, long)):
r = R(a) / b
elif isinstance(a, (tuple, list)):
if len(a) != 2:
raise TypeError("unable to convert (%r, %r) \
to a cusp of the number field"%(a, b))
r = R(a[0]) / (R(a[1]) * b)
else:
try:
r = number_field(a) / b
except (ValueError, TypeError):
raise TypeError("unable to convert (%r, %r) \
to a cusp of the number field"%(a, b))
self.__b = R(r.denominator())
self.__a = R(r * self.__b)
if not lreps is None:
# Changes the representative of the cusp so the ideal associated
# to the cusp is one of the ideals of the given list lreps.
# Note: the trivial class is always represented by (1).
I = self.ideal()
for J in lreps:
if (J/I).is_principal():
newI = J
l = (newI/I).gens_reduced()[0]
self.__a = R(l * self.__a)
self.__b = R(l * self.__b)
def _repr_(self):
"""
String representation of this cusp.
EXAMPLES::
sage: k.<a> = NumberField(x^2 + 1)
sage: c = NFCusp(k, a, 2); c
Cusp [a: 2] of Number Field in a with defining polynomial x^2 + 1
sage: c._repr_()
'Cusp [a: 2] of Number Field in a with defining polynomial x^2 + 1'
sage: c.rename('[a:2](cusp of a number field)');c
[a:2](cusp of a number field)
sage: c.rename();c
Cusp [a: 2] of Number Field in a with defining polynomial x^2 + 1
"""
if self.__b.is_zero():
return "Cusp Infinity of %s"%self.parent().number_field()
else:
return "Cusp [%s: %s] of %s"%(self.__a, self.__b, \
self.parent().number_field())
def number_field(self):
"""
Returns the number field of definition of the cusp ``self``
EXAMPLES::
sage: k.<a> = NumberField(x^2 + 2)
sage: alpha = NFCusp(k, 1, a + 1)
sage: alpha.number_field()
Number Field in a with defining polynomial x^2 + 2
"""
return self.parent().number_field()
def is_infinity(self):
"""
Returns ``True`` if this is the cusp infinity.
EXAMPLES::
sage: k.<a> = NumberField(x^2 + 1)
sage: NFCusp(k, a, 2).is_infinity()
False
sage: NFCusp(k, 2, 0).is_infinity()
True
sage: NFCusp(k, oo).is_infinity()
True
"""
return self.__b == 0
def numerator(self):
"""
Return the numerator of the cusp ``self``.
EXAMPLES::
sage: k.<a> = NumberField(x^2 + 1)
sage: c = NFCusp(k, a, 2)
sage: c.numerator()
a
sage: d = NFCusp(k, 1, a)
sage: d.numerator()
1
sage: NFCusp(k, oo).numerator()
1
"""
return self.__a
def denominator(self):
"""
Return the denominator of the cusp ``self``.
EXAMPLES::
sage: k.<a> = NumberField(x^2 + 1)
sage: c = NFCusp(k, a, 2)
sage: c.denominator()
2
sage: d = NFCusp(k, 1, a + 1);d
Cusp [1: a + 1] of Number Field in a with defining polynomial x^2 + 1
sage: d.denominator()
a + 1
sage: NFCusp(k, oo).denominator()
0
"""
return self.__b
def _number_field_element_(self):
"""
Coerce to an element of the number field.
EXAMPLES::
sage: k.<a> = NumberField(x^2 + 2)
sage: NFCusp(k, a, 2)._number_field_element_()
1/2*a
sage: NFCusp(k, 1, a + 1)._number_field_element_()
-1/3*a + 1/3
"""
if self.__b.is_zero():
raise TypeError("%s is not an element of %s"%(self, \
self.number_field()))
k = self.number_field()
return k(self.__a / self.__b)
def _ring_of_integers_element_(self):
"""
Coerce to an element of the ring of integers of the number field.
EXAMPLES::
sage: k.<a> = NumberField(x^2 + 2)
sage: NFCusp(k, a+1)._ring_of_integers_element_()
a + 1
sage: NFCusp(k, 1, a + 1)._ring_of_integers_element_()
Traceback (most recent call last):
...
TypeError: Cusp [1: a + 1] of Number Field in a with defining polynomial x^2 + 2 is not an integral element
"""
if self.__b.is_one():
return self.__a
if self.__b.is_zero():
raise TypeError("%s is not an element of %s"%(self, \
self.number_field.ring_of_integers()))
R = self.number_field().ring_of_integers()
try:
return R(self.__a/self.__b)
except (ValueError, TypeError):
raise TypeError("%s is not an integral element"%self)
def _latex_(self):
r"""
Latex representation of this cusp.
EXAMPLES::
sage: k.<a> = NumberField(x^2 + 11)
sage: latex(NFCusp(k, 3*a, a + 1)) # indirect doctest
\[3 a: a + 1\]
sage: latex(NFCusp(k, 3*a, a + 1)) == NFCusp(k, 3*a, a + 1)._latex_()
True
sage: latex(NFCusp(k, oo))
\infty
"""
if self.__b.is_zero():
return "\\infty"
else:
return "\\[%s: %s\\]"%(self.__a._latex_(), \
self.__b._latex_())
def __cmp__(self, right):
"""
Compare the cusps self and right. Comparison is as for elements in
the number field, except with the cusp oo which is greater than
everything but itself.
The ordering in comparison is only really meaningful for infinity.
EXAMPLES::
sage: k.<a> = NumberField(x^3 + x + 1)
sage: kCusps = NFCusps(k)
Comparing with infinity::
sage: c = kCusps((a,2))
sage: d = kCusps(oo)
sage: c < d
True
sage: kCusps(oo) < d
False
Comparison as elements of the number field::
sage: kCusps(2/3) < kCusps(5/2)
False
sage: k(2/3) < k(5/2)
False
"""
if self.__b.is_zero():
# self is oo, which is bigger than everything but oo.
if right.__b.is_zero():
return 0
else:
return 1
elif right.__b.is_zero():
if self.__b.is_zero():
return 0
else:
return -1
return cmp(self._number_field_element_(), right._number_field_element_())
def __neg__(self):
"""
The negative of this cusp.
EXAMPLES::
sage: k.<a> = NumberField(x^2 + 23)
sage: c = NFCusp(k, a, a+1); c
Cusp [a: a + 1] of Number Field in a with defining polynomial x^2 + 23
sage: -c
Cusp [-a: a + 1] of Number Field in a with defining polynomial x^2 + 23
"""
return NFCusp(self.parent().number_field(), -self.__a, self.__b)
def apply(self, g):
"""
Return g(``self``), where ``g`` is a 2x2 matrix, which we view as a
linear fractional transformation.
INPUT:
- ``g`` -- a list of integral elements [a, b, c, d] that are the
entries of a 2x2 matrix.
OUTPUT:
A number field cusp, obtained by the action of ``g`` on the cusp
``self``.
EXAMPLES:
::
sage: k.<a> = NumberField(x^2 + 23)
sage: beta = NFCusp(k, 0, 1)
sage: beta.apply([0, -1, 1, 0])
Cusp Infinity of Number Field in a with defining polynomial x^2 + 23
sage: beta.apply([1, a, 0, 1])
Cusp [a: 1] of Number Field in a with defining polynomial x^2 + 23
"""
k = self.number_field()
return NFCusp(k, g[0]*self.__a + g[1]*self.__b, \
g[2]*self.__a + g[3]*self.__b)
def ideal(self):
"""
Returns the ideal associated to the cusp ``self``.
EXAMPLES::
sage: k.<a> = NumberField(x^2 + 23)
sage: alpha = NFCusp(k, 3, a-1)
sage: alpha.ideal()
Fractional ideal (3, 1/2*a - 1/2)
sage: NFCusp(k, oo).ideal()
Fractional ideal (1)
"""
k = self.number_field()
return k.ideal(self.__a, self.__b)
def ABmatrix(self):
"""
Returns AB-matrix associated to the cusp ``self``.
Given R a Dedekind domain and A, B ideals of R in inverse classes, an
AB-matrix is a matrix realizing the isomorphism between R+R and A+B.
An AB-matrix associated to a cusp [a1: a2] is an AB-matrix with A the
ideal associated to the cusp (A=<a1, a2>) and first column given by
the coefficients of the cusp.
EXAMPLES:
::
sage: k.<a> = NumberField(x^3 + 11)
sage: alpha = NFCusp(k, oo)
sage: alpha.ABmatrix()
[1, 0, 0, 1]
::
sage: alpha = NFCusp(k, 0)
sage: alpha.ABmatrix()
[0, -1, 1, 0]
Note that the AB-matrix associated to a cusp is not unique, and the
output of the ``ABmatrix`` function may change.
::
sage: alpha = NFCusp(k, 3/2, a-1)
sage: M = alpha.ABmatrix()
sage: M # random
[-a^2 - a - 1, -3*a - 7, 8, -2*a^2 - 3*a + 4]
sage: M[0] == alpha.numerator() and M[2]==alpha.denominator()
True
An AB-matrix associated to a cusp alpha will send Infinity to alpha:
::
sage: alpha = NFCusp(k, 3, a-1)
sage: M = alpha.ABmatrix()
sage: (k.ideal(M[1], M[3])*alpha.ideal()).is_principal()
True
sage: M[0] == alpha.numerator() and M[2]==alpha.denominator()
True
sage: NFCusp(k, oo).apply(M) == alpha
True
"""
k = self.number_field()
A = self.ideal()
if self.is_infinity():
return [1, 0, 0, 1]
if not self:
return [0, -1, 1, 0]
if A.is_principal():
B = k.ideal(1)
else:
B = k.ideal(A.gens_reduced()[1])/A
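        # B lies in the inverse ideal class of A, so A*B is principal; its
        # reduced generator g realizes the isomorphism below.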
assert (A*B).is_principal()
a1 = self.__a
a2 = self.__b
g = (A*B).gens_reduced()[0]
Ainv = A**(-1)
A1 = a1*Ainv
A2 = a2*Ainv
r = A1.element_1_mod(A2)
b1 = -(1-r)/a2*g
b2 = (r/a1)*g
ABM = [a1, b1, a2, b2]
return ABM
def is_Gamma0_equivalent(self, other, N, Transformation=False):
r"""
Checks if cusps ``self`` and ``other`` are `\Gamma_0(N)`- equivalent.
INPUT:
- ``other`` -- a number field cusp or a list of two number field
elements which define a cusp.
- ``N`` -- an ideal of the number field (level)
OUTPUT:
- bool -- ``True`` if the cusps are equivalent.
- a transformation matrix -- (if ``Transformation=True``) a list of
integral elements [a, b, c, d] which are the entries of a 2x2 matrix
M in `\Gamma_0(N)` such that M * ``self`` = ``other`` if ``other``
and ``self`` are `\Gamma_0(N)`- equivalent. If ``self`` and ``other``
are not equivalent it returns zero.
EXAMPLES:
::
sage: K.<a> = NumberField(x^3-10)
sage: N = K.ideal(a-1)
sage: alpha = NFCusp(K, 0)
sage: beta = NFCusp(K, oo)
sage: alpha.is_Gamma0_equivalent(beta, N)
False
sage: alpha.is_Gamma0_equivalent(beta, K.ideal(1))
True
sage: b, M = alpha.is_Gamma0_equivalent(beta, K.ideal(1),Transformation=True)
sage: alpha.apply(M)
Cusp Infinity of Number Field in a with defining polynomial x^3 - 10
::
sage: k.<a> = NumberField(x^2+23)
sage: N = k.ideal(3)
sage: alpha1 = NFCusp(k, a+1, 4)
sage: alpha2 = NFCusp(k, a-8, 29)
sage: alpha1.is_Gamma0_equivalent(alpha2, N)
True
sage: b, M = alpha1.is_Gamma0_equivalent(alpha2, N, Transformation=True)
sage: alpha1.apply(M) == alpha2
True
sage: M[2] in N
True
"""
k = self.number_field()
other = NFCusp(k, other)
if not (self.ideal()/other.ideal()).is_principal():
if not Transformation:
return False
else:
return False, 0
reps = list_of_representatives(N)
alpha1 = NFCusp(k, self, lreps=reps)
alpha2 = NFCusp(k, other, lreps=reps)
delta = k.ideal(alpha1.__b) + N
if (k.ideal(alpha2.__b) + N)!= delta:
if not Transformation:
return False
else:
return False, 0
M1 = alpha1.ABmatrix()
M2 = alpha2.ABmatrix()
A = alpha1.ideal()
B = k.ideal(M1[1], M1[3])
ABdelta = A*B*delta*delta
units = units_mod_ideal(ABdelta)
for u in units:
if (M2[2]*M1[3] - u*M1[2]*M2[3]) in ABdelta:
if not Transformation:
return True
else:
AuxCoeff = [1, 0, 0, 1]
Aux = M2[2]*M1[3] - u*M1[2]*M2[3]
if Aux in A*B*N:
if not u==1:
AuxCoeff[3] = u
else:
A1 = (A*B*N)/ABdelta
A2 = B*k.ideal(M1[2]*M2[2])/(A*ABdelta)
f = A1.element_1_mod(A2)
w = ((1 - f)*Aux)/(M1[2]*M2[2])
AuxCoeff[3] = u
AuxCoeff[1] = w
from sage.matrix.all import Matrix
Maux = Matrix(k, 2, AuxCoeff)
M1inv = Matrix(k, 2, M1).inverse()
Mtrans = Matrix(k, 2, M2)*Maux*M1inv
assert Mtrans[1][0] in N
return True, Mtrans.list()
if not Transformation:
return False
else:
return False, 0
#**************************************************************************
# Global functions:
# - Gamma0_NFCusps --compute list of inequivalent cusps
# Internal use only:
# - number_of_Gamma0_NFCusps -- useful to test Gamma0_NFCusps
# - NFCusps_ideal_reps_for_levelN -- lists of reps for ideal classes
# - units_mod_ideal -- needed to check Gamma0(N)-equiv of cusps
#**************************************************************************
def Gamma0_NFCusps(N):
r"""
Returns a list of inequivalent cusps for `\Gamma_0(N)`, i.e., a set of
representatives for the orbits of ``self`` on `\mathbb{P}^1(k)`.
INPUT:
- ``N`` -- an integral ideal of the number field k (the level).
OUTPUT:
A list of inequivalent number field cusps.
EXAMPLES:
::
sage: k.<a> = NumberField(x^2 + 5)
sage: N = k.ideal(3)
sage: L = Gamma0_NFCusps(N)
The cusps in the list are inequivalent:
::
sage: all([not L[i].is_Gamma0_equivalent(L[j], N) for i, j in \
mrange([len(L), len(L)]) if i<j])
True
We test that we obtain the right number of orbits:
::
sage: from sage.modular.cusps_nf import number_of_Gamma0_NFCusps
sage: len(L) == number_of_Gamma0_NFCusps(N)
True
Another example:
::
sage: k.<a> = NumberField(x^4 - x^3 -21*x^2 + 17*x + 133)
sage: N = k.ideal(5)
sage: from sage.modular.cusps_nf import number_of_Gamma0_NFCusps
sage: len(Gamma0_NFCusps(N)) == number_of_Gamma0_NFCusps(N) # long time (over 1 sec)
True
"""
    # We create L, a list of three distinct lists of prime ideals, each coprime
    # to N and together representing the ideal classes of k.
L = NFCusps_ideal_reps_for_levelN(N, nlists=3)
Laux = L[1]+L[2]
Lreps = list_of_representatives(N)
Lcusps = []
k = N.number_field()
for A in L[0]:
#find B in inverse class:
if A.is_trivial():
B = k.ideal(1)
#B = k.unit_ideal() produces an error because we need fract ideal
g = 1
else:
Lbs = [P for P in Laux if (P*A).is_principal()]
B = Lbs[0]
g = (A*B).gens_reduced()[0]
#for every divisor of N we have to find cusps
from sage.arith.all import divisors
for d in divisors(N):
#find delta prime coprime to B in inverse class of d*A
#by searching in our list of auxiliary prime ideals
Lds = [P for P in Laux if (P*d*A).is_principal() and P.is_coprime(B)]
deltap = Lds[0]
a = (deltap*d*A).gens_reduced()[0]
I = d + N/d
            #special case: A=B=d=<1>:
if a.is_one() and I.is_trivial():
Lcusps.append(NFCusp(k, 0, 1, lreps=Lreps))
else:
u = k.unit_group().gens()
for b in I.invertible_residues_mod(u):
#Note: if I trivial, invertible_residues_mod returns [1]
#lift b to (R/a)star
#we need the part of d which is coprime to I, call it M
M = d.prime_to_idealM_part(I)
deltAM = deltap*A*M
u = (B*deltAM).element_1_mod(I)
v = (I*B).element_1_mod(deltAM)
newb = u*b + v
#build AB-matrix:
#----> extended gcd for k.ideal(a), k.ideal(newb)
Y = k.ideal(newb).element_1_mod(k.ideal(a))
# if xa + yb = 1, cusp = y*g /a
Lcusps.append(NFCusp(k, Y*g, a, lreps=Lreps))
return Lcusps
def number_of_Gamma0_NFCusps(N):
"""
Returns the total number of orbits of cusps under the action of the
congruence subgroup `\\Gamma_0(N)`.
INPUT:
- ``N`` -- a number field ideal.
OUTPUT:
    integer -- the number of orbits of cusps under Gamma0(N)-action.
EXAMPLES::
sage: k.<a> = NumberField(x^3 + 11)
sage: N = k.ideal(2, a+1)
sage: from sage.modular.cusps_nf import number_of_Gamma0_NFCusps
sage: number_of_Gamma0_NFCusps(N)
4
sage: L = Gamma0_NFCusps(N)
sage: len(L) == number_of_Gamma0_NFCusps(N)
True
sage: k.<a> = NumberField(x^2 + 7)
sage: N = k.ideal(9)
sage: number_of_Gamma0_NFCusps(N)
6
sage: N = k.ideal(a*9 + 7)
sage: number_of_Gamma0_NFCusps(N)
24
"""
k = N.number_field()
# The number of Gamma0(N)-sub-orbits for each Gamma-orbit:
from sage.arith.all import divisors
Ugens = [k(u) for u in k.unit_group().gens()]
s = sum([len((d+N/d).invertible_residues_mod(Ugens)) for d in divisors(N)])
# There are h Gamma-orbits, with h class number of underlying number field.
return s*k.class_number()
def NFCusps_ideal_reps_for_levelN(N, nlists=1):
"""
Returns a list of lists (``nlists`` different lists) of prime ideals,
coprime to ``N``, representing every ideal class of the number field.
INPUT:
- ``N`` -- number field ideal.
- ``nlists`` -- optional (default 1). The number of lists of prime ideals
we want.
OUTPUT:
A list of lists of ideals representatives of the ideal classes, all coprime
to ``N``, representing every ideal.
EXAMPLES::
sage: k.<a> = NumberField(x^3 + 11)
sage: N = k.ideal(5, a + 1)
sage: from sage.modular.cusps_nf import NFCusps_ideal_reps_for_levelN
sage: NFCusps_ideal_reps_for_levelN(N)
[(Fractional ideal (1), Fractional ideal (2, a + 1))]
sage: L = NFCusps_ideal_reps_for_levelN(N, 3)
sage: all([len(L[i])==k.class_number() for i in range(len(L))])
True
::
sage: k.<a> = NumberField(x^4 - x^3 -21*x^2 + 17*x + 133)
sage: N = k.ideal(6)
sage: from sage.modular.cusps_nf import NFCusps_ideal_reps_for_levelN
sage: NFCusps_ideal_reps_for_levelN(N)
[(Fractional ideal (1),
Fractional ideal (13, a - 2),
Fractional ideal (43, a - 1),
Fractional ideal (67, a + 17))]
sage: L = NFCusps_ideal_reps_for_levelN(N, 5)
sage: all([len(L[i])==k.class_number() for i in range(len(L))])
True
"""
k = N.number_field()
G = k.class_group()
L = []
for i in range(nlists):
L.append([k.ideal(1)])
it = k.primes_of_degree_one_iter()
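    # For each non-trivial ideal class, collect nlists distinct degree-one
    # primes coprime to N lying in that class.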
for I in G.list():
check = 0
if not I.is_principal():
Iinv = (I.ideal())**(-1)
while check<nlists:
J = next(it)
if (J*Iinv).is_principal() and J.is_coprime(N):
L[check].append(J)
check = check + 1
return [tuple(l) for l in L]
def units_mod_ideal(I):
"""
Returns integral elements of the number field representing the images of
the global units modulo the ideal ``I``.
INPUT:
- ``I`` -- number field ideal.
OUTPUT:
A list of integral elements of the number field representing the images of
the global units modulo the ideal ``I``. Elements of the list might be
equivalent to each other mod ``I``.
EXAMPLES::
sage: from sage.modular.cusps_nf import units_mod_ideal
sage: k.<a> = NumberField(x^2 + 1)
sage: I = k.ideal(a + 1)
sage: units_mod_ideal(I)
[1]
sage: I = k.ideal(3)
sage: units_mod_ideal(I)
[1, a, -1, -a]
::
sage: from sage.modular.cusps_nf import units_mod_ideal
sage: k.<a> = NumberField(x^3 + 11)
sage: k.unit_group()
Unit group with structure C2 x Z of Number Field in a with defining polynomial x^3 + 11
sage: I = k.ideal(5, a + 1)
sage: units_mod_ideal(I)
[1,
2*a^2 + 4*a - 1,
...]
::
sage: from sage.modular.cusps_nf import units_mod_ideal
sage: k.<a> = NumberField(x^4 - x^3 -21*x^2 + 17*x + 133)
sage: k.unit_group()
Unit group with structure C6 x Z of Number Field in a with defining polynomial x^4 - x^3 - 21*x^2 + 17*x + 133
sage: I = k.ideal(3)
sage: U = units_mod_ideal(I)
sage: all([U[j].is_unit() and not (U[j] in I) for j in range(len(U))])
True
"""
k = I.number_field()
Uk = k.unit_group()
Istar = I.idealstar(2)
ulist = Uk.gens_values()
elist = [Istar(I.ideallog(u)).order() for u in ulist]
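    # elist[i] is the multiplicative order of the i-th unit generator's image
    # in (O_k/I)^*; enumerate all products of generator powers up to it.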
from sage.misc.mrange import xmrange
from sage.misc.all import prod
return [prod([u**e for u,e in zip(ulist,ei)],k(1)) for ei in xmrange(elist)]
| [
"valber@HPC"
] | valber@HPC |
d27a3896f5fa3feb5f17cd7861eb6378cabfc5d6 | 867846ed1df7f560ccc473413a70020155f66ad4 | /writeImageToBinary.py | 8ebf38106007bbfc86bd7004a67293c4219d32a9 | [] | no_license | abhineet123/PTF | 84297bf5aa95320dbc2d34f422f2dd563ff65a58 | 0c63f7f8251af0d70c329b2cef53694db76c1656 | refs/heads/master | 2023-08-18T18:34:40.513936 | 2023-08-09T17:28:51 | 2023-08-09T17:28:51 | 157,794,848 | 5 | 1 | null | 2021-05-16T18:48:32 | 2018-11-16T01:24:05 | MATLAB | UTF-8 | Python | false | false | 4,088 | py | # from DecompUtils import *
# from distanceGrid import applyFilter
# import time
import os
import cv2
import numpy as np
# from Misc import getParamDict
if __name__ == '__main__':
db_root_dir = 'C:/Datasets'
track_root_dir = '../Tracking Data'
img_root_dir = '../Image Data'
dist_root_dir = '../Distance Data'
track_img_root_dir = '../Tracked Images'
# params_dict = getParamDict()
# param_ids = readDistGridParams()
#
# actors = params_dict['actors']
# sequences = params_dict['sequences']
# challenges = params_dict['challenges']
# filter_types = params_dict['filter_types']
#
# actor_id = param_ids['actor_id']
# seq_id = param_ids['seq_id']
# challenge_id = param_ids['challenge_id']
# inc_id = param_ids['inc_id']
# start_id = param_ids['start_id']
# filter_id = param_ids['filter_id']
# kernel_size = param_ids['kernel_size']
# show_img = param_ids['show_img']
#
# arg_id = 1
# if len(sys.argv) > arg_id:
# actor_id = int(sys.argv[arg_id])
# arg_id += 1
# if len(sys.argv) > arg_id:
# seq_id = int(sys.argv[arg_id])
# arg_id += 1
# if len(sys.argv) > arg_id:
# challenge_id = int(sys.argv[arg_id])
# arg_id += 1
# if len(sys.argv) > arg_id:
# filter_id = int(sys.argv[arg_id])
# arg_id += 1
#
# if actor_id >= len(actors):
# print 'Invalid actor_id: ', actor_id
# sys.exit()
#
# actor = actors[actor_id]
# sequences = sequences[actor]
#
# if seq_id >= len(sequences):
# print 'Invalid dataset_id: ', seq_id
# sys.exit()
# if challenge_id >= len(challenges):
# print 'Invalid challenge_id: ', challenge_id
# sys.exit()
# if filter_id >= len(filter_types):
# print 'Invalid filter_id: ', filter_id
# sys.exit()
#
# seq_name = sequences[seq_id]
# filter_type = filter_types[filter_id]
# challenge = challenges[challenge_id]
#
# if actor == 'METAIO':
# seq_name = seq_name + '_' + challenge
actor = 'GRAM'
seq_name = 'idot_1_intersection_city_day_short'
start_id = 0
filter_type = 'none'
kernel_size = 3
show_img = 1
print 'actor: ', actor
# print 'seq_id: ', seq_id
print 'seq_name: ', seq_name
# print 'filter_type: ', filter_type
# print 'kernel_size: ', kernel_size
src_dir = db_root_dir + '/' + actor + '/Images/' + seq_name
if not os.path.exists(img_root_dir):
os.makedirs(img_root_dir)
if filter_type != 'none':
img_fname = img_root_dir + '/' + seq_name + '_' + filter_type + str(kernel_size) + '.bin'
else:
img_fname = img_root_dir + '/' + seq_name + '.bin'
print 'Reading images from: {:s}'.format(src_dir)
print 'Writing image binary data to: {:s}'.format(img_fname)
img_fid = open(img_fname, 'wb')
file_list = os.listdir(src_dir)
# print 'file_list: ', file_list
no_of_frames = len(file_list)
print 'no_of_frames: ', no_of_frames
end_id = no_of_frames
init_img = cv2.imread(src_dir + '/image{:06d}.jpg'.format(1))
img_height = init_img.shape[0]
img_width = init_img.shape[1]
# np.array([no_of_frames, ], dtype=np.uint32).tofile(img_fid)
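    # Binary layout: two uint32 values (width, height) followed by one raw
    # uint8 grayscale frame per image.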
np.array([img_width, img_height], dtype=np.uint32).tofile(img_fid)
win_name = 'Filtered Image'
if show_img:
cv2.namedWindow(win_name)
for frame_id in xrange(start_id, end_id):
# print 'frame_id: ', frame_id
curr_img = cv2.imread(src_dir + '/image{:06d}.jpg'.format(frame_id + 1))
if len(curr_img.shape) == 3:
curr_img_gs = cv2.cvtColor(curr_img, cv2.cv.CV_BGR2GRAY)
else:
curr_img_gs = curr_img
# if filter_type != 'none':
# curr_img_gs = applyFilter(curr_img_gs, filter_type, kernel_size)
curr_img_gs.astype(np.uint8).tofile(img_fid)
if show_img:
cv2.imshow(win_name, curr_img_gs)
if cv2.waitKey(1) == 27:
break
img_fid.close()
| [
"[email protected]"
] | |
336b66817aeb69caf5a08e2b80c1beac92d48c6d | de01cb554c2292b0fbb79b4d5413a2f6414ea472 | /algorithms/Easy/1275.find-winner-on-a-tic-tac-toe-game.py | b051c0e2d84dffb8192059fd3585ceb77ed909e8 | [] | no_license | h4hany/yeet-the-leet | 98292017eadd3dde98a079aafcd7648aa98701b4 | 563d779467ef5a7cc85cbe954eeaf3c1f5463313 | refs/heads/master | 2022-12-10T08:35:39.830260 | 2020-09-02T23:12:15 | 2020-09-02T23:12:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,576 | py | #
# @lc app=leetcode id=1275 lang=python3
#
# [1275] Find Winner on a Tic Tac Toe Game
#
# https://leetcode.com/problems/find-winner-on-a-tic-tac-toe-game/description/
#
# algorithms
# Easy (52.88%)
# Total Accepted: 17K
# Total Submissions: 32.2K
# Testcase Example: '[[0,0],[2,0],[1,1],[2,1],[2,2]]'
#
# Tic-tac-toe is played by two players A and B on a 3 x 3 grid.
#
# Here are the rules of Tic-Tac-Toe:
#
#
# Players take turns placing characters into empty squares (" ").
# The first player A always places "X" characters, while the second player B
# always places "O" characters.
# "X" and "O" characters are always placed into empty squares, never on filled
# ones.
# The game ends when there are 3 of the same (non-empty) character filling any
# row, column, or diagonal.
# The game also ends if all squares are non-empty.
# No more moves can be played if the game is over.
#
#
# Given an array moves where each element is another array of size 2
# corresponding to the row and column of the grid where they mark their
# respective character in the order in which A and B play.
#
# Return the winner of the game if it exists (A or B), in case the game ends in
# a draw return "Draw", if there are still movements to play return "Pending".
#
# You can assume that moves is valid (It follows the rules of Tic-Tac-Toe), the
# grid is initially empty and A will play first.
#
#
# Example 1:
#
#
# Input: moves = [[0,0],[2,0],[1,1],[2,1],[2,2]]
# Output: "A"
# Explanation: "A" wins, he always plays first.
# "X " "X " "X " "X " "X "
# " " -> " " -> " X " -> " X " -> " X "
# " " "O " "O " "OO " "OOX"
#
#
# Example 2:
#
#
# Input: moves = [[0,0],[1,1],[0,1],[0,2],[1,0],[2,0]]
# Output: "B"
# Explanation: "B" wins.
# "X " "X " "XX " "XXO" "XXO" "XXO"
# " " -> " O " -> " O " -> " O " -> "XO " -> "XO "
# " " " " " " " " " " "O "
#
#
# Example 3:
#
#
# Input: moves = [[0,0],[1,1],[2,0],[1,0],[1,2],[2,1],[0,1],[0,2],[2,2]]
# Output: "Draw"
# Explanation: The game ends in a draw since there are no moves to make.
# "XXO"
# "OOX"
# "XOX"
#
#
# Example 4:
#
#
# Input: moves = [[0,0],[1,1]]
# Output: "Pending"
# Explanation: The game has not finished yet.
# "X "
# " O "
# " "
#
#
#
# Constraints:
#
#
# 1 <= moves.length <= 9
# moves[i].length == 2
# 0 <= moves[i][j] <= 2
# There are no repeated elements on moves.
# moves follow the rules of tic tac toe.
#
#
from typing import List


class Solution:
    def tictactoe(self, moves: List[List[int]]) -> str:
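        # Illustrative sketch, not the original submission (the file left the
        # body empty): replay the moves on a 3x3 grid, then check every row,
        # column, and diagonal for three matching marks.
        grid = [[""] * 3 for _ in range(3)]
        for turn, (r, c) in enumerate(moves):
            grid[r][c] = "X" if turn % 2 == 0 else "O"
        lines = list(grid)                                           # rows
        lines += [[grid[r][c] for r in range(3)] for c in range(3)]  # columns
        lines.append([grid[i][i] for i in range(3)])                 # main diagonal
        lines.append([grid[i][2 - i] for i in range(3)])             # anti-diagonal
        for line in lines:
            if line[0] != "" and line.count(line[0]) == 3:
                return "A" if line[0] == "X" else "B"
        return "Draw" if len(moves) == 9 else "Pending"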
| [
"[email protected]"
] | |
e9f6eacfaf01a7fff4da4c15768700dfd006c709 | 423cc7775d1ab9874729ba304d7682a12b4a4d43 | /plugins/analyzer/previewcomparer.py | 6cc2539edc04ea1fb6f7350dfbf6e684d05043bc | [] | no_license | eyeyunianto/ghiro | 7ec2dc5ae2b766883da6f26975fd41829336e8f8 | 24ce80244893fc94300e1c4f5e3305bd182d65a6 | refs/heads/master | 2020-04-06T04:33:07.155509 | 2015-06-21T21:30:27 | 2015-06-21T21:30:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,807 | py | # Ghiro - Copyright (C) 2013-2015 Ghiro Developers.
# This file is part of Ghiro.
# See the file 'docs/LICENSE.txt' for license terms.
import logging
from itertools import izip
from lib.analyzer.base import BaseAnalyzerModule
from lib.utils import str2image
from lib.db import get_file
try:
from PIL import Image
IS_PIL = True
except ImportError:
IS_PIL = False
logger = logging.getLogger(__name__)
class ImageComparer():
"""Image comparator."""
@staticmethod
def calculate_difference(preview, original_image_id):
"""Calculate difference between two images.
@param preview: preview dict
@param original_image_id: original image ID
@return: difference, difference percentage
"""
try:
i1 = str2image(get_file(original_image_id).read())
except IOError as e:
logger.warning("Comparer error reading image: {0}".format(e))
return
# Check if thumb was resized.
if "original_file" in preview:
i2 = str2image(get_file(preview["original_file"]).read())
else:
i2 = str2image(get_file(preview["file"]).read())
# Resize.
width, height = i2.size
try:
i1 = i1.resize([width, height], Image.ANTIALIAS)
except IOError as e:
logger.warning("Comparer error reading image: {0}".format(e))
return
# Checks.
#assert i1.mode == i2.mode, "Different kinds of images."
#assert i1.size == i2.size, "Different sizes."
# Calculate difference.
pairs = izip(i1.getdata(), i2.getdata())
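        # Sum absolute differences per pixel (single band) or per RGB channel.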
if len(i1.getbands()) == 1:
# for gray-scale jpegs
dif = sum(abs(p1-p2) for p1,p2 in pairs)
else:
dif = sum(abs(c1-c2) for p1,p2 in pairs for c1,c2 in zip(p1,p2))
ncomponents = i1.size[0] * i1.size[1] * 3
# Get diff percentage.
diff_perc = int((dif / 255.0 * 100) / ncomponents)
# Binary option.
if diff_perc >= 15:
diff = True
else:
diff = False
return diff, diff_perc
class PreviewComparerAnalyzer(BaseAnalyzerModule):
"""Compares previews extracted with the original image."""
order = 20
def check_deps(self):
return IS_PIL
def run(self, task):
# Compare previews to catch differences.
if "metadata" in self.results:
if "preview" in self.results["metadata"]:
for preview in self.results["metadata"]["preview"]:
difference = ImageComparer.calculate_difference(preview, self.results["file_data"])
if difference:
preview["diff"], preview["diff_percent"] = difference
return self.results
| [
"[email protected]"
] | |
a9f0ba50e1273c6a25a49d2e0bba74d1942c67b8 | b1c7a768f38e2e987a112da6170f49503b9db05f | /stockkeeping/migrations/0021_remove_purchase_stockitem.py | 56aa900ba0363ea26816b298cca975bca2319252 | [] | no_license | Niladrykar/bracketerp | 8b7491aa319f60ec3dcb5077258d75b0394db374 | ca4ee60c2254c6c132a38ce52410059cc6b19cae | refs/heads/master | 2022-12-11T04:23:07.504966 | 2019-03-18T06:58:13 | 2019-03-18T06:58:13 | 176,218,029 | 1 | 0 | null | 2022-12-08T03:01:46 | 2019-03-18T06:27:37 | JavaScript | UTF-8 | Python | false | false | 338 | py | # Generated by Django 2.0.6 on 2018-11-02 11:40
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('stockkeeping', '0020_auto_20181102_1628'),
]
operations = [
migrations.RemoveField(
model_name='purchase',
name='stockitem',
),
]
| [
"[email protected]"
] | |
9611ea5034446ad9760170ff5cf6b279139456de | eb9f655206c43c12b497c667ba56a0d358b6bc3a | /python/helpers/pydev/pydev_pysrc.py | b9ed49e8005e3b547bd967bac75b0d83e7dd1861 | [
"Apache-2.0",
"EPL-1.0"
] | permissive | JetBrains/intellij-community | 2ed226e200ecc17c037dcddd4a006de56cd43941 | 05dbd4575d01a213f3f4d69aa4968473f2536142 | refs/heads/master | 2023-09-03T17:06:37.560889 | 2023-09-03T11:51:00 | 2023-09-03T12:12:27 | 2,489,216 | 16,288 | 6,635 | Apache-2.0 | 2023-09-12T07:41:58 | 2011-09-30T13:33:05 | null | UTF-8 | Python | false | false | 100 | py | '''An empty file in pysrc that can be imported (from sitecustomize) to find the location of pysrc''' | [
"[email protected]"
] | |
770326a7c554452415a3a4823c7975bc958ac5bb | 45a2fef5a35090e2c3794824735dc137553c3d3b | /backup/fcards/utils.py | a968797ac2ea04b200ab1d427b0368cd1c17ba3c | [] | no_license | kris-brown/personal_website | 9248ec23e2ebab8d820a0c6be70f6fb06a80144f | 4dadfeba80eaf3f25f87b6f7bed48aa9db6ec8fc | refs/heads/master | 2021-08-28T00:03:07.483092 | 2021-08-09T06:19:56 | 2021-08-09T06:19:56 | 190,462,717 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,164 | py | from typing import List as L, Tuple as T
'''Misc helpful things'''
def flatten(lol:L[list])->list:
return [item for sublist in lol for item in sublist]
################################################################################
class Tree(object):
'''
Parse a nested bullet structure where nesting is determined by whitespace, e.g.
- A
- A1
- A2
- A2i
- B
'''
def __init__(self, value : str, children : L['Tree']) -> None:
        self.value = value
        self.children = children
def __str__(self)->str:
return self.print(0)
def __len__(self)->int: return 1 + sum(map(len,self.children))
def showflat(self,_discard : bool = True)->str:
'''Discard root and show flattened information (for Anki cards)'''
if _discard: curr = ''
else:
if self.value and self.value[0]=='-': curr = '\n'+self.value[1:]
else: curr = '\n'+self.value
return curr + ''.join([c.showflat(_discard=False) for c in self.children])
def print(self, indent : int) -> str:
'''Visualize as tree'''
rest = ''.join([c.print(indent+1) for c in self.children])
return '\n' + '\t'*indent + self.value + rest
@classmethod
def from_str(cls, lines:L[str]) -> 'Tree':
'''Takes the "content" of an orgmode node (list of strings) and makes a Tree'''
pairs = [(cls.level(x),x) for x in filter(lambda x: not x.isspace(),lines)]
try:
root = Tree(value = 'root', children = cls.parse_children(pairs))
except ValueError as e:
print(e)
for k,v in pairs: print(k,v)
import pdb;pdb.set_trace();assert False
return root
@classmethod
def parse_children(cls, pairs : L[T[int,str]]) -> L['Tree']:
'''Recursively parse a list of (indent-level, <content>) pairs'''
if not pairs: return [] # Base case: no more children
next_val = pairs[0][1].strip() # our first element is definitely a child.
childlevel = pairs[0][0] # All children have this indentation level
children = [] # The list that we will return
next_pairs = [] # type: L[T[int,str]] ## the lines that are descendents of the child
for i,x in pairs[1:]:
if i < childlevel: raise ValueError('Indentation level incorrectly parsed: ',x)
elif i > childlevel: next_pairs.append((i,x)) # something that belongs to next child at some depth
else:
# We've returned back to the child indentation level, so everything we've seen up to now gets added
children.append(Tree(value = next_val, children = cls.parse_children(next_pairs)))
next_val, next_pairs = x.strip(), [] # reset these variables
# Add the last tree
children.append(Tree(value=next_val,children = cls.parse_children(next_pairs)))
return children
@staticmethod
def level(astr : str) -> int:
'''Get indentation level assuming tab spacing = 8'''
ws = astr[:len(astr) - len(astr.lstrip())]
return ws.count(' ')+8*ws.count('\t')
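

# Minimal usage sketch (hypothetical input), assuming tab-indented bullets:
#   lines = ['- A', '\t- A1', '\t\t- A1i', '- B']
#   tree = Tree.from_str(lines)
#   print(tree)             # indented tree view
#   print(tree.showflat())  # flattened text for Anki cards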
| [
"[email protected]"
] | |
0fa8fa2d1973353700ed4feebe10f79b52b7481f | 27ed6d2db4f38cd351b642042341771d93aee121 | /python/medicUI/widgets.py | 89f63dc250a9fceacb72d24418ce995ab37e74c9 | [
"MIT"
] | permissive | wmoten/medic | eab20630e6666372e50d12fa0998ceefc9411e68 | bc2e9ec09e33ce2d0cedd8dc0c17f567208503ed | refs/heads/master | 2020-04-07T09:58:58.947534 | 2018-03-08T05:51:35 | 2018-03-08T05:51:35 | 124,202,109 | 0 | 0 | MIT | 2018-03-07T08:13:48 | 2018-03-07T08:13:48 | null | UTF-8 | Python | false | false | 23,842 | py | from Qt import QtWidgets, QtCore, QtGui
from . import model
from . import delegate
from . import functions
import medic
import os
import re
IconDir = os.path.abspath(os.path.join(__file__, "../icons"))
class ParameterFunctions():
@staticmethod
def SetParmeterValue(param_container, pram_dict):
for prm in pram_dict:
if prm["function"]:
prm["function"](param_container, prm["name"], prm["widget"])
@staticmethod
def SetInt(param, name, widget):
t = widget.text()
if not t:
t = 0
param.set(name, int(t))
@staticmethod
def SetFloat(param, name, widget):
t = widget.text()
if not t:
t = 0
param.set(name, float(t))
@staticmethod
def SetBool(param, name, widget):
param.set(name, widget.isChecked())
@staticmethod
def SetString(param, name, widget):
param.set(name, str(widget.text()))
@staticmethod
def CreateWidget(info):
name, label, parm_type, default = info
if parm_type is medic.Types.Null or\
parm_type is medic.Types.BoolArray or\
parm_type is medic.Types.IntArray or\
parm_type is medic.Types.FloatArray or\
parm_type is medic.Types.StringArray:
print "This type parameter is not supported yet : %s" % parm_type
return None, None
widget = None
function = None
if parm_type == medic.Types.Bool:
widget = QtWidgets.QCheckBox()
widget.setChecked(default)
function = ParameterFunctions.SetBool
elif parm_type == medic.Types.Int:
widget = NumericLine.CreateIntLine()
widget.setText(str(default))
function = ParameterFunctions.SetInt
elif parm_type == medic.Types.Float:
widget = NumericLine.CreateFloatLine()
widget.setText(str(default))
function = ParameterFunctions.SetFloat
elif parm_type == medic.Types.String:
widget = QtWidgets.QLineEdit()
widget.setText(default)
function = ParameterFunctions.SetString
widget.setObjectName("parameter_widget")
return widget, function
class BrowserButtonWidget(QtWidgets.QFrame):
BackClicked = QtCore.Signal()
NextClicked = QtCore.Signal()
def __init__(self, parent=None):
super(BrowserButtonWidget, self).__init__(parent=parent)
self.setObjectName("medic_browser_buttons_widget")
self.__back_button = None
self.__next_button = None
self.__makeWidgets()
def __makeWidgets(self):
main_layout = QtWidgets.QHBoxLayout()
self.setLayout(main_layout)
self.__back_button = QtWidgets.QPushButton()
self.__next_button = QtWidgets.QPushButton()
self.__back_button.setObjectName("medic_browser_back")
self.__next_button.setObjectName("medic_browser_next")
main_layout.addWidget(self.__back_button)
main_layout.addWidget(self.__next_button)
main_layout.setSpacing(1)
self.__back_button.clicked.connect(self.BackClicked.emit)
self.__next_button.clicked.connect(self.NextClicked.emit)
def setBackEnabled(self, v):
self.__back_button.setEnabled(v)
def setNextEnabled(self, v):
self.__next_button.setEnabled(v)
class CurrentKarteLabel(QtWidgets.QLabel):
def __init__(self, parent=None):
super(CurrentKarteLabel, self).__init__(parent=parent)
self.setObjectName("medic_current_karte")
class StatusLabel(QtWidgets.QLabel):
def __init__(self, parent=None):
super(StatusLabel, self).__init__(parent=parent)
self.setObjectName("status_label")
self.setFixedWidth(70)
self.__ready_icon = QtGui.QPixmap(os.path.join(IconDir, "success.png")).scaled(16, 16)
self.__success_icon = QtGui.QPixmap(os.path.join(IconDir, "success.png")).scaled(16, 16)
self.__failure_icon = QtGui.QPixmap(os.path.join(IconDir, "failure.png")).scaled(16, 16)
self.setStatus(model.Ready)
def setStatus(self, status):
if status is model.Ready:
self.setText("<font color='#b0b0b0'>Ready</font>")
elif status is model.Success:
self.setText("<font color='#1cc033'>Success</font>")
else:
self.setText("<font color='#eb2b66'>Failure</font>")
class TesterList(QtWidgets.QListView):
TesterChanged = QtCore.Signal("QModelIndex")
SingleTestTriggered = QtCore.Signal()
def __init__(self, parent=None):
super(TesterList, self).__init__(parent=parent)
self.setObjectName("medic_tester_list")
self.setUniformItemSizes(True)
self.source_model = model.TesterModel()
self.delegate = delegate.TesterDelegate()
self.setItemDelegate(self.delegate)
self.setModel(self.source_model)
self.__current_tester = None
def updateSelected(self):
for index in self.selectedIndexes():
self.update(index)
def currentTester(self):
return self.__current_tester
def reset(self):
self.clearSelection()
self.__current_tester = None
def selectionChanged(self, selected, deselected):
indexes = selected.indexes()
if not indexes:
self.clearSelection()
self.__current_tester = None
self.TesterChanged.emit(None)
else:
self.__current_tester = self.source_model.data(indexes[0], model.TesterItemRole)
self.TesterChanged.emit(indexes[0])
super(TesterList, self).selectionChanged(selected, deselected)
def mousePressEvent(self, evnt):
super(TesterList, self).mousePressEvent(evnt)
if QtCore.Qt.MouseButton.LeftButton == evnt.button():
index = self.indexAt(evnt.pos())
if index.row() < 0:
self.__current_tester = None
self.clearSelection()
elif QtCore.Qt.MouseButton.RightButton == evnt.button():
menu = QtWidgets.QMenu(self)
test = QtWidgets.QAction("Single Test", menu)
menu.addAction(test)
pos = self.mapToGlobal(evnt.pos())
menu.popup(QtCore.QPoint(pos.x() - 10, pos.y() - 10))
test.triggered.connect(self.__testTriggered)
def __testTriggered(self):
self.SingleTestTriggered.emit()
class KarteList(QtWidgets.QListView):
KarteChanged = QtCore.Signal("QModelIndex")
def __init__(self, parent=None):
super(KarteList, self).__init__(parent=parent)
self.setObjectName("medic_karte_list")
self.setUniformItemSizes(True)
self.source_model = model.KarteModel()
self.delegate = delegate.KarteDelegate()
self.setModel(self.source_model)
self.setItemDelegate(self.delegate)
self.__current_karte = None
def currentKarte(self):
return self.__current_karte
def selectionChanged(self, selected, deselected):
indexes = selected.indexes()
if not indexes:
self.clearSelection()
self.__current_karte = None
self.KarteChanged.emit(None)
else:
self.__current_karte = self.source_model.data(indexes[0], model.KarteItemRole)
self.KarteChanged.emit(indexes[0])
super(KarteList, self).selectionChanged(selected, deselected)
def mousePressEvent(self, evnt):
if QtCore.Qt.MouseButton.LeftButton == evnt.button():
index = self.indexAt(evnt.pos())
if index.row() < 0:
self.clearSelection()
self.__current_karte = None
super(KarteList, self).mousePressEvent(evnt)
class NumericLine(QtWidgets.QLineEdit):
RegexInt = re.compile("[^0-9-]")
RegexFloat = re.compile("[^0-9-.]")
def __init__(self, parent=None):
super(NumericLine, self).__init__(parent)
self.__regex = None
self.textEdited.connect(self.__regexCheck)
def __regexCheck(self, txt):
if self.__regex and txt:
self.setText(self.__regex.sub("", txt))
@staticmethod
def CreateIntLine():
e = NumericLine()
e.__regex = NumericLine.RegexInt
return e
@staticmethod
def CreateFloatLine():
e = NumericLine()
e.__regex = NumericLine.RegexFloat
return e
class ReportList(QtWidgets.QListView):
def __init__(self, parent=None):
super(ReportList, self).__init__(parent)
self.source_model = model.ReportModel()
self.setModel(self.source_model)
self.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
self.delegate = delegate.ReportDelegate()
self.setItemDelegate(self.delegate)
def setReportItems(self, report_items):
self.source_model.setReportItems(report_items)
def selectionChanged(self, selected, deselected):
indexes = selected.indexes()
functions.ClearSelection()
for index in self.selectedIndexes():
report = self.source_model.data(index, model.ReportRole)
report.addSelection()
super(ReportList, self).selectionChanged(selected, deselected)
def mousePressEvent(self, evnt):
if QtCore.Qt.MouseButton.LeftButton == evnt.button():
index = self.indexAt(evnt.pos())
if index.row() < 0:
self.clearSelection()
super(ReportList, self).mousePressEvent(evnt)
class TesterDetailWidget(QtWidgets.QWidget):
ReportsChanged = QtCore.Signal()
def __init__(self, parent=None):
super(TesterDetailWidget, self).__init__(parent)
self.setObjectName("tester_detail_widget")
self.__tester_item = None
self.__params = []
self.__param_container = None
self.__qt_top_layout = None
self.__qt_parameter_layout = None
self.__qt_bottom_layout = None
self.__qt_test_label = None
self.__qt_report_list = None
self.__qt_fix_selected_button = None
self.__qt_fix_all_button = None
self.__createWidgets()
self.__clear()
def onReset(self):
self.__clear()
def reset(self):
self.__clear()
def setTesterItem(self, testerItem):
self.__tester_item = testerItem
self.__setTester(self.__tester_item)
self.__setReportItems(self.__tester_item.reports())
def __setTester(self, testerItem):
self.__setTesterName(testerItem.name())
self.__setDescription(testerItem.description())
self.__clearParameters()
self.__setParameters(testerItem.parameters())
self.__setFixable(testerItem.isFixable())
def __setReportItems(self, report_items):
self.__qt_report_list.setReportItems(report_items)
if not report_items:
self.__setFixable(False)
def __createWidgets(self):
main_layout = QtWidgets.QVBoxLayout()
self.setLayout(main_layout)
main_layout.setContentsMargins(0, 0, 0, 0)
# frame
frame = QtWidgets.QFrame()
frame.setObjectName("detail_frame")
main_layout.addWidget(frame)
frame_layout = QtWidgets.QVBoxLayout()
frame.setLayout(frame_layout)
frame_layout.setContentsMargins(10, 10, 10, 10)
# layout
self.__qt_parameter_layout = QtWidgets.QVBoxLayout()
button_layout = QtWidgets.QHBoxLayout()
# widgets
self.__qt_tester_label = QtWidgets.QLabel()
self.__qt_description = QtWidgets.QTextEdit()
self.__qt_description.setFixedHeight(50)
self.__qt_description.setReadOnly(True)
self.__qt_tester_label.setObjectName("detail_tester_label")
self.__qt_description.setObjectName("detail_tester_description")
self.__qt_report_list = ReportList()
self.__qt_report_list.setObjectName("detial_report_list")
self.__qt_fix_selected_button = QtWidgets.QPushButton("Fix Selected")
self.__qt_fix_all_button = QtWidgets.QPushButton("Fix All")
self.__qt_fix_selected_button.setObjectName("detail_button")
self.__qt_fix_all_button.setObjectName("detail_button")
self.__qt_fix_selected_button.setMaximumWidth(100)
self.__qt_fix_all_button.setMaximumWidth(100)
button_layout.addWidget(self.__qt_fix_selected_button)
button_layout.addWidget(self.__qt_fix_all_button)
frame_layout.addWidget(self.__qt_tester_label)
frame_layout.addSpacing(20)
frame_layout.addWidget(self.__qt_description)
frame_layout.addWidget(self.__qt_report_list)
frame_layout.addLayout(self.__qt_parameter_layout)
frame_layout.addLayout(button_layout)
self.__qt_fix_all_button.clicked.connect(self.__fixAll)
self.__qt_fix_selected_button.clicked.connect(self.__fixSelected)
def __clear(self):
self.__tester_item = None
self.__qt_report_list.setReportItems([])
self.__setTesterName("")
self.__setFixable(False)
self.__setDescription("")
self.__clearParameters()
def __setTesterName(self, name):
self.__qt_tester_label.setText(name)
def __setDescription(self, desc):
self.__qt_description.setText(desc)
def __setFixable(self, enable):
self.__qt_fix_selected_button.setEnabled(enable)
self.__qt_fix_all_button.setEnabled(enable)
def __clearLayout(self, layout):
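        # Recursively take items off the layout, clearing nested layouts and
        # detaching child widgets, until takeAt() returns None.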
while (True):
item = layout.takeAt(0)
if item:
l = item.layout()
w = item.widget()
if l:
self.__clearLayout(l)
if w:
layout.removeWidget(w)
w.setParent(None)
else:
break
def __clearParameters(self):
self.__params = []
self.__param_container = None
self.__clearLayout(self.__qt_parameter_layout)
def __setParameters(self, params):
self.__param_container = params
for info in params.getParamInfos():
p_name, p_label, p_type, p_default = info
widget, function = ParameterFunctions.CreateWidget(info)
if widget:
layout = QtWidgets.QHBoxLayout()
label = QtWidgets.QLabel(p_label)
label.setObjectName("parameter_label")
layout.addWidget(label)
layout.addWidget(widget)
self.__params.append({"name": p_name, "widget": widget, "function": function})
self.__qt_parameter_layout.addLayout(layout)
def __fixAll(self):
if self.__tester_item:
ParameterFunctions.SetParmeterValue(self.__param_container, self.__params)
remove_items = []
for report in self.__tester_item.reports():
if self.__tester_item.fix(report, self.__param_container):
remove_items.append(report)
self.__tester_item.removeReports(remove_items)
self.__setReportItems(self.__tester_item.reports())
self.ReportsChanged.emit()
def __fixSelected(self):
if self.__tester_item:
ParameterFunctions.SetParmeterValue(self.__param_container, self.__params)
remove_items = []
all_reports = self.__tester_item.reports()
for index in map(lambda x: x.row(), self.__qt_report_list.selectedIndexes()):
report = all_reports[index]
if self.__tester_item.fix(report, self.__param_container):
remove_items.append(report)
self.__tester_item.removeReports(remove_items)
self.__setReportItems(self.__tester_item.reports())
self.ReportsChanged.emit()
class TopBarWidget(QtWidgets.QFrame):
BackClicked = QtCore.Signal()
NextClicked = QtCore.Signal()
def __init__(self, parent=None):
super(TopBarWidget, self).__init__(parent=parent)
self.setObjectName("medic_top_bar")
self.__browser_button_widget = None
self.__current_karte_label = None
self.reset_button = None
self.test_button = None
self.status_label = None
self.__phase_items = {}
self.__phase = 0
self.__makeWidgets()
self.setPhase(0)
def setBrowserButtonEnabled(self, prevValue, nextValue):
self.__browser_button_widget.setBackEnabled(prevValue)
self.__browser_button_widget.setNextEnabled(nextValue)
def setCurrentKarteName(self, name):
self.__current_karte_label.setText(name)
def phase(self):
return self.__phase
def next(self):
self.setPhase(self.__phase + 1)
def back(self):
self.setPhase(self.__phase - 1)
def setPhase(self, phase):
self.__phase = phase
for p, items in self.__phase_items.iteritems():
if p == self.__phase:
for item in items:
item.show()
else:
for item in items:
item.hide()
def __makeWidgets(self):
main_layout = QtWidgets.QVBoxLayout()
main_layout.setContentsMargins(0, 0, 0, 0)
self.setLayout(main_layout)
horizon_layout = QtWidgets.QHBoxLayout()
horizon_layout.setSpacing(10)
horizon_layout.setContentsMargins(0, 0, 0, 0)
self.__browser_button_widget = BrowserButtonWidget()
self.reset_button = QtWidgets.QPushButton()
self.test_button = QtWidgets.QPushButton()
self.reset_button.setObjectName("reset_button")
self.test_button.setObjectName("test_button")
self.status_label = StatusLabel()
self.__current_karte_label = CurrentKarteLabel()
self.__phase_items[1] = [self.reset_button, self.test_button, self.status_label]
horizon_layout.addWidget(self.__browser_button_widget)
horizon_layout.addSpacing(20)
horizon_layout.addWidget(self.reset_button)
horizon_layout.addWidget(self.test_button)
horizon_layout.addWidget(self.status_label)
horizon_layout.addStretch(9999)
horizon_layout.addWidget(self.__current_karte_label)
main_layout.addLayout(horizon_layout)
self.__browser_button_widget.BackClicked.connect(self.BackClicked.emit)
self.__browser_button_widget.NextClicked.connect(self.NextClicked.emit)
class MainWidget(QtWidgets.QWidget):
ConditionChanged = QtCore.Signal(bool, bool)
KarteChanged = QtCore.Signal(str)
StatusChanged = QtCore.Signal(int)
def __init__(self, parent=None):
super(MainWidget, self).__init__(parent=parent)
self.setObjectName("medic_main_widget")
self.__kartes_widget = None
self.__testers_widget = None
self.__phase = 0
self.__phase_widgets = {}
self.__callback_ids = []
self.__makeWidgets()
self.setPhase(0)
def phase(self):
return self.__phase
def next(self):
self.setPhase(self.__phase + 1)
def back(self):
self.setPhase(self.__phase - 1)
def setPhase(self, p):
self.__phase = p
for phase, widgets in self.__phase_widgets.iteritems():
            if phase == p:
for widget in widgets:
widget.show()
else:
for widget in widgets:
widget.hide()
        if self.__phase == 0:
able_back = False
able_next = True if self.__kartes_widget.currentKarte() else False
self.__testers_widget.reset()
else:
able_back = True
able_next = False
            if self.__phase == 1:
self.reset()
self.ConditionChanged.emit(able_back, able_next)
def __makeWidgets(self):
main_layout = QtWidgets.QHBoxLayout()
main_layout.setContentsMargins(0, 0, 0, 0)
main_layout.setSpacing(0)
self.setLayout(main_layout)
self.__kartes_widget = KarteList()
self.__testers_widget = TesterList()
self.__detail_widget = TesterDetailWidget()
## phase 0
main_layout.addWidget(self.__kartes_widget)
self.__phase_widgets[0] = [self.__kartes_widget]
        ## phase 1
h_layout = QtWidgets.QHBoxLayout()
h_layout.addWidget(self.__testers_widget)
h_layout.addWidget(self.__detail_widget)
self.__phase_widgets[1] = [self.__testers_widget, self.__detail_widget]
main_layout.addLayout(h_layout)
## signal
self.__kartes_widget.KarteChanged.connect(self.__karteChanged)
self.__testers_widget.TesterChanged.connect(self.__testerChanged)
self.__detail_widget.ReportsChanged.connect(self.__reportsChanged)
self.__testers_widget.SingleTestTriggered.connect(self.__singleTest)
## set maya event callback
self.__callback_ids.append(functions.registSceneOpenCallback(self.__sceneChanged))
self.__callback_ids.append(functions.registNewSceneOpenCallback(self.__sceneChanged))
self.destroyed.connect(self.__removeCallbacks)
def __removeCallbacks(self):
functions.removeCallbacks(self.__callback_ids)
def __sceneChanged(self, *args):
self.reset()
def reset(self):
karte_item = self.__kartes_widget.currentKarte()
if karte_item:
karte_item.reset()
self.StatusChanged.emit(model.Ready)
self.update()
tester_item = self.__testers_widget.currentTester()
self.__detail_widget.reset()
if tester_item:
self.__detail_widget.setTesterItem(tester_item)
def test(self):
self.__detail_widget.reset()
karte_item = self.__kartes_widget.currentKarte()
if karte_item:
karte_item.testAll(testerCallback=self.forceUpdate)
self.StatusChanged.emit(karte_item.status())
self.update()
tester_item = self.__testers_widget.currentTester()
if tester_item:
self.__detail_widget.setTesterItem(tester_item)
def __singleTest(self):
self.__detail_widget.reset()
karte_item = self.__kartes_widget.currentKarte()
tester_item = self.__testers_widget.currentTester()
if karte_item and tester_item:
karte_item.test(tester_item, testerCallback=self.forceUpdate)
self.StatusChanged.emit(karte_item.status())
self.update()
self.__detail_widget.setTesterItem(tester_item)
def forceUpdate(self):
self.update()
QtWidgets.QApplication.processEvents()
def __testerChanged(self, current):
tester_item = self.__testers_widget.model().data(current, model.TesterItemRole)
if tester_item:
self.__detail_widget.setTesterItem(tester_item)
else:
self.__detail_widget.reset()
def __reportsChanged(self):
self.__testers_widget.updateSelected()
karte_item = self.__kartes_widget.currentKarte()
self.StatusChanged.emit(karte_item.status())
def __karteChanged(self, current):
        able_back = self.__phase != 0
able_next = False
karte_model = self.__kartes_widget.model()
tester_model = self.__testers_widget.model()
karte_item = karte_model.data(current, model.KarteItemRole)
if karte_item:
self.KarteChanged.emit(karte_item.name())
tester_model.setTesterItems(karte_model.data(current, model.KarteTesterItemsRole))
able_next = True
else:
self.KarteChanged.emit("")
tester_model.setTesterItems([])
self.ConditionChanged.emit(able_back, able_next)
| [
"[email protected]"
] | |
bd298e7985f0b09e4222e354e3f0afc394e96595 | b47f2e3f3298388b1bcab3213bef42682985135e | /experiments/heat-3d/tmp_files/1539.py | 27d7e85b4987f69ec0aa2f1e52f9247dec53052f | [
"BSD-2-Clause"
] | permissive | LoopTilingBenchmark/benchmark | 29cc9f845d323431e3d40e878cbfc6d1aad1f260 | 52a3d2e70216552a498fd91de02a2fa9cb62122c | refs/heads/master | 2020-09-25T09:45:31.299046 | 2019-12-04T23:25:06 | 2019-12-04T23:25:06 | 225,975,074 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 369 | py | from chill import *
source('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/heat-3d/kernel.c')
destination('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/experiments/heat-3d/tmp_files/1539.c')
procedure('kernel_heat_3d')
loop(0)
tile(0,2,8,2)
tile(0,4,8,4)
tile(0,6,8,6)
tile(1,2,8,2)
tile(1,4,8,4)
tile(1,6,8,6)
| [
"[email protected]"
] | |
f2a797d1c550dbc9843f6fe14e7ad572536407a7 | a24b8446639f2157e2ecbdb7c11eda8e4e4344cc | /Configurations/UserConfigs/2018_AntiIso/ST_t_topConfig.py | 82fbbb685a0a6b4499f198945b017c0e1a347268 | [] | no_license | aloeliger/ReweightScheme | dcebc5651094d8d3da65885c59dae4070983624a | 05c9783fcf8e024fd26a6dbb9b1fbab4aee3c7f4 | refs/heads/master | 2021-12-11T16:10:12.881863 | 2021-08-27T21:02:21 | 2021-08-27T21:02:21 | 215,565,834 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,797 | py | import ROOT
from Configurations.Weights.CrossSectionWeightingModule.CrossSectionWeight import crossSectionWeight
from Configurations.Weights.MuIDIsoReweightingModule.MuIDIsoWeight import muIDIsoWeight_2018 as muIDIsoWeight
from Configurations.Weights.MuTrackingWeightModule.MuTrackingWeight import muTrackingWeight_2018 as muTrackingWeight
from Configurations.Weights.PileupWeightingModule.PileupWeight import pileupWeight_2018 as pileupWeight
from Configurations.Weights.TauFakeRateWeightModule.eTauFakeRateWeight import eTauFakeRateWeight_2018 as eTauFakeRateWeight
from Configurations.Weights.TauIDModule.TauIDWeight import tauIDWeight_2018 as tauIDWeight
from Configurations.Weights.TriggerSFModule.TriggerWeight import triggerWeight_2018 as triggerWeight
from Configurations.Weights.bTaggingWeightModule.bTaggingWeight import bTaggingWeight_2018
from Configurations.Weights.PrefiringWeightModule.PrefiringWeight import PrefiringWeighting
from Configurations.ConfigDefinition import ReweightConfiguration
EWKConfiguration = ReweightConfiguration()
EWKConfiguration.name = "ST_t_top"
EWKConfiguration.inputFile = "/data/aloeliger/SMHTT_Selected_2018_AntiIso_Deep/ST_t_top.root"
crossSectionWeight.sample = 'ST_t_top'
crossSectionWeight.year = '2018'
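# The total number of processed events is stored in bin 2 of the eventCount histogram.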
totalEventsFile = ROOT.TFile.Open("/data/aloeliger/SMHTT_Selected_2018_AntiIso_Deep/ST_t_top.root")
crossSectionWeight.totalEvents = totalEventsFile.eventCount.GetBinContent(2)
totalEventsFile.Close()
pileupWeight.year = '2018'
pileupWeight.sample = 'ST_t_top'
pileupWeight.InitPileupWeightings(pileupWeight)
EWKConfiguration.listOfWeights = [
crossSectionWeight,
muIDIsoWeight,
muTrackingWeight,
pileupWeight,
eTauFakeRateWeight,
#tauIDWeight,
triggerWeight,
bTaggingWeight_2018,
#PrefiringWeighting,
]
| [
"[email protected]"
] | |
0c75fb6bf1bbf0e8a76928ce29bf5b4f0a014996 | 6a4ebebbe0d7f81efc4f1749054a2ed7242c0e58 | /setup.py | 345a9c9073ffb87c82e6fbcc413a8d8703519644 | [
"LicenseRef-scancode-public-domain"
] | permissive | skylarker/granary | 6e192ecd2475febb3585728d5ba7afe34742107d | 2fd8ef017588b955e78606242ce582849cfd57ac | refs/heads/master | 2020-12-26T21:35:04.155528 | 2016-04-18T18:15:30 | 2016-04-18T18:15:30 | 56,891,160 | 1 | 0 | null | 2016-04-22T23:43:09 | 2016-04-22T23:43:09 | null | UTF-8 | Python | false | false | 1,868 | py | """setuptools setup module for granary.
Docs:
https://packaging.python.org/en/latest/distributing.html
http://pythonhosted.org/setuptools/setuptools.html
Based on https://github.com/pypa/sampleproject/blob/master/setup.py
"""
import unittest
from setuptools import setup, find_packages
from setuptools.command.test import ScanningLoader
class TestLoader(ScanningLoader):
def __init__(self, *args, **kwargs):
super(ScanningLoader, self).__init__(*args, **kwargs)
# webutil/test/__init__.py makes App Engine SDK's bundled libraries importable.
import oauth_dropins.webutil.test
setup(name='granary',
version='1.3.1',
description='Free yourself from silo API chaff and expose the sweet social data foodstuff inside in standard formats and protocols!',
long_description=open('README.rst').read(),
url='https://github.com/snarfed/granary',
packages=find_packages(),
include_package_data=True,
author='Ryan Barrett',
author_email='[email protected]',
license='Public domain',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Environment :: Web Environment',
'License :: OSI Approved :: MIT License',
'License :: Public Domain',
'Programming Language :: Python :: 2',
'Topic :: Software Development :: Libraries :: Python Modules',
],
keywords='facebook twitter google+ twitter activitystreams html microformats2 mf2 atom',
install_requires=[
# Keep in sync with requirements.txt!
'beautifulsoup4',
'html2text',
'jinja2',
'mf2py>=0.2.7',
'mf2util>=0.3.3',
'oauth-dropins>=1.3',
'requests<2.6.0',
],
test_loader='setup:TestLoader',
test_suite='granary.test',
)
| [
"[email protected]"
] | |
918f237882bc12ca5169f08d0b2a86dd2b388b12 | ec00584ab288267a7cf46c5cd4f76bbec1c70a6b | /Python/__function/functions1/functions1/23 keyword non-keyword argument.py | 9adc44c2843f16255ab0ee092696537a2eac3237 | [] | no_license | rahuldbhadange/Python | b4cc806ff23953389c9507f43d817b3815260e19 | 7e162117f1acc12537c7eeb36d6983d804122ff3 | refs/heads/master | 2021-06-23T05:04:20.053777 | 2020-01-28T10:34:28 | 2020-01-28T10:34:28 | 217,307,612 | 0 | 0 | null | 2021-06-10T22:44:11 | 2019-10-24T13:35:42 | Python | UTF-8 | Python | false | false | 553 | py | #3.keyword arguments: During fn call,using parameter name,passing value
#4.non-keyword arguments:During fn call,without parameter name,passing value
def display(branch,code):
print(branch,code)
display("CSE","05") #non-keyword argument
display(branch="ECE",code="04") #keyword argument (using parameter name)
display(code="02",branch="EEE") #keyword argument
#display(code="12","IT")
#default and non-default related to fn definition
#key-word and non-keyword relatd to fn call
#Note: After keyword argument,we cannot have nonkeyword argument
| [
"[email protected]"
] | |
d1cc019f002492e4ca2f30241964186934bb36af | 930309163b930559929323647b8d82238724f392 | /abc108_b.py | c72c059e12f6e5358657caa002cf6e7a6a309c3c | [] | no_license | GINK03/atcoder-solvers | 874251dffc9f23b187faa77c439b445e53f8dfe1 | b1e7ac6e9d67938de9a85df4a2f9780fb1fbcee7 | refs/heads/master | 2021-11-07T14:16:52.138894 | 2021-09-12T13:32:29 | 2021-09-12T13:32:29 | 11,724,396 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 162 | py |
x1, y1, x2, y2 = map(int, input().split())
dx = x2 - x1
dy = y2 - y1
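# The remaining two vertices come from rotating (dx, dy) 90 degrees
# counter-clockwise: (dx, dy) -> (-dy, dx).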
x3 = x2 - dy
y3 = y2 + dx
x4 = x3 - dx
y4 = y3 - dy
print("%d %d %d %d" % (x3, y3, x4, y4))
| [
"[email protected]"
] | |
c7c1943a417de7573e5aebf77ae57a09db5008a5 | 3b89c0a97ac6b58b6923a213bc8471e11ad4fe69 | /python/CodingExercises/LeetCode1.py | 86ca7efb65730bbd49152c8028c24b15a168c256 | [] | no_license | ksayee/programming_assignments | b187adca502ecf7ff7b51dc849d5d79ceb90d4a6 | 13bc1c44e1eef17fc36724f20b060c3339c280ea | refs/heads/master | 2021-06-30T07:19:34.192277 | 2021-06-23T05:11:32 | 2021-06-23T05:11:32 | 50,700,556 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 747 | py | '''
1. Two Sum
Given an array of integers, return indices of the two numbers such that they add up to a specific target.
You may assume that each input would have exactly one solution, and you may not use the same element twice.
Example:
Given nums = [2, 7, 11, 15], target = 9,
Because nums[0] + nums[1] = 2 + 7 = 9,
return [0, 1].
'''
def LeetCode1(ary, k):
    # One-pass hash map: remember each value's index and look up the
    # complement (k - value) in O(1), for O(n) time overall.
    seen = {}
    fnl_lst = []
    for i in range(len(ary)):
        key = ary[i]
        diff = k - key
        if diff in seen:
            fnl_lst.append((seen[diff], i))  # earlier index first, e.g. (0, 1)
        else:
            seen[key] = i
    return fnl_lst
def main():
ary=[2, 7, 11, 15]
k=9
print(LeetCode1(ary,k))
if __name__=='__main__':
main() | [
"[email protected]"
] | |
0c7109401894b8ab6fa958daf9320f6f6999c573 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03231/s342511104.py | c53bcfef1be5b00fe39ad9752b5ac05a7a1bf748 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | from math import gcd
N, M = map(int, input().split())
S = input()
T = input()
L = N*M // gcd(N, M)
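# S is stretched by L/N and T by L/M in the length-L answer; S[i] and T[j]
# land on the same position exactly when M*i == N*j, i.e. when N divides M*i.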
for i in range(N):
if M*i % N == 0:
j = M*i // N
if S[i] != T[j]:
print(-1)
exit()
print(L)
| [
"[email protected]"
] | |
3acd2877be2d35889598ed2111ffaffb3f802be0 | 4b434c6af1d205e33941289211159dfde865e38e | /con.Bmaml.eq/train.py | fdc88d237727a3c3e47393deafa25044993743e3 | [] | no_license | a1600012888/BMAML | 3b2a7f264ed13ef598cc3677d18714c4f8354176 | 4802a917d8061011be9a2b09174598216812cc58 | refs/heads/master | 2020-04-14T19:10:40.363219 | 2019-01-16T17:03:18 | 2019-01-16T17:03:18 | 164,047,888 | 9 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,915 | py | import torch
from collections import OrderedDict
from tqdm import tqdm
from utils import AvgMeter
from torch.nn.utils import vector_to_parameters, parameters_to_vector
def TrainOneTask(Task, M, SVGD, optimizer, DEVICE, num_of_step = 3, step_size = 1e-3):
X, Y, Xtest, Ytest, std = Task
X = X.to(DEVICE)
Y = Y.to(DEVICE)
Xtest = Xtest.to(DEVICE)
Ytest = Ytest.to(DEVICE)
    std = std.to(DEVICE) * 100  # * 100 to stabilize
SVGD.NablaLogP.update(X, Y, std)
SVGD.InitMomentumUpdaters()
with torch.no_grad():
start_logp = 0
for paramsvec in M:
start_logp = start_logp + SVGD.NablaLogP(True, paramsvec, ret_grad=False)
start_logp = start_logp / len(M)
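    # Inner-loop adaptation: a few SVGD steps on the support set.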
for i in range(num_of_step):
M = SVGD.step(M, retain_graph = True, step_size = step_size)
with torch.no_grad():
end_logp = 0
for paramsvec in M:
end_logp = end_logp + SVGD.NablaLogP(True, paramsvec, ret_grad = False)
end_logp = end_logp / len(M)
SVGD.NablaLogP.update(Xtest, Ytest, std)
logp = 0
for paramsvec in M:
logp = logp + SVGD.NablaLogP(True, paramsvec, ret_grad=False)
logp = logp / len(M)
loss = logp * -1.0
loss.backward()
optimizer.step()
optimizer.zero_grad()
ret_dic = OrderedDict()
ret_dic['start_logp_train'] = start_logp.item()
ret_dic['end_logp_train'] = end_logp.item()
ret_dic['end_logp_joint'] = logp.item()
return ret_dic
def TrainOneTaskWithChaserLoss(Task, M, SVGD, optimizer, DEVICE, num_of_step = 3, step_size = 1e-3):
optimizer.zero_grad()
X, Y, Xtest, Ytest, std = Task
X = X.to(DEVICE)
Y = Y.to(DEVICE)
Xtest = Xtest.to(DEVICE)
Ytest = Ytest.to(DEVICE)
    std = std.to(DEVICE) * 100  # * 100 to stabilize
SVGD.NablaLogP.update(X, Y, std)
SVGD.InitMomentumUpdaters()
# Compute the LogP for initial particles (For hyper-param tuning)
with torch.no_grad():
start_logp = 0
for paramsvec in M:
start_logp = start_logp + SVGD.NablaLogP(True, paramsvec, ret_grad=False)
start_logp = start_logp / len(M)
# Inner fit
for i in range(num_of_step):
M = SVGD.step(M, retain_graph = True, step_size = step_size)
# Compute the LogP of the training set after the fitting (For hyper-param tuning)
with torch.no_grad():
end_logp = 0
for paramsvec in M:
end_logp = end_logp + SVGD.NablaLogP(True, paramsvec, ret_grad = False)
end_logp = end_logp / len(M)
Xtrain_and_test = torch.cat((X, Xtest))
Ytrain_and_test = torch.cat((Y, Ytest))
SVGD.NablaLogP.update(Xtrain_and_test, Ytrain_and_test, std)
SVGD.InitMomentumUpdaters()
# Compute the LogP of the whole set after the fitting (For hyper-param tuning)
with torch.no_grad():
logp = 0
for paramsvec in M:
logp = logp + SVGD.NablaLogP(True, paramsvec, ret_grad=False)
logp = logp / len(M)
    # Approximate the true posterior: run detached "leader" particles for extra SVGD steps
M_true = []
for paramsvec in M:
m = torch.nn.ParameterList([torch.nn.Parameter(p.detach()) for p in paramsvec])
#m = [p.detach() for p in paramsvec]
M_true.append(m)
#M_true = SVGD.step(M, retain_graph=False, step_size=step_size)
for i in range(num_of_step):
M_true= SVGD.step(M_true, retain_graph=False, step_size=step_size)
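    # Chaser loss: squared L2 distance between the chaser particles M and the
    # detached leader particles M_true.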
chaser_loss = 0
for paramsvec, paramsvec_true in zip(M, M_true):
vec = parameters_to_vector(paramsvec)
vec_true = parameters_to_vector(paramsvec_true).detach()
chaser_loss = chaser_loss + torch.dot((vec - vec_true),(vec - vec_true) )
#for param, param_true in zip(paramsvec, paramsvec_true):
# chaser_loss = chaser_loss + torch.mean((param - param_true.detach()) ** 2)
chaser_loss = chaser_loss / len(M)
# Compute the true LogP of the whole set (For hyper-param tuning)
with torch.no_grad():
true_logp = 0
for paramsvec in M_true:
true_logp = true_logp + SVGD.NablaLogP(True, paramsvec, ret_grad=False)
true_logp = true_logp / len(M)
chaser_loss.backward()
optimizer.step()
optimizer.zero_grad()
ret_dic = OrderedDict()
ret_dic['start_logp_train'] = start_logp.item()
ret_dic['end_logp_train'] = end_logp.item()
ret_dic['end_logp_joint'] = logp.item()
ret_dic['true_logp_joint'] = true_logp.item()
ret_dic['chaser_loss'] = chaser_loss.item()
return ret_dic
def test(TaskLoader, M, SVGD, DEVICE, num_of_step = 3, step_size = 1e-3):
'''
test for continious
'''
raw_M = M
LogP = AvgMeter()
pbar = tqdm(range(100))
for i in pbar:
task = next(TaskLoader)
for j in range(len(task)-1):
X, Y, Xtest, Ytest, std = task[j]
X_next, Y_next, Xtest_next, Ytest_next, std_next = task[j+1]
X = X.to(DEVICE)
Y = Y.to(DEVICE)
#Xtest = Xtest.to(DEVICE)
#Ytest = Ytest.to(DEVICE)
#std = std.to(DEVICE) * 100 # * 100 to stablize
Xtest = Xtest_next.to(DEVICE)
Ytest = Ytest_next.to(DEVICE)
std = std_next.to(DEVICE) * 100 # * 100 to stablize
SVGD.NablaLogP.update(X, Y, std)
SVGD.InitMomentumUpdaters()
#Mt = SVGD.step(M, retain_graph=False, step_size=step_size)
for tt in range(num_of_step):
M = SVGD.step(M, retain_graph = False, step_size = step_size )#/ (len(task) -1 ))
SVGD.NablaLogP.update(Xtest, Ytest, std)
with torch.no_grad():
logp = 0
for paramsvec in M:
logp = logp + SVGD.NablaLogP(True, paramsvec, ret_grad=False)
logp = logp / len(M)
LogP.update(logp.item())
pbar.set_description("Running Validation")
pbar.set_postfix({'Logp_test':LogP.mean})
M = raw_M
return LogP.mean
| [
"[email protected]"
] | |
67b0a46a7d02e459b2ca4a9e9d9c5635591b21bf | b659e99f89cf17ae886857383cb5b708847fe3f1 | /gettingStarted/problem7.py | 8402c5ac64f20f3cd28685736d51b82d10eddaae | [] | no_license | nitheeshmavila/practice-python | bea06cc4b2b9247b926e07fd5a3987552e531242 | f54bf8934a4cf160cdfc9dc43176f1eea3bc7a41 | refs/heads/master | 2021-07-03T17:24:29.450939 | 2021-06-16T08:40:48 | 2021-06-16T08:40:48 | 100,113,256 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 443 | py | '''
Problem 7: How many multiplications are performed when each of the following lines(line 1 and line 2) of code is executed?
'''
noofCalls = 0
def square(n):
print(n)
global noofCalls
noofCalls += 1
return n*n
def printCalls():
print('no of multiplications performed:',noofCalls)
print(square(5)) # line1
printCalls()
print(square(2*5)) #line2
printCalls()
'''
output
------
5
25
no of multiplications performed: 1
10
100
no of multiplications performed: 2

line 1 performs 1 multiplication (n*n inside square); line 2 performs 2
(2*5 to build the argument, then n*n), though the counter only tracks
the multiplication inside square.
'''
| [
"[email protected]"
] | |
dd70383bd799a8f104e751a763ba69d1a5ff85be | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03330/s307392217.py | de40af9fdc43a32599ce03bad31896ab49cb00ac | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 787 | py | def main():
from itertools import permutations
N, C = map(int, input().split())
change_cost = [[int(x) for x in input().split()] for _ in range(C)]
init_color = [[int(x) - 1 for x in input().split()] for _ in range(N)]
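    # Cells split into 3 classes by (r + c) % 3; a valid grid gives each class
    # a single color, distinct across classes. ctr[p][color] counts the cells
    # of each initial color in class p.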
ctr = [[0] * C for _ in range(3)]
for r in range(N):
for c in range(N):
p = (r + c) % 3
color = init_color[r][c]
ctr[p][color] += 1
mi = 1000 * 500 * 500 + 1
for perm in permutations(range(C), r=3):
it = iter(perm)
t = 0
for p in range(3):
color_to_be = next(it)
for color, count in enumerate(ctr[p]):
t += change_cost[color][color_to_be] * count
mi = min(mi, t)
print(mi)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
390906999c1c0e7466b96f59d5a0c7e6cc9ab7d4 | 986d78fdcb40f4ee7db15bafc77070c087d16b63 | /studies/MultiBoomSparMass_v2/point_design.py | f0268c72689269395046cb2711265a992c71d693 | [
"MIT"
] | permissive | hdolfen/AeroSandbox | 8578b5e36b9a4be69801c1c9ad8819965f236edb | 4c48690e31f5f2006937352a63d653fe268c42c3 | refs/heads/master | 2023-01-20T15:36:58.111907 | 2020-11-24T13:11:44 | 2020-11-24T13:11:44 | 313,655,155 | 0 | 0 | MIT | 2020-11-24T13:11:46 | 2020-11-17T15:05:02 | null | UTF-8 | Python | false | false | 1,885 | py | ### Imports
from aerosandbox.structures.beams import *
import copy
n_booms = 1
# n_booms = 2
# load_location_fraction = 0.50
# n_booms = 3
# load_location_fraction = 0.60
mass = 80 * 6
span = 7.3
### Set up problem
opti = cas.Opti()
beam = TubeBeam1(
opti=opti,
length=span / 2,
points_per_point_load=100,
diameter_guess=10,
thickness=1e-3,
bending=True,
torsion=False,
max_allowable_stress=570e6,
)
lift_force = 9.81 * mass
# load_location = opti.variable()
# opti.set_initial(load_location, 12)
# opti.subject_to([
# load_location > 1,
# load_location < beam.length - 1,
# ])
assert (n_booms == np.array([1,2,3])).any()
if n_booms == 2 or n_booms == 3:
load_location = beam.length * load_location_fraction
beam.add_point_load(location = load_location, force = -lift_force / n_booms)
beam.add_elliptical_load(force=lift_force / 2)
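# Half-span model: the semi-span carries half the total lift, distributed elliptically.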
beam.setup()
# Constraints (in addition to stress)
opti.subject_to([
# beam.u[-1] < 2, # tip deflection. Source: http://web.mit.edu/drela/Public/web/hpa/hpa_structure.pdf
# beam.u[-1] > -2 # tip deflection. Source: http://web.mit.edu/drela/Public/web/hpa/hpa_structure.pdf
beam.du * 180 / cas.pi < 10, # local dihedral constraint
beam.du * 180 / cas.pi > -10, # local anhedral constraint
cas.diff(beam.nominal_diameter) < 0, # manufacturability
])
# # Zero-curvature constraint (restrict to conical tube spars only)
# opti.subject_to([
# cas.diff(cas.diff(beam.nominal_diameter)) == 0
# ])
opti.minimize(beam.mass)
p_opts = {}
s_opts = {}
s_opts["max_iter"] = 1e6 # If you need to interrupt, just use ctrl+c
# s_opts["mu_strategy"] = "adaptive"
opti.solver('ipopt', p_opts, s_opts)
sol = opti.solve()
beam_sol = copy.deepcopy(beam).substitute_solution(sol)
spar_mass = beam_sol.mass * 2
# Run a sanity check
beam_sol.draw_bending()
print("Spar mass:", spar_mass) | [
"[email protected]"
] | |
f53ed7447917dec09d5d66ad99297a866cab65af | 78f3fe4a148c86ce9b80411a3433a49ccfdc02dd | /2018/11/graphics/elex18-all-suburb-map-20181119/graphic_config.py | 006fc1be9c13a6867f9c6636d339a291b2f137a6 | [] | no_license | nprapps/graphics-archive | 54cfc4d4d670aca4d71839d70f23a8bf645c692f | fe92cd061730496cb95c9df8fa624505c3b291f8 | refs/heads/master | 2023-03-04T11:35:36.413216 | 2023-02-26T23:26:48 | 2023-02-26T23:26:48 | 22,472,848 | 16 | 7 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | #!/usr/bin/env python
import base_filters
COPY_GOOGLE_DOC_KEY = '105w9FOQjFxe2xS_gA8rB6fXNWs-Tlyr4Jgu3icfRJgI'
USE_ASSETS = False
# Use these variables to override the default cache timeouts for this graphic
# DEFAULT_MAX_AGE = 20
# ASSETS_MAX_AGE = 300
JINJA_FILTER_FUNCTIONS = base_filters.FILTERS
| [
"[email protected]"
] |