###ExpressionBuilder
#Copyright 2005-2008 J. David Gladstone Institutes, San Francisco California
#Author Nathan Salomonis - [email protected]
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys, string
import os.path
import bisect
import unique
import statistics
import math
import reorder_arrays
import ExonArray
import export
import time
import traceback
import UI
import BuildAffymetrixAssociations; reload(BuildAffymetrixAssociations)
import FilterDabg; reload(FilterDabg)
try:
from scipy import average as Average
except Exception:
from statistics import avg as Average
use_Tkinter = 'no'
try:
from Tkinter import *
use_Tkinter = 'yes'
except ImportError: use_Tkinter = 'no'; print "\nPmw or Tkinter not found... Tkinter print out not available";
debug_mode = 'no'
def filepath(filename):
fn = unique.filepath(filename)
return fn
def read_directory(sub_dir):
dir_list = unique.read_directory(sub_dir); dir_list2 = []
###Code to prevent folder names from being included
for entry in dir_list:
if entry[-4:] == ".txt" or entry[-4:] == ".csv": dir_list2.append(entry)
return dir_list2
def returnLargeGlobalVars():
### Prints all large global variables retained in memory (taking up space)
all = [var for var in globals() if (var[:2], var[-2:]) != ("__", "__")]
for var in all:
try:
if len(globals()[var])>1:
print var, len(globals()[var])
except Exception: null=[]
def clearObjectsFromMemory(db_to_clear):
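### Deletes every key from the supplied dictionary so the referenced objects can be released from memory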
db_keys={}
for key in db_to_clear: db_keys[key]=[]
for key in db_keys: del db_to_clear[key]
################# Begin Analysis from parsing files
def checkArrayHeaders(expr_input_dir,expr_group_dir):
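### Verifies that the sample headers in the expression file can be matched to the entries in the corresponding groups file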
array_names, array_linker_db = getArrayHeaders(expr_input_dir)
expr_group_list,expr_group_db = importArrayGroups(expr_group_dir,array_linker_db)
def getArrayHeaders(expr_input_dir):
### This method is used to check to see if the array headers in the groups and expression files match
fn=filepath(expr_input_dir); x = 0
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
headers = string.split(data,'\t')
if data[0] != '#':
### differentiate data from column headers
if x == 1: break ### Exit out of loop, since we only want the array names
if x == 0: ### only grab headers if it's the first row
array_names = []; array_linker_db = {}; d = 0
for entry in headers[1:]: entry = string.replace(entry,'"',''); array_names.append(entry)
for array in array_names: array = string.replace(array,'\r',''); array_linker_db[array] = d; d +=1
x = 1
return array_names, array_linker_db
def checkExpressionFileFormat(expFile,reportNegatives=False):
""" Determine if the data is log, non-log and increment value for log calculation """
firstLine=True; convert=False
inputMax=0; inputMin=10000; increment=0
expressed_values={}
startIndex = 1
for line in open(expFile,'rU').xreadlines():
line = cleanUpLine(line)
key = string.split(line,'\t')[0]
t = string.split(line,'\t')
if firstLine:
headers = line
if 'row_clusters-flat' == t[1]:
startIndex = 2
firstLine = False
else:
if 'column_clusters-flat' in t:
continue ### skip this row if analyzing a clustered heatmap file
try: uid, coordinates = string.split(key,'=')
except Exception: uid = key
if '' in t[1:]:
values = [0 if x=='' else x for x in t[startIndex:]]
elif 'NA' in t[1:]:
values = [0 if x=='NA' else x for x in t[startIndex:]]
else:
values = t[1:]
try: values = map(lambda x: float(x), values)
except Exception:
print values
print traceback.format_exc()
if max(values)>inputMax: inputMax = max(values)
if min(values)<inputMin: inputMin = min(values)
if inputMax>100: ### Thus, not log values
expressionDataFormat = 'non-log'
if inputMin<=1: #if inputMin<=1:
increment = inputMin+1
convert = True
else:
expressionDataFormat = "log"
#print expressionDataFormat,increment,convert
if reportNegatives == False:
return expressionDataFormat,increment,convert
else:
### Report if negative values are present
increment = inputMin
if convert: ### Should rarely be the case, as this would indicate that a non-log folds are present in the file
increment = increment+1
return expressionDataFormat,increment,convert
def calculate_expression_measures(expr_input_dir,expr_group_dir,experiment_name,comp_group_dir,probeset_db,annotate_db):
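### Parses the expression file, log-transforms values when required, computes group comparison statistics via reorder_arrays and exports the DATASET/GenMAPP summary files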
print "Processing the expression file:",expr_input_dir
try: expressionDataFormat,increment,convertNonLogToLog = checkExpressionFileFormat(expr_input_dir)
except Exception:
expressionDataFormat = expression_data_format; increment = 0
if expressionDataFormat == 'non-log': convertNonLogToLog=True
else: convertNonLogToLog = False
#print convertNonLogToLog, expressionDataFormat, increment
global array_fold_headers; global summary_filtering_stats; global raw_data_comp_headers; global array_folds
fn1=filepath(expr_input_dir)
x = 0; y = 0; d = 0
blanksPresent=False
array_folds={}
for line in open(fn1,'rU').xreadlines():
data = cleanUpLine(line)
if data[0] != '#' and data[0] != '!':
fold_data = string.split(data,'\t')
try: arrayid = fold_data[0]
except Exception: arrayid = 'UID'
if len(arrayid)>0:
if arrayid[0]== ' ':
try: arrayid = arrayid[1:] ### Cufflinks issue
except Exception: arrayid = ' ' ### can be the first row UID column as blank
else:
arrayid = 'UID'
#if 'counts.' in expr_input_dir: arrayid,coordinates = string.split(arrayid,'=') ### needed for exon-level analyses only
### differentiate data from column headers
if x == 1:
fold_data = fold_data[1:]; fold_data2=[]
for fold in fold_data:
fold = string.replace(fold,'"','')
try:
fold = float(fold); fold_data2.append(fold)
except Exception:
fold_data2.append('')
blanksPresent = True
"""
print_out = 'WARNING!!! The ID'+arrayid+ 'has an invalid expression value:'+[fold]+'\n. Correct and re-run'
try: UI.WarningWindow(print_out,'Critical Error - Exiting Program!!!'); sys.exit()
except NameError: print print_out; sys.exit()
"""
if expressionDataFormat == 'non-log' and (convertNonLogToLog or array_type == 'RNASeq'):
fold_data3=[] ###Convert numeric expression to log fold (prior to version 2.05, 1 was added rather than the increment)
for fold in fold_data2:
try:
log_fold = math.log((float(fold)+increment),2) ### changed from - log_fold = math.log((float(fold)+1),2) - version 2.05
fold_data3.append(log_fold)
except ValueError: ###Not an ideal situation: Value is negative - Convert to zero
if float(fold)<=0: log_fold = math.log(1.01,2); fold_data3.append(log_fold)
else:
fold_data3.append('')
blanksPresent = True
"""
print_out = 'WARNING!!! The ID'+arrayid+ 'has an invalid expression value:'+fold+'\n. Correct and re-run'
try: UI.WarningWindow(print_out,'Critical Error - Exiting Program!!!'); sys.exit()
except NameError: print print_out; sys.exit()
"""
fold_data2 = fold_data3
if (array_type == "AltMouse"):
if arrayid in probeset_db: array_folds[arrayid] = fold_data2; y = y+1
else: array_folds[arrayid] = fold_data2; y = y+1
else: #only grab headers if it's the first row
array_names = []; array_linker_db = {}
for entry in fold_data[1:]:
entry = string.replace(entry,'"','')
if len(entry)>0: array_names.append(entry)
for array in array_names: #use this to preserve the original index order of arrays
array = string.replace(array,'\r','') ###This occurred once... not sure why
array_linker_db[array] = d; d +=1
#add this afterwards since these will also be used as index values
x = 1
print len(array_folds),"IDs imported...beginning to calculate statistics for all group comparisons"
expr_group_list,expr_group_db = importArrayGroups(expr_group_dir,array_linker_db)
comp_group_list, comp_group_list2 = importComparisonGroups(comp_group_dir)
if 'RPKM' in norm and 'counts.' in expr_input_dir: normalization_method = 'RPKM-counts' ### process as counts if analyzing the counts file
else: normalization_method = norm
if expressionDataFormat == 'non-log': logvalues=False
else: logvalues=True
if convertNonLogToLog: logvalues = True
try:
array_folds, array_fold_headers, summary_filtering_stats,raw_data_comp_headers = reorder_arrays.reorder(array_folds,array_names,expr_group_list,
comp_group_list,probeset_db,include_raw_data,array_type,normalization_method,fl,logvalues=logvalues,blanksPresent=blanksPresent)
except Exception:
print traceback.format_exc(),'\n'
print_out = 'AltAnalyze encountered an error with the format of the expression file.\nIf the data was designated as log intensities and it is not, then re-run as non-log.'
try: UI.WarningWindow(print_out,'Critical Error - Exiting Program!!!'); root.destroy(); force_exit ### Forces the error log to pop-up
except NameError: print print_out; sys.exit()
### Integrate maximum counts for each gene for the purpose of filtering (RNASeq data only)
if array_type == 'RNASeq' and 'counts.' not in expr_input_dir: addMaxReadCounts(expr_input_dir)
### Export these results to a DATASET statistics and annotation results file
if 'counts.' not in expr_input_dir:
if array_type == 'RNASeq' and norm == 'RPKM':
filterRNASeq(count_statistics_db)
### Export count summary in GenMAPP format
if include_raw_data == 'yes': headers = removeRawCountData(array_fold_headers)
else: headers = array_fold_headers
exportDataForGenMAPP(headers,'counts')
exportAnalyzedData(comp_group_list2,expr_group_db)
### Export formatted results for input as an expression dataset into GenMAPP or PathVisio
if data_type == 'expression':
if include_raw_data == 'yes': headers = removeRawData(array_fold_headers)
else: headers = array_fold_headers
exportDataForGenMAPP(headers,'expression')
try: clearObjectsFromMemory(summary_filtering_stats); clearObjectsFromMemory(array_folds)
except Exception: null=[]
try: clearObjectsFromMemory(summary_filtering_stats); summary_filtering_stats=[]
except Exception: null=[]
else:
### When performing an RNASeq analysis on RPKM data, we first perform these analyses on the raw counts to remove fold changes for low expressing genes
"""count_statistics_db={}; count_statistics_headers=[]
for key in array_folds:
count_statistics_db[key] = array_folds[key]
for name in array_fold_headers: count_statistics_headers.append(name)"""
try: clearObjectsFromMemory(summary_filtering_stats)
except Exception: null=[]
try: clearObjectsFromMemory(summary_filtering_stats); summary_filtering_stats=[]
except Exception: null=[]
return array_folds, array_fold_headers
def filterRNASeq(counts_db):
### Parse through the raw count data summary statistics and annotate any comparisons considered NOT EXPRESSED by read count filtering as not expressed (on top of RPKM filtering)
reassigned = 0; re = 0
for gene in counts_db:
i=0 ### keep track of the index (same as RPKM index)
for val in counts_db[gene]:
if val =='Insufficient Expression':
#print val, i, array_folds[gene][i];kill
if array_folds[gene][i] != 'Insufficient Expression': reassigned = gene, array_folds[gene][i]
array_folds[gene][i] = 'Insufficient Expression' ### Re-assign the fold changes to this non-numeric value
re+=1
i+=1
#print reassigned, re
def addMaxReadCounts(filename):
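### Stores the maximum raw read count per gene (from RNASeq.importGeneCounts) in the summary_filtering_stats objects for downstream filtering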
import RNASeq
max_count_db,array_names = RNASeq.importGeneCounts(filename,'max')
for gene in summary_filtering_stats:
gs = summary_filtering_stats[gene]
gs.setMaxCount(max_count_db[gene]) ### Shouldn't cause an error, but we want to get an exception if it does (something is wrong with the analysis)
def simplerGroupImport(group_dir):
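### Returns a dictionary of sample filename -> group name parsed from the matching "groups." file
### Expected format is three tab-delimited columns (hypothetical example): sample1.CEL<TAB>1<TAB>wildtype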
if 'exp.' in group_dir or 'filteredExp.' in group_dir:
group_dir = string.replace(group_dir,'exp.','groups.')
group_dir = string.replace(group_dir,'filteredExp.','groups.')
import collections
try: sample_group_db = collections.OrderedDict()
except Exception:
try:
import ordereddict
sample_group_db = ordereddict.OrderedDict()
except Exception:
sample_group_db={}
fn = filepath(group_dir)
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
try: sample_filename,group_number,group_name = string.split(data,'\t')
except Exception:
print string.split(data,'\t'), 'more than 3 columns present in groups file'
kill
sample_group_db[sample_filename] = group_name
return sample_group_db
def simpleGroupImport(group_dir,splitHeaders=False):
""" Used for calculating fold changes prior to clustering for individual samples (genomtric folds) """
import collections
try:
### OrderedDict used to return the keys in the orders added for markerFinder
group_sample_db=collections.OrderedDict()
group_name_db=collections.OrderedDict()
group_name_sample_db=collections.OrderedDict()
group_db=collections.OrderedDict()
except Exception:
try:
import ordereddict
group_sample_db = ordereddict.OrderedDict()
group_name_db=ordereddict.OrderedDict()
group_name_sample_db=ordereddict.OrderedDict()
group_db=ordereddict.OrderedDict()
except Exception:
group_sample_db={}
group_name_db={}
group_name_sample_db={}
group_db={}
sample_list=[]
group_dir = verifyExpressionFile(group_dir)
fn = filepath(group_dir)
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
try: sample_filename,group_number,group_name = string.split(data,'\t')
except Exception: print data;kill
if splitHeaders:
if '~' in sample_filename: sample_filename = string.split(sample_filename,'~')[-1]
group_sample_db[sample_filename] = group_name+':'+sample_filename
try: group_name_sample_db[group_name].append(group_name+':'+sample_filename)
except Exception: group_name_sample_db[group_name] = [group_name+':'+sample_filename]
sample_list.append(sample_filename)
group_db[sample_filename] = group_name
group_name_db[group_number]=group_name ### used by simpleCompsImport
### Get the comparisons indicated by the user
comps_name_db,comp_groups = simpleCompsImport(group_dir,group_name_db)
return sample_list,group_sample_db,group_db,group_name_sample_db,comp_groups,comps_name_db
def simpleCompsImport(group_dir,group_name_db):
""" Used for calculating fold changes prior to clustering for individual samples (genomtric folds) """
comps_dir = string.replace(group_dir,'groups.','comps.')
comps_name_db={}
comp_groups=[]
comps_dir = verifyExpressionFile(comps_dir)
fn = filepath(comps_dir)
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
try:
exp_group_num,con_group_num = string.split(data,'\t')
exp_group_name = group_name_db[exp_group_num]
con_group_name = group_name_db[con_group_num]
try: comps_name_db[con_group_name].append(exp_group_name)
except Exception:
#comps_name_db[con_group_name] = [exp_group_name] ### If we don't want to include the control samples
comps_name_db[con_group_name] = [con_group_name] ### Add the control group versus itself the first time
comps_name_db[con_group_name].append(exp_group_name)
### Keep track of the order of the groups for ordering the cluster inputs
if con_group_name not in comp_groups:
comp_groups.append(con_group_name)
if exp_group_name not in comp_groups:
comp_groups.append(exp_group_name)
except Exception: pass ### Occurs if there are dummy lines in the file (returns with no values)
return comps_name_db,comp_groups
def importArrayGroups(expr_group_dir,array_linker_db):
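### Parses the "groups." file and returns a sorted list of (new index, original index, group number, group name) tuples plus a group number -> group name dictionary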
new_index_order = 0
expr_group_list=[]
expr_group_db = {} ### use when writing out data
fn=filepath(expr_group_dir)
try:
try:
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
length = string.join(t,'') ### Some lines can be blank
if len(length)>2:
array_header,group,group_name = t
group = int(group)
### compare new to original index order of arrays
try:
original_index_order = array_linker_db[array_header]
except KeyError:
print_out = 'WARNING!!! At least one sample-ID listed in the "groups." file (e.g.,'+array_header+')'+'\n is not in the sample "exp." file. See the new file "arrays." with all "exp." header names\nand correct "groups."'
try: UI.WarningWindow(print_out,'Critical Error - Exiting Program!!!')
except Exception: print print_out
exportArrayHeaders(expr_group_dir,array_linker_db)
try: root.destroy(); sys.exit()
except Exception: sys.exit()
entry = new_index_order, original_index_order, group, group_name
expr_group_list.append(entry)
new_index_order += 1 ### add this afterwards since these will also be used as index values
expr_group_db[str(group)] = group_name
expr_group_list.sort() ### sorting puts this in the original array order
except ValueError:
print_out = 'The group number "'+group+'" is not a valid integer. Correct before proceeding.'
try: UI.WarningWindow(print_out,'Critical Error - Exiting Program!!!'); root.destroy(); sys.exit()
except Exception: print print_out; sys.exit()
except Exception,e:
print traceback.format_exc(),'\n'
exportArrayHeaders(expr_group_dir,array_linker_db)
print_out = 'No groups or comps files found for'+expr_group_dir+'... exiting program.'
try: UI.WarningWindow(print_out,'Critical Error - Exiting Program!!!'); root.destroy(); sys.exit()
except Exception: print print_out; sys.exit()
return expr_group_list,expr_group_db
def exportArrayHeaders(expr_group_dir,array_linker_db):
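### Writes all sample headers from the expression file to an "arrays." file so a mismatched "groups." file can be corrected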
new_file = string.replace(expr_group_dir,'groups.','arrays.')
new_file = string.replace(new_file,'exp.','arrays.')
new_file = string.replace(new_file,'counts.','arrays.')
if 'arrays.' not in new_file: new_file = 'arrays.' + new_file ### Can occur if the file does not have 'exp.' in it
fn=filepath(new_file); data = open(fn,'w')
for array in array_linker_db: data.write(array+'\n')
data.close()
def importComparisonGroups(comp_group_dir):
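### Parses the "comps." file; each tab-delimited line lists an experimental and a control group number defining one comparison (hypothetical example: 2<TAB>1)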
comp_group_list=[]; comp_group_list2=[]
try:
fn=filepath(comp_group_dir)
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
groups = string.split(data,'\t')
groups2 = groups[0],groups[1] #as a list these would be unhashable
comp_group_list.append(groups)
comp_group_list2.append(groups2)
except Exception: null=[] ### Occurs when no file is present
return comp_group_list, comp_group_list2
def importMicrornaAssociations(species,report):
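### Builds an Ensembl gene -> putative microRNA annotation string from the species-specific microRNA-Ensembl file; report='multiple' keeps only miRs supported by more than one algorithm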
filename = 'AltDatabase/Ensembl/'+species+'/'+species+'_microRNA-Ensembl.txt'
fn=filepath(filename); ensembl_microRNA_db={}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
miR,ens_geneid,sources = string.split(data,'\t')
miR_annot = miR+'('+sources+')'
try: ensembl_microRNA_db[ens_geneid].append(miR_annot)
except KeyError: ensembl_microRNA_db[ens_geneid] = [miR_annot]
###Optionally filter out miRs with evidence from just one algorithm (options are 'any' and 'multiple')
for gene in ensembl_microRNA_db:
miRs = ensembl_microRNA_db[gene]; miRs.sort()
if report == 'multiple':
miRs2=[]
for mir in miRs:
if '|' in mir: miRs2.append(mir)
miRs=miRs2
miRs = string.join(miRs,', ')
ensembl_microRNA_db[gene] = miRs
return ensembl_microRNA_db
def importSystemCodes():
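### Reads Config/source_data.txt and returns a dictionary of system name -> system code (used as the SystemCode column in GenMAPP/GO-Elite exports)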
filename = 'Config/source_data.txt'
fn=filepath(filename); x=0; systems={}
for line in open(fn,'rU').readlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
system_name=t[0];system_code=t[1]
if x==0: x=1
else: systems[system_name] = system_code
return systems
def exportDataForGenMAPP(headers,input_type):
###Export summary columns for GenMAPP analysis
systems = importSystemCodes()
GenMAPP_file = expression_dataset_output_dir + 'GenMAPP-'+experiment_name+'.txt'
if 'counts' in input_type:
GenMAPP_file = string.replace(GenMAPP_file,'GenMAPP-','COUNTS-')
try: genmapp = export.createExportFile(GenMAPP_file,expression_dataset_output_dir[:-1])
except RuntimeError:
export.isFileOpen(GenMAPP_file,expression_dataset_output_dir[:-1])
genmapp = export.createExportFile(GenMAPP_file,expression_dataset_output_dir[:-1])
if array_type == "3'array" and 'Ensembl' not in vendor:
if vendor == 'Affymetrix': system_code = 'X'
elif vendor == 'Illumina': system_code = 'Il'
elif vendor == 'Agilent': system_code = 'Ag'
elif vendor == 'Codelink': system_code = 'Co'
else:
### This is another system selected by the user
system = string.replace(vendor,'other:','')
try: system_code = systems[system]
except Exception: system_code = 'Sy'
elif array_type != 'AltMouse': system_code = 'En'
else:
try: system_code = systems[vendor]
except Exception: system_code = 'X'
genmapp_title = ['GeneID','SystemCode'] + headers
genmapp_title = string.join(genmapp_title,'\t')+'\t'+'ANOVA-rawp'+'\t'+'ANOVA-adjp'+'\t'+'largest fold'+'\n'
genmapp.write(genmapp_title)
for probeset in array_folds:
if 'ENS' in probeset and (' ' in probeset or '_' in probeset or ':' in probeset or '-' in probeset) and len(probeset)>9:
system_code = 'En'
ensembl_gene = 'ENS'+string.split(probeset,'ENS')[1]
if ' ' in ensembl_gene:
ensembl_gene = string.split(ensembl_gene,' ')[0]
if '_' in ensembl_gene:
ensembl_gene = string.split(ensembl_gene,'_')[0]
if ':' in ensembl_gene:
ensembl_gene = string.split(ensembl_gene,':')[0]
if '-' in ensembl_gene:
ensembl_gene = string.split(ensembl_gene,'-')[0]
data_val = ensembl_gene+'\t'+system_code
elif ('ENS' in probeset or 'ENF' in probeset) and system_code == 'Sy' and len(probeset)>9:
system_code = 'En'
data_val = probeset+'\t'+system_code
else:
data_val = probeset+'\t'+system_code
for value in array_folds[probeset]: data_val += '\t'+ str(value)
gs = summary_filtering_stats[probeset]
data_val += '\t'+ str(gs.Pval()) +'\t'+ str(gs.AdjP()) +'\t'+ str(gs.LogFold()) +'\n'
genmapp.write(data_val)
genmapp.close()
exportGOEliteInput(headers,system_code)
print 'Exported GO-Elite input files...'
def buildCriterion(ge_fold_cutoffs, ge_pvalue_cutoffs, ge_ptype, main_output_folder, operation, UseDownRegulatedLabel=False, genesToExclude={}):
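### Re-applies the user-defined fold and p-value cutoffs to previously exported DATASET-/GenMAPP- files, regenerating either GO-Elite input lists or a gene regulation summary (operation='summary')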
global array_folds; global m_cutoff; global p_cutoff; global expression_dataset_output_dir
global ptype_to_use; global use_downregulated_label; use_downregulated_label = UseDownRegulatedLabel
m_cutoff = math.log(float(ge_fold_cutoffs),2); p_cutoff = ge_pvalue_cutoffs; ptype_to_use = ge_ptype
expression_dataset_output_dir = string.replace(main_output_folder,'GO-Elite','ExpressionOutput/')
dir_list = read_directory(expression_dataset_output_dir[:-1])
if operation == 'summary': filetype = 'DATASET-'
else: filetype = 'GenMAPP-'
for filename in dir_list:
if filetype in filename:
fn=filepath(expression_dataset_output_dir+filename)
array_folds = {}; x=0
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line); t = string.split(data,'\t')
if x==0: x=1; headers = t[1:-2]
else:
values = t[1:-2]; probeset = t[0]; system_code = t[1]
if probeset not in genesToExclude: ### E.g., sex-associated or pseudogenes
array_folds[probeset] = values
if operation == 'summary':
exportGeneRegulationSummary(filename,headers,system_code)
else:
input_files_exported = exportGOEliteInput(headers,system_code)
array_folds=[]
def excludeGenesImport(filename):
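### Returns a dictionary of gene IDs (first column of the supplied file) to exclude from criterion filtering (e.g., sex-associated genes or pseudogenes)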
fn=filepath(filename)
exclude_genes = {}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
uid = string.split(data,'\t')[0]
exclude_genes[uid] = None
return exclude_genes
def importCountSummary():
### Copied code from buildCriterion
count_summary_db={}
indexed_headers={}
filetype = 'COUNTS-'
dir_list = read_directory(expression_dataset_output_dir[:-1])
for filename in dir_list:
if filetype in filename:
fn=filepath(expression_dataset_output_dir+filename)
count_summary_db = {}; x=0
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line); t = string.split(data,'\t')
if x==0:
x=1; i=0
for header in t:
indexed_headers[header]=i
i+=1
else:
values = t[1:-2]; probeset = t[0]; system_code = t[1]
count_summary_db[probeset] = values
return count_summary_db, indexed_headers
def exportGOEliteInput(headers,system_code):
### Filter statistics based on user-defined thresholds as input for GO-Elite analysis
criterion_db={}; denominator_geneids={}; index = 0; ttest=[]
for column in headers:
if 'ANOVA' in ptype_to_use and ptype_to_use in column: ttest.append(index) ### Not currently implemented
elif ptype_to_use in column and 'ANOVA' not in column: ttest.append(index)
lfi = 2 ### relative logfold index position
if ptype_to_use == 'adjp': lfi = 3
index+=1
### Had to introduce the below code to see if any p-values for a criterion are < 1 (otherwise, include them for GO-Elite)
exclude_p1={}
for probeset in array_folds:
index = 0; af = array_folds[probeset]
for value in array_folds[probeset]:
if index in ttest:
criterion_name = headers[index][5:]
if criterion_name not in exclude_p1:
try: p_value = float(value)
except Exception: p_value = 1 ### Occurs when a p-value is annotated as 'Insufficient Expression'
if p_value < 1:
exclude_p1[criterion_name] = True # Hence, at least one gene has a p<1
index+=1
for probeset in array_folds:
index = 0; af = array_folds[probeset]
for value in array_folds[probeset]:
denominator_geneids[probeset]=[]
if index in ttest:
criterion_name = headers[index][5:]
if use_downregulated_label==False:
rcn = string.split(criterion_name,'_vs_'); rcn.reverse() ### re-label all downregulated as up (reverse the numerator/denominator)
reverse_criterion_names = string.join(rcn,'_vs_')
regulation_call = '-upregulated'
else:
reverse_criterion_names = criterion_name
regulation_call = '-downregulated'
try: log_fold = float(af[index-lfi])
except Exception: log_fold = 0 ### Occurs when a fold change is annotated as 'Insufficient Expression'
try: p_value = float(value)
except Exception: p_value = 1 ### Occurs when a p-value is annotated as 'Insufficient Expression'
try: excl_p1 = exclude_p1[criterion_name] ### You can have adjusted p-values that are equal to 1
except Exception: excl_p1 = False #Make True to exclude ALL non-adjp < sig value entries
#print [log_fold, m_cutoff, p_value, p_cutoff];sys.exit()
if abs(log_fold)>m_cutoff and (p_value<p_cutoff or (p_value==1 and excl_p1==False)):
#if p_value == 1: print log_fold, probeset,[value]; sys.exit()
try: criterion_db[criterion_name].append((probeset,log_fold,p_value))
except KeyError: criterion_db[criterion_name] = [(probeset,log_fold,p_value)]
if log_fold>0:
try: criterion_db[criterion_name+'-upregulated'].append((probeset,log_fold,p_value))
except KeyError: criterion_db[criterion_name+'-upregulated'] = [(probeset,log_fold,p_value)]
else:
if use_downregulated_label==False:
log_fold = abs(log_fold)
try: criterion_db[reverse_criterion_names+regulation_call].append((probeset,log_fold,p_value))
except KeyError: criterion_db[reverse_criterion_names+regulation_call] = [(probeset,log_fold,p_value)]
index += 1
### Format these statistical filtering parameters as a string to include in the file as a record
if m_cutoff<0: fold_cutoff = -1/math.pow(2,m_cutoff)
else: fold_cutoff = math.pow(2,m_cutoff)
stat_filters = ' (Regulation criterion: fold > '+str(fold_cutoff)+' and '+ptype_to_use+ ' p-value < '+str(p_cutoff)+')'
stat_filters_filename = '-fold'+str(fold_cutoff)+'_'+ptype_to_use+str(p_cutoff)
### Format these lists to export as tab-delimited text files
if len(criterion_db)>0:
### Export denominator gene IDs
input_files_exported = 'yes'
expression_dir = string.replace(expression_dataset_output_dir,'ExpressionOutput/','')
goelite_file = expression_dir +'GO-Elite/denominator/GE.denominator.txt'
goelite = export.createExportFile(goelite_file,expression_dir+'GO-Elite/denominator')
goelite_title = ['GeneID','SystemCode']
goelite_title = string.join(goelite_title,'\t')+'\n'; goelite.write(goelite_title)
for probeset in denominator_geneids:
try:
if 'ENS' in probeset and (' ' in probeset or '_' in probeset or ':' in probeset or '-' in probeset) and len(probeset)>9:
system_code = 'En'
ensembl_gene = 'ENS'+string.split(probeset,'ENS')[1]
if ' ' in ensembl_gene:
ensembl_gene = string.split(ensembl_gene,' ')[0]
if '_' in ensembl_gene:
ensembl_gene = string.split(ensembl_gene,'_')[0]
if ':' in ensembl_gene:
ensembl_gene = string.split(ensembl_gene,':')[0]
if '-' in ensembl_gene:
ensembl_gene = string.split(ensembl_gene,'-')[0]
probeset = ensembl_gene
elif ':' in probeset:
probeset = string.split(probeset,':')[0]
system_code = 'Sy'
except Exception:
pass
if ('ENS' in probeset or 'ENF' in probeset) and system_code == 'Sy' and len(probeset)>9:
system_code = 'En'
values = string.join([probeset,system_code],'\t')+'\n'; goelite.write(values)
goelite.close()
### Export criterion gene IDs and minimal data
for criterion_name in criterion_db:
if criterion_name[-1] == ' ': criterion_file_name = criterion_name[:-1]
else: criterion_file_name = criterion_name
if 'upregulated' in criterion_name: elitedir = 'upregulated'
elif 'downregulated' in criterion_name: elitedir = 'downregulated'
else: elitedir = 'regulated'
goelite_file = expression_dir + 'GO-Elite/'+elitedir+'/GE.'+criterion_file_name+stat_filters_filename+'.txt'
goelite = export.ExportFile(goelite_file)
goelite_title = ['GeneID'+stat_filters,'SystemCode',criterion_name+'-log_fold',criterion_name+'-p_value']
goelite_title = string.join(goelite_title,'\t')+'\n'; goelite.write(goelite_title)
for (probeset,log_fold,p_value) in criterion_db[criterion_name]:
try:
if 'ENS' in probeset and (' ' in probeset or '_' in probeset or ':' in probeset or '-' in probeset):
system_code = 'En'
ensembl_gene = 'ENS'+string.split(probeset,'ENS')[1]
if ' ' in ensembl_gene:
ensembl_gene = string.split(ensembl_gene,' ')[0]
if '_' in ensembl_gene:
ensembl_gene = string.split(ensembl_gene,'_')[0]
if ':' in ensembl_gene:
ensembl_gene = string.split(ensembl_gene,':')[0]
if '-' in ensembl_gene:
ensembl_gene = string.split(ensembl_gene,'-')[0]
probeset = ensembl_gene
elif ':' in probeset:
probeset = string.split(probeset,':')[0]
system_code = 'Sy'
except Exception:
pass
values = string.join([probeset,system_code,str(log_fold),str(p_value)],'\t')+'\n'
goelite.write(values)
goelite.close()
else: input_files_exported = 'no'
return input_files_exported
def exportGeneRegulationSummary(filename,headers,system_code):
"""
1) Exports summary results description - Performs a series of targeted queries to report the number
of coding and non-coding genes expressed along with various regulation and annotation parameters.
2) Exports a global regulated expression table - Values are log2 geometric folds relative to baseline
of the entire row (all samples) for any criterion met (see ptype_to_use, m_cutoff, p_cutoff). Optionally
cluster these results downstream and perform QC analyses."""
criterion_db={}; detected_exp_db={}; denominator_geneids={}; index = 0; ttest=[]; avg_columns=[]; all_criterion=[]; all_groups=[]
search_miR = 'miR-1('
coding_types = ['protein_coding','ncRNA']
for column in headers:
if 'ANOVA' in ptype_to_use and ptype_to_use in column: ttest.append(index) ### Not currently implemented
elif ptype_to_use in column and 'ANOVA' not in column: ttest.append(index)
lfi = 2 ### relative logfold index position
if ptype_to_use == 'adjp': lfi = 3
index+=1
if 'Protein Classes' in column: pc = index-1
if 'microRNA' in column: mi = index-1
if 'avg-' in column: avg_columns.append(index-1)
if 'Symbol' in column: sy = index-1
try: count_summary_db,indexed_headers = importCountSummary()
except Exception: count_summary_db={}
### Had to introduce the below code to see if any p-values for a criterion are < 1 (otherwise, include them for GO-Elite)
exclude_p1={}
for probeset in array_folds:
index = 0; af = array_folds[probeset]
for value in array_folds[probeset]:
if index in ttest:
criterion_name = headers[index][5:]
if criterion_name not in exclude_p1:
try: p_value = float(value)
except Exception: p_value = 1 ### Occurs when a p-value is annotated as 'Insufficient Expression'
if p_value < 1:
exclude_p1[criterion_name] = True # Hence, at least one gene has a p<1
index+=1
genes_to_import={}; probeset_symbol={}
for probeset in array_folds:
index = 0; af = array_folds[probeset]
probeset_symbol[probeset] = af[sy]
for value in array_folds[probeset]:
denominator_geneids[probeset]=[]
if index in avg_columns:
column_name = headers[index]
group_name = column_name[4:]
try: protein_class = af[pc]
except Exception: protein_class = 'NULL'
proceed = False
if array_type == 'RNASeq':
if norm == 'RPKM':
try: ### Counts file should be present but if not, still proceed
i2 = indexed_headers[column_name]
if float(af[index])>gene_rpkm_threshold and count_summary_db[probeset][i2]>gene_exp_threshold:
#if float(af[index])>5 and count_summary_db[probeset][i2]>50:
proceed = True
except Exception:
proceed = True
exp_info = probeset, af[index],count_summary_db[probeset][i2] ### keep track of the expression info
else:
if float(af[index])>expr_threshold:
proceed = True
exp_info = probeset, expr_threshold,expr_threshold
if proceed==True:
if group_name not in all_groups: all_groups.append(group_name)
if 'protein_coding' in protein_class:
try: detected_exp_db[group_name,'protein_coding'].append(exp_info)
except KeyError: detected_exp_db[group_name,'protein_coding']=[exp_info]
else:
try: detected_exp_db[group_name,'ncRNA'].append(exp_info)
except KeyError: detected_exp_db[group_name,'ncRNA']=[exp_info]
if index in ttest:
criterion_name = headers[index][5:]
try: log_fold = float(af[index-lfi])
except Exception: log_fold = 0 ### Occurs when a fold change is annotated as 'Insufficient Expression'
try: p_value = float(value)
except Exception: p_value = 1 ### Occurs when a p-value is annotated as 'Insufficient Expression'
try: excl_p1 = exclude_p1[criterion_name] ### You can have adjusted p-values that are equal to 1
except Exception: excl_p1 = False #Make True to exclude ALL non-adjp < sig value entries
try: protein_class = af[pc]
except Exception: protein_class = 'NULL'
if abs(log_fold)>m_cutoff and (p_value<p_cutoff or (p_value==1 and excl_p1==False)):
if criterion_name not in all_criterion: all_criterion.append(criterion_name)
try: criterion_db[criterion_name]+=1
except KeyError: criterion_db[criterion_name] = 1
genes_to_import[probeset]=[] ### All, regulated genes (any criterion)
if 'protein_coding' in protein_class:
if log_fold>0:
try: criterion_db[criterion_name,'upregulated','protein_coding']+=1
except KeyError: criterion_db[criterion_name,'upregulated','protein_coding'] = 1
try:
if 'miR-1(' in af[mi]:
try: criterion_db[criterion_name,'upregulated','protein_coding',search_miR[:-1]]+=1
except KeyError: criterion_db[criterion_name,'upregulated','protein_coding',search_miR[:-1]] = 1
except Exception: None ### occurs when mi not present
else:
try: criterion_db[criterion_name,'downregulated','protein_coding']+=1
except KeyError: criterion_db[criterion_name,'downregulated','protein_coding'] = 1
try:
if 'miR-1(' in af[mi]:
try: criterion_db[criterion_name,'downregulated','protein_coding',search_miR[:-1]]+=1
except KeyError: criterion_db[criterion_name,'downregulated','protein_coding',search_miR[:-1]] = 1
except Exception: None ### occurs when mi not present
else:
if protein_class == 'NULL':
class_name = 'unclassified'
else:
class_name = 'ncRNA'
if log_fold>0:
try: criterion_db[criterion_name,'upregulated',class_name]+=1
except KeyError: criterion_db[criterion_name,'upregulated',class_name] = 1
try:
if 'miR-1(' in af[mi]:
try: criterion_db[criterion_name,'upregulated',class_name,search_miR[:-1]]+=1
except KeyError: criterion_db[criterion_name,'upregulated',class_name,search_miR[:-1]] = 1
except Exception: None ### occurs when mi not present
else:
try: criterion_db[criterion_name,'downregulated',class_name]+=1
except KeyError: criterion_db[criterion_name,'downregulated',class_name] = 1
try:
if 'miR-1(' in af[mi]:
try: criterion_db[criterion_name,'downregulated',class_name,search_miR[:-1]]+=1
except KeyError: criterion_db[criterion_name,'downregulated',class_name,search_miR[:-1]] = 1
except Exception: None ### occurs when mi not present
index += 1
if len(criterion_db)>0:
try: exportGeometricFolds(expression_dataset_output_dir+filename,array_type,genes_to_import,probeset_symbol)
except Exception,e:
print 'Failed to export geometric folds due to:'
print e ### Don't exit the analysis just report the problem
print traceback.format_exc()
None
### Export lists of expressed genes
all_expressed={}
for (group_name,coding_type) in detected_exp_db:
eo = export.ExportFile(expression_dataset_output_dir+'/ExpressedGenes/'+group_name+'-'+coding_type+'.txt')
eo.write('GeneID\tRPKM\tCounts\n')
for (gene,rpkm,counts) in detected_exp_db[(group_name,coding_type)]:
eo.write(gene+'\t'+str(rpkm)+'\t'+str(counts)+'\n')
all_expressed[gene]=[]
try: eo.close()
except Exception: pass
filename = string.replace(filename,'DATASET-','SUMMARY-')
filename = string.replace(filename,'GenMAPP-','SUMMARY-')
summary_path = expression_dataset_output_dir +filename
export_data = export.ExportFile(summary_path)
print 'Export summary gene expression results to:',filename
### Output Number of Expressed Genes
title = ['Biological group']
for group_name in all_groups: title.append(group_name)
title = string.join(title,'\t')+'\n'; export_data.write(title)
if array_type == 'RNASeq':
### Only really informative for RNA-Seq data right now, since DABG gene-level stats are not calculated (too time-intensive for this one statistic)
for coding_type in coding_types:
if coding_type == 'protein_coding': values = ['Expressed protein-coding genes']
else: values = ['Expressed ncRNAs']
for group in all_groups:
for group_name in detected_exp_db:
if group in group_name and coding_type in group_name:
values.append(str(len(detected_exp_db[group_name])))
values = string.join(values,'\t')+'\n'; export_data.write(values)
export_data.write('\n')
if m_cutoff<0: fold_cutoff = -1/math.pow(2,m_cutoff)
else: fold_cutoff = math.pow(2,m_cutoff)
### Export criterion gene IDs and minimal data
export_data.write('Regulation criterion: fold > '+str(fold_cutoff)+' and '+ptype_to_use+ ' p-value < '+str(p_cutoff)+'\n\n')
for criterion in all_criterion:
title = [criterion,'up','down','up-'+search_miR[:-1],'down-'+search_miR[:-1]]
title = string.join(title,'\t')+'\n'; export_data.write(title)
for coding_type in coding_types:
values = ['Regulated '+coding_type+' genes']
for criterion_name in criterion_db:
if len(criterion_name)==3:
if criterion in criterion_name and ('upregulated',coding_type) == criterion_name[1:]:
values.append(str(criterion_db[criterion_name]))
if len(values)==1: values.append('0')
for criterion_name in criterion_db:
if len(criterion_name)==3:
if criterion in criterion_name and ('downregulated',coding_type) == criterion_name[1:]:
values.append(str(criterion_db[criterion_name]))
if len(values)==2: values.append('0')
for criterion_name in criterion_db:
if len(criterion_name)==4:
if criterion in criterion_name and ('upregulated',coding_type) == criterion_name[1:-1]:
values.append(str(criterion_db[criterion_name]))
if len(values)==3: values.append('0')
for criterion_name in criterion_db:
if len(criterion_name)==4:
if criterion in criterion_name and ('downregulated',coding_type) == criterion_name[1:-1]:
values.append(str(criterion_db[criterion_name]))
if len(values)==4: values.append('0')
#print values;sys.exit()
values = string.join(values,'\t')+'\n'; export_data.write(values)
export_data.write('\n')
export_data.close()
def exportGeometricFolds(filename,platform,genes_to_import,probeset_symbol,exportOutliers=True,exportRelative=True,customPath=None,convertNonLogToLog=False):
#print expression_data_format
#print platform
#print len(genes_to_import)
#print exportOutliers
#print exportRelative
#print customPath
""" Import sample and gene expression values from input file, filter, calculate geometric folds
and export. Use for clustering and QC."""
#print '\n',filename
filename = string.replace(filename,'///','/')
filename = string.replace(filename,'//','/')
if 'ExpressionOutput' in filename:
filename = string.replace(filename,'-steady-state.txt','.txt')
export_path = string.replace(filename,'ExpressionOutput','ExpressionOutput/Clustering')
export_path = string.replace(export_path,'DATASET-','SampleLogFolds-') ### compared to all-sample mean
export_path2 = string.replace(export_path,'SampleLogFolds-','OutlierLogFolds-') ### compared to all-sample mean
export_path3 = string.replace(export_path,'SampleLogFolds-','RelativeSampleLogFolds-') ### compared to control groups
filename = string.replace(filename,'ExpressionOutput','ExpressionInput')
filename = string.replace(filename,'DATASET-','exp.')
groups_dir = string.replace(filename,'exp.','groups.')
if platform != "3'array" and platform != "AltMouse":
### This is the extension for gene-level results for exon-sensitive platforms
filename1 = string.replace(filename,'.txt','-steady-state.txt')
status = verifyFile(filename1)
if status == 'yes':
filename = filename1
status = verifyFile(filename)
if status != 'yes':
filename = string.replace(filename,'exp.','')
filename = string.replace(filename,'ExpressionInput','')
status = verifyFile(filename)
if customPath!=None:
### If an alternative output path is desired
export_path = customPath
print len(genes_to_import), 'genes with data to export...'
try:
if expression_data_format == 'non-log' and platform != 'RNASeq': convertNonLogToLog = True
except Exception: pass
if status != 'yes':
print "Clustering expression file not exported due to missing file:"
print filename
if status == 'yes':
export_data = export.ExportFile(export_path)
if exportOutliers: export_outliers = export.ExportFile(export_path2)
if exportRelative: export_relative = export.ExportFile(export_path3)
print 'Export inputs for clustering to:',export_path
expressionDataFormat,increment,convertNonLogToLog = checkExpressionFileFormat(filename)
#print expressionDataFormat,increment,convertNonLogToLog
fn=filepath(filename); row_number=0; exp_db={}; relative_headers_exported = False
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if data[0]=='#' and row_number==0: row_number = 0
elif row_number==0:
sample_list,group_sample_db,group_db,group_name_sample_db,comp_groups,comps_name_db = simpleGroupImport(groups_dir)
try: sample_index_list = map(lambda x: t[1:].index(x), sample_list) ### lookup index of each sample in the ordered group sample list
except Exception:
missing=[]
for x in sample_list:
if x not in t[1:]: missing.append(x)
print 'missing:',missing
print t
print sample_list
print filename, groups_dir
print 'Unknown Error!!! Skipping cluster input file build (check column and row formats for conflicts)'; forceExit
new_sample_list = map(lambda x: group_sample_db[x], sample_list) ### lookup index of each sample in the ordered group sample list
title = string.join([t[0]]+new_sample_list,'\t')+'\n' ### output the new sample order (group file order)
export_data.write(title)
if exportOutliers: export_outliers.write(title)
if exportRelative:
### Used for the relative fold calculation
group_index_db={}
for x in sample_list:
group_name = group_db[x]
sample_index = t[1:].index(x)
try: group_index_db[group_name].append(sample_index)
except Exception: group_index_db[group_name] = [sample_index] ### dictionary of group to input file sample indexes
row_number=1
else:
gene = t[0]
if platform == 'RNASeq':
### Convert to log2 RPKM values - or counts
try: values = map(lambda x: math.log(float(x)+increment,2), t[1:])
except Exception:
if convertNonLogToLog:
values = logTransformWithNAs(t[1:],increment)
else:
values = TransformWithNAs(t[1:])
else:
try:
if convertNonLogToLog:
values = map(lambda x: math.log(float(x)+increment,2), t[1:])
else:
values = map(float,t[1:])
except Exception:
if convertNonLogToLog:
values = logTransformWithNAs(t[1:],increment)
else:
values = TransformWithNAs(t[1:])
### Calculate log-fold values relative to the mean of all sample expression values
values = map(lambda x: values[x], sample_index_list) ### simple and fast way to reorganize the samples
try: avg = statistics.avg(values)
except Exception:
values2=[]
for v in values:
try: values2.append(float(v))
except Exception: pass
try: avg = statistics.avg(values2)
except Exception:
if len(values2)>0: avg = values2[0]
else: avg = 0
try: log_folds = map(lambda x: (x-avg), values)
except Exception:
log_folds=[]
for x in values:
try: log_folds.append(x-avg)
except Exception: log_folds.append('')
if gene in genes_to_import:
### Genes regulated in any user-indicated comparison according to the fold and pvalue cutoffs provided
log_folds = map(lambda x: str(x), log_folds)
try: gene2 = gene+' '+probeset_symbol[gene]
except Exception: gene2 = gene
if len(t[1:])!=len(log_folds):
log_folds = t[1:] ### If NAs - output the original values
export_data.write(string.join([gene2]+log_folds,'\t')+'\n')
if exportRelative:
### Calculate log-fold values relative to the mean of each valid group comparison
control_group_avg={}; comps_exp_db={}
for group_name in comps_name_db: ### control group names
con_group_values = map(lambda x: values[x], group_index_db[group_name]) ### simple and fast way to reorganize the samples
try: control_group_avg[group_name] = statistics.avg(con_group_values) ### store the mean value of each control group
except Exception:
con_group_values2=[]
for val in con_group_values:
try: con_group_values2.append(float(val))
except Exception: pass
try: control_group_avg[group_name] = statistics.avg(con_group_values2) ### average only the numeric values retained above
except Exception:
if len(con_group_values)>0:
control_group_avg[group_name] = con_group_values[0]
else: control_group_avg[group_name] = 0.0
for exp_group in comps_name_db[group_name]:
try: comps_exp_db[exp_group].append(group_name) ### Create a reversed version of the comps_name_db, list experimental as the key
except Exception: comps_exp_db[exp_group] = [group_name]
relative_log_folds=[] ### append all new log folds to this list
relative_column_names=[]
for group_name in comp_groups:
if group_name in comps_exp_db: ### Hence, the group has a designated control (controls may not) - could have the control group be a control for the control samples
group_values = map(lambda x: values[x], group_index_db[group_name]) ### simple and fast way to reorganize the samples
for control_group_name in comps_exp_db[group_name]:
con_avg = control_group_avg[control_group_name]
try:
relative_log_folds += map(lambda x: str(x-con_avg), group_values) ### calculate log-folds and convert to strings
except Exception:
relative_log_folds=[]
for x in group_values:
try: relative_log_folds.append(str(x-con_avg))
except Exception: relative_log_folds.append('')
if relative_headers_exported == False:
exp_sample_names = group_name_sample_db[group_name]
relative_column_names += map(lambda x: (x+' vs '+control_group_name), exp_sample_names) ### add column names indicating the comparison
if relative_headers_exported == False:
title = string.join(['UID']+relative_column_names,'\t')+'\n' ### Export column headers for the relative fold changes
export_relative.write(title)
relative_headers_exported = True
if len(t[1:])!=len(relative_log_folds):
relative_log_folds = t[1:] ### If NAs - output the original values
export_relative.write(string.join([gene2]+relative_log_folds,'\t')+'\n')
elif exportOutliers:
### When a gene is regulated and not significant, export to the outlier set
try: gene2 = gene+' '+probeset_symbol[gene]
except Exception: gene2 = gene
### These are defaults we may allow the user to control later
log_folds = [0 if x=='' else x for x in log_folds] ### using list comprehension, replace '' with 0
if max([max(log_folds),abs(min(log_folds))])>1:
proceed = True
if platform == 'RNASeq':
if max(values)<0.1: proceed = False
if proceed == True:
log_folds = map(lambda x: str(x), log_folds)
if len(t[1:])!=len(log_folds):
log_folds = t[1:] ### If NAs - output the original values
export_outliers.write(string.join([gene2]+log_folds,'\t')+'\n')
row_number+=1 ### Keep track of the first gene as to write out column headers for the relative outputs
export_data.close()
if exportOutliers: export_outliers.close()
if exportRelative: export_relative.close()
def logTransformWithNAs(values,increment):
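### log2-transforms each value after adding the increment; non-numeric entries are replaced with empty strings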
values2=[]
for x in values:
try: values2.append(math.log(float(x)+increment,2))
except Exception:
values2.append('')
return values2
def TransformWithNAs(values):
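### Converts each value to a float; non-numeric entries are replaced with empty strings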
values2=[]
for x in values:
try: values2.append(float(x))
except Exception:
values2.append('')
return values2
def importAndOrganizeLineageOutputs(expr_input,filename,platform):
""" This function takes LineageProfiler z-scores and organizes the samples into groups
takes the mean results for each group and looks for changes in lineage associations """
groups_dir = string.replace(expr_input,'exp.','groups.')
groups_dir = string.replace(groups_dir,'-steady-state.txt','.txt') ### groups is for the non-steady-state file
export_path = string.replace(filename,'ExpressionOutput','ExpressionOutput/Clustering')
export_path = string.replace(export_path,'.txt','-groups.txt')
export_data = export.ExportFile(export_path)
export_pathF = string.replace(export_path,'.txt','_filtered.txt')
export_dataF = export.ExportFile(export_pathF)
print 'Export inputs for clustering to:',export_path
fn=filepath(filename); row_number=0; exp_db={}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if data[0]=='#': row_number = 0
elif row_number==0:
group_index_db={}
### use comps in the future to visualize group comparison changes
sample_list,group_sample_db,group_db,group_name_sample_db,comp_groups,comps_name_db = simpleGroupImport(groups_dir)
for x in sample_list:
group_name = group_db[x]
sample_index = t[1:].index(x)
try: group_index_db[group_name].append(sample_index)
except Exception: group_index_db[group_name] = [sample_index] ### dictionary of group to input file sample indexes
groups = map(str, group_index_db) ### store group names
new_sample_list = map(lambda x: group_db[x], sample_list) ### lookup index of each sample in the ordered group sample list
title = string.join([t[0]]+groups,'\t')+'\n' ### output the new sample order (group file order)
export_data.write(title)
export_dataF.write(title)
row_number=1
else:
tissue = t[0]
if platform == 'RNASeq' and 'LineageCorrelations' not in filename:
### Convert to log2 RPKM values - or counts
values = map(lambda x: math.log(float(x),2), t[1:])
else:
values = map(float,t[1:])
avg_z=[]; avg_z_float=[]
for group_name in group_index_db:
group_values = map(lambda x: values[x], group_index_db[group_name]) ### simple and fast way to reorganize the samples
avg = statistics.avg(group_values)
avg_z.append(str(avg))
avg_z_float.append(avg)
export_data.write(string.join([tissue]+avg_z,'\t')+'\n')
if max(avg_z_float)>1:
export_dataF.write(string.join([tissue]+avg_z,'\t')+'\n')
export_data.close(); export_dataF.close()
return export_path,export_pathF
def removeRawData(array_fold_headers):
### Prior to exporting data for GenMAPP, remove raw data columns
columns_with_stats=[]; i=0; stat_headers = ['avg', 'log_fold', 'fold', 'rawp', 'adjp']; filtered_headers=[]
for header in array_fold_headers:
broken_header = string.split(header,'-')
### Only keep those headers and indexes with recognized ExpressionBuilder inserted prefixes
if broken_header[0] in stat_headers: columns_with_stats.append(i); filtered_headers.append(header)
i+=1
for probeset in array_folds:
filtered_list=[]
for i in columns_with_stats: filtered_list.append(array_folds[probeset][i])
array_folds[probeset] = filtered_list ### Re-assign values of the db
return filtered_headers
def removeRawCountData(array_fold_headers):
### Prior to exporting data for GenMAPP, remove raw data columns
columns_with_stats=[]; i=0; stat_headers = ['avg', 'log_fold', 'fold', 'rawp', 'adjp']; filtered_headers=[]
for header in array_fold_headers:
broken_header = string.split(header,'-')
### Only keep those headers and indexes with recognized ExpressionBuilder inserted prefixes
if broken_header[0] in stat_headers: columns_with_stats.append(i); filtered_headers.append(header)
i+=1
for probeset in count_statistics_db:
filtered_list=[]
for i in columns_with_stats: filtered_list.append(count_statistics_db[probeset][i])
count_statistics_db[probeset] = filtered_list ### Re-assign values of the db
return filtered_headers
def exportAnalyzedData(comp_group_list2,expr_group_db):
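### Writes the annotated DATASET- expression results file, selecting annotation columns appropriate to the array type and vendor (Ensembl-based, AltMouse, small RNA or conventional 3' arrays)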
report = 'multiple'; report = 'single'
try: ensembl_microRNA_db = importMicrornaAssociations(species,report)
except IOError: ensembl_microRNA_db={}
if array_type != "AltMouse" and array_type != "3'array":
try:
import EnsemblImport
gene_location_db = EnsemblImport.getEnsemblGeneLocations(species,array_type,'key_by_array')
except Exception: gene_location_db={}
if data_type == 'expression':
new_file = expression_dataset_output_dir + 'DATASET-'+experiment_name+'.txt'
try: data = export.createExportFile(new_file,expression_dataset_output_dir[:-1])
except RuntimeError:
export.isFileOpen(new_file,expression_dataset_output_dir[:-1])
data = export.createExportFile(new_file,expression_dataset_output_dir[:-1])
try: custom_annotation_dbase = importCustomAnnotations()
except Exception: custom_annotation_dbase={}
x=0;y=0;z=0
for arrayid in array_folds:
if arrayid in annotate_db and arrayid in probeset_db: x = 1
if arrayid in annotate_db: y = 1
if arrayid in conventional_array_db: z = 1
break
Vendor = vendor ### Need to rename as re-assigning will cause a global conflict error
for arrayid in array_folds:
if 'ENS' in arrayid and Vendor == 'Symbol':
Vendor = 'Ensembl'
break
if array_type != "AltMouse" and (array_type != "3'array" or 'Ensembl' in Vendor):
#annotate_db[gene] = symbol, definition,rna_processing
#probeset_db[gene] = transcluster_string, exon_id_string
title = ['Ensembl_gene','Definition','Symbol','Transcript_cluster_ids','Constitutive_exons_used','Constitutive_IDs_used','Putative microRNA binding sites','Select Cellular Compartments','Select Protein Classes','Chromosome','Strand','Genomic Gene Coordinates','GO-Biological Process','GO-Molecular Function','GO-Cellular Component','WikiPathways']
title = string.join(title,'\t')
elif arrayCode == 3: ### Code indicates this array probes only for small RNAs
title = ['Probeset ID','Sequence Type','Transcript ID','Species Scientific Name','Genomic Location']
title = string.join(title,'\t')
elif x == 1:
title = "Probesets" +'\t'+ 'Definition' +'\t'+ 'Symbol' +'\t'+ 'affygene' +'\t'+ 'exons' +'\t'+ 'probe_type_call' +'\t'+ 'ensembl'
elif y==1: title = "Probesets" +'\t'+ 'Symbol' +'\t'+ 'Definition'
elif array_type == "3'array":
title = ['Probesets','Symbol','Definition','Ensembl_id','Entrez_id','Unigene_id','GO-Process','GO-Function','GO-Component','Pathway_info','Putative microRNA binding sites','Select Cellular Compartments','Select Protein Classes']
title = string.join(title,'\t')
else: title = "Probesets"
for entry in array_fold_headers: title = title + '\t' + entry
title += '\t'+ 'ANOVA-rawp' +'\t'+ 'ANOVA-adjp' +'\t'+'largest fold'
if array_type == 'RNASeq': title += '\t'+ 'maximum sample read count'
data.write(title+'\n')
for arrayid in array_folds:
if arrayCode == 3:
ca = conventional_array_db[arrayid]
definition = ca.Description()
symbol = ca.Symbol()
data_val = [arrayid,ca.Description(),ca.Symbol(),ca.Species(),ca.Coordinates()]
data_val = string.join(data_val,'\t')
elif array_type != 'AltMouse' and (array_type != "3'array" or 'Ensembl' in Vendor):
try: definition = annotate_db[arrayid][0]; symbol = annotate_db[arrayid][1]; rna_processing = annotate_db[arrayid][2]
except Exception: definition=''; symbol=''; rna_processing=''
report = 'all'
try: miRs = ensembl_microRNA_db[arrayid]
except KeyError: miRs = ''
try:
trans_cluster = probeset_db[arrayid][0]
exon_ids = probeset_db[arrayid][1]
probesets = probeset_db[arrayid][2]
except Exception:
trans_cluster='';exon_ids='';probesets=''
try: compartment,custom_class = custom_annotation_dbase[arrayid]
except KeyError: compartment=''; custom_class=''
try: chr,strand,start,end = gene_location_db[arrayid]
            except Exception: chr=''; strand=''; start=''; end=''
try: pi = conventional_array_db[arrayid]; process = pi.Process(); function=pi.Function(); component=pi.Component(); pathway = pi.Pathway()
except Exception: process=''; function=''; component=''; pathway=''
data_val = [arrayid,symbol,definition,trans_cluster,exon_ids,probesets,miRs,compartment,custom_class,chr,strand,start+'-'+end,process,function,component,pathway]
data_val = string.join(data_val,'\t')
elif arrayid in annotate_db and arrayid in probeset_db: ### This is for the AltMouse array
symbol = annotate_db[arrayid][0]
definition = annotate_db[arrayid][1]
affygene = probeset_db[arrayid][0][0:-1] #probeset_db[probeset] = affygene,exons,probe_type_call,ensembl
exons = probeset_db[arrayid][1]
probe_type_call = probeset_db[arrayid][2]
ensembl = probeset_db[arrayid][3]
data_val = arrayid +'\t'+ definition +'\t'+ symbol +'\t'+ affygene +'\t'+ exons +'\t'+ probe_type_call +'\t'+ ensembl
elif arrayid in annotate_db:
definition = annotate_db[arrayid][0]
symbol = annotate_db[arrayid][1]
data_val = arrayid +'\t'+ definition +'\t'+ symbol
elif array_type == "3'array" and 'Ensembl' not in Vendor:
try:
ca = conventional_array_db[arrayid]
definition = ca.Description()
symbol = ca.Symbol()
ens = ca.EnsemblString()
entrez = ca.EntrezString()
unigene = ca.UnigeneString()
pathway_info = ca.PathwayInfo()
component = ca.GOComponentNames(); process = ca.GOProcessNames(); function = ca.GOFunctionNames()
compartment=''; custom_class=''; miRs=''
if len(ens)>0:
if ens[0]=='|': ens = ens[1:]
store=[]
for ens_gene in ca.Ensembl(): ### Add Custom Annotation layer
try: compartment,custom_class = custom_annotation_dbase[ens_gene]
except KeyError: null=[]
try: miRs = ensembl_microRNA_db[ens_gene]
except KeyError: null=[]
if 'protein_coding' in custom_class and len(store)==0: ### Use the first instance only
store = miRs,compartment,custom_class+'('+ens_gene+')'
if len(store)>0: ### pick the Ensembl with protein coding annotation to represent (as opposed to aligning annotated pseudo genes)
miRs,compartment,custom_class = store
except KeyError:
definition=''; symbol=''; ens=''; entrez=''; unigene=''; pathway_info=''
process=''; function=''; component=''; compartment='' ;custom_class=''; miRs=''
data_val = [arrayid,symbol,definition,ens,entrez,unigene,process,function,component,pathway_info,miRs,compartment,custom_class]
data_val = string.join(data_val,'\t')
else:
data_val = arrayid
for value in array_folds[arrayid]:
data_val = data_val + '\t' + str(value)
gs = summary_filtering_stats[arrayid]
#if arrayid == '1623863_a_at': print [gs.LogFold()]
data_val += '\t'+ str(gs.Pval()) +'\t'+ str(gs.AdjP()) +'\t'+ str(gs.LogFold())
if array_type == 'RNASeq': data_val+= '\t'+ gs.MaxCount()
data_val = string.replace(data_val,'\n','')
data.write(data_val+'\n')
data.close()
print "Full Dataset with statistics:",'DATASET-'+experiment_name+'.txt', 'written'
gene_location_db=[]
ensembl_microRNA_db=[]
custom_annotation_dbase=[]
def cleanUpLine(line):
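    ### Strip line-break and quote characters from an imported line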
line = string.replace(line,'\n','')
line = string.replace(line,'\c','')
data = string.replace(line,'\r','')
data = string.replace(data,'"','')
return data
def eliminate_redundant_dict_values(database):
db1={}
for key in database:
list = unique.unique(database[key])
list.sort()
db1[key] = list
return db1
def convert_to_list(database):
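    ### Convert a dictionary of (index,fold) tuples into a sorted list of lists: [key, fold1, fold2, ...]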
db1=[]; db2=[]; temp_list=[]
for key in database:
list = database[key]
#print key,list,dog #32 [(2, 1.1480585565447154), (3, 0.72959188370731742), (0, 0.0), (1, -0.60729064216260165)]
list.sort()
temp_list=[]
temp_list.append(key)
for entry in list:
avg_fold = entry[1]
temp_list.append(avg_fold)
#print temp_list, dog #[32, 0.0, -0.60729064216260165, 1.1480585565447154, 0.72959188370731742]
db1.append(temp_list)
db1.sort()
return db1
def import_annotations(filename):
fn=filepath(filename)
annotation_dbase = {}
for line in open(fn,'rU').xreadlines():
try:
data = cleanUpLine(line)
try: probeset,definition,symbol,rna_processing = string.split(data,'\t')
except ValueError:
probeset,definition,symbol = string.split(data,'\t')
rna_processing = ''
annotation_dbase[probeset] = definition, symbol,rna_processing
except ValueError: continue
return annotation_dbase
def importCustomAnnotations():
### Combine non-coding Ensembl gene annotations with UniProt functional annotations
try: custom_annotation_dbase = importTranscriptBiotypeAnnotations(species)
except Exception: custom_annotation_dbase = {}
try: housekeeping_genes=BuildAffymetrixAssociations.getHousekeepingGenes(species)
except Exception: housekeeping_genes=[]
print len(custom_annotation_dbase),'Ensembl Biotypes and', len(housekeeping_genes),'housekeeping genes.'
for ens_gene in housekeeping_genes:
if ens_gene not in custom_annotation_dbase: custom_annotation_dbase[ens_gene] = '','housekeeping'
else:
compartment,custom_class = custom_annotation_dbase[ens_gene]
custom_class+='|housekeeping'
custom_annotation_dbase[ens_gene] = compartment,custom_class
filename = 'AltDatabase/uniprot/'+species+'/custom_annotations.txt'
fn=filepath(filename)
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
ens_gene,compartment,custom_class = t[:3]
if ens_gene in custom_annotation_dbase:
biotype = custom_annotation_dbase[ens_gene][1]
if len(custom_class)>0: custom_class+='|'+biotype
else: custom_class=biotype
custom_annotation_dbase[ens_gene] = compartment,custom_class
return custom_annotation_dbase
def importTranscriptBiotypeAnnotations(species):
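    ### Summarize Ensembl transcript biotypes at the gene level (protein_coding preferred, otherwise the non-coding class or a joined list of biotypes)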
filename = 'AltDatabase/ensembl/'+species+'/'+species+'_Ensembl_transcript-biotypes.txt'
fn=filepath(filename); biotype_db = {}; custom_annotation_dbase={}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
gene,transcript,biotype = string.split(data,'\t')
### Determine if only one annotation is associated with each gene
try: biotype_db[gene][biotype]=[]
except Exception: biotype_db[gene] = db = {biotype:[]}
for gene in biotype_db:
db = biotype_db[gene]
if len(db)==1 and 'protein_coding' not in db:
for biotype in db: ### Non-coding gene annotation
custom_annotation_dbase[gene] = '',biotype
elif 'protein_coding' in db:
custom_annotation_dbase[gene] = '','protein_coding'
elif 'transcribed_unprocessed_pseudogene' in db:
custom_annotation_dbase[gene] = '','transcribed_unprocessed_pseudogene'
else:
ls=[] ### otherwise include all gene annotations
for i in db: ls.append(i)
ls = string.join(ls,'|')
custom_annotation_dbase[gene] = '',ls
return custom_annotation_dbase
def importAltMerge(import_type):
### Import Probeset annotations
try:
ensembl_db={}; fn=filepath('AltDatabase/Mm/AltMouse/AltMouse-Ensembl.txt')
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
affygene,ensembl = string.split(data,'\t')
ensembl_db[affygene]=ensembl
#print len(ensembl_db),'Ensembl-AltMouse relationships imported'
except TypeError: null=[]
### Import Probeset annotations
probeset_annotation_file = "AltDatabase/"+species+'/'+array_type+'/'+ "MASTER-probeset-transcript.txt"
probeset_db = {}; constitutive_db = {}; fn=filepath(probeset_annotation_file); replacements=0
for line in open(fn,'rU').xreadlines():
probeset_data = cleanUpLine(line)
probeset,affygene,exons,transcript_num,transcripts,probe_type_call,ensembl,block_exon_ids,block_structure,comparison_info = string.split(probeset_data,'\t')
if probeset == "Probeset": continue
else:
if affygene[:-1] in ensembl_db: ensembl = ensembl_db[affygene[:-1]]; replacements+=1
if import_type == 'full': ### Mimics the structure of ExonArrayEnsemblRules.reimportEnsemblProbesets() dictionary probe_association_db
probe_data = affygene,affygene,exons,'','core'
probeset_db[probeset] = probe_data
else: probeset_db[probeset] = affygene,exons,probe_type_call,ensembl
if probe_type_call == 'gene':
try: constitutive_db[affygene].append(probeset)
except KeyError: constitutive_db[affygene] = [probeset]
return probeset_db, constitutive_db
def parse_custom_annotations(filename):
custom_array_db = {}
x=0
fn=filepath(filename)
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
array_data = data
array_id,probeset,other = string.split(array_data,'\t') #remove endline
custom_array_db[array_id] = probeset,other
    print len(custom_array_db), "custom array entries processed"
return custom_array_db
def remoteLineageProfiler(params,expr_input_dir,ArrayType,Species,Vendor,customMarkers=False,specificPlatform=False):
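    ### Entry point for running LineageProfiler outside the standard workflow; sets the required globals before calling performLineageProfiler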
global species
global array_type
global vendor
global remoteAnalysis
global fl
remoteAnalysis = True
species = Species
array_type = ArrayType
vendor = Vendor
fl = params
graphics_links = []
if 'ExpressionInput' in expr_input_dir:
output_dir = string.replace(expr_input_dir,'ExpressionInput', 'ExpressionOutput')
root_dir = export.findParentDir(output_dir)
else:
root_dir = export.findParentDir(expr_input_dir)+'ExpressionOutput/'
try:
### If this directory exists, create a global variable for it
dir_list = read_directory(root_dir[:-1])
global expression_dataset_output_dir
global experiment_name
experiment_name = string.replace(export.findFilename(expr_input_dir)[:-4],'exp.','')
expression_dataset_output_dir = root_dir
except Exception:
#print traceback.format_exc()
None
graphic_links = performLineageProfiler(expr_input_dir,graphics_links,customMarkers,specificPlatform=specificPlatform)
return graphic_links
def performLineageProfiler(expr_input_dir,graphic_links,customMarkers=False,specificPlatform=False):
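    ### Correlate gene expression with the LineageProfiler reference compendium and visualize the resulting z-scores (heatmaps and WikiPathways coloring when available)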
try:
import WikiPathways_webservice
import LineageProfiler
start_time = time.time()
try:
compendium_type = fl.CompendiumType()
compendium_platform = fl.CompendiumPlatform()
except Exception:
compendium_type = 'protein_coding'
compendium_platform = 'exon'
#print 'Compendium platform selected:',compendium_platform
print 'Biological data type to examine:',compendium_type
try: ### Works when expression_dataset_output_dir is defined
exp_output = expression_dataset_output_dir + 'DATASET-'+experiment_name+'.txt'
#try: array_type_data = vendor, array_type
array_type_data = array_type
except Exception: ### Otherwise, user directly supplied file is used
array_type_data = vendor, array_type
exp_output = export.findParentDir(expr_input_dir)+'/LineageCorrelations-'+export.findFilename(expr_input_dir)
if specificPlatform == False:
compendium_platform = 'exon'
status = False
compareToAll=False
"""
print species
print array_type_data
print expr_input_dir
print exp_output
print compendium_type
print compendium_platform
print customMarkers
"""
try:
zscore_output_dir1 = LineageProfiler.runLineageProfiler(species,array_type_data,expr_input_dir, exp_output,compendium_type,compendium_platform,customMarkers); status = True
#zscore_output_dir1 = None
except Exception:
print traceback.format_exc(),'\n'
zscore_output_dir1 = None
if compareToAll:
try:
zscore_output_dir2 = LineageProfiler.runLineageProfiler(species,array_type_data,expr_input_dir, exp_output,compendium_type,'gene',customMarkers); status = True
#zscore_output_dir2 = None
except Exception: zscore_output_dir2 = None
try:
zscore_output_dir3 = LineageProfiler.runLineageProfiler(species,array_type_data,expr_input_dir, exp_output,compendium_type,"3'array",customMarkers); status = True
#zscore_output_dir3 = None
except Exception: zscore_output_dir3 = None
zscore_output_dirs=[zscore_output_dir1,zscore_output_dir2,zscore_output_dir3]
else:
zscore_output_dirs=[zscore_output_dir1]
### Create a combined zscore_output_dir output using all predictions
zscore_output_dir = combineLPResultFiles(zscore_output_dirs)
if status == False:
#print traceback.format_exc(),'\n'
if species != 'Mm' and species != 'Hs':
print 'LineageProfiler analysis failed (possibly unsupported species).'
else:
time_diff = str(round(time.time()-start_time,1))
print 'LineageProfiler analysis completed in %s seconds' % time_diff
            try: ### If a groups file exists in the expected directory structure -> export a groups z-score file
export_path, export_filtered_path = importAndOrganizeLineageOutputs(expr_input_dir,zscore_output_dir,array_type)
except Exception:
export_path = zscore_output_dir ### keeps the sample z-score file as input
export_filtered_path = None
### Output a heat map of the sample Z-score correlations
graphic_links = LineageProfiler.visualizeLineageZscores(zscore_output_dir,export_path,graphic_links)
if export_filtered_path != None:
try: LineageProfiler.visualizeLineageZscores(export_filtered_path,export_path,graphic_links) ### Just output a heatmap of filtered grouped terms
except Exception: pass
### Color the TissueMap from WikiPathways using their webservice
if customMarkers==False:
print 'Coloring LineageMap profiles using WikiPathways webservice...'
graphic_links = WikiPathways_webservice.viewLineageProfilerResults(export_path,graphic_links)
except Exception:
print traceback.format_exc(),'\n'
### Analysis may not be supported for species or data is incompatible
try:
if remoteAnalysis:
if species != 'Mm' and species != 'Hs':
print 'LineageProfiler analysis failed (possibly unsupported species).'
#print traceback.format_exc(),'\n'
except Exception:
pass
return graphic_links
def combineLPResultFiles(input_files):
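    ### Merge one or more LineageProfiler z-score files into a single cell-type by sample matrix (only the first z-score recorded per cell type is reported)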
combined_sample_cell_db={}
celltypes=[]
for fn in input_files:
if fn != None:
firstLine=True
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t=string.split(data,'\t')
if firstLine:
headers = t[1:]
firstLine=False
else:
cell_type = t[0]
if cell_type not in celltypes:
celltypes.append(cell_type)
zscores = t[1:]
for sample in headers:
z = float(zscores[headers.index(sample)])
try:
cell_zscores = combined_sample_cell_db[sample]
try: cell_zscores[cell_type].append(z)
except Exception: cell_zscores[cell_type]=[z]
except Exception:
combined_sample_cell_db[sample] = {cell_type:[z]}
try:
headers.sort()
celltypes.sort()
for i in input_files:
if i!=None:
output_file = string.join(string.split(i,'-')[:-2],'-')+'-zscores.txt'
o = export.ExportFile(output_file)
o.write(string.join(['LineagePredictions']+headers,'\t')+'\n')
break
for cell_type in celltypes:
values = [cell_type]
for sample in headers:
cell_zscores = combined_sample_cell_db[sample]
#try: cell_zscores[cell_type].sort()
#except Exception: cell_zscores[cell_type] = [0]
selectedZ=str(cell_zscores[cell_type][0])
#if 'Breast' in sample and cell_type=='Breast': print cell_zscores['Breast'],sample, selectedZ;sys.exit()
#selectedZ=str(statistics.avg(cell_zscores[cell_type]))
values.append(selectedZ)
o.write(string.join(values,'\t')+'\n')
o.close()
except Exception: pass
try: returnRowHeaderForMaxEntry(output_file,10)
except Exception: pass
return output_file
def visualizeQCPlots(expr_input_dir):
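    ### Generate QC, hierarchical clustering and PCA plots for the expression dataset (requires matplotlib)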
original_expr_input_dir = expr_input_dir
expr_input_dir = string.replace(expr_input_dir,'-steady-state','') ### We want the full exon/probeset-level expression file
try:
import clustering
import QC
print 'Building quality control graphs...'
if array_type == 'RNASeq':
counts_input_dir = string.replace(expr_input_dir,'exp.','counts.')
graphic_links = QC.outputRNASeqQC(counts_input_dir)
else:
graphic_links = QC.outputArrayQC(expr_input_dir)
print 'Building hierarchical cluster graphs...'
paths = getSampleLogFoldFilenames(expr_input_dir)
graphic_links = clustering.outputClusters(paths,graphic_links, Normalize='median',Species=species)
try: graphic_links = clustering.runPCAonly(original_expr_input_dir,graphic_links,False,plotType='2D',display=False)
except Exception: pass
except Exception:
print 'Unable to generate QC plots:'
print traceback.format_exc()
try: graphic_links = graphic_links
except Exception: graphic_links=None ### Matplotlib likely not installed - or other unknown issue
return graphic_links
def getSampleLogFoldFilenames(filename):
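    ### Build the expected ExpressionOutput/Clustering file paths for the sample log-fold clustering inputs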
if '/' in filename: delim = '/'
else: delim = '\\'
if 'ExpressionInput' in filename:
export_path = string.replace(filename,'ExpressionInput','ExpressionOutput/Clustering')
path1 = string.replace(export_path,'exp.','SampleLogFolds-')
path2 = string.replace(export_path,'exp.','OutlierLogFolds-')
path3 = string.replace(export_path,'exp.','RelativeSampleLogFolds-')
paths = [path1,path2,path3]
else:
paths = string.split(filename,delim)
path1 = string.join(paths[:-1],delim)+'/ExpressionOutput/Clustering/SampleLogFolds-'+paths[-1]
path2 = string.replace(path1,'SampleLogFolds-','OutlierLogFolds-')
path3 = string.replace(path1,'SampleLogFolds-','RelativeSampleLogFolds-')
paths = [path1,path2,path3]
return paths
def importGeneAnnotations(species):
### Used for internal testing
gene_annotation_file = "AltDatabase/ensembl/"+species+"/"+species+"_Ensembl-annotations_simple.txt"
fn=filepath(gene_annotation_file)
count = 0; annotate_db={}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
if count == 0: count = 1
else:
gene, description, symbol = string.split(data,'\t')
annotate_db[gene] = symbol,description,''
return annotate_db
def remoteExpressionBuilder(Species,Array_type,dabg_p,expression_threshold,
avg_all_for_ss,Expression_data_format,Vendor,
constitutive_source,data_source,Include_raw_data,
perform_alt_analysis,GE_fold_cutoffs,GE_pvalue_cutoffs,
GE_ptype,exp_file_location_db,Root):
start_time = time.time()
global root; root = Root
#def remoteExpressionBuilder():
global species; global array_type ; species = Species; array_type = Array_type; global altanalyze_files; global vendor; vendor = Vendor
global filter_by_dabg; filter_by_dabg = 'yes' ### shouldn't matter, since the program should just continue on without it
global expression_data_format; global expression_dataset_output_dir; global root_dir; global data_type
global conventional_array_db; global custom_array_db; global constitutive_db; global include_raw_data; global experiment_name
global annotate_db; global probeset_db; global process_custom; global m_cutoff; global p_cutoff; global ptype_to_use; global norm
global arrayCode; arrayCode = 0; global probability_statistic; global fl; global use_downregulated_label; use_downregulated_label = True
global count_statistics_db; global count_statistics_headers; count_statistics_db = {}
include_raw_data = Include_raw_data; expression_data_format = Expression_data_format
data_type = 'expression' ###Default, otherwise is 'dabg'
d = "core"; e = "extendend"; f = "full"; exons_to_grab = d ### Currently, not used by the program... intended as an option for ExonArrayAffymetrixRules full annotation (deprecated)
### Original options and defaults
"""
dabg_p = 0.75; data_type = 'expression' ###used for expression analysis when dealing with AltMouse arrays
a = "3'array"; b = "exon"; c = "AltMouse"; e = "custom"; array_type = c
l = 'log'; n = 'non-log'; expression_data_format = l
w = 'Agilent'; x = 'Affymetrix'; y = 'Ensembl'; z = 'default'; data_source = y; constitutive_source = z; vendor = x
hs = 'Hs'; mm = 'Mm'; dr = 'Dr'; rn = 'Rn'; species = mm
include_raw_data = 'yes'
expression_threshold = 70 ### Based on suggestion from BMC Genomics. 2006 Dec 27;7:325. PMID: 17192196, for hu-exon 1.0 st array
avg_all_for_ss = 'no' ###Default is 'no' since we don't want all probes averaged for the exon arrays
"""
ct = 'count'; avg = 'average'; filter_method = avg
filter_by_dabg = 'yes'
    m_cutoff = math.log(float(GE_fold_cutoffs),2); p_cutoff = float(GE_pvalue_cutoffs); ptype_to_use = GE_ptype
print "Beginning to Process the",species,array_type,'dataset'
process_custom = 'no'
if array_type == "custom": ### Keep this code for now, even though not currently used
import_dir = '/AltDatabase/affymetrix/custom'
dir_list = read_directory(import_dir) #send a sub_directory to a function to identify all files in a directory
for affy_data in dir_list: #loop through each file in the directory to output results
affy_data_dir = 'AltDatabase/affymetrix/custom/'+affy_data
custom_array_db = parse_custom_annotations(affy_data_dir)
array_type = a; process_custom = 'yes'
if array_type == "AltMouse":
print "Processing AltMouse splicing data"
original_probeset_db,constitutive_db = importAltMerge('basic')
probe_annotation_file = "AltDatabase/"+species+'/'+ array_type+'/'+array_type+"_annotations.txt"
original_annotate_db = import_annotations(probe_annotation_file)
conventional_array_db = []
elif array_type == "3'array" and 'Ensembl' not in vendor: ### If user supplied IDs are from Ensembl - doesn't matter the vendor
original_vendor = vendor
if 'other:' in vendor:
vendor = string.replace(vendor,'other:','')
process_go='yes';extract_go_names='yes';extract_pathway_names='yes'
probeset_db = []; annotate_db = []
constitutive_db = ""; conventional_array_db = {}
affy_data_dir = 'AltDatabase/affymetrix'
if vendor == 'Affymetrix':
try: conventional_array_db, arrayCode = BuildAffymetrixAssociations.importAffymetrixAnnotations(affy_data_dir,species,process_go,extract_go_names,extract_pathway_names)
except Exception: print 'Error in processing CSV data. Getting this data from GO-Elite annotations instead.'
if vendor == 'Affymetrix' and len(conventional_array_db)>0: use_go='no'
else: use_go = 'yes'
try:
print "Adding additional gene, GO and WikiPathways annotations"
conventional_array_db = BuildAffymetrixAssociations.getUIDAnnotationsFromGOElite(conventional_array_db,species,vendor,use_go)
except Exception: print "Additional annotation import failed"
print len(conventional_array_db), "Array IDs with annotations from",vendor,"annotation files imported."
vendor = original_vendor
elif array_type != "AltMouse":
probeset_db = []; annotate_db = []; constitutive_db = []; conventional_array_db = []
### The below function gathers GO annotations from the GO-Elite database (not Affymetrix as the module name implies)
conventional_array_db = BuildAffymetrixAssociations.getEnsemblAnnotationsFromGOElite(species)
if 'Ensembl' in vendor:
annotate_db = importGeneAnnotations(species) ### populate annotate_db - mimicking export structure of exon array
global expr_threshold; global dabg_pval; global gene_exp_threshold; global gene_rpkm_threshold; dabg_pval = dabg_p
altanalyze_files = []; datasets_with_all_necessary_files=0
for dataset in exp_file_location_db:
experiment_name = string.replace(dataset,'exp.',''); experiment_name = string.replace(experiment_name,'.txt','')
fl = exp_file_location_db[dataset]
expr_input_dir = fl.ExpFile()
stats_input_dir = fl.StatsFile()
expr_group_dir = fl.GroupsFile()
comp_group_dir = fl.CompsFile()
try: batch_effects = fl.BatchEffectRemoval()
except Exception: batch_effects = 'NA'
try: norm = fl.FeatureNormalization()
except Exception: norm = 'NA'
try: probability_statistic = fl.ProbabilityStatistic()
except Exception: probability_statistic = 'unpaired t-test'
try: gene_exp_threshold = fl.GeneExpThreshold()
except Exception: gene_exp_threshold = 0
try: gene_rpkm_threshold = fl.RPKMThreshold()
except Exception: gene_rpkm_threshold = 0
if expression_data_format == 'log':
try: expr_threshold = math.log(float(expression_threshold),2)
except Exception: expr_threshold = 0 ### Applies to RNASeq datasets
else:
try: expr_threshold = float(expression_threshold)
except Exception: expr_threshold = 0
residuals_input_dir = string.replace(expr_input_dir,'exp.','residuals.')
root_dir = fl.RootDir()
datasets_with_all_necessary_files +=1
checkArrayHeaders(expr_input_dir,expr_group_dir)
expression_dataset_output_dir = root_dir+"ExpressionOutput/"
if batch_effects == 'yes':
try:
import combat
combat.runPyCombat(fl)
except Exception:
                print_out = 'Batch effect removal analysis (py-combat) failed due to an unknown error:'
print traceback.format_exc()
try: UI.WarningWindow(print_out,'Critical Error - Exiting Program!!!'); root.destroy(); sys.exit()
except Exception: print print_out; sys.exit()
if array_type != "3'array": #array_type != 'AltMouse' and
try: probeset_db,annotate_db,comparison_filename_list = ExonArray.getAnnotations(fl,array_type,dabg_p,expression_threshold,data_source,vendor,constitutive_source,species,avg_all_for_ss,filter_by_dabg,perform_alt_analysis,expression_data_format)
except Exception, e:
print traceback.format_exc()
                print_out = 'Error encountered for the '+species+', '+array_type+' dataset. Check to ensure that:\n(1) the correct platform and species were selected and\n(2) some expression values are present in ExpressionInput/exp.YourDataset.txt'
try: UI.WarningWindow(print_out,'Critical Error - Exiting Program!!!'); root.destroy(); sys.exit()
except Exception: print print_out; sys.exit()
if array_type != 'AltMouse': expr_input_dir = expr_input_dir[:-4]+'-steady-state.txt'
else: probeset_db = original_probeset_db; annotate_db = original_annotate_db
for file in comparison_filename_list: altanalyze_files.append(file)
residual_file_status = ExonArray.verifyFile(residuals_input_dir)
### Separate residual file into comparison files for AltAnalyze (if running FIRMA)
if residual_file_status == 'found': ExonArray.processResiduals(fl,Array_type,Species,perform_alt_analysis)
"""
import ExonArrayEnsemblRules
source_biotype = array_type, root_dir
probeset_db,annotate_db,constitutive_gene_db,splicing_analysis_db = ExonArrayEnsemblRules.getAnnotations('no',constitutive_source,source_biotype,species)
expr_input_dir = expr_input_dir[:-4]+'-steady-state.txt'
"""
if norm == 'RPKM' and array_type == 'RNASeq':
### Separately analyze steady-state counts first, to replace fold changes
counts_expr_dir = string.replace(expr_input_dir,'exp.','counts.')
if 'counts.' not in counts_expr_dir: counts_expr_dir = 'counts.'+counts_expr_dir ### Occurs if 'exp.' not in the filename
count_statistics_db, count_statistics_headers = calculate_expression_measures(counts_expr_dir,expr_group_dir,experiment_name,comp_group_dir,probeset_db,annotate_db)
calculate_expression_measures(expr_input_dir,expr_group_dir,experiment_name,comp_group_dir,probeset_db,annotate_db)
buildCriterion(GE_fold_cutoffs, p_cutoff, ptype_to_use, root_dir+'/ExpressionOutput/','summary') ###Outputs a summary of the dataset and all comparisons to ExpressionOutput/summary.txt
#except Exception: null=[]
graphic_links = None
if fl.ProducePlots() == 'yes':
graphic_links = visualizeQCPlots(expr_input_dir)
if fl.PerformLineageProfiler() == 'yes':
if graphic_links==None: graphic_links = []
graphic_links = performLineageProfiler(expr_input_dir,graphic_links) ### Correlate gene-level expression values with known cells and tissues
if graphic_links != None:
fl.setGraphicLinks(graphic_links) ### Uses Matplotlib to export QC and clustering plots
annotate_db={}; probeset_db={}; constitutive_db={}; array_fold_db={}; raw_data_comps={}; conventional_array_db=[]
clearObjectsFromMemory(conventional_array_db); conventional_array_db=[]
try: clearObjectsFromMemory(summary_filtering_stats); summary_filtering_stats=[]
except Exception: null=[]
try: clearObjectsFromMemory(array_folds); array_folds=[]
except Exception: null=[]
try: clearObjectsFromMemory(count_statistics_db); count_statistics_db=[]
except Exception: null=[]
#print 'after deleted'; returnLargeGlobalVars()
### Generate the NI file if possible
try: calculateNormalizedIntensities(root_dir,species,array_type,avg_all_for_SS=avg_all_for_ss)
except Exception: pass
if datasets_with_all_necessary_files == 0:
###Thus no files were found with valid inputs for all file types
        print 'WARNING....No properly named datasets were found. ExpressionBuilder requires at least 3 files with the prefixes "exp.", "groups." and "comps.", where the dataset name following these prefixes is identical across all three files.'
print "...check these file names before running again."
inp = sys.stdin.readline(); sys.exit()
altanalyze_files = unique.unique(altanalyze_files) ###currently not used, since declaring altanalyze_files a global is problematic (not available from ExonArray... could add though)
if array_type != "3'array" and perform_alt_analysis != 'expression':
altanalyze_output = FilterDabg.remoteRun(fl,species,array_type,expression_threshold,filter_method,dabg_p,expression_data_format,altanalyze_files,avg_all_for_ss)
return 'continue',altanalyze_output
else:
end_time = time.time(); time_diff = int(end_time-start_time)
return 'stop'
def verifyFile(filename):
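    ### Return 'yes' if the file exists and contains at least one line, otherwise 'no'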
fn=filepath(filename)
try:
for line in open(fn,'rU').xreadlines(): found = 'yes'; break
except Exception: found = 'no'
return found
def verifyExpressionFile(filename):
""" Unlike the above, corrects the expression file path if not found """
fn=filepath(filename)
try:
for line in open(fn,'rU').xreadlines(): break
except Exception:
fn = string.replace(fn,'ExpressionInput/','') ### This file is in the parent path presumably (otherwise can't find it)
return fn
def exportSignatures(db,directory,species):
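    ### Export gene- or AltExon-level signatures from the global gene_conversion_db to a tab-delimited signatures file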
import gene_associations
export_file = export.findParentDir(directory[:-2]+'.txt')
if 'AltExon' in directory:
export_file+='signatures/AltExon-signatures.txt'
else:
export_file+='signatures/GeneExpression-signatures.txt'
export_object = export.ExportFile(export_file)
if 'AltExon' in directory:
export_object.write('symbol\tentrez\tensembl\tsource\turl\tname\tAltAnalyze-ExonID\tASPIRE|Splicing-Index LogFold\tGenomicLocation\n') ### Header line
else:
export_object.write('symbol\tentrez\tsource\turl\tname\tLogFold\tBH-adjp-value\n') ### Header line
url = 'http://code.google.com/p/altanalyze/wiki/PCBC_C4_compendium'
source = 'AltAnalyze'
sig = 0
gene_to_symbol = gene_associations.getGeneToUid(species,('hide','Ensembl-Symbol'))
for filename in gene_conversion_db:
db,input_data_db = gene_conversion_db[filename]
filename = string.replace(filename,'.txt','')
filename = string.replace(filename,'GE.','')
filename = string.replace(filename,'-upregulated','')
for ensembl in db:
sig+=1
try: symbol = gene_to_symbol[ensembl][0]
except Exception: continue
entrezgenes = db[ensembl][0]
entrezgenes = string.split(entrezgenes,'|')
statistics = input_data_db[ensembl][0][1:]
#print [statistics];sys.exit()
for entrez in entrezgenes:
### output format: symbol, entrez, source, url, name
values = string.join([symbol,entrez,ensembl,source,url,filename]+statistics,'\t')+'\n'
export_object.write(values)
export_object.close()
print 'Exported',sig,'signatures to:'
print export_file
def runGOElite(species,directory):
""" Separate pipeline for automating GO-Elite when re-generating criterion - Currently used outside of any pipelines """
mod = 'Ensembl'
pathway_permutations = 'FisherExactTest'
filter_method = 'z-score'
z_threshold = 1.96
p_val_threshold = 0.05
change_threshold = 2
resources_to_analyze = 'local'
returnPathways = 'yes'
root = None
import GO_Elite
directory = string.replace(directory,'ExpressionOutput','')
results_dir = directory
print '\nBeginning to run GO-Elite analysis on all results'
elite_input_dirs = ['regulated']#,'upregulated','downregulated','MarkerFinder'] ### 'AltExon' Run GO-Elite multiple times to ensure heatmaps are useful and to better organize results
for elite_dir in elite_input_dirs:
if elite_dir == 'AltExon': returnPathways = 'no'
else: returnPathways = 'yes'
file_dirs = results_dir+'GO-Elite/'+elite_dir,results_dir+'GO-Elite/denominator',results_dir+'GO-Elite/'+elite_dir
input_dir = results_dir+'GO-Elite/'+elite_dir
try: input_files = read_directory(input_dir) ### Are there any files to analyze?
except Exception: input_files = []
if len(input_files)>0:
variables = species,mod,pathway_permutations,filter_method,z_threshold,p_val_threshold,change_threshold,resources_to_analyze,returnPathways,file_dirs,root
try: GO_Elite.remoteAnalysis(variables,'non-UI',Multi=mlp)
except Exception: pass
def filterDatasetFile(main_output_folder):
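    ### Create an abbreviated version of each DATASET- file restricted to statistics columns and the headers listed in Config/DATASET-headers.txt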
global array_folds; global expression_dataset_output_dir
expression_dataset_output_dir = string.replace(main_output_folder,'GO-Elite','ExpressionOutput/')
dir_list = read_directory(expression_dataset_output_dir[:-1])
for filename in dir_list:
if 'DATASET-' in filename:
dataset_fn=filepath(expression_dataset_output_dir+filename)
x=0
for line in open(dataset_fn,'rU').xreadlines():
data = cleanUpLine(line); t = string.split(data,'\t')
if x==0: headers = t; break
### Get a list of column header titles to include
fn=filepath('Config/DATASET-headers.txt')
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line); t = string.split(data,'\t')
columns_to_include = t
### Filter statistics based on user-defined thresholds as input for GO-Elite analysis
criterion_db={}; denominator_geneids={}; index = 0; indexes=[]; avg_indexes=[]
for column in headers:
if 'avg-' in column: avg_indexes.append(index)
elif 'log_fold-' in column: indexes.append(index)
elif 'fold-' in column: indexes.append(index)
elif 'rawp-' in column: indexes.append(index)
elif 'adjp-' in column: indexes.append(index)
elif 'ANOVA' in column: indexes.append(index)
elif column in columns_to_include: indexes.append(index)
index+=1
###Export out the filtered file
dataset_fn_filtered = string.replace(dataset_fn,'.txt','-abreviated.txt')
dataset_fn_filtered = string.replace(dataset_fn_filtered,'ExpressionOutput','ExpressionOutput/filtered')
data_filter = export.ExportFile(dataset_fn_filtered)
firstLine=True
for line in open(dataset_fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if firstLine:
firstLine=False
if 'maximum sample read count' in t: ### For RNA-Seq
h=[]
for l in t:
if 'avg-' in l: h.append(string.replace(l,'avg-','avg_RPKM-'))
else: h.append(l)
t=h
values = map(lambda x: t[x], indexes)
avg_values = map(lambda x: t[x], avg_indexes) ### Specifically grab only the average-expression values (not anything else)
values = string.join(values+avg_values,'\t')+'\n'
data_filter.write(values)
data_filter.close()
def importProbesetRegions(species,platform):
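    ### Map gene:region identifiers from the Ensembl probeset annotation file to their probeset IDs for the given platform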
filename = 'AltDatabase/'+species+'/'+platform+'/'+species+'_Ensembl_probesets.txt'
fn=filepath(filename)
region_db = {}
firstRow=True
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if firstRow: firstRow = False
else:
probeset = t[0]
gene = t[2]
region = gene+':'+string.replace(t[12],'-','.')
region_db[region] = probeset
return region_db
def buildAltExonClusterInputs(input_folder,species,platform,dataType='AltExonConfirmed'):
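    ### Collect alternative exon IDs from the results files in input_folder and export their mean-centered splicing-index values as a clustering input file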
alternative_exon_db={}
dir_list = read_directory(input_folder)
if platform == 'junction':
region_db = importProbesetRegions(species,platform)
for filename in dir_list:
if '.txt' in filename:
proceed = True
if platform == 'RNASeq':
if 'ASPIRE' in filename or 'egress' in filename: proceed = False ### Need to create a special analysis just for reciprocal junctions
if proceed: ### Don't include splicing-index results for RNA-Seq
fn=filepath(input_folder+filename)
x=0
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if x==0: x=1
else:
if platform == 'junction' and 'GO-Elite' in input_folder:
altExon = t[2]
if altExon in region_db:
altExon = region_db[altExon]
elif 'splicing-index' in filename:
altExon = t[-1]
elif 'ASPIRE' in filename or 'egress' in filename:
altExon = t[-1]
altExon = string.split(altExon,' | ')
else:
altExon = t[2]
#if float(t[3])<0.001:
alternative_exon_db[altExon]=None
print len(alternative_exon_db), 'alternative exon IDs imported'
import gene_associations
gene_to_symbol = gene_associations.getGeneToUid(species,('hide','Ensembl-Symbol'))
input_folder = string.split(input_folder,'GO-Elite')[0]+'AltResults/RawSpliceData/'+species+'/splicing-index/'
dir_list = read_directory(input_folder)
exported_IDs=0
added={}
for filename in dir_list:
if '.txt' in filename and ('_average' not in filename) or len(dir_list)==1: ### We only want the group-comparison SI file
export_dir = string.split(input_folder,'RawSpliceData')[0]+'Clustering/'+dataType+'-'+filename
export_data = export.ExportFile(export_dir)
fn=filepath(input_folder+filename)
x=0
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if x==0:
                    headers = t[2:] ### first two columns are gene and ExonID
export_data.write(string.join(headers,'\t')+'\n') ### write header row
x=1
else:
if platform != 'RNASeq':
if 'ENS' in t[0]: #ENSG human
gene = t[0]; altExon = t[2]
else:
altExon = t[0]
else:
gene = t[0]; altExon = t[2]
if ';' in altExon:
altExon1, altExon2 = string.split(altExon,';')
altExons = [altExon1,gene+':'+altExon2]
else:
altExons = [altExon]
for altExon in altExons:
if altExon in alternative_exon_db and altExon not in added:
added[altExon]=[]
#values = map(lambda x: float(x)*-1, t[3:]) #reverse the fold for cluster visualization
values = map(lambda x: float(x), t[3:])
#print len(headers),len(values);sys.exit()
avg = statistics.avg(values)
log_folds = map(lambda x: str(x-avg), values) ### calculate log folds and make these strings
values = string.join([altExon]+log_folds,'\t')+'\n' ### [t[3]]+ before log_folds?
if gene in gene_to_symbol: symbol = gene_to_symbol[gene][0]+" "
else: symbol = ''
export_data.write(symbol+values)
exported_IDs+=1
print exported_IDs, 'exported ID values for clustering'
export_data.close()
return export_dir, exported_IDs
def exportHeatmap(filename,useHOPACH=True, color_gradient='red_black_sky',normalize=False,columnMethod='average',size=0,graphics=[]):
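    ### Hierarchically cluster the input file and export a heatmap (skipped for very large matrices); returns the updated graphics list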
import clustering
row_method = 'weighted'; row_metric = 'cosine'; column_method = 'average'; column_metric = 'euclidean'; transpose = False
try:
if columnMethod !=None:
column_method = columnMethod
if size < 7000:
graphics = clustering.runHCexplicit(filename, graphics, row_method, row_metric, column_method, column_metric, color_gradient, transpose, display=False, Normalize=normalize)
except Exception:
print 'Clustering failed for:',filename
return graphics
def meanCenterPSI(filename):
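    ### Mean-center each PSI row (null values are replaced by the row average) and write a -cluster.txt version of the file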
firstLine=True
output_file = filename[:-4]+'-cluster.txt'
export_obj = export.ExportFile(output_file)
for line in open(filename,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if firstLine:
export_obj.write(line)
firstLine = False
else:
try:
avg = averageWithNulls(t[1:])
values = map(lambda x: replaceNulls(x,avg), t[1:])
export_obj.write(string.join([t[0]]+values,'\t')+'\n')
except Exception: pass
return output_file
def filterJunctionExpression(filename,minPercentPresent=None):
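    ### Filter junction-pair PSI rows by the fraction of samples with non-zero values, adjusting the stringency to retain roughly 8000 or fewer of the best-covered entries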
output_file = filename[:-4]+'-filter.txt'
export_obj = export.ExportFile(output_file)
filtered_lines = []; filtered_size=0; filtered_size_stringent=0
firstLine = True
size=0; imported_num=0
for line in open(filename,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if firstLine:
export_obj.write(line)
firstLine = False
else:
imported_num+=1
if '_' in t[0] or '-ENS' in t[0] or '-' not in t[0]:
pass
else:
try: a,b = string.split(t[0],'|')
except Exception:
try: n,a,b = string.split(t[0],' ')
except Exception: continue
if '-' in a and '-' in b:
vals = map(lambda x: countNulls(x),t[1:])
percent_present = sum(vals)/(len(vals)*1.00)
if percent_present>0.1:
filtered_lines.append([percent_present,line])
size+=1
if percent_present>0.5:
filtered_size+=1
if percent_present>0.85:
filtered_size_stringent+=1
percent_imported = ((1.00*size)/imported_num)
if minPercentPresent!=None:
size=0
filtered_lines.sort(); filtered_lines.reverse()
for (percent_present,line) in filtered_lines:
if percent_present>minPercentPresent:
export_obj.write(line)
size+=1
    elif size < 8000 and percent_imported<0.5: ### Keep all filtered entries when fewer than half of the imported rows passed the presence filter
for (percent_present,line) in filtered_lines:
export_obj.write(line)
else:
size=0
to_import = int(imported_num*0.3)
#print "Filtering down to", to_import, "entries."
filtered_lines.sort(); filtered_lines.reverse()
for (percent_present,line) in filtered_lines:
if filtered_size_stringent>8000:
if percent_present>0.95:
export_obj.write(line)
size+=1
elif filtered_size>8000:
if percent_present>0.85:
export_obj.write(line)
size+=1
else:
if percent_present>0.5:
export_obj.write(line)
size+=1
print 'Filtered RI-PSI entries to',size
export_obj.close()
return output_file,size
def countNulls(val):
if float(val) == 0:
return 0
else: return 1
def calculateNormalizedIntensities(root_dir, species, array_type, avg_all_for_SS = 'no', probeset_type = 'core', analysis_type = 'processed', expFile = False):
""" Since it is a time-consuming step that is needed for visualizing SI values for exons, it is faster
to efficiently calculate NI values in advance. """
if array_type == 'gene': platform = 'GeneArray'
elif array_type == 'exon': platform = 'ExonArray'
elif array_type == 'junction': platform = 'JunctionArray'
else: platform = array_type
#alt_exon_exp_dir = root_dir+'/AltExpression/FullDatasets/'+platform+'/'+species ### This file doesn't exist if only one pairwise comparison group
alt_exon_exp_dir = root_dir+'/ExpressionInput/' ### This file doesn't exist if only one pairwise comparison group
dir_list = read_directory(alt_exon_exp_dir)
for file in dir_list:
if '.txt' in file and 'exp.' in file and 'steady' not in file:
selected_file = file[4:]
fn=filepath(alt_exon_exp_dir+'/'+file)
#sample_group_db = simplerGroupImport(string.replace(fn,'exp.','groups.'))
if analysis_type == 'raw':
### Create a filtered exon and junction expression file outside the typical workflow
import RNASeq
exp_threshold=5; rpkm_threshold=5
expressed_uids_rpkm = RNASeq.getMaxCounts(expFile,rpkm_threshold)
expressed_uids_counts = RNASeq.getMaxCounts(string.replace(expFile,'exp.','counts.'),exp_threshold)
expressed_uids = expressed_uids_rpkm.viewkeys() & expressed_uids_counts.viewkeys() ### common
fn = root_dir+'/AltExpression/FilteredDataset/'+platform+'/'+species+'/'+ export.findFilename(expFile)### This file doesn't exist if only one pairwise comparison group
expressed_uids_rpkm = RNASeq.getMaxCounts(expFile,rpkm_threshold,filterExport=expressed_uids,filterExportDir=fn)
### Get the gene values
gene_db={};
if analysis_type == 'raw':
### Get these directly from the steady-state file
exp_dir=string.replace(expFile,'.txt','-steady-state.txt')
firstLine = True
low_diff_exp_genes={}
for line in open(exp_dir,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if firstLine:
firstLine = False
ge_header = t
else:
values = map(lambda x: math.log(float(x),2), t[1:])
gene_db[t[0]]=values
elif array_type == 'RNASeq':
firstLine=True
fn_steady = fn[:-4]+'-steady-state.txt'
for line in open(fn_steady,'rU').xreadlines():
if firstLine: firstLine = False
elif ':' in line[:50]: pass
else:
data = cleanUpLine(line)
t = string.split(data,'\t')
try:
values = map(lambda x: float(x), t[1:])
values = map(lambda x: math.log(x,2),values)
gene_db[t[0]]=values
except Exception: pass
else:
import AltAnalyze
exon_db, constitutive_probeset_db = AltAnalyze.importSplicingAnnotations(array_type,species,probeset_type,avg_all_for_SS,root_dir)
gene_db={}; firstLine=True
for line in open(fn,'rU').xreadlines():
if firstLine: firstLine = False
else:
data = cleanUpLine(line)
t = string.split(data,'\t')
if t[0] in constitutive_probeset_db:
gene = constitutive_probeset_db[t[0]]
values = map(lambda x: float(x), t[1:])
try: gene_db[gene].append(values) ### combine these
except Exception: gene_db[gene] = [values]
for gene in gene_db:
#print gene, gene_db[gene]
constitutive_values = zip(*gene_db[gene]) ### combine values from multiple lists into lists of lists
#print constitutive_values
values = map(lambda x: Average(x), constitutive_values)
#print values; sys.exit()
gene_db[gene] = values
if len(gene_db)>0:
alt_exon_output_dir = root_dir+'/AltResults/RawSpliceData/'+species+'/splicing-index/'+selected_file
if analysis_type == 'raw':
alt_exon_output_dir = string.replace(alt_exon_output_dir,'RawSpliceData','RawSpliceDataTemp')
export_obj = export.ExportFile(alt_exon_output_dir)
print 'Exporting exon-level Normalized Intensity file to:',alt_exon_output_dir
print len(gene_db),'genes with data imported'
firstLine=True
exon_entries=0; saved_entries=0
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if firstLine:
firstLine = False
values = string.join(['Gene','ExonID','probesetID']+t[1:],'\t')+'\n'
export_obj.write(values)
else:
if (':' in t[0]) or array_type != 'RNASeq':
feature_values = map(lambda x: float(x), t[1:])
if analysis_type == 'raw' or array_type == 'RNASeq':
feature_values = map(lambda x: math.log(x,2), feature_values)
if array_type == 'RNASeq':
gene = string.split(t[0],':')[0]
else:
try: gene = exon_db[t[0]].GeneID()
except Exception: gene = None
if gene != None:
exon_entries+=1
if gene in gene_db:
gene_values = gene_db[gene]
if ('-' not in t[0]) or analysis_type == 'raw':
NI_values = [logratio(value) for value in zip(*[feature_values,gene_values])]
NI_values = map(lambda x: str(x), NI_values)
NI_values = string.join([gene,'NA',t[0]]+NI_values,'\t')+'\n'
export_obj.write(NI_values)
saved_entries+=1
export_obj.close()
print exon_entries, 'found with',saved_entries,'entries normalized.'
return alt_exon_output_dir
def compareRawJunctionExpression(root_dir,platform,species,critical_exon_db,expFile,min_events=0,med_events=0):
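    ### Unbiased analysis of reciprocal junction and intron-retention expression across samples; exports PSI values for regulated splicing events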
expFile = exportSorted(expFile, 0) ### Sort the expression file
print expFile
from scipy import stats
exported=[]
retained_introns=[]
features_examined={}
junction_locations = {}
critical_exon_gene_db={}
critical_junction_pair_db={}
psi_db={}
#min_events = 4; med_events = 13
#min_events = 0; med_events = 0
    print 'Beginning reciprocal junction/intron retention unbiased analyses (min_events=%d, med_events=%d)' % (min_events,med_events)
for altexon in critical_exon_db:
gene = string.split(altexon,':')[0]
inclusion_list,exclusion_list = critical_exon_db[altexon]
try:
critical_exon_gene_db[gene].append(critical_exon_db[altexon])
except Exception:
critical_exon_gene_db[gene] = [critical_exon_db[altexon]]
for i in inclusion_list:
for e in exclusion_list:
try: critical_junction_pair_db[i,e].append(altexon)
except Exception: critical_junction_pair_db[i,e] = [altexon]
export_dir = root_dir+'AltResults/AlternativeOutput/'+species+'_'+platform+'_top_alt_junctions-PSI.txt'
export_data = export.ExportFile(export_dir)
clust_export_dir = root_dir+'AltResults/AlternativeOutput/'+species+'_'+platform+'_top_alt_junctions-PSI-clust.txt'
clust_export_data = export.ExportFile(clust_export_dir)
def ratio(values):
return float(values[0]+1)/(values[1]+1)
def dPSI(incl_exp,total_exp,indexes,min_events):
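        ### Range of inclusion ratios ((incl+1)/(total+1)) among sufficiently expressed cells; when min_events>0 the extremes are trimmed before taking the difference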
indexes.sort()
incl_filt = map(lambda x: incl_exp[x],indexes) ### Only consider expressed cells
total_filt = map(lambda x: total_exp[x],indexes)
dpsi_values = [ratio(value) for value in zip(*[incl_filt,total_filt])]
dpsi_values.sort()
if len(dpsi_values)==1:
return dpsi_values[0]
elif min_events == 0:
return dpsi_values[-1]-dpsi_values[0]
else: return dpsi_values[-1*min_events]-dpsi_values[min_events]
#return max(dpsi_values)-min(dpsi_values) ### percent change in isoform expression
def getIndexes(events,min_exp_thresh):
i=0
indexes=[]
for e in events:
if e>min_exp_thresh: indexes.append(i) ### minimum expression value (changed from 5 to 10 8/5/2016)
i+=1
return indexes
    def diff(values,denom,num):
        return values[num]-values[denom]
def diffCompare(incl_exp,excl_exp,incl,excl):
if max(incl_exp)>max(excl_exp): denom = 1;num = 0
else: denom = 0; num=1
diff_values = [diff(value,denom,num) for value in zip(*[incl_exp,excl_exp])]
if min(diff_values)==0 and ('-' not in incl or '-' not in excl): ### subtract out the overlap if all reads in the inclusion exon are confounded by the junction 1nt reads
if denom == 1:
feature_exp_db[incl] = diff_values
else:
feature_exp_db[excl] = diff_values
return True
else: return False
def junctionComparisonMethod(incl,excl):
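        ### Compare an inclusion and exclusion junction: returns expression ratios, expressed-event counts, delta PSI, correlation and whether the pair passes the reporting thresholds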
useAllJunctionsForExcl=True
min_exp = 4; med_exp = 9
incl_exp = feature_exp_db[incl]
excl_exp = feature_exp_db[excl]
if useAllJunctionsForExcl:
gene = string.split(excl,':')[0]
try: gene_exp = gene_junction_denom[gene]
except Exception:
try:
gene = string.split(incl,':')[0]
gene_exp = gene_junction_denom[gene]
except Exception: gene_exp=[]
rho,p = stats.pearsonr(incl_exp,excl_exp)
### See if the data for one feature is confounded by another
### The example is exon-inclusion with 1nt overlap getting the associated reads from BedTools
status = diffCompare(incl_exp,excl_exp,incl,excl)
if status: ### update the difference generated values
excl_exp = feature_exp_db[excl]
rho,p = stats.pearsonr(incl_exp,excl_exp)
#if 'ENSMUSG00000009350:E14.2_87617106-E15.1' in incl: print feature_exp_db[incl],'a'
num_incl_events = sum(i > min_exp for i in incl_exp)
num_excl_events = sum(i > min_exp for i in excl_exp)
combined = [sum(value) for value in zip(*[incl_exp,excl_exp])]
total_number_junctions = sum(i > min_exp for i in combined)
### Calculate a delta PSI value for each expressed cell
combined = [sum(value) for value in zip(*[incl_exp,excl_exp])]
exp_indexes = getIndexes(combined,med_exp)
try: dpsi = dPSI(incl_exp,combined,exp_indexes,min_events)
except Exception: dpsi = 1
dpsi_values = [ratio(value) for value in zip(*[incl_exp,combined])]
dpsi_values = nullReplace(dpsi_values,combined,min_exp)
psi_db[incl,excl] = dpsi_values ### optionally output these at the end
#if incl == 'ENSMUSG00000019505:E3.2_62365772-E3.6_62366457' and excl == 'ENSMUSG00000029162:E7.2-E7.4':
#dpsi2 = dPSI(excl_exp,combined,exp_indexes)
#if 'ENSMUSG00000009350:E14.2_87617106-E15.1' in incl: print feature_exp_db[incl],'b'
if '-' in incl and '-' in excl:
try: max_ratio = expressionRatio(incl_exp,excl_exp,num_incl_events,num_excl_events) # Checks to see if the predominant isoform is expressed at significantly higher levels
except Exception: max_ratio = 0
try:
max_gene_ratio = max([ratio(value) for value in zip(*[incl_exp,gene_exp])])
except Exception: max_gene_ratio = 0
else: max_ratio = 1; max_gene_ratio = 0
#if 'ENSMUSG00000009350:E14.2_87617106-E15.1' in incl: print feature_exp_db[incl],'c'
#if num_incl_events > 15 and num_excl_events > 15 and total_number_junctions>30 and max_ratio>0.5: ### ensures high expression of the minor isoform
#if ((num_incl_events > 15 and num_excl_events > 7) or (num_incl_events > 7 and num_excl_events > 15)) and total_number_junctions>20 and max_ratio>0.3:
#if 'ENSG00000100650' in incl: print incl,excl,max_ratio,num_incl_events,num_excl_events,dpsi,rho
if ((num_incl_events > med_events and num_excl_events > min_events) or (num_incl_events > min_events and num_excl_events > med_events)) and total_number_junctions>(min_events*2) and max_ratio>0.1: ### ensures high expression of the minor isoform
#print rho
if dpsi > 0.25:
return max_ratio,num_incl_events,num_excl_events,dpsi,rho,max_gene_ratio,True
else:
return max_ratio,num_incl_events,num_excl_events,dpsi,rho,max_gene_ratio,False
else:
return max_ratio,num_incl_events,num_excl_events,dpsi,rho,max_gene_ratio,False
def intronComparisonMethod(query):
upstream_exon = string.replace(query,'I','E')
if '_' in query:
intronic_region = string.split(query,'_')[0] ### Unclear why this is needed, but sometimes only an _ intronic region exists for the intron
if intronic_region not in junction_locations: ### Ensures we are not using this instead of a real intron region (e.g., I2.1_1234 and I2.1)
upstream_exon = string.replace(intronic_region,'I','E')
try:
pos1,pos2 = junction_locations[upstream_exon][0]
upstream_exon_len = abs(pos2-pos1)
except Exception: upstream_exon_len = None
downstream_exon = getDownstreamExon(upstream_exon)
try:
pos1,pos2 = junction_locations[downstream_exon][0]
downstream_exon_len = abs(pos2-pos1)
except Exception: downstream_exon_len=None
pos1,pos2 = junction_locations[query][0]
intron_len = abs(pos2-pos1)
try:
up_rpk = max(feature_exp_db[upstream_exon])/float(upstream_exon_len)
if downstream_exon_len!=None:
down_rpk = max(feature_exp_db[downstream_exon])/float(downstream_exon_len)
adjacent_rpk = max([up_rpk,down_rpk]) ### get the most conservative estimate
adjacent_rpk = up_rpk
intron_rel_exp = (max(feature_exp_db[query])/float(intron_len))/adjacent_rpk
return intron_rel_exp
except Exception:
return 0
def compareJunctionExpression(gene):
regulated_junctions={}
inclusion_max_psi={}
try: symbol,description = gene_annotations[gene]
except Exception: symbol='';description=''
if gene in critical_exon_gene_db:
critical_exon_list = critical_exon_gene_db[gene]
critical_exon_list = unique.unique(critical_exon_list)
for (inclusion_list,exclusion_list) in critical_exon_list:
inclusion_list = unique.unique(inclusion_list)
exclusion_list = unique.unique(exclusion_list)
for incl in inclusion_list:
for excl in exclusion_list:
if excl != incl:
pair = [incl,excl]; pair.sort()
features_examined[incl]=[]
try:
max_ratio,num_incl_events,num_excl_events,dpsi,rho,max_all_psi,proceed = junctionComparisonMethod(incl,excl)
inclusion_max_psi[incl] = max_all_psi
#if 'ENSMUSG00000027680:E21.1-E22.1' in incl: print incl, excl,max_ratio,num_incl_events,num_excl_events,dpsi,rho,proceed
if proceed:
"""if max_ratio<0.1:
if num_excl_events > num_incl_events:
print max_ratio
print max(incl_exp)
print statistics.median(excl_exp)
print incl_exp
print excl_exp;sys.exit()"""
#if 'ENSMUSG00000009350:E14.2_87617106-E15.1' in incl: print feature_exp_db[incl]
altexons = unique.unique(critical_junction_pair_db[incl,excl])
altexons = string.join(altexons,'|')
if num_excl_events > num_incl_events:
#print max_ratio, '\t',gene
regulated_junctions[incl]=rho,excl,num_incl_events,num_excl_events,'incl',dpsi,rho,max_ratio,altexons
else:
#print max_ratio, '\t',gene
regulated_junctions[excl]=rho,incl,num_excl_events,num_incl_events,'excl',dpsi,rho,max_ratio,altexons
#if 'ENSMUSG00000009350:E14.2_87617106-E15.1' in incl: print feature_exp_db[incl]
#if rho < -0.3:
#print incl, excl, rho
#print incl_exp
#print excl_exp
#print sum(i > 5 for i in incl_exp)
#print sum(i > 5 for i in excl_exp)
except Exception: pass
#lower_index = int(len(rpkms)*0.2)
#upper_index = len(rpkms)-lower_index
for reg_junction in regulated_junctions:
ref_junction = regulated_junctions[reg_junction][1]
altexons = regulated_junctions[reg_junction][-1]
max_ratio = regulated_junctions[reg_junction][-2]
rho = regulated_junctions[reg_junction][-3]
dpsi = regulated_junctions[reg_junction][-4]
#if 'ENSMUSG00000009350:E14.2_87617106-E15.1' in incl: print feature_exp_db[incl]
### Perform a sanity check for junction comparisons
reg_pos,reg_loc = junction_locations[reg_junction]
ref_pos,ref_loc = junction_locations[ref_junction]
positions = reg_pos+ref_pos
positions.sort()
values = map(lambda x: str(x+1),feature_exp_db[reg_junction])
try: values = psi_db[reg_junction,ref_junction]
except Exception: values = psi_db[ref_junction,reg_junction]; reg_junction,ref_junction = ref_junction,reg_junction
try: max_incl_psi = str(inclusion_max_psi[reg_junction])
except Exception: max_incl_psi = '0'
if reg_pos == positions[:2] or reg_pos == positions[-2:]:
                #print 'Improper junctions excluded',reg_junction,ref_junction,positions,reg_pos
pass
elif ('-' not in reg_junction or '-' not in ref_junction) and platform != 'junction': ### Possible retained intron
if '-' not in reg_junction: query = reg_junction
elif '-' not in ref_junction: query = ref_junction
if 'I' in query:
intron_rel_exp = intronComparisonMethod(query)
if intron_rel_exp>0.25:
"""
print upstream_exon, query, intron_len, upstream_exon_len,(max(feature_exp_db[query])/float(intron_len)),(max(feature_exp_db[upstream_exon])/float(upstream_exon_len))
print max(feature_exp_db[query])
print junction_locations[query]
print max(feature_exp_db[upstream_exon])
print junction_locations[upstream_exon]
print intron_rel_exp
"""
if platform == 'junction':
try: reg_junction = probeset_junction_db[reg_junction] + ' ' +reg_junction
except Exception: pass
try: ref_junction = probeset_junction_db[ref_junction] + ' ' +ref_junction
except Exception: pass
export_data.write(string.join([symbol,description,reg_junction,ref_junction,altexons,str(max_ratio),str(dpsi),str(rho),max_incl_psi,reg_loc+'|'+ref_loc,'intron-retained']+values,'\t')+'\n')
avg = averageWithNulls(values)
values = map(lambda x: replaceNulls(x,avg), values)
clust_export_data.write(string.join([symbol+':'+reg_junction+'|'+ref_junction]+values,'\t')+'\n')
retained_introns.append(reg_junction)
retained_introns.append(ref_junction)
else:
try: avg = averageWithNulls(values)
except Exception: continue
if platform == 'junction':
try: reg_junction = probeset_junction_db[reg_junction] + ' ' +reg_junction
except Exception: pass
try: ref_junction = probeset_junction_db[ref_junction] + ' ' +ref_junction
except Exception: pass
export_data.write(string.join([symbol,description,reg_junction,ref_junction,altexons,str(max_ratio),str(dpsi),str(rho),max_incl_psi,reg_loc+'|'+ref_loc,'junctions']+values,'\t')+'\n')
values = map(lambda x: replaceNulls(x,avg), values)
clust_export_data.write(string.join([symbol+':'+reg_junction+'|'+ref_junction]+values,'\t')+'\n')
exported.append(reg_junction)
exported.append(ref_junction)
### Predict novel undetected events from above
junctions=[]; other=[]; features=[]; feature_pos=[]; loc_db={}; coord_db={}; junctions_to_compare=[]; regulated_junctions={};inclusion_max_psi={}
#max_ratio,proceed = junctionComparisonMethod(incl,excl)
for feature in feature_exp_db:
### The below code is for overlapping junctions not found from the above analysis (could include exons and junctions)
if feature not in exported and feature not in retained_introns:
if '-' in feature and platform != 'junction':
junctions.append(feature)
else:
other.append(feature)
features.append(feature)
try:
pos1,pos2 = junction_locations[feature][0]
feature_pos.append(pos1); feature_pos.append(pos2)
try:loc_db[pos1].append(feature)
except Exception: loc_db[pos1] = [feature]
try:loc_db[pos2].append(feature)
except Exception: loc_db[pos2] = [feature]
try: coord_db[pos1,pos2].append(feature)
except Exception: coord_db[pos1,pos2] = [feature]
except Exception: pass ### occurs for junction arrays if the probeset ID is not in the database
feature_pos = unique.unique(feature_pos); feature_pos.sort() ### These are the unique positions sorted
overlapping_features=[]
additional_possible_retained_introns=[] ### catch some funky intron retention events
### Get initial list of overlapping features
for feature in features: ### e.g., some junction
try:
pos1,pos2 = junction_locations[feature][0] ### coordinates of that junction
i1 = feature_pos.index(pos1) ### index position of the junctions
i2 = feature_pos.index(pos2)
#print feature, i1, i2, pos1, pos2
if (i1-i2) != 1:
overlapping_features.append(feature)
if 'I' in feature and '_' in feature:
possible_intron = string.split(feature,'_')[0]
if possible_intron not in features:
additional_possible_retained_introns.append((i1,i2,feature))
except Exception:
pass
for feature in overlapping_features:
#if feature not in features_examined: ### Remove this to allow for other reasonable junction or junction intron pairs that were not significant above
### get overlapping feature pairs
pos1,pos2 = junction_locations[feature][0]
i1 = feature_pos.index(pos1)
i2 = feature_pos.index(pos2)
### Include a search for funky intron retention events (needed due to some weird intron retention issue)
for (in1,in2,f2) in additional_possible_retained_introns:
if i1<=in1 and i2>=in2 and feature!=f2:
junctions_to_compare.append([feature,f2])
for i in range(i1+1,i2):
overlapping = loc_db[feature_pos[i]]
for o in overlapping:
if o not in features_examined and '-' in o and '-' in feature and platform != 'junction':
#print feature, o
#print junction_locations[feature][0]
#print junction_locations[o][0]
junctions_to_compare.append([feature,o])
#print 'junctions_to_compare:',junctions_to_compare
    ### Since these features share identical coordinates, this should identify intron-retention pairs
for coord in coord_db:
features = unique.unique(coord_db[coord])
if len(features)>1:
for f in features:
for g in features:
if g!=f:
fs = [g,f]; fs.sort()
if g not in exported and f not in exported:
if g not in retained_introns and f not in retained_introns:
junctions_to_compare.append(fs)
unique.unique(junctions_to_compare)
for (incl,excl) in junctions_to_compare: #Not really incl, excl, just features
max_ratio,num_incl_events,num_excl_events,dpsi,rho,max_all_psi,proceed = junctionComparisonMethod(incl,excl)
inclusion_max_psi[incl] = max_all_psi
#if 'ENSG00000100650' in incl: print incl,excl, max_ratio, proceed, rho, num_incl_events, num_excl_events, 'k'
if proceed:
altexons = ''
if num_excl_events > num_incl_events:
#print max_ratio, '\t',gene
regulated_junctions[incl]=rho,excl,num_incl_events,num_excl_events,'incl',dpsi,rho,max_ratio,altexons
else:
#print max_ratio, '\t',gene
regulated_junctions[excl]=rho,incl,num_excl_events,num_incl_events,'excl',dpsi,rho,max_ratio,altexons
for reg_junction in regulated_junctions:
ref_junction = regulated_junctions[reg_junction][1]
altexons = regulated_junctions[reg_junction][-1]
max_ratio = regulated_junctions[reg_junction][-2]
rho = regulated_junctions[reg_junction][-3]
dpsi = regulated_junctions[reg_junction][-4]
### Perform a sanity check for junction comparisons
reg_pos,reg_loc = junction_locations[reg_junction]
ref_pos,ref_loc = junction_locations[ref_junction]
positions = reg_pos+ref_pos
positions.sort()
values = map(lambda x: str(x+1),feature_exp_db[reg_junction])
try: values = psi_db[reg_junction,ref_junction]
except Exception: values = psi_db[ref_junction,reg_junction]; reg_junction,ref_junction = ref_junction,reg_junction
try: max_incl_psi = str(inclusion_max_psi[reg_junction])
except Exception: max_incl_psi = '0'
if ('-' not in reg_junction or '-' not in ref_junction) and platform != 'junction': ### Possible retained intron
if '-' not in reg_junction: query = reg_junction
elif '-' not in ref_junction: query = ref_junction
intron_rel_exp = intronComparisonMethod(query)
if intron_rel_exp>0.25:
if platform == 'junction':
try: reg_junction = probeset_junction_db[reg_junction] + ' ' +reg_junction
except Exception: pass
try: ref_junction = probeset_junction_db[ref_junction] + ' ' +ref_junction
except Exception: pass
export_data.write(string.join([symbol,description,reg_junction,ref_junction,altexons,str(max_ratio),str(dpsi),str(rho),max_incl_psi,reg_loc+'|'+ref_loc,'exon-retained']+values,'\t')+'\n')
avg = averageWithNulls(values)
values = map(lambda x: replaceNulls(x,avg), values)
clust_export_data.write(string.join([symbol+':'+reg_junction+'|'+ref_junction]+values,'\t')+'\n')
retained_introns.append(reg_junction)
retained_introns.append(ref_junction)
#print query
else:
if platform == 'junction':
try: reg_junction = probeset_junction_db[reg_junction] + ' ' +reg_junction
except Exception: pass
try: ref_junction = probeset_junction_db[ref_junction] + ' ' +ref_junction
except Exception: pass
export_data.write(string.join([symbol,description,reg_junction,ref_junction,altexons,str(max_ratio),str(dpsi),str(rho),'0',reg_loc+'|'+ref_loc,'others']+values,'\t')+'\n')
avg = averageWithNulls(values)
values = map(lambda x: replaceNulls(x,avg), values)
clust_export_data.write(string.join([symbol+':'+reg_junction+'|'+ref_junction]+values,'\t')+'\n')
exported.append(reg_junction)
exported.append(ref_junction)
#print ref_junction,reg_junction
if platform == 'junction' or platform == 'AltMouse':
probeset_gene_db={}
import ExonArrayEnsemblRules
if platform == 'junction':
export_exon_filename = 'AltDatabase/'+species+'/'+platform+'/'+species+'_Ensembl_probesets.txt'
if platform == 'AltMouse':
export_exon_filename = 'AltDatabase/'+species+'/'+platform+'/'+species+'_Ensembl_probesets.txt'
ensembl_probeset_db = ExonArrayEnsemblRules.reimportEnsemblProbesetsForSeqExtraction(export_exon_filename,'only-junctions',{})
for gene in ensembl_probeset_db:
for uid,pos,location in ensembl_probeset_db[gene]:
junction_locations[uid] = pos,location
probeset_gene_db[uid]=gene
def filterByLocalJunctionExp(gene,features):
try: symbol,description = gene_annotations[gene]
except Exception: symbol='';description=''
begin_time = time.time()
junctions_to_compare={}
overlapping_junctions_exp={}
overlapping_junctions={}
for feature in feature_exp_db:
feature_exp = feature_exp_db[feature]
if '-' in feature:
pos1,pos2 = junction_locations[feature][0]
for f2 in feature_exp_db:
if '-' in f2:
if f2!=feature:
f2_exp = feature_exp_db[f2]
alt_pos1,alt_pos2 = junction_locations[f2][0]
positions = [pos1,pos2,alt_pos1,alt_pos2]
positions.sort()
diff = positions.index(pos2)-positions.index(pos1)
if diff!=1:
#try: junctions_to_compare[feature].append(f2)
#except Exception: junctions_to_compare[feature] = [f2]
try: overlapping_junctions_exp[feature].append([f2_exp,f2])
except Exception: overlapping_junctions_exp[feature] = [[f2_exp,f2]]
else:
diff = positions.index(alt_pos2)-positions.index(alt_pos1)
if diff!=1:
try: overlapping_junctions_exp[feature].append([f2_exp,f2])
except Exception: overlapping_junctions_exp[feature] = [[f2_exp,f2]]
"""
for feature in feature_exp_db:
if '-' in feature:
pos1,pos2 = junction_locations[feature][0]
positions.append((pos1,pos2))
positions.append((pos2,pos1))
overlapping_features=[]; junctions_to_compare={}; loc_db={}; feature_pos=[]
for feature in features:
pos1,pos2 = junction_locations[feature][0]
feature_pos.append(pos1); feature_pos.append(pos2)
try:loc_db[pos1].append(feature)
except Exception: loc_db[pos1] = [feature]
try:loc_db[pos2].append(feature)
except Exception: loc_db[pos2] = [feature]
for feature in features: ### e.g., some junction
try:
pos1,pos2 = junction_locations[feature][0] ### coordinates of that junction
i1 = feature_pos.index(pos1) ### index position of the junctions
i2 = feature_pos.index(pos2)
#print feature, i1, i2, pos1, pos2
if (i1-i2) != 1:
overlapping_features.append(feature)
except Exception:
pass
for feature in overlapping_features:
#if feature not in features_examined: ### Remove this to allow for other reasonable junction or junction intron pairs that were not significant above
### get overlapping feature pairs
pos1,pos2 = junction_locations[feature][0]
i1 = feature_pos.index(pos1)
i2 = feature_pos.index(pos2)
for i in range(i1+1,i2):
overlapping = loc_db[feature_pos[i]]
for o in overlapping:
if o not in features_examined and '-' in o and '-' in feature and platform != 'junction':
try: junctions_to_compare[feature].append(o)
except Exception: junctions_to_compare[feature] = [o]
"""
#duration = time.time() - begin_time
#print duration, 'seconds'
expressed_junctions=[]
for feature in overlapping_junctions_exp:
counts = map(lambda x: x[0], overlapping_junctions_exp[feature])
combined = [sum(value) for value in zip(*counts)]
#if feature == 'ENSG00000002586:E1.5-E4.1':
#print feature
#print combined
#print overlapping_junctions_exp[feature][0];sys.exit()
#dpsi_values = [ratio(value) for value in zip(*[overlapping_junctions_exp[feature][0],combined])]
#print feature
#print overlapping_junctions[feature]
#print overlapping_junctions_exp[feature]
#print combined;sys.exit()
exclusion_id = feature+'|exclusion'
feature_exp_db[exclusion_id] = combined
max_ratio,num_incl_events,num_excl_events,dpsi,rho,max_all_psi,proceed = junctionComparisonMethod(feature,exclusion_id)
if proceed:
fe1,fe2 = string.split(feature,'-')
if '_' in fe1 and '_' in fe2: pass
else:
#"""
top_excl_junction=[]
for (exp_ls,f2) in overlapping_junctions_exp[feature]:
top_excl_junction.append([statistics.avg(exp_ls),f2])
top_excl_junction.sort()
#print top_excl_junction[-8:]
#print statistics.avg(feature_exp_db[feature])
top_excl_junction = top_excl_junction[-1][-1]
t1,t2 = string.split(top_excl_junction,'-')
altexons = []
if t1!=fe1: altexons.append(fe1)
if t2!=fe2: altexons.append(gene+':'+fe2)
altexons = string.join(altexons,'|')
reg_pos,reg_loc = junction_locations[feature]
ref_pos,ref_loc = junction_locations[top_excl_junction]
#print [feature, dpsi,rho]
#top_excl_junctions = map(lambda x: x[-1], top_excl_junction[-5:])
#print top_excl_junctions;sys.exit()
#for i in top_excl_junctions: max_ratio,num_incl_events,num_excl_events,dpsi,rho,max_all_psi,proceed = junctionComparisonMethod(feature,i); print i, dpsi,rho
values = psi_db[feature,exclusion_id]
max_incl_psi = str(getMax(values))
export_data.write(string.join([symbol,description,feature,top_excl_junction,altexons,str(max_ratio),str(dpsi),str(rho),max_incl_psi,reg_loc+'|'+ref_loc,'junctions']+values,'\t')+'\n')
avg = averageWithNulls(values)
values_imputed = map(lambda x: replaceNulls(x,avg), values)
clust_export_data.write(string.join([symbol+':'+feature+'|'+top_excl_junction]+values_imputed,'\t')+'\n')
exported.append(feature)
exported.append(top_excl_junction)
#sys.exit()
gene_annotations = getGeneAnnotations(species)
firstLine = True
feature_exp_db={}
gene_junction_denom={} ### Determine the max junction counts per gene per sample
regulated_junctions = {}
genes_examined=0; gene_increment=1000
prior_gene = None
gene = None
for line in open(expFile,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if firstLine:
firstLine = False
ge_header = t
additional_headers = string.join(['Symbol','Description','Examined-Junction','Background-Major-Junction','AltExons',"PME","dPSI",'rho','Max Inclusion PSI','Coordinates','feature']+t[1:],'\t')+'\n'
export_data.write(additional_headers)
clust_export_data.write(line)
else:
uid = t[0]
if '=' in uid:
try: uid,location = string.split(uid,'=')
except Exception: print t[0];sys.exit()
pos1,pos2 = string.split(string.split(location,':')[-1],'-')
pos = [int(pos1),int(pos2)]
pos.sort()
junction_locations[uid] = pos,location ### use the to report position and verify compared junctions
            proceed = True ### no gene-specific debugging filter applied
if platform == 'RNASeq':
gene = string.split(uid,':')[0]
else:
if uid in probeset_gene_db:
gene = probeset_gene_db[uid]
else: proceed = False
if proceed:
counts = map(lambda x: float(x), t[1:])
if platform == 'junction' or platform == 'AltMouse':
                    counts = map(lambda x: int(math.pow(2,x)), counts) ### convert log2 intensities back to linear scale so they resemble junction counts
if '-' in uid or uid in junction_locations:
#try: gene_junction_denom[gene].append(counts)
#except Exception: gene_junction_denom[gene] = [counts]
pass
if genes_examined==gene_increment:
gene_increment+=1000
print '*',
if gene != prior_gene and prior_gene !=None:
genes_examined+=1
#if len(gene_junction_denom)>0:
if prior_gene == '!ENSG00000198001':
filterByLocalJunctionExp(prior_gene,feature_exp_db)
#try: gene_junction_denom[prior_gene] = [max(value) for value in zip(*gene_junction_denom[prior_gene])] # sum the junction counts for all junctions across the gene
#except Exception: pass
if platform == 'RNASeq':
filterByLocalJunctionExp(prior_gene,feature_exp_db)
else:
compareJunctionExpression(prior_gene)
feature_exp_db={}
gene_junction_denom={}
if max(counts)>4:
feature_exp_db[uid] = counts
prior_gene = gene
#compareJunctionExpression(gene)
export_data.close()
clust_export_data.close()
graphic_links=[]
if (len(exported)/2)<7000:
if (len(exported)/2)<4000:
graphic_links = exportHeatmap(clust_export_dir,useHOPACH=False,color_gradient='yellow_black_blue',normalize=True,columnMethod='hopach',size=len(exported)/2)
else:
clust_export_dir,size = filterJunctionExpression(clust_export_dir)
if size<4000:
try: graphic_links = exportHeatmap(clust_export_dir,useHOPACH=False,color_gradient='yellow_black_blue',normalize=True,columnMethod='hopach',size=len(exported)/2,filter=True)
except Exception: graphic_links=[]
print len(exported)/2,'junctions exported and',len(retained_introns)/2, 'retained introns exported...'
return graphic_links, clust_export_dir
def getGeneAnnotations(species):
gene_annotations={}
fn = filepath('AltDatabase/ensembl/'+species+'/'+species+'_Ensembl-annotations_simple.txt')
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
ensembl,description,symbol = string.split(data,'\t')
gene_annotations[ensembl] = symbol,description
return gene_annotations
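### Illustrative usage (hypothetical gene ID; assumes the standard AltDatabase annotation file exists):
###   gene_annotations = getGeneAnnotations('Hs')
###   symbol, description = gene_annotations['ENSG00000100650']
### The annotation file is expected to be tab-delimited with three columns: ensembl, description, symbol.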
def getDownstreamExon(upstream_exon):
gene,exon = string.split(upstream_exon,':')
downstream_exon = gene+':E'+str(int(string.split(exon,'.')[0][1:])+1)+'.1'
return downstream_exon
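### Illustrative example (hypothetical ID): getDownstreamExon('ENSG00000100650:E5.2') returns
### 'ENSG00000100650:E6.1', incrementing the exon block number and resetting the region to .1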
def expressionRatio(incl_exp,excl_exp,num_incl_events,num_excl_events):
### Selects the minor isoform and looks at its relative expression
if num_incl_events>num_excl_events:
max_ratio = max(excl_exp)/max(incl_exp)
else:
max_ratio = max(incl_exp)/max(excl_exp)
return max_ratio
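### Illustrative example with made-up values: for incl_exp=[8.0,12.0,6.0], excl_exp=[2.0,3.0,1.5],
### num_incl_events=5 and num_excl_events=2, the exclusion isoform is the minor one, so the
### function returns max(excl_exp)/max(incl_exp) = 3.0/12.0 = 0.25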
def unbiasedComparisonSpliceProfiles(root_dir,species,platform,expFile=None,min_events=-1,med_events=-1): # 4 9
""" This is prototype code to identify critical splicing events (SI-exon-level) from single cell data prior to group assignment """
begin_time = time.time()
if platform == 'RNASeq': avg_all_for_SS = 'yes'
else: avg_all_for_SS = 'no'
agglomerate_inclusion_probesets = 'no'
probeset_type = 'core'
import JunctionArray; import AltAnalyze
buildFromDeNovoJunctionsOnly=True
if buildFromDeNovoJunctionsOnly and platform=='RNASeq':
alt_junction_db={}
else:
exon_db, constitutive_probeset_db = AltAnalyze.importSplicingAnnotations(platform,species,probeset_type,avg_all_for_SS,root_dir)
alt_junction_db,critical_exon_db,exon_dbase,exon_inclusion_db,exon_db = JunctionArray.getPutativeSpliceEvents(species,platform,exon_db,agglomerate_inclusion_probesets,root_dir)
print 'Number of Genes with Examined Splice Events:',len(alt_junction_db)
if platform == 'junction':
global probeset_junction_db; probeset_junction_db={}
#alt_junction_db = {'ENSG00000100650':alt_junction_db['ENSG00000100650']}
critical_exon_db={}
for affygene in alt_junction_db:
for event in alt_junction_db[affygene]:
for critical_exon in event.CriticalExonList():
critical_exon = affygene+':'+critical_exon
try:
#print event.InclusionJunction(), event.ExclusionJunction();sys.exit()
inclusion_list,exclusion_list = critical_exon_db[critical_exon]
if '-' in event.InclusionProbeset() or (platform == 'junction' and '-' in event.InclusionJunction()):
inclusion_list.append(event.InclusionProbeset())
exclusion_list.append(event.ExclusionProbeset())
if platform == 'junction':
probeset_junction_db[event.InclusionProbeset()] = event.InclusionJunction()
probeset_junction_db[event.ExclusionProbeset()] = event.ExclusionJunction()
except Exception:
if '-' in event.InclusionProbeset() or (platform == 'junction' and '-' in event.InclusionJunction()):
inclusion_list = [event.InclusionProbeset()]
else: inclusion_list=[]
exclusion_list = [event.ExclusionProbeset()]
#inclusion_list.append(critical_exon)
inclusion_list = unique.unique(inclusion_list)
exclusion_list = unique.unique(exclusion_list)
if len(inclusion_list)>0 and len(exclusion_list)>0:
critical_exon_db[critical_exon] = inclusion_list,exclusion_list
elif 'I' in critical_exon and '_' not in critical_exon and '.1' in critical_exon:
critical_exon_db[critical_exon] = [critical_exon],exclusion_list
#if affygene == 'ENSMUSG00000004952':
#if '.1' not in critical_exon: print critical_exon,inclusion_list,exclusion_list
if expFile != None:
graphic_links, cluster_input = compareRawJunctionExpression(root_dir,platform,species,critical_exon_db,expFile,min_events=min_events,med_events=med_events)
print 'finished in',int(time.time()-begin_time), 'seconds'
return graphic_links, cluster_input
### Determine the location of the gene expression file
input_folder = root_dir+'AltResults/RawSpliceDataTemp/'+species+'/splicing-index/'
dir_list = read_directory(input_folder) ### get all of the RawSplice files
for filename in dir_list:
if '.txt' in filename and ('_average' not in filename):
dataset_name = filename
input_dir = input_folder + dataset_name
exportSorted(input_dir, 2)
for filename in dir_list:
if '.txt' in filename and ('_average' not in filename):
dataset_name = filename
input_dir = input_folder + dataset_name
import RNASeq
biological_categories = RNASeq.importBiologicalRelationships(species)
genes = biological_categories['protein_coding']
#genes = biological_categories['BioMarker']
genes.update(biological_categories['transcription regulator'])
genes.update(biological_categories['splicing regulator'])
genes.update(biological_categories['kinase'])
genes.update(biological_categories['GPCR'])
### Import gene expression summaries to exclude high differential genes
fn=filepath(root_dir+'/ExpressionInput/exp.'+dataset_name[:-4]+'-steady-state.txt')
firstLine = True
low_diff_exp_genes={}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if firstLine:
firstLine = False
ge_header = t
else:
rpkms = map(lambda x: float(x), t[1:])
rpkms.sort()
lower_index = int(len(rpkms)*0.2); upper_index = len(rpkms)-lower_index
gene = t[0]
#if (max(rpkms)/min(rpkms))<5: ### Max allowed differential expression
#if (rpkms[upper_index]/rpkms[lower_index])<5:
#if gene == 'ENSMUSG00000078812': print statistics.avg(rpkms)
if gene in genes and statistics.avg(rpkms)>5:
low_diff_exp_genes[gene]=rpkms
#print low_diff_exp_genes['ENSMUSG00000078812']
    print len(low_diff_exp_genes), 'genes with average expression greater than 5 retained for comparison'
import gene_associations; from scipy import stats
gene_to_symbol = gene_associations.getGeneToUid(species,('hide','Ensembl-Symbol'))
    exported_IDs=0; ids_examined=0; genes_examined=0; gene_increment=1000
added={}; prior_gene = False; gene_comparison_db={}
    print 'Beginning to quickly find alternative exons...'
for filename in dir_list:
if '.txt' in filename and ('_average' not in filename) or len(dir_list)==1: ### We only want the group-comparison SI file
export_dir = string.split(input_folder,'RawSpliceData')[0]+'Unbiased/'+filename
export_data = export.ExportFile(export_dir)
fn=filepath(input_folder+filename)
x=0
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if x==0:
                    headers = t[2:] ### first two columns are gene and ExonID
#print headers
#print ge_header;sys.exit()
export_data.write(string.join(headers,'\t')+'\n') ### write header row
x=1
else:
if platform != 'RNASeq':
if 'ENS' in t[0]: #ENSG human
gene = t[0]; altExon = t[2]
else:
altExon = t[0]
else:
gene = t[0]; altExon = t[2]
if genes_examined==gene_increment:
gene_increment+=1000
print '*',
if gene != prior_gene:
genes_examined+=1
hits={}
for gene in gene_comparison_db:
feature_db = gene_comparison_db[gene]
if gene == 'ENSMUSG00000078812':
for i in feature_db: print i
for altexon in feature_db:
#print altexon
if altexon in critical_exon_db:
inclusion_list,exclusion_list = critical_exon_db[altexon]
#print inclusion_list, 'incl'
altexon_SI = feature_db[altexon]
for incl in inclusion_list:
if incl in feature_db:
incl_SI = feature_db[incl]
with warnings.catch_warnings():
warnings.filterwarnings("ignore") ### hides import warnings
try: rho,p = stats.pearsonr(altexon_SI,incl_SI)
except Exception: rho = 1
#print rho, altexon,incl
#print string.join(map(str,altexon_SI),'\t')
#print string.join(map(str,incl_SI),'\t');sys.exit()
if rho>0.4:
hits[altexon]=[]
if gene == 'ENSMUSG00000078812':print '***', incl
                                    #print exclusion_list, 'excl'
                                    for excl in exclusion_list: ### require anti-correlation with the exclusion junctions
if excl in feature_db:
excl_SI = feature_db[excl]
with warnings.catch_warnings():
warnings.filterwarnings("ignore") ### hides import warnings
rho,p = stats.pearsonr(altexon_SI,excl_SI)
if rho<-0.4:
hits[altexon]=[]
if gene == 'ENSMUSG00000078812': print '***', excl
if gene == 'ENSMUSG00000078812': print hits
for altExon in hits:
added[altExon]=[]
log_folds = feature_db[altExon]
log_folds = map(str, log_folds)
values = string.join([altExon]+log_folds,'\t')+'\n' ### [t[3]]+ before log_folds?
if gene in gene_to_symbol: symbol = gene_to_symbol[gene][0]+" "
else: symbol = ''
export_data.write(symbol+values)
exported_IDs+=1
gene_comparison_db={}
#if exported_IDs> 1: sys.exit()
prior_gene = gene
if ';' in altExon:
altExon1, altExon2 = string.split(altExon,';')
altExons = [altExon1,gene+':'+altExon2]
else:
altExons = [altExon]
for altExon in altExons:
if altExon not in added and gene in low_diff_exp_genes: #altExon in alternative_exon_db and
#if altExon == 'ENSMUSG00000022841:E7.2':
                        ids_examined+=1
#values = map(lambda x: float(x)*-1, t[3:]) #reverse the fold for cluster visualization
values = map(lambda x: float(x), t[3:])
#print len(headers),len(values);sys.exit()
avg = statistics.avg(values)
log_folds = map(lambda x: x-avg, values) ### calculate log folds and make these strings
i=0; si_exp_list = [] ### store the pairs of SI and gene expression for each sample
rpkms = list(low_diff_exp_genes[gene])
for si in log_folds: si_exp_list.append([rpkms[i],si]); i+=1
rpkms.sort()
si_exp_list.sort() ### This object contains both gene expression and SI values
max_rpkm = rpkms[-1]
                        half_max_rpkm = max_rpkm/2 ### only consider samples where expression is within 2-fold of the gene's maximum
s = bisect.bisect_right(rpkms,half_max_rpkm)
si_highGeneExp = map(lambda (rpkm,si): si, si_exp_list[s:])
#print si_exp_list[s:]
#cv = statistics.stdev(si_highGeneExp)/statistics.avg(si_highGeneExp)
si_highGeneExp.sort()
try:
biggest_diff = si_highGeneExp[-2]-si_highGeneExp[1]
#print biggest_diff
#print cv
if gene == 'ENSG00000009413':
print altExon, biggest_diff
print si_highGeneExp
if biggest_diff>2 and len(si_highGeneExp)>20:
try:
feature_db = gene_comparison_db[gene]
feature_db[altExon] = log_folds
except Exception:
feature_db={}
feature_db[altExon] = log_folds
gene_comparison_db[gene] = feature_db
#added[altExon]=[]
#log_folds = map(str, log_folds)
#values = string.join([altExon]+log_folds,'\t')+'\n' ### [t[3]]+ before log_folds?
#if gene in gene_to_symbol: symbol = gene_to_symbol[gene][0]+" "
#else: symbol = ''
#export_data.write(symbol+values)
#exported_IDs+=1
except Exception: pass ### Occurs with less than 4 samples in the si_highGeneExp set
    print exported_IDs, 'exported ID values for clustering out of',ids_examined
export_data.close()
return export_dir, exported_IDs
def AllGroupsNIComparison(root_dir, species, array_type):
if array_type == 'RNASeq': avg_all_for_SS = 'yes'
else: avg_all_for_SS = 'no'
agglomerate_inclusion_probesets = 'no'
#calculateNormalizedIntensities(root_dir, species, array_type, avg_all_for_SS = avg_all_for_SS, probeset_type = 'core')
### This analysis is designed for datasets without known variables (e.g., single cell seq)
    import JunctionArray; import AltAnalyze
    probeset_type = 'core' ### assumed default, matching unbiasedComparisonSpliceProfiles above
    exon_db, constitutive_probeset_db = AltAnalyze.importSplicingAnnotations(array_type,species,probeset_type,avg_all_for_SS,root_dir)
alt_junction_db,critical_exon_db,exon_dbase,exon_inclusion_db,exon_db = JunctionArray.getPutativeSpliceEvents(species,array_type,exon_db,agglomerate_inclusion_probesets,root_dir)
print 'Number of Genes with Examined Splice Events:',len(alt_junction_db)
for affygene in alt_junction_db:
for event in alt_junction_db[affygene]:
event.InclusionProbeset()
event.ExclusionProbeset()
def createExpressionSQLdb(species,platform,expFile):
""" Store junction/exon RPKMs or probesets expression in a SQL database"""
start=time.time()
import SQLInterace
DBname = 'FeatureExpression'
schema_text ='''-- Schema for species specific AltAnalyze junction/exon expression data.
-- Genes store general information on each Ensembl gene ID
create table ExonExp (
uid text primary key,
gene text,
expression text
);
'''
    conn = SQLInterace.populateSQLite(species,platform,DBname,schema_text=schema_text) ### conn is the database connection interface
### Populate the database
print 'importing', expFile
fn=filepath(expFile)
for line in open(fn,'r').xreadlines():
data = line.strip()
t = string.split(data,'\t')
uid = t[0]; expression = string.join(t[1:],'\t')
try: gene = string.split(uid,':')[0]
except Exception: print 'not RNASeq - function not supported';kill
#print exonID,gene,sequence
### Store this data in the SQL database
command = """insert into ExonExp (uid, gene, expression)
values ('%s', '%s','%s')""" % (uid,gene,expression)
conn.execute(command)
conn.commit() ### Needed to commit changes
conn.close()
time_diff = str(round(time.time()-start,1))
print 'Exon/Junction Expression Data added to SQLite database in %s seconds' % time_diff
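### Illustrative call (hypothetical path; assumes RNASeq-style feature IDs of the form gene:feature):
###   createExpressionSQLdb('Hs','RNASeq','ExpressionInput/exp.MyDataset.txt')
### Each row ID such as 'ENSG00000100650:E1.1-E2.1' is split on ':' to recover the gene, and the
### remaining tab-delimited values are stored as a single expression string per uid.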
def logratio(values):
    return values[0] - values[1]
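### Illustrative example: logratio([5.0, 3.0]) returns 2.0, the difference of the two entries
### (a log2 fold change when both entries are log2-scale expression values)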
def matchAndCorrelate(prime, secondary, output_dir, rho_cutoff):
### Take two files and correlate their IDs to any matching
export_object = export.ExportFile(output_dir[:-4]+'-'+str(rho_cutoff)+'.txt')
export_object.write('Feature1\tFeature2\trho\n')
firstLine = True; prime_db={}
for line in open(prime,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if firstLine:
firstLine = False
else:
prime_db[t[0]] = map(float,t[1:])
firstLine = True; secondary_db={}; key_db={}
for line in open(secondary,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if firstLine:
firstLine = False
else:
try: gene_id,probe_id = string.split(t[0],':')
except Exception: gene_id = t[0]; probe_id = t[0]
try: secondary_db[gene_id].append((map(float,t[1:]),probe_id))
except Exception: secondary_db[gene_id] = [(map(float,t[1:]),probe_id)]
from scipy import stats
top_correlated={}
for gene in prime_db:
prime_profile = prime_db[gene]
if gene in secondary_db:
for (secondary_db_profile, probe_id) in secondary_db[gene]:
rho,p = stats.pearsonr(prime_profile,secondary_db_profile)
if rho > rho_cutoff or rho < -1*rho_cutoff:
#print gene, '\t',probe_id, '\t',rho
export_object.write(gene+'\t'+probe_id+'\t'+str(rho)+'\n')
export_object.close()
def getMax(values):
values2=[]
for i in values:
try: values2.append(float(i))
except Exception: pass
return max(values2)
def replaceNulls(x,avg):
if x=='':
return '0'
else:
return str(float(x)-avg)
def nullReplace(dpsi_values,combined,min_exp): ### Don't count un-detected genes in later stats
null_replaced=[]
i=0
for v in combined:
if v<(min_exp+1): null_replaced.append('')
else: null_replaced.append(str(dpsi_values[i]))
i+=1
return null_replaced
def averageWithNulls(values):
avg_vals=[]
for i in values:
try: avg_vals.append(float(i))
except Exception: pass
avg = statistics.avg(avg_vals)
return avg
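### Illustrative examples for the null-handling helpers above (made-up values):
###   getMax(['3.5','','7.2'])           -> 7.2  (empty strings are skipped)
###   averageWithNulls(['1.0','','3.0']) -> 2.0  (mean of the non-null entries)
###   replaceNulls('',2.0) -> '0' and replaceNulls('3.5',2.0) -> '1.5' (mean-centered string output)
###   nullReplace([0.2,0.5],[1,10],3)    -> ['', '0.5']  (entries whose combined counts are at or below min_exp become nulls)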
def expressionSortImport(filename,filter_db=None):
firstLine = True; exp_db={}; lines=0; max_var = 3
for line in open(filename,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if firstLine:
headers = t[1:]
header_ind = map(lambda x: (x,headers.index(x)),headers) ### store header index values
header_ind.sort()
#print header_ind
headers_ind = map(lambda (x,i): i,header_ind)
firstLine = False
else:
try: exp_data = map(float,t[1:])
except Exception:
exp_data=[]
for value in t[1:]:
try: value = float(value)
except Exception: pass
exp_data.append(value)
exp_data = map(lambda i: exp_data[i],headers_ind)
if filter_db != None:
key = t[0]
if ':' in key:
key = string.split(key,':')[0]
max_var = 0
if key in filter_db:
if max(exp_data)>max_var:
exp_db[key] = exp_data
else:
exp_db[t[0]] = exp_data
lines+=1
print len(exp_db),'IDs imported from', export.findFilename(filename)
return exp_db
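### Illustrative call (hypothetical path): expressionSortImport('ExpressionInput/exp.MyDataset.txt')
### returns {uid: [values...]} with the sample columns re-ordered by sorted header name. When a
### filter_db is supplied, IDs are reduced to the gene portion before any ':', only genes present in
### filter_db are kept, and the row must exceed the minimum max-expression cutoff.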
def featureCorrelate(species,query_dir,feature_dir,output_dir,feature_type):
#python ExpressionBuilder.py --species Hs --i /Volumes/SEQ-DATA/SingleCell-Churko/Filtered/Unsupervised-AllExons/AltResults/Unbiased/junctions-2-5/top_alt_junctions_clust-TTN_all_selected.txt --additional /Volumes/SEQ-DATA/SingleCell-Churko/Filtered/Unsupervised-AllExons/ExpressionInput/exp.CM-TTN-steady-state.txt --analysis featureCorrelate --var "splicing regulator"
### Correlate features in a file to feature-specific gene expression data (e.g., "splicing regulator")
try: export_object = export.ExportFile(output_dir[:-4]+'-'+feature_type+'.txt')
except Exception: export_object = export.ExportFile(output_dir[:-4]+'-None.txt')
export_object.write('UID\tFeature\trho\n')
import RNASeq; import ExpressionBuilder
biological_categories = RNASeq.importBiologicalRelationships(species)
gene_to_symbol_db = ExpressionBuilder.importGeneAnnotations(species)
if feature_type != None:
filter_genes = biological_categories[feature_type]
else:
filter_genes=None
try: print len(filter_genes),feature_type,'genes imported for comparison...'
except Exception: pass
query_db = expressionSortImport(query_dir)
feature_db = expressionSortImport(feature_dir,filter_genes)
from scipy import stats
top_correlated={}
for uid in query_db:
query_exp_profile = query_db[uid]
for gene in feature_db:
feature_exp_profile = feature_db[gene]
try: rho,p = stats.pearsonr(query_exp_profile,feature_exp_profile)
except Exception:
### If missing values are present, only correlate to where the values are present
query_exp_profile2=[]
feature_exp_profile2=[]
i=0
for v in query_exp_profile:
if v!='':
query_exp_profile2.append(query_exp_profile[i])
feature_exp_profile2.append(feature_exp_profile[i])
i+=1
if len(feature_exp_profile2)>20:
rho,p = stats.pearsonr(query_exp_profile2,feature_exp_profile2)
else:
rho = 0
try: symbol = gene_to_symbol_db[gene]
except Exception: symbol = gene
try: top_correlated[uid].append([abs(rho),symbol[0],rho])
except Exception: top_correlated[uid]=[[abs(rho),symbol[0],rho]]
for uid in top_correlated:
res = top_correlated[uid]
res.sort()
feature = res[-1][1]
rho = res[-1][-1]
export_object.write(uid+'\t'+feature+'\t'+str(rho)+'\n')
export_object.close()
def lncRNANeighborCorrelationAnalysis(dataset_dir):
### dataset_dir is the ExpressionOuput DATASET file location
#Get all analyzed genes and coordinates
print 'Importing the DATASET file'
global gene_symbol_db
fn=filepath(dataset_dir); gene_coordinate_db={}; all_lncRNA_db={}; coord_gene_db={}; gene_symbol_db={}
chr_coord_list=[]; positive_strand=[]; negative_strand=[]
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
geneID = t[0]
symbol = t[2]
protein_class = t[8]
chr = t[9]
strand = t[10]
coordinates = tuple(string.split(t[11],'-'))
coord_list = chr,coordinates,strand
gene_coordinate_db[geneID]=coord_list
coord_gene_db[coord_list] = geneID
gene_symbol_db[geneID] = symbol
chr_coord_list.append(coord_list)
if '+' in strand:
positive_strand.append(coord_list)
else:
negative_strand.append(coord_list)
if 'lincRNA' in protein_class or 'lncRNA' in protein_class:
all_lncRNA_db[geneID]=[]
chr_coord_list.sort(); positive_strand.sort(); negative_strand.sort()
useClusterFile = False
#Get all significantly differentially expressed genes
if useClusterFile:
cluster_file = string.replace(dataset_dir,'ExpressionOutput','ExpressionOutput/Clustering/')
cluster_file = string.replace(cluster_file,'DATASET-','SampleLogFolds-')
else:
cluster_file = string.replace(dataset_dir,'ExpressionOutput','ExpressionInput')
cluster_file = string.replace(cluster_file,'DATASET-','exp.')
cluster_file = string.replace(cluster_file,'.txt','-steady-state.txt')
print 'Importing the cluster file'
fn=filepath(cluster_file); differentially_exp_db={}; lncRNA_db={}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
uid = string.split(data,'\t')[0]
if ' ' in uid:
uid = string.split(uid,' ')[0]
differentially_exp_db[uid]=[]
if uid in all_lncRNA_db:
lncRNA_db[uid]=[]
#import random
#lncRNA_db = random.sample(differentially_exp_db,len(lncRNA_db))
print 'Number of lncRNAs regulated in clusters:',len(lncRNA_db)
#Get the MarkerFinder cluster assignments of all analyzed genes
root_dir = string.split(dataset_dir,'ExpressionOutput')[0]
markerfinder = root_dir+'ExpressionOutput/MarkerFinder/AllGenes_correlations-ReplicateBased.txt'
print 'Importing the MarkerFinder file'
fn=filepath(markerfinder); cluster_db={}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
geneID = t[0]
cluster = t[-1]
if geneID in differentially_exp_db:
cluster_db[geneID] = cluster
cluster_regulated_lncRNAs={}
for geneID in lncRNA_db:
try:
cluster_regulated_lncRNAs[cluster_db[geneID]]+=1
except Exception:
try: cluster_regulated_lncRNAs[cluster_db[geneID]]=1
except Exception: pass
for cluster in cluster_regulated_lncRNAs:
print cluster, cluster_regulated_lncRNAs[cluster]
print 'Searching for lncRNA positional correlations'
direction_list=['both','forward','reverse']
print '\tExamining both strands'
for direction in direction_list:
nmc = searchUpOrDownstreamGenes(lncRNA_db,gene_coordinate_db,cluster_db,coord_gene_db,chr_coord_list,direction)
#print len(nmc),len(chr_coord_list)
print '\tExamining the positive strand'
for direction in direction_list:
nmc = searchUpOrDownstreamGenes(lncRNA_db,gene_coordinate_db,cluster_db,coord_gene_db,positive_strand,direction)
#print len(nmc),len(positive_strand)
print '\tExamining the negative strand'
for direction in direction_list:
nmc = searchUpOrDownstreamGenes(lncRNA_db,gene_coordinate_db,cluster_db,coord_gene_db,negative_strand,direction)
#print len(nmc),len(negative_strand)
def searchUpOrDownstreamGenes(lncRNA_db,gene_coordinate_db,cluster_db,coord_gene_db,coord_list,direction):
neighbor_matching_cluster_db={}; multiple_lncRNAs=0; number_of_neighbors=0
for geneID in lncRNA_db:
coordinates = gene_coordinate_db[geneID]
if coordinates in coord_list: ### strand dependent
rank_index = coord_list.index(coordinates)
if geneID in cluster_db:
cluster = cluster_db[geneID]
if direction == 'forward':
search_pos = [4, 3,2,1]
search_pos = [1]
elif direction == 'reverse':
search_pos = [-4, -3,-2,-1]
search_pos = [-1]
else:
search_pos = [4,3,2,1,-3,-2,-1, -4]
search_pos = [1,-1]
for oi in search_pos:
i = coord_list[rank_index-oi]
neighbor_gene = coord_gene_db[i]
symbol = gene_symbol_db[neighbor_gene]
if neighbor_gene in cluster_db and neighbor_gene not in lncRNA_db and neighbor_gene != geneID and '.' not in symbol:
ncluster = cluster_db[neighbor_gene]
if cluster == ncluster:
if neighbor_gene in lncRNA_db:
multiple_lncRNAs+=1
try: neighbor_matching_cluster_db[geneID]+=1; number_of_neighbors+=1
except Exception: neighbor_matching_cluster_db[geneID]=1; number_of_neighbors+=1
print cluster,gene_symbol_db[geneID],gene_symbol_db[neighbor_gene]
#print 'multiple_lncRNAs:', multiple_lncRNAs, number_of_neighbors
return neighbor_matching_cluster_db
def getHighestExpressingGenes(input_file,output_dir,topReported):
### Sorts genes based on RPKM (ignore read counts)
bisectValues = False
if topReported<100:
bisectValues = True
firstLine = True
sampleExpression_db={}
for line in open(input_file,'rU').xreadlines():
data = cleanUpLine(line)
values = string.split(data,'\t')
if firstLine:
headers = values[1:]
for i in headers:
sampleExpression_db[i]=[]
firstLine = False
print len(values)
else:
gene = values[0]
i=0
for rpkm in values[1:]:
sampleExpression_db[headers[i]].append((float(rpkm),gene))
i+=1
for sample in sampleExpression_db:
Sample = string.replace(sample,'.bed','')
Sample = string.replace(Sample,'.cel','')
Sample = string.replace(Sample,'.CEL','')
Sample = string.replace(Sample,':','-')
export_object = export.ExportFile(output_dir+'/'+Sample+'-top_'+str(topReported)+'.txt')
export_object.write('Genes\tSystemCode\tChanged\n')
sampleExpression_db[sample].sort()
if bisectValues:
rpkms = map(lambda x: x[0], sampleExpression_db[sample])
print rpkms[-5:]
s = bisect.bisect_right(rpkms,float(topReported))
topExpGenes = map(lambda x: str(x[1]), sampleExpression_db[sample][-1*(len(rpkms)-s):])
print Sample,len(topExpGenes), s
else:
topExpGenes = map(lambda x: str(x[1]), sampleExpression_db[sample][-1*topReported:])
for gene in topExpGenes:
if 'ENS' in gene or 'ENF' in gene: system = 'En'
else: system = 'Sy'
export_object.write(gene+'\t'+system+'\t1\n')
export_object.close()
    print 'The top',topReported,'expressing genes have been exported to',output_dir
def returnRowHeaderForMaxEntry(filename,top):
### Used for enrichment analysis matrices to find the most significant term for each comparison/group/sample
output_file = filename[:-4]+'_top%d.txt' % top
export_object = export.ExportFile(output_file)
import clustering; import numpy
matrix, column_header, row_header, dataset_name, group_db = clustering.importData(filename,reverseOrder=False)
    matrix = map(numpy.array, zip(*matrix)) ### transpose the matrix so each row corresponds to an original column
column_header, row_header = row_header, column_header
x=0
for row in matrix:
comparison = row_header[x]
copied_row_values = list(row)
copied_row_values.sort()
max_vals = copied_row_values[-1*top:]
max_vals.reverse()
term = column_header[list(row).index(max_vals[0])]
term+= '('+str(max_vals[0])[:4]+')|'
        for i in range(1, min(top, 11)): ### append the 2nd through at most 11th highest-scoring terms
            term+= column_header[list(row).index(max_vals[i])]
            term+= '('+str(max_vals[i])[:4]+')|'
#print comparison, term
export_object.write(comparison+'\t'+term+'\n')
x+=1
export_object.close()
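### Illustrative call (hypothetical file): returnRowHeaderForMaxEntry('LineageProfiler_z-scores.txt', 3)
### writes 'LineageProfiler_z-scores_top3.txt' with one line per comparison/sample and a single field of
### the form 'term1(score)|term2(score)|term3(score)' listing its highest-scoring terms.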
def orderHeatmapByMarkerFinderOrder(clustered_file):
output_file = clustered_file[:-4]+'_MarkerFinderOrdered.txt'
export_object = export.ExportFile(output_file)
firstLine = True
geneOrder=[]
arrayOrder={}
    for line in open(clustered_file,'rU').xreadlines():
data = line[:-1]
values = string.split(data,'\t')
if firstLine:
headers = values[1:]
for i in headers:
group,sample = string.split(i,':')
try: arrayOrder[group].append(i)
except Exception: arrayOrder[group] = [i]
firstLine = False
else:
gene = values[0]
def exportSorted(filename, sort_col, excludeHeader=True):
### efficient method to sort a big file without storing everything in memory
### http://stackoverflow.com/questions/7079473/sorting-large-text-data
    output_file = filename[:-4]+'-sorted' ### temporary
index = []
f = open(filename)
firstLine = True
while True:
offset = f.tell()
line = f.readline()
if not line: break
length = len(line)
col = line.split('\t')[sort_col].strip()
if firstLine:
header = line
firstLine = False
if excludeHeader == False:
index.append((col, offset, length))
else:
index.append((col, offset, length))
f.close()
index.sort()
    o = open(output_file,'w')
f = open(filename)
if excludeHeader:
o.write(header)
for col, offset, length in index:
#print col, offset, length
f.seek(offset)
o.write(f.read(length))
o.close()
try:
### Error occurs when the file can't be deleted due to system permissions
os.remove(filename)
        os.rename(output_file,filename)
return filename
except Exception:
        return output_file
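### Illustrative call (hypothetical path): exportSorted('AltResults/RawSpliceDataTemp/Hs/splicing-index/dataset.txt', 2)
### rewrites the file sorted on the third tab-delimited column (sort_col is 0-based), keeps the header
### as the first line, and returns the path to the sorted file.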
def importJunctionPositions(species,array_type):
### Look up the junction coordinates for the region
if array_type == 'RNASeq':
probesets = 'junctions'
else:
probesets = 'probesets'
filename = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_Ensembl_'+probesets+'.txt'
fn=filepath(filename)
region_db = {}
firstRow=True
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if firstRow: firstRow = False
else:
probeset = t[0]
gene = t[2]
chr = t[4]
if '|' in t[13]:
region1 = string.split(t[13],'|')[1]
region2 = string.split(t[14],'|')[0]
junction_coord = region1+'-'+region2
region_db[probeset] = junction_coord
region_db[junction_coord] = gene+':'+t[12],chr ### Junction region for this version of Ensembl
return region_db
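### Illustrative structure of the returned dictionary (hypothetical entries):
###   region_db['12345_at']  = 'E1.2-E2.1'                             ### probeset/junction ID -> junction coordinate string
###   region_db['E1.2-E2.1'] = ('ENSG00000100650:E1.2-E2.1', 'chr14')  ### coordinate -> (gene:region ID, chromosome)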
def convertArrayReciprocalJunctionToCoordinates(species,array_type,dir_path,start_version,end_version):
""" Script for taking junction array defined ASPIRE or LinearRegression junction pairs, extracting the region coordinates
and exporting those coordinates with end_version EnsMart block and region IDs"""
UI.exportDBversion(start_version) ### Database EnsMart version
region_db = importJunctionPositions(species,array_type)
comparison_db={}
dir_list = UI.read_directory(dir_path)
for filename in dir_list:
if '.txt' in filename:
            comparison = string.split(filename,'.')[0]
proceed = False
if ('ASPIRE' in filename or 'egress' in filename) and ('GENE' not in filename and 'inclusion' in filename): proceed = True ### Need to create a special analysis just for reciprocal junctions
if proceed: ### Don't include splicing-index results for RNA-Seq
                comparison_db[comparison] = {}
fn=filepath(dir_path+'/'+filename)
x=0
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if x==0:
p1 = t.index('probeset1')
p2 = t.index('probeset2')
reg_call = t.index('regulation_call')
e1 = t.index('exons1')
x=1
else:
if '-' in t[e1]:
jc1 = region_db[t[p1]]
jc2 = region_db[t[p2]]
chr = region_db[jc1][1]
                            db = comparison_db[comparison]
db[jc1,jc2] = t[reg_call],chr
UI.exportDBversion(end_version) ### Database EnsMart version
converted_comparison_db = {}
region_db2 = importJunctionPositions(species,'RNASeq')
eo = export.ExportFile(dir_path+'/converted_junction_events.txt')
succeed=0; fail=0
for comparison in comparison_db:
for (j1,j2) in comparison_db[comparison]:
reg_call,chr = comparison_db[comparison][(j1,j2)]
if j1 in region_db2 and j2 in region_db2:
junction1_id,chr = region_db2[j1]
junction2_id,chr = region_db2[j2]
#print junction1_id, junction2_id, j1,j2, comparison;sys.exit()
else:
junction1_id=''
junction2_id=''
j1=chr+':'+j1
j2=chr+':'+j2
values = string.join([comparison,j1,j2,junction1_id,junction2_id,reg_call],'\t')+'\n'
eo.write(values)
eo.close()
def convertPSIJunctionIDsToPositions(psi_file,regulated_file):
""" Links up PSI genomic positions with IDs in a significantly differentially regulated PSI results file """
fn=filepath(psi_file)
x=0
coord_db = {}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if x==0:
symbol = t.index('Symbol')
minor = t.index('Examined-Junction')
major = t.index('Background-Major-Junction')
coord = t.index('Coordinates')
x=1
else:
uid = t[symbol]+':'+t[minor]+'|'+t[major]
coordinates = t[coord]
coord_db[uid] = coordinates
dir_path = export.findParentDir(regulated_file)
comparison = export.findFilename(regulated_file)
eo = export.ExportFile(dir_path+'/coordinate_PSI_events.txt')
fn=filepath(regulated_file)
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
event = t[0]
event = string.replace(event,'@',':')
event = string.replace(event,'&',':')
event = string.replace(event,'__','|')
regulation = t[1]
if event in coord_db:
coordinates = coord_db[event]
values = string.join([comparison,event,coordinates,regulation],'\t')+'\n'
eo.write(values)
eo.close()
if __name__ == '__main__':
#predictSplicingEventTypes('ENSG00000123352:E15.4-E16.1','ENSG00000123352:E15.3-E16.1');sys.exit()
test=False
if test:
directory = '/Volumes/SEQ-DATA/AML_junction/AltResults/AlternativeOutput/'
dir_list = read_directory(directory)
for file in dir_list:
if 'PSI-clust' in file:
filename = meanCenterPSI(directory+'/'+file)
#filterJunctionExpression(filename,minPercentPresent=0.75)
#exportHeatmap('/Volumes/My Passport/AML-LAML/LAML1/AltResults/AlternativeOutput/Hs_RNASeq_top_alt_junctions-PSI-clust-filt.txt',color_gradient='yellow_black_blue',columnMethod='hopach')
#sys.exit()
#convertPSIJunctionIDsToPositions('/Volumes/SEQ-DATA/Grimeslab/TopHat/AltResults/AlternativeOutput/Mm_RNASeq_top_alt_junctions-PSI.txt','/Users/saljh8/Documents/1-dataAnalysis/SplicingFactors/Grimes-MarkerFinder-v2.txt.txt')
#convertArrayReciprocalJunctionToCoordinates('Hs','junction','/Volumes/Time Machine Backups/dataAnalysis/SplicingFactor/Hs/hglue/Marto/AltResults/AlternativeOutput','EnsMart65','EnsMart72')
#sys.exit()
fold = 2
pval = 0.05
ptype = 'rawp'
species = 'Hs'
analysis = 'goelite'
array_type = "3'array"
norm = 'RPKM'
graphic_links=[]
additional = None
use_downregulated_labels=True
excludeGenes = None
expression_data_format = 'non-log'
expression_data_format = 'log'
var = None
################ Comand-line arguments ################
import getopt
if len(sys.argv[1:])<=1: ### Indicates that there are insufficient number of command-line arguments
print "Warning! Please designate a tab-delimited input expression file in the command-line"
print "Example: python ExpressionBuilder.py --i '/Users/me/GSEXXX/ExpressionOutput' --p 0.05 --f 1.5 --ptype rawp --analysis summary --direction up --platform RNASeq"
sys.exit()
#Building GO-Elite inputs and running GO-Elite in batch
#python ExpressionBuilder.py --i /Users/saljh8/Desktop/C4-hESC/ExpressionOutput --p 0.05 --f 2 --ptype adjp --analysis goelite --direction down --platform gene --species Hs --additional goelite
#Generating signatures
#python ExpressionBuilder.py --i /Users/saljh8/Desktop/C4-hESC/GO-Elite/upregulated/ --analysis signature --inputSource Ensembl --outputSource EntrezGene
#Filtering expression datasets
#python ExpressionBuilder.py --i /Users/saljh8/Desktop/C4-hESC/ExpressionOutput --analysis filter
else:
options, remainder = getopt.getopt(sys.argv[1:],'', ['i=','o=','f=','p=','a=','platform=',
'ptype=','analysis=','species=','direction=',
'inputSource=','outputSource=', 'additional=',
'excludeGenes=','var='])
#print sys.argv[1:]
for opt, arg in options:
if opt == '--i': directory=arg
elif opt == '--o': output_file=arg
elif opt == '--f': fold=float(arg)
elif opt == '--p': pval=float(arg)
elif opt == '--ptype': ptype=arg
elif opt == '--analysis' or opt == '--a': analysis=arg
elif opt == '--species': species=arg
elif opt == '--platform': array_type=arg
elif opt == '--inputSource': input_source=arg
elif opt == '--outputSource': output_source=arg
elif opt == '--additional': additional=arg
elif opt == '--excludeGenes': excludeGenes=arg ### File location for text file with genes to exclude
elif opt == '--var': var=arg
elif opt == '--direction':
if 'own' in arg:
use_downregulated_labels = True
else:
use_downregulated_labels = False
else:
print "Warning! Command-line argument: %s not recognized. Exiting..." % opt; sys.exit()
### Allow for import of genes to exclude (e.g., sex-associated or pseudogenes)
try: genesToExclude = excludeGenesImport(excludeGenes)
except Exception: genesToExclude = {}
print analysis
if array_type == 'RNASeq':
gene_exp_threshold = 50
gene_rpkm_threshold = 3
if analysis == 'matchAndCorrelate':
matchAndCorrelate(directory, var, output_source, additional)
if analysis == 'returnRowHeaderForMaxEntry':
### Used primarily for combining LineageProfiler z-scores to report the top categories across compendiums
try: returnRowHeaderForMaxEntry(directory,int(var))
except Exception: pass
if analysis == 'featureCorrelate':
try: output_file = output_file
except Exception: output_file=directory
featureCorrelate(species,directory,additional,output_file,var)
if analysis == 'MarkerFinderOrder':
### Used for combining the all gene MarkerFinder ordered results with already clustered results (e.g., significant gene)
### to return significantly differentially expressed genes (expressed sufficiently) and cluster samples within classes
### but order by MarkerFinder correlations and groups
orderHeatmapByMarkerFinderOrder(directory)
if analysis == 'unbiased':
#python ExpressionBuilder.py --species Hs --platform RNASeq --i "/Volumes/My Passport/salomonis2/SRP042161_GBM-single-cell/bams/" --a unbiased --additional "/Volumes/My Passport/salomonis2/SRP042161_GBM-single-cell/bams/ExpressionInput/counts.GBM_scRNA-Seq.txt"
import RNASeq
#export_dir = '/Volumes/SEQ-DATA/Grimes/14018_gmp-pro/Lattice/Full/AltResults/Unbiased/DataPlots/Clustering-myeloblast-hierarchical_euclidean_euclidean.txt'
#export_dir = '/Volumes/SEQ-DATA/SingleCell-Churko/AltResults/Unbiased/DataPlots/Clustering-CM-hierarchical_euclidean_euclidean.txt'
#calculateNormalizedIntensities(directory, species, array_type, analysis_type = 'raw', expFile = additional)
var = unbiasedComparisonSpliceProfiles(directory,species,array_type,expFile=additional,min_events=0,med_events=0)
#export_dir, exported_IDs = var
#print export_dir
#RNASeq.correlateClusteredGenes(export_dir)
if analysis == 'highest-expressing':
getHighestExpressingGenes(directory,output_file,float(var))
if analysis == 'lncRNA':
lncRNANeighborCorrelationAnalysis(directory)
if analysis == 'NI':
calculateNormalizedIntensities(directory,species,array_type)
if analysis == 'AltExonConfirmed':
### Grab the alternative exons in the AltExonConfirmed GO-Elite folder, combine them and filter the splicing-index raw table
input_dir = directory+'/AltExonConfirmed/'
cluster_file, rows_in_file = buildAltExonClusterInputs(input_dir,species,array_type,dataType='AltExonConfirmed')
if rows_in_file < 7000:
exportHeatmap(cluster_file,size=rows_in_file)
if analysis == 'goelite' or analysis == 'summary':
#python ExpressionBuilder.py --f 2 --p 0.05 --ptype adjp --analysis summary --i /inputs
buildCriterion(fold, pval, ptype, directory+'/',analysis,UseDownRegulatedLabel=use_downregulated_labels,genesToExclude=genesToExclude)
if additional == 'goelite':
import multiprocessing as mlp
runGOElite(species,directory)
if analysis == 'filter':
filterDatasetFile(directory+'/')
if analysis == 'signature':
import gene_associations
directory+='/'; gene_conversion_db={}
dir_list = read_directory(directory)
for file in dir_list:
filename = directory+'/'+file
db,input_data_db = gene_associations.IDconverter(filename, species, input_source, output_source,analysis=analysis)
gene_conversion_db[file] = db,input_data_db
exportSignatures(gene_conversion_db,directory,species)
if analysis == 'QC':
graphic_links = visualizeQCPlots(directory)
elif analysis == 'LineageProfiler':
graphic_links = performLineageProfiler(directory,graphic_links)
| [
"[email protected]"
] | |
dea0ba39768a7c8ede0d9bf90e3b3f51c7138806 | 9c50f57a9cb32b44e86a0cdcbf61ead34754b085 | /杂物间/python基础/day06/作业九.py | 0b2ba089221b23677327707278422873cfa7b562 | [] | no_license | a1403893559/rg201python | c3f115011981393c86a0150e5281096651712ad4 | 448f04c86e4c7fd30e3a2a4f9121b934ae1d49be | refs/heads/master | 2020-03-15T23:32:17.723403 | 2018-03-18T12:59:43 | 2018-03-18T12:59:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 206 | py | # The assignment asks us to store an acquaintance's information in a dictionary; essentially it is a name card
shu_ren = {'name':'李文浩','age':'18','city':'山西煤窑'}
for key,value in shu_ren.items():
print('%s:%s'%(key,value))
| [
"[email protected]"
] | |
377cdbfbe4bc9e17cc34e3664bff5057689407cd | 4ae7cdc9292009398a292bdf6bee61428559fdfd | /SourceCodeTools/code/data/sourcetrail/nodes_of_interest_from_dataset.py | d9e69bccbc79dc112cfbe4294211e25a29fc1878 | [] | no_license | VitalyRomanov/method-embedding | 52a4e6e7bf726b4db0872902a0eaf1d8cb82b4a8 | 1c8f0fc04eb1f495555272d9747fd2fea68525e1 | refs/heads/master | 2023-08-31T17:39:04.051912 | 2023-01-08T05:02:52 | 2023-01-08T05:02:52 | 219,153,628 | 5 | 7 | null | 2023-07-22T20:27:20 | 2019-11-02T12:54:12 | Python | UTF-8 | Python | false | false | 707 | py | import json
def get_node_ids_from_dataset(dataset_path):
node_ids = []
with open(dataset_path, "r") as dataset:
for line in dataset:
entry = json.loads(line)
for _, _, id_ in entry["replacements"]:
node_ids.append(int(id_))
return node_ids
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("dataset")
parser.add_argument("output")
args = parser.parse_args()
node_ids = get_node_ids_from_dataset(args.dataset)
with open(args.output, "w") as sink:
sink.write("node_id\n")
for id_ in node_ids:
sink.write(f"{id_}\n")
if __name__ == "__main__":
main() | [
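# Illustrative usage (hypothetical file names):
#   python nodes_of_interest_from_dataset.py dataset.jsonl node_ids.csv
# Each input line is a JSON object whose "replacements" field holds (start, end, node_id) triples;
# the collected node_id values are written one per line under a "node_id" header.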
"[email protected]"
] | |
7351ff746480493141320d2010c33f6f1d6936d8 | 61b9e597f0bd27ee7ec86188b7e10518ee30425c | /tests/run_cache_experiments.py | cc4d49b11eb3b1381b7a355b51f7b474a639d114 | [] | no_license | sirrice/dbwipes_src | eeb369d09ba28cb1ab3ffa70551c2b253dd39cb3 | 4d42b7d51af190b21679f38150f85dec1496d78c | refs/heads/master | 2021-01-21T12:36:22.888835 | 2014-04-23T20:53:16 | 2014-04-23T20:53:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,696 | py | import os
from sqlalchemy import *
from common import *
def print_clusters(sub, dim, clusters=[], tuples=[], title=''):
xattr = 'a_%d' % dim
yattr = 'a_%d' % (dim+1)
for cluster in clusters:
bbox = tuple(map(list, zip(*cluster.bbox)))
# we want bounds for attrs a_dim, a_dim+1, but bbox cols
# may not be ordered as we like
if xattr in cluster.cols:
xidx = cluster.cols.index(xattr)
x = bbox[xidx]
x[0] = max(0, x[0])
x[1] = min(100, x[1])
else:
x = [0, 100]
if yattr in cluster.cols:
yidx = cluster.cols.index(yattr)
y = bbox[yidx]
y[0] = max(0, y[0])
y[1] = min(100, y[1])
else:
y = [0, 100]
c = cm.jet(cluster.error)
r = Rect((x[0], y[0]), x[1]-x[0], y[1]-y[0], alpha=min(1., max(0.2,cluster.error)), ec=c, fill=False, lw=1.5)
sub.add_patch(r)
if tuples:
cols = zip(*tuples)
xs, ys, cs = cols[dim], cols[dim+1], cols[-2]
cs = np.array(cs) / 100.
sub.scatter(xs, ys, c=cs, alpha=0.5, lw=0)
sub.set_ylim(-5, 105)
sub.set_xlim(-5, 105)
sub.set_title(title)
def print_all_clusters(pp, db, tablename, learner, c):
try:
all_clusters = [cluster.clone() for cluster in learner.all_clusters]
all_clusters = normalize_cluster_errors(all_clusters)
clusters = [cluster.clone() for cluster in learner.final_clusters]
clusters = normalize_cluster_errors(clusters)
best_clusters = sorted(clusters, key=lambda c: c.error, reverse=True)
best_clusters = best_clusters[:2]
best_clusters[0].error = 1
tuples = get_tuples_in_bounds(db, tablename, [], 'g = 7')
for cl in clusters:
print str(cl), cl.c_range
for dim in xrange(len(tuples[0])-4):
suffix = "%.4f dim %d" % (c, dim)
fig = plt.figure(figsize=(12, 4))
print_clusters(fig.add_subplot(1, 3, 1), dim, all_clusters, tuples=tuples,title="merged %s" % suffix)
print_clusters(fig.add_subplot(1, 3, 2), dim, clusters, tuples=tuples,title="merged %s" % suffix)
print_clusters(fig.add_subplot(1, 3, 3), dim, best_clusters, tuples=tuples, title="best %s" % suffix)
plt.savefig(pp, format='pdf')
except Exception as e:
import traceback
traceback.print_exc()
pdb.set_trace()
pass
def run(pp, cutoff, **params):
dataset = params['dataset']
test_datas = get_test_data(datasetnames[dataset])
tablename = test_datas[-1]
dbname = test_datas[0]
db = create_engine('postgresql://localhost/%s' % dbname)
costs, rules, all_ids, table_size, learner = run_experiment(dataset, **params)
cost = costs['cost_total']
ft = learner.full_table
print len(ft)
truth = [int(row['id'].value) for row in ft if row['v'] >= cutoff]
all_stats = [compute_stats(ids, truth, table_size) for ids in all_ids]
stats, rule, ids = tuple(zip(all_stats, rules, all_ids)[0])
data = tuple([tablename,params['c'],cost]+list(stats))
print "stats:%s,c(%.3f),cost(%.2f),%.6f,%.6f,%.6f,%.6f" % data
print 'stats:%s'% str(sdrule_to_clauses(rule.simplify())[0])
print_all_clusters(pp, db, tablename, learner, params['c'])
return costs
def warmup(dim, cutoff, **kwargs):
dataset = "data_%d_%d_1000_0d50_%duo" % (dim, dim, cutoff)
params = {
'klass':BDT,
'nbadresults' : 10,
'epsilon':0.005,
'tau':[0.1, 0.5],
'p' : 0.7,
'l':.5,
'min_pts' : 10,
'min_improvement':.01,
'granularity':15,
'max_wait':1,
'naive':False,
'use_mtuples':False,
'use_cache': False
}
params.update(kwargs)
ft, bts, gts, truth, aggerr, cols = get_parameters(dataset, **params)
params.update({
'aggerr' : aggerr,
'cols' : cols,
'tablename' : dataset,
'dataset' : dataset
})
learner = BDT(**params)
learner.setup_tables(ft, bts, gts, **params)
learner.get_partitions(ft, bts, gts, **params)
def run_cache(dim, cutoff, cs, **kwargs):
dataset = kwargs.get('dataset', "data_%d_%d_1000_0d50_%duo" % (dim, dim, cutoff))
params = {
'klass':BDT,
'nbadresults' : 10,
'epsilon':0.005,
'tau':[0.1, 0.5],
'p' : 0.7,
'l':.5,
'min_pts' : 10,
'min_improvement':.01,
'granularity':15,
'max_wait':20,
'naive':False,
'use_mtuples':False,
'use_cache': False,
        'dataset': dataset
}
params.update(kwargs)
pp = PdfPages('figs/topdown_all_%s.pdf' % str(dataset))
cost_dicts = []
for c in cs:
params['c'] = c
cost_dict = run(pp, cutoff, **params)
cost_dicts.append(cost_dict)
pp.close()
return cost_dicts
def reset_cache():
try:
os.system('rm dbwipes*.cache')
except:
pass
if __name__ == '__main__':
np.seterr(all='raise')
if len(sys.argv) < 4:
print "python run_cache_experiments.py [dimensions] [30|80] [cache? 0|1] [list of cs values]"
print "cs values defaults to [.5, .4, .3, .2, .1, .05, 0]"
sys.exit()
dim = int(sys.argv[1])
uo = int(sys.argv[2])
cache = bool(int(sys.argv[3]))
cs = map(float, sys.argv[4:])
if not cs:
cs = [.5, .4, .3, .2, .1, 0.05, 0.0]
#reset_cache()
#cachecost_dicts = run_cache(dim, uo, cs, l=0.95, tree_alg='rt', klass=NDT, use_cache=cache, tau= [0.1, 0.5])
cachecost_dicts = run_cache(dim, uo, cs, l=0.85, tree_alg='rt', klass=BDT,
epsilon=0.001, use_cache=cache, tau= [0.02, 0.5],
c_range=[0.01, 0.7],
dataset='data2clust_2_2_2k_vol20_uo80')
print "c,total,partbad,partgood,split,merge,cache"
for c, cd in zip(cs, cachecost_dicts):
print "%.4f,%.4f,%.4f,%.4f,%.4f,%.4f,%d" % (
c,
cd.get('cost_total', -1),
cd.get('cost_partition_bad', -1),
cd.get('cost_partition_good', -1),
cd.get('cost_split', -1),
cd.get('cost_merge', -1),
cache
)
| [
"[email protected]"
] | |
44a318abdeab78a4e1fc6de40b655367ad1b4e90 | d30aba490a9527e7fc1f31e178b5f1c3067ae84d | /build/combined_robot_hw_tests/catkin_generated/pkg.develspace.context.pc.py | 13c2aeff20a5c6b70551c5306392f462066cbd7e | [] | no_license | weetinygit/tart | 130845b9c0e7b01d0a5a5177b056c85495e4e5cc | 339344cb9ef0561655f73f4609f340974113785a | refs/heads/master | 2021-09-13T07:38:39.706301 | 2018-04-26T21:18:19 | 2018-04-26T21:18:19 | 111,699,891 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 667 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/weety/tart5/src/roscontrol/src/ros_control/combined_robot_hw_tests/include".split(';') if "/home/weety/tart5/src/roscontrol/src/ros_control/combined_robot_hw_tests/include" != "" else []
PROJECT_CATKIN_DEPENDS = "combined_robot_hw;hardware_interface;roscpp".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lcombined_robot_hw_tests".split(';') if "-lcombined_robot_hw_tests" != "" else []
PROJECT_NAME = "combined_robot_hw_tests"
PROJECT_SPACE_DIR = "/home/weety/tart5/devel/.private/combined_robot_hw_tests"
PROJECT_VERSION = "0.12.0"
| [
"[email protected]"
] | |
145d8b4115c4d955272564fad60a095f75f40fab | 4a4cc797f9a46a2c09075abfc2033c480eaa5486 | /mini-programs/week_2/lectures/lecture_1_logical_data_type, conditional_operator_and_cycles.py | 25f6b5cd500aa4069b3494dd7ee0525bd4c0d4d9 | [] | no_license | h1mik/python_programs | efa806eb9803c3e50aee845d5e0be9560d65f2be | 33e169ef33f70bc94cd535ecf44e852ae58c7a64 | refs/heads/main | 2023-02-04T14:45:04.855119 | 2020-12-26T09:25:22 | 2020-12-26T09:25:22 | 309,483,948 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 333 | py | # True and False |
# (==) - equal               | (>)  - greater than
# (!=) - not equal           | (<=) - less than or equal
# (<)  - less than           | (>=) - greater than or equal
# (and, or) - binary operators | (not) - unary operator
x = 4
y = 6
print(x < y)
st1 = "Books"
st2 = "Alpha"
print(not st1 > st2)
| [
"[email protected]"
] | |
95591f4a5fbda649b1639552fefac75304660451 | 917a690cad8fece9102ba3191284f1ab83a8aeaf | /pypubmed/util/__init__.py | 19406ff5499cccc2d0e8abe969aa2ba74bd4aff0 | [
"MIT"
] | permissive | wisRen/pypubmed | 1751f429e38f4fd58d058584d36e7a4031c02660 | 6dd1b6ee7e8c7cf2a10885ed7662a4e2d16fa8e4 | refs/heads/master | 2023-06-21T05:10:47.579755 | 2021-08-11T06:01:05 | 2021-08-11T06:01:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 336 | py | import os
def safe_open(filename, mode='r'):
if 'w' in mode:
dirname = os.path.dirname(filename)
if dirname and not os.path.exists(dirname):
os.makedirs(dirname)
if filename.endswith('.gz'):
import gzip
return gzip.open(filename, mode=mode)
return open(filename, mode=mode)
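# Example usage (an illustrative sketch, not part of the original module; the path
# below is an assumption):
#   with safe_open('output/tables/results.tsv', 'w') as fh:  # parent dirs are created
#       fh.write('pmid\ttitle\n')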
| [
"[email protected]"
] | |
d5017384859880726a5577c4f806d918eefbdd79 | d85163b314c220e88e99426ab4e10f031423aeb2 | /qc/outremont/districts/definition.py | 3c55d715faa90a28b9fc8ce3ea1aa64642c9bdbf | [] | no_license | opennorth-archive/represent-canada-data | dc3e98d4f24ce877ec2dfc9e40675e561912c45f | f1792a724f0a58d596e3376c71b6b3e64f7887b0 | refs/heads/master | 2021-01-22T05:10:00.952023 | 2014-04-04T19:51:40 | 2014-04-04T19:52:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 780 | py | # coding: utf-8
from datetime import date
import boundaries
boundaries.register(u'Outremont districts',
domain=u'Outremont, Montréal, QC',
last_updated=date(2013, 10, 16),
name_func=lambda f: re.sub(u'', u'—', f.get('NOM_DISTRI')), # control character, m-dash
authority=u'Ville de Montréal',
source_url='http://donnees.ville.montreal.qc.ca/dataset/elections-2013-districts-electoraux',
licence_url='http://donnees.ville.montreal.qc.ca/licence/licence-texte-complet/',
data_url='http://donnees.ville.montreal.qc.ca/storage/f/2013-10-16T14%3A16%3A09.092Z/districtelect.zip',
encoding='iso-8859-1',
metadata={'ocd_division': u'ocd-division/country:ca/csd:2466023/borough:outremont'},
ogr2ogr=u'''-where "ARRONDISSE='Outremont'"''',
)
| [
"[email protected]"
] | |
e8cf91c8b80954559fa3dac1a1efd4984b760cfd | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/0/ff.py | b2e3773abd97fdc0bf3b121877f4920e49a23b1d | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 485 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'FF':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"[email protected]"
] | |
4245cd1c1914d5cde8d48f77bc3b37941ee4b174 | 0eb0cc67c11baec9caf82b61f161c091c7043364 | /api_book/wsgi.py | d6be60053dfb1ff1084624d501fbd456288cf37a | [] | no_license | juniorcarvalho/work-at-olist | dd648fceaed5e0f8b165ac1279cfb1ae1ccae0a5 | fc59143c8d295da50aa42b312aa28ce1202ec890 | refs/heads/master | 2022-07-17T23:54:29.071100 | 2020-05-19T22:40:11 | 2020-05-19T22:40:11 | 264,524,765 | 0 | 0 | null | 2020-05-16T20:49:54 | 2020-05-16T20:49:54 | null | UTF-8 | Python | false | false | 428 | py | """
WSGI config for api_book project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from dj_static import Cling
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'api_book.settings')
application = Cling(get_wsgi_application())
| [
"[email protected]"
] | |
dd071b116f01cd5df223b91b2af30535c163e630 | 7d17161a77ad04ea1de1dabe84619b6c4fffe2ad | /test/python/circuit/library/test_piecewise_chebyshev.py | 14660435d445ca3a3e2114e685177d21bf94b665 | [
"Apache-2.0"
] | permissive | annos-IBM/qiskit-terra | 5e3b93a089a6e00c9279bf82735d78b497e92023 | 78ece7ad9baba64395eea98c45fb83a30b04c835 | refs/heads/main | 2023-05-28T20:42:19.805833 | 2021-06-09T03:47:48 | 2021-06-09T03:47:48 | 375,329,407 | 1 | 0 | Apache-2.0 | 2021-06-09T11:22:14 | 2021-06-09T11:22:14 | null | UTF-8 | Python | false | false | 4,954 | py | # This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Test the piecewise Chebyshev approximation."""
import unittest
from collections import defaultdict
import numpy as np
from ddt import ddt, data, unpack
from qiskit.test.base import QiskitTestCase
from qiskit import BasicAer, execute
from qiskit.circuit import QuantumCircuit
from qiskit.circuit.library.arithmetic.piecewise_chebyshev import PiecewiseChebyshev
@ddt
class TestPiecewiseChebyshev(QiskitTestCase):
"""Test the piecewise Chebyshev approximation."""
def assertFunctionIsCorrect(self, function_circuit, reference):
"""Assert that ``function_circuit`` implements the reference function ``reference``."""
function_circuit._build()
num_state_qubits = function_circuit.num_state_qubits
num_ancilla_qubits = function_circuit.num_ancillas
circuit = QuantumCircuit(num_state_qubits + 1 + num_ancilla_qubits)
circuit.h(list(range(num_state_qubits)))
circuit.append(function_circuit.to_instruction(), list(range(circuit.num_qubits)))
backend = BasicAer.get_backend("statevector_simulator")
statevector = execute(circuit, backend).result().get_statevector()
probabilities = defaultdict(float)
for i, statevector_amplitude in enumerate(statevector):
i = bin(i)[2:].zfill(circuit.num_qubits)[num_ancilla_qubits:]
probabilities[i] += np.real(np.abs(statevector_amplitude) ** 2)
unrolled_probabilities = []
unrolled_expectations = []
for i, probability in probabilities.items():
x, last_qubit = int(i[1:], 2), i[0]
if last_qubit == "0":
expected_amplitude = np.cos(reference(x)) / np.sqrt(2 ** num_state_qubits)
else:
expected_amplitude = np.sin(reference(x)) / np.sqrt(2 ** num_state_qubits)
unrolled_probabilities += [probability]
unrolled_expectations += [np.real(np.abs(expected_amplitude) ** 2)]
np.testing.assert_array_almost_equal(
unrolled_probabilities, unrolled_expectations, decimal=3
)
@data((lambda x: np.arcsin(1 / x), 2, [2, 4], 2), (lambda x: x / 8, 1, [1, 8], 3))
@unpack
def test_piecewise_chebyshev(self, f_x, degree, breakpoints, num_state_qubits):
"""Test the piecewise Chebyshev approximation."""
def pw_poly(x):
if breakpoints[0] <= x < breakpoints[-1]:
return f_x(x)
return np.arcsin(1)
pw_approximation = PiecewiseChebyshev(f_x, degree, breakpoints, num_state_qubits)
self.assertFunctionIsCorrect(pw_approximation, pw_poly)
def test_piecewise_chebyshev_mutability(self):
"""Test the mutability of the piecewise Chebyshev approximation."""
def pw_poly(x, f_x):
if breakpoints[0] <= x < breakpoints[-1]:
return f_x(x)
return np.arcsin(1)
def f_x_1(x):
return x / 2
pw_approximation = PiecewiseChebyshev(f_x_1)
with self.subTest(msg="missing number of state qubits"):
with self.assertRaises(AttributeError): # no state qubits set
print(pw_approximation.draw())
with self.subTest(msg="default setup, just setting number of state qubits"):
pw_approximation.num_state_qubits = 2
pw_approximation.f_x = f_x_1
# set to the default breakpoints for pw_poly
breakpoints = [0, 4]
pw_approximation.breakpoints = breakpoints
self.assertFunctionIsCorrect(pw_approximation, lambda x: pw_poly(x, f_x_1))
def f_x_2(x):
return x / 4
with self.subTest(msg="setting non-default values"):
breakpoints = [0, 2]
degree = 2
pw_approximation.breakpoints = breakpoints
pw_approximation.degree = degree
pw_approximation.f_x = f_x_2
self.assertFunctionIsCorrect(pw_approximation, lambda x: pw_poly(x, f_x_2))
def f_x_3(x):
return x ** 2
with self.subTest(msg="changing all values"):
pw_approximation.num_state_qubits = 4
breakpoints = [1, 3, 6]
degree = 3
pw_approximation.breakpoints = breakpoints
pw_approximation.degree = degree
pw_approximation.f_x = f_x_3
self.assertFunctionIsCorrect(pw_approximation, lambda x: pw_poly(x, f_x_3))
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
] | |
b9fdfca21b63d92c178c7c1277d346717e9cdced | 0fccee4c738449f5e0a8f52ea5acabf51db0e910 | /genfragments/ThirteenTeV/ADD/ADDmonoJet_MD_6_d_6_TuneCUETP8M1_13TeV_pythia8_cfi.py | 85a31581696b8c9fa838b33ac1ac34c210e6a0db | [] | no_license | cms-sw/genproductions | f308ffaf3586c19b29853db40e6d662e937940ff | dd3d3a3826343d4f75ec36b4662b6e9ff1f270f4 | refs/heads/master | 2023-08-30T17:26:02.581596 | 2023-08-29T14:53:43 | 2023-08-29T14:53:43 | 11,424,867 | 69 | 987 | null | 2023-09-14T12:41:28 | 2013-07-15T14:18:33 | Python | UTF-8 | Python | false | false | 1,923 | py | import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.Pythia8CUEP8M1Settings_cfi import *
generator = cms.EDFilter("Pythia8GeneratorFilter",
pythiaPylistVerbosity = cms.untracked.int32(1),
filterEfficiency = cms.untracked.double(1.0),
pythiaHepMCVerbosity = cms.untracked.bool(False),
comEnergy = cms.double(13000.0),
maxEventsToPrint = cms.untracked.int32(1),
PythiaParameters = cms.PSet(
pythia8CommonSettings = cms.vstring('Tune:preferLHAPDF = 2',
'Main:timesAllowErrors = 10000',
'Check:epTolErr = 0.01',
'Beams:setProductionScalesFromLHEF = off',
'SLHA:keepSM = on',
'SLHA:minMassSM = 1000.',
'ParticleDecays:limitTau0 = on',
'ParticleDecays:tau0Max = 10',
'ParticleDecays:allowPhotonRadiation = on'),
pythia8CUEP8M1Settings = cms.vstring('Tune:pp 14',
'Tune:ee 7',
'MultipartonInteractions:pT0Ref=2.4024',
'MultipartonInteractions:ecmPow=0.25208',
'MultipartonInteractions:expPow=1.6'),
pythia8_unparticle = cms.vstring('ExtraDimensionsLED:monojet = on',
'ExtraDimensionsLED:CutOffmode = 1',
'ExtraDimensionsLED:t = 0.5',
'ExtraDimensionsLED:n = 6',
'ExtraDimensionsLED:MD = 6000.',
'5000039:m0 = 1200.',
'5000039:mWidth = 1000.',
'5000039:mMin = 1.',
'5000039:mMax = 13990.',
'PhaseSpace:pTHatMin = 80.',
'PartonLevel:ISR = on',
'PartonLevel:FSR = on',
'ParticleDecays:limitTau0 = on',
'ParticleDecays:tauMax = 10'),
parameterSets = cms.vstring('pythia8CommonSettings',
'pythia8CUEP8M1Settings',
'pythia8_unparticle')
)
)
| [
"[email protected]"
] | |
a75ec1360ca1390fde1fe469602935530bd811b1 | f56cf80c0111a1317de73be229a943bdc18277ff | /SERVER/FETCHER/allcasereader.py | 07f05bd01af0b19ddf8878aa289af30596e6d649 | [] | no_license | EkiVox/SmartCopybookCase | 2f3939b00d534adef18ca4da3d01273c2f534f42 | 9af47aea814fc29ef4b2ee797a35f10d20a22d49 | refs/heads/master | 2020-03-07T18:50:56.454545 | 2018-12-07T17:32:31 | 2018-12-07T17:32:31 | 127,653,429 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 111 | py | import os
with open('ALL-NEW-IDS.list', 'r') as idsfile:
print idsfile.read()
os.remove('ALL-NEW-IDS.list') | [
"[email protected]"
] | |
1e44a07d05b4c0f1a1b0769e052174d81815a74d | 466912406272829982f75854cf0104c6ce8c9814 | /data/spider2/crawler/news/huxiu_news.py | 5b912905b334e53e2d840ea03c67d087fd86fc6f | [] | no_license | logonmy/Codes | 9631fa103fc499663361fa7eeccd7cedb9bb08e4 | 92723efdeccfc193f9ee5d0ab77203c254f34bc2 | refs/heads/master | 2021-09-21T18:07:22.985184 | 2018-08-30T05:53:26 | 2018-08-30T05:53:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,416 | py | # -*- coding: utf-8 -*-
import os, sys, datetime, re, json, time
from lxml import html
from pyquery import PyQuery as pq
reload(sys)
sys.setdefaultencoding("utf-8")
sys.path.append(os.path.join(os.path.split(os.path.realpath(__file__))[0], '..'))
import BaseCrawler
sys.path.append(os.path.join(os.path.split(os.path.realpath(__file__))[0], '../../../util'))
import loghelper,extract,db, util,url_helper,download, extractArticlePublishedDate
sys.path.append(os.path.join(os.path.split(os.path.realpath(__file__))[0], '../../parser/util2'))
import parser_mysql_util
import parser_mongo_util
#logger
loghelper.init_logger("crawler_huxiu_news", stream=True)
logger = loghelper.get_logger("crawler_huxiu_news")
NEWSSOURCE = "Huxiu"
RETRY = 3
TYPE = 60001
SOURCE =13823
URLS = []
CURRENT_PAGE = 1
linkPattern = "/article/\d+.html"
Nocontents = [
]
columns = [
{"column": None, "max": 4},
]
class ListCrawler(BaseCrawler.BaseCrawler):
def __init__(self, timeout=30):
BaseCrawler.BaseCrawler.__init__(self, timeout=timeout)
def is_crawl_success(self, url, content):
if content is not None:
try:
j = json.loads(content)
# logger.info(j)
except:
return False
if j.has_key("msg") is True and j["msg"] == "获取成功":
return True
return False
class NewsCrawler(BaseCrawler.BaseCrawler):
def __init__(self, timeout=20):
BaseCrawler.BaseCrawler.__init__(self, timeout=timeout)
#实现
def is_crawl_success(self,url,content):
d = pq(html.fromstring(content.decode("utf-8")))
title = d('head> title').text().strip()
logger.info("title: %s url: %s", title, url)
if title.find("虎嗅网") >= 0:
return True
return False
def has_news_content(content):
d = pq(html.fromstring(content.decode("utf-8")))
title = d('head> title').text().strip()
temp = title.split("-")
if len(temp) < 2:
return False
if temp[0].strip() == "":
return False
return True
def process_news(column, newsurl, content, newspost, download_crawler):
if has_news_content(content):
d = pq(html.fromstring(content.decode("utf-8")))
key = newsurl.split("/")[-1].strip().replace(".html","")
type = TYPE
category = None
title = d('div.article-wrap> h1').text().strip()
mongo = db.connect_mongo()
collection_news = mongo.article.news
if collection_news.find_one({"title": title}) is not None:
mongo.close()
return
tags = []
articletags = d("meta[name='keywords']").attr("content")
if articletags is not None:
for tag in articletags.split():
if tag is not None and tag.strip() != "" and tag not in tags and tag != title:
tags.append(tag)
# post = d('div#post_thumbnail> img').attr("src")
postraw = d('div.article-wrap> div.article-img-box> img').attr("src")
# posturl = parser_mysql_util.get_logo_id(postraw, download_crawler, SOURCE, key, "news")
(posturl, width, height) = parser_mysql_util.get_logo_id_new(postraw, download_crawler, SOURCE, key, "news")
if posturl is not None:
post = str(posturl)
else:
post = None
brief = d("meta[name='description']").attr("content")
post_time = d('div.article-author> div> span.article-time').text()
logger.info(post_time)
news_time = extract.extracttime(post_time)
if news_time is None:
news_time = datetime.datetime.now()
article = d('div.article-wrap> div.article-content-wrap').html()
contents = extract.extractContents(newsurl, article)
logger.info("%s, %s, %s, %s -> %s, %s. %s", key, title, news_time, ":".join(tags), category, brief, post)
# exit()
# mongo = db.connect_mongo()
# collection_news = mongo.article.news
# if collection_news.find_one({"title": title}) is not None:
# mongo.close()
# return
flag, domain = url_helper.get_domain(newsurl)
dnews = {
"date": news_time - datetime.timedelta(hours=8),
"title": title,
"link": newsurl,
"createTime": datetime.datetime.now(),
"source": SOURCE,
"key": key,
"key_int": int(key),
"type": type,
"original_tags": tags,
"processStatus": 0,
# "companyId": None,
"companyIds": [],
"category": category,
"domain": domain,
"categoryNames": []
}
dcontents = []
rank = 1
for c in contents:
if c["data"].find("查看原文")>=0 or c["data"].find("关注微信公众号虎嗅网")>=0 :
break
if c["type"] == "text":
dc = {
"rank": rank,
"content": c["data"],
"image": "",
"image_src": "",
}
else:
if download_crawler is None:
dc = {
"rank": rank,
"content": "",
"image": "",
"image_src": c["data"],
}
else:
(imgurl, width, height) = parser_mysql_util.get_logo_id_new(c["data"], download_crawler, SOURCE, key, "news")
if imgurl is not None:
dc = {
"rank": rank,
"content": "",
"image": str(imgurl),
"image_src": "",
"height": int(height),
"width": int(width)
}
else:
continue
logger.info(c["data"])
dcontents.append(dc)
rank += 1
dnews["contents"] = dcontents
if brief is None or brief.strip() == "":
brief = util.get_brief_from_news(dcontents)
if post is None or post.strip() == "":
post = util.get_posterId_from_news(dcontents)
if download_crawler is None:
dnews["post"] = post
else:
dnews["postId"] = post
dnews["brief"] = brief
if news_time > datetime.datetime.now():
logger.info("Time: %s is not correct with current time", news_time)
dnews["date"] = datetime.datetime.now() - datetime.timedelta(hours=8)
# id = collection_news.insert(dnews)
mongo.close()
# logger.info("*************DONE************* %s", id)
nid = parser_mongo_util.save_mongo_news(dnews)
logger.info("Done: %s", nid)
return
def run_news(column, crawler, download_crawler):
while True:
if len(URLS) == 0:
return
URL = URLS.pop(0)
crawler_news(column, crawler, URL["link"], URL["post"], download_crawler)
def crawler_news(column, crawler, newsurl, newspost, download_crawler):
maxretry = 0
while True:
result = crawler.crawl(newsurl, agent=True)
if result['get'] == 'success':
#logger.info(result["redirect_url"])
try:
process_news(column, newsurl, result['content'], newspost, download_crawler)
except Exception,ex:
logger.exception(ex)
break
if maxretry > 20:
break
maxretry += 1
def process(content, flag):
try:
j = json.loads(content)
# logger.info(content)
htmls = j["data"]
except:
htmls = content
cnt = 0
if len(htmls) == 0 or htmls.find("div") == -1:
return cnt
d = pq(html.fromstring(htmls.decode("utf-8")))
for a in d('div.mod-b.mod-art> div.mob-ctt> h2> a'):
try:
link = d(a).attr("href").strip().replace("?f=retweeted","")
# logger.info(link)
if re.search(linkPattern, link):
link = "https://www.huxiu.com" + link
logger.info("Link: %s is right news link", link)
# check mongo data if link is existed
mongo = db.connect_mongo()
collection_news = mongo.article.news
item = collection_news.find_one({"link": link})
mongo.close()
if (item is None or flag == "all") and link not in URLS:
linkmap = {
"link": link,
"post": None
}
URLS.append(linkmap)
else:
# logger.info(link)
pass
except:
logger.info("cannot get link")
return len(URLS)
def run(flag, column, listcrawler, newscrawler, concurrent_num, download_crawler):
global CURRENT_PAGE
cnt = 1
while True:
key = CURRENT_PAGE
if flag == "all":
if key > column["max"]:
return
else:
if cnt == 0 or key > column["max"]:
return
CURRENT_PAGE += 1
if key > 1:
url = 'https://www.huxiu.com/v2_action/article_list?page=%s' % (key)
else:
url = 'https://www.huxiu.com/'
while True:
if key > 1:
result = listcrawler.crawl(url,agent=True)
else:
result = newscrawler.crawl(url, agent=True)
if result['get'] == 'success':
try:
cnt = process(result['content'], flag)
if cnt > 0:
logger.info("%s has %s fresh news", url, cnt)
logger.info(URLS)
# threads = [gevent.spawn(run_news, column, newscrawler, download_crawler) for i in xrange(concurrent_num)]
# gevent.joinall(threads)
run_news(column, newscrawler, download_crawler)
# exit()
except Exception,ex:
logger.exception(ex)
cnt = 0
break
def start_run(concurrent_num, flag):
global CURRENT_PAGE
while True:
logger.info("%s news %s start...", NEWSSOURCE, flag)
listcrawler = ListCrawler()
newscrawler = NewsCrawler()
download_crawler = download.DownloadCrawler(use_proxy=False)
# download_crawler = None
for column in columns:
CURRENT_PAGE = 1
run(flag, column, listcrawler, newscrawler, concurrent_num, download_crawler)
logger.info("%s news %s end.", NEWSSOURCE, flag)
if flag == "incr":
time.sleep(60*8) #30 minutes
else:
return
#gevent.sleep(86400*3) #3 days
if __name__ == "__main__":
if len(sys.argv) > 1:
param = sys.argv[1]
if param == "incr":
start_run(1, "incr")
elif param == "all":
start_run(1, "all")
else:
link = param
download_crawler = None
crawler_news({}, NewsCrawler(), link, None, download_crawler)
else:
start_run(1, "incr") | [
"[email protected]"
] | |
7b45add26ddf9df79e85d7c7e09bc534253dae7d | a8139ccd50a27861d3c5a4168fd0e4b351c0a514 | /material/code/advanced_oop_and_python_topics/4_ManagedAttributeDemo/test.py | 9e8897b3d4a283a1395d9d8c0fe56f8a27ae63ef | [] | no_license | shambhand/pythontraining | a124aa1485c3ce0e589fc2cd93c1e991746432e4 | 24dd923e2b2c07c70500775e3665e2a527240329 | refs/heads/master | 2021-05-17T22:54:45.331127 | 2019-01-11T03:12:59 | 2019-01-11T03:12:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,433 | py | class Person:
def __init__(self, name):
self._name = name
@property # name = property(name)
def name(self):
"name property docs"
print('fetch...')
return self._name
@name.setter # name = name.setter(name)
def name(self, value):
print('change...')
self._name = value
@name.deleter # name = name.deleter(name)
def name(self):
print('remove...')
del self._name
def main ():
bob = Person('Bob Smith') # bob has a managed attribute
print(bob.name) # Runs name getter (name 1)
print ("addr(Person.name.getter):", hex(id((Person.name.getter))))
bob.name = 'Robert Smith' # Runs name setter (name 2)
print ("addr(Person.name.setter):", hex(id((Person.name.setter))))
print(bob.name)
del bob.name # Runs name deleter (name 3)
print('-'*20)
sue = Person('Sue Jones') # sue inherits property too
print(sue.name)
print(Person.name.__doc__) # Or help(Person.name)
print ("type (Person.name):", type (Person.name))
print ("hex(id(Person.name)):", hex(id(Person.name)))
print ("type (Person.name.getter):", type (Person.name.getter))
print ("type (Person.name.setter):", type (Person.name.setter))
print ("type (Person.name.deleter):", type (Person.name.deleter))
print ("addr(Person.name.getter):", hex(id((Person.name.getter))))
print ("addr (Person.name.setter):", hex(id((Person.name.setter))))
print ("addr (Person.name.deleter):", hex(id(Person.name.deleter)))
main () | [
"[email protected]"
] | |
6589281398355c7fc10996ec4525e0d770330e54 | db57094349de63766daf70a2e6bdb06bf3af09cf | /Cap05_Tkinter/34_images.py | 75d83162c1ff336bb175ae05bd17c0c8d302ff12 | [] | no_license | frclasso/turma1_Python_Modulo2_2019 | 2f9e9aebc48c1c0b92b684ad92958bc557866cde | 0b7f0fac0a1de4a2dbe4ff4fb2985fbfee89ed33 | refs/heads/master | 2020-04-17T19:36:07.545787 | 2019-06-10T15:29:10 | 2019-06-10T15:29:10 | 166,871,517 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 310 | py |
from tkinter import *
from PIL import Image
from PIL import ImageTk
window = Tk()
window.title("Images and incons")
icon = Image.open('python_logo.png')
icon = icon.resize((300,200), Image.ANTIALIAS)
photoImg = ImageTk.PhotoImage(icon)
Label(window, image=photoImg).grid(row= 0, column=0)
window.mainloop() | [
"[email protected]"
] | |
e7b77d6e6a2582abf034379240a03cf715d36e43 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2950/60632/240945.py | 5249f2541bece3d84fdfdc430e8fde133e238884 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 58 | py | s = input()
if s.count('2') != s.count('5'):
print(-1) | [
"[email protected]"
] | |
5878e977598eb5846f58ee444eb02ccef892e5d7 | 06cabd66791a5ee15bb3ba4b04d8bc8dea5bfda0 | /Python modules/MakePdf_old.py | 8203bd500d8b65d5bac2e798af6e0af5f18b62bc | [] | no_license | claiello/python_data_analysis | f7405dfd15f0dccd2089b1878af40b9d075071d2 | 0b8d3cc5717243e72214dc24a7fc823220e13179 | refs/heads/master | 2020-04-17T20:36:51.720891 | 2017-04-23T10:00:08 | 2017-04-23T10:00:08 | 66,181,455 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 829 | py | # test
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
def multipage(filename, figs=None, dpi=200):
pp = PdfPages(filename)
if figs is None:
figs = [plt.figure(n) for n in plt.get_fignums()]
for fig in figs:
#fig.set_size_inches(1000./fig.dpi,600./fig.dpi)
fig.set_size_inches(1200./fig.dpi,900./fig.dpi)
fig.savefig(pp, format='pdf')
pp.close()
#plt.figure(figsize=(8, 6), dpi=80)
def multipage_longer(filename, figs=None, dpi=200):
pp = PdfPages(filename)
if figs is None:
figs = [plt.figure(n) for n in plt.get_fignums()]
for fig in figs:
#fig.set_size_inches(1000./fig.dpi,600./fig.dpi)
fig.set_size_inches(1600./fig.dpi,1200./fig.dpi)
fig.savefig(pp, format='pdf')
pp.close()
| [
"[email protected]"
] | |
6d84e48bc8c4382eb95096d38f8ad9fcc2138698 | c615384fbf8b10ce3cdefc008509454460def72f | /Topics/Integer arithmetic (1)/Calculate it/main.py | 1a67cc55ecf79fed40206b72536f3ffc83392922 | [] | no_license | ivanelisandro/PythonStudiesJobAgency | 07e1981601c5314dc48239cc220db9849468b946 | 28f6ff4d1fc2592f4dd740e3aa68af0bac105f5d | refs/heads/main | 2023-08-25T19:32:10.306188 | 2021-10-22T01:34:21 | 2021-10-22T01:34:21 | 391,798,794 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 40 | py | print((1234567890 * 987654321) + 67890)
| [
"[email protected]"
] | |
5490923cd851a1f6025d71a2c24cadd7013227ab | 28be2173e5590cc5b03119e9b83c57980e6a7e8a | /learnwithpeople/wsgi.py | 70d79666fb99a92372c78e6ca89fa8357aaf18ee | [
"MIT"
] | permissive | EdgarOrnelas/learning-circles | cd164f123885ed2079b34ad394c9849b370563b9 | 293c849321d735aebbdcb6c65b7c92f751f9fd89 | refs/heads/master | 2021-01-21T20:56:35.429589 | 2017-06-16T09:20:46 | 2017-06-16T09:20:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 441 | py | """
WSGI config for learnwithpeople project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "learnwithpeople.settings")
from django.core.wsgi import get_wsgi_application
from dj_static import Cling
application = Cling(get_wsgi_application())
| [
"[email protected]"
] | |
5646965b46d4371cc23d562b9c531b6bc8162ca7 | a9fb692b89e1ea2cdadcb322c41c1513bb24e9c2 | /src/climtas/profile.py | 8e4f91f622d41d7f21465e048cc1126d10fbcf1b | [
"Apache-2.0"
] | permissive | ScottWales/climtas | a10039598093030391b339cceb2efc83912f198d | 68ddf31a0ce0f7ceb0a0d3ca6156e07efe42d6ec | refs/heads/master | 2022-09-15T02:44:22.059027 | 2022-08-26T05:26:43 | 2022-08-26T05:26:43 | 228,983,033 | 5 | 3 | null | 2022-08-26T05:26:44 | 2019-12-19T05:39:44 | Jupyter Notebook | UTF-8 | Python | false | false | 11,029 | py | #!/g/data/hh5/public/apps/nci_scripts/python-analysis3
# Copyright 2020 Scott Wales
# author: Scott Wales <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Profiling dask data processing
* :func:`benchmark` runs a function with different chunks, returning the time
taken for each chunk setting
* :func:`profile` runs a function with a single chunk setting, returning the
time taken in different dask stages and chunk information
Profile results
===============
time_total
Total time taken to process the data (seconds)
time_open
Time spent opening the dataset (seconds)
time_function
Time spent running the function (seconds)
time_optimize
Time spent optimizing the Dask graph (seconds)
time_load
Time spent computing the data with Dask (seconds)
chunks
Chunk shape
nchunks_in
Number of chunks in loaded data
nchunks_out
Number of chunks in function output
chunksize_in
Size of chunks in loaded data
chunksize_out
Size of chunks in function output
tasks_in
Dask graph size in loaded data
tasks_out
Dask graph size in function output
tasks_optimized
Dask graph size after optimizing function output
"""
from typing import Dict, Any, List
import xarray
import dask
import time
import pandas
import numpy
import typing as T
import datetime
import os
import csv
import subprocess
import json
from . import __version__
def benchmark(
paths: str,
variable: str,
chunks: Dict[str, List[int]],
function,
run_count: int = 3,
mfdataset_args: Dict[str, Any] = {},
):
"""
Profile a function on different chunks of data
Opens a dataset with :func:`xarray.open_mfdataset` with one of the chunk
options, then runs function on variable
>>> def func(da):
... return t2m.mean()
>>> climtas.profile.benchmark(
... '/g/data/ub4/era5/netcdf/surface/t2m/2019/t2m_era5_global_20190101_*.nc',
... variable='t2m',
... function=func,
... chunks={'time':[93, 93], 'latitude': [91, 91], 'longitude': [180, 180*2]}) #doctest: +SKIP
Args:
paths: Paths to open (as :func:`xarray.open_mfdataset`)
variable: Variable in the dataset to use
chunks: Mapping of dimension name to a list of chunk sizes, one entry
for each run
function: Function that takes a :obj:`xarray.DataArray` (the variable)
and returns a :obj:`xarray.DataArray` to test the performance of
run_count: Number of times to run each profile (the minimum time is returned)
mfdataset_args: Extra arguments to pass to :func:`xarray.open_mfdataset`
Returns:
:obj:`pandas.DataFrame` with information from :func:`profile` for each
run
"""
results = []
for values in zip(*chunks.values()):
cs = dict(zip(chunks.keys(), values))
results.append(
profile(paths, variable, cs, function, run_count, mfdataset_args)
)
r = pandas.DataFrame(results)
return r
def profile(
paths: str,
variable: str,
chunks: Dict[str, int],
function,
run_count: int = 3,
mfdataset_args: Dict[str, Any] = {},
):
"""
Run a function run_count times, returning the minimum time taken
>>> def func(da):
... return t2m.mean()
>>> climtas.profile.profile(
... '/g/data/ub4/era5/netcdf/surface/t2m/2019/t2m_era5_global_20190101_*.nc',
... variable='t2m',
... function=func,
... chunks={'time':93, 'latitude': 91, 'longitude': 180}) #doctest: +SKIP
{'time_total': 9.561158710159361,
'time_open': 0.014718276914209127,
'time_function': 0.0033595040440559387,
'time_optimize': 0.01087462529540062,
'time_load': 9.529402975924313,
'chunks': {'time': 93, 'latitude': 91, 'longitude': 180},
'nchunks_in': 512,
'nchunks_out': 1,
'chunksize_in': '6.09 MB',
'chunksize_out': '4 B',
'tasks_in': 513,
'tasks_out': 1098,
'tasks_optimized': 1098}
Args:
paths: Paths to open (as :func:`xarray.open_mfdataset`)
variable: Variable in the dataset to use
chunks: Mapping of dimension name to chunk sizes
function: Function that takes a :obj:`xarray.DataArray` (the variable)
and returns a :obj:`dask.array.Array` to test the performance of
run_count: Number of times to run each profile (the minimum time is returned)
mfdataset_args: Extra arguments to pass to :func:`xarray.open_mfdataset`
Returns:
Dict[str, int] :ref:`profiling information<Profile results>`
"""
result = profile_once(paths, variable, chunks, function, mfdataset_args)
for n in range(run_count - 1):
r = profile_once(paths, variable, chunks, function, mfdataset_args)
for k, v in r.items():
if k.startswith("time_") and v < result[k]:
result[k] = v
return result
def profile_once(
paths: str,
variable: str,
chunks: Dict[str, int],
function,
mfdataset_args: Dict[str, Any] = {},
):
"""
Run a single profile instance
>>> def func(da):
... return t2m.mean()
>>> climtas.profile.profile_once(
... '/g/data/ub4/era5/netcdf/surface/t2m/2019/t2m_era5_global_20190101_*.nc',
... variable='t2m',
... function=func,
... chunks={'time':93, 'latitude': 91, 'longitude': 180}) #doctest: +SKIP
{'time_total': 9.561158710159361,
'time_open': 0.014718276914209127,
'time_function': 0.0033595040440559387,
'time_optimize': 0.01087462529540062,
'time_load': 9.529402975924313,
'chunks': {'time': 93, 'latitude': 91, 'longitude': 180},
'nchunks_in': 512,
'nchunks_out': 1,
'chunksize_in': '6.09 MB',
'chunksize_out': '4 B',
'tasks_in': 513,
'tasks_out': 1098,
'tasks_optimized': 1098}
Args:
paths: Paths to open (as :func:`xarray.open_mfdataset`)
variable: Variable in the dataset to use
chunks: Mapping of dimension name to chunk sizes
function: Function that takes a :obj:`xarray.DataArray` (the variable)
and returns a :obj:`dask.array.Array` to test the performance of
run_count: Number of times to run each profile (the minimum time is returned)
mfdataset_args: Extra arguments to pass to :func:`xarray.open_mfdataset`
Returns:
Dict[str, Any] :ref:`profiling information<Profile results>`
"""
results: Dict[str, Any] = {}
total_start = time.perf_counter()
open_start = time.perf_counter()
with xarray.open_mfdataset(paths, chunks=chunks, **mfdataset_args) as data:
open_end = time.perf_counter()
var = data[variable]
tasks_in = len(var.data.__dask_graph__())
chunks_in = var.data.npartitions
chunksize_in = dask.utils.format_bytes(
numpy.prod(var.data.chunksize) * var.dtype.itemsize
)
func_start = time.perf_counter()
r = function(var).data
func_end = time.perf_counter()
tasks = len(r.__dask_graph__())
chunksize = dask.utils.format_bytes(numpy.prod(r.chunksize) * r.dtype.itemsize)
chunks_out = r.npartitions
opt_start = time.perf_counter()
opt = dask.optimize(r)
opt_end = time.perf_counter()
tasks_opt = len(r.__dask_graph__())
load_start = time.perf_counter()
dask.compute(opt)
load_end = time.perf_counter()
total_end = time.perf_counter()
results["time_total"] = total_end - total_start
results["time_open"] = open_end - open_start
results["time_function"] = func_end - func_start
results["time_optimize"] = opt_end - opt_start
results["time_load"] = load_end - load_start
results["chunks"] = chunks
results["nchunks_in"] = chunks_in
results["nchunks_out"] = chunks_out
results["chunksize_in"] = chunksize_in
results["chunksize_out"] = chunksize
results["tasks_in"] = tasks_in
results["tasks_out"] = tasks
results["tasks_optimized"] = tasks_opt
return results
class Timer:
def __init__(self, name):
self.starts = {}
self.stops = {}
self.chunks = {}
self.client = None
self.name = name
self.total_start = time.perf_counter()
self.excluded = 0
def mark(self, name: str) -> None:
if name not in self.starts:
self.starts[name] = time.perf_counter()
else:
self.stops[name] = time.perf_counter()
print(name, self.stops[name] - self.starts[name])
def exclude(self, name: str) -> None:
self.mark(name)
if name in self.stops:
self.excluded += self.stops[name] - self.starts[name]
def times(self) -> T.Dict[str, float]:
return {k: self.stops[k] - v for k, v in self.starts.items()}
def record(self, file) -> None:
total = time.perf_counter() - self.total_start - self.excluded
result = {
"name": self.name,
"run_date": datetime.datetime.now(),
"xarray_version": xarray.__version__,
"climtas_version": __version__,
"client_workers": len(self.client.cluster.workers),
"worker_threads": self.client.cluster.workers[0].nthreads,
"total": total,
}
result.update({"chunk_" + k: v for k, v in self.chunks.items()})
result.update(self.times())
result.update(self.pbs_info())
exists = os.path.exists(file)
with open(file, "a") as f:
writer = csv.DictWriter(f, list(result.keys()))
if not exists:
writer.writeheader()
writer.writerow(result)
def pbs_info(self):
jobid = os.environ.get("PBS_JOBID", None)
if jobid is None:
return {"mem_request": None, "mem_used": None, "cpu_pct": None}
uqstat = subprocess.run(
["/g/data/hh5/public/apps/nci_scripts/uqstat", "--format=json"],
stdout=subprocess.PIPE,
text=True,
)
uqstat.check_returncode()
j = json.loads(uqstat.stdout)
job_info = j[jobid]
return {
"mem_request": job_info["mem_request"],
"mem_used": job_info["mem_used"],
"cpu_pct": job_info["cpu_pct"],
"hostname": os.environ["HOSTNAME"],
}
| [
"[email protected]"
] | |
fd762726f5e677c8313ab0c32b7230cce74d9b04 | 2b4790d77439d89ad27bdd04bac539283f0dd605 | /basic_ex/11-module.py | f30ced73ca20173a9af871afa196d9b7e56f467c | [] | no_license | ajioy/python-ex | 9fde4bcfe35edeee5050365660a03bdb6b913da1 | 982a3cdf0de0e140faa4cb539f2961b311de2c2a | refs/heads/master | 2020-04-05T14:06:09.909935 | 2018-08-14T14:43:55 | 2018-08-14T14:43:55 | 59,105,033 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 244 | py | # -*- coding:utf-8 -*-
import sys
def test():
args = sys.argv
if len(args) == 1:
print 'Hello, world'
elif len(args) == 2:
print 'Hello, %s' % args[1]
else:
print 'Too many arguments!'
if __name__ == '__main__':
test()
| [
"[email protected]"
] | |
ae2480bc43eb4a6ec86d8e0eb354a44122b0262d | 67ecd129777da89dfaa04a5803f0345e1248f8ba | /nmigen/hdl/ast.py | cadf9600c6d5cc62c295a53f911a9a19f87a4d58 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] | permissive | sjolsen/nmigen | f866bc27896d94da75a53d5c579b6eba06be78c9 | 0e40dc0a2d336945dfe0669207fe160cafff50dc | refs/heads/master | 2021-03-22T21:57:44.362165 | 2020-04-07T11:17:14 | 2020-04-08T14:08:35 | 247,401,219 | 0 | 0 | NOASSERTION | 2020-03-15T04:28:48 | 2020-03-15T04:28:48 | null | UTF-8 | Python | false | false | 55,889 | py | from abc import ABCMeta, abstractmethod
import traceback
import warnings
import typing
from collections import OrderedDict
from collections.abc import Iterable, MutableMapping, MutableSet, MutableSequence
from enum import Enum
from .. import tracer
from .._utils import *
from .._unused import *
__all__ = [
"Shape", "signed", "unsigned",
"Value", "Const", "C", "AnyConst", "AnySeq", "Operator", "Mux", "Part", "Slice", "Cat", "Repl",
"Array", "ArrayProxy",
"Signal", "ClockSignal", "ResetSignal",
"UserValue",
"Sample", "Past", "Stable", "Rose", "Fell", "Initial",
"Statement", "Switch",
"Property", "Assign", "Assert", "Assume", "Cover",
"ValueKey", "ValueDict", "ValueSet", "SignalKey", "SignalDict", "SignalSet",
]
class DUID:
"""Deterministic Unique IDentifier."""
__next_uid = 0
def __init__(self):
self.duid = DUID.__next_uid
DUID.__next_uid += 1
class Shape(typing.NamedTuple):
"""Bit width and signedness of a value.
A ``Shape`` can be constructed using:
* explicit bit width and signedness;
* aliases :func:`signed` and :func:`unsigned`;
* casting from a variety of objects.
A ``Shape`` can be cast from:
* an integer, where the integer specifies the bit width;
* a range, where the result is wide enough to represent any element of the range, and is
signed if any element of the range is signed;
* an :class:`Enum` with all integer members or :class:`IntEnum`, where the result is wide
enough to represent any member of the enumeration, and is signed if any member of
the enumeration is signed.
Parameters
----------
width : int
The number of bits in the representation, including the sign bit (if any).
signed : bool
If ``False``, the value is unsigned. If ``True``, the value is signed two's complement.
"""
width: int = 1
signed: bool = False
@staticmethod
def cast(obj, *, src_loc_at=0):
if isinstance(obj, Shape):
return obj
if isinstance(obj, int):
return Shape(obj)
if isinstance(obj, tuple):
width, signed = obj
warnings.warn("instead of `{tuple}`, use `{constructor}({width})`"
.format(constructor="signed" if signed else "unsigned", width=width,
tuple=obj),
DeprecationWarning, stacklevel=2 + src_loc_at)
return Shape(width, signed)
if isinstance(obj, range):
if len(obj) == 0:
return Shape(0, obj.start < 0)
signed = obj.start < 0 or (obj.stop - obj.step) < 0
width = max(bits_for(obj.start, signed),
bits_for(obj.stop - obj.step, signed))
return Shape(width, signed)
if isinstance(obj, type) and issubclass(obj, Enum):
min_value = min(member.value for member in obj)
max_value = max(member.value for member in obj)
if not isinstance(min_value, int) or not isinstance(max_value, int):
raise TypeError("Only enumerations with integer values can be used "
"as value shapes")
signed = min_value < 0 or max_value < 0
width = max(bits_for(min_value, signed), bits_for(max_value, signed))
return Shape(width, signed)
raise TypeError("Object {!r} cannot be used as value shape".format(obj))
# TODO: use dataclasses instead of this hack
def _Shape___init__(self, width=1, signed=False):
if not isinstance(width, int) or width < 0:
raise TypeError("Width must be a non-negative integer, not {!r}"
.format(width))
Shape.__init__ = _Shape___init__
def unsigned(width):
"""Shorthand for ``Shape(width, signed=False)``."""
return Shape(width, signed=False)
def signed(width):
"""Shorthand for ``Shape(width, signed=True)``."""
return Shape(width, signed=True)
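# Illustrative casts (a sketch added for clarity; these results follow from
# Shape.cast above and are not part of the original source):
#
#   Shape.cast(8)             # -> Shape(width=8, signed=False)
#   Shape.cast(range(-8, 8))  # -> Shape(width=4, signed=True), wide enough for -8..7
#   unsigned(16)              # -> Shape(width=16, signed=False)
#   signed(16)                # -> Shape(width=16, signed=True)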
class Value(metaclass=ABCMeta):
@staticmethod
def cast(obj):
"""Converts ``obj`` to an nMigen value.
Booleans and integers are wrapped into a :class:`Const`. Enumerations whose members are
all integers are converted to a :class:`Const` with a shape that fits every member.
"""
if isinstance(obj, Value):
return obj
if isinstance(obj, int):
return Const(obj)
if isinstance(obj, Enum):
return Const(obj.value, Shape.cast(type(obj)))
raise TypeError("Object {!r} cannot be converted to an nMigen value".format(obj))
def __init__(self, *, src_loc_at=0):
super().__init__()
self.src_loc = tracer.get_src_loc(1 + src_loc_at)
def __bool__(self):
raise TypeError("Attempted to convert nMigen value to boolean")
def __invert__(self):
return Operator("~", [self])
def __neg__(self):
return Operator("-", [self])
def __add__(self, other):
return Operator("+", [self, other])
def __radd__(self, other):
return Operator("+", [other, self])
def __sub__(self, other):
return Operator("-", [self, other])
def __rsub__(self, other):
return Operator("-", [other, self])
def __mul__(self, other):
return Operator("*", [self, other])
def __rmul__(self, other):
return Operator("*", [other, self])
def __check_divisor(self):
width, signed = self.shape()
if signed:
# Python's division semantics and Verilog's division semantics differ for negative
# divisors (Python uses div/mod, Verilog uses quo/rem); for now, avoid the issue
# completely by prohibiting such division operations.
raise NotImplementedError("Division by a signed value is not supported")
def __mod__(self, other):
other = Value.cast(other)
other.__check_divisor()
return Operator("%", [self, other])
def __rmod__(self, other):
self.__check_divisor()
return Operator("%", [other, self])
def __floordiv__(self, other):
other = Value.cast(other)
other.__check_divisor()
return Operator("//", [self, other])
def __rfloordiv__(self, other):
self.__check_divisor()
return Operator("//", [other, self])
def __check_shamt(self):
width, signed = self.shape()
if signed:
# Neither Python nor HDLs implement shifts by negative values; prohibit any shifts
# by a signed value to make sure the shift amount can always be interpreted as
# an unsigned value.
raise NotImplementedError("Shift by a signed value is not supported")
def __lshift__(self, other):
other = Value.cast(other)
other.__check_shamt()
return Operator("<<", [self, other])
def __rlshift__(self, other):
self.__check_shamt()
return Operator("<<", [other, self])
def __rshift__(self, other):
other = Value.cast(other)
other.__check_shamt()
return Operator(">>", [self, other])
def __rrshift__(self, other):
self.__check_shamt()
return Operator(">>", [other, self])
def __and__(self, other):
return Operator("&", [self, other])
def __rand__(self, other):
return Operator("&", [other, self])
def __xor__(self, other):
return Operator("^", [self, other])
def __rxor__(self, other):
return Operator("^", [other, self])
def __or__(self, other):
return Operator("|", [self, other])
def __ror__(self, other):
return Operator("|", [other, self])
def __eq__(self, other):
return Operator("==", [self, other])
def __ne__(self, other):
return Operator("!=", [self, other])
def __lt__(self, other):
return Operator("<", [self, other])
def __le__(self, other):
return Operator("<=", [self, other])
def __gt__(self, other):
return Operator(">", [self, other])
def __ge__(self, other):
return Operator(">=", [self, other])
def __abs__(self):
width, signed = self.shape()
if signed:
return Mux(self >= 0, self, -self)
else:
return self
def __len__(self):
return self.shape().width
def __getitem__(self, key):
n = len(self)
if isinstance(key, int):
if key not in range(-n, n):
raise IndexError("Cannot index {} bits into {}-bit value".format(key, n))
if key < 0:
key += n
return Slice(self, key, key + 1)
elif isinstance(key, slice):
start, stop, step = key.indices(n)
if step != 1:
return Cat(self[i] for i in range(start, stop, step))
return Slice(self, start, stop)
else:
raise TypeError("Cannot index value with {}".format(repr(key)))
def as_unsigned(self):
"""Conversion to unsigned.
Returns
-------
Value, out
This ``Value`` reinterpreted as a unsigned integer.
"""
return Operator("u", [self])
def as_signed(self):
"""Conversion to signed.
Returns
-------
Value, out
This ``Value`` reinterpreted as a signed integer.
"""
return Operator("s", [self])
def bool(self):
"""Conversion to boolean.
Returns
-------
Value, out
``1`` if any bits are set, ``0`` otherwise.
"""
return Operator("b", [self])
def any(self):
"""Check if any bits are ``1``.
Returns
-------
Value, out
``1`` if any bits are set, ``0`` otherwise.
"""
return Operator("r|", [self])
def all(self):
"""Check if all bits are ``1``.
Returns
-------
Value, out
``1`` if all bits are set, ``0`` otherwise.
"""
return Operator("r&", [self])
def xor(self):
"""Compute pairwise exclusive-or of every bit.
Returns
-------
Value, out
``1`` if an odd number of bits are set, ``0`` if an even number of bits are set.
"""
return Operator("r^", [self])
def implies(premise, conclusion):
"""Implication.
Returns
-------
Value, out
``0`` if ``premise`` is true and ``conclusion`` is not, ``1`` otherwise.
"""
return ~premise | conclusion
def bit_select(self, offset, width):
"""Part-select with bit granularity.
Selects a constant width but variable offset part of a ``Value``, such that successive
parts overlap by all but 1 bit.
Parameters
----------
offset : Value, in
Index of first selected bit.
width : int
Number of selected bits.
Returns
-------
Part, out
Selected part of the ``Value``
"""
offset = Value.cast(offset)
if type(offset) is Const and isinstance(width, int):
return self[offset.value:offset.value + width]
return Part(self, offset, width, stride=1, src_loc_at=1)
def word_select(self, offset, width):
"""Part-select with word granularity.
Selects a constant width but variable offset part of a ``Value``, such that successive
parts do not overlap.
Parameters
----------
offset : Value, in
Index of first selected word.
width : int
Number of selected bits.
Returns
-------
Part, out
Selected part of the ``Value``
"""
offset = Value.cast(offset)
if type(offset) is Const and isinstance(width, int):
return self[offset.value * width:(offset.value + 1) * width]
return Part(self, offset, width, stride=width, src_loc_at=1)
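    # Illustrative part-selects (a sketch, not part of the original source): with an
    # 8-bit value ``x`` and a Signal ``i`` used as the offset,
    #   x.bit_select(i, 4)   # bits [i, i+4); successive windows overlap by 3 bits
    #   x.word_select(i, 4)  # bits [4*i, 4*i+4); non-overlapping 4-bit words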
def matches(self, *patterns):
"""Pattern matching.
Matches against a set of patterns, which may be integers or bit strings, recognizing
the same grammar as ``Case()``.
Parameters
----------
patterns : int or str
Patterns to match against.
Returns
-------
Value, out
``1`` if any pattern matches the value, ``0`` otherwise.
"""
matches = []
for pattern in patterns:
if not isinstance(pattern, (int, str, Enum)):
raise SyntaxError("Match pattern must be an integer, a string, or an enumeration, "
"not {!r}"
.format(pattern))
if isinstance(pattern, str) and any(bit not in "01- \t" for bit in pattern):
raise SyntaxError("Match pattern '{}' must consist of 0, 1, and - (don't care) "
"bits, and may include whitespace"
.format(pattern))
if (isinstance(pattern, str) and
len("".join(pattern.split())) != len(self)):
raise SyntaxError("Match pattern '{}' must have the same width as match value "
"(which is {})"
.format(pattern, len(self)))
if isinstance(pattern, int) and bits_for(pattern) > len(self):
warnings.warn("Match pattern '{:b}' is wider than match value "
"(which has width {}); comparison will never be true"
.format(pattern, len(self)),
SyntaxWarning, stacklevel=3)
continue
if isinstance(pattern, str):
pattern = "".join(pattern.split()) # remove whitespace
mask = int(pattern.replace("0", "1").replace("-", "0"), 2)
pattern = int(pattern.replace("-", "0"), 2)
matches.append((self & mask) == pattern)
elif isinstance(pattern, int):
matches.append(self == pattern)
elif isinstance(pattern, Enum):
matches.append(self == pattern.value)
else:
assert False
if not matches:
return Const(0)
elif len(matches) == 1:
return matches[0]
else:
return Cat(*matches).any()
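    # Illustrative match (a sketch, not part of the original source): for a 4-bit
    # value ``op``,
    #   op.matches("1---", 1)  # true when the MSB is set or ``op`` equals 1;
    #                          # "-" marks don't-care bits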
def eq(self, value):
"""Assignment.
Parameters
----------
value : Value, in
Value to be assigned.
Returns
-------
Assign
Assignment statement that can be used in combinatorial or synchronous context.
"""
return Assign(self, value, src_loc_at=1)
@abstractmethod
def shape(self):
"""Bit width and signedness of a value.
Returns
-------
Shape
See :class:`Shape`.
Examples
--------
>>> Signal(8).shape()
Shape(width=8, signed=False)
>>> Const(0xaa).shape()
Shape(width=8, signed=False)
"""
pass # :nocov:
def _lhs_signals(self):
raise TypeError("Value {!r} cannot be used in assignments".format(self))
@abstractmethod
def _rhs_signals(self):
pass # :nocov:
def _as_const(self):
raise TypeError("Value {!r} cannot be evaluated as constant".format(self))
__hash__ = None
@final
class Const(Value):
"""A constant, literal integer value.
Parameters
----------
value : int
shape : int or tuple or None
Either an integer ``width`` or a tuple ``(width, signed)`` specifying the number of bits
in this constant and whether it is signed (can represent negative values).
``shape`` defaults to the minimum possible width and signedness of ``value``.
Attributes
----------
width : int
signed : bool
"""
src_loc = None
@staticmethod
def normalize(value, shape):
width, signed = shape
mask = (1 << width) - 1
value &= mask
if signed and value >> (width - 1):
value |= ~mask
return value
def __init__(self, value, shape=None, *, src_loc_at=0):
# We deliberately do not call Value.__init__ here.
self.value = int(value)
if shape is None:
shape = Shape(bits_for(self.value), signed=self.value < 0)
elif isinstance(shape, int):
shape = Shape(shape, signed=self.value < 0)
else:
shape = Shape.cast(shape, src_loc_at=1 + src_loc_at)
self.width, self.signed = shape
self.value = self.normalize(self.value, shape)
def shape(self):
return Shape(self.width, self.signed)
def _rhs_signals(self):
return ValueSet()
def _as_const(self):
return self.value
def __repr__(self):
return "(const {}'{}d{})".format(self.width, "s" if self.signed else "", self.value)
C = Const # shorthand
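# Illustrative normalization (a sketch, not part of the original source):
#   Const(0x1ff, unsigned(8)).value  # == 0xff, excess high bits are masked off
#   Const(255, signed(8)).value      # == -1, reinterpreted as two's complement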
class AnyValue(Value, DUID):
def __init__(self, shape, *, src_loc_at=0):
super().__init__(src_loc_at=src_loc_at)
self.width, self.signed = Shape.cast(shape, src_loc_at=1 + src_loc_at)
if not isinstance(self.width, int) or self.width < 0:
raise TypeError("Width must be a non-negative integer, not {!r}"
.format(self.width))
def shape(self):
return Shape(self.width, self.signed)
def _rhs_signals(self):
return ValueSet()
@final
class AnyConst(AnyValue):
def __repr__(self):
return "(anyconst {}'{})".format(self.width, "s" if self.signed else "")
@final
class AnySeq(AnyValue):
def __repr__(self):
return "(anyseq {}'{})".format(self.width, "s" if self.signed else "")
@final
class Operator(Value):
def __init__(self, operator, operands, *, src_loc_at=0):
super().__init__(src_loc_at=1 + src_loc_at)
self.operator = operator
self.operands = [Value.cast(op) for op in operands]
def shape(self):
def _bitwise_binary_shape(a_shape, b_shape):
a_bits, a_sign = a_shape
b_bits, b_sign = b_shape
if not a_sign and not b_sign:
# both operands unsigned
return Shape(max(a_bits, b_bits), False)
elif a_sign and b_sign:
# both operands signed
return Shape(max(a_bits, b_bits), True)
elif not a_sign and b_sign:
# first operand unsigned (add sign bit), second operand signed
return Shape(max(a_bits + 1, b_bits), True)
else:
# first signed, second operand unsigned (add sign bit)
return Shape(max(a_bits, b_bits + 1), True)
op_shapes = list(map(lambda x: x.shape(), self.operands))
if len(op_shapes) == 1:
(a_width, a_signed), = op_shapes
if self.operator in ("+", "~"):
return Shape(a_width, a_signed)
if self.operator == "-":
return Shape(a_width + 1, True)
if self.operator in ("b", "r|", "r&", "r^"):
return Shape(1, False)
if self.operator == "u":
return Shape(a_width, False)
if self.operator == "s":
return Shape(a_width, True)
elif len(op_shapes) == 2:
(a_width, a_signed), (b_width, b_signed) = op_shapes
if self.operator in ("+", "-"):
width, signed = _bitwise_binary_shape(*op_shapes)
return Shape(width + 1, signed)
if self.operator == "*":
return Shape(a_width + b_width, a_signed or b_signed)
if self.operator in ("//", "%"):
assert not b_signed
return Shape(a_width, a_signed)
if self.operator in ("<", "<=", "==", "!=", ">", ">="):
return Shape(1, False)
if self.operator in ("&", "^", "|"):
return _bitwise_binary_shape(*op_shapes)
if self.operator == "<<":
if b_signed:
extra = 2 ** (b_width - 1) - 1
else:
extra = 2 ** (b_width) - 1
return Shape(a_width + extra, a_signed)
if self.operator == ">>":
if b_signed:
extra = 2 ** (b_width - 1)
else:
extra = 0
return Shape(a_width + extra, a_signed)
elif len(op_shapes) == 3:
if self.operator == "m":
s_shape, a_shape, b_shape = op_shapes
return _bitwise_binary_shape(a_shape, b_shape)
raise NotImplementedError("Operator {}/{} not implemented"
.format(self.operator, len(op_shapes))) # :nocov:
def _rhs_signals(self):
return union(op._rhs_signals() for op in self.operands)
def __repr__(self):
return "({} {})".format(self.operator, " ".join(map(repr, self.operands)))
def Mux(sel, val1, val0):
"""Choose between two values.
Parameters
----------
sel : Value, in
Selector.
val1 : Value, in
val0 : Value, in
Input values.
Returns
-------
Value, out
Output ``Value``. If ``sel`` is asserted, the Mux returns ``val1``, else ``val0``.
"""
sel = Value.cast(sel)
if len(sel) != 1:
sel = sel.bool()
return Operator("m", [sel, val1, val0])
@final
class Slice(Value):
def __init__(self, value, start, stop, *, src_loc_at=0):
if not isinstance(start, int):
raise TypeError("Slice start must be an integer, not {!r}".format(start))
if not isinstance(stop, int):
raise TypeError("Slice stop must be an integer, not {!r}".format(stop))
n = len(value)
if start not in range(-(n+1), n+1):
raise IndexError("Cannot start slice {} bits into {}-bit value".format(start, n))
if start < 0:
start += n
if stop not in range(-(n+1), n+1):
raise IndexError("Cannot stop slice {} bits into {}-bit value".format(stop, n))
if stop < 0:
stop += n
if start > stop:
raise IndexError("Slice start {} must be less than slice stop {}".format(start, stop))
super().__init__(src_loc_at=src_loc_at)
self.value = Value.cast(value)
self.start = start
self.stop = stop
def shape(self):
return Shape(self.stop - self.start)
def _lhs_signals(self):
return self.value._lhs_signals()
def _rhs_signals(self):
return self.value._rhs_signals()
def __repr__(self):
return "(slice {} {}:{})".format(repr(self.value), self.start, self.stop)
@final
class Part(Value):
def __init__(self, value, offset, width, stride=1, *, src_loc_at=0):
if not isinstance(width, int) or width < 0:
raise TypeError("Part width must be a non-negative integer, not {!r}".format(width))
if not isinstance(stride, int) or stride <= 0:
raise TypeError("Part stride must be a positive integer, not {!r}".format(stride))
super().__init__(src_loc_at=src_loc_at)
self.value = value
self.offset = Value.cast(offset)
self.width = width
self.stride = stride
def shape(self):
return Shape(self.width)
def _lhs_signals(self):
return self.value._lhs_signals()
def _rhs_signals(self):
return self.value._rhs_signals() | self.offset._rhs_signals()
def __repr__(self):
return "(part {} {} {} {})".format(repr(self.value), repr(self.offset),
self.width, self.stride)
@final
class Cat(Value):
"""Concatenate values.
Form a compound ``Value`` from several smaller ones by concatenation.
The first argument occupies the lower bits of the result.
The return value can be used on either side of an assignment, that
is, the concatenated value can be used as an argument on the RHS or
as a target on the LHS. If it is used on the LHS, it must solely
consist of ``Signal`` s, slices of ``Signal`` s, and other concatenations
meeting these properties. The bit length of the return value is the sum of
the bit lengths of the arguments::
len(Cat(args)) == sum(len(arg) for arg in args)
Parameters
----------
*args : Values or iterables of Values, inout
``Value`` s to be concatenated.
Returns
-------
Value, inout
        Resulting ``Value`` obtained by concatenation.
"""
def __init__(self, *args, src_loc_at=0):
super().__init__(src_loc_at=src_loc_at)
self.parts = [Value.cast(v) for v in flatten(args)]
def shape(self):
return Shape(sum(len(part) for part in self.parts))
def _lhs_signals(self):
return union((part._lhs_signals() for part in self.parts), start=ValueSet())
def _rhs_signals(self):
return union((part._rhs_signals() for part in self.parts), start=ValueSet())
def _as_const(self):
value = 0
for part in reversed(self.parts):
value <<= len(part)
value |= part._as_const()
return value
def __repr__(self):
return "(cat {})".format(" ".join(map(repr, self.parts)))
@final
class Repl(Value):
"""Replicate a value
An input value is replicated (repeated) several times
to be used on the RHS of assignments::
len(Repl(s, n)) == len(s) * n
Parameters
----------
value : Value, in
Input value to be replicated.
count : int
Number of replications.
Returns
-------
Repl, out
Replicated value.
"""
def __init__(self, value, count, *, src_loc_at=0):
if not isinstance(count, int) or count < 0:
raise TypeError("Replication count must be a non-negative integer, not {!r}"
.format(count))
super().__init__(src_loc_at=src_loc_at)
self.value = Value.cast(value)
self.count = count
def shape(self):
return Shape(len(self.value) * self.count)
def _rhs_signals(self):
return self.value._rhs_signals()
def __repr__(self):
return "(repl {!r} {})".format(self.value, self.count)
# @final
class Signal(Value, DUID):
"""A varying integer value.
Parameters
----------
shape : ``Shape``-castable object or None
Specification for the number of bits in this ``Signal`` and its signedness (whether it
can represent negative values). See ``Shape.cast`` for details.
If not specified, ``shape`` defaults to 1-bit and non-signed.
name : str
Name hint for this signal. If ``None`` (default) the name is inferred from the variable
name this ``Signal`` is assigned to.
reset : int or integral Enum
Reset (synchronous) or default (combinatorial) value.
When this ``Signal`` is assigned to in synchronous context and the corresponding clock
domain is reset, the ``Signal`` assumes the given value. When this ``Signal`` is unassigned
in combinatorial context (due to conditional assignments not being taken), the ``Signal``
assumes its ``reset`` value. Defaults to 0.
reset_less : bool
If ``True``, do not generate reset logic for this ``Signal`` in synchronous statements.
The ``reset`` value is only used as a combinatorial default or as the initial value.
Defaults to ``False``.
attrs : dict
Dictionary of synthesis attributes.
decoder : function or Enum
A function converting integer signal values to human-readable strings (e.g. FSM state
names). If an ``Enum`` subclass is passed, it is concisely decoded using format string
``"{0.name:}/{0.value:}"``, or a number if the signal value is not a member of
the enumeration.
Attributes
----------
width : int
signed : bool
name : str
reset : int
reset_less : bool
attrs : dict
decoder : function
"""
def __init__(self, shape=None, *, name=None, reset=0, reset_less=False,
attrs=None, decoder=None, src_loc_at=0):
super().__init__(src_loc_at=src_loc_at)
if name is not None and not isinstance(name, str):
raise TypeError("Name must be a string, not {!r}".format(name))
self.name = name or tracer.get_var_name(depth=2 + src_loc_at, default="$signal")
if shape is None:
shape = unsigned(1)
self.width, self.signed = Shape.cast(shape, src_loc_at=1 + src_loc_at)
if isinstance(reset, Enum):
reset = reset.value
if not isinstance(reset, int):
raise TypeError("Reset value has to be an int or an integral Enum")
reset_width = bits_for(reset, self.signed)
if reset != 0 and reset_width > self.width:
warnings.warn("Reset value {!r} requires {} bits to represent, but the signal "
"only has {} bits"
.format(reset, reset_width, self.width),
SyntaxWarning, stacklevel=2 + src_loc_at)
self.reset = reset
self.reset_less = bool(reset_less)
self.attrs = OrderedDict(() if attrs is None else attrs)
if decoder is None and isinstance(shape, type) and issubclass(shape, Enum):
decoder = shape
if isinstance(decoder, type) and issubclass(decoder, Enum):
def enum_decoder(value):
try:
return "{0.name:}/{0.value:}".format(decoder(value))
except ValueError:
return str(value)
self.decoder = enum_decoder
else:
self.decoder = decoder
# Not a @classmethod because nmigen.compat requires it.
@staticmethod
def like(other, *, name=None, name_suffix=None, src_loc_at=0, **kwargs):
"""Create Signal based on another.
Parameters
----------
other : Value
Object to base this Signal on.
"""
if name is not None:
new_name = str(name)
elif name_suffix is not None:
new_name = other.name + str(name_suffix)
else:
new_name = tracer.get_var_name(depth=2 + src_loc_at, default="$like")
kw = dict(shape=Value.cast(other).shape(), name=new_name)
if isinstance(other, Signal):
kw.update(reset=other.reset, reset_less=other.reset_less,
attrs=other.attrs, decoder=other.decoder)
kw.update(kwargs)
return Signal(**kw, src_loc_at=1 + src_loc_at)
def shape(self):
return Shape(self.width, self.signed)
def _lhs_signals(self):
return ValueSet((self,))
def _rhs_signals(self):
return ValueSet((self,))
def __repr__(self):
return "(sig {})".format(self.name)
@final
class ClockSignal(Value):
"""Clock signal for a clock domain.
Any ``ClockSignal`` is equivalent to ``cd.clk`` for a clock domain with the corresponding name.
All of these signals ultimately refer to the same signal, but they can be manipulated
independently of the clock domain, even before the clock domain is created.
Parameters
----------
domain : str
Clock domain to obtain a clock signal for. Defaults to ``"sync"``.
"""
def __init__(self, domain="sync", *, src_loc_at=0):
super().__init__(src_loc_at=src_loc_at)
if not isinstance(domain, str):
raise TypeError("Clock domain name must be a string, not {!r}".format(domain))
if domain == "comb":
raise ValueError("Domain '{}' does not have a clock".format(domain))
self.domain = domain
def shape(self):
return Shape(1)
def _lhs_signals(self):
return ValueSet((self,))
def _rhs_signals(self):
raise NotImplementedError("ClockSignal must be lowered to a concrete signal") # :nocov:
def __repr__(self):
return "(clk {})".format(self.domain)
@final
class ResetSignal(Value):
"""Reset signal for a clock domain.
Any ``ResetSignal`` is equivalent to ``cd.rst`` for a clock domain with the corresponding name.
All of these signals ultimately refer to the same signal, but they can be manipulated
independently of the clock domain, even before the clock domain is created.
Parameters
----------
domain : str
Clock domain to obtain a reset signal for. Defaults to ``"sync"``.
allow_reset_less : bool
If the clock domain is reset-less, act as a constant ``0`` instead of reporting an error.
"""
def __init__(self, domain="sync", allow_reset_less=False, *, src_loc_at=0):
super().__init__(src_loc_at=src_loc_at)
if not isinstance(domain, str):
raise TypeError("Clock domain name must be a string, not {!r}".format(domain))
if domain == "comb":
raise ValueError("Domain '{}' does not have a reset".format(domain))
self.domain = domain
self.allow_reset_less = allow_reset_less
def shape(self):
return Shape(1)
def _lhs_signals(self):
return ValueSet((self,))
def _rhs_signals(self):
raise NotImplementedError("ResetSignal must be lowered to a concrete signal") # :nocov:
def __repr__(self):
return "(rst {})".format(self.domain)
class Array(MutableSequence):
"""Addressable multiplexer.
An array is similar to a ``list`` that can also be indexed by ``Value``s; indexing by an integer or a slice works the same as for Python lists, but indexing by a ``Value`` results
in a proxy.
The array proxy can be used as an ordinary ``Value``, i.e. participate in calculations and
assignments, provided that all elements of the array are values. The array proxy also supports
attribute access and further indexing, each returning another array proxy; this means that
the results of indexing into arrays, arrays of records, and arrays of arrays can all
be used as first-class values.
It is an error to change an array or any of its elements after an array proxy was created.
Changing the array directly will raise an exception. However, it is not possible to detect
the elements being modified; if an element's attribute or element is modified after the proxy
for it has been created, the proxy will refer to stale data.
Examples
--------
Simple array::
gpios = Array(Signal() for _ in range(10))
with m.If(bus.we):
m.d.sync += gpios[bus.addr].eq(bus.w_data)
with m.Else():
m.d.sync += bus.r_data.eq(gpios[bus.addr])
Multidimensional array::
mult = Array(Array(x * y for y in range(10)) for x in range(10))
a = Signal.range(10)
b = Signal.range(10)
r = Signal(8)
m.d.comb += r.eq(mult[a][b])
Array of records::
layout = [
("r_data", 16),
("r_en", 1),
]
buses = Array(Record(layout) for busno in range(4))
master = Record(layout)
m.d.comb += [
buses[sel].r_en.eq(master.r_en),
master.r_data.eq(buses[sel].r_data),
]
"""
def __init__(self, iterable=()):
self._inner = list(iterable)
self._proxy_at = None
self._mutable = True
def __getitem__(self, index):
if isinstance(index, Value):
if self._mutable:
self._proxy_at = tracer.get_src_loc()
self._mutable = False
return ArrayProxy(self, index)
else:
return self._inner[index]
def __len__(self):
return len(self._inner)
def _check_mutability(self):
if not self._mutable:
raise ValueError("Array can no longer be mutated after it was indexed with a value "
"at {}:{}".format(*self._proxy_at))
def __setitem__(self, index, value):
self._check_mutability()
self._inner[index] = value
def __delitem__(self, index):
self._check_mutability()
del self._inner[index]
def insert(self, index, value):
self._check_mutability()
self._inner.insert(index, value)
def __repr__(self):
return "(array{} [{}])".format(" mutable" if self._mutable else "",
", ".join(map(repr, self._inner)))
@final
class ArrayProxy(Value):
def __init__(self, elems, index, *, src_loc_at=0):
super().__init__(src_loc_at=1 + src_loc_at)
self.elems = elems
self.index = Value.cast(index)
def __getattr__(self, attr):
return ArrayProxy([getattr(elem, attr) for elem in self.elems], self.index)
def __getitem__(self, index):
return ArrayProxy([ elem[index] for elem in self.elems], self.index)
def _iter_as_values(self):
return (Value.cast(elem) for elem in self.elems)
def shape(self):
width, signed = 0, False
for elem_width, elem_signed in (elem.shape() for elem in self._iter_as_values()):
width = max(width, elem_width + elem_signed)
signed = max(signed, elem_signed)
return Shape(width, signed)
def _lhs_signals(self):
signals = union((elem._lhs_signals() for elem in self._iter_as_values()), start=ValueSet())
return signals
def _rhs_signals(self):
signals = union((elem._rhs_signals() for elem in self._iter_as_values()), start=ValueSet())
return self.index._rhs_signals() | signals
def __repr__(self):
return "(proxy (array [{}]) {!r})".format(", ".join(map(repr, self.elems)), self.index)
class UserValue(Value):
"""Value with custom lowering.
A ``UserValue`` is a value whose precise representation does not have to be immediately known,
which is useful in certain metaprogramming scenarios. Instead of providing fixed semantics
upfront, it is kept abstract for as long as possible, only being lowered to a concrete nMigen
value when required.
Note that the ``lower`` method will only be called once; this is necessary to ensure that
nMigen's view of representation of all values stays internally consistent. If the class
deriving from ``UserValue`` is mutable, then it must ensure that after ``lower`` is called,
it is not mutated in a way that changes its representation.
The following is an incomplete list of actions that, when applied to an ``UserValue`` directly
or indirectly, will cause it to be lowered, provided as an illustrative reference:
* Querying the shape using ``.shape()`` or ``len()``;
* Creating a similarly shaped signal using ``Signal.like``;
* Indexing or iterating through individual bits;
* Adding an assignment to the value to a ``Module`` using ``m.d.<domain> +=``.
"""
def __init__(self, *, src_loc_at=0):
super().__init__(src_loc_at=1 + src_loc_at)
self.__lowered = None
@abstractmethod
def lower(self):
"""Conversion to a concrete representation."""
pass # :nocov:
def _lazy_lower(self):
if self.__lowered is None:
self.__lowered = Value.cast(self.lower())
return self.__lowered
def shape(self):
return self._lazy_lower().shape()
def _lhs_signals(self):
return self._lazy_lower()._lhs_signals()
def _rhs_signals(self):
return self._lazy_lower()._rhs_signals()
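# Minimal illustrative UserValue subclass (hypothetical; lower() is called once and the
# result is cached by _lazy_lower() above):
#
#     class Checksum(UserValue):
#         def __init__(self, words):
#             super().__init__()
#             self.words = list(words)
#
#         def lower(self):
#             return sum(self.words, Const(0))[:8]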
@final
class Sample(Value):
"""Value from the past.
A ``Sample`` of an expression is equal to the value of the expression ``clocks`` clock edges
of the ``domain`` clock back. If that moment is before the beginning of time, it is equal
to the value of the expression calculated as if each signal had its reset value.
"""
def __init__(self, expr, clocks, domain, *, src_loc_at=0):
super().__init__(src_loc_at=1 + src_loc_at)
self.value = Value.cast(expr)
self.clocks = int(clocks)
self.domain = domain
if not isinstance(self.value, (Const, Signal, ClockSignal, ResetSignal, Initial)):
raise TypeError("Sampled value must be a signal or a constant, not {!r}"
.format(self.value))
if self.clocks < 0:
raise ValueError("Cannot sample a value {} cycles in the future"
.format(-self.clocks))
if not (self.domain is None or isinstance(self.domain, str)):
raise TypeError("Domain name must be a string or None, not {!r}"
.format(self.domain))
def shape(self):
return self.value.shape()
def _rhs_signals(self):
return ValueSet((self,))
def __repr__(self):
return "(sample {!r} @ {}[{}])".format(
self.value, "<default>" if self.domain is None else self.domain, self.clocks)
def Past(expr, clocks=1, domain=None):
return Sample(expr, clocks, domain)
def Stable(expr, clocks=0, domain=None):
return Sample(expr, clocks + 1, domain) == Sample(expr, clocks, domain)
def Rose(expr, clocks=0, domain=None):
return ~Sample(expr, clocks + 1, domain) & Sample(expr, clocks, domain)
def Fell(expr, clocks=0, domain=None):
return Sample(expr, clocks + 1, domain) & ~Sample(expr, clocks, domain)
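# These helpers are typically combined with Assert/Assume/Cover in formal properties,
# e.g. (illustrative sketch, assumes a Module `m` and Signals `data`, `valid`, `busy`):
#
#     m.d.comb += Assert(Stable(data) | Rose(valid))   # data only changes when valid rises
#     m.d.comb += Cover(Fell(busy))                    # ask for a trace where busy deasserts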
@final
class Initial(Value):
"""Start indicator, for model checking.
An ``Initial`` signal is ``1`` at the first cycle of model checking, and ``0`` at any other.
"""
def __init__(self, *, src_loc_at=0):
super().__init__(src_loc_at=src_loc_at)
def shape(self):
return Shape(1)
def _rhs_signals(self):
return ValueSet((self,))
def __repr__(self):
return "(initial)"
class _StatementList(list):
def __repr__(self):
return "({})".format(" ".join(map(repr, self)))
class Statement:
def __init__(self, *, src_loc_at=0):
self.src_loc = tracer.get_src_loc(1 + src_loc_at)
@staticmethod
def cast(obj):
if isinstance(obj, Iterable):
return _StatementList(sum((Statement.cast(e) for e in obj), []))
else:
if isinstance(obj, Statement):
return _StatementList([obj])
else:
raise TypeError("Object {!r} is not an nMigen statement".format(obj))
@final
class Assign(Statement):
def __init__(self, lhs, rhs, *, src_loc_at=0):
super().__init__(src_loc_at=src_loc_at)
self.lhs = Value.cast(lhs)
self.rhs = Value.cast(rhs)
def _lhs_signals(self):
return self.lhs._lhs_signals()
def _rhs_signals(self):
return self.lhs._rhs_signals() | self.rhs._rhs_signals()
def __repr__(self):
return "(eq {!r} {!r})".format(self.lhs, self.rhs)
class UnusedProperty(UnusedMustUse):
pass
class Property(Statement, MustUse):
_MustUse__warning = UnusedProperty
def __init__(self, test, *, _check=None, _en=None, src_loc_at=0):
super().__init__(src_loc_at=src_loc_at)
self.test = Value.cast(test)
self._check = _check
self._en = _en
if self._check is None:
self._check = Signal(reset_less=True, name="${}$check".format(self._kind))
self._check.src_loc = self.src_loc
if _en is None:
self._en = Signal(reset_less=True, name="${}$en".format(self._kind))
self._en.src_loc = self.src_loc
def _lhs_signals(self):
return ValueSet((self._en, self._check))
def _rhs_signals(self):
return self.test._rhs_signals()
def __repr__(self):
return "({} {!r})".format(self._kind, self.test)
@final
class Assert(Property):
_kind = "assert"
@final
class Assume(Property):
_kind = "assume"
@final
class Cover(Property):
_kind = "cover"
# @final
class Switch(Statement):
def __init__(self, test, cases, *, src_loc=None, src_loc_at=0, case_src_locs={}):
if src_loc is None:
super().__init__(src_loc_at=src_loc_at)
else:
# Switch is a bit special in terms of location tracking because it is usually created
# long after the control has left the statement that directly caused its creation.
self.src_loc = src_loc
# Switch is also a bit special in that its parts also have location information. It can't
# be automatically traced, so whatever constructs a Switch may optionally provide it.
self.case_src_locs = {}
self.test = Value.cast(test)
self.cases = OrderedDict()
for orig_keys, stmts in cases.items():
# Map: None -> (); key -> (key,); (key...) -> (key...)
keys = orig_keys
if keys is None:
keys = ()
if not isinstance(keys, tuple):
keys = (keys,)
# Map: 2 -> "0010"; "0010" -> "0010"
new_keys = ()
for key in keys:
if isinstance(key, str):
key = "".join(key.split()) # remove whitespace
elif isinstance(key, int):
key = format(key, "b").rjust(len(self.test), "0")
elif isinstance(key, Enum):
key = format(key.value, "b").rjust(len(self.test), "0")
else:
raise TypeError("Object {!r} cannot be used as a switch key"
.format(key))
assert len(key) == len(self.test)
new_keys = (*new_keys, key)
if not isinstance(stmts, Iterable):
stmts = [stmts]
self.cases[new_keys] = Statement.cast(stmts)
if orig_keys in case_src_locs:
self.case_src_locs[new_keys] = case_src_locs[orig_keys]
def _lhs_signals(self):
signals = union((s._lhs_signals() for ss in self.cases.values() for s in ss),
start=ValueSet())
return signals
def _rhs_signals(self):
signals = union((s._rhs_signals() for ss in self.cases.values() for s in ss),
start=ValueSet())
return self.test._rhs_signals() | signals
def __repr__(self):
def case_repr(keys, stmts):
stmts_repr = " ".join(map(repr, stmts))
if keys == ():
return "(default {})".format(stmts_repr)
elif len(keys) == 1:
return "(case {} {})".format(keys[0], stmts_repr)
else:
return "(case ({}) {})".format(" ".join(keys), stmts_repr)
case_reprs = [case_repr(keys, stmts) for keys, stmts in self.cases.items()]
return "(switch {!r} {})".format(self.test, " ".join(case_reprs))
class _MappedKeyCollection(metaclass=ABCMeta):
@abstractmethod
def _map_key(self, key):
pass # :nocov:
@abstractmethod
def _unmap_key(self, key):
pass # :nocov:
class _MappedKeyDict(MutableMapping, _MappedKeyCollection):
def __init__(self, pairs=()):
self._storage = OrderedDict()
for key, value in pairs:
self[key] = value
def __getitem__(self, key):
key = None if key is None else self._map_key(key)
return self._storage[key]
def __setitem__(self, key, value):
key = None if key is None else self._map_key(key)
self._storage[key] = value
def __delitem__(self, key):
key = None if key is None else self._map_key(key)
del self._storage[key]
def __iter__(self):
for key in self._storage:
if key is None:
yield None
else:
yield self._unmap_key(key)
def __eq__(self, other):
if not isinstance(other, type(self)):
return False
if len(self) != len(other):
return False
for ak, bk in zip(sorted(self._storage), sorted(other._storage)):
if ak != bk:
return False
if self._storage[ak] != other._storage[bk]:
return False
return True
def __len__(self):
return len(self._storage)
def __repr__(self):
pairs = ["({!r}, {!r})".format(k, v) for k, v in self.items()]
return "{}.{}([{}])".format(type(self).__module__, type(self).__name__,
", ".join(pairs))
class _MappedKeySet(MutableSet, _MappedKeyCollection):
def __init__(self, elements=()):
self._storage = OrderedDict()
for elem in elements:
self.add(elem)
def add(self, value):
self._storage[self._map_key(value)] = None
def update(self, values):
for value in values:
self.add(value)
def discard(self, value):
if value in self:
del self._storage[self._map_key(value)]
def __contains__(self, value):
return self._map_key(value) in self._storage
def __iter__(self):
for key in [k for k in self._storage]:
yield self._unmap_key(key)
def __len__(self):
return len(self._storage)
def __repr__(self):
return "{}.{}({})".format(type(self).__module__, type(self).__name__,
", ".join(repr(x) for x in self))
class ValueKey:
def __init__(self, value):
self.value = Value.cast(value)
if isinstance(self.value, Const):
self._hash = hash(self.value.value)
elif isinstance(self.value, (Signal, AnyValue)):
self._hash = hash(self.value.duid)
elif isinstance(self.value, (ClockSignal, ResetSignal)):
self._hash = hash(self.value.domain)
elif isinstance(self.value, Operator):
self._hash = hash((self.value.operator,
tuple(ValueKey(o) for o in self.value.operands)))
elif isinstance(self.value, Slice):
self._hash = hash((ValueKey(self.value.value), self.value.start, self.value.stop))
elif isinstance(self.value, Part):
self._hash = hash((ValueKey(self.value.value), ValueKey(self.value.offset),
self.value.width, self.value.stride))
elif isinstance(self.value, Cat):
self._hash = hash(tuple(ValueKey(o) for o in self.value.parts))
elif isinstance(self.value, ArrayProxy):
self._hash = hash((ValueKey(self.value.index),
tuple(ValueKey(e) for e in self.value._iter_as_values())))
elif isinstance(self.value, Sample):
self._hash = hash((ValueKey(self.value.value), self.value.clocks, self.value.domain))
elif isinstance(self.value, Initial):
self._hash = 0
else: # :nocov:
raise TypeError("Object {!r} cannot be used as a key in value collections"
.format(self.value))
def __hash__(self):
return self._hash
def __eq__(self, other):
if type(other) is not ValueKey:
return False
if type(self.value) is not type(other.value):
return False
if isinstance(self.value, Const):
return self.value.value == other.value.value
elif isinstance(self.value, (Signal, AnyValue)):
return self.value is other.value
elif isinstance(self.value, (ClockSignal, ResetSignal)):
return self.value.domain == other.value.domain
elif isinstance(self.value, Operator):
return (self.value.operator == other.value.operator and
len(self.value.operands) == len(other.value.operands) and
all(ValueKey(a) == ValueKey(b)
for a, b in zip(self.value.operands, other.value.operands)))
elif isinstance(self.value, Slice):
return (ValueKey(self.value.value) == ValueKey(other.value.value) and
self.value.start == other.value.start and
self.value.stop == other.value.stop)
elif isinstance(self.value, Part):
return (ValueKey(self.value.value) == ValueKey(other.value.value) and
ValueKey(self.value.offset) == ValueKey(other.value.offset) and
self.value.width == other.value.width and
self.value.stride == other.value.stride)
elif isinstance(self.value, Cat):
return all(ValueKey(a) == ValueKey(b)
for a, b in zip(self.value.parts, other.value.parts))
elif isinstance(self.value, ArrayProxy):
return (ValueKey(self.value.index) == ValueKey(other.value.index) and
len(self.value.elems) == len(other.value.elems) and
all(ValueKey(a) == ValueKey(b)
for a, b in zip(self.value._iter_as_values(),
other.value._iter_as_values())))
elif isinstance(self.value, Sample):
return (ValueKey(self.value.value) == ValueKey(other.value.value) and
self.value.clocks == other.value.clocks and
self.value.domain == self.value.domain)
elif isinstance(self.value, Initial):
return True
else: # :nocov:
raise TypeError("Object {!r} cannot be used as a key in value collections"
.format(self.value))
def __lt__(self, other):
if not isinstance(other, ValueKey):
return False
if type(self.value) != type(other.value):
return False
if isinstance(self.value, Const):
return self.value < other.value
elif isinstance(self.value, (Signal, AnyValue)):
return self.value.duid < other.value.duid
elif isinstance(self.value, Slice):
return (ValueKey(self.value.value) < ValueKey(other.value.value) and
self.value.start < other.value.start and
self.value.end < other.value.end)
else: # :nocov:
raise TypeError("Object {!r} cannot be used as a key in value collections")
def __repr__(self):
return "<{}.ValueKey {!r}>".format(__name__, self.value)
class ValueDict(_MappedKeyDict):
_map_key = ValueKey
_unmap_key = lambda self, key: key.value
class ValueSet(_MappedKeySet):
_map_key = ValueKey
_unmap_key = lambda self, key: key.value
class SignalKey:
def __init__(self, signal):
self.signal = signal
if isinstance(signal, Signal):
self._intern = (0, signal.duid)
elif type(signal) is ClockSignal:
self._intern = (1, signal.domain)
elif type(signal) is ResetSignal:
self._intern = (2, signal.domain)
else:
raise TypeError("Object {!r} is not an nMigen signal".format(signal))
def __hash__(self):
return hash(self._intern)
def __eq__(self, other):
if type(other) is not SignalKey:
return False
return self._intern == other._intern
def __lt__(self, other):
if type(other) is not SignalKey:
raise TypeError("Object {!r} cannot be compared to a SignalKey".format(signal))
return self._intern < other._intern
def __repr__(self):
return "<{}.SignalKey {!r}>".format(__name__, self.signal)
class SignalDict(_MappedKeyDict):
_map_key = SignalKey
_unmap_key = lambda self, key: key.signal
class SignalSet(_MappedKeySet):
_map_key = SignalKey
_unmap_key = lambda self, key: key.signal
# --- File: /leet_code/rotten_oranges.py (repo: sahiljajodia01/Competitive-Programming) ---
# https://leetcode.com/problems/rotting-oranges/
from typing import List
####### Straightforward BFS solution ##########
class Solution:
def orangesRotting(self, grid: List[List[int]]) -> int:
q = []
for i in range(len(grid)):
for j in range(len(grid[0])):
if grid[i][j] == 2:
q.append([i, j])
minute = 0
while q != []:
# print(q)
minute += 1
temp = []
for i in range(len(q)):
x, y = q[i]
neighbours = [[1, 0], [0, 1], [-1, 0], [0, -1]]
for n in neighbours:
                    xn, yn = x + n[0], y + n[1]
if xn < 0 or xn >= len(grid) or yn < 0 or yn >= len(grid[0]) or grid[xn][yn] == 0 or grid[xn][yn] == 2:
continue
grid[xn][yn] = 2
if [xn, yn] not in temp:
temp.append([xn, yn])
q = temp
# print(minute)
for i in range(len(grid)):
for j in range(len(grid[0])):
if grid[i][j] == 1:
return -1
if minute == 0:
return 0
        return minute - 1
# --- File: /app/recipe/tests/test_tag_api.py (repo: Gilles00/recipe-app-api, license: MIT) ---
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Tag, Recipe
from recipe.serializers import TagSerializer
TAGS_URL = reverse("recipe:tag-list")
class PublicTagsApiTests(TestCase):
"""Test the publicly available tags API"""
def setUp(self):
self.client = APIClient()
def test_login_required(self):
"""Test that login is requried for retrieving tags"""
res = self.client.get(TAGS_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateTagsApiTests(TestCase):
"""Test the authorized user tags API"""
def setUp(self):
self.user = get_user_model().objects.create_user(
"[email protected]", "password123"
)
self.client = APIClient()
self.client.force_authenticate(self.user)
def test_retrieve_tags(self):
"""Test retrieving tags"""
Tag.objects.create(user=self.user, name="Vegan")
Tag.objects.create(user=self.user, name="Dessert")
res = self.client.get(TAGS_URL)
tags = Tag.objects.all().order_by("-name")
serializer = TagSerializer(tags, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_tags_limited_to_user(self):
"""Test that tags returned are for the authenticated user"""
user2 = get_user_model().objects.create_user("[email protected]", "testpass")
Tag.objects.create(user=user2, name="Fruity")
tag = Tag.objects.create(user=self.user, name="Comfort food")
res = self.client.get(TAGS_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data[0]["name"], tag.name)
def test_create_tag_successful(self):
"""Test creating a new tag"""
payload = {"name": "Test tag"}
self.client.post(TAGS_URL, payload)
exists = Tag.objects.filter(user=self.user, name=payload["name"]).exists()
self.assertTrue(exists)
def test_create_tag_invalid(self):
"""Test creating tag with invalid payload"""
payload = {"name": ""}
res = self.client.post(TAGS_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_retrieve_tags_assigned_to_recipes(self):
"""Test filtering tags by those assigned to recipes"""
tag1 = Tag.objects.create(user=self.user, name="Breakfast")
tag2 = Tag.objects.create(user=self.user, name="lunch")
recipe = Recipe.objects.create(
title="Coriander eggs on toast", time_minutes=10, price=5.00, user=self.user
)
recipe.tags.add(tag1)
res = self.client.get(TAGS_URL, {"assigned_only": 1})
serializer1 = TagSerializer(tag1)
serializer2 = TagSerializer(tag2)
self.assertIn(serializer1.data, res.data)
self.assertNotIn(serializer2.data, res.data)
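# The assigned_only filter exercised above implies a get_queryset() roughly like the
# sketch below (hypothetical; class and attribute names are assumptions, only the
# filtering and ordering follow from the tests in this file):
#
#     def get_queryset(self):
#         assigned_only = bool(int(self.request.query_params.get("assigned_only", 0)))
#         queryset = self.queryset
#         if assigned_only:
#             queryset = queryset.filter(recipe__isnull=False)
#         return queryset.filter(user=self.request.user).order_by("-name")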
# --- File: /tests/tests.py (repo: konradkrasno/db_helper_manager) ---
import unittest
from typing import Dict
from unittest import TestCase
from unittest.mock import patch, call
from db_helper_manager.db_manager import DBManager
from db_helper_manager.exceptions import ApiError
from db_helper_manager.rates import Rate
class Fixture(TestCase):
@staticmethod
def db_config() -> Dict:
return {
"DIALECT": "mysql",
"NAME": "test",
"USER": "test",
"PASSWORD": "test",
"HOST": "localhost",
"PORT": 3306,
}
@staticmethod
def json() -> Dict:
return {
"table": "A",
"currency": "dolar amerykański",
"code": "USD",
"rates": [
{
"no": "087/A/NBP/2021",
"effectiveDate": "2021-05-07",
"mid": 3.7861,
}
],
}
def setUp(self) -> None:
self.manager = DBManager(self.db_config())
def tearDown(self) -> None:
pass
class RateTests(Fixture):
@patch("requests.get")
def test_get_current_rate_in_pln_when_ok(self, mock_get):
mock_get.return_value.status_code = 200
mock_get.return_value.json = self.json
rate = Rate.get_current_rate_in_pln("usd")
self.assertEqual(rate, 3.7861)
@patch("requests.get")
def test_get_current_rate_in_pln_when_error(self, mock_get):
mock_get.return_value.status_code = 404
with self.assertRaises(ApiError):
Rate.get_current_rate_in_pln("usd")
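# The two tests above pin down Rate.get_current_rate_in_pln: a 200 response yields
# rates[0]["mid"], anything else raises ApiError. A minimal sketch consistent with the
# tests (the NBP endpoint URL is an assumption, not taken from this repository):
#
#     @staticmethod
#     def get_current_rate_in_pln(code: str) -> float:
#         response = requests.get(
#             f"http://api.nbp.pl/api/exchangerates/rates/a/{code}/?format=json"
#         )
#         if response.status_code != 200:
#             raise ApiError("NBP API request failed")
#         return response.json()["rates"][0]["mid"]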
class CommandsTests(Fixture):
@patch("db_helper_manager.commands.logger")
@patch("db_helper_manager.commands.text")
@patch("db_helper_manager.commands.UpdateUnitPrices.get_current_rate_in_pln")
@patch("db_helper_manager.commands.Session.execute")
def test_update_unit_prices(self, mock_session, mock_rate, mock_text, mock_logger):
mock_text.return_value = "mocked statement"
mock_rate.return_value = 3.7861
self.manager.update_unit_prices()
mock_session.assert_called_once_with(
"mocked statement",
[
{
"EURORate": 3.7861,
"USDRate": 3.7861,
}
],
)
mock_logger.info.assert_called()
@patch("db_helper_manager.commands.logger")
@patch("db_helper_manager.commands.csv.writer")
@patch("builtins.open")
@patch("db_helper_manager.commands.Session.execute")
def test_fetch_product_data_as_csv(
self, mock_session, mock_open, mock_writer, mock_logger
):
mock_session.return_value = [["fake1"], ["fake2"]]
self.manager.fetch_product_data_as_csv()
mock_open.assert_called_once_with("products.csv", "w", newline="")
expected_calls = [call(self.manager.fields), call(["fake1"]), call(["fake2"])]
mock_writer().writerow.assert_has_calls(expected_calls)
mock_logger.info.assert_called_once_with("Data successfully downloaded.")
@patch("db_helper_manager.commands.UpdateUnitPrices.update_unit_prices")
@patch("db_helper_manager.commands.FetchProductData.fetch_product_data_as_csv")
def test_execute_command(self, mock_fetch_data, mock_update_prices):
self.manager.execute_command("fetch_product_data_as_csv")
mock_fetch_data.assert_called_once()
self.manager.execute_command("update_unit_prices")
mock_update_prices.assert_called_once()
@patch("db_helper_manager.commands.Commands.print_command_list")
def test_execute_command_when_wrong_command(self, mock_print_command_list):
self.manager.execute_command("wrong_command")
mock_print_command_list.assert_called_once()
@patch("builtins.print")
def test_execute_command_when_wrong_args(self, mock_print):
self.manager.execute_command("update_unit_prices", "arg1", "arg2")
mock_print.assert_called_once_with(
"update_unit_prices() takes 1 positional argument but 3 were given"
)
@patch("db_helper_manager.commands.logger")
@patch("db_helper_manager.commands.UpdateUnitPrices.update_unit_prices")
def test_execute_command_when_error(self, mock_update_prices, mock_logger):
mock_update_prices.side_effect = Exception("Exception occurred")
self.manager.execute_command("update_unit_prices")
mock_logger.exception.assert_called_once_with(mock_update_prices.side_effect)
if __name__ == "__main__":
unittest.main()
# --- File: /03_DNN_Mnist_fashion_keras.py (repo: lhs7091/pythonTensor2) ---
from tensorflow.keras.datasets import fashion_mnist
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Flatten  # public Keras path; tensorflow_core internals are not a stable import
import matplotlib.pyplot as plt
import numpy as np
import random
(X_train, y_train), (X_test, y_test) = fashion_mnist.load_data()
labels = {0:'T-shirt/top',
1:'Trouser',
2:'Pullover',
3:'Dress',
4:'Coat',
5:'Sandal',
6:'Shirt',
7:'Sneaker',
8:'Bag',
9:'Ankle Boot'}
# dataset reshape
# array -> category
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
# scaling
X_train = X_train/255.0
X_test = X_test/255.0
# modeling
model = Sequential()
model.add(Dense(784, input_shape=(28,28,), activation='relu'))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.summary()
#compiling models
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# input dataset in model
model.fit(X_train, y_train, batch_size=200, epochs=1, validation_split=0.2)
'''
48000/48000 [==============================] - 22s 462us/sample - loss: 0.2445 - accuracy: 0.9248 - val_loss: 0.1263 - val_accuracy: 0.9633
'''
# evaluation
score = model.evaluate(X_test, y_test)
print(score)
'''
10000/10000 [==============================] - 2s 202us/sample - loss: 0.1285 - accuracy: 0.9611
[0.12847008485868572, 0.9611]
'''
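# Aside: the same test accuracy can be reproduced from the raw predictions (illustrative):
#
#     preds = model.predict(X_test)
#     acc = np.mean(np.argmax(preds, axis=1) == np.argmax(y_test, axis=1))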
# real prediction
prediction = model.predict(X_test)
r = random.randint(0, y_test.shape[0] - 1)  # randint is inclusive, so subtract 1 to avoid an index error
print('label:', labels[np.argmax(y_test[r])])
print('prediction:', labels[np.argmax(prediction[r])])
plt.imshow(X_test[r].reshape(28,28), cmap='binary')
plt.show()
# --- File: /indices/peloponnesu.py (repo: psdh/WhatsintheVector) ---
ii = [('SadlMLP.py', 4), ('LeakWTI2.py', 6), ('LeakWTI3.py', 9)]
# --- File: /tensorflow_asr/featurizers/text_featurizers.py (repo: dathudeptrai/TensorFlowASR, license: Apache-2.0) ---
# Copyright 2020 Huy Le Nguyen (@usimarit)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import abc
import codecs
import unicodedata
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tds
from ..utils.utils import preprocess_paths
ENGLISH_CHARACTERS = [" ", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m",
"n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", "'"]
class TextFeaturizer(metaclass=abc.ABCMeta):
def __init__(self, decoder_config: dict):
self.scorer = None
self.decoder_config = decoder_config
if self.decoder_config.get("vocabulary", None) is not None:
self.decoder_config["vocabulary"] = preprocess_paths(self.decoder_config["vocabulary"])
self.blank = None
self.tokens2indices = {}
self.tokens = []
self.num_classes = None
def preprocess_text(self, text):
text = unicodedata.normalize("NFC", text.lower())
return text.strip("\n") # remove trailing newline
def add_scorer(self, scorer: any = None):
""" Add scorer to this instance """
self.scorer = scorer
def normalize_indices(self, indices: tf.Tensor) -> tf.Tensor:
"""
Remove -1 in indices by replacing them with blanks
Args:
indices (tf.Tensor): shape any
Returns:
tf.Tensor: normalized indices with shape same as indices
"""
with tf.name_scope("normalize_indices"):
minus_one = -1 * tf.ones_like(indices, dtype=tf.int32)
blank_like = self.blank * tf.ones_like(indices, dtype=tf.int32)
return tf.where(indices == minus_one, blank_like, indices)
def prepand_blank(self, text: tf.Tensor) -> tf.Tensor:
""" Prepand blank index for transducer models """
return tf.concat([[self.blank], text], axis=0)
@abc.abstractclassmethod
def extract(self, text):
raise NotImplementedError()
@abc.abstractclassmethod
def iextract(self, indices):
raise NotImplementedError()
@abc.abstractclassmethod
def indices2upoints(self, indices):
raise NotImplementedError()
class CharFeaturizer(TextFeaturizer):
"""
Extract text feature based on char-level granularity.
By looking up the vocabulary table, each line of transcript will be
converted to a sequence of integer indexes.
"""
def __init__(self, decoder_config: dict):
"""
decoder_config = {
"vocabulary": str,
"blank_at_zero": bool,
"beam_width": int,
"lm_config": {
...
}
}
"""
super(CharFeaturizer, self).__init__(decoder_config)
self.__init_vocabulary()
def __init_vocabulary(self):
lines = []
if self.decoder_config.get("vocabulary", None) is not None:
with codecs.open(self.decoder_config["vocabulary"], "r", "utf-8") as fin:
lines.extend(fin.readlines())
else:
lines = ENGLISH_CHARACTERS
self.blank = 0 if self.decoder_config.get("blank_at_zero", True) else None
self.tokens2indices = {}
self.tokens = []
index = 1 if self.blank == 0 else 0
for line in lines:
line = self.preprocess_text(line)
if line.startswith("#") or not line: continue
self.tokens2indices[line[0]] = index
self.tokens.append(line[0])
index += 1
if self.blank is None: self.blank = len(self.tokens) # blank not at zero
self.vocab_array = self.tokens.copy()
self.tokens.insert(self.blank, "") # add blank token to tokens
self.num_classes = len(self.tokens)
self.tokens = tf.convert_to_tensor(self.tokens, dtype=tf.string)
self.upoints = tf.squeeze(
tf.strings.unicode_decode(
self.tokens, "UTF-8").to_tensor(shape=[None, 1])
)
def extract(self, text: str) -> tf.Tensor:
"""
Convert string to a list of integers
Args:
text: string (sequence of characters)
Returns:
sequence of ints in tf.Tensor
"""
text = self.preprocess_text(text)
text = list(text.strip()) # remove trailing space
indices = [self.tokens2indices[token] for token in text]
return tf.convert_to_tensor(indices, dtype=tf.int32)
def iextract(self, indices: tf.Tensor) -> tf.Tensor:
"""
Convert list of indices to string
Args:
indices: tf.Tensor with dim [B, None]
Returns:
transcripts: tf.Tensor of dtype tf.string with dim [B]
"""
indices = self.normalize_indices(indices)
tokens = tf.gather_nd(self.tokens, tf.expand_dims(indices, axis=-1))
with tf.device("/CPU:0"): # string data is not supported on GPU
tokens = tf.strings.reduce_join(tokens, axis=-1)
return tokens
@tf.function(
input_signature=[
tf.TensorSpec([None], dtype=tf.int32)
]
)
def indices2upoints(self, indices: tf.Tensor) -> tf.Tensor:
"""
Transform Predicted Indices to Unicode Code Points (for using tflite)
Args:
indices: tf.Tensor of Classes in shape [None]
Returns:
unicode code points transcript with dtype tf.int32 and shape [None]
"""
with tf.name_scope("indices2upoints"):
indices = self.normalize_indices(indices)
upoints = tf.gather_nd(self.upoints, tf.expand_dims(indices, axis=-1))
return upoints
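# Illustrative usage of CharFeaturizer (config keys mirror the docstring above; with
# "vocabulary" set to None the built-in English character set is used):
#
#     featurizer = CharFeaturizer({"vocabulary": None, "blank_at_zero": True})
#     ids = featurizer.extract("hello")                     # tf.int32 indices
#     text = featurizer.iextract(tf.expand_dims(ids, 0))    # back to a tf.string batch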
class SubwordFeaturizer(TextFeaturizer):
"""
Extract text feature based on char-level granularity.
By looking up the vocabulary table, each line of transcript will be
converted to a sequence of integer indexes.
"""
def __init__(self, decoder_config: dict, subwords=None):
"""
decoder_config = {
"target_vocab_size": int,
"max_subword_length": 4,
"max_corpus_chars": None,
"reserved_tokens": None,
"beam_width": int,
"lm_config": {
...
}
}
"""
super(SubwordFeaturizer, self).__init__(decoder_config)
self.subwords = subwords
self.blank = 0 # subword treats blank as 0
self.num_classes = self.subwords.vocab_size
# create upoints
self.__init_upoints()
def __init_upoints(self):
text = [""]
for idx in np.arange(1, self.num_classes, dtype=np.int32):
text.append(self.subwords.decode([idx]))
self.upoints = tf.strings.unicode_decode(text, "UTF-8")
self.upoints = self.upoints.to_tensor() # [num_classes, max_subword_length]
@classmethod
def build_from_corpus(cls, decoder_config: dict, corpus_files: list):
def corpus_generator():
for file in corpus_files:
with open(file, "r", encoding="utf-8") as f:
lines = f.read().splitlines()
lines = lines[1:]
for line in lines:
line = line.split("\t")
yield line[-1]
subwords = tds.features.text.SubwordTextEncoder.build_from_corpus(
corpus_generator(),
decoder_config.get("target_vocab_size", 1024),
decoder_config.get("max_subword_length", 4),
decoder_config.get("max_corpus_chars", None),
decoder_config.get("reserved_tokens", None)
)
return cls(decoder_config, subwords)
@classmethod
def load_from_file(cls, decoder_config: dict, filename: str = None):
if filename is not None:
filename_prefix = os.path.splitext(preprocess_paths(filename))[0]
else:
filename_prefix = decoder_config.get("vocabulary", None)
subwords = tds.features.text.SubwordTextEncoder.load_from_file(filename_prefix)
return cls(decoder_config, subwords)
def extract(self, text: str) -> tf.Tensor:
"""
Convert string to a list of integers
Args:
text: string (sequence of characters)
Returns:
sequence of ints in tf.Tensor
"""
text = self.preprocess_text(text)
text = text.strip() # remove trailing space
indices = self.subwords.encode(text)
return tf.convert_to_tensor(indices, dtype=tf.int32)
def iextract(self, indices: tf.Tensor) -> tf.Tensor:
"""
Convert list of indices to string
Args:
indices: tf.Tensor with dim [B, None]
Returns:
transcripts: tf.Tensor of dtype tf.string with dim [B]
"""
indices = self.normalize_indices(indices)
with tf.device("/CPU:0"): # string data is not supported on GPU
def decode(x):
if x[0] == self.blank: x = x[1:]
return self.subwords.decode(x)
text = tf.map_fn(
lambda x: tf.numpy_function(decode, inp=[x], Tout=tf.string),
indices,
fn_output_signature=tf.TensorSpec([], dtype=tf.string)
)
return text
@tf.function(
input_signature=[
tf.TensorSpec([None], dtype=tf.int32)
]
)
def indices2upoints(self, indices: tf.Tensor) -> tf.Tensor:
"""
Transform Predicted Indices to Unicode Code Points (for using tflite)
Args:
indices: tf.Tensor of Classes in shape [None]
Returns:
unicode code points transcript with dtype tf.int32 and shape [None]
"""
with tf.name_scope("indices2upoints"):
indices = self.normalize_indices(indices)
upoints = tf.gather_nd(self.upoints, tf.expand_dims(indices, axis=-1))
# upoints now has shape [None, max_subword_length]
shape = tf.shape(upoints)
return tf.reshape(upoints, [shape[0] * shape[1]]) # flatten
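# Illustrative usage of SubwordFeaturizer (file names are placeholders; build_from_corpus
# expects tab-separated transcript files whose last column holds the text, as the
# corpus_generator above shows):
#
#     config = {"target_vocab_size": 1024, "max_subword_length": 4}
#     featurizer = SubwordFeaturizer.build_from_corpus(config, ["train.tsv"])
#     ids = featurizer.extract("hello world")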
# --- File: /tests/hwsim/test_wmediumd.py (repo: vanhoefm/krackattacks-scripts, license: BSD-2-Clause/BSD-3-Clause) ---
# wmediumd sanity checks
# Copyright (c) 2015, Intel Deutschland GmbH
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
import tempfile, os, subprocess, errno, hwsim_utils, time
from utils import HwsimSkip
from wpasupplicant import WpaSupplicant
from tshark import run_tshark
from test_ap_open import _test_ap_open
from test_wpas_mesh import check_mesh_support, check_mesh_group_added
from test_wpas_mesh import check_mesh_peer_connected, add_open_mesh_network
from test_wpas_mesh import check_mesh_group_removed
class LocalVariables:
revs = []
CFG = """
ifaces :
{
ids = ["%s", "%s" ];
links = (
(0, 1, 30)
);
};
"""
CFG2 = """
ifaces :
{
ids = ["%s", "%s", "%s"];
};
model:
{
type = "prob";
links = (
(0, 1, 0.000000),
(0, 2, 0.000000),
(1, 2, 1.000000)
);
};
"""
CFG3 = """
ifaces :
{
ids = ["%s", "%s", "%s", "%s", "%s" ];
};
model:
{
type = "prob";
default_prob = 1.0;
links = (
(0, 1, 0.000000),
(1, 2, 0.000000),
(2, 3, 0.000000),
(3, 4, 0.000000)
);
};
"""
def get_wmediumd_version():
if len(LocalVariables.revs) > 0:
return LocalVariables.revs;
try:
verstr = subprocess.check_output(['wmediumd', '-V'])
except OSError, e:
if e.errno == errno.ENOENT:
raise HwsimSkip('wmediumd not available')
raise
vernum = verstr.split(' ')[1][1:]
LocalVariables.revs = vernum.split('.')
for i in range(0, len(LocalVariables.revs)):
LocalVariables.revs[i] = int(LocalVariables.revs[i])
while len(LocalVariables.revs) < 3:
LocalVariables.revs += [0]
return LocalVariables.revs;
def require_wmediumd_version(major, minor, patch):
revs = get_wmediumd_version()
if revs[0] < major or revs[1] < minor or revs[2] < patch:
raise HwsimSkip('wmediumd v%s.%s.%s is too old for this test' %
(revs[0], revs[1], revs[2]))
def output_wmediumd_log(p, params, data):
log_file = open(os.path.abspath(os.path.join(params['logdir'],
'wmediumd.log')), 'a')
log_file.write(data)
log_file.close()
def start_wmediumd(fn, params):
try:
p = subprocess.Popen(['wmediumd', '-c', fn],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
except OSError, e:
if e.errno == errno.ENOENT:
raise HwsimSkip('wmediumd not available')
raise
logs = ''
while True:
line = p.stdout.readline()
if not line:
output_wmediumd_log(p, params, logs)
raise Exception('wmediumd was terminated unexpectedly')
if line.find('REGISTER SENT!') > -1:
break
logs += line
return p
def stop_wmediumd(p, params):
p.terminate()
p.wait()
stdoutdata, stderrdata = p.communicate()
output_wmediumd_log(p, params, stdoutdata)
def test_wmediumd_simple(dev, apdev, params):
"""test a simple wmediumd configuration"""
fd, fn = tempfile.mkstemp()
try:
f = os.fdopen(fd, 'w')
f.write(CFG % (apdev[0]['bssid'], dev[0].own_addr()))
f.close()
p = start_wmediumd(fn, params)
try:
_test_ap_open(dev, apdev)
finally:
stop_wmediumd(p, params)
# test that releasing hwsim works correctly
_test_ap_open(dev, apdev)
finally:
os.unlink(fn)
def test_wmediumd_path_simple(dev, apdev, params):
"""test a mesh path"""
# 0 and 1 is connected
# 0 and 2 is connected
# 1 and 2 is not connected
# 1 --- 0 --- 2
# | |
# +-----X-----+
# This tests if 1 and 2 can communicate each other via 0.
require_wmediumd_version(0, 3, 1)
fd, fn = tempfile.mkstemp()
try:
f = os.fdopen(fd, 'w')
f.write(CFG2 % (dev[0].own_addr(), dev[1].own_addr(),
dev[2].own_addr()))
f.close()
p = start_wmediumd(fn, params)
try:
_test_wmediumd_path_simple(dev, apdev)
finally:
stop_wmediumd(p, params)
finally:
os.unlink(fn)
def _test_wmediumd_path_simple(dev, apdev):
for i in range(0, 3):
check_mesh_support(dev[i])
add_open_mesh_network(dev[i], freq="2462", basic_rates="60 120 240")
# Check for mesh joined
for i in range(0, 3):
check_mesh_group_added(dev[i])
state = dev[i].get_status_field("wpa_state")
if state != "COMPLETED":
raise Exception("Unexpected wpa_state on dev" + str(i) + ": " + state)
mode = dev[i].get_status_field("mode")
if mode != "mesh":
raise Exception("Unexpected mode: " + mode)
# Check for peer connected
check_mesh_peer_connected(dev[0])
check_mesh_peer_connected(dev[0])
check_mesh_peer_connected(dev[1])
check_mesh_peer_connected(dev[2])
# Test connectivity 1->2 and 2->1
hwsim_utils.test_connectivity(dev[1], dev[2])
# Check mpath table on 0
res, data = dev[0].cmd_execute(['iw', dev[0].ifname, 'mpath', 'dump'])
if res != 0:
raise Exception("iw command failed on dev0")
if data.find(dev[1].own_addr() + ' ' + dev[1].own_addr()) == -1 or \
data.find(dev[2].own_addr() + ' ' + dev[2].own_addr()) == -1:
raise Exception("mpath not found on dev0:\n" + data)
if data.find(dev[0].own_addr()) > -1:
raise Exception("invalid mpath found on dev0:\n" + data)
# Check mpath table on 1
res, data = dev[1].cmd_execute(['iw', dev[1].ifname, 'mpath', 'dump'])
if res != 0:
raise Exception("iw command failed on dev1")
if data.find(dev[0].own_addr() + ' ' + dev[0].own_addr()) == -1 or \
data.find(dev[2].own_addr() + ' ' + dev[0].own_addr()) == -1:
raise Exception("mpath not found on dev1:\n" + data)
if data.find(dev[2].own_addr() + ' ' + dev[2].own_addr()) > -1 or \
data.find(dev[1].own_addr()) > -1:
raise Exception("invalid mpath found on dev1:\n" + data)
# Check mpath table on 2
res, data = dev[2].cmd_execute(['iw', dev[2].ifname, 'mpath', 'dump'])
if res != 0:
raise Exception("iw command failed on dev2")
if data.find(dev[0].own_addr() + ' ' + dev[0].own_addr()) == -1 or \
data.find(dev[1].own_addr() + ' ' + dev[0].own_addr()) == -1:
raise Exception("mpath not found on dev2:\n" + data)
if data.find(dev[1].own_addr() + ' ' + dev[1].own_addr()) > -1 or \
data.find(dev[2].own_addr()) > -1:
raise Exception("invalid mpath found on dev2:\n" + data)
# remove mesh groups
for i in range(0, 3):
dev[i].mesh_group_remove()
check_mesh_group_removed(dev[i])
dev[i].dump_monitor()
def test_wmediumd_path_ttl(dev, apdev, params):
"""Mesh path request TTL"""
# 0 --- 1 --- 2 --- 3 --- 4
# Test the TTL of mesh path request.
# If the TTL is shorter than path, the mesh path request should be dropped.
require_wmediumd_version(0, 3, 1)
local_dev = []
for i in range(0, 3):
local_dev.append(dev[i])
for i in range(5, 7):
wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5')
wpas.interface_add("wlan" + str(i))
check_mesh_support(wpas)
temp_dev = wpas.request("MESH_INTERFACE_ADD ifname=mesh" + str(i))
if "FAIL" in temp_dev:
raise Exception("MESH_INTERFACE_ADD failed")
local_dev.append(WpaSupplicant(ifname=temp_dev))
fd, fn = tempfile.mkstemp()
try:
f = os.fdopen(fd, 'w')
f.write(CFG3 % (local_dev[0].own_addr(), local_dev[1].own_addr(),
local_dev[2].own_addr(), local_dev[3].own_addr(),
local_dev[4].own_addr()))
f.close()
p = start_wmediumd(fn, params)
try:
_test_wmediumd_path_ttl(local_dev, True)
_test_wmediumd_path_ttl(local_dev, False)
finally:
stop_wmediumd(p, params)
finally:
os.unlink(fn)
for i in range(5, 7):
wpas.interface_remove("wlan" + str(i))
def _test_wmediumd_path_ttl(dev, ok):
for i in range(0, 5):
check_mesh_support(dev[i])
add_open_mesh_network(dev[i], freq="2462", basic_rates="60 120 240")
# Check for mesh joined
for i in range(0, 5):
check_mesh_group_added(dev[i])
state = dev[i].get_status_field("wpa_state")
if state != "COMPLETED":
raise Exception("Unexpected wpa_state on dev" + str(i) + ": " + state)
mode = dev[i].get_status_field("mode")
if mode != "mesh":
raise Exception("Unexpected mode: " + mode)
# set mesh path request ttl
subprocess.check_call([ "iw", "dev", dev[0].ifname, "set", "mesh_param",
"mesh_element_ttl=" + ("4" if ok else "3") ])
# Check for peer connected
for i in range(0, 5):
check_mesh_peer_connected(dev[i])
for i in range(1, 4):
check_mesh_peer_connected(dev[i])
    # Test connectivity 0->4 and 4->0
hwsim_utils.test_connectivity(dev[0], dev[4], success_expected=ok)
# Check mpath table on 0
res, data = dev[0].cmd_execute(['iw', dev[0].ifname, 'mpath', 'dump'])
if res != 0:
raise Exception("iw command failed on dev0")
if ok:
if data.find(dev[1].own_addr() + ' ' + dev[1].own_addr()) == -1 or \
data.find(dev[4].own_addr() + ' ' + dev[1].own_addr()) == -1:
raise Exception("mpath not found on dev0:\n" + data)
else:
if data.find(dev[1].own_addr() + ' ' + dev[1].own_addr()) == -1 or \
data.find(dev[4].own_addr() + ' 00:00:00:00:00:00') == -1:
raise Exception("mpath not found on dev0:\n" + data)
if data.find(dev[0].own_addr()) > -1 or \
data.find(dev[2].own_addr()) > -1 or \
data.find(dev[3].own_addr()) > -1:
raise Exception("invalid mpath found on dev0:\n" + data)
# remove mesh groups
    for i in range(0, 5):
dev[i].mesh_group_remove()
check_mesh_group_removed(dev[i])
dev[i].dump_monitor()
def test_wmediumd_path_rann(dev, apdev, params):
"""Mesh path with RANN"""
# 0 and 1 is connected
# 0 and 2 is connected
# 1 and 2 is not connected
# 2 is mesh root and RANN enabled
# 1 --- 0 --- 2
# | |
# +-----X-----+
    # This tests if 1 and 2 can communicate with each other via 0.
require_wmediumd_version(0, 3, 1)
fd, fn = tempfile.mkstemp()
try:
f = os.fdopen(fd, 'w')
f.write(CFG2 % (dev[0].own_addr(), dev[1].own_addr(),
dev[2].own_addr()))
f.close()
p = start_wmediumd(fn, params)
try:
_test_wmediumd_path_rann(dev, apdev)
finally:
stop_wmediumd(p, params)
finally:
os.unlink(fn)
capfile = os.path.join(params['logdir'], "hwsim0.pcapng")
# check Root STA address in root announcement element
filt = "wlan.fc.type_subtype == 0x000d && " + \
"wlan_mgt.fixed.mesh_action == 0x01 && " + \
"wlan_mgt.tag.number == 126"
out = run_tshark(capfile, filt, [ "wlan.rann.root_sta" ])
if out is None:
raise Exception("No captured data found\n")
if out.find(dev[2].own_addr()) == -1 or \
out.find(dev[0].own_addr()) > -1 or \
out.find(dev[1].own_addr()) > -1:
raise Exception("RANN should be sent by dev2 only:\n" + out)
# check RANN interval is in range
filt = "wlan.sa == 02:00:00:00:02:00 && " + \
"wlan.fc.type_subtype == 0x000d && " + \
"wlan_mgt.fixed.mesh_action == 0x01 && " + \
"wlan_mgt.tag.number == 126"
out = run_tshark(capfile, filt, [ "frame.time_relative" ])
if out is None:
raise Exception("No captured data found\n")
lines = out.splitlines()
prev = float(lines[len(lines) - 1])
for i in reversed(range(1, len(lines) - 1)):
now = float(lines[i])
if prev - now < 1.0 or 3.0 < prev - now:
raise Exception("RANN interval " + str(prev - now) +
"(sec) should be close to 2.0(sec)\n")
prev = now
# check no one uses broadcast path request
filt = "wlan.da == ff:ff:ff:ff:ff:ff && " + \
"wlan.fc.type_subtype == 0x000d && " + \
"wlan_mgt.fixed.mesh_action == 0x01 && " + \
"wlan_mgt.tag.number == 130"
out = run_tshark(capfile, filt, [ "wlan.sa", "wlan.da" ])
if out is None:
raise Exception("No captured data found\n")
if len(out) > 0:
raise Exception("invalid broadcast path requests\n" + out)
def _test_wmediumd_path_rann(dev, apdev):
for i in range(0, 3):
check_mesh_support(dev[i])
add_open_mesh_network(dev[i], freq="2462", basic_rates="60 120 240")
# Check for mesh joined
for i in range(0, 3):
check_mesh_group_added(dev[i])
state = dev[i].get_status_field("wpa_state")
if state != "COMPLETED":
raise Exception("Unexpected wpa_state on dev" + str(i) + ": " + state)
mode = dev[i].get_status_field("mode")
if mode != "mesh":
raise Exception("Unexpected mode: " + mode)
# set node 2 as RANN supported root
subprocess.check_call(["iw", "dev", dev[0].ifname, "set", "mesh_param",
"mesh_hwmp_rootmode=0"])
subprocess.check_call(["iw", "dev", dev[1].ifname, "set", "mesh_param",
"mesh_hwmp_rootmode=0"])
subprocess.check_call(["iw", "dev", dev[2].ifname, "set", "mesh_param",
"mesh_hwmp_rootmode=4"])
subprocess.check_call(["iw", "dev", dev[2].ifname, "set", "mesh_param",
"mesh_hwmp_rann_interval=2000"])
# Check for peer connected
check_mesh_peer_connected(dev[0])
check_mesh_peer_connected(dev[0])
check_mesh_peer_connected(dev[1])
check_mesh_peer_connected(dev[2])
# Wait for RANN frame
time.sleep(10)
# Test connectivity 1->2 and 2->1
hwsim_utils.test_connectivity(dev[1], dev[2])
# Check mpath table on 0
res, data = dev[0].cmd_execute(['iw', dev[0].ifname, 'mpath', 'dump'])
if res != 0:
raise Exception("iw command failed on dev0")
if data.find(dev[1].own_addr() + ' ' + dev[1].own_addr()) == -1 or \
data.find(dev[2].own_addr() + ' ' + dev[2].own_addr()) == -1:
raise Exception("mpath not found on dev0:\n" + data)
if data.find(dev[0].own_addr()) > -1:
raise Exception("invalid mpath found on dev0:\n" + data)
# Check mpath table on 1
res, data = dev[1].cmd_execute(['iw', dev[1].ifname, 'mpath', 'dump'])
if res != 0:
raise Exception("iw command failed on dev1")
if data.find(dev[0].own_addr() + ' ' + dev[0].own_addr()) == -1 or \
data.find(dev[2].own_addr() + ' ' + dev[0].own_addr()) == -1:
raise Exception("mpath not found on dev1:\n" + data)
if data.find(dev[2].own_addr() + ' ' + dev[2].own_addr()) > -1 or \
data.find(dev[1].own_addr()) > -1:
raise Exception("invalid mpath found on dev1:\n" + data)
# Check mpath table on 2
res, data = dev[2].cmd_execute(['iw', dev[2].ifname, 'mpath', 'dump'])
if res != 0:
raise Exception("iw command failed on dev2")
if data.find(dev[0].own_addr() + ' ' + dev[0].own_addr()) == -1 or \
data.find(dev[1].own_addr() + ' ' + dev[0].own_addr()) == -1:
raise Exception("mpath not found on dev2:\n" + data)
if data.find(dev[1].own_addr() + ' ' + dev[1].own_addr()) > -1 or \
data.find(dev[2].own_addr()) > -1:
raise Exception("invalid mpath found on dev2:\n" + data)
# remove mesh groups
for i in range(0, 3):
dev[i].mesh_group_remove()
check_mesh_group_removed(dev[i])
dev[i].dump_monitor()
| [
"[email protected]"
] | |
f3c08e3555be0e41eba665d9209acaa6a5e26fc8 | 428328eaaf4011aac9e8cc4aba9329c17fcca4fa | /tests/snippets/chips:newdeco.py | 1cb44b66734e36aeda5fdec18968c77e8f5d508a | [
"MIT"
] | permissive | mfkiwl/jurigged | 8d732bdf09d43c633241d252228596e39af232e7 | 34459a5fd3300447ecbc1f943a9939d350639083 | refs/heads/master | 2023-09-03T22:15:44.480398 | 2021-11-11T15:24:48 | 2021-11-11T15:24:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 83 | py |
def crunch(fn):
def deco(*args):
return fn(*args) + 2
return deco
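# Usage sketch for the decorator above (illustrative only; `double` and the
# assertion below are hypothetical additions, not part of the original snippet):
if __name__ == "__main__":
    @crunch
    def double(x):
        return x * 2

    assert double(3) == 8  # fn(*args) + 2 == 6 + 2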
| [
"[email protected]"
] | |
ebb32f52c472bcc16a1678079beff87a7a0bae7c | 4b3883b1591987c6fbcddab7fd2e15d7b7243861 | /books/effectivepy-v2/chap7/ad52/encrypt_data.py | 9c4eeb95ad2d0770adbea5f23a8c6f6674edb294 | [
"MIT"
] | permissive | MerleLiuKun/my-python | 41bdbf6e05281728a824e04da043902dc0fd4698 | 0bec138cc6a9870ca47e0e62e9b92d50fb6cb3d8 | refs/heads/master | 2021-06-11T10:22:44.948418 | 2021-06-02T13:08:18 | 2021-06-02T13:08:18 | 160,791,142 | 1 | 1 | MIT | 2020-05-19T15:10:00 | 2018-12-07T07:58:29 | Python | UTF-8 | Python | false | false | 1,231 | py | """
"""
import os
import subprocess
def run_encrypt(data):
env = os.environ.copy()
env["password"] = "zf7ShyBhZOraQDdE/FiZpm/m/8f9X+M1"
proc = subprocess.Popen(
["openssl", "enc", "-des3", "-pass", "env:password"],
env=env,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
)
proc.stdin.write(data)
proc.stdin.flush()
return proc
# procs = []
# for _ in range(3):
# data = os.urandom(10)
# proc = run_encrypt(data)
# procs.append(proc)
#
# for proc in procs:
# out, _ = proc.communicate()
# print(out[-10:])
def run_hash(input_stdin):
return subprocess.Popen(
["openssl", "dgst", "-whirlpool", "-binary"],
stdin=input_stdin,
stdout=subprocess.PIPE,
)
encrypt_procs = []
hash_procs = []
for _ in range(10):
data = os.urandom(100)
e_proc = run_encrypt(data)
encrypt_procs.append(e_proc)
h_proc = run_hash(e_proc.stdout)
hash_procs.append(h_proc)
e_proc.stdout.close()
e_proc.stdout = None
for proc in encrypt_procs:
proc.communicate()
assert proc.returncode == 0
for proc in hash_procs:
out, _ = proc.communicate()
print(out[-10:])
assert proc.returncode == 0
| [
"[email protected]"
] | |
aaf296fd3b0614fc9cefe51e775d229b510d5319 | cf152c053f2cedf819b81c1a746db87e07fe5ded | /DL/day5_buy_apple.py | 8e98900ac8a85f86c337ee4b50fefe7e84907b89 | [] | no_license | aorura/tensorProject | db684e0b4aa46d9d4afd1c4b9f5bed8aa99a4c06 | 014db9a8ae8c76299c99f77dafb401cf2e86a3cc | refs/heads/master | 2020-03-08T02:18:56.227130 | 2018-04-06T07:48:41 | 2018-04-06T07:48:41 | 127,855,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 469 | py | from day5_layer_naive import *
apple=100
apple_num=2
tax=1.1
mul_apple_layer=MultiLayer()
mul_tax_layer=MultiLayer()
#forward
apple_price=mul_apple_layer.forward(apple, apple_num)
price=mul_tax_layer.forward(apple_price, tax)
#backward
dprice=1
dapple_price, dtax=mul_tax_layer.backward(dprice)
dapple, dapple_num=mul_apple_layer.backward(dapple_price)
print("price:", int(price))
print("dapple:", dapple)
print("dapple_num:", int(dapple_num))
print("dtax:", dtax) | [
"[email protected]"
] | |
a261e211d5e2370478f6596bbdebbf0796e8ac1e | 725ac5a0bf72829be627bf8dc82fdc51ba0f94ae | /Text_Generation/GPT2_SummaryGen/transformers/tokenization_albert.py | aca0467d085b7801616e74d05a72e5647ffc3490 | [] | no_license | shawroad/NLP_pytorch_project | fa14b6e4a156229765e1d552901d0492d8e1def3 | 1272fed2dc8fef78a9ded0f1ae1644d613a3b57b | refs/heads/master | 2023-06-25T02:37:35.503251 | 2023-06-12T10:57:11 | 2023-06-12T10:57:11 | 229,694,655 | 530 | 104 | null | 2020-12-08T09:21:47 | 2019-12-23T06:54:29 | Python | UTF-8 | Python | false | false | 10,191 | py | # coding=utf-8
# Copyright 2018 Google AI, Google Brain and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tokenization classes for ALBERT model."""
import logging
import os
import unicodedata
from shutil import copyfile
from .tokenization_utils import PreTrainedTokenizer
logger = logging.getLogger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"albert-base-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-base-spiece.model",
"albert-large-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-large-spiece.model",
"albert-xlarge-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xlarge-spiece.model",
"albert-xxlarge-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xxlarge-spiece.model",
"albert-base-v2": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-base-v2-spiece.model",
"albert-large-v2": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-large-v2-spiece.model",
"albert-xlarge-v2": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xlarge-v2-spiece.model",
"albert-xxlarge-v2": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xxlarge-v2-spiece.model",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"albert-base-v1": 512,
"albert-large-v1": 512,
"albert-xlarge-v1": 512,
"albert-xxlarge-v1": 512,
"albert-base-v2": 512,
"albert-large-v2": 512,
"albert-xlarge-v2": 512,
"albert-xxlarge-v2": 512,
}
SPIECE_UNDERLINE = "▁"
class AlbertTokenizer(PreTrainedTokenizer):
"""
SentencePiece based tokenizer. Peculiarities:
- requires `SentencePiece <https://github.com/google/sentencepiece>`_
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__(
self,
vocab_file,
do_lower_case=True,
remove_space=True,
keep_accents=False,
bos_token="[CLS]",
eos_token="[SEP]",
unk_token="<unk>",
sep_token="[SEP]",
pad_token="<pad>",
cls_token="[CLS]",
mask_token="[MASK]",
**kwargs
):
super(AlbertTokenizer, self).__init__(
bos_token=bos_token,
eos_token=eos_token,
unk_token=unk_token,
sep_token=sep_token,
pad_token=pad_token,
cls_token=cls_token,
mask_token=mask_token,
**kwargs
)
self.max_len_single_sentence = self.max_len - 2 # take into account special tokens
self.max_len_sentences_pair = self.max_len - 3 # take into account special tokens
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use AlbertTokenizer: https://github.com/google/sentencepiece"
"pip install sentencepiece"
)
raise
self.do_lower_case = do_lower_case
self.remove_space = remove_space
self.keep_accents = keep_accents
self.vocab_file = vocab_file
self.sp_model = spm.SentencePieceProcessor()
self.sp_model.Load(vocab_file)
@property
def vocab_size(self):
return len(self.sp_model)
def __getstate__(self):
state = self.__dict__.copy()
state["sp_model"] = None
return state
def __setstate__(self, d):
self.__dict__ = d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use AlbertTokenizer: https://github.com/google/sentencepiece"
"pip install sentencepiece"
)
raise
self.sp_model = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file)
def preprocess_text(self, inputs):
if self.remove_space:
outputs = " ".join(inputs.strip().split())
else:
outputs = inputs
outputs = outputs.replace("``", '"').replace("''", '"')
if not self.keep_accents:
outputs = unicodedata.normalize("NFKD", outputs)
outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
if self.do_lower_case:
outputs = outputs.lower()
return outputs
def _tokenize(self, text, sample=False):
""" Tokenize a string. """
text = self.preprocess_text(text)
if not sample:
pieces = self.sp_model.EncodeAsPieces(text)
else:
pieces = self.sp_model.SampleEncodeAsPieces(text, 64, 0.1)
new_pieces = []
for piece in pieces:
if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0]) == 1:
cur_pieces = cur_pieces[1:]
else:
cur_pieces[0] = cur_pieces[0][1:]
cur_pieces.append(piece[-1])
new_pieces.extend(cur_pieces)
else:
new_pieces.append(piece)
return new_pieces
def _convert_token_to_id(self, token):
""" Converts a token (str) in an id using the vocab. """
return self.sp_model.PieceToId(token)
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
return self.sp_model.IdToPiece(index)
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (strings for sub-words) in a single string."""
out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
return out_string
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks
by concatenating and adding special tokens.
An ALBERT sequence has the following format:
single sequence: [CLS] X [SEP]
pair of sequences: [CLS] A [SEP] B [SEP]
"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return cls + token_ids_0 + sep
return cls + token_ids_0 + sep + token_ids_1 + sep
def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
"""
Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer ``prepare_for_model`` or ``encode_plus`` methods.
Args:
token_ids_0: list of ids (must not contain special tokens)
token_ids_1: Optional list of ids (must not contain special tokens), necessary when fetching sequence ids
for sequence pairs
already_has_special_tokens: (default False) Set to True if the token list is already formated with
special tokens for the model
Returns:
A list of integers in the range [0, 1]: 0 for a special token, 1 for a sequence token.
"""
if already_has_special_tokens:
if token_ids_1 is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formated with special tokens for the model."
)
return list(map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0, token_ids_0))
if token_ids_1 is not None:
return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
return [1] + ([0] * len(token_ids_0)) + [1]
def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
"""
Creates a mask from the two sequences passed to be used in a sequence-pair classification task.
An ALBERT sequence pair mask has the following format:
0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1
| first sequence | second sequence
if token_ids_1 is None, only returns the first portion of the mask (0's).
"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
def save_vocabulary(self, save_directory):
""" Save the sentencepiece vocabulary (copy original file) and special tokens file
to a directory.
"""
if not os.path.isdir(save_directory):
logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
return
out_vocab_file = os.path.join(save_directory, VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
copyfile(self.vocab_file, out_vocab_file)
return (out_vocab_file,)
| [
"[email protected]"
] | |
453723dc7712c3b5489ebdf23891d325b1e539bd | 5864e86954a221d52d4fa83a607c71bacf201c5a | /cherrypy/wsgiserver/ssl_builtin.py | 3366c31bd7cd1a1aab714c33fc208694974566ad | [] | no_license | connoryang/1v1dec | e9a2303a01e5a26bf14159112b112be81a6560fd | 404f2cebf13b311e754d45206008918881496370 | refs/heads/master | 2021-05-04T02:34:59.627529 | 2016-10-19T08:56:26 | 2016-10-19T08:56:26 | 71,334,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,563 | py | #Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\carbon\common\lib\cherrypy\wsgiserver\ssl_builtin.py
try:
import ssl
except ImportError:
ssl = None
from cherrypy import wsgiserver
class BuiltinSSLAdapter(wsgiserver.SSLAdapter):
certificate = None
private_key = None
def __init__(self, certificate, private_key, certificate_chain = None):
if ssl is None:
raise ImportError('You must install the ssl module to use HTTPS.')
self.certificate = certificate
self.private_key = private_key
self.certificate_chain = certificate_chain
def bind(self, sock):
return sock
def wrap(self, sock):
try:
s = ssl.wrap_socket(sock, do_handshake_on_connect=True, server_side=True, certfile=self.certificate, keyfile=self.private_key, ssl_version=ssl.PROTOCOL_SSLv23)
except ssl.SSLError as e:
if e.errno == ssl.SSL_ERROR_EOF:
return (None, {})
if e.errno == ssl.SSL_ERROR_SSL:
if e.args[1].endswith('http request'):
raise wsgiserver.NoSSLError
raise
return (s, self.get_environ(s))
def get_environ(self, sock):
cipher = sock.cipher()
ssl_environ = {'wsgi.url_scheme': 'https',
'HTTPS': 'on',
'SSL_PROTOCOL': cipher[1],
'SSL_CIPHER': cipher[0]}
return ssl_environ
def makefile(self, sock, mode = 'r', bufsize = -1):
return wsgiserver.CP_fileobject(sock, mode, bufsize)
| [
"[email protected]"
] | |
1776ab7f61cbb93d36038bca3bd7e5da8f95f1e4 | 9e36b3a0a609f862aa2894a1473896c8465c41a1 | /arelle/DialogOpenTaxonomyPackage.py | 0668077126553bbf9d035a6cfe3ce7daedf03840 | [
"Apache-2.0"
] | permissive | marado/Arelle | 7cd74a66d19be174c9f1fe66f788dd53447bffac | 7ca2bf09c852787cd7a38d68b13c11d5e33e72a2 | refs/heads/master | 2020-04-08T00:27:54.212337 | 2011-10-28T17:49:49 | 2011-10-28T17:49:49 | 2,658,979 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,313 | py | '''
Open Taxonomy Package Dialog: Reads the metadata and prompts the user to pick an entry point.
'''
from tkinter import *
from tkinter.ttk import *
from tkinter import messagebox
import re, os
from arelle.CntlrWinTooltip import ToolTip
TAXONOMY_PACKAGE_FILE_NAME = '.taxonomyPackage.xml'
ARCHIVE = 1
DISCLOSURE_SYSTEM = 2
def askForEntryPoint(mainWin, filesource):
filenames = filesource.dir
if filenames is not None: # an IO or other error can return None
dialog = DialogOpenTaxonomyPackage(mainWin,
ARCHIVE,
filesource,
filenames,
_("Select Entry Point"))
if dialog.accepted:
if dialog.webUrl:
return dialog.webUrl
else:
return filesource.url
return None
class DialogOpenTaxonomyPackage(Toplevel):
def __init__(self, mainWin, openType, filesource, filenames, title):
parent = mainWin.parent
super().__init__(parent)
self.parent = parent
parentGeometry = re.match("(\d+)x(\d+)[+]?([-]?\d+)[+]?([-]?\d+)", parent.geometry())
dialogX = int(parentGeometry.group(3))
dialogY = int(parentGeometry.group(4))
self.accepted = False
self.webUrl = None
self.transient(self.parent)
self.title(title)
frame = Frame(self)
treeFrame = Frame(frame, width=500)
vScrollbar = Scrollbar(treeFrame, orient=VERTICAL)
hScrollbar = Scrollbar(treeFrame, orient=HORIZONTAL)
self.treeView = Treeview(treeFrame, xscrollcommand=hScrollbar.set, yscrollcommand=vScrollbar.set, columns=2)
self.treeView.grid(row=0, column=0, sticky=(N, S, E, W))
hScrollbar["command"] = self.treeView.xview
hScrollbar.grid(row=1, column=0, sticky=(E, W))
vScrollbar["command"] = self.treeView.yview
vScrollbar.grid(row=0, column=1, sticky=(N, S))
treeFrame.columnconfigure(0, weight=1)
treeFrame.rowconfigure(0, weight=1)
treeFrame.grid(row=0, column=0, columnspan=4, sticky=(N, S, E, W), padx=3, pady=3)
self.treeView.focus_set()
# set up treeView widget and tabbed pane
self.treeView.column("#0", width=150, anchor="w")
self.treeView.heading("#0", text="Name")
self.treeView.column("#1", width=350, anchor="w")
self.treeView.heading("#1", text="URL")
mainWin.showStatus(_("loading archive {0}").format(filesource.url))
self.filesource = filesource
self.filenames = filenames
selectedNode = None
metadata = filesource.file(filesource.url + os.sep + TAXONOMY_PACKAGE_FILE_NAME)[0]
try:
self.nameToUrls = parseTxmyPkg(mainWin, metadata)
except Exception as e:
self.close()
err = _("Failed to parse metadata; the underlying error was: {0}").format(e)
messagebox.showerror(_("Malformed taxonomy package"), err)
mainWin.addToLog(err)
return
for name, urls in self.nameToUrls.items():
displayUrl = urls[1] # display the canonical URL
self.treeView.insert("", "end", name, values=[displayUrl], text=name)
if selectedNode:
self.treeView.see(selectedNode)
self.treeView.selection_set(selectedNode)
mainWin.showStatus(None)
if openType == DISCLOSURE_SYSTEM:
y = 3
else:
y = 1
okButton = Button(frame, text=_("OK"), command=self.ok)
cancelButton = Button(frame, text=_("Cancel"), command=self.close)
okButton.grid(row=y, column=2, sticky=(S, E, W), pady=3)
cancelButton.grid(row=y, column=3, sticky=(S, E, W), pady=3, padx=3)
frame.grid(row=0, column=0, sticky=(N, S, E, W))
frame.columnconfigure(0, weight=1)
window = self.winfo_toplevel()
window.columnconfigure(0, weight=1)
self.geometry("+{0}+{1}".format(dialogX + 50, dialogY + 100))
self.bind("<Return>", self.ok)
self.bind("<Escape>", self.close)
self.toolTipText = StringVar()
self.treeView.bind("<Motion>", self.motion, '+')
self.treeView.bind("<Leave>", self.leave, '+')
self.toolTipText = StringVar()
self.toolTip = ToolTip(self.treeView,
textvariable=self.toolTipText,
wraplength=640,
follow_mouse=True,
state="disabled")
self.toolTipEpName = None
self.protocol("WM_DELETE_WINDOW", self.close)
self.grab_set()
self.wait_window(self)
def ok(self, event=None):
selection = self.treeView.selection()
if len(selection) > 0:
epName = selection[0]
#index 0 is the remapped Url, as opposed to the canonical one used for display
urlOrFile = self.nameToUrls[epName][0]
if not urlOrFile.endswith("/"):
# check if it's an absolute URL rather than a path into the archive
if urlOrFile.startswith("http://") or urlOrFile.startswith("https://"):
self.webUrl = urlOrFile
else:
# assume it's a path inside the archive:
self.filesource.select(urlOrFile)
self.accepted = True
self.close()
def close(self, event=None):
self.parent.focus_set()
self.destroy()
def leave(self, *args):
self.toolTipRowId = None
def motion(self, *args):
epName = self.treeView.identify_row(args[0].y)
if epName != self.toolTipEpName:
self.toolTipEpName = epName
try:
epUrl = self.nameToUrls[epName][1]
except KeyError:
epUrl = None
self.toolTip._hide()
if epName and epUrl:
self.toolTipText.set("{0}\n{1}".format(epName, epUrl))
self.toolTip.configure(state="normal")
self.toolTip._schedule()
else:
self.toolTipText.set("")
self.toolTip.configure(state="disabled")
from lxml import etree
from urllib.parse import urljoin
from arelle import Locale
def parseTxmyPkg(mainWin, metadataFile):
unNamedCounter = 1
currentLang = Locale.getLanguageCode()
tree = etree.parse(metadataFile)
remappings = dict((m.get("prefix"),m.get("replaceWith"))
for m in tree.iter(tag="{http://www.corefiling.com/xbrl/taxonomypackage/v1}remapping"))
result = {}
for entryPointSpec in tree.iter(tag="{http://www.corefiling.com/xbrl/taxonomypackage/v1}entryPoint"):
name = None
# find closest match name node given xml:lang match to current language or no xml:lang
for nameNode in entryPointSpec.iter(tag="{http://www.corefiling.com/xbrl/taxonomypackage/v1}name"):
xmlLang = nameNode.get('{http://www.w3.org/XML/1998/namespace}lang')
if name is None or not xmlLang or currentLang == xmlLang:
name = nameNode.text
if currentLang == xmlLang: # most prefer one with the current locale's language
break
if not name:
name = _("<unnamed {0}>").format(unNamedCounter)
unNamedCounter += 1
epDocCount = 0
for epDoc in entryPointSpec.iterchildren("{http://www.corefiling.com/xbrl/taxonomypackage/v1}entryPointDocument"):
if epDocCount:
mainWin.addToLog(_("WARNING: skipping multiple-document entry point (not supported)"))
continue
epDocCount += 1
epUrl = epDoc.get('href')
base = epDoc.get('{http://www.w3.org/XML/1998/namespace}base') # cope with xml:base
if base:
resolvedUrl = urljoin(base, epUrl)
else:
resolvedUrl = epUrl
#perform prefix remappings
remappedUrl = resolvedUrl
for prefix, replace in remappings.items():
remappedUrl = resolvedUrl.replace(prefix, replace, 1)
result[name] = (remappedUrl, resolvedUrl)
return result
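# For reference, a sketch of the .taxonomyPackage.xml shape parseTxmyPkg()
# expects, inferred from the tag and attribute names used above (it is an
# illustration, not a file copied from any actual package):
#
#   <taxonomyPackage xmlns="http://www.corefiling.com/xbrl/taxonomypackage/v1">
#     <remapping prefix="http://example.com/" replaceWith="local/"/>
#     <entryPoint>
#       <name xml:lang="en">Main entry point</name>
#       <entryPointDocument href="http://example.com/entry.xsd"/>
#     </entryPoint>
#   </taxonomyPackage>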
| [
"[email protected]"
] | |
c16203a4ae9e23eee50178108cef12cff2782847 | facb8b9155a569b09ba66aefc22564a5bf9cd319 | /wp2/merra_scripts/03_model_fitting/merra882/567-tideGauge.py | b9b17c856c072ef1398bd25f5d67edcae774eb05 | [] | no_license | moinabyssinia/modeling-global-storm-surges | 13e69faa8f45a1244a964c5de4e2a5a6c95b2128 | 6e385b2a5f0867df8ceabd155e17ba876779c1bd | refs/heads/master | 2023-06-09T00:40:39.319465 | 2021-06-25T21:00:44 | 2021-06-25T21:00:44 | 229,080,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,041 | py | # -*- coding: utf-8 -*-
"""
Created on Mon May 4 15:51:30 2020
This program is designed to validate a multiple
linear regression model by using the KFOLD method
@author: Michael Tadesse
"""
import os
import numpy as np
import pandas as pd
from sklearn import metrics
from scipy import stats
from datetime import datetime
from sklearn.linear_model import LinearRegression
from sklearn.decomposition import PCA
from sklearn.model_selection import KFold
from sklearn.preprocessing import StandardScaler
def validate():
"""
run KFOLD method for regression
"""
#defining directories
dir_in = "/lustre/fs0/home/mtadesse/merraAllLagged"
dir_out = "/lustre/fs0/home/mtadesse/merraLRValidation"
surge_path = "/lustre/fs0/home/mtadesse/05_dmax_surge_georef"
#cd to the lagged predictors directory
os.chdir(dir_in)
x = 567
y = 568
#empty dataframe for model validation
df = pd.DataFrame(columns = ['tg', 'lon', 'lat', 'num_year', \
'num_95pcs','corrn', 'rmse'])
#looping through
for tg in range(x,y):
os.chdir(dir_in)
tg_name = os.listdir()[tg]
print(tg, tg_name)
##########################################
#check if this tg is already taken care of
##########################################
os.chdir(dir_out)
if os.path.isfile(tg_name):
return "file already analyzed!"
os.chdir(dir_in)
#load predictor
pred = pd.read_csv(tg_name)
pred.drop('Unnamed: 0', axis = 1, inplace = True)
#add squared and cubed wind terms (as in WPI model)
pickTerms = lambda x: x.startswith('wnd')
wndTerms = pred.columns[list(map(pickTerms, pred.columns))]
wnd_sqr = pred[wndTerms]**2
wnd_cbd = pred[wndTerms]**3
pred = pd.concat([pred, wnd_sqr, wnd_cbd], axis = 1)
#standardize predictor data
dat = pred.iloc[:,1:]
scaler = StandardScaler()
print(scaler.fit(dat))
dat_standardized = pd.DataFrame(scaler.transform(dat), \
columns = dat.columns)
pred_standardized = pd.concat([pred['date'], dat_standardized], axis = 1)
#load surge data
os.chdir(surge_path)
surge = pd.read_csv(tg_name)
surge.drop('Unnamed: 0', axis = 1, inplace = True)
#remove duplicated surge rows
surge.drop(surge[surge['ymd'].duplicated()].index, axis = 0, inplace = True)
surge.reset_index(inplace = True)
surge.drop('index', axis = 1, inplace = True)
#adjust surge time format to match that of pred
time_str = lambda x: str(datetime.strptime(x, '%Y-%m-%d'))
surge_time = pd.DataFrame(list(map(time_str, surge['ymd'])), columns = ['date'])
time_stamp = lambda x: (datetime.strptime(x, '%Y-%m-%d %H:%M:%S'))
surge_new = pd.concat([surge_time, surge[['surge', 'lon', 'lat']]], axis = 1)
#merge predictors and surge to find common time frame
pred_surge = pd.merge(pred_standardized, surge_new.iloc[:,:2], on='date', how='right')
pred_surge.sort_values(by = 'date', inplace = True)
#find rows that have nans and remove them
row_nan = pred_surge[pred_surge.isna().any(axis =1)]
pred_surge.drop(row_nan.index, axis = 0, inplace = True)
pred_surge.reset_index(inplace = True)
pred_surge.drop('index', axis = 1, inplace = True)
#in case pred and surge don't overlap
if pred_surge.shape[0] == 0:
print('-'*80)
            print("Predictors and Surge don't overlap")
print('-'*80)
continue
pred_surge['date'] = pd.DataFrame(list(map(time_stamp, \
pred_surge['date'])), \
columns = ['date'])
#prepare data for training/testing
X = pred_surge.iloc[:,1:-1]
y = pd.DataFrame(pred_surge['surge'])
y = y.reset_index()
y.drop(['index'], axis = 1, inplace = True)
#apply PCA
pca = PCA(.95)
pca.fit(X)
X_pca = pca.transform(X)
#apply 10 fold cross validation
kf = KFold(n_splits=10, random_state=29)
metric_corr = []; metric_rmse = []; #combo = pd.DataFrame(columns = ['pred', 'obs'])
for train_index, test_index in kf.split(X):
X_train, X_test = X_pca[train_index], X_pca[test_index]
y_train, y_test = y['surge'][train_index], y['surge'][test_index]
#train regression model
lm = LinearRegression()
lm.fit(X_train, y_train)
#predictions
predictions = lm.predict(X_test)
# pred_obs = pd.concat([pd.DataFrame(np.array(predictions)), \
# pd.DataFrame(np.array(y_test))], \
# axis = 1)
# pred_obs.columns = ['pred', 'obs']
# combo = pd.concat([combo, pred_obs], axis = 0)
#evaluation matrix - check p value
if stats.pearsonr(y_test, predictions)[1] >= 0.05:
print("insignificant correlation!")
continue
else:
print(stats.pearsonr(y_test, predictions))
metric_corr.append(stats.pearsonr(y_test, predictions)[0])
print(np.sqrt(metrics.mean_squared_error(y_test, predictions)))
metric_rmse.append(np.sqrt(metrics.mean_squared_error(y_test, predictions)))
#number of years used to train/test model
num_years = (pred_surge['date'][pred_surge.shape[0]-1] -\
pred_surge['date'][0]).days/365
longitude = surge['lon'][0]
latitude = surge['lat'][0]
num_pc = X_pca.shape[1] #number of principal components
corr = np.mean(metric_corr)
rmse = np.mean(metric_rmse)
print('num_year = ', num_years, ' num_pc = ', num_pc ,'avg_corr = ',np.mean(metric_corr), ' - avg_rmse (m) = ', \
np.mean(metric_rmse), '\n')
#original size and pca size of matrix added
new_df = pd.DataFrame([tg_name, longitude, latitude, num_years, num_pc, corr, rmse]).T
new_df.columns = ['tg', 'lon', 'lat', 'num_year', \
'num_95pcs','corrn', 'rmse']
df = pd.concat([df, new_df], axis = 0)
#save df as cs - in case of interruption
os.chdir(dir_out)
df.to_csv(tg_name)
#cd to dir_in
os.chdir(dir_in)
#run script
validate()
| [
"[email protected]"
] | |
fae82bef91113802e227bc3262962a8552bb2235 | 1e1c85d0d74bc1b111e77f082cd24c94219d7eb0 | /VE-Tests/tests/KSTB/android/e2e_tests/__init__.py | 007625507185402dd51a842facefeefb7c36e1c3 | [] | no_license | anshsaikia/GSSDeliverables-YesProject | b6f5e4de8d853ce21dfe7401c4b9179c40f32a89 | ed786ccfd7b8c344802c7ff6d0cfd4afbffe015e | refs/heads/master | 2020-04-06T04:07:49.034461 | 2017-02-24T13:39:48 | 2017-02-24T13:39:48 | 83,044,504 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24 | py | __author__ = 'tchevall'
| [
"[email protected]"
] | |
aa92cd5b6a1e281f0e443f81d8e6a51fb80d2c9d | a89c80dac438747db4dd60e5f9d35c33d9c231bc | /bootstrap/legacy/container_0/term.py | e44003b63ae8b5c2e4b9c89b9f1c160e69d7eefa | [
"MIT"
] | permissive | Trixter9994/lazero | 09cd13e889421d6567b54aeb5142a7a937b1f35f | 5a2b7f7499fb11e71885defab130dead7864b9fd | refs/heads/master | 2022-12-17T08:40:14.533371 | 2020-09-22T03:12:04 | 2020-09-22T03:12:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,377 | py | from gi.repository import Gtk, Vte, GLib, Pango, Gio
import gi
gi.require_version('Gtk', '3.0')
gi.require_version('Vte', '2.91')
# if you really want to, use java instead to do terminal emulation.
# no fucking horrible shits, please?
# either replace it or use it.
class TheWindow(Gtk.Window):
def __init__(self):
Gtk.Window.__init__(self, title="GTK3 IDE")
self.set_default_size(600, 300)
terminal = Vte.Terminal()
#pty = terminal.pty_new_sync(Vte.PtyFlags.DEFAULT)
pty = Vte.Pty.new_sync(Vte.PtyFlags.DEFAULT)
terminal.set_pty(pty)
pty.spawn_async(
None,
["/bin/python"],
None,
GLib.SpawnFlags.DO_NOT_REAP_CHILD,
None,
None,
-1,
None,
self.ready
)
# self.terminal.get_pty(self.pty)
box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
scroller = Gtk.ScrolledWindow()
scroller.set_hexpand(True)
scroller.set_vexpand(True)
scroller.add(terminal)
box.pack_start(scroller, False, True, 2)
self.add(box)
def ready(self, pty, task):
print('pty ', pty)
win = TheWindow()
win.connect('destroy', Gtk.main_quit)
win.show_all()
Gtk.main()
# what the heck?
# check the implementation of vscode terminal. -> the joke out there. | [
"[email protected]"
] | |
0bc916255bfd134ba21182595402124b86032802 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_flimflam.py | e214907d239173c947b6d0e4fe3c7014e4c32002 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 330 | py |
# class header
class _FLIMFLAM():
def __init__(self,):
self.name = "FLIMFLAM"
self.definitions = [u'talk that is confusing and intended to deceive']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"[email protected]"
] | |
2040a05f18652b0a19ade2c74ef5fe69dd3abe92 | e99d2630e6acd8d2496c77c8ee215f9d82a610b8 | /tests/test_bazel_workspace.py | 39b7e0e615eae59d9777ea9d20496be15527d46a | [
"Apache-2.0"
] | permissive | useblocks/sphinx-bazel | 0acbbc13331356a871e5a75ebadee2b2158c4df7 | 38f5403393eb08f651100b21648efe2af6b4b047 | refs/heads/master | 2021-08-22T22:31:20.339073 | 2021-06-18T04:53:57 | 2021-06-18T04:53:57 | 171,243,259 | 11 | 4 | Apache-2.0 | 2021-06-18T04:53:58 | 2019-02-18T08:23:47 | Python | UTF-8 | Python | false | false | 426 | py | try:
from pathlib import Path
except ImportError:
from pathlib2 import Path
from sphinx_testing import with_app
@with_app(buildername='html', srcdir='doc_test/bazel_workspace') # , warningiserror=True)
def test_bazel_workspace(app, status, warning):
app.build()
html = Path(app.outdir, 'index.html').read_text()
assert 'workspace-my_workspace_name' in html
assert 'workspace description' in html
| [
"[email protected]"
] | |
21d83e44ad8d952e8947a946bf94f29b3f270b16 | de4d88db6ea32d20020c169f734edd4b95c3092d | /aiotdlib/api/types/basic_group.py | b64635412e5b8db8ca6213e66e871468534ccc6b | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | thiagosm/aiotdlib | 5cc790a5645f7e4cc61bbd0791433ed182d69062 | 4528fcfca7c5c69b54a878ce6ce60e934a2dcc73 | refs/heads/main | 2023-08-15T05:16:28.436803 | 2021-10-18T20:41:27 | 2021-10-18T20:41:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,529 | py | # =============================================================================== #
# #
# This file has been generated automatically!! Do not change this manually! #
# #
# =============================================================================== #
from __future__ import annotations
from pydantic import Field
from .chat_member_status import ChatMemberStatus
from ..base_object import BaseObject
class BasicGroup(BaseObject):
"""
Represents a basic group of 0-200 users (must be upgraded to a supergroup to accommodate more than 200 users)
:param id: Group identifier
:type id: :class:`int`
:param member_count: Number of members in the group
:type member_count: :class:`int`
:param status: Status of the current user in the group
:type status: :class:`ChatMemberStatus`
:param is_active: True, if the group is active
:type is_active: :class:`bool`
:param upgraded_to_supergroup_id: Identifier of the supergroup to which this group was upgraded; 0 if none
:type upgraded_to_supergroup_id: :class:`int`
"""
ID: str = Field("basicGroup", alias="@type")
id: int
member_count: int
status: ChatMemberStatus
is_active: bool
upgraded_to_supergroup_id: int
@staticmethod
def read(q: dict) -> BasicGroup:
return BasicGroup.construct(**q)
| [
"[email protected]"
] | |
2bf06aaed7534278af8791becf25639426bcc480 | cf5f24e5a32f8cafe90d4253d727b1c0457da6a4 | /algorithm/boj_16985.py | a3f12c8374c713064a9eea0eb995f12079fa3a68 | [] | no_license | seoljeongwoo/learn | 537659ca942875f6846646c2e21e1e9f2e5b811e | 5b423e475c8f2bc47cb6dee09b8961d83ab08568 | refs/heads/main | 2023-05-04T18:07:27.592058 | 2021-05-05T17:32:50 | 2021-05-05T17:32:50 | 324,725,000 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,164 | py | import sys
from itertools import permutations
from collections import deque
input =sys.stdin.readline
ret = int(1e9)
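# Descriptive note: the input is five 5x5 boards stacked into a 5x5x5 maze.
# solve() tries every permutation of the board order and every combination of
# 90-degree rotations per board, then bfs() finds the shortest path of 1-cells
# from (0,0,0) to (4,4,4); ret keeps the best distance found. 12 is the
# smallest possible (Manhattan) distance, so the search stops early at 12.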
def range_check(x,y,z):
return 0<=x<5 and 0<=y<5 and 0<=z<5
def rotate(temp, h):
new_temp = [ [ 0 for i in range(5)] for j in range(5)]
for i in range(5):
for j in range(5):
new_temp[4-j][i] = temp[h][i][j]
for i in range(5):
for j in range(5):
temp[h][i][j] = new_temp[i][j]
return temp
def bfs(temp):
global ret
q = deque()
if temp[0][0][0] == 0: return
q.append((0,0,0))
check = [[[-1 for row in range(5)] for col in range(5)] for height in range(5)]
check[0][0][0] = 0
while q:
cx, cy, cz = q.popleft()
for dx, dy, dz in direction:
nx,ny,nz = cx+dx, cy+dy, cz+dz
if range_check(nx, ny, nz) == False : continue
if check[nx][ny][nz] != -1 or temp[nx][ny][nz] == 0: continue
check[nx][ny][nz] = check[cx][cy][cz] +1
if check[nx][ny][nz] >= ret: return
q.append((nx,ny,nz))
if check[4][4][4] == -1: return
ret = min(ret, check[4][4][4])
if ret == 12: print(12); exit(0)
return
def solve(per):
global ret
v = [[[0 for row in range(5)] for col in range(5)] for height in range(5)]
for index,height in enumerate(per):
for row in range(5):
for col in range(5):
v[index][row][col] = board[height][row][col]
for i in range(4):
v = rotate(v,0)
for j in range(4):
v = rotate(v,1)
for k in range(4):
v = rotate(v,2)
for l in range(4):
v = rotate(v,3)
for m in range(4):
v = rotate(v,4)
bfs(v)
return
board = []
direction = [(0,0,1), (0,0,-1), (1,0,0), (-1,0,0) , (0,1,0) , (0,-1,0)]
for height in range(5):
floor = []
for row in range(5):
floor.append(list(map(int,input().split())))
board.append(floor)
for data in list(permutations([i for i in range(5)],5)):
solve(data)
if ret == int(1e9): ret = -1
print(ret) | [
"[email protected]"
] | |
c962789a0d7a79b73036d4123cbdfae8ad0b8b3e | bfe6c95fa8a2aae3c3998bd59555583fed72900a | /reachNumber.py | c529cd721779f2e52f7d23adbc52206920abdac0 | [] | no_license | zzz136454872/leetcode | f9534016388a1ba010599f4771c08a55748694b2 | b5ea6c21bff317884bdb3d7e873aa159b8c30215 | refs/heads/master | 2023-09-01T17:26:57.624117 | 2023-08-29T03:18:56 | 2023-08-29T03:18:56 | 240,464,565 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 386 | py | class Solution:
def reachNumber(self, target: int) -> int:
k = 0
s = 0
target = abs(target)
while s < target:
k += 1
s += k
d = s - target
if d % 2 == 0:
return k
k += 1
if (k - d) % 2 != 0:
k += 1
return k
target = 2
print(Solution().reachNumber(target))
| [
"[email protected]"
] | |
5e24b2565a618ba55d6cd3076b1e53d6d810773b | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02615/s526468670.py | 459df95840e8558c1521ce47d258b9055f356cf3 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 161 | py | n = int(input())
la = list(map(int, input().split()))
la.sort(reverse=True)
answer = la[0]
for i in range(n - 2):
answer += la[1 + i // 2]
print(answer)
| [
"[email protected]"
] | |
9053f5b0f20440d4b3ba686f4971f8d31a94cc3b | 9af995f2908b986211d76ea4a1cefe01ecca9cb3 | /paddlehub/finetune/task/sequence_task.py | 9ebfc1ae831a79f21d9a934e9adcfa42867f3af8 | [] | no_license | wuhuaha/beike | 94b5477fcb99b12e15a6c87911ec0f872ff9d1a0 | 076d5e594d0b129574d11d097b910471f2b4bdd0 | refs/heads/main | 2023-01-06T17:21:41.475137 | 2020-11-12T14:08:49 | 2020-11-12T14:08:49 | 309,097,633 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,029 | py | # coding:utf-8
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from collections import OrderedDict
import numpy as np
import paddle.fluid as fluid
from paddlehub.finetune.evaluate import chunk_eval, calculate_f1
from paddlehub.common.logger import logger
from .base_task import BaseTask
class SequenceLabelTask(BaseTask):
def __init__(self,
feature,
max_seq_len,
num_classes,
dataset=None,
feed_list=None,
data_reader=None,
startup_program=None,
config=None,
metrics_choices="default",
add_crf=False):
if metrics_choices == "default":
metrics_choices = ["f1", "precision", "recall"]
self.add_crf = add_crf
main_program = feature.block.program
super(SequenceLabelTask, self).__init__(
dataset=dataset,
data_reader=data_reader,
main_program=main_program,
feed_list=feed_list,
startup_program=startup_program,
config=config,
metrics_choices=metrics_choices)
self.feature = feature
self.max_seq_len = max_seq_len
self.num_classes = num_classes
@property
def return_numpy(self):
if self.add_crf:
return False
else:
return True
def _build_net(self):
self.seq_len = fluid.layers.data(
name="seq_len", shape=[1], dtype='int64', lod_level=0)
self.seq_len_used = fluid.layers.squeeze(self.seq_len, axes=[1])
if self.add_crf:
unpad_feature = fluid.layers.sequence_unpad(
self.feature, length=self.seq_len_used)
self.emission = fluid.layers.fc(
size=self.num_classes,
input=unpad_feature,
param_attr=fluid.ParamAttr(
initializer=fluid.initializer.Uniform(low=-0.1, high=0.1),
regularizer=fluid.regularizer.L2DecayRegularizer(
regularization_coeff=1e-4)))
size = self.emission.shape[1]
fluid.layers.create_parameter(
shape=[size + 2, size], dtype=self.emission.dtype, name='crfw')
self.ret_infers = fluid.layers.crf_decoding(
input=self.emission, param_attr=fluid.ParamAttr(name='crfw'))
ret_infers = fluid.layers.assign(self.ret_infers)
return [ret_infers]
else:
self.logits = fluid.layers.fc(
input=self.feature,
size=self.num_classes,
num_flatten_dims=2,
param_attr=fluid.ParamAttr(
name="cls_seq_label_out_w",
initializer=fluid.initializer.TruncatedNormal(scale=0.02)),
bias_attr=fluid.ParamAttr(
name="cls_seq_label_out_b",
initializer=fluid.initializer.Constant(0.)))
self.ret_infers = fluid.layers.reshape(
x=fluid.layers.argmax(self.logits, axis=2), shape=[-1, 1])
logits = self.logits
logits = fluid.layers.flatten(logits, axis=2)
logits = fluid.layers.softmax(logits)
self.num_labels = logits.shape[1]
return [logits]
def _add_label(self):
label = fluid.layers.data(
name="label", shape=[self.max_seq_len, 1], dtype='int64')
return [label]
def _add_loss(self):
if self.add_crf:
labels = fluid.layers.sequence_unpad(self.labels[0],
self.seq_len_used)
crf_cost = fluid.layers.linear_chain_crf(
input=self.emission,
label=labels,
param_attr=fluid.ParamAttr(name='crfw'))
loss = fluid.layers.mean(x=crf_cost)
else:
labels = fluid.layers.flatten(self.labels[0], axis=2)
ce_loss = fluid.layers.cross_entropy(
input=self.outputs[0], label=labels)
loss = fluid.layers.mean(x=ce_loss)
return loss
def _add_metrics(self):
if self.add_crf:
labels = fluid.layers.sequence_unpad(self.labels[0],
self.seq_len_used)
(precision, recall, f1_score, num_infer_chunks, num_label_chunks,
num_correct_chunks) = fluid.layers.chunk_eval(
input=self.outputs[0],
label=labels,
chunk_scheme="IOB",
num_chunk_types=int(np.ceil((self.num_classes - 1) / 2.0)))
chunk_evaluator = fluid.metrics.ChunkEvaluator()
chunk_evaluator.reset()
return [precision, recall, f1_score]
else:
self.ret_labels = fluid.layers.reshape(
x=self.labels[0], shape=[-1, 1])
return [self.ret_labels, self.ret_infers, self.seq_len_used]
def _calculate_metrics(self, run_states):
total_infer = total_label = total_correct = loss_sum = 0
run_step = run_time_used = run_examples = 0
precision_sum = recall_sum = f1_score_sum = 0
for run_state in run_states:
loss_sum += np.mean(run_state.run_results[-1])
if self.add_crf:
precision_sum += np.mean(
run_state.run_results[0]) * run_state.run_examples
recall_sum += np.mean(
run_state.run_results[1]) * run_state.run_examples
f1_score_sum += np.mean(
run_state.run_results[2]) * run_state.run_examples
else:
np_labels = run_state.run_results[0]
np_infers = run_state.run_results[1]
np_lens = run_state.run_results[2]
label_num, infer_num, correct_num = chunk_eval(
np_labels, np_infers, np_lens, self.num_labels,
self.device_count)
total_infer += infer_num
total_label += label_num
total_correct += correct_num
run_examples += run_state.run_examples
run_step += run_state.run_step
run_time_used = time.time() - run_states[0].run_time_begin
run_speed = run_step / run_time_used
avg_loss = loss_sum / run_examples
if self.add_crf:
precision = precision_sum / run_examples
recall = recall_sum / run_examples
f1 = f1_score_sum / run_examples
else:
precision, recall, f1 = calculate_f1(total_label, total_infer,
total_correct)
# The first key will be used as main metrics to update the best model
scores = OrderedDict()
for metric in self.metrics_choices:
if metric == "precision":
scores["precision"] = precision
elif metric == "recall":
scores["recall"] = recall
elif metric == "f1":
scores["f1"] = f1
else:
raise ValueError("Not Support Metric: \"%s\"" % metric)
return scores, avg_loss, run_speed
@property
def feed_list(self):
if self._compatible_mode:
feed_list = [varname for varname in self._base_feed_list]
if self.is_train_phase or self.is_test_phase:
feed_list += [self.labels[0].name, self.seq_len.name]
else:
feed_list += [self.seq_len.name]
else:
feed_list = super(SequenceLabelTask, self).feed_list
return feed_list
@property
def fetch_list(self):
if self.is_train_phase or self.is_test_phase:
return [metric.name for metric in self.metrics] + [self.loss.name]
elif self.is_predict_phase:
return [self.ret_infers.name] + [self.seq_len_used.name]
return [output.name for output in self.outputs]
def _postprocessing(self, run_states):
if self._compatible_mode:
id2label = {
val: key
for key, val in self._base_data_reader.label_map.items()
}
else:
if self._label_list:
id2label = {}
for index, label in enumerate(self._label_list):
id2label[index] = label
else:
logger.warning(
"Fail to postprocess the predict output. Please set label_list parameter in predict function or initialize the task with dataset parameter."
)
return run_states
results = []
for batch_states in run_states:
batch_results = batch_states.run_results
batch_infers = batch_results[0].reshape([-1]).astype(
np.int32).tolist()
seq_lens = batch_results[1].reshape([-1]).astype(np.int32).tolist()
current_id = 0
for length in seq_lens:
seq_infers = batch_infers[current_id:current_id + length]
seq_result = list(map(id2label.get, seq_infers[1:-1]))
current_id += length if self.add_crf else self.max_seq_len
results.append(seq_result)
return results
| [
"[email protected]"
] | |
c02f8f5bb55fb908952922011688ba908edcb9c1 | 19ea2c6fe2896807cd3db08109a80f0fcd303beb | /2021_03/03_12/CH04/013_Reversed_For01.py | b499b4f0f24761f42264e8a317f63d7fa3367044 | [] | no_license | ITlearning/ROKA_Python | 8078528eda10eab035f06a69a3278ff21d167df2 | d165fc2f276765a43bc859f556e76aaea779641f | refs/heads/main | 2023-03-26T07:47:17.164024 | 2021-03-27T14:22:54 | 2021-03-27T14:22:54 | 345,230,355 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 143 | py | # 반대로 반복하기(1)
for i in range(4,-1, -1) :
print("현재 반복 변수 : {}".format(i))
# range() 매개변수 3개 방법 | [
"[email protected]"
] | |
58d5dca55a3bf9e020bcac0e1af8db7b09471366 | 2f64dddf0d5df9df42b1c93d8f946c788d19fef0 | /scruffy/env.py | d760d1cfc7f78a3af7918160f96c5a9756ae2065 | [] | no_license | tmr232/scruffy | 41d2598d0b15b8eefd8ab038b1411eb8c384b836 | 3c35369a5a7b67e934d59c321439e3d3e5495970 | refs/heads/master | 2021-01-17T14:18:58.418097 | 2015-02-26T11:52:25 | 2015-02-26T11:52:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,835 | py | import os
import yaml
import itertools
import errno
import logging
import logging.config
from .directory import Directory
from .plugin import PluginManager
from .config import ConfigNode, Config, ConfigEnv, ConfigFile, ConfigApplicator  # ConfigFile is referenced in find_config() below
class Environment(object):
"""
An environment in which to run a program
"""
def __init__(self, setup_logging=True, *args, **kwargs):
self._pm = PluginManager()
self._children = {}
self.config = None
# find a config if we have one and load it
self.config = self.find_config(kwargs)
if self.config:
self.config.load()
# setup logging
if setup_logging:
if self.config != None and self.config.logging.dict_config != None:
# configure logging from the configuration
logging.config.dictConfig(self.config.logging.dict_config.to_dict())
else:
# no dict config, set up a basic config so we at least get messages logged to stdout
log = logging.getLogger()
log.setLevel(logging.INFO)
if len(list(filter(lambda h: isinstance(h, logging.StreamHandler), log.handlers))) == 0:
log.addHandler(logging.StreamHandler())
# add children
self.add(**kwargs)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.cleanup()
def __getitem__(self, key):
return self._children[key]
def __getattr__(self, key):
return self._children[key]
def find_config(self, children):
"""
Find a config in our children so we can fill in variables in our other
children with its data.
"""
named_config = None
found_config = None
# first see if we got a kwarg named 'config', as this guy is special
if 'config' in children:
if type(children['config']) == str:
children['config'] = ConfigFile(children['config'])
elif isinstance(children['config'], Config):
children['config'] = children['config']
elif type(children['config']) == dict:
children['config'] = Config(data=children['config'])
else:
raise TypeError("Don't know how to turn {} into a Config".format(type(children['config'])))
named_config = children['config']
# next check the other kwargs
for k in children:
if isinstance(children[k], Config):
found_config = children[k]
# if we still don't have a config, see if there's a directory with one
for k in children:
if isinstance(children[k], Directory):
for j in children[k]._children:
if j == 'config' and not named_config:
named_config = children[k]._children[j]
if isinstance(children[k]._children[j], Config):
found_config = children[k]._children[j]
if named_config:
return named_config
else:
return found_config
def add(self, **kwargs):
"""
Add objects to the environment.
"""
for key in kwargs:
if type(kwargs[key]) == str:
self._children[key] = Directory(kwargs[key])
else:
self._children[key] = kwargs[key]
self._children[key]._env = self
self._children[key].apply_config(ConfigApplicator(self.config))
self._children[key].prepare()
def cleanup(self):
"""
Clean up the environment
"""
for key in self._children:
self._children[key].cleanup()
@property
def plugins(self):
return self._pm.plugins
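# Minimal usage sketch (illustrative; the 'work_dir' path and config contents
# are hypothetical, relying only on the API defined in this module):
#
#   with Environment(setup_logging=False,
#                    config={'some_key': 1},
#                    work_dir='/tmp/myapp') as env:
#       d = env['work_dir']   # Directory child created from the string path
#       value = env.config.some_key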
| [
"[email protected]"
] | |
8e6ddac527a835923457ffb56b86e80403bcc21e | c672675d505f1b0dafb4b81141644caedf24cdef | /CADRE/power_dymos/power_cell_voltage.py | 1190bedce22ea57736456b93baa146f986dc045b | [
"Apache-2.0"
] | permissive | johnjasa/CADRE | aa63c7fa9466dc1839b454f2484346e57204dc8a | a4ffd61582b8474953fc309aa540838a14f29dcf | refs/heads/master | 2020-04-07T03:19:57.501186 | 2018-11-17T19:15:10 | 2018-11-17T19:15:10 | 158,012,106 | 0 | 0 | Apache-2.0 | 2018-11-17T18:17:53 | 2018-11-17T18:17:53 | null | UTF-8 | Python | false | false | 4,874 | py | """
Power discipline for CADRE: Power Cell Voltage component.
"""
from __future__ import print_function, division, absolute_import
from six.moves import range
import os
import numpy as np
from openmdao.api import ExplicitComponent
from MBI import MBI
class PowerCellVoltage(ExplicitComponent):
"""
Compute the output voltage of the solar panels.
"""
def initialize(self):
fpath = os.path.dirname(os.path.realpath(__file__))
self.options.declare('num_nodes', types=(int, ),
desc="Number of time points.")
self.options.declare('filename', fpath + '/../data/Power/curve.dat',
desc="File containing surrogate model for voltage.")
def setup(self):
nn = self.options['num_nodes']
filename = self.options['filename']
dat = np.genfromtxt(filename)
nT, nA, nI = dat[:3]
nT = int(nT)
nA = int(nA)
nI = int(nI)
T = dat[3:3 + nT]
A = dat[3 + nT:3 + nT + nA]
I = dat[3 + nT + nA:3 + nT + nA + nI] # noqa: E741
V = dat[3 + nT + nA + nI:].reshape((nT, nA, nI), order='F')
self.MBI = MBI(V, [T, A, I], [6, 6, 15], [3, 3, 3])
self.x = np.zeros((84 * nn, 3), order='F')
self.xV = self.x.reshape((nn, 7, 12, 3), order='F')
# Inputs
self.add_input('LOS', np.zeros((nn, )), units=None,
desc='Line of Sight over Time')
self.add_input('temperature', np.zeros((nn, 5)), units='degK',
desc='Temperature of solar cells over time')
self.add_input('exposed_area', np.zeros((nn, 7, 12)), units='m**2',
desc='Exposed area to sun for each solar cell over time')
self.add_input('Isetpt', np.zeros((nn, 12)), units='A',
desc='Currents of the solar panels')
# Outputs
self.add_output('V_sol', np.zeros((nn, 12)), units='V',
desc='Output voltage of solar panel over time')
rows = np.arange(nn*12)
cols = np.tile(np.repeat(0, 12), nn) + np.repeat(np.arange(nn), 12)
self.declare_partials('V_sol', 'LOS', rows=rows, cols=cols)
row = np.tile(np.repeat(0, 5), 12) + np.repeat(np.arange(12), 5)
rows = np.tile(row, nn) + np.repeat(12*np.arange(nn), 60)
col = np.tile(np.arange(5), 12)
cols = np.tile(col, nn) + np.repeat(5*np.arange(nn), 60)
self.declare_partials('V_sol', 'temperature', rows=rows, cols=cols)
row = np.tile(np.arange(12), 7)
rows = np.tile(row, nn) + np.repeat(12*np.arange(nn), 84)
cols = np.arange(nn*7*12)
self.declare_partials('V_sol', 'exposed_area', rows=rows, cols=cols)
row_col = np.arange(nn*12)
self.declare_partials('V_sol', 'Isetpt', rows=row_col, cols=row_col)
def setx(self, inputs):
temperature = inputs['temperature']
LOS = inputs['LOS']
exposed_area = inputs['exposed_area']
Isetpt = inputs['Isetpt']
for p in range(12):
i = 4 if p < 4 else (p % 4)
for c in range(7):
self.xV[:, c, p, 0] = temperature[:, i]
self.xV[:, c, p, 1] = LOS * exposed_area[:, c, p]
self.xV[:, c, p, 2] = Isetpt[:, p]
def compute(self, inputs, outputs):
"""
Calculate outputs.
"""
nn = self.options['num_nodes']
self.setx(inputs)
self.raw = self.MBI.evaluate(self.x)[:, 0].reshape((nn, 7, 12), order='F')
outputs['V_sol'] = np.zeros((nn, 12))
for c in range(7):
outputs['V_sol'] += self.raw[:, c, :]
def compute_partials(self, inputs, partials):
"""
Calculate and save derivatives. (i.e., Jacobian)
"""
nn = self.options['num_nodes']
exposed_area = inputs['exposed_area']
LOS = inputs['LOS']
raw1 = self.MBI.evaluate(self.x, 1)[:, 0].reshape((nn, 7, 12), order='F')
raw2 = self.MBI.evaluate(self.x, 2)[:, 0].reshape((nn, 7, 12), order='F')
raw3 = self.MBI.evaluate(self.x, 3)[:, 0].reshape((nn, 7, 12), order='F')
        # These partials are accumulated with += over the 7 cells below, so they
        # must start at zero; np.empty() would leave garbage in the accumulators.
        dV_dL = np.zeros((nn, 12))
        dV_dT = np.zeros((nn, 12, 5))
        dV_dA = np.zeros((nn, 7, 12))
        dV_dI = np.zeros((nn, 12))
for p in range(12):
i = 4 if p < 4 else (p % 4)
for c in range(7):
dV_dL[:, p] += raw2[:, c, p] * exposed_area[:, c, p]
dV_dT[:, p, i] += raw1[:, c, p]
dV_dA[:, c, p] += raw2[:, c, p] * LOS
dV_dI[:, p] += raw3[:, c, p]
partials['V_sol', 'LOS'] = dV_dL.flatten()
partials['V_sol', 'temperature'] = dV_dT.flatten()
partials['V_sol', 'exposed_area'] = dV_dA.flatten()
partials['V_sol', 'Isetpt'] = dV_dI.flatten() | [
"[email protected]"
] | |
950676821e3247a2de1ac3ab907345178473721e | 69bfed466017c654c5d24a3e735430c4dc138af4 | /src/settings.py | fc9c4e70c37f0bca34b88ed0a4c3fc0d16a4d2e2 | [
"MIT"
] | permissive | kartagis/lucy | 20579fe1776cb6a2cfda2c26212d3d63be8829be | 45de24c0b01dfb9329eb31a1bd705df5b26e84a3 | refs/heads/master | 2021-01-23T15:27:20.420914 | 2017-09-07T08:08:14 | 2017-09-07T08:08:14 | 102,708,901 | 0 | 0 | null | 2017-09-07T08:00:34 | 2017-09-07T08:00:34 | null | UTF-8 | Python | false | false | 397 | py | from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
db_name = "db/filop.db"
Base = declarative_base()
def session(db_name = db_name):
engine = create_engine('sqlite:///{}'.format(db_name))
session = sessionmaker()
session.configure(bind=engine)
Base.metadata.create_all(engine)
return session()
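
# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# Shows how session() might be used with a model declared on Base. The "Item"
# model and its columns are made up for this example; an in-memory database is
# passed so the default db/filop.db path is not required.
if __name__ == "__main__":
    from sqlalchemy import Column, Integer, String

    class Item(Base):
        __tablename__ = "items"
        id = Column(Integer, primary_key=True)
        name = Column(String)

    db = session(":memory:")              # sqlite:///:memory: -> throwaway database
    db.add(Item(name="example"))
    db.commit()
    print(db.query(Item).count())         # -> 1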
| [
"[email protected]"
] | |
ddfa181471cceb2fcfaedc2db33679d4f3c3ae67 | a3d6556180e74af7b555f8d47d3fea55b94bcbda | /third_party/blink/renderer/core/url_pattern/DEPS | 7c671850403fe157f8234a4a1b325f2ab5b091ee | [
"LGPL-2.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"GPL-1.0-or-later",
"GPL-2.0-only",
"LGPL-2.0-only",
"BSD-2-Clause",
"LicenseRef-scancode-other-copyleft",
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] | permissive | chromium/chromium | aaa9eda10115b50b0616d2f1aed5ef35d1d779d6 | a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c | refs/heads/main | 2023-08-24T00:35:12.585945 | 2023-08-23T22:01:11 | 2023-08-23T22:01:11 | 120,360,765 | 17,408 | 7,102 | BSD-3-Clause | 2023-09-10T23:44:27 | 2018-02-05T20:55:32 | null | UTF-8 | Python | false | false | 277 | # Copyright 2020 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
include_rules = [
"+base/strings/string_util.h",
"+third_party/liburlpattern",
"+url/url_canon.h",
"+url/url_util.h",
]
| [
"[email protected]"
] | ||
e45f74a36ee23641f9d7cd1fdf8a4ba8f9a3e067 | af3ec207381de315f4cb6dddba727d16d42d6c57 | /dialogue-engine/test/programytest/processors/post/test_mergechinese.py | 94679d64e47a6074fb113389ea20ae7e01be2ae1 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | mcf-yuichi/cotoba-agent-oss | 02a5554fe81ce21517f33229101013b6487f5404 | ce60833915f484c4cbdc54b4b8222d64be4b6c0d | refs/heads/master | 2023-01-12T20:07:34.364188 | 2020-11-11T00:55:16 | 2020-11-11T00:55:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,284 | py | """
Copyright (c) 2020 COTOBA DESIGN, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import unittest
from programy.processors.post.mergechinese import MergeChinesePostProcessor
from programy.context import ClientContext
from programytest.client import TestClient
class MergeChineseTests(unittest.TestCase):
def test_merge_chinese(self):
processor = MergeChinesePostProcessor()
context = ClientContext(TestClient(), "testid")
result = processor.process(context, "Hello")
self.assertIsNotNone(result)
self.assertEqual("Hello", result)
result = processor.process(context, "Hello World")
self.assertIsNotNone(result)
self.assertEqual("Hello World", result)
result = processor.process(context, "你 好")
self.assertIsNotNone(result)
self.assertEqual("你好", result)
result = processor.process(context, "问 你 好")
self.assertIsNotNone(result)
self.assertEqual("问你好", result)
result = processor.process(context, "XX 你 好")
self.assertIsNotNone(result)
self.assertEqual("XX 你好", result)
result = processor.process(context, "XX 你 好 YY")
self.assertIsNotNone(result)
self.assertEqual("XX 你好 YY", result)
| [
"[email protected]"
] | |
64b15f566dc2e930a5e0177cd01827d2c16d2e5e | 59bd639757fd8afcfdba73298a69482fd1f88069 | /cifar10/7_DenseNet/4last-output.py | 95b854521d8ef455b564b73ffa226914c451d537 | [] | no_license | DeepReduce/DeepReduce | f1e14b985affba2796c80d9e795b36cfd4ed9a55 | 707c2b411d65ed77967a3d1ea1506a91cc9d4bfd | refs/heads/master | 2020-08-01T01:47:23.293453 | 2020-06-14T18:09:01 | 2020-06-14T18:09:01 | 210,813,368 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,670 | py | import keras
from keras import optimizers
from keras.datasets import cifar10
from keras.models import Sequential
from keras.layers import Conv2D, Dense, Flatten, MaxPooling2D
from keras.callbacks import LearningRateScheduler, TensorBoard
from keras.models import load_model
from collections import defaultdict
import numpy as np
def init_dict(model, model_layer_dict):
for layer in model.layers:
if 'flatten' in layer.name or 'input' in layer.name:
continue
for index in range(layer.output_shape[-1]):
model_layer_dict[(layer.name, index)] = False
def update_coverage(input_data, model, model_layer_dict, threshold=0.2):
layer_names = [layer.name for layer in model.layers if
'flatten' not in layer.name and 'input' not in layer.name]
intermediate_layer_model = Model(inputs=model.input,
outputs=[model.get_layer(layer_name).output for layer_name in layer_names])
intermediate_layer_outputs = intermediate_layer_model.predict(input_data)
for i, intermediate_layer_output in enumerate(intermediate_layer_outputs):
scaled = scale(intermediate_layer_output[0])
for num_neuron in xrange(scaled.shape[-1]):
if np.mean(scaled[..., num_neuron]) > threshold and not model_layer_dict[(layer_names[i], num_neuron)]:
model_layer_dict[(layer_names[i], num_neuron)] = True
def neuron_covered(model_layer_dict):
covered_neurons = len([v for v in model_layer_dict.values() if v])
total_neurons = len(model_layer_dict)
return covered_neurons, total_neurons, covered_neurons / float(total_neurons)
def scale(intermediate_layer_output, rmax=1, rmin=0):
X_std = (intermediate_layer_output - intermediate_layer_output.min()) / (
intermediate_layer_output.max() - intermediate_layer_output.min())
X_scaled = X_std * (rmax - rmin) + rmin
return X_scaled
def color_preprocessing(x_train,x_test):
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
# mean = [125.307, 122.95, 113.865]
mean = [123.680, 116.779, 103.939]
# std = [62.9932, 62.0887, 66.7048]
for i in range(3):
x_train[:,:,:,i] = (x_train[:,:,:,i] - mean[i])
x_test[:,:,:,i] = (x_test[:,:,:,i] - mean[i])
return x_train, x_test
from keras import backend as K
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
if('tensorflow' == K.backend()):
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
y_train = keras.utils.to_categorical(y_train, 10)
y_test = keras.utils.to_categorical(y_test, 10)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train, x_test = color_preprocessing(x_train, x_test)
model1 = load_model('densenet.h5')
print(model1.evaluate(x_test, y_test))
#print(model1.summary())
#input('check...')
#for layer in model1.layers:
#for index in range(layer.output_shape[-1]):
# print(layer.name)
# print(layer.output_shape)
#print(layer.name)
#print(layer.output_shape)
#print(layer.output_shape[-1])
#print('----------')
model_layer_dict1 = defaultdict(bool)
init_dict(model1,model_layer_dict1)
#print(model_layer_dict1)
#print(len(model_layer_dict1.keys()))
#test_image = x_test[0].reshape([1,32,32,3])
#test_image.shape
#res = model.predict(test_image)
#label = softmax_to_label(res)
#print(label)
#print(x_test[0])
#print(len(x_test[0]))
#print(len(x_test[0][0]))
from keras.models import Model
#threshold = float(0.5)
layer_names = [layer.name for layer in model1.layers if 'flatten' not in layer.name and 'input' not in layer.name]
#print(layer_names)
#input('check...')
#intermediate_layer_model = Model(inputs=model1.input,outputs=[model1.get_layer(layer_name).output for layer_name in layer_names])
intermediate_layer_model = Model(inputs=model1.input, outputs = [model1.get_layer(layer_names[-2]).output])
from tqdm import tqdm
cov = []
flag = 0
neuronlist = []
f = open('Cov/cross_entropy','w')
for g in tqdm(range(len(x_test))):
test_image = x_test[g].reshape([1,32,32,3])
intermediate_layer_outputs = intermediate_layer_model.predict(test_image)
#print(type(intermediate_layer_outputs[0]))
#print(intermediate_layer_outputs[0])
output = intermediate_layer_outputs[0].tolist()
#print(output)
#print(intermediate_layer_output[0])
#print(len(intermediate_layer_output[0]))
#input('pause...')
f.write(str(output) + '\n')
f.close()
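
# --- Hedged sketch (not part of the original script) ---
# update_coverage()/neuron_covered() defined above are never called; this is one
# way they could be wired up, assuming an interpreter compatible with the
# script's mixed Python-2 constructs (update_coverage uses xrange) and an
# activation threshold of 0.5.
# for g in range(len(x_test)):
#     test_image = x_test[g].reshape([1, 32, 32, 3])
#     update_coverage(test_image, model1, model_layer_dict1, threshold=0.5)
# covered, total, rate = neuron_covered(model_layer_dict1)
# print('neuron coverage: {}/{} = {:.3f}'.format(covered, total, rate))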
| [
"[email protected]"
] | |
1e9c16e2b6d642eebb25c59fd0d5220331672fb8 | 324d8a723bc057b4679014a1a7df08a013f2e237 | /torchpack/runner/hooks/logger.py | 49251f01012a787560503cb09c3bb34fd5925422 | [
"MIT"
] | permissive | lxx1991/torchpack | ff0db24c73479b8d4e1bf77dd5fda4e5a3a7b694 | 3de04972bca89e0a4c53fa896a4f9f62457adc75 | refs/heads/master | 2020-03-24T04:05:14.299613 | 2018-07-26T13:52:38 | 2018-07-26T13:52:38 | 142,443,539 | 0 | 0 | null | 2018-07-26T13:17:29 | 2018-07-26T13:17:29 | null | UTF-8 | Python | false | false | 781 | py | from .hook import Hook
class LoggerHook(Hook):
"""Base class for logger hooks."""
def __init__(self, interval=10, reset_meter=True, ignore_last=True):
self.interval = interval
self.reset_meter = reset_meter
self.ignore_last = ignore_last
def log(self, runner):
pass
def log_and_reset(self, runner):
self.log(runner)
if self.reset_meter:
runner.meter.reset()
def after_train_iter(self, runner):
if not self.every_n_inner_iters(runner, self.interval):
if not self.end_of_epoch(runner):
return
elif self.ignore_last:
return
self.log_and_reset(runner)
def after_val_epoch(self, runner):
self.log_and_reset(runner)
| [
"[email protected]"
] | |
2e5b6109e5a9caf2bc44433828a311f4a7bdbc4b | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5686313294495744_0/Python/jessethegame/technobabble.py | 4dc584f7ee2f0b34adfcd5dd99e118b303b19b94 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,067 | py | from collections import Counter
file = 'C-small-attempt0.in'
def technobabble(topics):
real_firsts = set()
real_seconds = set()
fakes = 0
while topics:
firsts, seconds = zip(*topics)
firsts_counter = Counter(firsts)
seconds_counter = Counter(seconds)
ranked_topics = [((firsts_counter[first] + seconds_counter[second]), first, second) for first, second in topics]
highest_first, highest_second = list(sorted(ranked_topics))[0][1:]
real_firsts.add(highest_first)
real_seconds.add(highest_second)
possible_fakes = len(topics) - 1
topics = [(first, second) for first, second in topics if not first in real_firsts or not second in real_seconds]
fakes += possible_fakes - len(topics)
return fakes
with open(file) as handle:
T = int(handle.readline())
for t in range(T):
N = int(handle.readline())
topics = []
for n in range(N):
topics.append(handle.readline().strip().split())
print "Case #{}: {}".format(t + 1, technobabble(topics))
| [
"[email protected]"
] | |
c239e77199b80a12345eece29962351d9e9c9e3a | aa4024b6a846d2f6032a9b79a89d2e29b67d0e49 | /mbeddr2C_MM/transformation_from_eclipse/Hlayer0rule10.py | bb1691eeceb274bc2e91c97bcfad803a7582cfba | [
"MIT"
] | permissive | levilucio/SyVOLT | 41311743d23fdb0b569300df464709c4954b8300 | 0f88827a653f2e9d3bb7b839a5253e74d48379dc | refs/heads/master | 2023-08-11T22:14:01.998341 | 2023-07-21T13:33:36 | 2023-07-21T13:33:36 | 36,246,850 | 3 | 2 | MIT | 2023-07-21T13:33:39 | 2015-05-25T18:15:26 | Python | UTF-8 | Python | false | false | 2,433 | py | from core.himesis import Himesis
import uuid
class Hlayer0rule10(Himesis):
def __init__(self):
"""
Creates the himesis graph representing the DSLTrans rule layer0rule10.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(Hlayer0rule10, self).__init__(name='Hlayer0rule10', num_nodes=0, edges=[])
# Set the graph attributes
self["mm__"] = ['HimesisMM']
self["name"] = """layer0rule10"""
self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'layer0rule10')
# match model. We only support one match model
self.add_node()
self.vs[0]["mm__"] = """MatchModel"""
# apply model node
self.add_node()
self.vs[1]["mm__"] = """ApplyModel"""
# paired with relation between match and apply models
self.add_node()
self.vs[2]["mm__"] = """paired_with"""
# match class RequiredPort(layer0rule10class0) node
self.add_node()
self.vs[3]["mm__"] = """RequiredPort"""
self.vs[3]["attr1"] = """+"""
# match_contains node for class RequiredPort(layer0rule10class0)
self.add_node()
self.vs[4]["mm__"] = """match_contains"""
# apply class Member(layer0rule10class1) node
self.add_node()
self.vs[5]["mm__"] = """Member"""
self.vs[5]["attr1"] = """1"""
# apply_contains node for class Member(layer0rule10class1)
self.add_node()
self.vs[6]["mm__"] = """apply_contains"""
# Add the edges
self.add_edges([
(0,4), # matchmodel -> match_contains
(4,3), # match_contains -> match_class RequiredPort(layer0rule10class0)
(1,6), # applymodel -> apply_contains
(6,5), # apply_contains -> apply_class Member(layer0rule10class1)
(0,2), # matchmodel -> pairedwith
(2,1) # pairedwith -> applyModel
])
# Add the attribute equations
self["equations"] = [((5,'name'),('concat',((3,'name'),('constant','__ops')))), ((5,'__ApplyAttribute'),('constant','RequiredPort_ops')), ]
| [
"[email protected]"
] | |
a865a4a19651e7b15a5546c69a0a6e8fd29a34e7 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_206/1242.py | fb480c086828b23e6c6b68107084423f36d2c390 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 564 | py | inputfile = open("h2.in", mode='r')
outputfile = open("output_h_l.txt", mode='w')
t = int(inputfile.readline().strip())
for case in range(t):
d, n = map(int, inputfile.readline().strip().split(' '))
time = 0
for i in range(n):
hD, hS = map(int, inputfile.readline().strip().split(' '))
hT = (d - hD) / hS
if hT > time:
time = hT
speed = d / time
outputfile.write("case #" + str(case + 1) + ": " + str(speed)+"\n")
print("case #" + str(case + 1) + ": " + str(speed))
outputfile.close()
inputfile.close()
| [
"[email protected]"
] | |
01956a1d6ef57c28be46ff7304cfc60c0c562d05 | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_7/ndxren013/util.py | f7c3c9c8c6cfa0051e619ed357a02dcd81f8c0e5 | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,152 | py | import copy
def create_grid(grid) :
"""create a 4x4 grid"""
for i in range(4):
tmp = []
for j in range(4):
tmp.append(0)
grid.append(tmp)
def print_grid(grid) :
"""print out a 4x4 grid in 5-width columns within a box"""
print("+--------------------+")
for i in range (4):
print('|', end = '')
for j in range (4) :
if grid[i][j] == 0:
print (" " + " "*(5-(len(str(grid[i][j])))), end = "")
else :
print (str(grid[i][j]) + " "*(5-(len(str(grid[i][j])))), end = "")
print("|")
print("+--------------------+")
def check_lost(grid) :
"""return True if there are no 0 values and no adjacent values that are equal; otherwise False"""
flag = True
for i in range (4) :
for j in range(4) :
if grid[i][j] == 0 :
flag = False
if flag == True :
for i in range (0,4,1) :
for j in range (0,4,1) :
if i < 3 and j < 3:
if grid[i][j] == grid[i][j+1] or grid[i][j] == grid[i+1][j] :
flag = False
if i == 3 and j == 3:
continue
if i == 3:
if grid[i][j] == grid[i][j + 1]:
flag = False
if j == 3:
if grid[i][j] == grid[i + 1][j]:
flag = False
return flag
def check_won(grid) :
"""return True if a value >= 32 is found in the grid; otherwise False"""
flag = False
for i in range (4) :
for j in range (4) :
if grid[i][j] >= 32 :
flag = True
return flag
def copy_grid(grid) :
"""return a copy of the grid"""
return copy.deepcopy(grid)
def grid_equal(grid1, grid2) :
"""check if 2 grids are equal - return boolean value"""
    return grid1 == grid2
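
# Hedged usage sketch (added for illustration; not part of the original file):
# exercises the helpers above on a small hand-built position.
if __name__ == "__main__":
    grid = []
    create_grid(grid)                     # 4x4 grid of zeros
    grid[0][0], grid[0][1] = 16, 16
    print_grid(grid)
    backup = copy_grid(grid)
    print(grid_equal(grid, backup))       # True
    print(check_won(grid))                # False: no tile has reached 32
    print(check_lost(grid))               # False: empty cells and a mergeable pair remain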
| [
"[email protected]"
] | |
71f69800aac7f2532c02268c353747e0fb1e2a77 | ee2444e8e70f136e6b34a35eb55dc287a7621956 | /clock/clocks.py | 20fd7688ffe7775b14b7fed30fc4decad5af4c9f | [
"Apache-2.0"
] | permissive | vyahello/desktop-clock | ec737c6e12273bc9f309e4dc921740b41e5fbef0 | b9db67ef646db7951354842846e8b6baf03d3076 | refs/heads/master | 2020-04-14T03:12:13.643967 | 2019-01-06T22:10:00 | 2019-01-06T22:10:00 | 163,602,100 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,701 | py | from abc import ABC, abstractmethod
from clock.environment.engines import Engine, ClockEngine
from clock.environment.time import Time, ClockTime
from clock.environment.widgets import Widget, PurpleUIWidget
from clock.environment.windows import Window
from clock.types import Runner
class Clock(ABC):
"""Represent abstraction of clock."""
@abstractmethod
def start(self) -> None:
"""Start running a clock."""
pass
@abstractmethod
def stop(self) -> None:
"""Stop running a clock."""
pass
class DigitalClock(Clock):
"""Unified digital clock."""
def __init__(self, name: str, time: Time, master: Window, widget: Widget) -> None:
self._name = name
self._master = master
self._widget = widget
self._engine: Engine = ClockEngine(time, widget)
def start(self) -> None:
self._master.set_title(self._name)
self._engine.run()
self._master.start_loop()
def stop(self) -> None:
self._master.stop_loop()
class PurpleDigitalClock(Clock):
"""Represent concrete purple digital clock."""
def __init__(self, master: Window, name: str) -> None:
self._clock: Clock = DigitalClock(
name=name,
time=ClockTime(),
master=master,
widget=PurpleUIWidget(master)
)
def start(self) -> None:
self._clock.start()
def stop(self) -> None:
self._clock.stop()
class ClockRunner(Runner):
"""Main clock runner."""
def __init__(self, clock: Clock) -> None:
self._clock: Clock = clock
def perform(self):
"""Start the clock functioning."""
self._clock.start()
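
# Hedged usage sketch (not part of the original file). The Window constructor's
# signature is not visible in this module, so `Window()` below is an assumption;
# everything else uses only the classes defined here.
#
# from clock.environment.windows import Window
#
# window = Window()                                  # assumed no-arg constructor
# clock = PurpleDigitalClock(master=window, name="Desktop clock")
# ClockRunner(clock).perform()                       # enters the Tk main loop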
| [
"[email protected]"
] | |
9889d3e62066d82b07468267398eeaee10b1399b | bd75c7ec55b78ef189f57596520744f82ec73073 | /Swap Nodes in Pairs.py | 06df07c8a141d9b99481b27baa91b0459c330f30 | [] | no_license | GaoLF/LeetCode-PY | 17058ac0743403292559f9b83a20bf79d89e33f6 | ccd294cfe0c228a21518d077d1aa01e510930ea3 | refs/heads/master | 2021-01-23T02:24:05.940132 | 2015-07-22T13:44:01 | 2015-07-22T13:44:01 | 38,248,471 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,085 | py | # Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
# @param {ListNode} head
# @return {ListNode}
def swapPairs(self, head):
if not head:
return head
iter = head
temp = head
while iter and iter.next:
temp_node = temp.next
if temp == head:
temp_node = iter.next.next
head = head.next
head.next = iter
head.next.next = temp_node
iter = temp_node
else:
temp_node = iter.next.next
temp.next = iter.next
iter.next.next = iter
iter.next = temp_node
temp = iter
iter = iter.next
return head
A = Solution()
a = ListNode(1)
b = ListNode(2)
c = ListNode(3)
d = ListNode(4)
e = ListNode(5)
f = ListNode(6)
#a.next = b
b.next = c
c.next = d
d.next = e
e.next = f
x = A.swapPairs(a)
while x:
print x.val
x = x.next
| [
"[email protected]"
] | |
f3308bc272f6a64b2c2b9e3e700cf49d912db947 | f7b035ee2eccd647a20ac49b0f6a8a2d7c854c81 | /foo.py | ee566b3d0c23c928a4c9f11a5bcadac04d29e753 | [] | no_license | lugrace/sryr-research | 00e0d9ef53573e4a5bec7311b775f61bccd92a8e | 245435161d38009026b21f4811155154927e5ecf | refs/heads/master | 2021-09-15T06:50:54.689077 | 2018-05-28T03:41:11 | 2018-05-28T03:41:11 | 110,488,453 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 54 | py | import math
def jumpInt(bar):
Foo = jumpString(bar)
| [
"[email protected]"
] | |
16ae4e8715304dfc152331ccbd298ad4158b5b5b | 113d34bc3a8a9d43c770fd41ee327fd3cbca67dd | /Python3/Path in the matrix.py | 24465fdc0b35da89dd603edf323da1f75d640c77 | [] | no_license | liuyuhang791034063/LeetCode | 2b5d3413abc3ed6f8fccf35f39454e2cfd9807b1 | b613718bf69982535b7c3c9f329a47d5741d8a9e | refs/heads/master | 2020-03-29T01:58:25.836162 | 2019-07-27T04:33:06 | 2019-07-27T04:33:06 | 149,415,780 | 12 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,553 | py | # -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: Path in the matrix
Description:
Author: God
date: 2018/12/3
-------------------------------------------------
Change Activity: 2018/12/3
-------------------------------------------------
"""
__author__ = 'God'
class Solution:
def hasPath(self, matrix, rows, cols, path):
self.col, self.row = cols, rows
matrix = [list(matrix[cols * i:cols * i + cols]) for i in range(rows)]
for i in range(rows):
for j in range(cols):
if matrix[i][j] == path[0]:
self.b = False
self.search(matrix, path[1:], [(i, j)], i, j)
if self.b:
return True
return False
def search(self, matrix, word, exists, i, j):
if word == '':
self.b = True
return
if j != 0 and (i, j-1) not in exists and matrix[i][j-1] == word[0]:
self.search(matrix, word[1:], exists+[(i, j-1)], i, j-1)
if i != 0 and (i-1, j) not in exists and matrix[i-1][j] == word[0]:
self.search(matrix, word[1:], exists+[(i-1, j)], i-1, j)
if j != self.col - 1 and (i, j+1) not in exists and matrix[i][j+1] == word[0]:
self.search(matrix, word[1:], exists+[(i, j+1)], i, j+1)
if i != self.row - 1 and (i+1, j) not in exists and matrix[i+1][j] == word[0]:
self.search(matrix, word[1:], exists+[(i+1, j)], i+1, j) | [
"[email protected]"
] | |
ca7d198a0e326232ec911103384438c043f5be31 | a1eb0bb73680bc42af97eea6b4d7811453dc6758 | /clustering/venv/Scripts/easy_install-3.6-script.py | 068e92aa69165faea6f8172bb1529439b7853e02 | [] | no_license | PotatoPig/machine-learning | 23c2ba5e7cf9d66c92309437d47d139bbf4e866f | eb7ae7b8bc03d765e508b1a1c222ea15d25b1c21 | refs/heads/master | 2020-07-22T15:00:03.607116 | 2019-09-09T06:47:33 | 2019-09-09T06:47:33 | 207,239,820 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 460 | py | #!D:\CS_Project\MachineLearning\clustering\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install-3.6'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install-3.6')()
)
| [
"Binhan Xu"
] | Binhan Xu |
0e602feaaebaabffc917cb62b6c5e9a85335fffa | f312fcd24d94be8b32e2d1e50643b01c619aa23b | /tensorboard/plugins/wit_redirect/wit_redirect_plugin.py | 58bcd4d9cfb12ad96adb029933e9ef31ab6a7ad5 | [
"Apache-2.0"
] | permissive | tensorflow/tensorboard | bf316fc5d47f78ef980dd2106c99207892a508d5 | 5961c76dca0fb9bb40d146f5ce13834ac29d8ddb | refs/heads/master | 2023-09-03T23:59:03.264261 | 2023-08-30T22:24:07 | 2023-08-30T22:24:07 | 91,379,993 | 6,766 | 2,063 | Apache-2.0 | 2023-09-14T20:55:56 | 2017-05-15T20:08:07 | TypeScript | UTF-8 | Python | false | false | 1,682 | py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Plugin that only displays a message with installation instructions."""
from tensorboard.plugins import base_plugin
class WITRedirectPluginLoader(base_plugin.TBLoader):
"""Load the redirect notice iff the dynamic plugin is unavailable."""
def load(self, context):
try:
import tensorboard_plugin_wit # noqa: F401
# If we successfully load the dynamic plugin, don't show
# this redirect plugin at all.
return None
except ImportError:
return _WITRedirectPlugin(context)
class _WITRedirectPlugin(base_plugin.TBPlugin):
"""Redirect notice pointing users to the new dynamic LIT plugin."""
plugin_name = "wit_redirect"
def get_plugin_apps(self):
return {}
def is_active(self):
return False
def frontend_metadata(self):
return base_plugin.FrontendMetadata(
element_name="tf-wit-redirect-dashboard",
tab_name="What-If Tool",
)
| [
"[email protected]"
] | |
81a1f59117d0eb5e1339b1325cfa301b91127add | 5b4b1866571453f78db5b06a08ff0eda17b91b04 | /test/vanilla/Expected/AcceptanceTests/Url/url/operations/_paths_operations.py | a142282285bee503445254ca4ba268c9aff892b9 | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | koek67/autorest.azure-functions-python | ba345f1d194ca7431daab1210a0cd801d4946991 | b0896d8aec6b0fd6f0bcb12ea8e0489652dc2783 | refs/heads/main | 2022-12-20T13:27:56.405901 | 2020-09-30T08:23:11 | 2020-09-30T08:23:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 51,127 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.tracing.decorator import distributed_trace
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class PathsOperations(object):
"""PathsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~url.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def get_boolean_true(
self,
**kwargs # type: Any
):
# type: (...) -> None
"""Get true Boolean value on path.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
bool_path = True
# Construct URL
url = self.get_boolean_true.metadata['url'] # type: ignore
path_format_arguments = {
'boolPath': self._serialize.url("bool_path", bool_path, 'bool'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.Error, response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
get_boolean_true.metadata = {'url': '/paths/bool/true/{boolPath}'} # type: ignore
@distributed_trace
def get_boolean_false(
self,
**kwargs # type: Any
):
# type: (...) -> None
"""Get false Boolean value on path.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
bool_path = False
# Construct URL
url = self.get_boolean_false.metadata['url'] # type: ignore
path_format_arguments = {
'boolPath': self._serialize.url("bool_path", bool_path, 'bool'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.Error, response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
get_boolean_false.metadata = {'url': '/paths/bool/false/{boolPath}'} # type: ignore
@distributed_trace
def get_int_one_million(
self,
**kwargs # type: Any
):
# type: (...) -> None
"""Get '1000000' integer value.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
int_path = 1000000
# Construct URL
url = self.get_int_one_million.metadata['url'] # type: ignore
path_format_arguments = {
'intPath': self._serialize.url("int_path", int_path, 'int'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.Error, response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
get_int_one_million.metadata = {'url': '/paths/int/1000000/{intPath}'} # type: ignore
@distributed_trace
def get_int_negative_one_million(
self,
**kwargs # type: Any
):
# type: (...) -> None
"""Get '-1000000' integer value.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
int_path = -1000000
# Construct URL
url = self.get_int_negative_one_million.metadata['url'] # type: ignore
path_format_arguments = {
'intPath': self._serialize.url("int_path", int_path, 'int'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.Error, response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
get_int_negative_one_million.metadata = {'url': '/paths/int/-1000000/{intPath}'} # type: ignore
@distributed_trace
def get_ten_billion(
self,
**kwargs # type: Any
):
# type: (...) -> None
"""Get '10000000000' 64 bit integer value.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
long_path = 10000000000
# Construct URL
url = self.get_ten_billion.metadata['url'] # type: ignore
path_format_arguments = {
'longPath': self._serialize.url("long_path", long_path, 'long'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.Error, response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
get_ten_billion.metadata = {'url': '/paths/long/10000000000/{longPath}'} # type: ignore
@distributed_trace
def get_negative_ten_billion(
self,
**kwargs # type: Any
):
# type: (...) -> None
"""Get '-10000000000' 64 bit integer value.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
long_path = -10000000000
# Construct URL
url = self.get_negative_ten_billion.metadata['url'] # type: ignore
path_format_arguments = {
'longPath': self._serialize.url("long_path", long_path, 'long'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.Error, response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
get_negative_ten_billion.metadata = {'url': '/paths/long/-10000000000/{longPath}'} # type: ignore
@distributed_trace
def float_scientific_positive(
self,
**kwargs # type: Any
):
# type: (...) -> None
"""Get '1.034E+20' numeric value.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
float_path = 103400000000000000000
# Construct URL
url = self.float_scientific_positive.metadata['url'] # type: ignore
path_format_arguments = {
'floatPath': self._serialize.url("float_path", float_path, 'float'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.Error, response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
float_scientific_positive.metadata = {'url': '/paths/float/1.034E+20/{floatPath}'} # type: ignore
@distributed_trace
def float_scientific_negative(
self,
**kwargs # type: Any
):
# type: (...) -> None
"""Get '-1.034E-20' numeric value.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
float_path = -1.034e-20
# Construct URL
url = self.float_scientific_negative.metadata['url'] # type: ignore
path_format_arguments = {
'floatPath': self._serialize.url("float_path", float_path, 'float'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.Error, response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
float_scientific_negative.metadata = {'url': '/paths/float/-1.034E-20/{floatPath}'} # type: ignore
@distributed_trace
def double_decimal_positive(
self,
**kwargs # type: Any
):
# type: (...) -> None
"""Get '9999999.999' numeric value.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
double_path = 9999999.999
# Construct URL
url = self.double_decimal_positive.metadata['url'] # type: ignore
path_format_arguments = {
'doublePath': self._serialize.url("double_path", double_path, 'float'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.Error, response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
double_decimal_positive.metadata = {'url': '/paths/double/9999999.999/{doublePath}'} # type: ignore
@distributed_trace
def double_decimal_negative(
self,
**kwargs # type: Any
):
# type: (...) -> None
"""Get '-9999999.999' numeric value.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
double_path = -9999999.999
# Construct URL
url = self.double_decimal_negative.metadata['url'] # type: ignore
path_format_arguments = {
'doublePath': self._serialize.url("double_path", double_path, 'float'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.Error, response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
double_decimal_negative.metadata = {'url': '/paths/double/-9999999.999/{doublePath}'} # type: ignore
@distributed_trace
def string_unicode(
self,
**kwargs # type: Any
):
# type: (...) -> None
"""Get '啊齄丂狛狜隣郎隣兀﨩' multi-byte string value.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
string_path = "啊齄丂狛狜隣郎隣兀﨩"
# Construct URL
url = self.string_unicode.metadata['url'] # type: ignore
path_format_arguments = {
'stringPath': self._serialize.url("string_path", string_path, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.Error, response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
string_unicode.metadata = {'url': '/paths/string/unicode/{stringPath}'} # type: ignore
@distributed_trace
def string_url_encoded(
self,
**kwargs # type: Any
):
# type: (...) -> None
"""Get 'begin!*'();:@ &=+$,/?#[]end.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
string_path = "begin!*'();:@ &=+$,/?#[]end"
# Construct URL
url = self.string_url_encoded.metadata['url'] # type: ignore
path_format_arguments = {
'stringPath': self._serialize.url("string_path", string_path, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.Error, response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
string_url_encoded.metadata = {'url': '/paths/string/begin%21%2A%27%28%29%3B%3A%40%20%26%3D%2B%24%2C%2F%3F%23%5B%5Dend/{stringPath}'} # type: ignore
@distributed_trace
def string_url_non_encoded(
self,
**kwargs # type: Any
):
# type: (...) -> None
"""Get 'begin!*'();:@&=+$,end.
https://tools.ietf.org/html/rfc3986#appendix-A 'path' accept any 'pchar' not encoded.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
string_path = "begin!*'();:@&=+$,end"
# Construct URL
url = self.string_url_non_encoded.metadata['url'] # type: ignore
path_format_arguments = {
'stringPath': self._serialize.url("string_path", string_path, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.Error, response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
string_url_non_encoded.metadata = {'url': '/paths/string/begin!*\'();:@&=+$,end/{stringPath}'} # type: ignore
@distributed_trace
def string_empty(
self,
**kwargs # type: Any
):
# type: (...) -> None
"""Get ''.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
string_path = ""
# Construct URL
url = self.string_empty.metadata['url'] # type: ignore
path_format_arguments = {
'stringPath': self._serialize.url("string_path", string_path, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.Error, response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
string_empty.metadata = {'url': '/paths/string/empty/{stringPath}'} # type: ignore
@distributed_trace
def string_null(
self,
string_path, # type: str
**kwargs # type: Any
):
# type: (...) -> None
"""Get null (should throw).
:param string_path: null string value.
:type string_path: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
# Construct URL
url = self.string_null.metadata['url'] # type: ignore
path_format_arguments = {
'stringPath': self._serialize.url("string_path", string_path, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [400]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.Error, response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
string_null.metadata = {'url': '/paths/string/null/{stringPath}'} # type: ignore
@distributed_trace
def enum_valid(
self,
enum_path, # type: Union[str, "models.UriColor"]
**kwargs # type: Any
):
# type: (...) -> None
"""Get using uri with 'green color' in path parameter.
:param enum_path: send the value green.
:type enum_path: str or ~url.models.UriColor
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
# Construct URL
url = self.enum_valid.metadata['url'] # type: ignore
path_format_arguments = {
'enumPath': self._serialize.url("enum_path", enum_path, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.Error, response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
enum_valid.metadata = {'url': '/paths/enum/green%20color/{enumPath}'} # type: ignore
@distributed_trace
def enum_null(
self,
enum_path, # type: Union[str, "models.UriColor"]
**kwargs # type: Any
):
# type: (...) -> None
"""Get null (should throw on the client before the request is sent on wire).
:param enum_path: send null should throw.
:type enum_path: str or ~url.models.UriColor
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
# Construct URL
url = self.enum_null.metadata['url'] # type: ignore
path_format_arguments = {
'enumPath': self._serialize.url("enum_path", enum_path, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [400]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.Error, response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
enum_null.metadata = {'url': '/paths/string/null/{enumPath}'} # type: ignore
@distributed_trace
def byte_multi_byte(
self,
byte_path, # type: bytearray
**kwargs # type: Any
):
# type: (...) -> None
"""Get '啊齄丂狛狜隣郎隣兀﨩' multibyte value as utf-8 encoded byte array.
:param byte_path: '啊齄丂狛狜隣郎隣兀﨩' multibyte value as utf-8 encoded byte array.
:type byte_path: bytearray
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
# Construct URL
url = self.byte_multi_byte.metadata['url'] # type: ignore
path_format_arguments = {
'bytePath': self._serialize.url("byte_path", byte_path, 'bytearray'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.Error, response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
byte_multi_byte.metadata = {'url': '/paths/byte/multibyte/{bytePath}'} # type: ignore
@distributed_trace
def byte_empty(
self,
**kwargs # type: Any
):
# type: (...) -> None
"""Get '' as byte array.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
byte_path = bytearray("", encoding="utf-8")
# Construct URL
url = self.byte_empty.metadata['url'] # type: ignore
path_format_arguments = {
'bytePath': self._serialize.url("byte_path", byte_path, 'bytearray'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.Error, response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
byte_empty.metadata = {'url': '/paths/byte/empty/{bytePath}'} # type: ignore
@distributed_trace
def byte_null(
self,
byte_path, # type: bytearray
**kwargs # type: Any
):
# type: (...) -> None
"""Get null as byte array (should throw).
:param byte_path: null as byte array (should throw).
:type byte_path: bytearray
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
# Construct URL
url = self.byte_null.metadata['url'] # type: ignore
path_format_arguments = {
'bytePath': self._serialize.url("byte_path", byte_path, 'bytearray'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [400]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.Error, response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
byte_null.metadata = {'url': '/paths/byte/null/{bytePath}'} # type: ignore
@distributed_trace
def date_valid(
self,
**kwargs # type: Any
):
# type: (...) -> None
"""Get '2012-01-01' as date.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
date_path = "2012-01-01"
# Construct URL
url = self.date_valid.metadata['url'] # type: ignore
path_format_arguments = {
'datePath': self._serialize.url("date_path", date_path, 'date'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.Error, response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
date_valid.metadata = {'url': '/paths/date/2012-01-01/{datePath}'} # type: ignore
@distributed_trace
def date_null(
self,
date_path, # type: datetime.date
**kwargs # type: Any
):
# type: (...) -> None
"""Get null as date - this should throw or be unusable on the client side, depending on date
representation.
:param date_path: null as date (should throw).
:type date_path: ~datetime.date
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
# Construct URL
url = self.date_null.metadata['url'] # type: ignore
path_format_arguments = {
'datePath': self._serialize.url("date_path", date_path, 'date'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [400]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.Error, response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
date_null.metadata = {'url': '/paths/date/null/{datePath}'} # type: ignore
@distributed_trace
def date_time_valid(
self,
**kwargs # type: Any
):
# type: (...) -> None
"""Get '2012-01-01T01:01:01Z' as date-time.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
date_time_path = "2012-01-01T01:01:01Z"
# Construct URL
url = self.date_time_valid.metadata['url'] # type: ignore
path_format_arguments = {
'dateTimePath': self._serialize.url("date_time_path", date_time_path, 'iso-8601'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.Error, response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
date_time_valid.metadata = {'url': '/paths/datetime/2012-01-01T01%3A01%3A01Z/{dateTimePath}'} # type: ignore
@distributed_trace
def date_time_null(
self,
date_time_path, # type: datetime.datetime
**kwargs # type: Any
):
# type: (...) -> None
"""Get null as date-time, should be disallowed or throw depending on representation of date-time.
:param date_time_path: null as date-time.
:type date_time_path: ~datetime.datetime
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
# Construct URL
url = self.date_time_null.metadata['url'] # type: ignore
path_format_arguments = {
'dateTimePath': self._serialize.url("date_time_path", date_time_path, 'iso-8601'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [400]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.Error, response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
date_time_null.metadata = {'url': '/paths/datetime/null/{dateTimePath}'} # type: ignore
@distributed_trace
def base64_url(
self,
base64_url_path, # type: bytes
**kwargs # type: Any
):
# type: (...) -> None
"""Get 'lorem' encoded value as 'bG9yZW0' (base64url).
:param base64_url_path: base64url encoded value.
:type base64_url_path: bytes
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
# Construct URL
url = self.base64_url.metadata['url'] # type: ignore
path_format_arguments = {
'base64UrlPath': self._serialize.url("base64_url_path", base64_url_path, 'base64'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.Error, response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
base64_url.metadata = {'url': '/paths/string/bG9yZW0/{base64UrlPath}'} # type: ignore
@distributed_trace
def array_csv_in_path(
self,
array_path, # type: List[str]
**kwargs # type: Any
):
# type: (...) -> None
"""Get an array of string ['ArrayPath1', 'begin!*'();:@ &=+$,/?#[]end' , null, ''] using the csv-
array format.
:param array_path: an array of string ['ArrayPath1', 'begin!*'();:@ &=+$,/?#[]end' , null, '']
using the csv-array format.
:type array_path: list[str]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
# Construct URL
url = self.array_csv_in_path.metadata['url'] # type: ignore
path_format_arguments = {
'arrayPath': self._serialize.url("array_path", array_path, '[str]', div=','),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.Error, response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
array_csv_in_path.metadata = {'url': '/paths/array/ArrayPath1%2cbegin%21%2A%27%28%29%3B%3A%40%20%26%3D%2B%24%2C%2F%3F%23%5B%5Dend%2c%2c/{arrayPath}'} # type: ignore
@distributed_trace
def unix_time_url(
self,
unix_time_url_path, # type: datetime.datetime
**kwargs # type: Any
):
# type: (...) -> None
"""Get the date 2016-04-13 encoded value as '1460505600' (Unix time).
:param unix_time_url_path: Unix time encoded value.
:type unix_time_url_path: ~datetime.datetime
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
# Construct URL
url = self.unix_time_url.metadata['url'] # type: ignore
path_format_arguments = {
'unixTimeUrlPath': self._serialize.url("unix_time_url_path", unix_time_url_path, 'unix-time'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.Error, response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
unix_time_url.metadata = {'url': '/paths/int/1460505600/{unixTimeUrlPath}'} # type: ignore
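# ---------------------------------------------------------------------------
# Editor's note (illustrative sketch, not part of the generated client): every
# operation above follows the same template -- serialize the path parameter,
# substitute it into the URL pattern, issue a GET, and raise HttpResponseError
# for any status code other than the single expected one (200, or 400 for the
# deliberate "null" test cases).  Callers normally reach these methods through
# the generated service client; the client class name and endpoint below are
# assumptions for illustration only.
#
#     from url import AutoRestUrlTestService          # assumed import path
#     client = AutoRestUrlTestService(base_url="http://localhost:3000")
#     client.paths.enum_valid("green color")           # expects HTTP 200
#     client.paths.date_valid()                        # constant path value
#     client.paths.array_csv_in_path(
#         ["ArrayPath1", "begin!*'();:@ &=+$,/?#[]end", None, ""])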
| [
"[email protected]"
] | |
13205a31e87c1b4daed148c5469c509a8f892bfa | ff810e6722caab8c0affcf97151f3c8fc332b6a1 | /muddery/worlddata/dao/event_mapper.py | cabd7a3f72345ee671298bcbd1c1df5684fb79c3 | [
"BSD-3-Clause"
] | permissive | tuchang/muddery | 014b69daf33a0042d341b403acc9939ca5e3ef11 | bab4b86c5fe4259b7c22a97d54e4249aab47f99e | refs/heads/master | 2020-04-26T23:38:40.383523 | 2019-01-30T14:54:07 | 2019-01-30T14:54:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 506 | py | """
Query and handle common data tables.
"""
from __future__ import print_function
from evennia.utils import logger
from django.db import transaction
from django.apps import apps
from django.conf import settings
from muddery.utils import defines
from muddery.worlddata.dao.common_mapper_base import ObjectsMapper
def get_object_event(object_key):
"""
    Get the event records triggered by the given object.
"""
model = apps.get_model(settings.WORLD_DATA_APP, "event_data")
return model.objects.filter(trigger_obj=object_key)
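# Illustrative usage sketch (editor's addition): the function returns a normal
# Django queryset, so callers can iterate it or chain further filters.  The
# object key and field names below are made-up examples, not part of this
# module.
#
#     for event in get_object_event("npc_merchant_01"):
#         print(event.key, event.trigger_type)
#
#     has_events = get_object_event("npc_merchant_01").exists()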
| [
"[email protected]"
] | |
8a60ef7df7b4593fe623e1cd3e266dec4a72850c | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/triangle_20200625195423.py | f43a71703b57e579886f578c35c9e086ac7dbb53 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 263 | py | # this function is meant to print a triangle
def triangle():
    # outer loop controls the number of rows --> 4
    for i in range(0, 4):
        # inner loop prints i + 1 stars on the current row (columns)
        for j in range(0, i + 1):
            print('*', end='')
        # move to the next row once its stars are printed
        print()
triangle()
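# Expected output of the corrected function (one extra star per row):
# *
# **
# ***
# ****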
| [
"[email protected]"
] | |
0efe069b4fa0143a1c876e9da6b2664df329c8ff | 8821c29949644faab3023f492bf615fe1cab4049 | /liquidluck/writers/extends.py | dc50589efe1785f5f245c316b5bf36a2d61828f3 | [] | no_license | loopwerk/liquidluck | c29bf2cc70343418dbe6a4dd3b55c9ec96e1f503 | d2c41f0b7cddabd3036bab514cf5ecbcc57becea | refs/heads/main | 2023-02-26T15:41:06.173164 | 2021-02-02T16:32:32 | 2021-02-02T16:32:32 | 335,299,233 | 1 | 0 | null | 2021-02-02T13:32:14 | 2021-02-02T13:32:14 | null | UTF-8 | Python | false | false | 2,165 | py | #!/usr/bin/env python
"""Extends of the core writers
"""
import os
from liquidluck.options import g, settings
from liquidluck.writers.base import BaseWriter
from liquidluck.writers.base import get_post_destination
class PostWriter(BaseWriter):
"""Replace the default post writer, edit settings::
writers = {
            'post': 'liquidluck.writers.extends.PostWriter',
}
Get related posts in template with::
- {{post.relation.newer}}
- {{post.relation.older}}
- {% for item in post.relation.related %}
"""
writer_name = 'post'
def __init__(self):
self._template = self.get('post_template', 'post.html')
def start(self):
for index, post in enumerate(g.public_posts):
template = post.template or self._template
relation = self._get_relations(post, index)
post.relation = relation
self.render({'post': post}, template, self._dest_of(post))
        for post in g.secure_posts:
            post.relation = None
            template = post.template or self._template
            self.render({'post': post}, template, self._dest_of(post))
def _dest_of(self, post):
dest = get_post_destination(post, settings.config['permalink'])
return os.path.join(g.output_directory, dest)
def _get_relations(self, post, index):
total = len(g.public_posts)
newer = None
if index > 0:
newer = g.public_posts[index - 1]
older = None
if index < total - 1:
older = g.public_posts[index + 1]
def get_related_by_tags():
tags = set(post.tags)
base = len(post.tags)
for p in g.public_posts:
prior = len(tags - set(p.tags))
if prior < base and p.title != post.title:
p.related_priority = base - prior
yield p
related = sorted(get_related_by_tags(),
key=lambda o: o.related_priority,
reverse=True)
relation = {
'newer': newer,
'older': older,
'related': related[:4],
}
return relation
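# Editor's note (illustrative): _get_relations ranks candidate posts by how
# many tags they share with the current post.  For example, with the current
# post tagged {"python", "blog"}, a post tagged {"python", "blog", "web"} gets
# related_priority 2 and outranks a post tagged {"python"} (priority 1); only
# the four highest-priority posts are exposed as post.relation.related.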
| [
"[email protected]"
] | |
eb7275ac23673310b390268735873fa20ddf60fc | 5c3251a8c2f355452a5240518684eb935f9809af | /tasks_project_v2/.tox/py36/lib/python3.6/site-packages/tinydb/queries.py | 972010ba0b7ef014cdc596aaf94e6702eaf8e794 | [] | no_license | cbohara/pytest | b3783ab590aaf7b6a77a2ec372e5206a7152e574 | 13b0ed1c3bcb06042da561a27f4c8857ffefd610 | refs/heads/master | 2021-05-15T12:55:14.841045 | 2017-10-31T03:46:27 | 2017-10-31T03:46:27 | 108,433,716 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 10,089 | py | """
Contains the querying interface.
Starting with :class:`~tinydb.queries.Query` you can construct complex
queries:
>>> ((where('f1') == 5) & (where('f2') != 2)) | where('s').matches(r'^\w+$')
(('f1' == 5) and ('f2' != 2)) or ('s' ~= ^\w+$ )
Queries are executed by using the ``__call__``:
>>> q = where('val') == 5
>>> q({'val': 5})
True
>>> q({'val': 1})
False
"""
import re
import sys
from .utils import catch_warning, freeze
__all__ = ('Query', 'where')
def is_sequence(obj):
return hasattr(obj, '__iter__')
class QueryImpl(object):
"""
A query implementation.
This query implementation wraps a test function which is run when the
query is evaluated by calling the object.
Queries can be combined with logical and/or and modified with logical not.
"""
def __init__(self, test, hashval):
self.test = test
self.hashval = hashval
def __call__(self, value):
return self.test(value)
def __hash__(self):
return hash(self.hashval)
def __repr__(self):
return 'QueryImpl{0}'.format(self.hashval)
def __eq__(self, other):
return self.hashval == other.hashval
# --- Query modifiers -----------------------------------------------------
def __and__(self, other):
# We use a frozenset for the hash as the AND operation is commutative
# (a | b == b | a)
return QueryImpl(lambda value: self(value) and other(value),
('and', frozenset([self.hashval, other.hashval])))
def __or__(self, other):
# We use a frozenset for the hash as the OR operation is commutative
# (a & b == b & a)
return QueryImpl(lambda value: self(value) or other(value),
('or', frozenset([self.hashval, other.hashval])))
def __invert__(self):
return QueryImpl(lambda value: not self(value),
('not', self.hashval))
class Query(object):
"""
TinyDB Queries.
Allows to build queries for TinyDB databases. There are two main ways of
using queries:
1) ORM-like usage:
>>> User = Query()
>>> db.search(User.name == 'John Doe')
>>> db.search(User['logged-in'] == True)
2) Classical usage:
>>> db.search(where('value') == True)
Note that ``where(...)`` is a shorthand for ``Query(...)`` allowing for
a more fluent syntax.
Besides the methods documented here you can combine queries using the
binary AND and OR operators:
>>> db.search(where('field1').exists() & where('field2') == 5) # Binary AND
>>> db.search(where('field1').exists() | where('field2') == 5) # Binary OR
Queries are executed by calling the resulting object. They expect to get
the document to test as the first argument and return ``True`` or
``False`` depending on whether the documents matches the query or not.
"""
def __init__(self):
self._path = []
def __getattr__(self, item):
query = Query()
query._path = self._path + [item]
return query
__getitem__ = __getattr__
def _generate_test(self, test, hashval):
"""
Generate a query based on a test function.
:param test: The test the query executes.
:param hashval: The hash of the query.
:return: A :class:`~tinydb.queries.QueryImpl` object
"""
if not self._path:
raise ValueError('Query has no path')
def impl(value):
try:
# Resolve the path
for part in self._path:
value = value[part]
except (KeyError, TypeError):
return False
else:
return test(value)
return QueryImpl(impl, hashval)
def __eq__(self, rhs):
"""
Test a dict value for equality.
>>> Query().f1 == 42
:param rhs: The value to compare against
"""
if sys.version_info <= (3, 0): # pragma: no cover
# Special UTF-8 handling on Python 2
def test(value):
with catch_warning(UnicodeWarning):
try:
return value == rhs
except UnicodeWarning:
# Dealing with a case, where 'value' or 'rhs'
# is unicode and the other is a byte string.
if isinstance(value, str):
return value.decode('utf-8') == rhs
elif isinstance(rhs, str):
return value == rhs.decode('utf-8')
else: # pragma: no cover
def test(value):
return value == rhs
return self._generate_test(lambda value: test(value),
('==', tuple(self._path), freeze(rhs)))
def __ne__(self, rhs):
"""
Test a dict value for inequality.
>>> Query().f1 != 42
:param rhs: The value to compare against
"""
return self._generate_test(lambda value: value != rhs,
('!=', tuple(self._path), freeze(rhs)))
def __lt__(self, rhs):
"""
Test a dict value for being lower than another value.
>>> Query().f1 < 42
:param rhs: The value to compare against
"""
return self._generate_test(lambda value: value < rhs,
('<', tuple(self._path), rhs))
def __le__(self, rhs):
"""
Test a dict value for being lower than or equal to another value.
>>> where('f1') <= 42
:param rhs: The value to compare against
"""
return self._generate_test(lambda value: value <= rhs,
('<=', tuple(self._path), rhs))
def __gt__(self, rhs):
"""
Test a dict value for being greater than another value.
>>> Query().f1 > 42
:param rhs: The value to compare against
"""
return self._generate_test(lambda value: value > rhs,
('>', tuple(self._path), rhs))
def __ge__(self, rhs):
"""
Test a dict value for being greater than or equal to another value.
>>> Query().f1 >= 42
:param rhs: The value to compare against
"""
return self._generate_test(lambda value: value >= rhs,
('>=', tuple(self._path), rhs))
def exists(self):
"""
Test for a dict where a provided key exists.
        >>> Query().f1.exists()
"""
return self._generate_test(lambda _: True,
('exists', tuple(self._path)))
def matches(self, regex):
"""
Run a regex test against a dict value (whole string has to match).
>>> Query().f1.matches(r'^\w+$')
:param regex: The regular expression to use for matching
"""
return self._generate_test(lambda value: re.match(regex, value),
('matches', tuple(self._path), regex))
def search(self, regex):
"""
Run a regex test against a dict value (only substring string has to
match).
>>> Query().f1.search(r'^\w+$')
:param regex: The regular expression to use for matching
"""
return self._generate_test(lambda value: re.search(regex, value),
('search', tuple(self._path), regex))
def test(self, func, *args):
"""
Run a user-defined test function against a dict value.
>>> def test_func(val):
... return val == 42
...
>>> Query().f1.test(test_func)
:param func: The function to call, passing the dict as the first
argument
:param args: Additional arguments to pass to the test function
"""
return self._generate_test(lambda value: func(value, *args),
('test', tuple(self._path), func, args))
def any(self, cond):
"""
Checks if a condition is met by any document in a list,
where a condition can also be a sequence (e.g. list).
>>> Query().f1.any(Query().f2 == 1)
Matches::
{'f1': [{'f2': 1}, {'f2': 0}]}
>>> Query().f1.any([1, 2, 3])
Matches::
{'f1': [1, 2]}
{'f1': [3, 4, 5]}
:param cond: Either a query that at least one document has to match or
a list of which at least one document has to be contained
in the tested document.
- """
if callable(cond):
def _cmp(value):
return is_sequence(value) and any(cond(e) for e in value)
else:
def _cmp(value):
return is_sequence(value) and any(e in cond for e in value)
return self._generate_test(lambda value: _cmp(value),
('any', tuple(self._path), freeze(cond)))
def all(self, cond):
"""
Checks if a condition is met by any document in a list,
where a condition can also be a sequence (e.g. list).
>>> Query().f1.all(Query().f2 == 1)
Matches::
{'f1': [{'f2': 1}, {'f2': 1}]}
>>> Query().f1.all([1, 2, 3])
Matches::
{'f1': [1, 2, 3, 4, 5]}
:param cond: Either a query that all documents have to match or a list
which has to be contained in the tested document.
"""
if callable(cond):
def _cmp(value):
return is_sequence(value) and all(cond(e) for e in value)
else:
def _cmp(value):
return is_sequence(value) and all(e in value for e in cond)
return self._generate_test(lambda value: _cmp(value),
('all', tuple(self._path), freeze(cond)))
def where(key):
return Query()[key]
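# Minimal usage sketch (editor's addition): queries built with this module are
# plain callables, so they can be evaluated against a dict without touching a
# database.  The field names below are made-up examples.
if __name__ == '__main__':
    User = Query()
    adult_john = (User.name == 'John') & User.age.test(lambda age: age >= 18)
    print(adult_john({'name': 'John', 'age': 30}))   # True
    print(adult_john({'name': 'John', 'age': 10}))   # False
    print(where('tags').any(['python'])({'tags': ['python', 'tinydb']}))  # True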
| [
"[email protected]"
] | |
c56af7a7378410adccb3cbec6898cb84a8ad7148 | fd711822ba16edf959d729cfecec2903d06a7e65 | /backend/msm_mobile_2212fgbx_17149/settings.py | 3df27e4c1a10fbc3a9554cff8bf5434e37862f55 | [] | no_license | crowdbotics-apps/msm-mobile-2212fgbx-17149 | 21c5aac1ef48afcc2fa2cbc67322844365d2b6ae | 9e0df24add57b3ed9d6d2cf51b9411858837a60b | refs/heads/master | 2023-02-02T23:44:39.254133 | 2020-12-22T07:21:50 | 2020-12-22T07:21:50 | 323,546,196 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,062 | py | """
Django settings for msm_mobile_2212fgbx_17149 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import logging
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'modules',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
'storages',
# start fcm_django push notifications
'fcm_django',
# end fcm_django push notifications
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'msm_mobile_2212fgbx_17149.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'msm_mobile_2212fgbx_17149.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static')]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")
USE_S3 = (
AWS_ACCESS_KEY_ID and
AWS_SECRET_ACCESS_KEY and
AWS_STORAGE_BUCKET_NAME and
AWS_STORAGE_REGION
)
if USE_S3:
AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
DEFAULT_FILE_STORAGE = env.str(
"DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
)
MEDIA_URL = '/mediafiles/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')
# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {
"FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")
}
# end fcm_django push notifications
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
# output email to console instead of sending
if not DEBUG:
logging.warning("You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails.")
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
| [
"[email protected]"
] | |
67fe520ef708626c818809afe39e7d4be0ca0482 | afb16c3188bf06af65ae0d998e114c72342bd8be | /note/demo/pyqt_demo/pyqtdeploy-3.3.0/pyqtdeploy/gui/__init__.py | 7bb7439211741600c2869895c4c546dfdd990b0e | [
"BSD-3-Clause"
] | permissive | onsunsl/onsunsl.github.io | aa75f399f1c647bc2e62314633bfe35187e59ad4 | 4ed2b1b9a2407afcbffdf304020d42b81c4c8cdc | refs/heads/master | 2023-05-26T12:33:11.167270 | 2023-04-01T10:18:05 | 2023-04-01T10:18:05 | 237,595,319 | 1 | 0 | null | 2023-05-23T20:13:11 | 2020-02-01T10:02:58 | Python | UTF-8 | Python | false | false | 1,487 | py | # Copyright (c) 2019, Riverbank Computing Limited
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Publish the sub-package's API.
from .exception_handlers import handle_user_exception
from .project_gui import ProjectGUI
| [
"[email protected]"
] | |
4feb4ee318210f8ba9d5a5900e78c390ad285d83 | 51b20543e5ed606636bcde9fba329e5fa948de2e | /communityprofiles/profiles/oldmigrations/0028_rename_datadisplay.py | cf6e22a06d39652831e0cdfc8b64e75216157ffa | [
"MIT"
] | permissive | 216software/Profiles | b821112225e8522b7b558cab87ae1c12c68c653b | 651da880a3d4295243205bdae4de88504edc91de | refs/heads/dev | 2023-03-16T04:49:01.389186 | 2023-03-09T17:04:04 | 2023-03-09T17:04:04 | 59,139,518 | 3 | 0 | null | 2016-05-18T18:02:53 | 2016-05-18T18:02:53 | null | UTF-8 | Python | false | false | 15,732 | py | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'DataDisplay'
db.rename_table('profiles_datadisplay', 'profiles_datadisplaytemplate')
#db.delete_table('profiles_datadisplay')
# Removing M2M table for field records on 'DataDisplay'
db.rename_table('profiles_datadisplay_records', 'profiles_datadisplaytemplate_records')
db.rename_column('profiles_datadisplaytemplate_records', 'datadisplay_id', 'datadisplaytemplate_id')
#db.delete_table('profiles_datadisplay_records')
# Removing M2M table for field levels on 'DataDisplay'
db.rename_table('profiles_datadisplay_levels', 'profiles_datadisplaytemplate_levels')
db.rename_column('profiles_datadisplaytemplate_levels', 'datadisplay_id', 'datadisplaytemplate_id')
#db.delete_table('profiles_datadisplay_levels')
# Removing M2M table for field domains on 'DataDisplay'
db.rename_table('profiles_datadisplay_domains', 'profiles_datadisplaytemplate_domains')
db.rename_column('profiles_datadisplaytemplate_domains', 'datadisplay_id', 'datadisplaytemplate_id')
#db.delete_table('profiles_datadisplay_domains')
# Removing M2M table for field indicators on 'DataDisplay'
db.rename_table('profiles_datadisplay_indicators', 'profiles_datadisplaytemplate_indicators')
db.rename_column('profiles_datadisplaytemplate_indicators', 'datadisplay_id', 'datadisplaytemplate_id')
#db.delete_table('profiles_datadisplay_indicators')
def backwards(self, orm):
# Deleting model 'DataDisplay'
db.rename_table('profiles_datadisplaytemplate', 'profiles_datadisplay')
#db.delete_table('profiles_datadisplay')
# Removing M2M table for field records on 'DataDisplay'
db.rename_table('profiles_datadisplaytemplate_records', 'profiles_datadisplay_records')
db.rename_column('profiles_datadisplay_records', 'datadisplaytemplate_id', 'datadisplay_id')
#db.delete_table('profiles_datadisplay_records')
# Removing M2M table for field levels on 'DataDisplay'
db.rename_table('profiles_datadisplaytemplate_levels', 'profiles_datadisplay_levels')
db.rename_column('profiles_datadisplay_levels', 'datadisplaytemplate_id', 'datadisplay_id')
#db.delete_table('profiles_datadisplay_levels')
# Removing M2M table for field domains on 'DataDisplay'
db.rename_table('profiles_datadisplaytemplate_domains', 'profiles_datadisplay_domains')
db.rename_column('profiles_datadisplay_domains', 'datadisplaytemplate_id', 'datadisplay_id')
#db.delete_table('profiles_datadisplay_domains')
# Removing M2M table for field indicators on 'DataDisplay'
db.rename_table('profiles_datadisplaytemplate_indicators', 'profiles_datadisplay_indicators')
db.rename_column('profiles_datadisplay_indicators', 'datadisplaytemplate_id', 'datadisplay_id')
#db.delete_table('profiles_datadisplay_indicators')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'profiles.datadisplaytemplate': {
'Meta': {'object_name': 'DataDisplayTemplate'},
'display_type': ('django.db.models.fields.CharField', [], {'default': "'STANDARD'", 'max_length': '11'}),
'domains': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profiles.DataDomain']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicators': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profiles.Indicator']", 'symmetrical': 'False', 'blank': 'True'}),
'levels': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profiles.GeoLevel']", 'symmetrical': 'False', 'blank': 'True'}),
'records': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profiles.GeoRecord']", 'symmetrical': 'False', 'blank': 'True'}),
'source': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'subsubtitle': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'subtitle': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'template': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'})
},
'profiles.datadomain': {
'Meta': {'object_name': 'DataDomain'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicators': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profiles.Indicator']", 'through': "orm['profiles.IndicatorDomain']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '20', 'db_index': 'True'})
},
'profiles.datasource': {
'Meta': {'object_name': 'DataSource'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'implementation': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
},
'profiles.geolevel': {
'Meta': {'object_name': 'GeoLevel'},
'data_sources': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profiles.DataSource']", 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.GeoLevel']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200', 'db_index': 'True'})
},
'profiles.georecord': {
'Meta': {'unique_together': "(('slug', 'level'), ('level', 'geo_id', 'custom_name', 'owner'))", 'object_name': 'GeoRecord'},
'components': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'components_rel_+'", 'blank': 'True', 'to': "orm['profiles.GeoRecord']"}),
'custom_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'geo_id': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'geom': ('django.contrib.gis.db.models.fields.GeometryField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.GeoLevel']"}),
'mappings': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'mappings_rel_+'", 'blank': 'True', 'to': "orm['profiles.GeoRecord']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.GeoRecord']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '100', 'blank': 'True'})
},
'profiles.indicator': {
'Meta': {'object_name': 'Indicator'},
'data_domains': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profiles.DataDomain']", 'through': "orm['profiles.IndicatorDomain']", 'symmetrical': 'False'}),
'data_type': ('django.db.models.fields.CharField', [], {'default': "'COUNT'", 'max_length': '10'}),
'display_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'display_percent': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'levels': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profiles.GeoLevel']", 'symmetrical': 'False'}),
'limitations': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'long_definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'purpose': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'routine_use': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'short_definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
'universe': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'})
},
'profiles.indicatordata': {
'Meta': {'unique_together': "(('indicator', 'record', 'time'),)", 'object_name': 'IndicatorData'},
'change_from_time': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'data_as_change_from'", 'null': 'True', 'to': "orm['profiles.Time']"}),
'change_to_time': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'data_as_change_to'", 'null': 'True', 'to': "orm['profiles.Time']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Indicator']"}),
'moe': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'number': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'record': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.GeoRecord']"}),
'time': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Time']", 'null': 'True'})
},
'profiles.indicatordomain': {
'Meta': {'object_name': 'IndicatorDomain'},
'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'domain': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.DataDomain']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Indicator']"})
},
'profiles.indicatorpart': {
'Meta': {'object_name': 'IndicatorPart'},
'data': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'data_source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.DataSource']"}),
'formula': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Indicator']"}),
'time': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Time']"})
},
'profiles.time': {
'Meta': {'object_name': 'Time'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'sort': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '1'})
}
}
complete_apps = ['profiles']
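# Illustrative note (editor's addition): on most backends the forwards()
# migration above boils down to statements equivalent to the following SQL
# (PostgreSQL syntax shown; the exact statements are produced by South's
# database backend):
#
#   ALTER TABLE profiles_datadisplay RENAME TO profiles_datadisplaytemplate;
#   ALTER TABLE profiles_datadisplay_records
#       RENAME TO profiles_datadisplaytemplate_records;
#   ALTER TABLE profiles_datadisplaytemplate_records
#       RENAME COLUMN datadisplay_id TO datadisplaytemplate_id;
#   -- ...and the same pattern for the levels, domains and indicators M2M tables.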
| [
"[email protected]"
] | |
df3dd0c6444826ae7b47594b45a3af13c9367411 | eb9c3dac0dca0ecd184df14b1fda62e61cc8c7d7 | /google/cloud/automl/v1beta1/automl-v1beta1-py/google/cloud/automl_v1beta1/types/text.py | 4cfbeec4414685c6289c3c27575e043c78d1d454 | [
"Apache-2.0"
] | permissive | Tryweirder/googleapis-gen | 2e5daf46574c3af3d448f1177eaebe809100c346 | 45d8e9377379f9d1d4e166e80415a8c1737f284d | refs/heads/master | 2023-04-05T06:30:04.726589 | 2021-04-13T23:35:20 | 2021-04-13T23:35:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,303 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.automl_v1beta1.types import classification
__protobuf__ = proto.module(
package='google.cloud.automl.v1beta1',
manifest={
'TextClassificationDatasetMetadata',
'TextClassificationModelMetadata',
'TextExtractionDatasetMetadata',
'TextExtractionModelMetadata',
'TextSentimentDatasetMetadata',
'TextSentimentModelMetadata',
},
)
class TextClassificationDatasetMetadata(proto.Message):
r"""Dataset metadata for classification.
Attributes:
classification_type (google.cloud.automl_v1beta1.types.ClassificationType):
Required. Type of the classification problem.
"""
classification_type = proto.Field(proto.ENUM, number=1,
enum=classification.ClassificationType,
)
class TextClassificationModelMetadata(proto.Message):
r"""Model metadata that is specific to text classification.
Attributes:
classification_type (google.cloud.automl_v1beta1.types.ClassificationType):
Output only. Classification type of the
dataset used to train this model.
"""
classification_type = proto.Field(proto.ENUM, number=3,
enum=classification.ClassificationType,
)
class TextExtractionDatasetMetadata(proto.Message):
r"""Dataset metadata that is specific to text extraction"""
class TextExtractionModelMetadata(proto.Message):
r"""Model metadata that is specific to text extraction.
Attributes:
model_hint (str):
Indicates the scope of model use case.
- ``default``: Use to train a general text extraction
model. Default value.
- ``health_care``: Use to train a text extraction model
that is tuned for healthcare applications.
"""
model_hint = proto.Field(proto.STRING, number=3)
class TextSentimentDatasetMetadata(proto.Message):
r"""Dataset metadata for text sentiment.
Attributes:
sentiment_max (int):
Required. A sentiment is expressed as an integer ordinal,
where higher value means a more positive sentiment. The
range of sentiments that will be used is between 0 and
sentiment_max (inclusive on both ends), and all the values
in the range must be represented in the dataset before a
model can be created. sentiment_max value must be between 1
and 10 (inclusive).
"""
sentiment_max = proto.Field(proto.INT32, number=1)
class TextSentimentModelMetadata(proto.Message):
r"""Model metadata that is specific to text sentiment."""
__all__ = tuple(sorted(__protobuf__.manifest))
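# Minimal usage sketch (editor's addition): these are proto-plus message
# classes, so instances are built with keyword arguments matching the fields
# declared above.  The values are arbitrary examples.
if __name__ == '__main__':
    dataset_meta = TextSentimentDatasetMetadata(sentiment_max=4)
    model_meta = TextExtractionModelMetadata(model_hint='default')
    print(dataset_meta.sentiment_max, model_meta.model_hint)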
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
70010e56865e6859e5fb7ce76d3db89396781ae3 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02404/s524653076.py | 6b28671655af949d1e273a3f672228e10e89b441 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 503 | py | while True:
    h, w = map(int, input().split())
    if h == 0 and w == 0:
        break
    for i in range(h):
        row = ""
        edge_row = (i == 0 or i == h - 1)
        for j in range(w):
            if j == 0 or j == w - 1 or edge_row:
                row += "#"
            else:
                row += "."
        print(row)
    print()
| [
"[email protected]"
] | |
217781305f8edabd2f783fbd2dfab45ad641bc8b | 88b063ec8e543e6f62f3adac6be214128a984548 | /backend/chat/api/v1/viewsets.py | f866d1b60c17b260c5991567c246fc3286766130 | [] | no_license | crowdbotics-apps/chat-app-28513 | 6939d1da6c53f6d44786b4f822b4fb4c1fedd57f | 129f56f533f8f3076fdcfcfe3180942e5890f9f2 | refs/heads/master | 2023-06-06T17:26:04.090136 | 2021-07-06T23:23:40 | 2021-07-06T23:23:40 | 383,620,473 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,983 | py | from rest_framework import authentication
from chat.models import (
    Message,
    ThreadMember,
    MessageAction,
    ThreadAction,
    ForwardedMessage,
    Thread,
)
from .serializers import (
    MessageSerializer,
    ThreadMemberSerializer,
    MessageActionSerializer,
    ThreadActionSerializer,
    ForwardedMessageSerializer,
    ThreadSerializer,
)
from rest_framework import viewsets


class ForwardedMessageViewSet(viewsets.ModelViewSet):
    serializer_class = ForwardedMessageSerializer
    authentication_classes = (
        authentication.SessionAuthentication,
        authentication.TokenAuthentication,
    )
    queryset = ForwardedMessage.objects.all()


class ThreadActionViewSet(viewsets.ModelViewSet):
    serializer_class = ThreadActionSerializer
    authentication_classes = (
        authentication.SessionAuthentication,
        authentication.TokenAuthentication,
    )
    queryset = ThreadAction.objects.all()


class MessageActionViewSet(viewsets.ModelViewSet):
    serializer_class = MessageActionSerializer
    authentication_classes = (
        authentication.SessionAuthentication,
        authentication.TokenAuthentication,
    )
    queryset = MessageAction.objects.all()


class ThreadViewSet(viewsets.ModelViewSet):
    serializer_class = ThreadSerializer
    authentication_classes = (
        authentication.SessionAuthentication,
        authentication.TokenAuthentication,
    )
    queryset = Thread.objects.all()


class MessageViewSet(viewsets.ModelViewSet):
    serializer_class = MessageSerializer
    authentication_classes = (
        authentication.SessionAuthentication,
        authentication.TokenAuthentication,
    )
    queryset = Message.objects.all()


class ThreadMemberViewSet(viewsets.ModelViewSet):
    serializer_class = ThreadMemberSerializer
    authentication_classes = (
        authentication.SessionAuthentication,
        authentication.TokenAuthentication,
    )
    queryset = ThreadMember.objects.all()
| [
"[email protected]"
] | |
726f56c748b2aa68e9e184b243ff36945ab2243e | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02421/s282455109.py | e0be63d9b3954f9063104c79b5c40180297004d4 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 345 | py | point_a,point_b = 0,0
for i in range(int(input())):
    k =[]
    a,b = input().split()
    k = [[i,j] for i,j in zip(a,b) if i != j]
    if k == []:
        if len(a) < len(b):
            point_b+=3
        elif len(a) > len(b):
            point_a += 3
        else:
            point_a+=1
            point_b+=1
    elif ord(k[0][0]) < ord(k[0][1]):
        point_b += 3
    else :
        point_a += 3
print(point_a,point_b) | [
"[email protected]"
] | |
968b545c65e188b2f1d2f91599be84694e58c3f0 | eb9f655206c43c12b497c667ba56a0d358b6bc3a | /python/testData/refactoring/introduceVariable/formattingOfParenthesizedTuplePreserved.after.py | d6d33f19491d398e0b0c4c6b3471f43c25c72a52 | [
"Apache-2.0"
] | permissive | JetBrains/intellij-community | 2ed226e200ecc17c037dcddd4a006de56cd43941 | 05dbd4575d01a213f3f4d69aa4968473f2536142 | refs/heads/master | 2023-09-03T17:06:37.560889 | 2023-09-03T11:51:00 | 2023-09-03T12:12:27 | 2,489,216 | 16,288 | 6,635 | Apache-2.0 | 2023-09-12T07:41:58 | 2011-09-30T13:33:05 | null | UTF-8 | Python | false | false | 50 | py | a = ('foo',
     'bar',
     'baz')
func(param=a)
| [
"[email protected]"
] | |
e5cb1c5a23fb4af6846b5bf7dc61c210c9fbfe4d | b68115ac6cd996c1a09d70c2cf7158715c125aae | /simulation/tests/context.py | 542253303d40ec9076d7a91ffa82a0c73939aaa3 | [] | no_license | joakim-hove/fmu_storage | 3a71d7521818658a252e90a3b08c32810a86d544 | c02feb69493a9e17592b1b5e3cf201c559b20bdf | refs/heads/master | 2021-08-28T06:54:59.291006 | 2017-12-11T13:51:36 | 2017-12-11T13:51:36 | 103,049,778 | 0 | 2 | null | 2017-09-14T13:14:26 | 2017-09-10T17:53:27 | Python | UTF-8 | Python | false | false | 2,918 | py | import getpass
import os
import grp
import random
from ecl.ecl import EclGrid, EclGridGenerator, EclSum, openFortIO, EclFile
from ecl.test import TestAreaContext
from ecl.test.ecl_mock import createEclSum
from simulation.models import *
def fopr(days):
return days
def random_fopr(days):
return fopr(days) * random.random( )
def fopt(days):
return days
def random_fopt(days):
return fopt(days) * random.random()
def fgpt(days):
if days < 50:
return days
else:
return 100 - days
def random_fgpt(days):
return fgpt(days) * random.random()
class TestContext(object):
def __init__(self):
length = 100
case = createEclSum("CASE" , [("FOPT", None , 0) , ("FOPR" , None , 0), ("FGPT" , None , 0)],
sim_length_days = length,
num_report_step = 10,
num_mini_step = 10,
func_table = {"FOPT" : fopt,
"FOPR" : fopr ,
"FGPT" : fgpt })
self.user = getpass.getuser()
self.group = grp.getgrgid( os.getgid( ) )[0]
self.case = case
with TestAreaContext("summary"):
case.fwrite( )
self.summary = Summary.create( "CASE.SMSPEC" , "CASE.UNSMRY" , self.group )
self.simulation = Simulation.create( summary = self.summary, parameters = [("CPARAM1", 100), ("CPARAM2", 200)] )
self.grid = EclGridGenerator.create_rectangular( (10,10,10),(1,1,1) )
@classmethod
def create_INIT(cls):
ecl_kw = EclKW(1000 , "PORV" , EclDataType.ECL_FLOAT )
with openFortIO("CASE.INIT", FortIO.WRITE_MODE) as f:
ecl_kw.fwrite( f )
return EclFile( "CASE.INIT" )
@classmethod
def create_UNRST(cls):
ecl_kw = EclKW(1000 , "PRESSURE" , EclDataType.ECL_FLOAT )
with openFortIO("CASE.UNRST", FortIO.WRITE_MODE) as f:
ecl_kw.fwrite( f )
return EclFile( "CASE.UNRST" )
@classmethod
def random_simulation(cls):
length = 100
case = createEclSum("CASE" , [("FOPT", None , 0) , ("FOPR" , None , 0), ("FGPT" , None , 0)],
sim_length_days = length,
num_report_step = 10,
num_mini_step = 10,
func_table = {"FOPT" : random_fopt,
"FOPR" : random_fopr ,
"FGPT" : random_fgpt })
group = grp.getgrgid( os.getgid( ) )[0]
with TestAreaContext("summary"):
case.fwrite( )
summary_case = Summary.create( "CASE.SMSPEC" , "CASE.UNSMRY" , group )
return Simulation.create( summary = summary_case, parameters = [("CPARAM1", 100*random.random()), ("CPARAM2", 200*random.random())] )
| [
"[email protected]"
] | |
28071d902234225339df9863437a44aa01511de6 | 6cc50a15672155f7d66e88830ad1baec6a061077 | /processing/legacy/anisotropy/random_trials/grid_test/submitter.py | 237c124558f1838e5cf2d1f6b45d2c356f636c95 | [
"MIT"
] | permissive | jrbourbeau/cr-composition | 16b29c672b2d1c8d75c1c45e35fe6bb60b53ffe2 | e9efb4b713492aaf544b5dd8bb67280d4f108056 | refs/heads/master | 2020-06-24T21:48:21.784277 | 2018-11-01T21:30:56 | 2018-11-01T21:30:56 | 74,618,907 | 0 | 1 | MIT | 2018-08-23T21:01:03 | 2016-11-23T22:31:01 | Jupyter Notebook | UTF-8 | Python | false | false | 1,237 | py | #!/usr/bin/env python
import os

import pycondor

import comptools as comp


if __name__ == "__main__":

    # Define output directories
    error = os.path.join(comp.paths.condor_data_dir, 'grid_test/error')
    output = os.path.join(comp.paths.condor_data_dir, 'grid_test/output')
    log = os.path.join(comp.paths.condor_scratch_dir, 'grid_test/log')
    submit = os.path.join(comp.paths.condor_scratch_dir, 'grid_test/submit')

    # Define path to executables
    job_ex = os.path.abspath('test_script.py')

    # Extra lines for submitting to the open science grid
    extra_lines = ['Requirements = HAS_CVMFS_icecube_opensciencegrid_org',
                   'use_x509userproxy=true',
                   'should_transfer_files = YES',
                   'when_to_transfer_output = ON_EXIT']
    grid = 'gsiftp://gridftp-users.icecube.wisc.edu'

    # Create Dagman instance
    dag_name = 'test_dag'
    dagman = pycondor.Dagman(dag_name, submit=submit, verbose=1)

    job_name = 'test_job'
    job = pycondor.Job(job_name, job_ex, error=error, output=output,
                       log=log, submit=submit, extra_lines=extra_lines,
                       verbose=1)
    dagman.add_job(job)
    dagman.build_submit(fancyname=True)
| [
"[email protected]"
] | |
f3e6214771059eaad09dcfbffb014559b1f6d936 | 0d1b38738bf8d3a46efdf44ef6dd1fd061a0ff3e | /python/tak/ptn/__init__.py | ab2d16a528bceaaba2de3029070ad29c57dcf878 | [
"MIT"
] | permissive | nelhage/taktician | aea285e49c97d9212390075239abf4816b0023ee | 8ab398ad8ce65a7615da476c6e99c3f6d5d24d76 | refs/heads/main | 2022-12-08T23:43:26.458026 | 2022-11-06T23:31:12 | 2022-11-06T23:31:12 | 57,939,961 | 60 | 14 | MIT | 2022-11-06T23:31:13 | 2016-05-03T03:54:39 | Go | UTF-8 | Python | false | false | 38 | py | from .ptn import *
from .tps import *
| [
"[email protected]"
] | |
c35d40a6aaaa6fe00d05db758d350f86d9bc8b5d | 7bcb0b7f721c8fa31da7574f13ed0056127715b3 | /src/apps/bi/kruskals.py | 5a6e72b1e0f1a21c431315fa0eb2abb11ade3fbc | [] | no_license | simonchapman1986/ripe | 09eb9452ea16730c105c452eefb6a6791c1b4a69 | c129da2249b5f75015f528e4056e9a2957b7d884 | refs/heads/master | 2022-07-22T05:15:38.485619 | 2016-01-15T12:53:43 | 2016-01-15T12:53:43 | 49,718,671 | 1 | 0 | null | 2022-07-07T22:50:50 | 2016-01-15T12:53:09 | Python | UTF-8 | Python | false | false | 1,537 | py | __author__ = 'simon'
parent = dict()
rank = dict()


def make_set(vertice):
    parent[vertice] = vertice
    rank[vertice] = 0


def find(vertice):
    if parent[vertice] != vertice:
        parent[vertice] = find(parent[vertice])
    return parent[vertice]


def union(vertice1, vertice2):
    root1 = find(vertice1)
    root2 = find(vertice2)
    if root1 != root2:
        if rank[root1] > rank[root2]:
            parent[root2] = root1
        else:
            parent[root1] = root2
            if rank[root1] == rank[root2]: rank[root2] += 1


def kruskal(graph):
    for vertice in graph['vertices']:
        make_set(vertice)
    minimum_spanning_tree = set()
    edges = list(graph['edges'])
    edges.sort()
    for edge in edges:
        weight, vertice1, vertice2 = edge
        if find(vertice1) != find(vertice2):
            union(vertice1, vertice2)
            minimum_spanning_tree.add(edge)
    return minimum_spanning_tree


def example():
    """
    >>> graph = {\
    'vertices': ['A', 'B', 'C', 'D', 'E', 'F'],\
    'edges': set([\
    (1, 'A', 'B'),\
    (5, 'A', 'C'),\
    (3, 'A', 'D'),\
    (4, 'B', 'C'),\
    (2, 'B', 'D'),\
    (1, 'C', 'D'),\
    ])\
    }
    >>> minimum_spanning_tree = set([\
    (1, 'A', 'B'),\
    (2, 'B', 'D'),\
    (1, 'C', 'D'),\
    ])
    >>> print bool(kruskal(graph) == minimum_spanning_tree)
    True
    """
    pass | [
"[email protected]"
] | |
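# A minimal usage sketch for kruskal() above; the import path is an assumption
# (the repo stores the file at src/apps/bi/kruskals.py) and the graph mirrors
# the doctest in example().
from apps.bi.kruskals import kruskal

graph = {
    'vertices': ['A', 'B', 'C', 'D', 'E', 'F'],
    'edges': set([(1, 'A', 'B'), (5, 'A', 'C'), (3, 'A', 'D'),
                  (4, 'B', 'C'), (2, 'B', 'D'), (1, 'C', 'D')]),
}
# Expected result: set([(1, 'A', 'B'), (2, 'B', 'D'), (1, 'C', 'D')])
print(kruskal(graph))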
6fc23d15c1c3ed67d9789862a4419cf73d03b598 | 556f9c2db9c88120dc6dc7bc4280935db78e3eaa | /scripts/test_zero.py | c28139a20225537e6deb1324f888d975b27f2fee | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | BlockResearchGroup/compas_ags | 55dde6a2755c644b484767c8c359f6bfe68531a5 | 4507ff09be1a881d3f3520bc465a9dcda52b42ed | refs/heads/main | 2023-04-11T04:43:50.850869 | 2022-11-17T10:46:23 | 2022-11-17T10:46:23 | 113,161,567 | 13 | 5 | MIT | 2022-11-17T10:35:43 | 2017-12-05T09:31:08 | Python | UTF-8 | Python | false | false | 2,327 | py | import compas_ags
from compas_ags.diagrams import FormGraph
from compas_ags.diagrams import FormDiagram
from compas_ags.diagrams import ForceDiagram
# from compas_ags.ags import graphstatics
from compas_ags.viewers import Viewer
from compas.rpc import Proxy
graphstatics = Proxy('compas_ags.ags.graphstatics')
# this file has unloaded, 2-valent nodes
# they will be removed automatically
# and the result renumbered
FILE = compas_ags.get('debugging/zero.obj')
graph = FormGraph.from_obj(FILE)
form = FormDiagram.from_graph(graph)
force = ForceDiagram.from_formdiagram(form)
# fix the supports
form.vertices_attribute('is_fixed', True, [8, 7])
# set the loads
form.edge_force((0, 1), +10.0)
form.edge_force((2, 3), +10.0)
form.edge_force((4, 5), +10.0)
# # compute initial form and force diagrams
# graphstatics.form_update_q_from_qind(form)
# graphstatics.force_update_from_form(force, form)
# compute initial form and force diagrams
form.data = graphstatics.form_update_q_from_qind_proxy(form.data)
force.data = graphstatics.force_update_from_form_proxy(force.data, form.data)
# change the geometry of the force diagram
force.vertex_attribute(6, 'x', force.vertex_attribute(8, 'x'))
force.vertex_attribute(9, 'x', force.vertex_attribute(10, 'x'))
force.vertex_attributes(7, 'xyz', force.vertex_attributes(6, 'xyz'))
force.vertex_attributes(11, 'xyz', force.vertex_attributes(9, 'xyz'))
# # change the depth of the structure
# force.vertices_attribute('x', 20, [6, 7, 8, 9, 10, 11])
# fix some of the nodes in the from diagram
# to constraint the problem to a single solution
form.vertices_attribute('is_fixed', True, [0, 2, 5])
# # update the form diagram
# graphstatics.form_update_from_force(form, force)
# update the form diagram
form.data = graphstatics.form_update_from_force_proxy(form.data, force.data)
# ==============================================================================
# Visualize
# ==============================================================================
viewer = Viewer(form, force, delay_setup=False, figsize=(12, 7.5))
viewer.draw_form(
vertexsize=0.15,
vertexcolor={key: '#000000' for key in (8, 7)},
vertexlabel={key: key for key in form.vertices()})
viewer.draw_force(
vertexsize=0.15,
vertexlabel={key: key for key in force.vertices()})
viewer.show()
| [
"[email protected]"
] | |
1b1047863ca9ab109e3bf32a674b4f7077fcfb6d | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_136/1892.py | 5fc6c54ef1e3222a84986cd7b64701be7742ba30 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,076 | py | INITIAL_RATE = 2
class Case(object):
    def __init__(self,C,F,X):
        self.C = C
        self.F = F
        self.X = X

    def solve(self):
        time = self.X/INITIAL_RATE
        return self.__solve_in_loop(INITIAL_RATE, time,0)

    def __solve_in_loop(self,last_rate,last_time,overhead):
        while (True):
            time_to_farm = self.C/last_rate + overhead
            new_rate = last_rate + self.F
            new_time = time_to_farm + self.X/new_rate
            if new_time >= last_time:
                return last_time
            last_time = new_time
            last_rate = new_rate
            overhead = time_to_farm


def parse_stdin():
    n = int(raw_input())
    cases = []
    for i in xrange(n):
        c = [float(x) for x in raw_input().split(' ')]
        cases.append(Case(c[0],c[1],c[2]))
    return cases


def main():
    cases = parse_stdin()
    i = 1
    for c in cases:
        print 'Case #{:d}: {:3.7f}'.format(i, c.solve())
        i += 1


if __name__ == '__main__':
    main()
| [
"[email protected]"
] | |
b4d0a06a96fa7688f6cf7f0e79de64a4065cf646 | c0feb8693883e4b29096ad45b6b2113b7cad69ef | /supervised_learning/0x03-optimization/0-main.py | dcbea35b2e1336070660c73f686a5451c7af267c | [] | no_license | vandeldiegoc/holbertonschool-machine_learning | 905977a15dbb59753115936215a870fa0f46f52e | bda9efa60075afa834433ff1b5179db80f2487ae | refs/heads/main | 2023-07-06T23:28:59.393284 | 2021-08-10T21:58:40 | 2021-08-10T21:58:40 | 318,391,491 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 418 | py | #/usr/bin/env python3
import numpy as np
normalization_constants = __import__('0-norm_constants').normalization_constants
if __name__ == '__main__':
    np.random.seed(0)
    a = np.random.normal(0, 2, size=(100, 1))
    b = np.random.normal(2, 1, size=(100, 1))
    c = np.random.normal(-3, 10, size=(100, 1))
    X = np.concatenate((a, b, c), axis=1)
    m, s = normalization_constants(X)
    print(m)
    print(s) | [
"[email protected]"
] | |
939ef16942fc3121165f9df42f9a4b943a6b7273 | 7ec38beb6f041319916390ee92876678412b30f7 | /src/leecode/array_medium_1282(2).py | 74ad89bcac3387e7faae9e44a1009212c7d51539 | [] | no_license | hopensic/LearnPython | 3570e212a1931d4dad65b64ecdd24414daf51c73 | f735b5d865789843f06a623a4006f8883d6d1ae0 | refs/heads/master | 2022-02-18T23:11:30.663902 | 2022-02-12T17:51:56 | 2022-02-12T17:51:56 | 218,924,551 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 367 | py | from collections import defaultdict
class Solution:
    def groupThePeople(self, groupSizes):
        count = defaultdict(list)
        for i, size in enumerate(groupSizes):
            count[size].append(i)
        return [l[i:i + s] for s, l in count.items() for i in range(0, len(l), s)]


lst = [3, 3, 3, 3, 3, 1, 3]
s = Solution()
print(s.groupThePeople(lst))
| [
"[email protected]"
] | |
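# For groupSizes = [3, 3, 3, 3, 3, 1, 3] the driver above prints one valid
# partition, e.g. [[0, 1, 2], [3, 4, 6], [5]]: person 5 is the sole member of
# the size-1 group, and any permutation of a valid grouping is acceptable.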
cd967ebaafe8d50ba8eb76a9166a6187a6d13a31 | a7e3ffcd2e011f091763370a66aab9bd04d4ffec | /trade/urls.py | fe530542c751a1c69c2b97f02fb07a1a1b56e89f | [] | no_license | jiangyuwei666/Shop | 0363a5c1c55c796e5ff56f07c663eea4bc08de71 | 78d7d4647f5c101c89fc5188808cddecf16d1ee6 | refs/heads/master | 2022-12-27T11:05:57.190555 | 2019-08-18T07:50:37 | 2019-08-18T07:50:37 | 189,996,974 | 0 | 0 | null | 2022-12-16T09:43:17 | 2019-06-03T12:02:22 | Python | UTF-8 | Python | false | false | 412 | py | from django.conf.urls import url, include
from rest_framework.routers import DefaultRouter
from apps.trade.views import ShoppingCartViewSet, OrderInfoViewSet, OrderGoodsViewSet
router = DefaultRouter()
router.register('shopping_cart', ShoppingCartViewSet)
router.register('order_info', OrderInfoViewSet)
router.register('order_goods', OrderGoodsViewSet)
urlpatterns = [
    url(r'', include(router.urls)),
]
| [
"[email protected]"
] | |
5f1c7ca9826e83c4e2252cfcfb3335b01d8a46bd | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5636311922769920_0/Python/jonyafek/d-large.py | 681f1d58b9ba783300076960e61ec020cdc6a53d | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,192 | py | #!/usr/bin/python
def solve(k, c, s):
    #print "k: " + str(k)
    #print "c: " + str(c)
    #print "s: " + str(s)
    result = ""
    numbers_to_verify = range(1, k + 1)
    numbers_to_check = []
    while numbers_to_verify:
        number = 0
        for level in xrange(c):
            temp_num = 1
            if numbers_to_verify:
                temp_num = numbers_to_verify.pop()
            #print "temp_num: " + str(temp_num)
            #print "level: " + str(level)
            if 0 == level:
                level_value = temp_num
            else:
                level_value = (k ** level) * (temp_num - 1)
            #print "level value: " + str(level_value)
            number += level_value
            #print "number: " + str(number)
        numbers_to_check.append(number)
        #print "appended number: " + str(number)
    if len(numbers_to_check) > s:
        return "IMPOSSIBLE"
    for num in numbers_to_check:
        result += str(num) + " "
    return result.strip()


import sys

input_lines = open(sys.argv[1], "rt").readlines()
stripped_input_lines = [line.strip() for line in input_lines]
num_tests = int(input_lines[0])
#print num_tests
i=1
for line in stripped_input_lines[1:]:
    k = int(line.split()[0])
    c = int(line.split()[1])
    s = int(line.split()[2])
    result = solve(k, c, s)
    print "Case #" + str(i) + ": " + str(result)
    i += 1
| [
"[email protected]"
] | |
05efe8ebcc108f820c6f960b90f10a1c382616f0 | b5bde7b0be53cf62e4aa19085e75d61636213abb | /celebs/migrations/0001_initial.py | 0b1719c8467ec6535bfa6428967be5b4cddc7c60 | [] | no_license | pydatageek/imdb-clone-django-vue | 3ecaa2dbf97225a202c574c06953b4be80fc240b | 2c77f49be3e5a40b368110630641f22b686eb7bc | refs/heads/master | 2022-12-07T18:29:22.303516 | 2020-04-03T04:54:07 | 2020-04-03T04:54:07 | 252,633,801 | 1 | 0 | null | 2022-11-22T05:27:31 | 2020-04-03T04:40:03 | HTML | UTF-8 | Python | false | false | 3,349 | py | # Generated by Django 2.2.10 on 2020-04-03 02:02
import celebs.models
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Celebrity',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('added_date', models.DateTimeField(auto_now_add=True, verbose_name='Added Date')),
('modified_date', models.DateTimeField(auto_now=True, verbose_name='Added Date')),
('slug', models.SlugField(blank=True, max_length=160, unique=True, verbose_name='Slug')),
('first_name', models.CharField(max_length=75, verbose_name='First Name')),
('last_name', models.CharField(max_length=75, verbose_name='Last Name')),
('nick_name', models.CharField(blank=True, default='', max_length=50, verbose_name='Nick Name')),
('birth_date', models.DateField(blank=True, null=True, verbose_name='Birth Date')),
('birth_place', models.CharField(blank=True, default='', max_length=100, verbose_name='Birth Place')),
('content', models.TextField(blank=True, default='', verbose_name='Biography')),
('source_content', models.URLField(blank=True, default='', verbose_name='Biography Souce')),
('trailer', models.URLField(blank=True, default='', help_text='trailer url (ONLY for youtube videos yet)', verbose_name='Trailer')),
('image', models.ImageField(blank=True, default='celebs/default_celeb.jpg', null=True, upload_to=celebs.models.celeb_directory_path, verbose_name='Image')),
('credit_image', models.CharField(blank=True, default='', max_length=250, verbose_name='Image Credit')),
],
options={
'verbose_name': 'Celebrity',
'verbose_name_plural': 'Celebrities',
'ordering': ('last_name', 'first_name'),
},
),
migrations.CreateModel(
name='CelebrityDuty',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
options={
'verbose_name': 'Celebrity Duty',
'verbose_name_plural': 'Celebrity Duties',
},
),
migrations.CreateModel(
name='Duty',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('added_date', models.DateTimeField(auto_now_add=True, verbose_name='Added Date')),
('modified_date', models.DateTimeField(auto_now=True, verbose_name='Added Date')),
('name', models.CharField(max_length=100, unique=True, verbose_name='Name')),
('slug', models.SlugField(blank=True, max_length=110, unique=True, verbose_name='Slug')),
('code', models.CharField(max_length=1, verbose_name='Code')),
],
options={
'verbose_name': 'Duty',
'verbose_name_plural': 'Duties',
'ordering': ('code',),
},
),
]
| [
"[email protected]"
] | |
1aaf359a44b493c5f11b48b1be9151ffa1bd2dcd | d2b6b9792e5dde0a994e875d23d8a8ace2651fca | /tests/test_build_and_lint.py | 5d623626f0a8f1d543e647aaf380c31c4bccc2cd | [
"AFL-3.0",
"CC-BY-2.5",
"AFL-2.1",
"CC-BY-3.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | michauhl/planemo | 908d9bd95febcce3aa8a2e932260f0bd3151f433 | fdcc6003c1fa45cbe1d074ad9d0f9a491ba99c06 | refs/heads/master | 2020-03-11T20:54:11.424703 | 2018-04-17T19:06:32 | 2018-04-17T19:06:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,573 | py | import os
import yaml
from .test_utils import CliTestCase, skip_if_environ
class BuildAndLintTestCase(CliTestCase):
def test_build_and_lint(self):
with self._isolate():
self._check_exit_code(_init_command())
self._check_lint(exit_code=0)
def test_build_and_lint_with_macros(self):
with self._isolate() as f:
self._check_exit_code(_init_command(macros=True))
self._check_lint(exit_code=0)
macros_file = os.path.join(f, "macros.xml")
assert os.path.exists(macros_file)
def test_lint_fails_if_no_help(self):
with self._isolate():
self._check_exit_code(_init_command(help_text=False))
self._check_lint(exit_code=1)
def test_lint_fails_if_no_test(self):
with self._isolate():
self._check_exit_code(_init_command(test_case=False))
self._check_lint(exit_code=1)
def test_lint_fails_if_no_doi(self):
with self._isolate():
self._check_exit_code(_init_command(doi=False))
self._check_lint(exit_code=1)
@skip_if_environ("PLANEMO_SKIP_CWLTOOL_TESTS")
def test_cwl(self):
with self._isolate() as f:
self._check_exit_code(_cwl_init_command())
self._check_lint(filename="seqtk_seq.cwl", exit_code=0)
with open(os.path.join(f, "seqtk_seq.cwl")) as stream:
process_dict = yaml.load(stream)
assert process_dict["id"] == "seqtk_seq"
assert process_dict["label"] == "Convert to FASTA (seqtk)"
assert process_dict["baseCommand"] == ["seqtk", "seq"]
input0 = process_dict["inputs"][0]
assert input0["inputBinding"]["position"] == 1
assert input0["inputBinding"]["prefix"] == "-a"
assert input0["type"] == "File"
output = process_dict["outputs"][0]
assert output["type"] == "File"
assert output["outputBinding"]["glob"] == "out"
assert process_dict["stdout"] == "out"
with open(os.path.join(f, "seqtk_seq_tests.yml")) as stream:
test_dict = yaml.load(stream)
assert test_dict
@skip_if_environ("PLANEMO_SKIP_CWLTOOL_TESTS")
def test_cwl_fail_on_empty_help(self):
with self._isolate():
self._check_exit_code(_cwl_init_command(help_text=False))
self._check_lint(filename="seqtk_seq.cwl", exit_code=1)
@skip_if_environ("PLANEMO_SKIP_CWLTOOL_TESTS")
def test_cwl_fail_on_no_docker(self):
with self._isolate():
self._check_exit_code(_cwl_init_command(help_text=False))
self._check_lint(filename="seqtk_seq.cwl", exit_code=1)
def _check_lint(self, filename="seqtk_seq.xml", exit_code=0):
lint_cmd = ["lint", "--fail_level", "warn", filename]
try:
self._check_exit_code(lint_cmd, exit_code=exit_code)
except Exception:
with open(filename, "r") as f:
print("Failing file contents are [%s]." % f.read())
raise
def _cwl_init_command(help_text=True, container=True, test_case=True):
command = [
"tool_init", "--force", "--cwl",
"--id", "seqtk_seq",
"--name", "Convert to FASTA (seqtk)",
"--container", "jmchilton/seqtk:v1",
"--name", "Convert to FASTA (seqtk)",
"--example_command", "seqtk seq -a 2.fastq > 2.fasta",
"--example_input", "2.fastq",
"--example_output", "2.fasta"
]
if container:
command.extend(["--container", "jmchilton/seqtk:v1"])
if help_text:
command.extend(["--help_text", "The help text."])
if test_case:
command.append("--test_case")
return command
def _init_command(test_case=True, help_text=True, doi=True, macros=False):
command = [
"tool_init", "--force",
"--id", "seqtk_seq",
"--name", "Convert to FASTA (seqtk)",
"--requirement", "[email protected]",
"--example_command", "seqtk seq -a 2.fastq > 2.fasta",
"--example_input", "2.fastq",
"--example_output", "2.fasta"
]
if test_case:
command.append("--test_case")
if help_text:
command.extend(["--help_text", "The help text."])
if doi:
command.extend(["--doi", "10.1101/014043"])
command.extend(["--cite_url", "https://github.com/ekg/vcflib"])
command.extend(["--cite_url", "http://wiki.hpc.ufl.edu/doc/Seqtk"])
if macros:
command.append("--macros")
return command
| [
"[email protected]"
] | |
61eb0d93423d7ff12f16262371ed2cf4fa2a0fa6 | 5736e117e8d0e011107c3ce4943cce44ea242263 | /DP/Lavenshtein.py | 8744443adb2b9663278c3aa5e76d760482984ce3 | [] | no_license | arvakagdi/Dynamic-Programming | 7119d2005f12b9b441b6e3a582d99a5e4ddffa4d | 49a199413fa939335308533a8303974e3a82cc5c | refs/heads/main | 2023-01-11T01:36:36.862590 | 2020-11-14T14:10:14 | 2020-11-14T14:10:14 | 312,829,648 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,302 | py | # Time: O(mn) || Spacr: O(nm)
def levenshteinDistance(str1, str2):
    # set up a matrix of size of first and second string
    edits = [[x for x in range(len(str1)+1)] for y in range(len(str2) + 1)]
    for i in range(1,len(str2) + 1):
        edits[i][0] = edits[i - 1][0] + 1  # set 1st(0th) column to base values
    for i in range(1,len(str2) + 1):
        for j in range(1,len(str1) + 1):
            if str2[i - 1] == str1[j - 1]:
                edits[i][j] = edits[i - 1][j - 1]
            else:
                edits[i][j] = 1 + min(edits[i-1][j], edits[i][j-1], edits[i-1][j-1])
    return edits[-1][-1]


def levenshteinDistance1(str1, str2):
    small = str1 if len(str1) < len(str2) else str2
    big = str1 if len(str1) >= len(str2) else str2
    evenEdits = [x for x in range(len(small) + 1)]
    oddEdits = [None for x in range(len(small) + 1)]
    for i in range(1, len(big) + 1):
        if i % 2 == 1:
            curr = oddEdits
            prev = evenEdits
        else:
            curr = evenEdits
            prev = oddEdits
        curr[0] = i
        for j in range(1,len(small) + 1):
            if big[i-1] == small[j-1]:
                curr[j] = prev[j-1]
            else:
                curr[j] = 1 + min(prev[j-1], curr[j-1], prev[j])
    return evenEdits[-1] if len(big)%2 == 0 else oddEdits[-1]


print(levenshteinDistance1("abc", "yabd")) | [
"[email protected]"
] | |
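# A small sanity check, assuming the two functions from Lavenshtein.py above
# are in scope: "abc" -> "yabd" needs one insertion ('y') and one substitution
# ('c' -> 'd'), so both implementations should return 2.
assert levenshteinDistance("abc", "yabd") == 2
assert levenshteinDistance1("abc", "yabd") == 2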
b2936740e8936183c7d49945d098ee718bc25273 | 86c360ece5931b8a48f895e8233a571720a5c273 | /fabfile.py | a589579347eeb70a07bdf0c72850e3ad088f3d88 | [] | no_license | dschien/bbc_tool_deploy | c83501a33fa17754a530a36391637a59569d497c | ab136aa7872031b99fcee318bc23390b93639db1 | refs/heads/master | 2021-01-10T12:20:28.586805 | 2016-10-19T16:10:06 | 2016-10-19T16:10:06 | 52,797,894 | 0 | 0 | null | null | null | null | UTF-8 | Python | true | false | 6,817 | py | import boto3
import ConfigParser
import logging
import boto3
import time
from fabric.api import *
from fabric.contrib.files import exists
CONFIG_FILE = "settings.cfg"
config = ConfigParser.RawConfigParser()
config.read(CONFIG_FILE)
env.forward_agent = True
env.update(config._sections['ec2'])
env.hosts = [config.get('ec2', 'host')]
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - [%(levelname)s] - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
container_state = {'RUNNING': 1, 'STOPPED': 2, 'NOT_FOUND': 3}
def create_instance():
print('creating instance')
ec2 = boto3.resource('ec2')
instances = ec2.create_instances(
ImageId='ami-e1398992',
MinCount=1,
MaxCount=1,
KeyName='ep-host',
SecurityGroupIds=['sg-e78fbc83'],
InstanceType='m4.large',
Placement={
'AvailabilityZone': 'eu-west-1a',
},
BlockDeviceMappings=[
{
'DeviceName': '/dev/xvda',
'Ebs': {
'SnapshotId': 'snap-7d042fb4',
'VolumeSize': 8,
'DeleteOnTermination': True,
'VolumeType': 'gp2',
},
},
],
IamInstanceProfile={'Name': 'ec2_default_instance_role'},
EbsOptimized=True | False
)
iid = instances[0].id
# give the instance a tag name
ec2.create_tags(
Resources=[iid],
Tags=mktag(env.notebook_server_tag)
)
return instances[0]
from fabric.colors import red, green
def assert_running(instance):
if instance.state['Name'] != "running":
print "Firing up instance"
instance.start()
# Give it 10 minutes to appear online
for i in range(120):
time.sleep(5)
# instance.update()
print instance.state
if instance.state['Name'] == "running":
break
else:
print red("Instance did not enter 'running' state within 120s.")
if instance.state['Name'] == "running":
dns = instance.public_dns_name
print "Instance up and running at %s" % dns
config.set('ec2', 'host', dns)
config.set('ec2', 'instance', instance.id)
# config.write(CONFIG_FILE)
print "updating env.hosts"
env.hosts = [dns, ]
print env.hosts
# Writing our configuration file to 'example.cfg'
with open(CONFIG_FILE, 'wb') as configfile:
config.write(configfile)
return instance
def mktag(val):
return [{'Key': 'Name', 'Value': val}]
def assert_instance():
"""
Return an EC2 Instance
:return:
"""
ec2 = boto3.resource('ec2')
instances = ec2.instances.filter(
Filters=[{'Name': 'tag:Name', 'Values': [env.notebook_server_tag]},
# {'Name': 'instance-state-name', 'Values': ['running']}
])
instance_list = [instance for instance in instances]
if len(instance_list) == 0:
print('not existing, will create')
return create_instance()
else:
return assert_running(instance_list[0])
def initial_deployment_with_assert():
print('checking instance')
instance = assert_instance()
execute(_initial_deployment, hosts=[instance.public_dns_name])
def initial_deployment():
execute(_initial_deployment)
def _initial_deployment():
print env.hosts
with settings(warn_only=True):
result = run('docker info')
if result.failed:
sudo('yum install -y docker')
sudo('sudo service docker start')
sudo('sudo usermod -a -G docker ec2-user')
# sudo('yum install -y git')
if not exists('bbc_tool', verbose=True):
sudo('yum install -y git')
run('git clone [email protected]:dschien/bbc_tool.git')
else:
update()
build_container()
start_nb_server()
def update():
with cd('bbc_tool'):
run('git pull')
def start_nb_server(with_assert=False):
if with_assert:
print('checking instance')
instance = assert_instance()
execute(_run_container, hosts=[instance.public_dns_name])
else:
execute(_run_container)
def _run_container():
update()
cmd = 'docker run -d -p 8888:8888 --name nb-server -v $(pwd):/home/jovyan/work -e PASSWORD="%s" dschien/nb' % \
env.nb_password
with cd('bbc_tool'):
run(cmd)
def build_container(with_assert=False):
print('checking instance')
if with_assert:
assert_instance()
with cd('bbc_tool/docker'):
run('docker build -t dschien/nb .')
def inspect_container(container_name_or_id=''):
""" e.g. fab --host ep.iodicus.net inspect_container:container_name_or_id=... """
with settings(warn_only=True):
result = run("docker inspect --format '{{ .State.Running }}' " + container_name_or_id)
running = (result == 'true')
if result.failed:
logger.warn('inspect_container failed for container {}'.format(container_name_or_id))
return container_state['NOT_FOUND']
if not running:
logger.info('container {} stopped'.format(container_name_or_id))
return container_state['STOPPED']
logger.info('container {} running'.format(container_name_or_id))
return container_state['RUNNING']
def stop_container(container_name_or_id=''):
with settings(warn_only=True):
result = run("docker stop " + container_name_or_id)
if not result.failed:
logger.info('container {} stopped'.format(container_name_or_id))
def remove_container(container_name_or_id=''):
with settings(warn_only=True):
result = run("docker rm " + container_name_or_id)
if result == container_name_or_id:
logger.info('container {} removed'.format(container_name_or_id))
else:
logger.warn('unexpect command result, check log output')
def docker_logs(container_name_or_id=''):
with settings(warn_only=True):
run('docker logs --tail 50 -f {}'.format(container_name_or_id))
def redeploy_container(container_name_or_id=''):
""" e.g. fab --host ep.iodicus.net inspect_container:container_name_or_id=... """
state = inspect_container(container_name_or_id)
if state == container_state['RUNNING']:
stop_container(container_name_or_id)
remove_container(container_name_or_id)
start_nb_server()
def update_site():
"""
Pull from git and restart docker containers
:return:
"""
update()
for container in ['nb-server']:
redeploy_container(container)
| [
"[email protected]"
] | |
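# Typical invocations for the Fabric tasks above (a sketch; the task names come
# from this fabfile, and the task:arg=value syntax is standard Fabric 1.x):
#
#   fab initial_deployment_with_assert     # create/start the EC2 host, then deploy
#   fab update_site                        # git pull and redeploy the nb-server container
#   fab docker_logs:container_name_or_id=nb-server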
acd3af53e483f5486883751ff18a9e9a124f4c06 | 038e6e13ad4a81cee5dbbd6ccc322d48330d15d7 | /AnswerCode/463IslandPerimeter.py | b6168c15bf6864a09d4e81b9745085bc8ea3662f | [] | no_license | aistoume/Leetcode | ad69dae6d9f41a03c883fc2582d0afd6997f83d6 | d8dc574b611d0e3d42367ccd47a44fd8443b0b27 | refs/heads/master | 2021-01-12T14:27:18.245818 | 2018-11-09T00:21:04 | 2018-11-09T00:21:04 | 70,066,007 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 657 | py | ### Youbin 2017/06/21
### 463 Island Perimeter
class Solution(object):
    def islandPerimeter(self, grid):
        p = 0
        newGrid = [[0]*(len(grid[0])+2)]
        for l in grid:
            row = [0]+ l + [0]
            newGrid.append(row)
        newGrid.append([0]*(len(grid[0])+2))
        for row in range(1,len(newGrid)-1):
            for col in range(1, len(newGrid[1])-1):
                if newGrid[row][col] == 1:
                    if newGrid[row-1][col] == 0:
                        p+=1
                    if newGrid[row+1][col] == 0:
                        p+=1
                    if newGrid[row][col-1] == 0:
                        p+=1
                    if newGrid[row][col+1] == 0:
                        p+=1
        return p
s = Solution()
l = [[0,1,0,0],[1,1,1,0],[0,1,0,0],[1,1,0,0]]
r = s.islandPerimeter(l)
print r | [
"[email protected]"
] | |
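# The driver above runs on the 4x4 grid [[0,1,0,0],[1,1,1,0],[0,1,0,0],[1,1,0,0]]:
# 7 land cells with 6 shared internal edges give a perimeter of 4*7 - 2*6 = 16,
# so the padded-grid scan prints 16.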
d69f2d5892bc361a4bb224fe5d218221024326c8 | 3ddc7fb5ac7ac91753a29beced3d2cfb63a2ba8b | /src/minij_proxy_asgi_aiohttp.py | 1c317a05af5abdd396904aa838e8becb2573d4f2 | [] | no_license | abilian/asgi-sandbox | 3f978ac9eba139e248af3508b506c87ed6f87fe1 | 49f6c50a94e90045c7c60533f6e564e6b745d8b5 | refs/heads/main | 2023-05-31T08:39:57.855151 | 2021-06-29T13:34:33 | 2021-06-29T13:34:33 | 364,506,274 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,003 | py | import asyncio
from typing import Mapping
import aiohttp
import fire
import uvicorn
from aiohttp import ClientSSLError, ClientTimeout, TooManyRedirects
from starlette.requests import Request
from starlette.responses import Response
# Extremely aggressive and hardcoded value
TIMEOUT = 10
DEFAULT_ACCESS_URL = "https://mynij.app.officejs.com"
async def application(scope, receive, send):
if scope["type"] != "http":
return
request = Request(scope, receive)
response = Response()
if request.method != "GET":
response.status = 405
else:
async with aiohttp.ClientSession() as client:
await fetch_content(client, request, response)
await response(scope, receive, send)
async def fetch_content(
client: aiohttp.ClientSession, request: Request, response: Response
) -> None:
url = request.query_params["url"]
proxy_query_header = make_request_headers(request.headers)
try:
proxy_response = await client.get(
url, headers=proxy_query_header, timeout=TIMEOUT
)
except ClientSSLError:
# Invalid SSL Certificate
status = 526
except ConnectionError:
status = 523
except ClientTimeout:
status = 524
except TooManyRedirects:
status = 520
else:
response.body = await proxy_response.content.read()
if proxy_response.status == 500:
response.status = 520
else:
copy_proxy_headers(proxy_response, response)
response.headers["Access-Control-Allow-Origin"] = get_access_url(request.headers)
def make_request_headers(headers: Mapping):
request_headers = {}
HEADERS = [
"Content-Type",
"Accept",
"Accept-Language",
"Range",
"If-Modified-Since",
"If-None-Match",
]
for k in HEADERS:
v = headers.get(k)
if v:
request_headers[k] = str(v)
return request_headers
def get_access_url(headers: Mapping):
return headers.get("Origin", DEFAULT_ACCESS_URL)
def copy_proxy_headers(proxy_response, response) -> None:
HEADERS = [
"Content-Disposition",
"Content-Type",
"Date",
"Last-Modified",
"Vary",
"Cache-Control",
"Etag",
"Accept-Ranges",
"Content-Range",
]
for k, v in proxy_response.headers.items():
k = k.title()
if k in HEADERS:
response.headers[k] = v
def main(host="localhost", port=8000, server="uvicorn"):
if server == "uvicorn":
uvicorn.run(
"minij_proxy_asgi_aiohttp:application",
host=host,
port=port,
log_level="info",
)
elif server == "hypercorn":
from hypercorn.asyncio import serve
from hypercorn.config import Config
config = Config()
config.bind = [f"{host}:{port}"]
asyncio.run(serve(application, config))
if __name__ == "__main__":
fire.Fire(main)
| [
"[email protected]"
] | |
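# Usage sketch for the proxy above: python-fire turns main()'s keyword
# arguments into CLI flags, so (URLs below are placeholders)
#
#   python minij_proxy_asgi_aiohttp.py --host 0.0.0.0 --port 8000
#   python minij_proxy_asgi_aiohttp.py --server hypercorn
#
# and a client request then looks like
#   GET http://localhost:8000/?url=https://example.com/feed.xml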
c239fa23fae2712d15c071e023a056a4116c2caf | b4cc610bbd069c2b3e1f50c82303d48de21843a4 | /ce/c235_test.py | 896a898bca43a1811bb53b5b1aac0718705bf3b4 | [] | no_license | AakashKumarNain/pythonesque | d47b890ff42fa7baa3f25f9569d8a7310c7aa710 | 3225aaf878c52962becafd60a50243a91f92b264 | refs/heads/master | 2020-03-18T00:07:00.624695 | 2018-05-19T09:24:16 | 2018-05-19T09:24:16 | 134,078,646 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 582 | py | """
Simple or trump
author: Manny [email protected]
info: http://thisthread.blogspot.com/2017/01/codeeval-simple-or-trump.html
https://www.codeeval.com/open_challenges/235/
"""
import unittest
from ce.c235 import solution
class TestCodeEval(unittest.TestCase):
    def test_provided_1(self):
        self.assertEqual('2H', solution('AD 2H | H'))

    def test_provided_2(self):
        self.assertEqual('KD KH', solution('KD KH | C'))

    def test_provided_3(self):
        self.assertEqual('JH', solution('JH 10S | C'))


if __name__ == '__main__':
    unittest.main()
| [
"[email protected]"
] | |
8609337d0074cd189a54453539b0385f45cc2b9b | ed719ee21d88b1d3fa03fbcc41cb2683930ea665 | /month05/AI/day05/demo05_traffic.py | a52e182e55428309879deb30567c217b8cf38444 | [] | no_license | KarlLichterVonRandoll/learning_python | 453305c0af116014e384e4335d53b9775587483d | c458e203e7f7bfce9641408ef63d6ba041ed7fef | refs/heads/master | 2022-02-23T03:04:24.580457 | 2019-09-17T03:43:45 | 2019-09-17T03:43:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,663 | py | """
Case study: traffic volume prediction (a regression problem)
"""
import numpy as np
import sklearn.preprocessing as sp
import sklearn.model_selection as ms
import sklearn.svm as svm
import sklearn.metrics as sm


class DigitEncoder():
    # custom numeric encoder that mimics LabelEncoder
    def fit_transform(self, y):
        return y.astype('i4')

    def transform(self, y):
        return y.astype('i4')

    def inverse_transform(self, y):
        return y.astype('str')


# load and tidy the data
data = np.loadtxt(
    '../ml_data/traffic.txt', delimiter=',',
    dtype='U20')
data = data.T
# build the dataset
x, y, encoders = [], [], []
for row in range(len(data)):
    # decide which encoder this feature column needs
    if data[row][0].isdigit():
        encoder = DigitEncoder()
    else:
        encoder = sp.LabelEncoder()
    # build the dataset
    if row < len(data) - 1:
        x.append(encoder.fit_transform(data[row]))
    else:
        y = encoder.fit_transform(data[row])
    encoders.append(encoder)
x = np.array(x).T
y = np.array(y)
print(x.shape, y.shape, x[0], y[0])
# split into training and test sets
train_x, test_x, train_y, test_y = \
    ms.train_test_split(
        x, y, test_size=0.25, random_state=7)
model = svm.SVR(
    kernel='rbf', C=10, epsilon=0.2)
model.fit(train_x, train_y)
pred_test_y = model.predict(test_x)
# r2_score
print(sm.r2_score(test_y, pred_test_y))
print(sm.mean_absolute_error(test_y, pred_test_y))

data = [['Tuesday', '13:35', 'San Francisco', 'no']]
data = np.array(data).T
x = []
for row in range(len(data)):
    encoder = encoders[row]
    x.append(encoder.transform(data[row]))
x = np.array(x).T
pred_y = model.predict(x)
print(pred_y) | [
"[email protected]"
] | |
803f26d0e831665f687af15088f956c6846a247d | 531c47c15b97cbcb263ec86821d7f258c81c0aaf | /sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_08_01/aio/operations_async/_available_delegations_operations_async.py | a4120781c732285c6b81e9300d16784aafbd4b5e | [
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] | permissive | YijunXieMS/azure-sdk-for-python | be364d3b88204fd3c7d223df23756386ff7a3361 | f779de8e53dbec033f98f976284e6d9491fd60b3 | refs/heads/master | 2021-07-15T18:06:28.748507 | 2020-09-04T15:48:52 | 2020-09-04T15:48:52 | 205,457,088 | 1 | 2 | MIT | 2020-06-16T16:38:15 | 2019-08-30T21:08:55 | Python | UTF-8 | Python | false | false | 5,085 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class AvailableDelegationsOperations:
"""AvailableDelegationsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
location: str,
**kwargs
) -> AsyncIterable["models.AvailableDelegationsResult"]:
"""Gets all of the available subnet delegations for this subscription in this region.
:param location: The location of the subnet.
:type location: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AvailableDelegationsResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_08_01.models.AvailableDelegationsResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.AvailableDelegationsResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01"
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'location': self._serialize.url("location", location, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('AvailableDelegationsResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/locations/{location}/availableDelegations'} # type: ignore
| [
"[email protected]"
] | |
4785a18fd7f8139ca9ffe991135f4fb33afce469 | 97f285b6f8016a8d1d2d675fffb771df3c9e37b9 | /web/simplehttpputserver.py | 29a977339705467a06f309e41d64dc6f919fd323 | [] | no_license | oskomorokhov/python | ef5408499840465d18852954aee9de460d0e7250 | 8909396c4200bd2fca19d3f216ed5f484fb2192a | refs/heads/master | 2021-05-14T09:27:25.413163 | 2019-12-12T21:00:05 | 2019-12-12T21:00:05 | 116,327,306 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 446 | py | import SimpleHTTPServer
import BaseHTTPServer


class SputHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
    def do_PUT(self):
        print self.headers
        length = int(self.headers["Content-Length"])
        path = self.translate_path(self.path)
        with open(path, "wb") as dst:
            dst.write(self.rfile.read(length))


if __name__ == '__main__':
    SimpleHTTPServer.test(HandlerClass=SputHTTPRequestHandler)
| [
"[email protected]"
] | |
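# A quick way to exercise the handler above (curl's -T flag issues a PUT; the
# file name is arbitrary). BaseHTTPServer.test() reads an optional port from argv:
#
#   python simplehttpputserver.py 8000
#   curl -T notes.txt http://localhost:8000/notes.txt
#
# after which the request body is written to ./notes.txt via translate_path().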
8f48ea04409d30f7fbe228bdf8395f39c7f6e20a | e377a9333401b992548ddb7e6ccd9452fdd1d071 | /os_package_registry/__init__.py | 05645fc92042b3bde8d401005e923f323fdd870c | [
"MIT"
] | permissive | OpenSpending-Clone/os-package-registry | 58a9d7754400f546f1bcdaa6b017dcf1dfa21049 | 27b82eff040a93e31f958820c62aa084a9cd1cc0 | refs/heads/master | 2022-02-24T22:32:10.714979 | 2019-11-06T21:07:36 | 2019-11-06T21:07:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 46 | py | from .package_registry import PackageRegistry
| [
"[email protected]"
] | |
3944f19dacca45a6c4ca733edff9d19acdd24250 | 4a8bfa3407aa98a04ede3162f85467b1b5012fe7 | /tests/test_api/test_methods/test_edit_message_live_location.py | 07b61a9d518c71e42d0b70073e468713690f8296 | [] | no_license | aiogram/tg-codegen | 07ec80814eec46f464d2490fd27b7b6b27257f1b | ba3c2f893591d45dda418dd16e0646e260afdf14 | refs/heads/master | 2022-12-09T10:44:10.781570 | 2021-11-07T23:33:25 | 2021-11-07T23:33:25 | 218,523,371 | 24 | 5 | null | 2022-12-08T08:47:43 | 2019-10-30T12:33:21 | Python | UTF-8 | Python | false | false | 1,141 | py | import pytest
from aiogram.api.methods import EditMessageLiveLocation, Request
from tests.mocked_bot import MockedBot


@pytest.mark.skip
class TestEditMessageLiveLocation:
    @pytest.mark.asyncio
    async def test_method(self, bot: MockedBot):
        prepare_result = bot.add_result_for(EditMessageLiveLocation, ok=True, result=None)
        response: Union[Message, bool] = await EditMessageLiveLocation(
            latitude=..., longitude=...,
        )
        request: Request = bot.get_request()
        assert request.method == "editMessageLiveLocation"
        # assert request.data == {}
        assert response == prepare_result.result

    @pytest.mark.asyncio
    async def test_bot_method(self, bot: MockedBot):
        prepare_result = bot.add_result_for(EditMessageLiveLocation, ok=True, result=None)
        response: Union[Message, bool] = await bot.edit_message_live_location(
            latitude=..., longitude=...,
        )
        request: Request = bot.get_request()
        assert request.method == "editMessageLiveLocation"
        # assert request.data == {}
        assert response == prepare_result.result
| [
"[email protected]"
] | |
1ee61b83ccfaad20826b03e78474d2cbc307c42e | 320bf3ddd6233577d9f2f08f046eaef96f881e4e | /Pillow-4.3.0/Tests/test_image_fromqpixmap.py | 543b74bbf249991a39c73878b670772b46f1f152 | [
"MIT"
] | permissive | leorzz/simplemooc | 057ba3e220c20907017edfd8d0fc0422f9a6d99c | 8b1c5e939d534b1fd729596df4c59fc69708b896 | refs/heads/master | 2022-10-22T02:24:46.733062 | 2017-12-17T16:37:04 | 2017-12-17T16:37:04 | 112,488,280 | 0 | 1 | MIT | 2022-10-08T17:50:17 | 2017-11-29T14:52:23 | Python | UTF-8 | Python | false | false | 837 | py | from helper import unittest, PillowTestCase, hopper
from test_imageqt import PillowQtTestCase, PillowQPixmapTestCase

from PIL import ImageQt


class TestFromQPixmap(PillowQPixmapTestCase, PillowTestCase):

    def roundtrip(self, expected):
        PillowQtTestCase.setUp(self)
        result = ImageQt.fromqpixmap(ImageQt.toqpixmap(expected))
        # Qt saves all pixmaps as rgb
        self.assert_image_equal(result, expected.convert('RGB'))

    def test_sanity_1(self):
        self.roundtrip(hopper('1'))

    def test_sanity_rgb(self):
        self.roundtrip(hopper('RGB'))

    def test_sanity_rgba(self):
        self.roundtrip(hopper('RGBA'))

    def test_sanity_l(self):
        self.roundtrip(hopper('L'))

    def test_sanity_p(self):
        self.roundtrip(hopper('P'))


if __name__ == '__main__':
    unittest.main()
| [
"[email protected]"
] |