the-stack_0_20450 | #!/usr/local/bin/python3
## revFerno: Reverse maps miRferno results to genome co-ordinates
## Updated: version-1.0 12/30/2017
## Property of Meyers Lab at University of Delaware
## Author: [email protected]
## Author: [email protected]
## ############
import sys,os,re,time,timeit,csv,glob,string,shutil,operator
import subprocess, multiprocessing
import pymysql as sql ## Assumed MySQL client - ConnectToDB() below calls sql.connect()/sql.Error but the original never imports 'sql' (MySQLdb also works)
from multiprocessing import Process, Queue, Pool
from operator import itemgetter
#########################################################################
#### USER SETTINGS ######################################################
predTar = "All.targs.parsed.csv"
Local = "Y" ## 'N' means internal version and 'Y' means external version i.e. local files used instead of miRNA and fasta files from server
GenomeDB = "xxxxxxxxxx" ## DB to get fastaOut file
# PAREdb = 'xxxxxxxx ' ## Make sure that your Library is in the DB
genomeFeature = 1 ## 0 for gene and 1 for inter; 2 for both
scoreThres = 5 ## Score cutoff to filter results especially for intergenic targets
#### STEPS ######################
getCoords = 0 ## Get genomic coordinates from genomeDB, if OFF then "gene_coords" file in current folder will be used
## If 0 then make sure the gene_coords file correspond to genomeFeature selected above or just turn this ON
revMap = 1 ## Reverse map miRferno coordinates
##### DEVELOPER OPTIONS ##########
nthread = 6 ## Need automatic calculation like nProc
nproc = "Y" ## Used by parallel processing
generateFasta = "N" ## Functionality not required in current version
dataserver = "xxxxxxxxx"
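### USAGE SKETCH (assumed invocation; adjust the settings above first) ##########
## 1. Place the miRferno/sPARTA target file named in 'predTar' in the working directory.
## 2. Either set getCoords = 1 with a reachable genome DB, or keep getCoords = 0 and
##    provide a pre-made '../coords' file matching the genomeFeature selected above.
## 3. Run:  python3 revFerno.py
##    Reverse-mapped results are written to ./revMapped/<predTar>_revmapped.csv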
### FUNCTIONS ###################
def ConnectToDB(server, infile):
##infile values are '0' when you don't want to upload data from a local file and '1' when you wish to upload data from a local file
##EX:con=sql.connect(host= server, user='kakrana', passwd='livetheday', local_infile = infile)
##Now later in script you can
##cur.execute("LOAD DATA LOCAL INFILE './scoring_input_extend2' INTO TABLE bioinfo_data.mir_page_results FIELDS TERMINATED BY ','")
print ('\nTrying to connect to mySQL server on %s' % (server))
# Try to connect to the database
try:
con=sql.connect(host= server, user='xxxxxxxx', passwd='xxxxxxxx')###local_infile = 1 not supported yet so a table has to be updated on row basis
print ('Connection Established\n')
# If we cannot connect to the database, send an error to the user and exit the program.
except sql.Error as e:
print ("Error %d: %s" % (e.args[0], e.args[1]))
sys.exit(1)
return con
def extractCoords(con,db):
cur= con.cursor()
###Filtering of RNA types is not done at this stage because we need all the genes to get intergenic regions
###after filtering, a few genes would be missing and their spans counted as intergenic regions
###########################################################TEST LINE#################################################################
#cur.execute("SELECT chr_id,strand,gene,start,end,type FROM %s.gene_master WHERE type LIKE 'protein_coding' AND gene NOT LIKE 'BLAST%%' ORDER BY chr_id,strand,start" % (db))###extra % escapes the % in query
#####################################################################################################################################
##If we remove miRNA here then only the gene entry will be removed, but its sequence will be included in the intergenic regions
if genomeFeature == 1: ##Intergenic
print("Fetching intergenic coords - If script stucks here for more then a minute, then cancel - ctrl+c and rerun ")
cur.execute("SELECT chr_id,strand,gene,start,end,type FROM %s.gene_master WHERE type NOT LIKE 'mirna' AND gene NOT LIKE 'BLAST%%' ORDER BY chr_id,strand,start" % (db))### Extra % escapes the % in query
elif genomeFeature == 0: ##Protein Coding
print("Fetching gene coords - If script stucks here for more then a minute, then cancel - ctrl+c and rerun ")
cur.execute("SELECT chr_id,strand,gene,start,end,type FROM %s.gene_master WHERE type LIKE 'protein_coding' AND gene NOT LIKE 'BLAST%%' ORDER BY chr_id,strand,start" % (db))### Extra % escapes the % in query
genome_info = cur.fetchall() ### List with information of all the genes in genome
##Check if list is empty
if not genome_info:
print ('^^^Gene Coords query returned with an empty list..Exiting^^^')
sys.exit()
####(1, 'c','AT1G01020', 5928, 8737, protein_coding)
#print (genome_info)
##Find length of chromosomes to calculate intergenics
cur.execute('SELECT chr_id, length FROM %s.chromosome_master' % (db))
chromo_len = cur.fetchall()
chromo_dict = dict(chromo_len) ### Made a dict so that chromosome number could be searched to get chromosome length
# print ('These are the chromosomes: %s and their length' % (chromo_dict))
genome_info_inter = genome_info ###This list will also hold intergenics
####GET INTERGENIC REGIONS AND APPEND TO THE LIST WITH GENE COORDS
alist = []###list maintained to check if the first gene on a chromosome and strand shows up; then the intergenic region is simply from position 1 to the start of that gene
#for gene1, gene2 from izip(*[iter(genome_info)]*2):
#for i,j in pairwise (genome_info):
#for gene1, gene2 in it.izip(genome_info[1:], genome_info):
#for i in range(0, int(len(genome_info))-1):###maybe for i in range(0, len(genome_info -1))
for i in range(0, int(len(genome_info))+1): ## May 20-14 - Modified from above after extensive troubleshooting - Now the last entry is read and both up- and downstream regions are calculated
#print (i)
gene1 = (genome_info[i])
gene2 = (genome_info[i+1])
gene_type = 'inter' ###set to intergenic by default
#print(gene1,gene2)
##Remove/skip redundant genes with same start and end......What about Overlapping genes????
if gene1[3] == gene2[3] and gene1[4] == gene2[4]:
##gene is same/overlapping consider next gene
pass
else:
##Calculate coordinates of intergenic regions
##If this is the first gene on the chromosome and strand, the intergenic region starts from position 1 - Only chr_id and strand are checked.
if tuple(gene1[0:2]) not in alist:
print ('\n------Caching gene coords for chromosome: %s and strand: %s------\n' % (gene1[0], gene1[1]))
#print ('Gene1:%s\nGene2:%s' % (gene1,gene2))
alist.append((gene1[0:2]))
inter_start1 = 1
inter_end1 = gene1[3]-1###1 nt before start of Gene1 a.k.a. the first gene on the chromosome in this case
##As two genes are read together, the upstream intergenic region for gene2 must be calculated in the same step
## If both the genes belong to the same chr and strand i.e. the chromosome has at least two genes
if gene1[0] == gene2[0] and gene1[1] == gene2[1]:
inter_start2 = gene1[4]+1##From end of first gene of chromosome
inter_end2 = gene2[3]-1###Till start of second gene
if gene1[1] == 'w': ##The gene is on positive strand so upstream
inter_name1 = ('%s_up' % (gene1[2]))
inter_name2 = ('%s_up' % gene2[2])
else: ##Its on negative strand
inter_name1 = ('%s_down' % (gene1[2]))
inter_name2 = ('%s_up' % gene1[2])
#if inter_name1 == inter_name2:
#print ('\nLoop1 - First Gene and more than one gene on this chr and strand')
#print (gene1[0],gene1[1],inter_name1,inter_start1,inter_end1,gene_type)
#print (gene2[0],gene2[1],inter_name2,inter_start2,inter_end2,gene_type)
genome_info_inter.append((gene1[0],gene1[1],inter_name1,inter_start1,inter_end1,gene_type))##Chr_id_strand,intergeneic name, inter start, inter end, gene type
genome_info_inter.append((gene2[0],gene2[1],inter_name2,inter_start2,inter_end2,gene_type))##Chr_id_strand,intergeneic name, inter start, inter end, gene type
## If the first two genes are not from the same chromosome and strand i.e. there is only one gene on this chromosome and strand - code added after Asparagus scaffolds had just one gene
else: ## intergenic till the end of the chromosome/scaffold
inter_start2 = gene1[4]+1##From end of first gene of chromosome
inter_end2 = chromo_dict[gene1[0]]###Till end of chromosome
if gene1[1] == 'w': ##The gene is on positive strand so upstream
inter_name1 = ('%s_up' % (gene1[2]))
inter_name2 = ('%s_down' % gene1[2])
else: ##Its on negative strand
inter_name1 = ('%s_down' % (gene1[2]))
inter_name2 = ('%s_up' % gene1[2])
#if inter_name1 == inter_name2:
#print ('\nLoop2 - First gene on this chromosme and strand but also the only one')
#print (gene1[0],gene1[1],inter_name1,inter_start1,inter_end1,gene_type)
#print (gene1[0],gene1[1],inter_name2,inter_start2,inter_end2,gene_type)
genome_info_inter.append((gene1[0],gene1[1],inter_name1,inter_start1,inter_end1,gene_type))##Chr_id_strand,intergeneic name, inter start, inter end, gene type
genome_info_inter.append((gene1[0],gene1[1],inter_name2,inter_start2,inter_end2,gene_type))##Chr_id_strand,intergeneic name, inter start, inter end, gene typ
else:
if gene1[0] == gene2[0] and gene1[1] == gene2[1]:###If chr_id and strands are equal then find the intergenic region. These are genes on the same chromosome and strand
inter_start = gene1[4]+1###End of Gene 1
inter_end = gene2[3]-1 ###1 nt before start of gene 2
if gene2[1] == 'w': ##Positive strand
inter_name = ('%s_up' % (gene2[2]))
else:## reverse strand
inter_name = ('%s_up' % (gene1[2]))
#print ('\nLoop3 - Not the first gene on chr and strand')
#print (gene2[0],gene2[1],inter_name,inter_start,inter_end,gene_type)
genome_info_inter.append((gene2[0],gene2[1],inter_name,inter_start,inter_end,gene_type))
else: ###That means gene1 is at the end of one chromosome and gene2 is at the beginning of the next, so we have to extract the intergenic region at the end of the chromosome
inter_start = gene1[4]+1###End of gene1
inter_end = chromo_dict[gene1[0]]###End of chromosome searched using chromosome id of gene1 from chromosome dictionary
if gene1[1] == 'w':##Positive strand, end of chromosome
inter_name = ('%s_down' % (gene1[2]))
else: ##Negative strand, first intergenic of chromosome
inter_name = ('%s_up' % (gene1[2]))
#print ('\nLoop4 - Not the first gene on chromosome and Strand AND the last gene on chromosome')
#print (gene1[0],gene1[1],inter_name,inter_start,inter_end,gene_type)
#
genome_info_inter.append((gene1[0],gene1[1],inter_name,inter_start,inter_end,gene_type)) ## Chr_id, strand
## Additional check for scaffolded genomes: if there are no genes in a scaffold, its whole sequence will be fetched as intergenic
if genomeFeature == 1:
for i in chromo_dict.keys():
alen = chromo_dict[i]
# print("Chr:%s | Length:%s" % (i,alen))
if tuple((i,'c')) in alist:
pass
else:
# print("Get the tuple")
inter_name = ('chr%s_c_all' % (i))
genome_info_inter.append((i,'c',inter_name,1,alen,'inter')) ## Chr_id, strand, name, start, stop, length
if tuple((i,'w')) in alist:
pass
else:
# print("Get the tuple")
inter_name = ('chr%s_w_all' % (i))
genome_info_inter.append((i,'w',inter_name,1,alen,'inter')) ## Chr_id, strand, name, start, stop, length
###Sort the list after adding intergenic regions on the basis of chr_id and strand, which is essential while caching a chromosome during sequence slicing
genome_info_inter_sort = sorted(genome_info_inter, key=operator.itemgetter(0,1))
#print(genome_info_inter_sort)
###Write all cooords for troubleshooting
all_coords_out = open('all_coords', 'w')
for i in genome_info_inter_sort:
all_coords_out.write('%s,%s,%s,%s,%s,%s\n' % (i[0:]))
all_coords_out.close()
###Filter list to remove unwanted types like miRNA,tRNA,rRNA,snoRNA,snRNA, short or no intergenic
gene_coords_file = '../coords'####To check whether coords are printed in chr_id- and strand-sorted order or not
coords_out = open(gene_coords_file, 'w')
gene_coords = []## List that will hold genes to fetch; this removes unnecessary RNAs and also fixes the miRNA double entry i.e. as gene and miRNA
for entry in genome_info_inter_sort:
#print(entry[5])
###If an RNA type is removed here then that region is not included in the analysis, but if the RNA is removed in the mySQL query then only the gene is removed and the region becomes intergenic
#if (entry[5] == 'miRNA' or entry[5] == 'tRNA' or entry[5] == 'rRNA' or entry [5] == 'snoRNA'): ##Replace this IF with tuple check i.e miRNA in tuple
if genomeFeature == 1: ## Inter
if (entry[5] == 'miRNA' or entry[5] == 'tRNA' or entry[5] == 'rRNA' or entry [5] == 'snoRNA' or entry [5] == 'protein-coding' or entry [5] == 'protein_coding' or entry [5] == 'misc_RNA'):
pass
else:
if entry[4]-entry[3] > 25:###If there is no intergenic region b/w genes, or it is too short, then filter it out
#gene_coords.append(entry[:5])
#coords_out.write('%s,%s,%s,%s,%s\n' % (entry[0:5]))
gene_coords.append(entry[0:])
coords_out.write('%s,%s,%s,%s,%s,%s\n' % (entry[0:]))
else: ## Protein coding
if (entry[5] == 'miRNA' or entry[5] == 'tRNA' or entry[5] == 'rRNA' or entry [5] == 'snoRNA' or entry [5] == 'inter'):
pass
else:
if entry[4]-entry[3] > 25:###If there is no intergenic region b/w genes, or it is too short, then filter it out
gene_coords.append(entry[0:])
coords_out.write('%s,%s,%s,%s,%s,%s\n' % (entry[0:]))
coords_out.close()
#print(gene_coords)
return gene_coords ### A list of selected gene coords
def RevMapCoord(ent): #### DOES THIS MODULE WORK IF THE USER USES MULTIPLE LIBRARIES? - Parallelize
## Create a dictionary from list of coords to be searched later
## Gene_coords structure: 1, 'c','AT1G01020', 5928, 8737, protein_coding
# print (ent)
gene_name = ent[1]
bind_site = ent[2].split('-')
# cleave_site = int(ent[8])
## Reverse map co-ordinates ##########################################################
if Local == 'N':
print ('**Reverse mapping - Web analysis**')
###Check whether the gene is from the reverse or positive strand by membership test on the dictionary
if gene_name in coord_dict_wat:
print ('Entry: %s in positive strand: %s' % (ent[0:4],coord_dict_wat[gene_name]))
geno_start = coord_dict_wat[gene_name][1]###Use for reverse mapping of postive genes
#print('Looking for chr_id')
chr_id = coord_dict_wat[gene_name][0]
#print('chr_id found')
strand = 'w' ## Available in dictionary also coord_dict_crick[gene_name][1]
gtype = coord_dict_wat[gene_name][2] ## Gene type
# new_cleave_site = (int(geno_start)-1)+int(cleave_site)###1 is reduced to give correct positions
new_bind_site_start = (int(geno_start)-1)+int(bind_site[0])
new_bind_site_end = (int(geno_start)-1)+int(bind_site[1])
new_bind_site = '%s-%s' % (new_bind_site_start,new_bind_site_end)
else:
print ('Entry: %s in reverse strand: %s' % (ent[0:4],coord_dict_crick[gene_name]))
geno_end = coord_dict_crick[gene_name][2] ### Use for reverse mapping of negative genes
#print('Looking for chr_id')
chr_id = coord_dict_crick[gene_name][0]
#print('chr_id found')
strand = 'c' ## Available in dictionary also coord_dict_crick[gene_name][1]
gtype = coord_dict_crick[gene_name][2] ##Gene type
# new_cleave_site = (int(geno_end)+1)-int(cleave_site)###1 is added to give correct positions
new_bind_site_end = (int(geno_end)+1)-int(bind_site[0])###As the sequence was reversed before TF and CL, their binding start and end direction has also changed - Verified-OK
new_bind_site_start = (int(geno_end)+1)-int(bind_site[1])
new_bind_site = '%s-%s' % (new_bind_site_start,new_bind_site_end)
elif Local=='Y': ## 'Y' i.e Local analysis
print ('**No Reverse mapping of Co-ordinates will be performed - Local analysis**')
if gene_name in coord_dict_wat:
print ('Entry: %s in positive strand: %s' % (ent[0:4],coord_dict_wat[gene_name]))
geno_start = coord_dict_wat[gene_name][1]###Use for reverse mapping of postive genes
#print('Looking for chr_id')
chr_id = coord_dict_wat[gene_name][0]
#print('chr_id found')
strand = 'w' ## Available in dictionary also coord_dict_crick[gene_name][1]
gtype = coord_dict_wat[gene_name][2] ## Gene type
# new_cleave_site = (int(geno_start)-1)+int(cleave_site)###1 is reduced to give correct positions
new_bind_site_start = (int(geno_start)-1)+int(bind_site[0])
new_bind_site_end = (int(geno_start)-1)+int(bind_site[1])
new_bind_site = '%s-%s' % (new_bind_site_start,new_bind_site_end)
else:
print ('Entry: %s in reverse strand: %s' % (ent[0:4],coord_dict_crick[gene_name]))
geno_end = coord_dict_crick[gene_name][2] ### Use for reverse mapping of negative genes
#print('Looking for chr_id')
chr_id = coord_dict_crick[gene_name][0]
#print('chr_id found')
strand = 'c' ## Available in dictionary also coord_dict_crick[gene_name][1]
gtype = coord_dict_crick[gene_name][2] ##Gene type
# new_cleave_site = (int(geno_end)+1)-int(cleave_site)###1 is added to give correct positions
new_bind_site_end = (int(geno_end)+1)-int(bind_site[0])###As the sequence was reversed before TF and CL, their binding start and end direction has also changed - Verified-OK
new_bind_site_start = (int(geno_end)+1)-int(bind_site[1])
new_bind_site = '%s-%s' % (new_bind_site_start,new_bind_site_end)
else:
print("Do nothing")
print("Rev Mapped: %s,%s,%s,%s" % (str(chr_id),strand,new_bind_site_start,new_bind_site_end))
rev_mapped_entry = ("%s,%s,%s,%s,%s" % (','.join(ent),str(chr_id),strand,new_bind_site_start,new_bind_site_end))
return rev_mapped_entry
def PP(module,alist):
print('***********Parallel instance of %s is being executed*********' % (module))
start = time.time()
##PP is being used for Bowtie mappings - This will avoid overflooding of processes to server
nprocPP = round((nproc/int(nthread))+1) ## 1 added so as to avoid 0 processor being allocated in serial mode
print('\nnprocPP:%s\n' % (nprocPP))
npool = Pool(int(nprocPP))
npool.map(module, alist)
def PPmultiple(module,alist1,alist2):
start = time.time()
npool = Pool(int(nproc))
npool.map(lambda args: module(*args), alist2)
def PPResults(module,alist):
npool = Pool(int(nproc))
res = npool.map_async(module, alist)
if(res.get() == []):
print("YEP!")
results = (res.get())
npool.close() ### Added by Reza to close hanging
return results
### MAIN #######################
def main():
### 1. GET COORDS ####
global con
print ("\n\nStep 1/4: Getting coords from server or local file")
if getCoords == 1:
## Get coords from GFF file or our genome DB
if Local == 'Y':
if generateFasta == 'Y':
coords = extractFeatures(genomeFile,gffFile) ## Extracts Coords from GFF3
# fastaOut = getFASTALocal(genomeFile,coords) ##Creates FASTA file
global tagLen ## Required later for tag2FASTA step as well
# unambiguousBaseCounter(fastaOut,tagLen)
else:
print("\nThe input FASTA file is considered 'as is' for analysis\n")
# fastaOut = genomeFile ### Make it better
else:
if generateFasta == 'Y':
con = ConnectToDB(dataserver,0)
coords = extractCoords(con,GenomeDB)###Get selected genes and all intergenic coords - all coords are also fetched
# fastaOut = getFASTAServer(con,GenomeDB,coords)###Extracts FASTA of supplied coords
# unambiguousBaseCounter(fastaOut,tagLen)
else:
print("\nThe input FASTA file is considered 'as is' for analysis\n")
con = ConnectToDB(dataserver,0)
coords = extractCoords(con,GenomeDB) ##For reverse mapping
# fastaOut = genomeFile ### Make it better
else:
## Read coords from gene coords file
fh_in = open("../coords","r")
fileRead = fh_in.readlines()
coords = [] ## List to store coords
for i in fileRead:
ent = i.strip("\n").split(",")
# print("This is read entry",ent)
coords.append((ent))
print("Coords read from file:%s entries" % (len(coords)))
print("Step 1/4: DONE!!\n\n")
time.sleep(1)
#### 2. PREPARE DICTIONARY OF COORDS ########################################
print("Step 2/4: Preparing dictionary of coordinates")
global coord_dict_wat, coord_dict_crick
coord_dict_wat = {} ## Dictionary of genes at watson strand, +
coord_dict_crick = {}###Dictionary of genes at crick strand, - strand
shutil.rmtree('./revMapped', ignore_errors=True) ## AK Added
os.mkdir('./revMapped') ## AK added
global nproc
nproc ='1' ## Need better handling
if Local == 'N':
for i in coords:### gene_coords is a list in script, also written out as file of same name
# print ("This is a coord:",i)
#strand = i.split(',')[1] for file###TEST if reading from file
strand = i[1]
if strand == 'c':### if entry in reverse strand
atuple = (i[0],i[4],i[5])
coord_dict_crick[i[2]] = atuple ## Gene name as key and chr_id,strand, end and gene type as value
else:
atuple = (i[0],i[3],i[5])
coord_dict_wat[i[2]] = atuple ## Gene name as key and chr_id,strand, end and gene type as value
elif Local == 'Y':
for i in coords:### gene_coords is a list in script, also written out as file of same name
# print ("This is a coord:",i)
#strand = i.split(',')[1] for file###TEST if reading from file
strand = i[1]
if strand == 'c':### if entry in reverse strand
atuple = (i[0],i[3],i[4],i[5])
coord_dict_crick[i[2]] = atuple ## Gene name as key and chr_id,start, end and gene type as value
else:
atuple = (i[0],i[3],i[4],i[5])
coord_dict_wat[i[2]] = atuple ## Gene name as key and chr_id,start, end and gene type as value
print (atuple)
else:
print("Do nothing")
print("Strand dictionary made")
print("Step 2/4: DONE!!\n\n")
time.sleep(1)
##### 3. Read the scoring input extend file and change coordinates ##########
print("Step 3/4: Reading predicted/validated targets in list")
print("Reading '%s' file from folder" % predTar)
ResFls = [] ## File list
# ResFls = [file for file in os.listdir('./') if file.endswith ('targs.parsed.csv')]
ResFls.append(predTar) ## To work on single file
print(ResFls)
acount = 0 ## All entries
bcount = 0 ## That passed user provided score cutoff
for afile in ResFls:
fh_in = open('./%s' % afile, 'r')### PARE VALIDATED results
header = fh_in.readline() ## Waste header
ScoInpExt = [] ## list of final results or parlist
for res in fh_in:
acount+=1
res_strp = res.strip('\n')
ent =res_strp.split(',')
if float(ent[5]) <= scoreThres:
ScoInpExt.append(ent)
bcount+= 1
else:
# print("Below user provides score threshold")
pass
print("Total entries in predicted file:%s | Passed user threshold:%s" % (acount,bcount))
print("Step 3/4: DONE!!\n\n")
time.sleep(2)
### 4.REVERSE MAP ##########
## Rev Map #######
print("Step 4/4: Reverse mapping using coords dict and targets list")
## # TEST- SINGLE CORE - For troubleshooting ----##
# print('\n***Entering RevMapCoord- Serial***\n')
# ValidTarGeno = []
# for i in ScoInpExt:
# print("\nEntry for reverse mapping:%s" % (i))
# z = RevMapCoord(i)
# ValidTarGeno.append(z)
## NORMAL - PARALLEL MODE - Uncomment the test above for troubleshooting
print('\n***Entering RevMapCoord- parallel***\n')
print ('**Reverse mapping initiated**\n\n')
ValidTarGeno = PPResults(RevMapCoord, ScoInpExt) ## Results are in form of list
print ('Reverse mapping complete for:%s\n\n\n' % (afile))
print("Step 4/4: Done!!\n\n")
time.sleep(1)
#### WRITE RESULTS ##############
print ("Writing Results")
revmapRes = './revMapped/%s_revmapped.csv' % (afile)
fh_out = open(revmapRes, 'w')
fh_out.write('%s,Chr,Strand,GenomicBindStart,GenomicBindEnd\n' % (header.strip('\n')))
for i in ValidTarGeno: ## Write Results from list to file
if i != 'E13-3-13':##Filter for error 13-13-13 that is small window abundance = 0 and ratio calculation error
fh_out.write('%s\n' % (i))
else:
print (i)
fh_in.close()
#fh_in2.close()
fh_out.close()
### RUN ########################
if __name__ == '__main__':
if nproc == 'Y':
nproc = int(multiprocessing.cpu_count()*0.80)
else:
nproc = int(nproc)
start = time.time()
main()
end = time.time()
print ('Complete run time is %s' % (round(end-start,2)))
print('The run has completed successfully.....CHEERS! - Exiting..\n')
sys.exit()
## v01
## Written to revmap internal mirFErno predicted targets
## v01 -> v02 [stable] [Nov-4-2015]
## Added compatibility to sPARTAv1.16 and above
## Modified user input - now a file is provided for reverse mapping instead of a lookup in the folder
## Improved user experience stuff, cleaned up the script
## v0.9 -> v1.0
## Fixed reverse mapping error (same as revFerno) - No valid triggers were being found on the 'c' strand - In the reverse mapping
#### function for the crick strand, the genomic start site was being used instead of the genomic end to compute
#### new coordinates - Fixed by replacing geno_end = coord_dict_crick[gene_name][1] with
#### geno_end = coord_dict_crick[gene_name][2] - see the difference in coordinates
|
the-stack_0_20452 | # Copyright 2009 by Tiago Antao <[email protected]>. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Control GenePop through an easier interface.
This interface is less efficient than the standard GenePopController.
"""
from .Controller import GenePopController
from Bio.PopGen import GenePop
class EasyController(object):
"""Define a class for an easier interface with the GenePop program."""
def __init__(self, fname, genepop_dir=None):
"""Initialize the controller.
genepop_dir is the directory where GenePop is.
The binary should be called Genepop (capital G)
"""
self._fname = fname
self._controller = GenePopController(genepop_dir)
self.__fst_pair_locus = {} # More caches like this needed!
self.__allele_frequency = {} # More caches like this needed!
def get_basic_info(self):
"""Obtain the population list and loci list from the file."""
with open(self._fname) as f:
rec = GenePop.read(f)
return rec.pop_list, rec.loci_list
# 1.3
def test_hw_pop(self, pop_pos, test_type="probability"):
"""Perform Hardy-Weinberg test on the given position."""
if test_type == "deficiency":
hw_res = self._controller.test_pop_hz_deficiency(self._fname)
elif test_type == "excess":
hw_res = self._controller.test_pop_hz_excess(self._fname)
else:
loci_res, hw_res, fisher_full = self._controller.test_pop_hz_prob(self._fname, ".P")
for i in range(pop_pos - 1):
next(hw_res)
return next(hw_res)
# 1.4
def test_hw_global(self, test_type="deficiency", enum_test=True,
dememorization=10000, batches=20, iterations=5000):
"""Perform Hardy-Weinberg global Heterozygote test."""
if test_type == "deficiency":
pop_res, loc_res, all = self._controller.test_global_hz_deficiency(
self._fname, enum_test, dememorization, batches, iterations)
else:
pop_res, loc_res, all = self._controller.test_global_hz_excess(
self._fname, enum_test, dememorization, batches, iterations)
return list(pop_res), list(loc_res), all
# 2.1
def test_ld_all_pair(self, locus1, locus2, dememorization=10000,
batches=20, iterations=5000):
"""Test for linkage disequilibrium for each pair of loci in each population."""
all_ld = self._controller.test_ld(self._fname, dememorization, batches, iterations)[1]
for ld_case in all_ld:
(l1, l2), result = ld_case
if (l1 == locus1 and l2 == locus2) or (l1 == locus2 and l2 == locus1):
return result
def estimate_nm(self):
"""Estimate Nm. Just a simple bridge."""
return self._controller.estimate_nm(self._fname)
def get_heterozygosity_info(self, pop_pos, locus_name):
"""Return the heterozygosity info for a certain locus on a population.
Returns (Expected homozygotes, observed homozygotes,
Expected heterozygotes, observed heterozygotes)
"""
geno_freqs = self._controller.calc_allele_genotype_freqs(self._fname)
pop_iter, loc_iter = geno_freqs
pops = list(pop_iter)
return pops[pop_pos][1][locus_name][1]
def get_genotype_count(self, pop_pos, locus_name):
"""Return the genotype counts for a certain population and locus."""
geno_freqs = self._controller.calc_allele_genotype_freqs(self._fname)
pop_iter, loc_iter = geno_freqs
pop_iter = list(pop_iter)
return pop_iter[pop_pos][1][locus_name][0]
def get_fis(self, pop_pos, locus_name):
"""Return the Fis for a certain population and locus.
Below CW means Cockerham and Weir and RH means Robertson and Hill.
Returns a pair:
- dictionary [allele] = (repetition count, frequency, Fis CW )
with information for each allele
- a triple with total number of alleles, Fis CW, Fis RH
"""
geno_freqs = self._controller.calc_allele_genotype_freqs(self._fname)
pop_iter, loc_iter = geno_freqs
pops = list(pop_iter)
return pops[pop_pos][1][locus_name][2:]
def get_alleles(self, pop_pos, locus_name):
"""Return the alleles for a certain population and locus."""
geno_freqs = self._controller.calc_allele_genotype_freqs(self._fname)
pop_iter, loc_iter = geno_freqs
pop_iter = list(pop_iter)
return list(pop_iter[pop_pos][1][locus_name][2].keys())
def get_alleles_all_pops(self, locus_name):
"""Return the alleles for a certain population and locus."""
geno_freqs = self._controller.calc_allele_genotype_freqs(self._fname)
pop_iter, loc_iter = geno_freqs
for locus_info in loc_iter:
if locus_info[0] == locus_name:
return locus_info[1]
def get_allele_frequency(self, pop_pos, locus_name):
"""Calculate the allele frequency for a certain locus on a population."""
if len(self.__allele_frequency) == 0:
geno_freqs = self._controller.calc_allele_genotype_freqs(self._fname)
pop_iter, loc_iter = geno_freqs
for locus_info in loc_iter:
if locus_info[0] is None:
self.__allele_frequency[locus_info[0]] = None, None
else:
self.__allele_frequency[locus_info[0]] = locus_info[1:]
info = self.__allele_frequency[locus_name]
pop_name, freqs, total = info[1][pop_pos]
allele_freq = {}
alleles = info[0]
for i in range(len(alleles)):
allele_freq[alleles[i]] = freqs[i]
return total, allele_freq
def get_multilocus_f_stats(self):
"""Return the multilocus F stats.
Explain averaging.
Returns Fis(CW), Fst, Fit
"""
return self._controller.calc_fst_all(self._fname)[0]
def get_f_stats(self, locus_name):
"""Return F stats for a locus.
Returns Fis(CW), Fst, Fit, Qintra, Qinter
"""
loci_iter = self._controller.calc_fst_all(self._fname)[1]
for name, fis, fst, fit, qintra, qinter in loci_iter:
if name == locus_name:
return fis, fst, fit, qintra, qinter
def get_avg_fis(self):
"""Calculate identity-base average Fis."""
return self._controller.calc_diversities_fis_with_identity(self._fname)[1]
def get_avg_fst_pair(self):
"""Calculate Allele size-base average Fis for all population pairs."""
return self._controller.calc_fst_pair(self._fname)[1]
def get_avg_fst_pair_locus(self, locus):
"""Calculate Allele size-base average Fis for all population pairs of the given locus."""
if len(self.__fst_pair_locus) == 0:
iter = self._controller.calc_fst_pair(self._fname)[0]
for locus_info in iter:
self.__fst_pair_locus[locus_info[0]] = locus_info[1]
return self.__fst_pair_locus[locus]
def calc_ibd(self, is_diplo=True, stat="a", scale="Log", min_dist=0.00001):
"""Calculate isolation by distance statistics for Diploid or Haploid."""
if is_diplo:
return self._controller.calc_ibd_diplo(self._fname, stat, scale, min_dist)
else:
return self._controller.calc_ibd_haplo(self._fname, stat, scale, min_dist)
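# Usage sketch (illustrative; run from user code with Biopython installed).
# "example.gen" below is a placeholder for a GenePop-format input file:
#
#     from Bio.PopGen.GenePop.EasyController import EasyController
#     ctrl = EasyController("example.gen")
#     pop_names, loci_names = ctrl.get_basic_info()
#     print(ctrl.get_allele_frequency(0, loci_names[0]))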
|
the-stack_0_20453 | # -*- coding: UTF-8 -*-
"""
A module to save a VoigtFit dataset to a file.
The files are saved in HDF5 format to allow easy portability.
The module also contains a function to convert
the older pickled datasets to the new HDF5 format.
"""
__author__ = 'Jens-Kristian Krogager'
import numpy as np
from os.path import splitext, basename
import pickle
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
import h5py
from lmfit import Parameters
from ..container import regions
def dataset_to_hdf(fname):
""" Convert a pickled dataset to the HDF5 format"""
f = open(fname, 'rb')
ds = pickle.load(f)
f.close()
f_base = basename(fname)
root, ext = splitext(f_base)
hdf_fname = root + '.vfit.h5'
save_hdf_dataset(ds, hdf_fname)
return hdf_fname
def save_hdf_dataset(ds, fname, verbose=True):
"""
Save VoigtFit.dataset to a HDF5 file.
The function maps the internal data to a HDF5 data model.
"""
if splitext(fname)[1] == '.hdf5':
pass
else:
fname += '.hdf5'
with h5py.File(fname, 'w') as hdf:
# set main attributes:
hdf.attrs['redshift'] = ds.redshift
if hasattr(ds.velspan, '__iter__'):
vmin, vmax = ds.velspan
hdf.attrs['vmin'] = vmin
hdf.attrs['vmax'] = vmax
else:
hdf.attrs['vmin'] = -ds.velspan
hdf.attrs['vmax'] = ds.velspan
if hasattr(ds, 'name'):
hdf.attrs['name'] = ds.name
else:
hdf.attrs['name'] = ''
if hasattr(ds, 'verbose'):
hdf.attrs['verbose'] = ds.verbose
else:
hdf.attrs['verbose'] = True
# .data:
data = hdf.create_group('data')
for num, chunk in enumerate(ds.data):
spec = data.create_group('spec%i' % (num+1))
spec.attrs['filename'] = ds.data_filenames[num]
spec.attrs['res'] = chunk['res']
spec.attrs['norm'] = chunk['norm']
spec.attrs['nsub'] = chunk['nsub']
spec.attrs['specID'] = chunk['specID']
spec.create_dataset('wl', data=chunk['wl'])
spec.create_dataset('flux', data=chunk['flux'])
spec.create_dataset('mask', data=chunk['mask'])
spec.create_dataset('error', data=chunk['error'])
# .regions:
hdf_regions = hdf.create_group('regions')
for num, reg in enumerate(ds.regions):
reg_group = hdf_regions.create_group('region%i' % (num+1))
if hasattr(reg.velspan, '__iter__'):
vmin, vmax = reg.velspan
reg_group.attrs['vmin'] = vmin
reg_group.attrs['vmax'] = vmax
else:
reg_group.attrs['vmin'] = -reg.velspan
reg_group.attrs['vmax'] = reg.velspan
reg_group.attrs['res'] = reg.res
reg_group.attrs['normalized'] = reg.normalized
reg_group.attrs['cont_err'] = reg.cont_err
reg_group.attrs['new_mask'] = reg.new_mask
reg_group.attrs['specID'] = reg.specID
reg_group.attrs['kernel_fwhm'] = reg.kernel_fwhm
reg_group.attrs['kernel_nsub'] = reg.kernel_nsub
reg_group.attrs['label'] = reg.label
reg_group.create_dataset('kernel', data=reg.kernel)
reg_group.create_dataset('wl', data=reg.wl)
reg_group.create_dataset('flux', data=reg.flux)
reg_group.create_dataset('mask', data=reg.mask)
reg_group.create_dataset('error', data=reg.err)
lines = reg_group.create_group('lines')
for line in reg.lines:
lines.create_group(line.tag)
lines[line.tag].attrs['active'] = line.active
# .molecules:
molecules = hdf.create_group('molecules')
if hasattr(ds, 'molecules'):
for molecule, items in ds.molecules.items():
pre_array = [tuple(item) for item in items]
band_data = np.array(pre_array,
dtype=[('band', 'S8'), ('Jmax', 'i4')])
molecules.create_dataset(molecule, data=band_data)
fine_lines = hdf.create_group('fine_lines')
if hasattr(ds, 'fine_lines'):
for ground_state, lines in ds.fine_lines.items():
# line_array = np.array(lines, dtype='str')
line_array = [s.encode("ascii", "ignore") for s in lines]
fine_lines.create_dataset(str(ground_state), data=line_array)
# .components:
components = hdf.create_group('components')
for ion, ds_comps in ds.components.items():
ion_group = components.create_group(ion)
for cnum, comp in enumerate(ds_comps):
comp_group = ion_group.create_group("comp%i" % (cnum+1))
comp_group.attrs['z'] = comp.z
comp_group.attrs['b'] = comp.b
comp_group.attrs['logN'] = comp.logN
for key, val in comp.options.items():
val = 'None' if val is None else val
comp_group.attrs[key] = val
# .best_fit:
if ds.best_fit is not None:
p_opt = ds.best_fit
best_fit = hdf.create_group('best_fit')
for ion, comps in ds.components.items():
params = best_fit.create_group(ion)
for n in range(len(comps)):
param_group = params.create_group("comp%i" % (n+1))
# Save best-fit values:
param_group.attrs['z'] = p_opt['z%i_%s' % (n, ion)].value
param_group.attrs['b'] = p_opt['b%i_%s' % (n, ion)].value
param_group.attrs['logN'] = p_opt['logN%i_%s' % (n, ion)].value
# and uncertainties:
param_group.attrs['z_err'] = p_opt['z%i_%s' % (n, ion)].stderr
param_group.attrs['b_err'] = p_opt['b%i_%s' % (n, ion)].stderr
param_group.attrs['logN_err'] = p_opt['logN%i_%s' % (n, ion)].stderr
# Save Chebyshev parameters:
cheb_group = best_fit.create_group('cheb_params')
for parname in list(ds.best_fit.keys()):
if 'cheb_p' in parname:
coeff = ds.best_fit[parname]
cheb_par = cheb_group.create_group(parname)
cheb_par.attrs['value'] = coeff.value
cheb_par.attrs['error'] = coeff.stderr
if verbose:
print("Successfully saved the dataset to file: " + fname)
def load_dataset_from_hdf(fname):
from ..container.lines import Line, lineList
from ..container.dataset import DataSet
"""Load dataset from HDF5 file and instantiate a `VoigtFit.Dataset' class."""
with h5py.File(fname, 'r') as hdf:
z_sys = hdf.attrs['redshift']
ds = DataSet(z_sys)
if 'velspan' in hdf.attrs:
vspan = hdf.attrs['velspan']
ds.velspan = (-vspan, vspan)
else:
vmin = hdf.attrs['vmin']
vmax = hdf.attrs['vmax']
ds.velspan = (vmin, vmax)
ds.verbose = hdf.attrs['verbose']
if 'name' in hdf.attrs.keys():
ds.set_name(hdf.attrs['name'])
else:
ds.set_name('')
# Load .data:
data = hdf['data']
for chunk in data.values():
# For backward compatibility:
if 'filename' in chunk.attrs.keys():
filename = chunk.attrs['filename']
else:
filename = ''
res = chunk.attrs['res']
norm = chunk.attrs['norm']
if 'nsub' in chunk.attrs.keys():
nsub = chunk.attrs['nsub']
else:
nsub = 1
wl = np.array(chunk['wl'])
flux = np.array(chunk['flux'])
error = np.array(chunk['error'])
if 'mask' in chunk.keys():
mask = np.array(chunk['mask'])
else:
mask = np.ones_like(wl, dtype=bool)
ds.add_data(wl, flux, res,
err=error, normalized=norm, nsub=nsub,
mask=mask, filename=filename)
# Load .regions:
# --- this will be deprecated in later versions
hdf_regions = hdf['regions']
for reg in hdf_regions.values():
region_lines = list()
for line_tag, line_group in reg['lines'].items():
act = line_group.attrs['active']
# Add check for backward compatibility:
if line_tag in lineList['trans']:
line_instance = Line(line_tag, active=act)
region_lines.append(line_instance)
ds.all_lines.append(line_tag)
ds.lines[line_tag] = line_instance
else:
print(" [WARNING] - Anomaly detected for line:")
print(" %s" % line_tag)
print(" I suspect that the atomic linelist has changed...")
print("")
# Instantiate the Region Class with the first Line:
line_init = region_lines[0]
if 'velspan' in reg.attrs.keys():
vspan = reg.attrs['velspan']
vspan = (-vspan, vspan)
else:
vmin = reg.attrs['vmin']
vmax = reg.attrs['vmax']
vspan = (vmin, vmax)
if 'specID' in reg.attrs.keys():
specID = reg.attrs['specID']
else:
specID = 'sid_tmp'
Region = regions.Region(vspan, specID, line_init)
if len(region_lines) == 1:
# The first and only line has already been loaded
pass
elif len(region_lines) > 1:
# Load the rest of the lines:
for line in region_lines[1:]:
Region.lines.append(line)
else:
err_msg = "Something went wrong in this region: %s. No lines are defined!" % str(reg.name)
raise ValueError(err_msg)
# Set region data and attributes:
Region.res = reg.attrs['res']
Region.normalized = reg.attrs['normalized']
Region.cont_err = reg.attrs['cont_err']
Region.new_mask = reg.attrs['new_mask']
if 'kernel_fwhm' in reg.attrs.keys():
Region.kernel_fwhm = reg.attrs['kernel_fwhm']
else:
Region.kernel_fwhm = reg.attrs['res']
try:
Region.label = reg.attrs['label']
except KeyError:
Region.label = ''
try:
Region.kernel_nsub = reg.attrs['kernel_nsub']
except KeyError:
Region.kernel_nsub = 1
if 'kernel' in reg.keys():
if len(reg['kernel'].shape) == 2:
Region.kernel = np.array(reg['kernel'])
else:
Region.kernel = float(reg['kernel'][()])
else:
Region.kernel = reg.attrs['res']
Region.wl = np.array(reg['wl'])
Region.flux = np.array(reg['flux'])
Region.mask = np.array(reg['mask'])
Region.err = np.array(reg['error'])
ds.regions.append(Region)
# Load .molecules:
molecules = hdf['molecules']
if len(molecules) > 0:
for molecule, band_data in molecules.items():
bands = [[b, J] for b, J in band_data]
ds.molecules[molecule] = bands
# No need to call ds.add_molecule
# lines are added above when defining the regions.
# Load .fine_lines:
# Older datasets do not have 'fine_lines', so add a check for backwards compatibility:
if 'fine_lines' in hdf:
fine_lines = hdf['fine_lines']
if len(fine_lines) > 0:
for ground_state, line_tags in fine_lines.items():
unicode_list = [s.decode('utf-8') for s in line_tags]
ds.fine_lines[ground_state] = unicode_list
# Load .components:
components = hdf['components']
if 'best_fit' in hdf:
# --- Prepare fit parameters [class: lmfit.Parameters]
ds.best_fit = Parameters()
for ion, comps in components.items():
ds.components[ion] = list()
N_comps = len(comps)
if N_comps > 0:
for n in range(N_comps):
pointer = '/components/%s/comp%i' % (ion, n+1)
comp = hdf[pointer]
if 'best_fit' in hdf:
# If 'best_fit' exists, use the best-fit values.
# The naming for 'best_fit' and 'components' is parallel
# so one variable in components can easily be identified
# in the best_fit data group by replacing the path:
fit_pointer = pointer.replace('components', 'best_fit')
z = hdf[fit_pointer].attrs['z']
z_err = hdf[fit_pointer].attrs['z_err']
b = hdf[fit_pointer].attrs['b']
b_err = hdf[fit_pointer].attrs['b_err']
logN = hdf[fit_pointer].attrs['logN']
logN_err = hdf[fit_pointer].attrs['logN_err']
else:
z = comp.attrs['z']
z_err = None
b = comp.attrs['b']
b_err = None
logN = comp.attrs['logN']
logN_err = None
# Extract component options:
opts = dict()
for varname in ['z', 'b', 'N']:
tie = comp.attrs['tie_%s' % varname]
tie = None if tie == 'None' else tie
vary = comp.attrs['var_%s' % varname]
opts['tie_%s' % varname] = tie
opts['var_%s' % varname] = vary
# Add component to DataSet class:
ds.add_component(ion, z, b, logN, **opts)
if 'best_fit' in hdf:
# Add Parameters to DataSet.best_fit:
z_name = 'z%i_%s' % (n, ion)
b_name = 'b%i_%s' % (n, ion)
N_name = 'logN%i_%s' % (n, ion)
ds.best_fit.add(z_name, value=z, vary=opts['var_z'])
ds.best_fit[z_name].stderr = z_err
ds.best_fit.add(b_name, value=b, vary=opts['var_b'],
min=0.)
ds.best_fit[b_name].stderr = b_err
ds.best_fit.add(N_name, value=logN, vary=opts['var_N'])
ds.best_fit[N_name].stderr = logN_err
if 'best_fit' in hdf:
# Now the components have been defined in `ds`, so I can use them for the loop
# to set the parameter ties:
for ion, comps in ds.components.items():
for n, comp in enumerate(comps):
z, b, logN = comp.get_pars()
z_name = 'z%i_%s' % (n, ion)
b_name = 'b%i_%s' % (n, ion)
N_name = 'logN%i_%s' % (n, ion)
if comp.tie_z:
ds.best_fit[z_name].expr = comp.tie_z
if comp.tie_b:
ds.best_fit[b_name].expr = comp.tie_b
if comp.tie_N:
ds.best_fit[N_name].expr = comp.tie_N
# Load Chebyshev parameters:
cheb_group = hdf['best_fit/cheb_params']
for parname, cheb_par in cheb_group.items():
ds.best_fit.add(parname, value=cheb_par.attrs['value'])
ds.best_fit[parname].stderr = cheb_par.attrs['error']
return ds
def SaveDataSet(filename, ds):
"""Save dataset to HDF5 file."""
print(" [WARNING] - this function is deprecated. Use save_dataset()")
save_hdf_dataset(ds, filename)
def LoadDataSet(filename):
"""Load a dataset from a HDF5 file."""
print(" [WARNING] - this function is deprecated. Use load_dataset()")
ds = load_dataset_from_hdf(filename)
return ds
def save_dataset(filename, ds):
"""Save dataset to HDF5 file."""
save_hdf_dataset(ds, filename)
def load_dataset(filename):
"""Load a dataset from a HDF5 file."""
ds = load_dataset_from_hdf(filename)
return ds
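# Usage sketch (illustrative): round-trip a dataset through the HDF5 format.
# 'existing.hdf5' is a placeholder for a file previously written by save_dataset():
#
#     ds = load_dataset('existing.hdf5')
#     save_dataset('existing_copy.hdf5', ds)
#
# Older pickled datasets can be migrated with dataset_to_hdf('old_dataset.pickle').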
|
the-stack_0_20456 | """Storage layer for the UWS implementation."""
from __future__ import annotations
from datetime import datetime, timedelta, timezone
from functools import wraps
from typing import Any, Awaitable, Callable, List, Optional, TypeVar, cast
from safir.database import datetime_from_db, datetime_to_db
from sqlalchemy import delete
from sqlalchemy.exc import DBAPIError, OperationalError
from sqlalchemy.ext.asyncio import async_scoped_session
from sqlalchemy.future import select
from sqlalchemy.orm import scoped_session
from .exceptions import UnknownJobError
from .models import (
Availability,
ExecutionPhase,
Job,
JobDescription,
JobError,
JobParameter,
JobResult,
)
from .schema.job import Job as SQLJob
from .schema.job_parameter import JobParameter as SQLJobParameter
from .schema.job_result import JobResult as SQLJobResult
F = TypeVar("F", bound=Callable[..., Any])
G = TypeVar("G", bound=Callable[..., Awaitable[Any]])
__all__ = ["FrontendJobStore", "WorkerJobStore"]
def _convert_job(job: SQLJob) -> Job:
"""Convert the SQL representation of a job to its dataclass.
The internal representation of a job uses a dataclass that is kept
intentionally separate from the database schema so that the conversion
can be done explicitly and the rest of the code kept separate from
SQLAlchemy database models. This internal helper function converts
from the database representation to the internal representation.
"""
error = None
if job.error_code and job.error_type and job.error_message:
error = JobError(
error_type=job.error_type,
error_code=job.error_code,
message=job.error_message,
detail=job.error_detail,
)
return Job(
job_id=str(job.id),
message_id=job.message_id,
owner=job.owner,
phase=job.phase,
run_id=job.run_id,
creation_time=datetime_from_db(job.creation_time),
start_time=datetime_from_db(job.start_time),
end_time=datetime_from_db(job.end_time),
destruction_time=datetime_from_db(job.destruction_time),
execution_duration=job.execution_duration,
quote=job.quote,
parameters=[
JobParameter(
parameter_id=p.parameter, value=p.value, is_post=p.is_post
)
for p in sorted(job.parameters, key=lambda p: p.id)
],
results=[
JobResult(
result_id=r.result_id,
url=r.url,
size=r.size,
mime_type=r.mime_type,
)
for r in sorted(job.results, key=lambda r: r.sequence)
],
error=error,
)
def retry_async_transaction(g: G) -> G:
"""Retry once if a transaction failed.
Notes
-----
The UWS database workers may be run out of order (the one indicating the
job has started may be run after the one indicating the job has finished,
for example), which means we need a ``REPEATABLE READ`` transaction
isolation level so that we can check if a job status change has already
been done and avoid setting a job in ``COMPLETED`` back to ``EXECUTING``.
Unfortunately, that isolation level causes the underlying database to
raise an exception on commit if we raced with another worker. We
therefore need to retry if a transaction failed with an exception.
The only functions that can race for a given job are the frontend setting
the job status to ``QUEUED``, the backend setting it to ``EXECUTING``, and
the backend setting it to ``COMPLETED`` or ``ERROR``. Priorities should
force the second to always execute before the third, so we should only
race with at most one other SQL transaction. Therefore, retrying once
should be sufficient.
"""
@wraps(g)
async def wrapper(*args: Any, **kwargs: Any) -> Any:
for _ in range(1, 5):
try:
return await g(*args, **kwargs)
except (DBAPIError, OperationalError):
continue
return await g(*args, **kwargs)
return cast(G, wrapper)
class FrontendJobStore:
"""Stores and manipulates jobs in the database for the frontend.
This is the async storage layer used by the web service frontend. Workers
use the `WorkerJobStore`, which is synchronous.
Parameters
----------
session : `sqlalchemy.ext.asyncio.async_scoped_session`
The underlying database session.
Notes
-----
Timestamp handling deserves special comment. By default, SQLAlchemy
databases do not store timestamp information in database rows. It's
possible to use a variant data type to do so in PostgreSQL, but since
all database times will be in UTC, there's no need to do so.
psycopg2 silently discards the UTC timezone information when storing a
datetime (and apparently silently adds it when retrieving one). However,
asyncpg does not do this, and attempts to store a timezone-aware datetime
in a database column that is not defined as holding timezone information
results in an error.
Best practices for Python are to make every datetime normally seen in the
program timezone-aware so that one is never bitten by unexpected timezone
variations. Therefore, the storage layer should only expose
timezone-aware datetimes.
This is done by stripping the timezone from datetimes when stored in the
database (making the assumption that all datetimes will use UTC, which is
maintained by the rest of the UWS layer), and adding the UTC timezone back
to datetimes when retrieved from the database.
"""
def __init__(self, session: async_scoped_session) -> None:
self._session = session
async def add(
self,
*,
owner: str,
run_id: Optional[str] = None,
params: List[JobParameter],
execution_duration: int,
lifetime: int,
) -> Job:
"""Create a record of a new job.
The job will be created in pending status.
Parameters
----------
owner : `str`
The username of the owner of the job.
run_id : `str`, optional
A client-supplied opaque identifier to record with the job.
params : List[`vocutouts.uws.models.JobParameter`]
The input parameters to the job.
execution_duration : `int`
The maximum length of time for which a job is allowed to run in
seconds.
lifetime : `int`
The maximum lifetime of the job and its results, in seconds.
After this time, any record of the job will be deleted.
Returns
-------
job : `vocutouts.uws.models.Job`
The internal representation of the newly-created job.
"""
now = datetime.now(tz=timezone.utc).replace(microsecond=0)
destruction_time = now + timedelta(seconds=lifetime)
sql_params = [
SQLJobParameter(
parameter=p.parameter_id,
value=p.value,
is_post=p.is_post,
)
for p in params
]
job = SQLJob(
owner=owner,
phase=ExecutionPhase.PENDING,
run_id=run_id,
creation_time=datetime_to_db(now),
destruction_time=datetime_to_db(destruction_time),
execution_duration=execution_duration,
parameters=sql_params,
results=[],
)
async with self._session.begin():
self._session.add_all([job, *sql_params])
await self._session.flush()
return _convert_job(job)
async def availability(self) -> Availability:
"""Check that the database is up."""
try:
async with self._session.begin():
await self._session.execute(select(SQLJob.id).limit(1))
return Availability(available=True)
except OperationalError:
note = "cannot query UWS job database"
return Availability(available=False, note=note)
except Exception as e:
note = f"{type(e).__name__}: {str(e)}"
return Availability(available=False, note=note)
async def delete(self, job_id: str) -> None:
"""Delete a job by ID."""
async with self._session.begin():
stmt = delete(SQLJob).where(SQLJob.id == int(job_id))
await self._session.execute(stmt)
async def get(self, job_id: str) -> Job:
"""Retrieve a job by ID."""
async with self._session.begin():
job = await self._get_job(job_id)
return _convert_job(job)
async def list_jobs(
self,
user: str,
*,
phases: Optional[List[ExecutionPhase]] = None,
after: Optional[datetime] = None,
count: Optional[int] = None,
) -> List[JobDescription]:
"""List the jobs for a particular user.
Parameters
----------
user : `str`
Name of the user whose jobs to load.
phases : List[`vocutouts.uws.models.ExecutionPhase`], optional
Limit the result to jobs in this list of possible execution
phases.
after : `datetime.datetime`, optional
Limit the result to jobs created after the given datetime in UTC.
count : `int`, optional
Limit the results to the most recent count jobs.
Returns
-------
descriptions : List[`vocutouts.uws.models.JobDescription`]
List of job descriptions matching the search criteria.
"""
stmt = select(
SQLJob.id,
SQLJob.owner,
SQLJob.phase,
SQLJob.run_id,
SQLJob.creation_time,
).where(SQLJob.owner == user)
if phases:
stmt = stmt.where(SQLJob.phase.in_(phases))
if after:
stmt = stmt.where(SQLJob.creation_time > datetime_to_db(after))
stmt = stmt.order_by(SQLJob.creation_time.desc())
if count:
stmt = stmt.limit(count)
async with self._session.begin():
jobs = await self._session.execute(stmt)
return [
JobDescription(
job_id=str(j.id),
owner=j.owner,
phase=j.phase,
run_id=j.run_id,
creation_time=j.creation_time,
)
for j in jobs.all()
]
@retry_async_transaction
async def mark_queued(self, job_id: str, message_id: str) -> None:
"""Mark a job as queued for processing.
This is called by the web frontend after queuing the work. However,
the worker may have gotten there first and have already updated the
phase to executing, in which case we should not set it back to
queued.
Parameters
----------
job_id : `str`
The identifier of the job.
message_id : `str`
The identifier for the execution of that job in the work queuing
system.
"""
async with self._session.begin():
job = await self._get_job(job_id)
job.message_id = message_id
if job.phase in (ExecutionPhase.PENDING, ExecutionPhase.HELD):
job.phase = ExecutionPhase.QUEUED
async def update_destruction(
self, job_id: str, destruction: datetime
) -> None:
"""Update the destruction time of a job.
Parameters
----------
job_id : `str`
The identifier of the job.
destruction : `datetime.datetime`
The new destruction time.
"""
async with self._session.begin():
job = await self._get_job(job_id)
job.destruction_time = datetime_to_db(destruction)
async def update_execution_duration(
self, job_id: str, execution_duration: int
) -> None:
"""Update the destruction time of a job.
Parameters
----------
job_id : `str`
The identifier of the job.
execution_duration : `int`
The new execution duration.
"""
async with self._session.begin():
job = await self._get_job(job_id)
job.execution_duration = execution_duration
async def _get_job(self, job_id: str) -> SQLJob:
"""Retrieve a job from the database by job ID."""
stmt = select(SQLJob).where(SQLJob.id == int(job_id))
job = (await self._session.execute(stmt)).scalar_one_or_none()
if not job:
raise UnknownJobError(job_id)
return job
def retry_transaction(f: F) -> F:
"""Retry once if a transaction failed.
Notes
-----
The UWS database workers may be run out of order (the one indicating the
job has started may be run after the one indicating the job has finished,
for example), which means we need a ``REPEATABLE READ`` transaction
isolation level so that we can check if a job status change has already
been done and avoid setting a job in ``COMPLETED`` back to ``EXECUTING``.
Unfortunately, that isolation level causes the underlying database to
raise an exception on commit if we raced with another worker. We
therefore need to retry if a transaction failed with an exception.
"""
@wraps(f)
def wrapper(*args: Any, **kwargs: Any) -> Any:
for _ in range(1, 5):
try:
return f(*args, **kwargs)
except OperationalError:
continue
return f(*args, **kwargs)
return cast(F, wrapper)
class WorkerJobStore:
"""Records worker actions in the database.
This is the synchronous storage layer used by the backend workers.
Parameters
----------
session : `sqlalchemy.orm.scoped_session`
The underlying database session.
"""
def __init__(self, session: scoped_session) -> None:
self._session = session
@retry_transaction
def mark_completed(self, job_id: str, results: List[JobResult]) -> None:
"""Mark a job as completed."""
with self._session.begin():
job = self._get_job(job_id)
job.phase = ExecutionPhase.COMPLETED
job.end_time = datetime_to_db(datetime.now(tz=timezone.utc))
for sequence, result in enumerate(results, start=1):
sql_result = SQLJobResult(
job_id=job.id,
result_id=result.result_id,
sequence=sequence,
url=result.url,
size=result.size,
mime_type=result.mime_type,
)
self._session.add(sql_result)
@retry_transaction
def mark_errored(self, job_id: str, error: JobError) -> None:
"""Mark a job as failed with an error."""
with self._session.begin():
job = self._get_job(job_id)
job.phase = ExecutionPhase.ERROR
job.end_time = datetime_to_db(datetime.now(tz=timezone.utc))
job.error_type = error.error_type
job.error_code = error.error_code
job.error_message = error.message
job.error_detail = error.detail
@retry_transaction
def start_executing(
self, job_id: str, message_id: str, start_time: datetime
) -> None:
"""Mark a job as executing.
Parameters
----------
job_id : `str`
The identifier of the job.
message_id : `str`
The identifier for the execution of that job in the work queuing
system.
start_time : `datetime`
The time at which the job started executing.
"""
with self._session.begin():
job = self._get_job(job_id)
if job.phase in (ExecutionPhase.PENDING, ExecutionPhase.QUEUED):
job.phase = ExecutionPhase.EXECUTING
job.start_time = datetime_to_db(start_time)
job.message_id = message_id
def _get_job(self, job_id: str) -> SQLJob:
"""Retrieve a job from the database by job ID."""
stmt = select(SQLJob).where(SQLJob.id == int(job_id))
job = self._session.execute(stmt).scalar_one_or_none()
if not job:
raise UnknownJobError(job_id)
return job
|
the-stack_0_20457 | # -*- coding: utf-8 -*-
# @Time : 2019-12-25 17:00
# @Author : binger
import time
import sys
import json
import logging
logger = logging.getLogger("utils")
def new_registry(attribute=None):
"""
Returns an empty dict and a @register decorator.
"""
registry = {}
def register(key: str):
def decorator(func):
registry[key] = func
if attribute:
setattr(func, attribute, key)
return func
return decorator
return registry, register
from functools import wraps
# 3s 执行, 默认重复3次吗, 有唤醒,先唤醒,没唤醒不执行
def apply_repeat_run(awake_cb=None, at_exception_cb=None, tries=3, interval=0.1, timeout=3):
"""add apply_connect_continued semantics to a function."""
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
n = 0
exception_stack = None
start = time.time()
while n != tries:
if n != 0 and awake_cb and not awake_cb():
continue
else:
n += 1
try:
result = func(*args, **kwargs)
break
except Exception as e:
exception_stack = e
if at_exception_cb and at_exception_cb(e):
if time.time() - start > timeout:
raise e
time.sleep(interval)
else:
raise e
else:
raise exception_stack
logger.debug("takes: {}, user NO.:{}".format(round(time.time() - start, 2), n))
return result
# decorator.__name__ = func.__name__
return wrapper
# apply_repeat_run.__name__ = decorator.__name__
# print("decorator: ", decorator)
return decorator
if __name__ == "__main__":
pass
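    # Illustrative sketch (assumptions: a flaky callable that may raise
    # ConnectionError; retry it up to 3 times, sleeping `interval` seconds
    # between attempts before giving up).
    @apply_repeat_run(at_exception_cb=lambda e: isinstance(e, ConnectionError), tries=3)
    def flaky_call():
        print("trying...")
        return 42

    print(flaky_call())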
|
the-stack_0_20458 | # -*- coding: utf-8 -*-
__author__ = 'CubexX'
from werkzeug.contrib.fixers import ProxyFix
from flask_orator import Orator
from flask import Flask
import memcache
import json
with open('config.json', 'r') as file:
CONFIG = json.loads(file.read())
cache = memcache.Client(CONFIG['cache']['servers'], debug=CONFIG['cache']['debug'])
app = Flask(__name__, static_folder='../static')
app.config['ORATOR_DATABASES'] = {
'mysql': {
'driver': CONFIG['db']['driver'],
'host': CONFIG['db']['host'],
'database': CONFIG['db']['database'],
'user': CONFIG['db']['user'],
'password': CONFIG['db']['password']
}
}
app.wsgi_app = ProxyFix(app.wsgi_app)
app.secret_key = CONFIG['salt']
db = Orator(app)
from app.views import index, group, user, admin
if __name__ == '__main__':
app.run()
|
the-stack_0_20461 | #!/usr/bin/env python3
from __future__ import print_function
from cffi import FFI
import time
ffi = FFI()
ffi.cdef("""
typedef uint64_t counter_t;
typedef struct {
counter_t packets;
counter_t bytes;
} vlib_counter_t;
typedef enum {
STAT_DIR_TYPE_ILLEGAL = 0,
STAT_DIR_TYPE_SCALAR_INDEX,
STAT_DIR_TYPE_COUNTER_VECTOR_SIMPLE,
STAT_DIR_TYPE_COUNTER_VECTOR_COMBINED,
STAT_DIR_TYPE_ERROR_INDEX,
STAT_DIR_TYPE_NAME_VECTOR,
} stat_directory_type_t;
typedef struct
{
stat_directory_type_t type;
union {
uint64_t index;
uint64_t value;
uint64_t *data;
};
char name[128]; // TODO change this to pointer to "somewhere"
} stat_segment_directory_entry_t;
typedef struct
{
char *name;
stat_directory_type_t type;
union
{
double scalar_value;
counter_t *error_vector;
counter_t **simple_counter_vec;
vlib_counter_t **combined_counter_vec;
uint8_t **name_vector;
};
} stat_segment_data_t;
typedef struct
{
uint64_t version;
void *base;
uint64_t epoch;
uint64_t in_progress;
stat_segment_directory_entry_t *directory_vector;
uint64_t **error_vector;
} stat_segment_shared_header_t;
typedef struct
{
uint64_t current_epoch;
stat_segment_shared_header_t *shared_header;
stat_segment_directory_entry_t *directory_vector;
ssize_t memory_size;
} stat_client_main_t;
stat_client_main_t * stat_client_get(void);
void stat_client_free(stat_client_main_t * sm);
int stat_segment_connect_r (char *socket_name, stat_client_main_t * sm);
int stat_segment_connect (char *socket_name);
void stat_segment_disconnect_r (stat_client_main_t * sm);
void stat_segment_disconnect (void);
uint32_t *stat_segment_ls_r (uint8_t ** patterns, stat_client_main_t * sm);
uint32_t *stat_segment_ls (uint8_t ** pattern);
stat_segment_data_t *stat_segment_dump_r (uint32_t * stats,
stat_client_main_t * sm);
stat_segment_data_t *stat_segment_dump (uint32_t * counter_vec);
void stat_segment_data_free (stat_segment_data_t * res);
double stat_segment_heartbeat_r (stat_client_main_t * sm);
int stat_segment_vec_len(void *vec);
uint8_t **stat_segment_string_vector(uint8_t **string_vector, char *string);
char *stat_segment_index_to_name_r (uint32_t index, stat_client_main_t * sm);
uint64_t stat_segment_version(void);
uint64_t stat_segment_version_r(stat_client_main_t *sm);
void free(void *ptr);
""") # noqa: E501
# Utility functions
def make_string_vector(api, strings):
vec = ffi.NULL
if type(strings) is not list:
strings = [strings]
for s in strings:
vec = api.stat_segment_string_vector(vec, ffi.new("char []",
s.encode('utf-8')))
return vec
def make_string_list(api, vec):
vec_len = api.stat_segment_vec_len(vec)
return [ffi.string(vec[i]) for i in range(vec_len)]
# 2-dimensional array of thread, index
def simple_counter_vec_list(api, e):
vec = []
for thread in range(api.stat_segment_vec_len(e)):
len_interfaces = api.stat_segment_vec_len(e[thread])
if_per_thread = [e[thread][interfaces]
for interfaces in range(len_interfaces)]
vec.append(if_per_thread)
return vec
def vlib_counter_dict(c):
return {'packets': c.packets,
'bytes': c.bytes}
def combined_counter_vec_list(api, e):
vec = []
for thread in range(api.stat_segment_vec_len(e)):
len_interfaces = api.stat_segment_vec_len(e[thread])
if_per_thread = [vlib_counter_dict(e[thread][interfaces])
for interfaces in range(len_interfaces)]
vec.append(if_per_thread)
return vec
def error_vec_list(api, e):
vec = []
for thread in range(api.stat_segment_vec_len(e)):
vec.append(e[thread])
return vec
def name_vec_list(api, e):
return [ffi.string(e[i]).decode('utf-8') for i in
range(api.stat_segment_vec_len(e)) if e[i] != ffi.NULL]
def stat_entry_to_python(api, e):
# Scalar index
if e.type == 1:
return e.scalar_value
if e.type == 2:
return simple_counter_vec_list(api, e.simple_counter_vec)
if e.type == 3:
return combined_counter_vec_list(api, e.combined_counter_vec)
if e.type == 4:
return error_vec_list(api, e.error_vector)
if e.type == 5:
return name_vec_list(api, e.name_vector)
raise NotImplementedError()
class VPPStatsIOError(IOError):
message = "Stat segment client connection returned: " \
"%(retval)s %(strerror)s."
strerror = {-1: "Stat client couldn't open socket",
-2: "Stat client socket open but couldn't connect",
-3: "Receiving file descriptor failed",
-4: "mmap fstat failed",
-5: "mmap map failed"
}
def __init__(self, message=None, **kwargs):
if 'retval' in kwargs:
self.retval = kwargs['retval']
kwargs['strerror'] = self.strerror[int(self.retval)]
if not message:
try:
message = self.message % kwargs
except Exception:
message = self.message
else:
message = message % kwargs
super(VPPStatsIOError, self).__init__(message)
class VPPStatsClientLoadError(RuntimeError):
pass
class VPPStats(object):
VPPStatsIOError = VPPStatsIOError
default_socketname = '/run/vpp/stats.sock'
sharedlib_name = 'libvppapiclient.so'
def __init__(self, socketname=default_socketname, timeout=10):
self.socketname = socketname
self.timeout = timeout
self.connected = False
try:
self.api = ffi.dlopen(VPPStats.sharedlib_name)
except Exception:
raise VPPStatsClientLoadError("Could not open: %s" %
VPPStats.sharedlib_name)
def connect(self):
self.client = self.api.stat_client_get()
poll_end_time = time.time() + self.timeout
while time.time() < poll_end_time:
rv = self.api.stat_segment_connect_r(
self.socketname.encode('utf-8'), self.client)
# Break out if success or any other error than "no such file"
# (indicating that VPP hasn't started yet)
if rv == 0 or ffi.errno != 2:
self.connected = True
break
if rv != 0:
raise VPPStatsIOError(retval=rv)
def heartbeat(self):
if not self.connected:
self.connect()
return self.api.stat_segment_heartbeat_r(self.client)
def ls(self, patterns):
if not self.connected:
self.connect()
return self.api.stat_segment_ls_r(make_string_vector(self.api,
patterns),
self.client)
def lsstr(self, patterns):
if not self.connected:
self.connect()
rv = self.api.stat_segment_ls_r(make_string_vector(self.api,
patterns),
self.client)
if rv == ffi.NULL:
raise VPPStatsIOError()
return [ffi.string(self.api.stat_segment_index_to_name_r(
rv[i], self.client)).decode('utf-8')
for i in range(self.api.stat_segment_vec_len(rv))]
def dump(self, counters):
if not self.connected:
self.connect()
stats = {}
rv = self.api.stat_segment_dump_r(counters, self.client)
# Raise exception and retry
if rv == ffi.NULL:
raise VPPStatsIOError()
rv_len = self.api.stat_segment_vec_len(rv)
for i in range(rv_len):
n = ffi.string(rv[i].name).decode('utf-8')
e = stat_entry_to_python(self.api, rv[i])
if e is not None:
stats[n] = e
return stats
def get_counter(self, name):
retries = 0
while True:
try:
d = self.ls(name)
s = self.dump(d)
if len(s) > 1:
raise AttributeError('Matches multiple counters {}'
.format(name))
k, v = s.popitem()
return v
except VPPStatsIOError:
if retries > 10:
return None
retries += 1
def get_err_counter(self, name):
"""Get an error counter. The errors from each worker thread
are summed"""
return sum(self.get_counter(name))
def disconnect(self):
try:
self.api.stat_segment_disconnect_r(self.client)
self.api.stat_client_free(self.client)
self.connected = False
del self.client
except AttributeError:
# no need to disconnect if we're not connected
pass
def set_errors(self):
        '''Return all error counters > 0'''
retries = 0
while True:
try:
error_names = self.ls(['/err/'])
error_counters = self.dump(error_names)
break
except VPPStatsIOError:
if retries > 10:
return None
retries += 1
return {k: sum(error_counters[k])
for k in error_counters.keys() if sum(error_counters[k])}
def set_errors_str(self):
        '''Return all error counters > 0, pretty printed'''
s = ['ERRORS:']
error_counters = self.set_errors()
for k in sorted(error_counters):
s.append('{:<60}{:>10}'.format(k, error_counters[k]))
return '%s\n' % '\n'.join(s)
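# Illustrative usage sketch (assumption: added for clarity, not part of the original module;
# it only works when a VPP instance is running behind the default stats socket, and
# '/if/names' is an assumed counter pattern).
def _example_stats_dump():  # pragma: no cover
    stats = VPPStats()
    stats.connect()
    try:
        # list matching counter indices, then dump their values
        names = stats.ls(['/if/names'])
        return stats.dump(names)
    finally:
        stats.disconnect()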
|
the-stack_0_20462 | import collections.abc
from abc import abstractmethod
from typing import Dict
from folio_uuid import FOLIONamespaces, FolioUUID
from folioclient import FolioClient
from folio_migration_tools.custom_exceptions import TransformationProcessError
from folio_migration_tools.library_configuration import LibraryConfiguration
from folio_migration_tools.mapper_base import MapperBase
from folio_migration_tools.migration_report import MigrationReport
class UserMapperBase(MapperBase):
def __init__(self, folio_client: FolioClient, library_configuration: LibraryConfiguration):
super().__init__(library_configuration, folio_client)
self.legacy_id_map: Dict[str, str] = {}
self.migration_report = MigrationReport()
self.folio_client = folio_client
self.mapped_folio_fields = {}
self.ref_data_dicts = {}
self.mapped_legacy_fields = {}
def print_mapping_report(self, total_records):
print("\n## Mapped FOLIO fields")
d_sorted = {k: self.mapped_folio_fields[k] for k in sorted(self.mapped_folio_fields)}
print("FOLIO Field | % | Has value")
print("--- | --- | --- :")
for k, v in d_sorted.items():
mp = v / total_records
mapped_per = "{:.0%}".format(max(mp, 0))
print(f"{k} | {mapped_per} | {v} ")
print("\n## Mapped Legacy fields")
d_sorted = {k: self.mapped_legacy_fields[k] for k in sorted(self.mapped_legacy_fields)}
print("Legacy Field | % | Has Value")
print("--- | --- | --- :")
for k, v in d_sorted.items():
mp = v / total_records
mapped_per = "{:.0%}".format(max(mp, 0))
print(f"{k} | {mapped_per} | {v}")
def report_legacy_mapping(self, legacy_object):
for field_name, value in legacy_object.items():
v = 1 if value else 0
if field_name not in self.mapped_legacy_fields:
self.mapped_legacy_fields[field_name] = [1, v]
else:
self.mapped_legacy_fields[field_name][0] += 1
self.mapped_legacy_fields[field_name][1] += v
def report_folio_mapping(self, folio_object):
flat_object = flatten(folio_object)
for field_name, value in flat_object.items():
v = 1 if value else 0
if field_name not in self.mapped_folio_fields:
self.mapped_folio_fields[field_name] = [1, v]
else:
self.mapped_folio_fields[field_name][0] += 1
self.mapped_folio_fields[field_name][1] += v
def instantiate_user(self, legacy_id):
if not legacy_id:
raise TransformationProcessError(
"",
(
"Legacy id not present. Have you set "
"the legacyIdentifier in the mapping file?"
),
)
user_id = str(FolioUUID(self.folio_client.okapi_url, FOLIONamespaces.users, legacy_id))
return {
"metadata": self.folio_client.get_metadata_construct(),
"id": user_id,
"type": "object",
"personal": {},
"customFields": {},
}
def validate(self, folio_user):
failures = []
self.migration_report.add(
"Number of addresses per user",
len(folio_user["personal"].get("addresses", [])),
)
req_fields = ["username", "email", "active"]
for req in req_fields:
if req not in folio_user:
failures.append(req)
self.migration_report.add(
"Failed records that needs to get fixed",
f"Required field {req} is missing from {folio_user['username']}",
)
if not folio_user["personal"].get("lastName", ""):
failures.append("lastName")
self.migration_report.add(
"Failed records that needs to get fixed",
f"Required field personal.lastName is missing from {folio_user['username']}",
)
if failures:
self.migration_report.add("User validation", "Total failed users")
for failure in failures:
self.migration_report.add("User validation", f"{failure}")
raise ValueError(f"Record {folio_user['username']} failed validation {failures}")
def write_migration_report(self):
for a in self.migration_report:
print("")
print(f"## {a} - {len(self.migration_report[a])} things")
print("Measure | Count")
print("--- | ---:")
b = self.migration_report[a]
sortedlist = [(k, b[k]) for k in sorted(b, key=as_str)]
for b in sortedlist:
print(f"{b[0]} | {b[1]}")
def save_migration_report_to_disk(self, file_path, total_records):
with open(file_path, "w+") as report_file:
for a in self.migration_report:
report_file.write("\n")
report_file.write(f"## {a} - {len(self.migration_report[a])} things\n")
report_file.write("Measure | Count\n")
report_file.write("--- | ---:\n")
b = self.migration_report[a]
sortedlist = [(k, b[k]) for k in sorted(b, key=as_str)]
for b in sortedlist:
report_file.write(f"{b[0]} | {b[1]}\n")
report_file.write("\n## Mapped FOLIO fields\n")
d_sorted = {k: self.mapped_folio_fields[k] for k in sorted(self.mapped_folio_fields)}
report_file.write("FOLIO Field | % | Has Value\n")
report_file.write("--- | --- | --- | ---:\n")
for k, v in d_sorted.items():
mp = v / total_records
mapped_per = "{:.0%}".format(max(mp, 0))
report_file.write(f"{k} | {mapped_per} | {v} \n")
report_file.write("\n## Mapped Legacy fields\n")
d_sorted = {k: self.mapped_legacy_fields[k] for k in sorted(self.mapped_legacy_fields)}
report_file.write("Legacy Field | % | Has Value\n")
report_file.write("--- | --- | --- | ---:\n")
for k, v in d_sorted.items():
mp = v / total_records
mapped_per = "{:.0%}".format(max(mp, 0))
report_file.write(f"{k} | {mapped_per} | {v}\n")
@staticmethod
def print_dict_to_md_table(my_dict, h1="", h2=""):
d_sorted = {k: my_dict[k] for k in sorted(my_dict)}
print(f"{h1} | {h2}")
print("--- | ---:")
for k, v in d_sorted.items():
print(f"{k} | {v}")
def get_ref_data_tuple_by_code(self, ref_data, ref_name, code):
return self.get_ref_data_tuple(ref_data, ref_name, code, "code")
def get_ref_data_tuple_by_name(self, ref_data, ref_name, name):
return self.get_ref_data_tuple(ref_data, ref_name, name, "name")
def get_ref_data_tuple(self, ref_data, ref_name, key_value, key_type):
dict_key = f"{ref_name}{key_type}"
ref_object = self.ref_data_dicts.get(dict_key, {}).get(key_value.lower().strip(), ())
# logging.info(f"{key_value} - {ref_object} - {dict_key}")
if ref_object:
return ref_object
else:
d = {}
for r in ref_data:
d[r[key_type].lower()] = (r["id"], r["name"])
self.ref_data_dicts[dict_key] = d
return self.ref_data_dicts.get(dict_key, {}).get(key_value.lower().strip(), ())
@abstractmethod
def do_map(self, legacy_user, object_map):
raise NotImplementedError
@abstractmethod
def get_users(self, source_file, file_format: str):
raise NotImplementedError
def flatten(d, parent_key="", sep="."):
items = []
for k, v in d.items():
new_key = parent_key + sep + k if parent_key else k
if isinstance(v, collections.abc.MutableMapping):
items.extend(flatten(v, new_key, sep=sep).items())
else:
items.append((new_key, v))
return dict(items)
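# Illustrative check (assumption: added for clarity, not part of the original module).
# flatten() produces the dot-separated keys that report_folio_mapping counts on.
def _flatten_example():  # pragma: no cover
    sample = {"personal": {"lastName": "Doe"}, "active": True}
    return flatten(sample)  # {"personal.lastName": "Doe", "active": True}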
def as_str(s):
try:
return str(s), ""
except ValueError:
return "", s
|
the-stack_0_20465 | # list of all classification algorithms from sklearn
from sklearn.utils import all_estimators
from sklearn.base import ClassifierMixin
classifiers=[est for est in all_estimators() if issubclass(est[1], ClassifierMixin)]
# print(classifiers)
for clf in classifiers:
    print(clf)
estimators = all_estimators(type_filter='classifier')
print("\n") #############################################################################################
all_clfs = []
for name, ClassifierClass in estimators:
    try:
        clf = ClassifierClass()
    except TypeError:
        # meta-estimators (e.g. VotingClassifier) need constructor arguments; skip them
        print('Skipping', name)
        continue
    print('Appending', name)
    all_clfs.append(clf)
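# Illustrative sketch (assumption: added for clarity). Fits every classifier collected above
# on a small toy dataset and reports its training accuracy; anything that still fails to fit
# is simply skipped.
def _example_fit_all():  # pragma: no cover
    from sklearn.datasets import load_iris
    X, y = load_iris(return_X_y=True)
    scores = {}
    for clf in all_clfs:
        try:
            scores[type(clf).__name__] = clf.fit(X, y).score(X, y)
        except Exception:
            continue
    return scores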
the-stack_0_20466 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import absolute_import
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('data_interfaces', '0003_update__automaticupdaterulecriteria__match_type__choices'),
]
operations = [
migrations.AddField(
model_name='automaticupdateaction',
name='property_value_type',
field=models.CharField(default=b'EXACT', max_length=15),
preserve_default=True,
),
migrations.AddField(
model_name='automaticupdaterule',
name='filter_on_server_modified',
field=models.BooleanField(default=True),
preserve_default=True,
),
migrations.AlterField(
model_name='automaticupdaterule',
name='server_modified_boundary',
field=models.IntegerField(null=True),
preserve_default=True,
),
]
|
the-stack_0_20467 | from __future__ import print_function
import sys
import json
import time
from functools import wraps
from pprint import pprint
from relevanced_client import Client
from . import caching, urls, crawl
def get_client():
return Client('localhost', 8097)
@caching.memoize0
def load_large_math():
return crawl.crawl_urls(urls.MATH)
@caching.memoize0
def load_large_poli():
return crawl.crawl_urls(urls.POLITICS)
def init_documents(client):
existing_docs = set(client.list_all_documents().documents)
math_docs = list(load_large_math())
articles_by_url = {}
urls_by_length = []
for url, doc in load_large_math().iteritems():
articles_by_url[doc['url']] = doc['text']
urls_by_length.append((len(doc['text']), doc['url']))
for url, doc in load_large_poli().iteritems():
articles_by_url[doc['url']] = doc['text']
urls_by_length.append((len(doc['text']), doc['url']))
urls_by_length.sort(key=lambda x: x[0])
urls_by_length.reverse()
for _, url in urls_by_length:
text = articles_by_url[url]
if url not in existing_docs:
res = client.create_document_with_id(
url, text
)
print('created : %s' % res.id)
def init_centroids(client):
existing_centroids = set(client.list_all_centroids().centroids)
for coll in ('wiki_math', 'wiki_poli'):
if coll not in existing_centroids:
client.create_centroid(coll)
existing_math_docs = set(client.list_all_documents_for_centroid('wiki_math').documents)
existing_poli_docs = set(client.list_all_documents_for_centroid('wiki_poli').documents)
missing_math = [url for url in load_large_math().keys() if url not in existing_math_docs]
client.add_documents_to_centroid(
'wiki_math', missing_math
)
# for url in load_large_math().keys():
# if url not in existing_math_docs:
# client.add_document_to_centroid(
# 'wiki_math', url
# )
missing_poli = [url for url in load_large_poli().keys() if url not in existing_poli_docs]
client.add_documents_to_centroid(
'wiki_poli', missing_poli
)
# for url in load_large_poli().keys():
# if url not in existing_poli_docs:
# client.add_document_to_centroid(
# 'wiki_poli', url
# )
def main():
client = get_client()
init_documents(client)
if not len(client.list_all_centroids().centroids) >= 2:
init_centroids(client)
client.multi_join_centroids(['wiki_math', 'wiki_poli'])
print('\n\n')
print('math vs math')
for doc in load_large_math().values()[:10]:
print(doc['title'])
print(client.get_text_similarity('wiki_math', doc['text']))
print('\n\n')
print('math vs poli')
for doc in load_large_math().values()[:10]:
print(doc['title'])
print(client.get_text_similarity('wiki_poli', doc['text']))
|
the-stack_0_20469 | #!/usr/bin/env python3
#
# Copyright (2021) The Delta Lake Project Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import subprocess
from os import path
import shutil
import argparse
def delete_if_exists(path):
# if path exists, delete it.
if os.path.exists(path):
shutil.rmtree(path)
print("Deleted %s " % path)
def run_maven_proj(test_dir, example, version, maven_repo, scala_version):
print(f"\n\n##### Running Maven verification {example} on standalone version {version} with scala version {scala_version}#####")
clear_artifact_cache()
with WorkingDirectory(test_dir):
cmd = ["mvn", "package", "exec:java", "-Dexec.cleanupDaemonThreads=false",
f"-Dexec.mainClass=example.{example}",
f"-Dscala.version={scala_version}", f"-Dstaging.repo.url={maven_repo}",
f"-Dstandalone.version={version}"]
run_cmd(cmd, stream_output=True)
def run_sbt_proj(test_dir, example, version, maven_repo, scala_version):
print(f"\n\n##### Running SBT verification {example} on standalone version {version} with scala version {scala_version}#####")
clear_artifact_cache()
env = {"STANDALONE_VERSION": str(version)}
if maven_repo:
env["EXTRA_MAVEN_REPO"] = maven_repo
with WorkingDirectory(test_dir):
cmd = ["build/sbt", f"++ {scala_version}", f"{example[0].lower() + example[1:]}/runMain example.{example}"]
run_cmd(cmd, stream_output=True, env=env)
def clear_artifact_cache():
print("Clearing Delta artifacts from ivy2 and mvn cache")
delete_if_exists(os.path.expanduser("~/.ivy2/cache/io.delta"))
delete_if_exists(os.path.expanduser("~/.ivy2/local/io.delta"))
delete_if_exists(os.path.expanduser("~/.m2/repository/io/delta/"))
def run_cmd(cmd, throw_on_error=True, env=None, stream_output=False, **kwargs):
cmd_env = os.environ.copy()
if env:
cmd_env.update(env)
if stream_output:
child = subprocess.Popen(cmd, env=cmd_env, **kwargs)
exit_code = child.wait()
if throw_on_error and exit_code != 0:
raise Exception("Non-zero exitcode: %s" % (exit_code))
return exit_code
else:
child = subprocess.Popen(
cmd,
env=cmd_env,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
**kwargs)
(stdout, stderr) = child.communicate()
exit_code = child.wait()
if throw_on_error and exit_code != 0:
raise Exception(
"Non-zero exitcode: %s\n\nSTDOUT:\n%s\n\nSTDERR:%s" %
(exit_code, stdout, stderr))
return (exit_code, stdout, stderr)
class WorkingDirectory(object):
def __init__(self, working_directory):
self.working_directory = working_directory
self.old_workdir = os.getcwd()
def __enter__(self):
os.chdir(self.working_directory)
def __exit__(self, tpe, value, traceback):
os.chdir(self.old_workdir)
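# Illustrative sketch (assumption: added for clarity; the command below is an arbitrary example).
# Shows how run_cmd and WorkingDirectory combine to run a command inside another directory and
# capture its output.
def _example_run_cmd(directory):  # pragma: no cover
    with WorkingDirectory(directory):
        exit_code, stdout, stderr = run_cmd(["git", "status", "--short"])
    return exit_code, stdout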
if __name__ == "__main__":
"""
Script to run integration tests which are located in the examples directory.
    Call this by running "python3 run-integration-tests.py";
    additionally, the version can be provided as a command line argument.
"""
root_dir = path.dirname(__file__)
parser = argparse.ArgumentParser()
parser.add_argument(
"--version",
required=False,
default="0.3.0",
help="Delta Standalone version to use to run the integration tests")
parser.add_argument(
"--maven-repo",
required=False,
default=None,
help="Additional Maven repo to resolve staged new release artifacts")
args = parser.parse_args()
examples = [("convert-to-delta", "ConvertToDelta"),
("hello-world", "HelloWorld")]
for dir, c in examples:
run_maven_proj(path.join(root_dir, dir), c, args.version, args.maven_repo, "2.11")
run_maven_proj(path.join(root_dir, dir), c, args.version, args.maven_repo, "2.12")
run_maven_proj(path.join(root_dir, dir), c, args.version, args.maven_repo, "2.13")
run_sbt_proj(root_dir, c, args.version, args.maven_repo, "2.11.12")
run_sbt_proj(root_dir, c, args.version, args.maven_repo, "2.12.8")
run_sbt_proj(root_dir, c, args.version, args.maven_repo, "2.13.8")
|
the-stack_0_20470 | from __future__ import print_function
import cv2 as cv
import numpy as np
import argparse
parser = argparse.ArgumentParser(description='Code for Feature Matching with FLANN tutorial.')
parser.add_argument('--input1', help='Path to input image 1.', default='../data/box.png')
parser.add_argument('--input2', help='Path to input image 2.', default='../data/box_in_scene.png')
args = parser.parse_args()
img1 = cv.imread(args.input1, cv.IMREAD_GRAYSCALE)
img2 = cv.imread(args.input2, cv.IMREAD_GRAYSCALE)
if img1 is None or img2 is None:
print('Could not open or find the images!')
exit(0)
#-- Step 1: Detect the keypoints using SURF Detector, compute the descriptors
minHessian = 400
detector = cv.xfeatures2d_SURF.create(hessianThreshold=minHessian)
keypoints1, descriptors1 = detector.detectAndCompute(img1, None)
keypoints2, descriptors2 = detector.detectAndCompute(img2, None)
#-- Step 2: Matching descriptor vectors with a FLANN based matcher
# Since SURF is a floating-point descriptor NORM_L2 is used
matcher = cv.DescriptorMatcher_create(cv.DescriptorMatcher_FLANNBASED)
knn_matches = matcher.knnMatch(descriptors1, descriptors2, 2)
#-- Filter matches using the Lowe's ratio test
ratio_thresh = 0.7
good_matches = []
for m,n in knn_matches:
if m.distance / n.distance <= ratio_thresh:
good_matches.append(m)
#-- Draw matches
img_matches = np.empty((max(img1.shape[0], img2.shape[0]), img1.shape[1]+img2.shape[1], 3), dtype=np.uint8)
cv.drawMatches(img1, keypoints1, img2, keypoints2, good_matches, img_matches, flags=cv.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
#-- Show detected matches
cv.imshow('Good Matches', img_matches)
cv.waitKey()
|
the-stack_0_20472 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
from oslo_config import cfg
from oslo_config import types
from kolla.version import version_info as version
BASE_OS_DISTRO = ['centos', 'rhel', 'ubuntu', 'oraclelinux', 'debian']
DISTRO_RELEASE = {
'centos': '7',
'rhel': '7',
'oraclelinux': '7',
'debian': '8',
'ubuntu': '16.04',
}
DELOREAN = ("http://buildlogs.centos.org/centos/7/cloud/x86_64/"
"rdo-trunk-master-tested/delorean.repo")
# TODO(pbourke): update to buildlogs.centos.org once this moves
DELOREAN_DEPS = "http://trunk.rdoproject.org/centos7/delorean-deps.repo"
INSTALL_TYPE_CHOICES = ['binary', 'source', 'rdo', 'rhos']
_PROFILE_OPTS = [
cfg.ListOpt('infra',
default=['ceph', 'cron', 'elasticsearch', 'etcd', 'haproxy',
'heka', 'keepalived', 'kibana', 'kolla-toolbox',
'mariadb', 'memcached', 'mongodb', 'openvswitch',
'rabbitmq', 'tgtd'],
help='Infra images'),
cfg.ListOpt('main',
default=['cinder', 'ceilometer', 'glance', 'heat',
'horizon', 'iscsi', 'keystone', 'neutron', 'nova',
'swift'],
help='Main images'),
cfg.ListOpt('aux',
default=['aodh', 'cloudkitty', 'congress', 'designate',
'freezer', 'gnocchi', 'influxdb', 'ironic', 'karbor',
'kuryr', 'magnum', 'manila', 'mistral', 'murano',
'octavia', 'panko', 'rally', 'sahara', 'searchlight',
'senlin', 'solum', 'tacker' 'telegraf', 'trove',
'zaqar'],
help='Aux Images'),
cfg.ListOpt('default',
default=['chrony', 'cron', 'kolla-toolbox', 'glance',
'haproxy', 'heat', 'horizon', 'keepalived',
'keystone', 'memcached', 'mariadb', 'neutron', 'nova',
'openvswitch', 'rabbitmq', 'heka'],
help='Default images'),
cfg.ListOpt('gate',
default=['chrony', 'cron', 'glance', 'haproxy', 'keepalived',
'keystone', 'kolla-toolbox', 'mariadb', 'memcached',
'neutron', 'nova', 'openvswitch', 'rabbitmq', 'heka'],
help='Gate images')
]
_CLI_OPTS = [
cfg.StrOpt('base', short='b', default='centos',
choices=BASE_OS_DISTRO,
help='The distro type of the base image'),
cfg.StrOpt('base-tag', default='latest',
help='The base distro image tag'),
cfg.StrOpt('base-image',
help='The base image name. Default is the same with base'),
cfg.BoolOpt('debug', short='d', default=False,
help='Turn on debugging log level'),
cfg.DictOpt('build-args',
help='Set docker build time variables'),
cfg.BoolOpt('keep', default=False,
help='Keep failed intermediate containers'),
cfg.BoolOpt('list-dependencies', short='l',
help='Show image dependencies (filtering supported)'),
cfg.BoolOpt('list-images',
help='Show all available images'),
cfg.StrOpt('namespace', short='n', default='kolla',
help='The Docker namespace name'),
cfg.BoolOpt('cache', default=True,
help='Use the Docker cache when building',
),
cfg.MultiOpt('profile', types.String(), short='p',
help=('Build a pre-defined set of images, see [profiles]'
' section in config. The default profiles are:'
' {}'.format(', '.join(
[opt.name for opt in _PROFILE_OPTS])
))),
cfg.BoolOpt('push', default=False,
help='Push images after building'),
cfg.IntOpt('push-threads', default=1, min=1,
help=('The number of threads to user while pushing'
' Images. Note: Docker can not handle threading'
' push properly.')),
cfg.IntOpt('retries', short='r', default=3, min=0,
help='The number of times to retry while building'),
cfg.MultiOpt('regex', types.String(), positional=True,
help=('Build only images matching regex and its'
' dependencies')),
cfg.StrOpt('registry',
help=('The docker registry host. The default registry host'
' is Docker Hub')),
cfg.StrOpt('save-dependency',
help=('Path to the file to store the docker image'
' dependency in Graphviz dot format')),
cfg.StrOpt('type', short='t', default='binary',
choices=INSTALL_TYPE_CHOICES,
dest='install_type',
help=('The method of the OpenStack install')),
cfg.IntOpt('threads', short='T', default=8, min=1,
help=('The number of threads to use while building.'
' (Note: setting to one will allow real time'
' logging.)')),
cfg.StrOpt('tag', default=version.cached_version_string(),
help='The Docker tag'),
cfg.BoolOpt('template-only', default=False,
help=("Don't build images. Generate Dockerfile only")),
cfg.IntOpt('timeout', default=120,
help='Time in seconds after which any operation times out'),
cfg.StrOpt('template-override',
help='Path to template override file'),
cfg.StrOpt('logs-dir', help='Path to logs directory'),
]
_BASE_OPTS = [
cfg.StrOpt('maintainer',
default='Kolla Project (https://launchpad.net/kolla)',
help='The MAINTAINER field'),
cfg.ListOpt('rpm_setup_config', default=[DELOREAN, DELOREAN_DEPS],
help=('Comma separated list of .rpm or .repo file(s) '
'or URL(s) to install before building containers')),
cfg.StrOpt('apt_sources_list', help=('Path to custom sources.list')),
cfg.StrOpt('apt_preferences', help=('Path to custom apt/preferences'))
]
SOURCES = {
'openstack-base': {
'type': 'url',
'location': ('http://tarballs.openstack.org/requirements/'
'requirements-master.tar.gz')},
'aodh-base': {
'type': 'url',
'location': ('http://tarballs.openstack.org/aodh/'
'aodh-master.tar.gz')},
'barbican-base': {
'type': 'url',
'location': ('http://tarballs.openstack.org/barbican/'
'barbican-master.tar.gz')},
'bifrost-base': {
'type': 'url',
'location': ('http://tarballs.openstack.org/bifrost/'
'bifrost-master.tar.gz')},
'ceilometer-base': {
'type': 'url',
'location': ('http://tarballs.openstack.org/ceilometer/'
'ceilometer-master.tar.gz')},
'cinder-base': {
'type': 'url',
'location': ('http://tarballs.openstack.org/cinder/'
'cinder-master.tar.gz')},
'congress-base': {
'type': 'url',
'location': ('http://tarballs.openstack.org/congress/'
'congress-master.tar.gz')},
'cloudkitty-base': {
'type': 'url',
'location': ('http://tarballs.openstack.org/cloudkitty/'
'cloudkitty-master.tar.gz')},
'designate-base': {
'type': 'url',
'location': ('http://tarballs.openstack.org/designate/'
'designate-master.tar.gz')},
'freezer-api': {
'type': 'url',
'location': ('http://tarballs.openstack.org/freezer-api/'
'freezer-api-master.tar.gz')},
'freezer-base': {
'type': 'url',
'location': ('http://tarballs.openstack.org/freezer/'
'freezer-master.tar.gz')},
'glance-base': {
'type': 'url',
'location': ('http://tarballs.openstack.org/glance/'
'glance-master.tar.gz')},
'gnocchi-base': {
'type': 'url',
'location': ('http://tarballs.openstack.org/gnocchi/'
'gnocchi-master.tar.gz')},
'heat-base': {
'type': 'url',
'location': ('http://tarballs.openstack.org/heat/'
'heat-master.tar.gz')},
'horizon': {
'type': 'url',
'location': ('http://tarballs.openstack.org/horizon/'
'horizon-master.tar.gz')},
'horizon-plugin-cloudkitty-dashboard': {
'type': 'url',
'location': ('http://tarballs.openstack.org/cloudkitty-dashboard/'
'cloudkitty-dashboard-master.tar.gz')},
'horizon-plugin-designate-dashboard': {
'type': 'url',
'location': ('http://tarballs.openstack.org/designate-dashboard/'
'designate-dashboard-master.tar.gz')},
'horizon-plugin-ironic-ui': {
'type': 'url',
'location': ('http://tarballs.openstack.org/ironic-ui/'
'ironic-ui-master.tar.gz')},
'horizon-plugin-magnum-ui': {
'type': 'url',
'location': ('http://tarballs.openstack.org/magnum-ui/'
'magnum-ui-master.tar.gz')},
'horizon-plugin-manila-ui': {
'type': 'url',
'location': ('http://tarballs.openstack.org/manila-ui/'
'manila-ui-master.tar.gz')},
'horizon-plugin-neutron-lbaas-dashboard': {
'type': 'url',
'location': ('http://tarballs.openstack.org/neutron-lbaas-dashboard/'
'neutron-lbaas-dashboard-master.tar.gz')},
'horizon-plugin-sahara-dashboard': {
'type': 'url',
'location': ('http://tarballs.openstack.org/sahara-dashboard/'
'sahara-dashboard-master.tar.gz')},
'horizon-plugin-searchlight-ui': {
'type': 'url',
'location': ('http://tarballs.openstack.org/searchlight-ui/'
'searchlight-ui-master.tar.gz')},
'horizon-plugin-senlin-dashboard': {
'type': 'url',
'location': ('http://tarballs.openstack.org/senlin-dashboard/'
'senlin-dashboard-master.tar.gz')},
'horizon-plugin-trove-dashboard': {
'type': 'url',
'location': ('http://tarballs.openstack.org/trove-dashboard/'
'trove-dashboard-master.tar.gz')},
'horizon-plugin-zaqar-ui': {
'type': 'url',
'location': ('http://tarballs.openstack.org/zaqar-ui/'
'zaqar-ui-master.tar.gz')},
'ironic-base': {
'type': 'url',
'location': ('http://tarballs.openstack.org/ironic/'
'ironic-master.tar.gz')},
'ironic-inspector': {
'type': 'url',
'location': ('http://tarballs.openstack.org/ironic-inspector/'
'ironic-inspector-master.tar.gz')},
'karbor-base': {
'type': 'url',
'location': ('http://tarballs.openstack.org/karbor/'
'karbor-master.tar.gz')},
'keystone-base': {
'type': 'url',
'location': ('http://tarballs.openstack.org/keystone/'
'keystone-master.tar.gz')},
'kuryr-base': {
'type': 'url',
'location': ('http://tarballs.openstack.org/kuryr/'
'kuryr-master.tar.gz')},
'kuryr-libnetwork': {
'type': 'url',
'location': ('http://tarballs.openstack.org/kuryr-libnetwork/'
'kuryr-libnetwork-master.tar.gz')},
'magnum-base': {
'type': 'url',
'location': ('http://tarballs.openstack.org/magnum/'
'magnum-master.tar.gz')},
'manila-base': {
'type': 'url',
'location': ('http://tarballs.openstack.org/manila/'
'manila-master.tar.gz')},
'mistral-base': {
'type': 'url',
'location': ('http://tarballs.openstack.org/mistral/'
'mistral-master.tar.gz')},
'murano-base': {
'type': 'url',
'location': ('http://tarballs.openstack.org/murano/'
'murano-master.tar.gz')},
'neutron-base': {
'type': 'url',
'location': ('http://tarballs.openstack.org/neutron/'
'neutron-master.tar.gz')},
'neutron-base-plugin-neutron-fwaas': {
'type': 'url',
'location': ('http://tarballs.openstack.org/neutron-fwaas/'
'neutron-fwaas-master.tar.gz')},
'neutron-lbaas-agent': {
'type': 'url',
'location': ('http://tarballs.openstack.org/neutron-lbaas/'
'neutron-lbaas-master.tar.gz')},
'neutron-server-plugin-neutron-lbaas': {
'type': 'url',
'location': ('http://tarballs.openstack.org/neutron-lbaas/'
'neutron-lbaas-master.tar.gz')},
'neutron-sfc-agent': {
'type': 'url',
'location': ('http://tarballs.openstack.org/networking-sfc/'
'networking-sfc-master.tar.gz')},
'neutron-server-plugin-vpnaas-agent': {
'type': 'url',
'location': ('http://tarballs.openstack.org/neutron-vpnaas/'
'neutron-vpnaas-master.tar.gz')},
'neutron-vpnaas-agent': {
'type': 'url',
'location': ('http://tarballs.openstack.org/neutron-vpnaas/'
'neutron-vpnaas-master.tar.gz')},
'nova-base': {
'type': 'url',
'location': ('http://tarballs.openstack.org/nova/'
'nova-master.tar.gz')},
'nova-spicehtml5proxy': {
'type': 'url',
'location': ('http://github.com/SPICE/spice-html5/tarball/'
'spice-html5-0.1.6')},
'nova-novncproxy': {
'type': 'url',
'location': ('http://github.com/kanaka/noVNC/tarball/'
'v0.5.1')},
'octavia-base': {
'type': 'url',
'location': ('http://tarballs.openstack.org/octavia/'
'octavia-master.tar.gz')},
'panko-base': {
'type': 'url',
'location': ('http://tarballs.openstack.org/panko/'
'panko-master.tar.gz')},
'rally': {
'type': 'url',
'location': ('http://tarballs.openstack.org/rally/'
'rally-master.tar.gz')},
'sahara-base': {
'type': 'url',
'location': ('http://tarballs.openstack.org/sahara/'
'sahara-master.tar.gz')},
'searchlight-base': {
'type': 'url',
'location': ('http://tarballs.openstack.org/searchlight/'
'searchlight-master.tar.gz')},
'senlin-base': {
'type': 'url',
'location': ('http://tarballs.openstack.org/senlin/'
'senlin-master.tar.gz')},
'solum-base': {
'type': 'url',
'location': ('http://tarballs.openstack.org/solum/'
'solum-master.tar.gz')},
'swift-base': {
'type': 'url',
'location': ('http://tarballs.openstack.org/swift/'
'swift-master.tar.gz')},
'tacker': {
'type': 'url',
'location': ('http://tarballs.openstack.org/tacker/'
'tacker-master.tar.gz')},
'tempest': {
'type': 'url',
'location': ('http://tarballs.openstack.org/tempest/'
'tempest-master.tar.gz')},
'trove-base': {
'type': 'url',
'location': ('http://tarballs.openstack.org/trove/'
'trove-master.tar.gz')},
'watcher-base': {
'type': 'url',
'location': ('http://tarballs.openstack.org/watcher/'
'watcher-master.tar.gz')},
'zaqar': {
'type': 'url',
'location': ('http://tarballs.openstack.org/zaqar/'
'zaqar-master.tar.gz')}
}
def get_source_opts(type_=None, location=None, reference=None):
return [cfg.StrOpt('type', choices=['local', 'git', 'url'],
default=type_,
help='Source location type'),
cfg.StrOpt('location', default=location,
help='The location for source install'),
cfg.StrOpt('reference', default=reference,
help=('Git reference to pull, commit sha, tag '
'or branch name'))]
def gen_all_source_opts():
for name, params in SOURCES.items():
type_ = params['type']
location = params['location']
reference = params.get('reference')
yield name, get_source_opts(type_, location, reference)
def list_opts():
return itertools.chain([(None, _CLI_OPTS),
(None, _BASE_OPTS),
('profiles', _PROFILE_OPTS)],
gen_all_source_opts(),
)
def parse(conf, args, usage=None, prog=None,
default_config_files=None):
conf.register_cli_opts(_CLI_OPTS)
conf.register_opts(_BASE_OPTS)
conf.register_opts(_PROFILE_OPTS, group='profiles')
for name, opts in gen_all_source_opts():
conf.register_opts(opts, name)
conf(args=args,
project='kolla',
usage=usage,
prog=prog,
version=version.cached_version_string(),
default_config_files=default_config_files)
# NOTE(jeffrey4l): set the default base tag based on the
# base option
conf.set_default('base_tag', DISTRO_RELEASE.get(conf.base))
if not conf.base_image:
conf.base_image = conf.base
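# Illustrative sketch (assumption: added for clarity, not part of the original module).
# Shows how a caller would typically load this configuration with oslo.config.
def _example_parse():  # pragma: no cover
    example_conf = cfg.ConfigOpts()
    parse(example_conf, ['--base', 'ubuntu', '--type', 'source'])
    return example_conf.base, example_conf.install_type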
|
the-stack_0_20474 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Hive Appier Framework
# Copyright (c) 2008-2020 Hive Solutions Lda.
#
# This file is part of Hive Appier Framework.
#
# Hive Appier Framework is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by the Apache
# Foundation, either version 2.0 of the License, or (at your option) any
# later version.
#
# Hive Appier Framework is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License along with
# Hive Appier Framework. If not, see <http://www.apache.org/licenses/>.
__author__ = "João Magalhães <[email protected]>"
""" The author(s) of the module """
__version__ = "1.0.0"
""" The version of the module """
__revision__ = "$LastChangedRevision$"
""" The revision number of the module """
__date__ = "$LastChangedDate$"
""" The last change date of the module """
__copyright__ = "Copyright (c) 2008-2020 Hive Solutions Lda."
""" The copyright for the module """
__license__ = "Apache License, Version 2.0"
""" The license for the module """
import os
import sys
import json
from . import legacy
FILE_NAME = "appier.json"
""" The default name of the file that is going to be
used for the loading of configuration values from JSON """
FILE_TEMPLATE = "appier.%s.json"
""" The template to be used in the construction of the
domain specific configuration file paths """
HOME_FILE = "~/.home"
""" The location of the file that may be used to "redirect"
the home directory contents to a different directory """
IMPORT_NAMES = ("$import", "$include", "$IMPORT", "$INCLUDE")
""" The multiple possible definitions of the special configuration
name that references a list of include files to be loaded """
CASTS = {
bool : lambda v: v if isinstance(v, bool) else v in ("1", "true", "True"),
list : lambda v: v if isinstance(v, list) else v.split(";") if v else [],
tuple : lambda v: v if isinstance(v, tuple) else tuple(v.split(";") if v else [])
}
""" The map containing the various cast method
operation associated with the various data types,
they provide a different type of casting strategy """
ENV_ENCODINGS = (
"utf-8",
sys.getdefaultencoding(),
sys.getfilesystemencoding()
)
""" The sequence of encodings that are going to
be used to try to decode possible byte based strings
for the various environment variable values """
CONFIGS = {}
""" The map that contains the key value association
for all the currently set global configurations """
CONFIG_F = []
""" The list of files that have been used for the loading
of the configuration through this session, every time a
loading of configuration from a file occurs the same path
is added to this global list """
HOMES = []
""" Global reference to the paths to the directory considered
to be the home on in terms of configuration, this value should
be set on the initial loading of the ".home" file """
if not isinstance(__builtins__, dict):
__builtins__ = __builtins__.__dict__
def conf(name, default = None, cast = None, ctx = None):
"""
Retrieves the configuration value for the provided value
defaulting to the provided default value in case no value
is found for the provided name.
An optional cast operation may be performed on the value
in case it's requested.
:type name: String
:param name: The name of the configuration value to be
retrieved.
:type default: Object
:param default: The default value to be retrieved in case
no value was found for the provided name.
:type cast: Type/String
:param cast: The cast operation to be performed in the
resolved value (optional).
:type ctx: Dictionary
:param ctx: The context dictionary to be used for situations
where a more contextual configuration is meant to be used instead
of the process wide global configuration.
:rtype: Object
:return: The value for the configuration with the requested
name or the default value if no value was found.
"""
configs = ctx["configs"] if ctx else CONFIGS
cast = _cast_r(cast)
value = configs.get(name, default)
if cast and not value == None: value = cast(value)
return value
def conf_prefix(prefix, ctx = None):
configs = ctx["configs"] if ctx else CONFIGS
configs_prefix = dict()
for name, value in configs.items():
if not name.startswith(prefix): continue
configs_prefix[name] = value
return configs_prefix
def conf_suffix(suffix, ctx = None):
configs = ctx["configs"] if ctx else CONFIGS
configs_suffix = dict()
for name, value in configs.items():
if not name.endswith(suffix): continue
configs_suffix[name] = value
return configs_suffix
def conf_s(name, value, ctx = None):
configs = ctx["configs"] if ctx else CONFIGS
configs[name] = value
def conf_r(name, ctx = None):
configs = ctx["configs"] if ctx else CONFIGS
if not name in configs: return
del configs[name]
def conf_d(ctx = None):
configs = ctx["configs"] if ctx else CONFIGS
return configs
def conf_ctx():
return dict(configs = dict(), config_f = dict())
def load(names = (FILE_NAME,), path = None, encoding = "utf-8", ctx = None):
paths = []
homes = get_homes()
for home in homes:
paths += [
os.path.join(home),
os.path.join(home, ".config"),
]
paths += [sys.prefix]
paths.append(path)
for path in paths:
for name in names:
load_file(name = name, path = path, encoding = encoding, ctx = ctx)
load_env(ctx = ctx)
def load_file(name = FILE_NAME, path = None, encoding = "utf-8", ctx = None):
configs = ctx["configs"] if ctx else CONFIGS
config_f = ctx["config_f"] if ctx else CONFIG_F
if path: path = os.path.normpath(path)
if path: file_path = os.path.join(path, name)
else: file_path = name
file_path = os.path.abspath(file_path)
file_path = os.path.normpath(file_path)
base_path = os.path.dirname(file_path)
exists = os.path.exists(file_path)
if not exists: return
exists = file_path in config_f
if exists: config_f.remove(file_path)
config_f.append(file_path)
file = open(file_path, "rb")
try: data = file.read()
finally: file.close()
if not data: return
data = data.decode(encoding)
data_j = json.loads(data)
_load_includes(base_path, data_j, encoding = encoding)
for key, value in data_j.items():
if not _is_valid(key): continue
configs[key] = value
def load_env(ctx = None):
configs = ctx["configs"] if ctx else CONFIGS
config = dict(os.environ)
homes = get_homes()
for home in homes:
_load_includes(home, config)
for key, value in legacy.iteritems(config):
if not _is_valid(key): continue
configs[key] = value
is_bytes = legacy.is_bytes(value)
if not is_bytes: continue
for encoding in ENV_ENCODINGS:
try: value = value.decode(encoding)
except UnicodeDecodeError: pass
else: break
configs[key] = value
def get_homes(
file_path = HOME_FILE,
default = "~",
encoding = "utf-8",
force_default = False
):
global HOMES
if HOMES: return HOMES
HOMES = os.environ.get("HOMES", None)
HOMES = HOMES.split(";") if HOMES else HOMES
if not HOMES == None: return HOMES
default = os.path.expanduser(default)
default = os.path.abspath(default)
default = os.path.normpath(default)
HOMES = [default]
file_path = os.path.expanduser(file_path)
file_path = os.path.normpath(file_path)
exists = os.path.exists(file_path)
if not exists: return HOMES
if not force_default: del HOMES[:]
file = open(file_path, "rb")
try: data = file.read()
finally: file.close()
data = data.decode("utf-8")
data = data.strip()
paths = data.splitlines()
paths = [path.strip() for path in paths]
for path in paths:
path = path.strip()
if not path: continue
path = os.path.expanduser(path)
path = os.path.abspath(path)
path = os.path.normpath(path)
HOMES.append(path)
return HOMES
def _cast_r(cast):
is_string = type(cast) in legacy.STRINGS
if is_string: cast = __builtins__.get(cast, None)
if not cast: return None
return CASTS.get(cast, cast)
def _load_includes(base_path, config, encoding = "utf-8"):
includes = ()
for alias in IMPORT_NAMES:
includes = config.get(alias, includes)
if legacy.is_string(includes):
includes = includes.split(";")
for include in includes:
load_file(
name = include,
path = base_path,
encoding = encoding
)
def _is_valid(key):
if key in IMPORT_NAMES: return False
return True
def _is_devel():
"""
Simple debug/development level detection mechanism to be
used at load time to determine if the system is running
under a development (debug) environment.
This function should not be used at runtime as there are
other (more powerful) mechanisms to archive the same
type of results.
:rtype: bool
:return: If the current environment is running under a
development type level of traceability.
"""
return conf("LEVEL", "INFO") in ("DEBUG",)
def _is_secure():
"""
Simple secure variable that should be overriden only under
very specific/critical operation that control if the current
environment should hide information that would otherwise
compromise some of the system's secrecy (eg: version, name, etc.)
:rtype: bool
:return: If the current environment is running under a
secured type level of traceability.
"""
return conf("SECURE", True, cast = bool)
load()
|
the-stack_0_20475 | # encoding=utf8
"""The implementation of tasks."""
import logging
import numpy as np
from WeOptPy.task.countingtask import CountingTask
logging.basicConfig()
logger = logging.getLogger('NiaPy.runner.Runner')
logger.setLevel('INFO')
class StoppingTask(CountingTask):
r"""Optimization task with implemented checking for stopping criterias.
Args:
no_gen (int): Maximum number of algorithm iterations/generations.
no_fes (int): Maximum number of function evaluations.
rvalue (float): Reference function/fitness values to reach in optimization.
x (numpy.ndarray): Best found individual.
x_f (float): Best found individual function/fitness value.
See Also:
* :class:`NiaPy.util.CountingTask`
"""
def __init__(self, no_fes=np.inf, no_gen=np.inf, rvalue=None, verbose=False, **kwargs):
r"""Initialize task class for optimization.
Arguments:
no_fes (Optional[int]): Number of function evaluations.
no_gen (Optional[int]): Number of generations or iterations.
rvalue (Optional[float]): Reference value of function/fitness function.
verbose (Optional[bool]): Enable/disable logging of improvements.
Note:
			Improvements found during the evolutionary cycle are
			stored in self.n_evals and self.x_f_vals.
See Also:
* :func:`NiaPy.util.CountingTask.__init__`
"""
CountingTask.__init__(self, **kwargs)
self.refValue = (-np.inf if rvalue is None else rvalue)
self.verbose = verbose
self.x, self.x_f = None, np.inf
self.nFES, self.nGEN = no_fes, no_gen
self.n_evals = []
self.x_f_vals = []
def eval(self, a):
r"""Evaluate solution.
Args:
a (numpy.ndarray): Solution to evaluate.
Returns:
float: Fitness/function value of solution.
See Also:
* :func:`NiaPy.util.StoppingTask.stopCond`
* :func:`NiaPy.util.CountingTask.eval`
"""
if self.stop_cond(): return np.inf * self.optType.value
x_f = CountingTask.eval(self, a)
if x_f < self.x_f:
self.x_f = x_f
self.n_evals.append(self.Evals)
self.x_f_vals.append(x_f)
if self.verbose: logger.info('no_fes:%d => %s' % (self.Evals, self.x_f))
return x_f
def stop_cond(self):
r"""Check if stopping condition reached.
Returns:
			bool: `True` if the maximum number of function evaluations, the maximum number of algorithm iterations/generations, or the reference fitness value has been reached, else `False`.
"""
return (self.Evals >= self.nFES) or (self.Iters >= self.nGEN) or (self.refValue > self.x_f)
def stop_cond_i(self):
r"""Check if stopping condition reached and increase number of iterations.
Returns:
			bool: `True` if the maximum number of function evaluations, the maximum number of algorithm iterations/generations, or the reference fitness value has been reached, else `False`.
See Also:
* :func:`NiaPy.util.StoppingTask.stopCond`
* :func:`NiaPy.util.CountingTask.nextIter`
"""
r = self.stop_cond()
CountingTask.next_iteration(self)
return r
def return_conv(self):
r"""Get values of x and y axis for plotting covariance graph.
Returns:
Tuple[List[int], List[float]]:
1. List of ints of function evaluations.
2. List of ints of function/fitness values.
"""
r1, r2 = [], []
for i, v in enumerate(self.n_evals):
r1.append(v), r2.append(self.x_f_vals[i])
if i >= len(self.n_evals) - 1: break
diff = self.n_evals[i + 1] - v
if diff <= 1: continue
for j in range(diff - 1): r1.append(v + j + 1), r2.append(self.x_f_vals[i])
return r1, r2
# vim: tabstop=3 noexpandtab shiftwidth=3 softtabstop=3
|
the-stack_0_20477 | import os
import fnmatch
import shutil
from collections import defaultdict
from conans.util.files import mkdir
def report_copied_files(copied, output):
ext_files = defaultdict(list)
for f in copied:
_, ext = os.path.splitext(f)
ext_files[ext].append(os.path.basename(f))
if not ext_files:
return False
for ext, files in ext_files.items():
files_str = (", ".join(files)) if len(files) < 5 else ""
file_or_files = "file" if len(files) == 1 else "files"
if not ext:
output.info("Copied %d %s: %s" % (len(files), file_or_files, files_str))
else:
output.info("Copied %d '%s' %s: %s" % (len(files), ext, file_or_files, files_str))
return True
class FileCopier(object):
""" main responsible of copying files from place to place:
package: build folder -> package folder
imports: package folder -> user folder
export: user folder -> store "export" folder
"""
def __init__(self, root_source_folder, root_destination_folder, excluded=None):
"""
        Takes the base folders to copy resources src -> dst. These folder names
        will not be used in the relative names while copying.
        param root_source_folder: The base folder to copy things from, typically the
                                   store build folder
        param root_destination_folder: The base folder to copy things to, typically the
                                   store package folder
"""
self._base_src = root_source_folder
self._base_dst = root_destination_folder
self._copied = []
self._excluded = [root_destination_folder]
if excluded:
self._excluded.append(excluded)
def report(self, output):
return report_copied_files(self._copied, output)
def __call__(self, pattern, dst="", src="", keep_path=True, links=False, symlinks=None,
excludes=None, ignore_case=False):
"""
        param pattern: an fnmatch file pattern of the files that should be copied. E.g. *.dll
        param dst: the destination local folder, relative to the current conanfile dir, to
                   which the files will be copied. E.g.: "bin"
        param src: the source folder in which those files will be searched. This folder
                   will be stripped from the dst name. E.g.: lib/Debug/x86
        param keep_path: True (default) to preserve the relative path from src under dst,
                         False to drop it. False is useful if you want to collect e.g.
                         many *.lib files from many dirs into a single lib dir
        return: list of copied files
"""
if symlinks is not None:
links = symlinks
# Check for ../ patterns and allow them
if pattern.startswith(".."):
rel_dir = os.path.abspath(os.path.join(self._base_src, pattern))
base_src = os.path.dirname(rel_dir)
pattern = os.path.basename(rel_dir)
else:
base_src = self._base_src
src = os.path.join(base_src, src)
dst = os.path.join(self._base_dst, dst)
files_to_copy, link_folders = self._filter_files(src, pattern, links, excludes,
ignore_case)
copied_files = self._copy_files(files_to_copy, src, dst, keep_path, links)
self._link_folders(src, dst, link_folders)
self._copied.extend(files_to_copy)
return copied_files
def _filter_files(self, src, pattern, links, excludes, ignore_case):
""" return a list of the files matching the patterns
The list will be relative path names wrt to the root src folder
"""
filenames = []
linked_folders = []
for root, subfolders, files in os.walk(src, followlinks=True):
if root in self._excluded:
subfolders[:] = []
continue
if links and os.path.islink(root):
linked_folders.append(os.path.relpath(root, src))
subfolders[:] = []
continue
basename = os.path.basename(root)
# Skip git or svn subfolders
if basename in [".git", ".svn"]:
subfolders[:] = []
continue
if basename == "test_package": # DO NOT export test_package/build folder
try:
subfolders.remove("build")
except:
pass
relative_path = os.path.relpath(root, src)
for f in files:
relative_name = os.path.normpath(os.path.join(relative_path, f))
filenames.append(relative_name)
if ignore_case:
filenames = {f.lower(): f for f in filenames}
pattern = pattern.lower()
files_to_copy = fnmatch.filter(filenames, pattern)
if excludes:
if not isinstance(excludes, (tuple, list)):
excludes = (excludes, )
if ignore_case:
excludes = [e.lower() for e in excludes]
for exclude in excludes:
files_to_copy = [f for f in files_to_copy if not fnmatch.fnmatch(f, exclude)]
if ignore_case:
files_to_copy = [filenames[f] for f in files_to_copy]
return files_to_copy, linked_folders
@staticmethod
def _link_folders(src, dst, linked_folders):
for linked_folder in linked_folders:
link = os.readlink(os.path.join(src, linked_folder))
dst_link = os.path.join(dst, linked_folder)
try:
# Remove the previous symlink
os.remove(dst_link)
except OSError:
pass
# link is a string relative to linked_folder
            # e.g.: os.symlink("test/bar", "./foo/test_link") will create a link to foo/test/bar in ./foo/test_link
mkdir(os.path.dirname(dst_link))
os.symlink(link, dst_link)
# Remove empty links
for linked_folder in linked_folders:
dst_link = os.path.join(dst, linked_folder)
abs_path = os.path.realpath(dst_link)
if not os.path.exists(abs_path):
os.remove(dst_link)
@staticmethod
def _copy_files(files, src, dst, keep_path, symlinks):
""" executes a multiple file copy from [(src_file, dst_file), (..)]
managing symlinks if necessary
"""
copied_files = []
for filename in files:
abs_src_name = os.path.join(src, filename)
filename = filename if keep_path else os.path.basename(filename)
abs_dst_name = os.path.normpath(os.path.join(dst, filename))
try:
os.makedirs(os.path.dirname(abs_dst_name))
except:
pass
if symlinks and os.path.islink(abs_src_name):
linkto = os.readlink(abs_src_name) # @UndefinedVariable
try:
os.remove(abs_dst_name)
except OSError:
pass
os.symlink(linkto, abs_dst_name) # @UndefinedVariable
else:
shutil.copy2(abs_src_name, abs_dst_name)
copied_files.append(abs_dst_name)
return copied_files
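# Illustrative usage sketch (assumption: added for clarity; the folder names are placeholders).
# Collects every matching header found under <build>/src into <package>/include, flattening paths.
def _example_copy():  # pragma: no cover
    copier = FileCopier("/tmp/example_build", "/tmp/example_package")
    return copier("*.h", dst="include", src="src", keep_path=False)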
|
the-stack_0_20478 | #!/usr/bin/env python3
# Copyright 2019 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import kfp
from kfp import components
from kfp import dsl
import os
import subprocess
diagnose_me_op = components.load_component_from_url(
'https://raw.githubusercontent.com/kubeflow/pipelines/566dddfdfc0a6a725b6e50ea85e73d8d5578bbb9/components/diagnostics/diagnose_me/component.yaml')
confusion_matrix_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/1.6.0/components/local/confusion_matrix/component.yaml')
roc_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/1.6.0/components/local/roc/component.yaml')
dataproc_create_cluster_op = components.load_component_from_url(
'https://raw.githubusercontent.com/kubeflow/pipelines/1.6.0/components/gcp/dataproc/create_cluster/component.yaml')
dataproc_delete_cluster_op = components.load_component_from_url(
'https://raw.githubusercontent.com/kubeflow/pipelines/1.6.0/components/gcp/dataproc/delete_cluster/component.yaml')
dataproc_submit_pyspark_op = components.load_component_from_url(
'https://raw.githubusercontent.com/kubeflow/pipelines/1.6.0/components/gcp/dataproc/submit_pyspark_job/component.yaml'
)
dataproc_submit_spark_op = components.load_component_from_url(
'https://raw.githubusercontent.com/kubeflow/pipelines/1.6.0/components/gcp/dataproc/submit_spark_job/component.yaml'
)
_PYSRC_PREFIX = 'gs://ml-pipeline/sample-pipeline/xgboost' # Common path to python src.
_XGBOOST_PKG = 'gs://ml-pipeline/sample-pipeline/xgboost/xgboost4j-example-0.8-SNAPSHOT-jar-with-dependencies.jar'
_TRAINER_MAIN_CLS = 'ml.dmlc.xgboost4j.scala.example.spark.XGBoostTrainer'
_PREDICTOR_MAIN_CLS = 'ml.dmlc.xgboost4j.scala.example.spark.XGBoostPredictor'
def delete_directory_from_gcs(dir_path):
"""Delete a GCS dir recursively. Ignore errors."""
try:
subprocess.call(['gsutil', '-m', 'rm', '-r', dir_path])
except:
pass
# ! Please do not forget to enable the Dataproc API in your cluster https://console.developers.google.com/apis/api/dataproc.googleapis.com/overview
# ================================================================
# The following classes should be provided by components provider.
def dataproc_analyze_op(
project,
region,
cluster_name,
schema,
train_data,
output):
"""Submit dataproc analyze as a pyspark job.
:param project: GCP project ID.
  :param region: The Dataproc region in which to run the analysis.
:param cluster_name: Name of the cluster.
:param schema: GCS path to the schema.
:param train_data: GCS path to the training data.
:param output: GCS path to store the output.
"""
return dataproc_submit_pyspark_op(
project_id=project,
region=region,
cluster_name=cluster_name,
main_python_file_uri=os.path.join(_PYSRC_PREFIX, 'analyze_run.py'),
args=['--output', str(output), '--train', str(train_data), '--schema', str(schema)]
)
def dataproc_transform_op(
project,
region,
cluster_name,
train_data,
eval_data,
target,
analysis,
output
):
"""Submit dataproc transform as a pyspark job.
:param project: GCP project ID.
  :param region: The Dataproc region in which to run the job.
:param cluster_name: Name of the cluster.
:param train_data: GCS path to the training data.
:param eval_data: GCS path of the eval csv file.
:param target: Target column name.
:param analysis: GCS path of the analysis results
:param output: GCS path to use for output.
"""
# Remove existing [output]/train and [output]/eval if they exist.
delete_directory_from_gcs(os.path.join(output, 'train'))
delete_directory_from_gcs(os.path.join(output, 'eval'))
return dataproc_submit_pyspark_op(
project_id=project,
region=region,
cluster_name=cluster_name,
main_python_file_uri=os.path.join(_PYSRC_PREFIX,
'transform_run.py'),
args=[
'--output',
str(output),
'--analysis',
str(analysis),
'--target',
str(target),
'--train',
str(train_data),
'--eval',
str(eval_data)
])
def dataproc_train_op(
project,
region,
cluster_name,
train_data,
eval_data,
target,
analysis,
workers,
rounds,
output,
is_classification=True
):
if is_classification:
config='gs://ml-pipeline/sample-data/xgboost-config/trainconfcla.json'
else:
config='gs://ml-pipeline/sample-data/xgboost-config/trainconfreg.json'
return dataproc_submit_spark_op(
project_id=project,
region=region,
cluster_name=cluster_name,
main_class=_TRAINER_MAIN_CLS,
spark_job=json.dumps({'jarFileUris': [_XGBOOST_PKG]}),
args=json.dumps([
str(config),
str(rounds),
str(workers),
str(analysis),
str(target),
str(train_data),
str(eval_data),
str(output)
]))
def dataproc_predict_op(
project,
region,
cluster_name,
data,
model,
target,
analysis,
output
):
return dataproc_submit_spark_op(
project_id=project,
region=region,
cluster_name=cluster_name,
main_class=_PREDICTOR_MAIN_CLS,
spark_job=json.dumps({'jarFileUris': [_XGBOOST_PKG]}),
args=json.dumps([
str(model),
str(data),
str(analysis),
str(target),
str(output)
]))
# =======================================================================
@dsl.pipeline(
name='xgboost-trainer',
description='A trainer that does end-to-end distributed training for XGBoost models.'
)
def xgb_train_pipeline(
output='gs://{{kfp-default-bucket}}',
project='{{kfp-project-id}}',
diagnostic_mode='HALT_ON_ERROR',
rounds=5,
):
output_template = str(output) + '/' + dsl.RUN_ID_PLACEHOLDER + '/data'
region='us-central1'
workers=2
quota_check=[{'region':region,'metric':'CPUS','quota_needed':12.0}]
train_data='gs://ml-pipeline/sample-data/sfpd/train.csv'
eval_data='gs://ml-pipeline/sample-data/sfpd/eval.csv'
schema='gs://ml-pipeline/sample-data/sfpd/schema.json'
true_label='ACTION'
target='resolution'
required_apis='dataproc.googleapis.com'
cluster_name='xgb-%s' % dsl.RUN_ID_PLACEHOLDER
    # The current GCP pyspark/spark ops do not provide outputs as return values;
    # instead, we need to use strings to pass the URIs around.
analyze_output = output_template
transform_output_train = os.path.join(output_template, 'train', 'part-*')
transform_output_eval = os.path.join(output_template, 'eval', 'part-*')
train_output = os.path.join(output_template, 'train_output')
predict_output = os.path.join(output_template, 'predict_output')
_diagnose_me_op = diagnose_me_op(
bucket=output,
execution_mode=diagnostic_mode,
project_id=project,
target_apis=required_apis,
quota_check=quota_check)
with dsl.ExitHandler(exit_op=dataproc_delete_cluster_op(
project_id=project,
region=region,
name=cluster_name
)):
_create_cluster_op = dataproc_create_cluster_op(
project_id=project,
region=region,
name=cluster_name,
initialization_actions=[
os.path.join(_PYSRC_PREFIX,
'initialization_actions.sh'),
],
image_version='1.5'
).after(_diagnose_me_op)
_analyze_op = dataproc_analyze_op(
project=project,
region=region,
cluster_name=cluster_name,
schema=schema,
train_data=train_data,
output=output_template
).after(_create_cluster_op).set_display_name('Analyzer')
_transform_op = dataproc_transform_op(
project=project,
region=region,
cluster_name=cluster_name,
train_data=train_data,
eval_data=eval_data,
target=target,
analysis=analyze_output,
output=output_template
).after(_analyze_op).set_display_name('Transformer')
_train_op = dataproc_train_op(
project=project,
region=region,
cluster_name=cluster_name,
train_data=transform_output_train,
eval_data=transform_output_eval,
target=target,
analysis=analyze_output,
workers=workers,
rounds=rounds,
output=train_output
).after(_transform_op).set_display_name('Trainer')
_predict_op = dataproc_predict_op(
project=project,
region=region,
cluster_name=cluster_name,
data=transform_output_eval,
model=train_output,
target=target,
analysis=analyze_output,
output=predict_output
).after(_train_op).set_display_name('Predictor')
_cm_op = confusion_matrix_op(
predictions=os.path.join(predict_output, 'part-*.csv'),
output_dir=output_template
).after(_predict_op)
_roc_op = roc_op(
predictions_dir=os.path.join(predict_output, 'part-*.csv'),
true_class=true_label,
true_score_column=true_label,
output_dir=output_template
).after(_predict_op)
if __name__ == '__main__':
kfp.compiler.Compiler().compile(xgb_train_pipeline, __file__ + '.yaml')
|
the-stack_0_20479 | #
# A rudimentary text input class for PsychoPy experiments
# Author: Meng Du
# August 2018
#
from psychopy import visual, core, event
# TODO overflow control? try TextBox?
class DumbTextInput(object):
"""
    DumbTextInput is a rudimentary text box that lets users enter text in PsychoPy with a standard English keyboard.
    It draws itself right after being created, and other psychopy.visual stimuli can be drawn together with it.
Example:
text_in = DumbTextInput(window=win, width=1.5, height=1,
other_stim=[psychopy.visual.TextStim(win, 'Enter text below', pos=(0, 0.8))])
while True:
response, rt, last_key = text_in.wait_key()
if last_key[0] == 'return': # or other conditions when you want to end the response
break
It's only been tested on Macs so far.
"""
def __init__(self, window, width, height, pos=(0, 0), bg_color='white', text_color='black', line_height=0.05,
padding=0.01, max_length=float('inf'), valid_chars=None, other_stim=(), **kwargs):
"""
Accept all input parameters that psychopy.visual.TextStim accepts
:param max_length: (int) maximum length of the text input
:param valid_chars: a list of characters that are allowed as input;
                            if None, all letters, numbers and standard characters are allowed
:param padding: (float) distance between text and the border of the text box
:param other_stim: a list of other psychopy stimuli to be displayed together
"""
self._key_mapping = {
'grave': ('`', '~'),
'minus': ('-', '_'),
'equal': ('=', '+'),
'bracketleft': ('[', '{'),
'bracketright': (']', '}'),
'semicolon': (';', ':'),
'apostrophe': ('\'', '"'),
'comma': (',', '<'),
'period': ('.', '>'),
'slash': ('/', '?'),
'backslash': ('\\', '|'),
'return': '\n',
'space': ' ',
'1': '!',
'2': '@',
'3': '#',
'4': '$',
'5': '%',
'6': '^',
'7': '&',
'8': '*',
'9': '(',
'0': ')',
}
self.window = window
        self.other_stim = list(other_stim)  # keep a list so stimuli can be appended later
self.max_length = max_length
self.valid_chars = valid_chars
self.text = ''
text_pos = (pos[0] - float(width) / 2 + padding, pos[1] + float(height) / 2 - padding)
wrap_width = width - padding * 2
self.text_stim = visual.TextStim(self.window, self.text, color=text_color, pos=text_pos, height=line_height,
alignHoriz='left', alignVert='top', wrapWidth=wrap_width, **kwargs)
self.background = visual.Rect(window, width=width, height=height, pos=pos, fillColor=bg_color)
self.draw()
self.timer = core.Clock()
def draw(self):
self.background.draw()
for stim in self.other_stim:
if stim is not None: # skipping "None"
stim.draw()
self.text_stim.setText(self.text + u'\u258c')
self.text_stim.draw()
self.window.flip()
def update(self, key):
"""
:param key: a tuple containing information about the key pressed, as returned by psychopy.event.waitKeys where
modifiers=True
"""
text_changed = True
key_name = key[0]
modifiers = key[1]
if key_name == 'backspace':
self.text = self.text[:-1]
elif len(self.text) == self.max_length: # exceeding the maximum length
text_changed = False
else:
# get character entered
char = ''
if len(key_name) == 1:
if key_name.isalpha():
char = key_name.upper() if modifiers['shift'] or modifiers['capslock'] else key_name
else:
char = self._key_mapping[key_name] if modifiers['shift'] else key_name
elif key_name in self._key_mapping: # key_name length > 1
if isinstance(self._key_mapping[key_name], tuple):
char = self._key_mapping[key_name][1] if modifiers['shift'] else self._key_mapping[key_name][0]
else:
char = self._key_mapping[key_name]
else:
text_changed = False
# check if character is allowed
if (self.valid_chars is None) or (char in self.valid_chars):
self.text += char
else:
text_changed = False
if text_changed:
self.draw()
def add_other_stim(self, other_stim):
"""
:param other_stim: a psychopy visual stimulus or a list of them
"""
if isinstance(other_stim, visual.BaseVisualStim):
self.other_stim.append(other_stim)
else:
self.other_stim += other_stim
self.draw()
def wait_key(self):
"""
        :return: the complete input text string, reaction time in seconds, and an information tuple about the last
                 key pressed ('key_name', {'modifier_names': True/False})
"""
key = event.waitKeys(modifiers=True, timeStamped=self.timer)[0]
self.update(key)
return self.text, key[2], (key[0], key[1])
|
the-stack_0_20483 | from alibabacloud.clients.ecs_20140526 import EcsClient
from base import SDKTestBase, MyServer
class UserAgentTest(SDKTestBase):
@staticmethod
def joint_default_user_agent():
import platform
base = '%s (%s %s;%s) Python/%s Alibabacloud/%s python-requests/%s' \
% ('AlibabaCloud',
platform.system(),
platform.release(),
platform.machine(),
platform.python_version(),
__import__('alibabacloud').__version__,
__import__(
'alibabacloud.vendored.requests.__version__', globals(), locals(),
['vendored', 'requests', '__version__'], 0).__version__)
return base
def init_temp_client_config(self):
client_config = self.client_config
client_config.http_port = 51352
client_config.endpoint = "localhost"
return client_config
def test_default_user_agent(self):
client_config = self.init_temp_client_config()
client = EcsClient(client_config, self.init_credentials_provider())
with MyServer() as s:
client.describe_instances()
user_agent = s.headers.get('User-Agent')
self.assertEqual(self.joint_default_user_agent(), user_agent)
def test_append_user_agent(self):
client_config = self.init_temp_client_config()
client_config.user_agent = 'alibabacloudpythonsdk'
client = EcsClient(client_config, self.init_credentials_provider())
with MyServer() as s:
client.describe_instances()
user_agent = s.headers.get('User-Agent')
self.assertEqual(self.joint_default_user_agent() + ' alibabacloudpythonsdk', user_agent)
|
the-stack_0_20486 | # -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Pedro Algarvio ([email protected])`
:copyright: © 2013 by the SaltStack Team, see AUTHORS for more details.
:license: Apache 2.0, see LICENSE for more details.
bonneville.utils.odict
~~~~~~~~~~~~~~~~
This is a compatibility/"importability" layer for an ordered dictionary.
Tries to import from the standard library if python >= 2.7, then from the
``ordereddict`` package available from PyPi, and, as a last resort,
provides an ``OrderedDict`` implementation based on::
http://code.activestate.com/recipes/576669/
'''
try:
from collections import OrderedDict # pylint: disable=E0611
except ImportError:
try:
from ordereddict import OrderedDict
except ImportError:
## {{{ http://code.activestate.com/recipes/576693/ (r9)
# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
# try:
# from _abcoll import KeysView, ValuesView, ItemsView
# except ImportError:
# pass
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
def __init__(self, *args, **kwds): # pylint: disable=E1003
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
super(OrderedDict, self).__init__() # pylint: disable=E1003
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.values():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
            def iterkeys(self):
                'od.iterkeys() -> an iterator over the keys in od'
                return iter(self)
            def itervalues(self):
                'od.itervalues() -> an iterator over the values in od'
                for k in self:
                    yield self[k]
            def iteritems(self):
                'od.iteritems() -> an iterator over the (key, value) items in od'
                for k in self:
                    yield (k, self[k])
def update(*args, **kwds): # pylint: disable=E0211
'''od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does: for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
'''
if len(args) > 2:
raise TypeError('update() takes at most 2 positional '
'arguments (%d given)' % (len(args),))
elif not args:
raise TypeError('update() takes at least 1 argument (0 given)')
self = args[0]
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 2:
other = args[1]
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, 'keys'):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def __repr__(self, _repr_running={}): # pylint: disable=W0102
'od.__repr__() <==> repr(od)'
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self) == len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
# # -- the following methods are only used in Python 2.7 --
#
# def viewkeys(self):
# "od.viewkeys() -> a set-like object providing a view on od's keys"
# return KeysView(self)
#
# def viewvalues(self):
# "od.viewvalues() -> an object providing a view on od's values"
# return ValuesView(self)
#
# def viewitems(self):
# "od.viewitems() -> a set-like object providing a view on od's items"
# return ItemsView(self)
# ## end of http://code.activestate.com/recipes/576693/ }}}
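if __name__ == '__main__':
    # Minimal usage sketch: insertion order is preserved and popitem() follows the
    # LIFO/FIFO semantics documented above (works with any of the three backends).
    od = OrderedDict([('a', 1), ('b', 2), ('c', 3)])
    assert list(od.keys()) == ['a', 'b', 'c']
    assert od.popitem() == ('c', 3)            # LIFO by default
    assert od.popitem(last=False) == ('a', 1)  # FIFO when last=False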
|
the-stack_0_20487 | import io
import json
import mimetypes
import os
import warnings
from abc import ABC, abstractmethod
from multidict import CIMultiDict
from . import hdrs
from .helpers import (PY_36, content_disposition_header, guess_filename,
parse_mimetype, sentinel)
from .streams import DEFAULT_LIMIT
__all__ = ('PAYLOAD_REGISTRY', 'get_payload', 'payload_type', 'Payload',
'BytesPayload', 'StringPayload',
'IOBasePayload', 'BytesIOPayload', 'BufferedReaderPayload',
'TextIOPayload', 'StringIOPayload', 'JsonPayload')
TOO_LARGE_BYTES_BODY = 2 ** 20
class LookupError(Exception):
pass
def get_payload(data, *args, **kwargs):
return PAYLOAD_REGISTRY.get(data, *args, **kwargs)
def register_payload(factory, type):
PAYLOAD_REGISTRY.register(factory, type)
class payload_type:
def __init__(self, type):
self.type = type
def __call__(self, factory):
register_payload(factory, self.type)
return factory
class PayloadRegistry:
"""Payload registry.
note: we need zope.interface for more efficient adapter search
"""
def __init__(self):
self._registry = []
def get(self, data, *args, **kwargs):
if isinstance(data, Payload):
return data
for factory, type in self._registry:
if isinstance(data, type):
return factory(data, *args, **kwargs)
raise LookupError()
def register(self, factory, type):
self._registry.append((factory, type))
class Payload(ABC):
_size = None
_headers = None
_content_type = 'application/octet-stream'
def __init__(self, value, *, headers=None, content_type=sentinel,
filename=None, encoding=None, **kwargs):
self._value = value
self._encoding = encoding
self._filename = filename
if headers is not None:
self._headers = CIMultiDict(headers)
if content_type is sentinel and hdrs.CONTENT_TYPE in self._headers:
content_type = self._headers[hdrs.CONTENT_TYPE]
if content_type is sentinel:
content_type = None
self._content_type = content_type
@property
def size(self):
"""Size of the payload."""
return self._size
@property
def filename(self):
"""Filename of the payload."""
return self._filename
@property
def headers(self):
"""Custom item headers"""
return self._headers
@property
def encoding(self):
"""Payload encoding"""
return self._encoding
@property
def content_type(self):
"""Content type"""
if self._content_type is not None:
return self._content_type
elif self._filename is not None:
mime = mimetypes.guess_type(self._filename)[0]
return 'application/octet-stream' if mime is None else mime
else:
return Payload._content_type
def set_content_disposition(self, disptype, quote_fields=True, **params):
"""Sets ``Content-Disposition`` header."""
if self._headers is None:
self._headers = CIMultiDict()
self._headers[hdrs.CONTENT_DISPOSITION] = content_disposition_header(
disptype, quote_fields=quote_fields, **params)
@abstractmethod
async def write(self, writer):
"""Write payload.
writer is an AbstractPayloadWriter instance:
"""
class BytesPayload(Payload):
def __init__(self, value, *args, **kwargs):
assert isinstance(value, (bytes, bytearray, memoryview)), \
"value argument must be byte-ish (%r)" % type(value)
if 'content_type' not in kwargs:
kwargs['content_type'] = 'application/octet-stream'
super().__init__(value, *args, **kwargs)
self._size = len(value)
if self._size > TOO_LARGE_BYTES_BODY:
if PY_36:
kwargs = {'source': self}
else:
kwargs = {}
warnings.warn("Sending a large body directly with raw bytes might"
" lock the event loop. You should probably pass an "
"io.BytesIO object instead", ResourceWarning,
**kwargs)
async def write(self, writer):
await writer.write(self._value)
class StringPayload(BytesPayload):
def __init__(self, value, *args,
encoding=None, content_type=None, **kwargs):
if encoding is None:
if content_type is None:
encoding = 'utf-8'
content_type = 'text/plain; charset=utf-8'
else:
mimetype = parse_mimetype(content_type)
encoding = mimetype.parameters.get('charset', 'utf-8')
else:
if content_type is None:
content_type = 'text/plain; charset=%s' % encoding
super().__init__(
value.encode(encoding),
encoding=encoding, content_type=content_type, *args, **kwargs)
class StringIOPayload(StringPayload):
def __init__(self, value, *args, **kwargs):
super().__init__(value.read(), *args, **kwargs)
class IOBasePayload(Payload):
def __init__(self, value, disposition='attachment', *args, **kwargs):
if 'filename' not in kwargs:
kwargs['filename'] = guess_filename(value)
super().__init__(value, *args, **kwargs)
if self._filename is not None and disposition is not None:
self.set_content_disposition(disposition, filename=self._filename)
async def write(self, writer):
try:
chunk = self._value.read(DEFAULT_LIMIT)
while chunk:
await writer.write(chunk)
chunk = self._value.read(DEFAULT_LIMIT)
finally:
self._value.close()
class TextIOPayload(IOBasePayload):
def __init__(self, value, *args,
encoding=None, content_type=None, **kwargs):
if encoding is None:
if content_type is None:
encoding = 'utf-8'
content_type = 'text/plain; charset=utf-8'
else:
mimetype = parse_mimetype(content_type)
encoding = mimetype.parameters.get('charset', 'utf-8')
else:
if content_type is None:
content_type = 'text/plain; charset=%s' % encoding
super().__init__(
value,
content_type=content_type, encoding=encoding, *args, **kwargs)
@property
def size(self):
try:
return os.fstat(self._value.fileno()).st_size - self._value.tell()
except OSError:
return None
async def write(self, writer):
try:
chunk = self._value.read(DEFAULT_LIMIT)
while chunk:
await writer.write(chunk.encode(self._encoding))
chunk = self._value.read(DEFAULT_LIMIT)
finally:
self._value.close()
class BytesIOPayload(IOBasePayload):
@property
def size(self):
position = self._value.tell()
end = self._value.seek(0, os.SEEK_END)
self._value.seek(position)
return end - position
class BufferedReaderPayload(IOBasePayload):
@property
def size(self):
try:
return os.fstat(self._value.fileno()).st_size - self._value.tell()
except OSError:
# data.fileno() is not supported, e.g.
# io.BufferedReader(io.BytesIO(b'data'))
return None
class JsonPayload(BytesPayload):
def __init__(self, value,
encoding='utf-8', content_type='application/json',
dumps=json.dumps, *args, **kwargs):
super().__init__(
dumps(value).encode(encoding),
content_type=content_type, encoding=encoding, *args, **kwargs)
PAYLOAD_REGISTRY = PayloadRegistry()
PAYLOAD_REGISTRY.register(BytesPayload, (bytes, bytearray, memoryview))
PAYLOAD_REGISTRY.register(StringPayload, str)
PAYLOAD_REGISTRY.register(StringIOPayload, io.StringIO)
PAYLOAD_REGISTRY.register(TextIOPayload, io.TextIOBase)
PAYLOAD_REGISTRY.register(BytesIOPayload, io.BytesIO)
PAYLOAD_REGISTRY.register(
BufferedReaderPayload, (io.BufferedReader, io.BufferedRandom))
PAYLOAD_REGISTRY.register(IOBasePayload, io.IOBase)
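# Minimal usage sketch of the registry dispatch (illustrative):
#
#     payload = get_payload(b'hello world')    # resolved to BytesPayload
#     payload.size                              # 11
#     payload.content_type                      # 'application/octet-stream'
#     payload.set_content_disposition('attachment', filename='data.bin')
#
# Custom payload factories can be registered with the @payload_type(SomeType)
# decorator; PAYLOAD_REGISTRY.get() tries the registered (factory, type) pairs
# in registration order.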
|
the-stack_0_20488 | from __future__ import division
import torch
import torch.nn as nn
from .base_new import BaseDetectorNew
from .test_mixins import RPNTestMixin
from .. import builder
from ..registry import DETECTORS
from mmdet.core import (build_assigner, bbox2roi, dbbox2roi, bbox2result, build_sampler,
dbbox2result, merge_aug_masks, roi2droi, mask2poly,
get_best_begin_point, polygonToRotRectangle_batch,
gt_mask_bp_obbs_list, choose_best_match_batch,
choose_best_Rroi_batch, dbbox_rotate_mapping, bbox_rotate_mapping)
from mmdet.core import (bbox_mapping, merge_aug_proposals, merge_aug_bboxes,
                        multiclass_nms, merge_rotate_aug_proposals,
                        merge_rotate_aug_bboxes, multiclass_nms_rbbox)
import copy
from mmdet.core import RotBox2Polys, polygonToRotRectangle_batch
@DETECTORS.register_module
class RoITransformerAnalysis(BaseDetectorNew, RPNTestMixin):
def __init__(self,
backbone,
neck=None,
shared_head=None,
shared_head_rbbox=None,
rpn_head=None,
bbox_roi_extractor=None,
bbox_head=None,
rbbox_roi_extractor=None,
rbbox_head=None,
mask_roi_extractor=None,
mask_head=None,
train_cfg=None,
test_cfg=None,
pretrained=None):
assert bbox_roi_extractor is not None
assert bbox_head is not None
assert rbbox_roi_extractor is not None
assert rbbox_head is not None
super(RoITransformerAnalysis, self).__init__()
self.backbone = builder.build_backbone(backbone)
if neck is not None:
self.neck = builder.build_neck(neck)
if rpn_head is not None:
self.rpn_head = builder.build_head(rpn_head)
if shared_head is not None:
self.shared_head = builder.build_shared_head(shared_head)
if shared_head_rbbox is not None:
self.shared_head_rbbox = builder.build_shared_head(shared_head_rbbox)
if bbox_head is not None:
self.bbox_roi_extractor = builder.build_roi_extractor(
bbox_roi_extractor)
self.bbox_head = builder.build_head(bbox_head)
# import pdb
# pdb.set_trace()
if rbbox_head is not None:
self.rbbox_roi_extractor = builder.build_roi_extractor(
rbbox_roi_extractor)
self.rbbox_head = builder.build_head(rbbox_head)
if mask_head is not None:
if mask_roi_extractor is not None:
self.mask_roi_extractor = builder.build_roi_extractor(
mask_roi_extractor)
self.share_roi_extractor = False
else:
self.share_roi_extractor = True
self.mask_roi_extractor = self.rbbox_roi_extractor
self.mask_head = builder.build_head(mask_head)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
self.init_weights(pretrained=pretrained)
@property
def with_rpn(self):
return hasattr(self, 'rpn_head') and self.rpn_head is not None
def init_weights(self, pretrained=None):
super(RoITransformerAnalysis, self).init_weights(pretrained)
self.backbone.init_weights(pretrained=pretrained)
if self.with_neck:
if isinstance(self.neck, nn.Sequential):
for m in self.neck:
m.init_weights()
else:
self.neck.init_weights()
if self.with_rpn:
self.rpn_head.init_weights()
if self.with_shared_head:
self.shared_head.init_weights(pretrained=pretrained)
if self.with_shared_head_rbbox:
self.shared_head_rbbox.init_weights(pretrained=pretrained)
if self.with_bbox:
self.bbox_roi_extractor.init_weights()
self.bbox_head.init_weights()
if self.with_rbbox:
self.rbbox_roi_extractor.init_weights()
self.rbbox_head.init_weights()
if self.with_mask:
self.mask_head.init_weights()
if not self.share_roi_extractor:
self.mask_roi_extractor.init_weights()
def extract_feat(self, img):
x = self.backbone(img)
if self.with_neck:
x = self.neck(x)
return x
def forward_train(self,
img,
img_meta,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None,
gt_masks=None,
proposals=None):
x = self.extract_feat(img)
losses = dict()
# trans gt_masks to gt_obbs
gt_obbs = gt_mask_bp_obbs_list(gt_masks)
# RPN forward and loss
if self.with_rpn:
rpn_outs = self.rpn_head(x)
rpn_loss_inputs = rpn_outs + (gt_bboxes, img_meta,
self.train_cfg.rpn)
rpn_losses = self.rpn_head.loss(
*rpn_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
losses.update(rpn_losses)
proposal_cfg = self.train_cfg.get('rpn_proposal',
self.test_cfg.rpn)
proposal_inputs = rpn_outs + (img_meta, proposal_cfg)
proposal_list = self.rpn_head.get_bboxes(*proposal_inputs)
else:
proposal_list = proposals
# assign gts and sample proposals (hbb assign)
if self.with_bbox or self.with_mask:
bbox_assigner = build_assigner(self.train_cfg.rcnn[0].assigner)
bbox_sampler = build_sampler(
self.train_cfg.rcnn[0].sampler, context=self)
num_imgs = img.size(0)
if gt_bboxes_ignore is None:
gt_bboxes_ignore = [None for _ in range(num_imgs)]
sampling_results = []
for i in range(num_imgs):
assign_result = bbox_assigner.assign(
proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i],
gt_labels[i])
sampling_result = bbox_sampler.sample(
assign_result,
proposal_list[i],
gt_bboxes[i],
gt_labels[i],
feats=[lvl_feat[i][None] for lvl_feat in x])
sampling_results.append(sampling_result)
# bbox head forward and loss
if self.with_bbox:
rois = bbox2roi([res.bboxes for res in sampling_results])
# TODO: a more flexible way to decide which feature maps to use
bbox_feats = self.bbox_roi_extractor(
x[:self.bbox_roi_extractor.num_inputs], rois)
if self.with_shared_head:
bbox_feats = self.shared_head(bbox_feats)
cls_score, bbox_pred = self.bbox_head(bbox_feats)
## rbbox
rbbox_targets = self.bbox_head.get_target(
sampling_results, gt_masks, gt_labels, self.train_cfg.rcnn[0])
loss_bbox = self.bbox_head.loss(cls_score, bbox_pred,
*rbbox_targets)
# losses.update(loss_bbox)
for name, value in loss_bbox.items():
losses['s{}.{}'.format(0, name)] = (value)
pos_is_gts = [res.pos_is_gt for res in sampling_results]
roi_labels = rbbox_targets[0]
with torch.no_grad():
# import pdb
# pdb.set_trace()
rotated_proposal_list = self.bbox_head.refine_rbboxes(
roi2droi(rois), roi_labels, bbox_pred, pos_is_gts, img_meta
)
# import pdb
# pdb.set_trace()
# assign gts and sample proposals (rbb assign)
if self.with_rbbox:
bbox_assigner = build_assigner(self.train_cfg.rcnn[1].assigner)
bbox_sampler = build_sampler(
self.train_cfg.rcnn[1].sampler, context=self)
num_imgs = img.size(0)
if gt_bboxes_ignore is None:
gt_bboxes_ignore = [None for _ in range(num_imgs)]
sampling_results = []
for i in range(num_imgs):
gt_obbs_best_roi = choose_best_Rroi_batch(gt_obbs[i])
assign_result = bbox_assigner.assign(
rotated_proposal_list[i], gt_obbs_best_roi, gt_bboxes_ignore[i],
gt_labels[i])
sampling_result = bbox_sampler.sample(
assign_result,
rotated_proposal_list[i],
torch.from_numpy(gt_obbs_best_roi).float().to(rotated_proposal_list[i].device),
gt_labels[i],
feats=[lvl_feat[i][None] for lvl_feat in x])
sampling_results.append(sampling_result)
if self.with_rbbox:
# (batch_ind, x_ctr, y_ctr, w, h, angle)
rrois = dbbox2roi([res.bboxes for res in sampling_results])
# feat enlarge
# rrois[:, 3] = rrois[:, 3] * 1.2
# rrois[:, 4] = rrois[:, 4] * 1.4
rrois[:, 3] = rrois[:, 3] * self.rbbox_roi_extractor.w_enlarge
rrois[:, 4] = rrois[:, 4] * self.rbbox_roi_extractor.h_enlarge
rbbox_feats = self.rbbox_roi_extractor(x[:self.rbbox_roi_extractor.num_inputs],
rrois)
if self.with_shared_head_rbbox:
rbbox_feats = self.shared_head_rbbox(rbbox_feats)
cls_score, rbbox_pred = self.rbbox_head(rbbox_feats)
rbbox_targets = self.rbbox_head.get_target_rbbox(sampling_results, gt_obbs,
gt_labels, self.train_cfg.rcnn[1])
loss_rbbox = self.rbbox_head.loss(cls_score, rbbox_pred, *rbbox_targets)
for name, value in loss_rbbox.items():
losses['s{}.{}'.format(1, name)] = (value)
return losses
def simple_test(self, img, img_meta, proposals=None, rescale=False):
x = self.extract_feat(img)
proposal_list = self.simple_test_rpn(
x, img_meta, self.test_cfg.rpn) if proposals is None else proposals
img_shape = img_meta[0]['img_shape']
scale_factor = img_meta[0]['scale_factor']
rcnn_test_cfg = self.test_cfg.rcnn
rois = bbox2roi(proposal_list)
bbox_feats = self.bbox_roi_extractor(
x[:len(self.bbox_roi_extractor.featmap_strides)], rois)
if self.with_shared_head:
bbox_feats = self.shared_head(bbox_feats)
cls_score, bbox_pred = self.bbox_head(bbox_feats)
bbox_label = cls_score.argmax(dim=1)
rrois = self.bbox_head.regress_by_class_rbbox(roi2droi(rois), bbox_label, bbox_pred,
img_meta[0])
rrois_enlarge = copy.deepcopy(rrois)
rrois_enlarge[:, 3] = rrois_enlarge[:, 3] * self.rbbox_roi_extractor.w_enlarge
rrois_enlarge[:, 4] = rrois_enlarge[:, 4] * self.rbbox_roi_extractor.h_enlarge
rbbox_feats = self.rbbox_roi_extractor(
x[:len(self.rbbox_roi_extractor.featmap_strides)], rrois_enlarge)
if self.with_shared_head_rbbox:
rbbox_feats = self.shared_head_rbbox(rbbox_feats)
rcls_score, rbbox_pred = self.rbbox_head(rbbox_feats)
det_rbboxes, det_labels = self.rbbox_head.get_det_rbboxes(
rrois,
rcls_score,
rbbox_pred,
img_shape,
scale_factor,
rescale=rescale,
cfg=rcnn_test_cfg)
rbbox_results = dbbox2result(det_rbboxes, det_labels,
self.rbbox_head.num_classes)
return rbbox_results
def aug_test(self, imgs, img_metas, proposals=None, rescale=None):
# raise NotImplementedError
# import pdb; pdb.set_trace()
proposal_list = self.aug_test_rpn_rotate(
self.extract_feats(imgs), img_metas, self.test_cfg.rpn)
rcnn_test_cfg = self.test_cfg.rcnn
aug_rbboxes = []
aug_rscores = []
for x, img_meta in zip(self.extract_feats(imgs), img_metas):
# only one image in the batch
img_shape = img_meta[0]['img_shape']
scale_factor = img_meta[0]['scale_factor']
flip = img_meta[0]['flip']
proposals = bbox_mapping(proposal_list[0][:, :4], img_shape,
scale_factor, flip)
angle = img_meta[0]['angle']
# print('img shape: ', img_shape)
if angle != 0:
try:
proposals = bbox_rotate_mapping(proposal_list[0][:, :4], img_shape,
angle)
except:
import pdb; pdb.set_trace()
rois = bbox2roi([proposals])
# recompute feature maps to save GPU memory
roi_feats = self.bbox_roi_extractor(
x[:len(self.bbox_roi_extractor.featmap_strides)], rois)
if self.with_shared_head:
roi_feats = self.shared_head(roi_feats)
cls_score, bbox_pred = self.bbox_head(roi_feats)
bbox_label = cls_score.argmax(dim=1)
rrois = self.bbox_head.regress_by_class_rbbox(roi2droi(rois), bbox_label,
bbox_pred,
img_meta[0])
rrois_enlarge = copy.deepcopy(rrois)
rrois_enlarge[:, 3] = rrois_enlarge[:, 3] * self.rbbox_roi_extractor.w_enlarge
rrois_enlarge[:, 4] = rrois_enlarge[:, 4] * self.rbbox_roi_extractor.h_enlarge
rbbox_feats = self.rbbox_roi_extractor(
x[:len(self.rbbox_roi_extractor.featmap_strides)], rrois_enlarge)
if self.with_shared_head_rbbox:
rbbox_feats = self.shared_head_rbbox(rbbox_feats)
rcls_score, rbbox_pred = self.rbbox_head(rbbox_feats)
rbboxes, rscores = self.rbbox_head.get_det_rbboxes(
rrois,
rcls_score,
rbbox_pred,
img_shape,
scale_factor,
rescale=rescale,
cfg=None)
aug_rbboxes.append(rbboxes)
aug_rscores.append(rscores)
merged_rbboxes, merged_rscores = merge_rotate_aug_bboxes(
aug_rbboxes, aug_rscores, img_metas, rcnn_test_cfg
)
det_rbboxes, det_rlabels = multiclass_nms_rbbox(
merged_rbboxes, merged_rscores, rcnn_test_cfg.score_thr,
rcnn_test_cfg.nms, rcnn_test_cfg.max_per_img)
if rescale:
_det_rbboxes = det_rbboxes
else:
_det_rbboxes = det_rbboxes.clone()
_det_rbboxes[:, :4] *= img_metas[0][0]['scale_factor']
rbbox_results = dbbox2result(_det_rbboxes, det_rlabels,
self.rbbox_head.num_classes)
return rbbox_results
|
the-stack_0_20489 | import unittest
import numpy as np
from .eft import *
import pkgutil
from wilson import wcxf
import wilson
par = {
'm_Z': 91.1876,
'm_b': 4.18,
'm_d': 4.8e-3,
'm_s': 0.095,
'm_t': 173.3,
'm_c': 1.27,
'm_u': 2.3e-3,
'alpha_e': 1/127.944,
'alpha_s': 0.1185,
'GF': 1.1663787e-5,
}
class TestEFT(unittest.TestCase):
def test_eft_old(self):
wc = WilsonCoefficients()
wc.set_initial({'CVLL_bsbs': 0.1j, 'C9_bsmumu':-1.5, 'CVL_bctaunutau': 0.2}, 160.)
d1 = wc.get_wc('bsbs', 4.8, par)
d2 = wc.get_wc('bsbs', 4.8, par) # again, to test the cache
self.assertDictEqual(d1, d2)
wc.get_wc('bsmumu', 4.8, par)
wc.get_wc('bctaunutau', 4.8, par)
def test_set_initial_wcxf(self):
test_file = pkgutil.get_data('flavio', 'data/test/wcxf-flavio-example.yml')
flavio_wc = WilsonCoefficients()
wcxf_wc = wcxf.WC.load(test_file.decode('utf-8'))
wcxf_wc.validate()
flavio_wc.set_initial_wcxf(wcxf_wc)
wc_out = flavio_wc.get_wc('bsee', 160, par)
self.assertEqual(wc_out['C9_bsee'], -1+0.01j)
self.assertEqual(wc_out['C9p_bsee'], 0.1)
self.assertEqual(wc_out['C10_bsee'], 0.05j)
self.assertEqual(wc_out['C10p_bsee'], 0.1-0.3j)
self.assertEqual(wc_out['CS_bsee'], 0)
wcxf_wc.basis = 'unknown basis'
with self.assertRaises((KeyError, ValueError, AssertionError)):
flavio_wc.set_initial_wcxf(wcxf_wc)
def test_set_initial_wcxf_minimal(self):
for eft in ['WET', 'WET-4', 'WET-3']:
wc = wcxf.WC(eft, 'flavio', 120, {'CVLL_sdsd': {'Im': 1}})
fwc = WilsonCoefficients()
fwc.set_initial_wcxf(wc)
self.assertEqual(fwc.get_wc('sdsd', 120, par, eft=eft)['CVLL_sdsd'], 1j)
pf = 4 * par['GF'] / np.sqrt(2)
wc = wcxf.WC(eft, 'Bern', 120, {'1dsds': {'Im': 1/pf}})
fwc = WilsonCoefficients()
fwc.set_initial_wcxf(wc)
self.assertAlmostEqual(fwc.get_wc('sdsd', 120, par, eft=eft)['CVLL_sdsd'], 1j)
    def test_repr(self):
wc = WilsonCoefficients()
wc._repr_markdown_()
wc.set_initial({'C7_bs': -0.1}, 5)
wc._repr_markdown_()
def test_get_initial_wcxf_minimal(self):
for eft in ['WET', 'WET-4', 'WET-3']:
wc = wcxf.WC(eft, 'flavio', 120, {'CVLL_sdsd': {'Im': 1}})
fwc = WilsonCoefficients()
fwc.set_initial_wcxf(wc)
wc2 = fwc.get_initial_wcxf
self.assertEqual(wc.eft, wc2.eft)
self.assertEqual(wc2.basis, 'flavio')
self.assertDictEqual(wc.dict, wc2.dict)
def test_deprecations(self):
"""Check that deprecated or renamed Wilson coefficients raise/warn"""
wc = WilsonCoefficients()
wc.set_initial({'C9_bsmumu': 1.2}, 5) # this should work
with self.assertRaises((KeyError, AssertionError)):
wc.set_initial({'C9_bsmumu': 1.2, 'C7effp_bs': 3}, 5)
with self.assertRaises((KeyError, AssertionError)):
wc.set_initial({'C9_bsmumu': 1.2, 'C8eff_sd': 3}, 5)
with self.assertRaises((KeyError, AssertionError)):
wc.set_initial({'C9_bsmumu': 1.2, 'CV_bcenu': 3}, 5)
with self.assertRaises((KeyError, AssertionError)):
wc.set_initial({'C3Qp_bs': 1.2, 'C1_bs': 3}, 5)
|
the-stack_0_20490 | """
Simple faucet server
Proxies mint requests to local client that owns association keys
"""
import decimal
import os
import platform
import random
import re
import sys
import flask
import pexpect
MAX_MINT = 10 ** 19 # 10 trillion libras
def create_client():
if application.client is None or not application.client.isalive():
# If we have comma separated list take a random one
ac_hosts = os.environ['AC_HOST'].split(',')
ac_host = random.choice(ac_hosts)
ac_port = os.environ['AC_PORT']
print("Connecting to ac on: {}:{}".format(ac_host, ac_port))
cmd = "/opt/libra/bin/client --host {} --port {} -m {} -s {}".format(
ac_host,
ac_port,
"/opt/libra/etc/mint.key",
"/opt/libra/etc/trusted_peers.config.toml")
application.client = pexpect.spawn(cmd)
application.client.expect("Please, input commands")
application = flask.Flask(__name__)
application.client = None
print(sys.version, platform.python_version())
create_client()
@application.route("/", methods=('POST',))
def send_transaction():
address = flask.request.args['address']
# Return immediately if address is invalid
if re.match('^[a-f0-9]{64}$', address) is None:
return 'Malformed address', 400
try:
amount = decimal.Decimal(flask.request.args['amount'])
except decimal.InvalidOperation:
return 'Bad amount', 400
if amount > MAX_MINT:
return 'Exceeded max amount of {}'.format(MAX_MINT / (10 ** 6)), 400
try:
create_client()
application.client.sendline(
"a m {} {}".format(address, amount / (10 ** 6)))
application.client.expect("Mint request submitted", timeout=2)
application.client.sendline("a la")
application.client.expect(r"sequence_number: ([0-9]+)", timeout=1)
except pexpect.exceptions.ExceptionPexpect:
application.client.terminate(True)
raise
return application.client.match.groups()[0]
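# Example request (the listening port depends on how the Flask app is deployed):
#   curl -X POST "http://localhost:8000/?address=<64 lowercase hex chars>&amount=100"
# 'amount' is interpreted in micro-libra; on success the response body is the
# minter account's current sequence number as reported by the client.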
|
the-stack_0_20491 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2021-12-12 16:27:19
# @Author : Sun Bing
import numpy as np
from keras import backend as K
from keras.losses import categorical_crossentropy
from keras.metrics import categorical_accuracy
from keras.optimizers import Adam
from tensorflow.keras.utils import to_categorical
from keras.layers import UpSampling2D, Cropping2D
from keras.layers import Input
from keras import Model
import pyswarms as ps
import utils_backdoor
from decimal import Decimal
import os
import sys
import time
from keras.preprocessing import image
##############################
# PARAMETERS #
##############################
RESULT_DIR = '../results' # directory for storing results
IMG_FILENAME_TEMPLATE = 'mnist_visualize_%s_label_%d.png' # image filename template for visualization results
# input size
IMG_ROWS = 28
IMG_COLS = 28
IMG_COLOR = 1
INPUT_SHAPE = (IMG_ROWS, IMG_COLS, IMG_COLOR)
MASK_SHAPE = (IMG_ROWS, IMG_COLS)
NUM_CLASSES = 10 # total number of classes in the model
CALSAL_STEP = 4
TEST_ONLY = 1
class causal_attribution:
# upsample size, default is 1
#UPSAMPLE_SIZE = 1
# pixel intensity range of image and preprocessing method
# raw: [0, 255]
# mnist: [0, 1]
# imagenet: imagenet mean centering
# inception: [-1, 1]
INTENSITY_RANGE = 'mnist'
# type of regularization of the mask
#REGULARIZATION = 'l1'
# threshold of attack success rate for dynamically changing cost
#ATTACK_SUCC_THRESHOLD = 0.99
# patience
#PATIENCE = 10
# multiple of changing cost, down multiple is the square root of this
#COST_MULTIPLIER = 1.5,
# if resetting cost to 0 at the beginning
# default is true for full optimization, set to false for early detection
#RESET_COST_TO_ZERO = True
# min/max of mask
MASK_MIN = 0
MASK_MAX = 1
# min/max of raw pixel intensity
COLOR_MIN = 0
COLOR_MAX = 1
# number of color channel
IMG_COLOR = 1
# whether to shuffle during each epoch
#SHUFFLE = True
# batch size of optimization
BATCH_SIZE = 32
# verbose level, 0, 1 or 2
VERBOSE = 1
# whether to return log or not
#RETURN_LOGS = True
# whether to save last pattern or best pattern
#SAVE_LAST = False
# epsilon used in tanh
#EPSILON = K.epsilon()
# early stop flag
#EARLY_STOP = True
# early stop threshold
#EARLY_STOP_THRESHOLD = 0.99
# early stop patience
#EARLY_STOP_PATIENCE = 2 * PATIENCE
# save tmp masks, for debugging purpose
#SAVE_TMP = False
# dir to save intermediate masks
#TMP_DIR = 'tmp'
# whether input image has been preprocessed or not
RAW_INPUT_FLAG = False
SPLIT_LAYER = 6
REP_N = 5
def __init__(self, model, generator, input_shape,
steps, mini_batch, num_classes,
mask_min=MASK_MIN, mask_max=MASK_MAX,
color_min=COLOR_MIN, color_max=COLOR_MAX, img_color=IMG_COLOR,
batch_size=BATCH_SIZE, verbose=VERBOSE,
rep_n=REP_N):
self.model = model
self.input_shape = input_shape
self.gen = generator
self.steps = 1 #steps
self.mini_batch = mini_batch
self.num_classes = num_classes
self.mask_min = mask_min
self.mask_max = mask_max
self.color_min = color_min
self.color_max = color_max
self.img_color = img_color
self.batch_size = batch_size
self.verbose = verbose
self.rep_n = rep_n # number of neurons to repair
self.r_weight = None
self.target = 3
self.alpha = 0.2
self.current_class = 5
# split the model for causal inervention
self.model1, self.model2 = self.split_keras_model(self.model, self.SPLIT_LAYER)
pass
def split_keras_model(self, lmodel, index):
model1 = Model(inputs=lmodel.inputs, outputs=lmodel.layers[index - 1].output)
model2_input = Input(lmodel.layers[index].input_shape[1:])
model2 = model2_input
for layer in lmodel.layers[index:]:
model2 = layer(model2)
model2 = Model(inputs=model2_input, outputs=model2)
return (model1, model2)
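    # Usage sketch (assuming a purely sequential topology): with SPLIT_LAYER = 6,
    #   m1, m2 = self.split_keras_model(self.model, 6)
    # gives m1: input -> activations of layer 5 and m2: those activations -> final
    # output, so m2.predict(m1.predict(x)) reproduces self.model.predict(x).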
def get_perturbed_input(self, x):
mask_flatten = []
pattern_flatten = []
mask = []
pattern = []
y_label = self.target
mask_filename = IMG_FILENAME_TEMPLATE % ('mask', y_label)
if os.path.isfile('%s/%s' % (RESULT_DIR, mask_filename)):
img = image.load_img(
'%s/%s' % (RESULT_DIR, mask_filename),
color_mode='grayscale',
target_size=INPUT_SHAPE)
mask = image.img_to_array(img)
mask /= 255
mask = mask[:, :, 0]
pattern_filename = IMG_FILENAME_TEMPLATE % ('pattern', y_label)
if os.path.isfile('%s/%s' % (RESULT_DIR, pattern_filename)):
img = image.load_img(
'%s/%s' % (RESULT_DIR, pattern_filename),
color_mode='rgb',
target_size=INPUT_SHAPE)
pattern = image.img_to_array(img)
pattern = pattern[:, :, 1] / 255.
filtered = np.multiply(x, np.expand_dims(np.subtract(np.ones((MASK_SHAPE)), mask), axis=2))
fusion = np.expand_dims(np.multiply(pattern, mask),axis=2)
x_out = np.add(filtered, fusion)
#test
#'''
utils_backdoor.dump_image(x[0]* 255,
'../results/ori_img0.png',
'png')
utils_backdoor.dump_image(x_out[0] * 255,
'../results/img0.png',
'png')
utils_backdoor.dump_image(np.expand_dims(mask, axis=2) * 255,
'../results/mask_test.png',
'png')
utils_backdoor.dump_image(np.expand_dims(pattern,axis=2)* 255, '../results/pattern_test.png', 'png')
fusion = np.expand_dims(np.multiply(pattern, mask), axis=2)
utils_backdoor.dump_image(fusion, '../results/fusion_test.png', 'png')
#'''
return x_out
def injection_func(self, mask, pattern, adv_img):
return mask * pattern + (1 - mask) * adv_img
def reset_opt(self):
K.set_value(self.opt.iterations, 0)
for w in self.opt.weights:
K.set_value(w, np.zeros(K.int_shape(w)))
pass
def analyze_alpha(self, gen):
alpha_list = [0.9]
for alpha in alpha_list:
self.alpha = alpha
print('alpha: {}'.format(alpha))
for i in range(0, 1):
print('iteration: {}'.format(i))
self.analyze_each(gen)
pass
def analyze(self, gen):
class_list = [0,1,2,3,4,5,6,7,8,9]
for each_class in class_list:
self.current_class = each_class
print('current_class: {}'.format(each_class))
self.analyze_eachclass(gen)
pass
def analyze_eachclass(self, gen):
ana_start_t = time.time()
'''
# find hidden range
for step in range(self.steps):
min = []
min_p = []
max = []
max_p = []
#self.mini_batch = 2
for idx in range(self.mini_batch):
X_batch, Y_batch = gen.next()
X_batch_perturbed = self.get_perturbed_input(X_batch)
min_i, max_i = self.get_in_range(X_batch)
min.append(min_i)
max.append(max_i)
min_i, max_i = self.get_in_range(X_batch_perturbed)
min_p.append(min_i)
max_p.append(max_i)
p_prediction = self.model.predict(X_batch_perturbed)
ori_predict = self.model.predict(X_batch)
np.savetxt("../results/p_prediction.txt", p_prediction, fmt="%s")
np.savetxt("../results/ori_predict.txt", ori_predict, fmt="%s")
predict = np.argmax(p_prediction, axis=1)
ori_predict = np.argmax(ori_predict, axis=1)
labels = np.argmax(Y_batch, axis=1)
min = np.min(np.array(min), axis=0)
max = np.max(np.array(max), axis=0)
min_p = np.min(np.array(min_p), axis=0)
max_p = np.max(np.array(max_p), axis=0)
'''
min = np.full((28, 28), 0, dtype=float)
max = np.full((28, 28), 255, dtype=float)
#'''
# loop start
for step in range(self.steps):
#'''
ie_batch = []
#self.mini_batch = 2
for idx in range(self.mini_batch):
X_batch, _ = gen.next()
#X_batch_perturbed = self.get_perturbed_input(X_batch)
# find hidden neuron interval
# find
#ie_batch.append(self.get_ie_do_h(X_batch, np.minimum(min_p, min), np.maximum(max_p, max)))
ie_batch.append(self.get_tie_do_in(X_batch, self.current_class, min, max))
ie_mean = np.mean(np.array(ie_batch),axis=0)
np.savetxt('../results/ie'+ str(self.current_class) +'.txt', ie_mean.reshape(28,28), fmt="%s")
ori_predict = self.model.predict(ie_mean.reshape(1,28,28,1))
np.savetxt("../results/ie_predict" + str(self.current_class) + ".txt", ori_predict, fmt="%s")
ori_predict = np.argmax(ori_predict, axis=1)
print("prediction: {}\n".format(ori_predict))
#print("expected: {}".format(self.current_class))
return
# ie_mean dim: 512 * 43
# find tarted class: diff of each column
col_diff = np.max(ie_mean, axis=0) - np.min(ie_mean, axis=0)
col_diff = np.transpose([np.arange(len(col_diff)), col_diff])
ind = np.argsort(col_diff[:, 1])[::-1]
col_diff = col_diff[ind]
np.savetxt("../results/col_diff.txt", col_diff, fmt="%s")
row_diff = np.max(ie_mean, axis=1) - np.min(ie_mean, axis=1)
row_diff = np.transpose([np.arange(len(row_diff)), row_diff])
ind = np.argsort(row_diff[:, 1])[::-1]
row_diff = row_diff[ind]
np.savetxt("../results/row_diff.txt", row_diff, fmt="%s")
ana_start_t = time.time() - ana_start_t
print('fault localization time: {}s'.format(ana_start_t))
#'''
rep_t = time.time()
# row_diff contains sensitive neurons: top self.rep_n
# index
self.rep_index = []
result, acc = self.pso_test([], self.target)
print("before repair: attack SR: {}, BE acc: {}".format(result, acc))
#'''
self.rep_index = row_diff[:,:1][:self.rep_n,:]
print("repair index: {}".format(self.rep_index.T))
'''
self.rep_index = [1563, 1552, 1547, 1331, 1541]
print("repair index: {}".format(self.rep_index))
#'''
self.repair()
rep_t = time.time() - rep_t
result, acc = self.pso_test(self.r_weight, self.target)
print("after repair: attack SR: {}, BE acc: {}".format(result, acc))
print('PSO time: {}s'.format(rep_t))
pass
def analyze_gradient(self, gen):
#'''
ana_start_t = time.time()
# find hidden range
for step in range(self.steps):
min = []
min_p = []
max = []
max_p = []
#self.mini_batch = 2
for idx in range(self.mini_batch):
X_batch, Y_batch = gen.next()
X_batch_perturbed = self.get_perturbed_input(X_batch)
min_i, max_i = self.get_h_range(X_batch)
min.append(min_i)
max.append(max_i)
min_i, max_i = self.get_h_range(X_batch_perturbed)
min_p.append(min_i)
max_p.append(max_i)
p_prediction = self.model.predict(X_batch_perturbed)
ori_predict = self.model.predict(X_batch)
np.savetxt("../results/p_prediction.txt", p_prediction, fmt="%s")
np.savetxt("../results/ori_predict.txt", ori_predict, fmt="%s")
predict = np.argmax(p_prediction, axis=1)
ori_predict = np.argmax(ori_predict, axis=1)
labels = np.argmax(Y_batch, axis=1)
min = np.min(np.array(min), axis=0)
max = np.max(np.array(max), axis=0)
min_p = np.min(np.array(min_p), axis=0)
max_p = np.max(np.array(max_p), axis=0)
#'''
# loop start
for step in range(self.steps):
#'''
ie_batch = []
#self.mini_batch = 2
for idx in range(self.mini_batch):
X_batch, _ = gen.next()
#X_batch_perturbed = self.get_perturbed_input(X_batch)
# find hidden neuron interval
# find
#ie_batch.append(self.get_ie_do_h(X_batch, np.minimum(min_p, min), np.maximum(max_p, max)))
ie_batch.append(self.get_gradient(X_batch, self.target, min, max))
ie_mean = np.mean(np.array(ie_batch),axis=0)
np.savetxt("../results/ori.txt", ie_mean, fmt="%s")
row_diff = ie_mean
row_diff = np.transpose([np.arange(len(row_diff)), row_diff])
ind = np.argsort(row_diff[:, 1])[::-1]
row_diff = row_diff[ind]
np.savetxt("../results/row_diff.txt", row_diff, fmt="%s")
ana_start_t = time.time() - ana_start_t
print('fault localization time: {}s'.format(ana_start_t))
#'''
rep_t = time.time()
# row_diff contains sensitive neurons: top self.rep_n
# index
self.rep_index = []
result, acc = self.pso_test([], self.target)
print("before repair: attack SR: {}, BE acc: {}".format(result, acc))
#'''
self.rep_index = row_diff[:,:1][:self.rep_n,:]
print("repair index: {}".format(self.rep_index.T))
'''
self.rep_index = [1563, 1552, 1547, 1331, 1541]
print("repair index: {}".format(self.rep_index))
'''
self.repair()
rep_t = time.time() - rep_t
#self.rep_index = [461, 395, 491, 404, 219]
#self.r_weight = [-0.13325777, 0.08095828, -0.80547224, -0.59831971, -0.23067632]
result, acc = self.pso_test(self.r_weight, self.target)
print("after repair: attack SR: {}, BE acc: {}".format(result, acc))
print('PSO time: {}s'.format(rep_t))
pass
# return
def get_ie_do_h(self, x, min, max):
pre_layer5 = self.model1.predict(x)
l_shape = pre_layer5.shape
ie = []
hidden_min = min.reshape(-1)
hidden_max = max.reshape(-1)
num_step = CALSAL_STEP
_pre_layer5 = np.reshape(pre_layer5, (len(pre_layer5), -1))
for i in range (len(_pre_layer5[0])):
ie_i = []
for h_val in np.linspace(hidden_min[i], hidden_max[i], num_step):
do_hidden = _pre_layer5.copy()
do_hidden[:, i] = h_val
pre_final = self.model2.predict(do_hidden.reshape(l_shape))
ie_i.append(np.mean(pre_final,axis=0))
ie.append(np.mean(np.array(ie_i),axis=0))
return np.array(ie)
def get_ie_do_in(self, x, min, max):
x_shape = x.shape
x = x.reshape((len(x), np.prod(x[0].shape)))
ie = []
in_min = min.reshape(-1)
in_max = max.reshape(-1)
num_step = CALSAL_STEP
        for i in range(len(x[0])):  # iterate over input features, as in get_tie_do_in
ie_i = []
for h_val in np.linspace(in_min[i], in_max[i], num_step):
do_in = x.copy()
do_in[:, i] = h_val
pre_final = self.model.predict(do_in.reshape(x_shape))
ie_i.append(np.mean(pre_final,axis=0))
ie.append(np.mean(np.array(ie_i),axis=0))
return np.array(ie)
# get ie of targeted class
def get_tie_do_h(self, x, t_dix, min, max):
pre_layer5 = self.model1.predict(x)
l_shape = pre_layer5.shape
ie = []
hidden_min = min.reshape(-1)
hidden_max = max.reshape(-1)
num_step = CALSAL_STEP
_pre_layer5 = np.reshape(pre_layer5, (len(pre_layer5), -1))
for i in range (len(_pre_layer5[0])):
ie_i = []
for h_val in np.linspace(hidden_min[i], hidden_max[i], num_step):
do_hidden = _pre_layer5.copy()
do_hidden[:, i] = h_val
                pre_final = self.model2.predict(do_hidden.reshape(l_shape))  # hidden activations go through the second half of the split model
ie_i.append(np.mean(pre_final,axis=0)[t_dix])
ie.append(np.array(ie_i))
return np.array(ie)
# get ie of targeted class with intervention on input neuron
def get_tie_do_in(self, x, t_dix, min, max):
x_shape = x.shape
x = x.reshape((len(x), np.prod(x[0].shape)))
ie = []
in_min = min.reshape(-1)
in_max = max.reshape(-1)
num_step = CALSAL_STEP
for i in range (len(x[0])):
ie_i = []
for h_val in np.linspace(in_min[i], in_max[i], num_step):
do_in = x.copy()
do_in[:, i] = h_val
pre_final = self.model.predict(do_in.reshape(x_shape))
ie_i.append(np.mean(pre_final[:,t_dix],axis=0))
ie.append(np.mean(np.array(ie_i),axis=0))
return np.array(ie)
# get ie of targeted class
def get_gradient_ie_do_h(self, x, t_dix, min, max):
pre_layer5 = self.model1.predict(x)
l_shape = pre_layer5.shape
ie = []
hidden_min = min.reshape(-1)
hidden_max = max.reshape(-1)
num_step = CALSAL_STEP
_pre_layer5 = np.reshape(pre_layer5, (len(pre_layer5), -1))
for i in range (len(_pre_layer5[0])):
ie_i = []
last_ie_i = 0
first_loop = 0
for h_val in np.linspace(hidden_min[i], hidden_max[i], num_step):
do_hidden = _pre_layer5.copy()
do_hidden[:, i] = h_val
pre_final = self.model2.predict(do_hidden.reshape(l_shape))
this_ie_i = np.mean(pre_final,axis=0)[t_dix]
#delta
if first_loop == 0:
first_loop = 1
last_ie_i = this_ie_i
last_val = h_val
continue
if (h_val - last_val) != 0.0:
delta = (this_ie_i - last_ie_i) / (h_val - last_val)
else:
delta = (this_ie_i - last_ie_i)
ie_i.append(delta)
last_ie_i = this_ie_i
last_val = h_val
ie.append(np.array(ie_i))
return np.array(ie)
def get_gradient(self, x, t_dix, min, max):
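        # Descriptive note: this estimates the sensitivity of the target class t_dix to each
        # hidden unit by bumping that unit's activation by 1.0 and measuring the change in the
        # mean prediction, i.e. a coarse one-sided finite-difference estimate of the gradient.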
pre_layer5 = self.model1.predict(x)
l_shape = pre_layer5.shape
ie = []
hidden_min = min.reshape(-1)
hidden_max = max.reshape(-1)
num_step = CALSAL_STEP
_pre_layer5 = np.reshape(pre_layer5, (len(pre_layer5), -1))
for i in range (len(_pre_layer5[0])):
pre_final = self.model.predict(x)
last_ie_i = np.mean(pre_final, axis=0)[t_dix]
do_hidden = _pre_layer5.copy()
last_val = do_hidden[:, i]
delta_x = np.ones(do_hidden[:, i].shape)
do_hidden[:, i] = np.add(last_val, delta_x)
#do_hidden[:, i] = last_val * 1.05
pre_final = self.model2.predict(do_hidden.reshape(l_shape))
this_ie_i = np.mean(pre_final, axis=0)[t_dix]
#delta = np.divide((this_ie_i - last_ie_i), (last_val * 0.05))
delta = this_ie_i - last_ie_i
ie.append(np.array(delta))
#return (np.array(np.mean(np.array(ie),axis=1)))
return np.array(ie)
# return
def get_die_do_h(self, x, x_p, min, max):
pre_layer5 = self.model1.predict(x)
pre_layer5_p = self.model1.predict(x_p)
ie = []
hidden_min = min
hidden_max = max
num_step = 16
for i in range (len(pre_layer5[0])):
ie_i = []
for h_val in np.linspace(hidden_min[i], hidden_max[i], num_step):
do_hidden = pre_layer5.copy()
do_hidden[:, i] = h_val
pre_final = self.model2.predict(do_hidden)
pre_final_ori = self.model2.predict(pre_layer5_p)
ie_i.append(np.mean(np.absolute(pre_final - pre_final_ori),axis=0))
ie.append(np.mean(np.array(ie_i),axis=0))
return np.array(ie)
# return
def get_final(self, x, x_p, min, max):
return np.mean(self.model.predict(x),axis=0)
def get_h_range(self, x):
pre_layer5 = self.model1.predict(x)
max = np.max(pre_layer5,axis=0)
min = np.min(pre_layer5, axis=0)
return min, max
def get_in_range(self, x):
max = np.max(x,axis=0)
min = np.min(x, axis=0)
return min, max
def repair(self):
# repair
        print('Start repairing...')
print('alpha: {}'.format(self.alpha))
options = {'c1': 0.41, 'c2': 0.41, 'w': 0.8}
#'''# original
optimizer = ps.single.GlobalBestPSO(n_particles=20, dimensions=self.rep_n, options=options,
bounds=([[-10.0] * self.rep_n, [10.0] * self.rep_n]),
init_pos=np.ones((20, self.rep_n), dtype=float), ftol=1e-3,
ftol_iter=10)
#'''
# Perform optimization
best_cost, best_pos = optimizer.optimize(self.pso_fitness_func, iters=100)
# Obtain the cost history
# print(optimizer.cost_history)
# Obtain the position history
# print(optimizer.pos_history)
# Obtain the velocity history
# print(optimizer.velocity_history)
        #print('neuron to repair: {} at layer: {}'.format(self.r_neuron, self.r_layer))
#print('best cost: {}'.format(best_cost))
#print('best pos: {}'.format(best_pos))
self.r_weight = best_pos
return best_pos
# optimization target perturbed sample has the same label as clean sample
def pso_fitness_func(self, weight):
result = []
for i in range (0, int(len(weight))):
r_weight = weight[i]
cost = self.pso_test_rep(r_weight)
#print('cost: {}'.format(cost))
result.append(cost)
#print(result)
return result
def pso_test_rep(self, r_weight):
#result = []
result = 0.0
tot_count = 0
correct = 0
# per particle
for idx in range(self.mini_batch):
X_batch, Y_batch = self.gen.next()
X_batch_perturbed = self.get_perturbed_input(X_batch)
p_prediction = self.model1.predict(X_batch_perturbed)
o_prediction = self.model1.predict(X_batch)
l_shape = p_prediction.shape
_p_prediction = np.reshape(p_prediction, (len(p_prediction), -1))
_o_prediction = np.reshape(o_prediction, (len(o_prediction), -1))
do_hidden = _p_prediction.copy()
o_hidden = _o_prediction.copy()
for i in range (0, len(self.rep_index)):
rep_idx = int(self.rep_index[i])
do_hidden[:, rep_idx] = (r_weight[i]) * _p_prediction[:, rep_idx]
o_hidden[:, rep_idx] = (r_weight[i]) * _o_prediction[:, rep_idx]
p_prediction = self.model2.predict(do_hidden.reshape(l_shape))
o_prediction = self.model2.predict(o_hidden.reshape(l_shape))
# cost is the difference
#cost = np.abs(p_prediction - Y_batch)
#cost = np.mean(cost,axis=0)
#result.append(cost)
labels = np.argmax(Y_batch, axis=1)
predict = np.argmax(p_prediction, axis=1)
o_predict = np.argmax(o_prediction, axis=1)
o_correct = np.sum(labels == o_predict)
correct = correct + o_correct
o_target = (labels == self.target * np.ones(predict.shape))
pre_target = (predict == self.target * np.ones(predict.shape))
attack_success = np.sum(predict == self.target * np.ones(predict.shape)) - np.sum(o_target & pre_target)
#cost = np.sum(labels != predict)
result = result + attack_success
tot_count = tot_count + len(labels)
result = result / tot_count
correct = correct / tot_count
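        # Fitness combines the attack success rate and the loss of benign accuracy, weighted by alpha.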
cost = (1.0 - self.alpha) * result + self.alpha * (1 - correct)
return cost
def pso_test(self, r_weight, target):
result = 0.0
correct = 0.0
tot_count = 0
if len(self.rep_index) != 0:
# per particle
for idx in range(self.mini_batch):
X_batch, Y_batch = self.gen.next()
X_batch_perturbed = self.get_perturbed_input(X_batch)
o_prediction = self.model1.predict(X_batch)
p_prediction = self.model1.predict(X_batch_perturbed)
_p_prediction = np.reshape(p_prediction, (len(p_prediction), -1))
_o_prediction = np.reshape(o_prediction, (len(o_prediction), -1))
l_shape = p_prediction.shape
do_hidden = _p_prediction.copy()
o_hidden = _o_prediction.copy()
for i in range (0, len(self.rep_index)):
rep_idx = int(self.rep_index[i])
do_hidden[:, rep_idx] = (r_weight[i]) * _p_prediction[:, rep_idx]
o_hidden[:, rep_idx] = (r_weight[i]) * _o_prediction[:, rep_idx]
p_prediction = self.model2.predict(do_hidden.reshape(l_shape))
o_prediction = self.model2.predict(o_hidden.reshape(l_shape))
labels = np.argmax(Y_batch, axis=1)
predict = np.argmax(p_prediction, axis=1)
o_predict = np.argmax(o_prediction, axis=1)
# cost is the difference
o_target = (labels == target * np.ones(predict.shape))
pre_target = (predict == target * np.ones(predict.shape))
attack_success = np.sum(predict == target * np.ones(predict.shape)) - np.sum(o_target & pre_target)
#diff = np.sum(labels != predict)
result = result + attack_success
tot_count = tot_count + len(labels)
o_correct = np.sum(labels == o_predict)
correct = correct + o_correct
result = result / tot_count
correct = correct / tot_count
else:
# per particle
for idx in range(self.mini_batch):
X_batch, Y_batch = self.gen.next()
X_batch_perturbed = self.get_perturbed_input(X_batch)
o_prediction = np.argmax(self.model.predict(X_batch), axis=1)
p_prediction = self.model.predict(X_batch_perturbed)
labels = np.argmax(Y_batch, axis=1)
predict = np.argmax(p_prediction, axis=1)
#o_target = (labels == target * np.ones(predict.shape))
#pre_target = (predict == target * np.ones(predict.shape))
# cost is the difference
#attack_success = np.sum(predict == target * np.ones(predict.shape)) - np.sum(o_target & pre_target)
attack_success = np.sum(predict == target * np.ones(predict.shape))
#diff = np.sum(labels != predict)
result = result + attack_success
o_correct = np.sum(labels == o_prediction)
correct = correct + o_correct
tot_count = tot_count + len(labels)
result = result / tot_count
correct = correct / tot_count
return result, correct
|
the-stack_0_20492 | import sys
import importlib
import datetime
bl_info = {
"name": "Photon-v2",
"description": "A hobby renderer featuring PBR.",
"author": "Tzu-Chieh Chang (D01phiN)",
"version": (0, 0, 0),
"blender": (2, 78, 0),
"location": "Info Header >> Render Engine Menu",
"warning": "WIP...", # showing warning icon and text in addons panel
"category": "Render"
}
print("PhotonBlend initializing...")
print(datetime.datetime.now())
bmodulePackageName = "bmodule"
bmoduleNames = [
"p2exporter",
"material",
"renderer",
"light",
"node",
"world"
]
bmoduleFullNames = []
for bmoduleName in bmoduleNames:
bmoduleFullNames.append("{}.{}.{}".format(__name__, bmodulePackageName, bmoduleName))
for bmoduleFullName in bmoduleFullNames:
if bmoduleFullName in sys.modules:
importlib.reload(sys.modules[bmoduleFullName])
else:
importlib.import_module(bmoduleFullName)
def register():
for moduleName in bmoduleFullNames:
if moduleName in sys.modules:
if hasattr(sys.modules[moduleName], "register"):
sys.modules[moduleName].register()
else:
print("bmodule %s should contain a register() function" % moduleName)
else:
print("bmodule %s is not correctly imported" % moduleName)
def unregister():
for moduleName in bmoduleFullNames:
if moduleName in sys.modules:
if hasattr(sys.modules[moduleName], "unregister"):
sys.modules[moduleName].unregister()
else:
print("bmodule %s should contain an unregister() function" % moduleName)
else:
print("bmodule %s is not correctly imported" % moduleName)
|
the-stack_0_20493 | import torch
import torch.nn as nn
import torch.nn.functional as F
"""
the input x in both networks should be [o, g], where o is the observation and g is the goal.
"""
# define the actor network
class actor(nn.Module):
def __init__(self, env_params):
super(actor, self).__init__()
self.max_action = env_params['action_max']
self.fc1 = nn.Linear(env_params['obs'] + env_params['goal'], 256)
self.fc2 = nn.Linear(256, 256)
self.fc3 = nn.Linear(256, 256)
self.action_out = nn.Linear(256, env_params['action'])
def forward(self, x):
# print("------------------")
# print(x)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.relu(self.fc3(x))
# print(x)
# print(torch.tanh(self.action_out(x)))
actions = self.max_action.item() * torch.tanh(self.action_out(x))
return actions
class critic(nn.Module):
def __init__(self, env_params):
super(critic, self).__init__()
# print("models.py/critic: ", env_params['action_max'])
# self.max_action = env_params['action_max']
self.max_action = torch.Tensor([env_params['action_max']])
self.fc1 = nn.Linear(env_params['obs'] + env_params['goal'] + env_params['action'], 256)
self.fc2 = nn.Linear(256, 256)
self.fc3 = nn.Linear(256, 256)
self.q_out = nn.Linear(256, 1)
def forward(self, x, actions):
x = torch.cat([x, actions / self.max_action], dim=1)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.relu(self.fc3(x))
q_value = self.q_out(x)
return q_value
|
the-stack_0_20494 | from typing import Optional
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import sys
import shutil
import logging
logging.basicConfig(level=logging.INFO)
import cv2
import numpy as np
import onnx
from onnx_tf.backend import prepare
import torch
import tensorflow as tf
from typing import Union
# # Need to import loaded model's class definition.
# from try_lode_model import MyModule
class Torch2TFLiteConverter:
def __init__(
self,
torch_model_path: str,
tflite_model_save_path: str,
sample_file_path: Optional[str] = None,
target_shape: tuple = (224, 224, 3),
seed: int = 10,
normalize: bool = True,
use_jit: bool = False
):
self.torch_model_path = torch_model_path
self.tflite_model_path = tflite_model_save_path
self.sample_file_path = sample_file_path
self.target_shape = target_shape
self.seed = seed
self.normalize = normalize
self.tmpdir = '/tmp/torch2tflite/'
self.__check_tmpdir()
self.onnx_model_path = os.path.join(self.tmpdir, 'model.onnx')
self.tf_model_path = os.path.join(self.tmpdir, 'tf_model')
self.torch_model = self.load_torch_model(use_jit=use_jit)
self.sample_data = self.load_sample_input(sample_file_path, target_shape, seed, normalize)
def convert(self):
self.torch2onnx()
self.onnx2tf()
self.tf2tflite()
torch_output = self.inference_torch()
tflite_output = self.inference_tflite(self.load_tflite())
self.calc_error(torch_output, tflite_output)
def __check_tmpdir(self):
try:
if os.path.exists(self.tmpdir) and os.path.isdir(self.tmpdir):
shutil.rmtree(self.tmpdir)
logging.info(f'Old temp directory removed')
os.makedirs(self.tmpdir, exist_ok=True)
logging.info(f'Temp directory created at {self.tmpdir}')
except Exception:
logging.error('Can not create temporary directory, exiting!')
sys.exit(-1)
def load_torch_model(self, use_jit: bool = False) -> Union[torch.nn.Module, torch.jit.ScriptModule]:
try:
if self.torch_model_path.endswith('.pth') or self.torch_model_path.endswith('.pt'):
if not use_jit:
model = torch.load(self.torch_model_path, map_location='cpu')
else:
logging.info('Use jit to load')
model = torch.jit.load(self.torch_model_path, map_location='cpu')
model = model.eval()
logging.info('PyTorch model successfully loaded and mapped to CPU')
logging.info('Type of loaded module %s', type(model))
return model
else:
logging.error('Specified file path not compatible with torch2tflite, exiting!')
sys.exit(-1)
except Exception:
            logging.error('Cannot load PyTorch model. Please make sure '
                          'that the model was saved like `torch.save(model, PATH)`')
raise
# sys.exit(-1)
def load_tflite(self):
interpret = tf.lite.Interpreter(self.tflite_model_path)
interpret.allocate_tensors()
logging.info(f'TFLite interpreter successfully loaded from, {self.tflite_model_path}')
return interpret
@staticmethod
def load_sample_input(
file_path: Optional[str] = None,
target_shape: tuple = (224, 224, 3),
seed: int = 10,
normalize: bool = True
):
if file_path is not None:
if (len(target_shape) == 3 and target_shape[-1] == 1) or len(target_shape) == 2:
imread_flags = cv2.IMREAD_GRAYSCALE
elif len(target_shape) == 3 and target_shape[-1] == 3:
imread_flags = cv2.IMREAD_COLOR
else:
imread_flags = cv2.IMREAD_ANYCOLOR + cv2.IMREAD_ANYDEPTH
try:
logging.warning('dsize=%s', target_shape[:2])
img = cv2.resize(
src=cv2.imread(file_path, imread_flags),
dsize=target_shape[:2],
interpolation=cv2.INTER_LINEAR
)
if len(img.shape) == 3:
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
if normalize:
img = img * 1. / 255
img = img.astype(np.float32)
sample_data_np = np.transpose(img, (2, 0, 1))[np.newaxis, :, :, :]
sample_data_torch = torch.from_numpy(sample_data_np)
logging.info(f'Sample input successfully loaded from, {file_path}')
except Exception:
logging.error(f'Can not load sample input from, {file_path}')
raise
# sys.exit(-1)
else:
logging.info(f'Sample input file path not specified, random data will be generated')
np.random.seed(seed)
data = np.random.random(target_shape).astype(np.float32)
sample_data_np = np.transpose(data, (2, 0, 1))[np.newaxis, :, :, :]
sample_data_torch = torch.from_numpy(sample_data_np)
logging.info(f'Sample input randomly generated')
return {'sample_data_np': sample_data_np, 'sample_data_torch': sample_data_torch}
def torch2onnx(self) -> None:
torch.onnx.export(
model=self.torch_model,
args=self.sample_data['sample_data_torch'],
f=self.onnx_model_path,
verbose=False,
export_params=True,
do_constant_folding=False,
input_names=['input'],
opset_version=10,
output_names=['output'])
def onnx2tf(self) -> None:
onnx_model = onnx.load(self.onnx_model_path)
onnx.checker.check_model(onnx_model)
tf_rep = prepare(onnx_model)
tf_rep.export_graph(self.tf_model_path)
def tf2tflite(self) -> None:
converter = tf.lite.TFLiteConverter.from_saved_model(self.tf_model_path)
tflite_model = converter.convert()
with open(self.tflite_model_path, 'wb') as f:
f.write(tflite_model)
def inference_torch(self) -> np.ndarray:
y_pred = self.torch_model(self.sample_data['sample_data_torch'])
return y_pred.detach().cpu().numpy()
def inference_tflite(self, tflite_model) -> np.ndarray:
input_details = tflite_model.get_input_details()
output_details = tflite_model.get_output_details()
tflite_model.set_tensor(input_details[0]['index'], self.sample_data['sample_data_np'])
tflite_model.invoke()
y_pred = tflite_model.get_tensor(output_details[0]['index'])
return y_pred
@staticmethod
def calc_error(result_torch, result_tflite):
mse = ((result_torch - result_tflite) ** 2).mean(axis=None)
mae = np.abs(result_torch - result_tflite).mean(axis=None)
logging.info(f'MSE (Mean-Square-Error): {mse}\tMAE (Mean-Absolute-Error): {mae}')
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--torch-path', type=str, required=True)
parser.add_argument('--use-jit', action='store_true')
parser.add_argument('--tflite-path', type=str, required=True)
parser.add_argument('--target-shape', type=int, nargs=3, default=[224, 224, 3])
parser.add_argument('--sample-file', type=str)
parser.add_argument('--seed', type=int, default=10)
args = parser.parse_args()
args.target_shape = tuple(args.target_shape)
logging.info('args.torch_path=%s', args.torch_path)
logging.info('args.target_shape=%s', args.target_shape)
logging.info('args.use_jit=%s', args.use_jit)
conv = Torch2TFLiteConverter(
args.torch_path,
args.tflite_path,
args.sample_file,
args.target_shape,
args.seed,
use_jit=args.use_jit
)
conv.convert()
sys.exit(0)
|
the-stack_0_20495 | import datetime
import json
import collections
from flask import Blueprint, request, render_template
from sqlalchemy import func
from ..app import db_session
from ..models import PageView, Post
from ..utils.requests import from_request_get_feed_params
from ..utils.posts import get_posts
blueprint = Blueprint('stats', __name__,
template_folder='../templates', static_folder='../static')
def _round_date(d):
"""Round date to the previous Monday"""
return d - datetime.timedelta(d.weekday())
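# Example (illustrative): _round_date(datetime.date(2020, 1, 9)) returns datetime.date(2020, 1, 6),
# since 2020-01-09 is a Thursday (weekday() == 3) and subtracting 3 days lands on the preceding Monday.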
@blueprint.route('/post_stats', methods=['GET'])
@PageView.logged
def post_stats():
""" Return the total views, distinct views, total likes and total comments
for a given post """
post_id = request.args.get('post_id', '')
post = (db_session.query(Post)
.filter(Post.path == post_id)
.first())
if not post:
return json.dumps({})
return json.dumps({'all_views': post.view_count,
'distinct_views': post.view_user_count,
'total_likes': post.vote_count,
'total_comments': post.comment_count})
@blueprint.route('/stats', methods=['GET'])
@PageView.logged
def stats():
""" Render the stats page, creating graphs for
pageviews daily, weekly, post creation weekly and
cumulative posts per week
"""
feed_params = from_request_get_feed_params(request)
# count daily and weekly pageviews
datetime_pageviews = (db_session.query(PageView.created_at, func.count(PageView.id))
.group_by(PageView.created_at)
.all())
daily_pageviews = {}
weekly_pageviews = {}
for k, v in datetime_pageviews:
d = k.date()
wk = _round_date(d)
daily_pageviews[d] = daily_pageviews.get(d, 0) + v
weekly_pageviews[wk] = weekly_pageviews.get(wk, 0) + v
# count post creation (created and updated)
posts = (db_session.query(Post)
.filter(Post.is_published).all())
created_at_counts = collections.Counter([_round_date(post.created_at.date()) for post in posts])
updated_at_counts = collections.Counter([_round_date(post.updated_at.date()) for post in posts])
all_week_keys = set(created_at_counts.keys()).union(updated_at_counts.keys())
weekly_posts_created_and_updated = dict((k, [created_at_counts.get(k, 0), updated_at_counts.get(k, 0)])
for k in all_week_keys)
# cumulative weekly post created
weekly_cumulative_posts = {}
cum_created_val = 0
for week in sorted(all_week_keys):
cum_created_val += created_at_counts.get(week, 0)
weekly_cumulative_posts[week] = cum_created_val
# count post per author
posts, _ = get_posts(feed_params)
post_per_author_count = collections.Counter([author.format_name for post in posts for author in post.authors])
return render_template('stats.html',
feed_params=feed_params,
daily_pageviews=daily_pageviews,
weekly_posts_created_and_updated=weekly_posts_created_and_updated,
weekly_cumulative_posts=weekly_cumulative_posts,
weekly_pageviews=weekly_pageviews,
post_per_author_count=post_per_author_count)
|
the-stack_0_20498 | import subprocess
import sys
import setuptools
def install(package):
subprocess.call([sys.executable, "-m", "pip", "install", package])
with open("README.md", "r") as fh:
long_description = fh.read()
with open('requirements.txt') as f:
requirements = f.read().splitlines()
setuptools.setup(
name='deezer-dl',
version='1.0.1',
author='Bruno Kanazawa',
author_email='[email protected]',
python_requires='>=3',
install_requires=requirements,
description="Download songs from Deezer",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/doulwyi/deezer-dl",
packages=setuptools.find_packages(),
include_package_data=True,
entry_points={
'console_scripts': [
'deezer_dl = deezer_dl.deezer_dl:deezer_dl',
],
},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
install('git+https://github.com/nficano/pytube.git')
|
the-stack_0_20499 | import numpy as np
import tensorflow as tf
from sklearn.utils import shuffle
import unsupervised_learning.tensorflow.utils as utils
class AutoEncoder(object):
""" Simple autoencoder model used for unsupervised pre-training. """
def __init__(self, num_input, num_hidden, learning_rate, id):
self.num_hidden = num_hidden
self.id = id
self.build(num_input, num_hidden, learning_rate)
def set_session(self, session):
self.session = session
def build(self, num_input, num_hidden, learning_rate):
self.W = tf.Variable(tf.random_normal(shape=(num_input, num_hidden)))
self.bh = tf.Variable(np.zeros(num_hidden).astype(np.float32))
self.bo = tf.Variable(np.zeros(num_input).astype(np.float32))
self.X_in = tf.placeholder(tf.float32, shape=(None, num_input), name='X_in')
self.Z = self.encode(self.X_in)
logits = self.decode_logits(self.Z)
self.X_hat = tf.nn.sigmoid(logits)
self.cost = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(
labels=self.X_in,
logits=logits))
self.train_op = tf.train.AdamOptimizer(learning_rate).minimize(self.cost)
def fit(self, X, epochs, batch_size, show_fig=False):
num_examples = X.shape[0]
n_batches = num_examples // batch_size
costs = []
print("training autoencoder: %s" % self.id)
for i in range(epochs):
print("epoch:", i)
X = shuffle(X)
for j in range(n_batches):
batch = X[j*batch_size:(j*batch_size + batch_size)]
_, c = self.session.run((self.train_op, self.cost), feed_dict={self.X_in: batch})
if j % 10 == 0:
print("j / n_batches:", j, "/", n_batches, "cost:", c)
costs.append(c)
if show_fig:
utils.show_costs(costs)
def transform(self, X):
return self.session.run(self.Z, feed_dict={self.X_in: X})
def predict(self, X):
return self.session.run(self.X_hat, feed_dict={self.X_in: X})
def encode(self, X):
Z = tf.nn.sigmoid(tf.matmul(X, self.W) + self.bh)
return Z
def decode_logits(self, Z):
return tf.matmul(Z, tf.transpose(self.W)) + self.bo
class RBM(object):
""" Restricted Boltzman Machine for unsupervised pre-training. """
def __init__(self, num_input, num_hidden, learning_rate, id):
self.num_input = num_input
self.num_hidden = num_hidden
self.id = id
self.build(num_input, num_hidden, learning_rate)
def set_session(self, session):
self.session = session
def build(self, num_input, num_hidden, learning_rate):
# params
self.W = tf.Variable(tf.random_normal(shape=(num_input, num_hidden)) * np.sqrt(2.0 / num_hidden))
# note: without limiting variance, you get numerical stability issues
self.c = tf.Variable(np.zeros(num_hidden).astype(np.float32))
self.b = tf.Variable(np.zeros(num_input).astype(np.float32))
# data
self.X_in = tf.placeholder(tf.float32, shape=(None, num_input))
# conditional probabilities (also possible to do this using tf.contrib.distributions.Bernoulli)
visible_layer = self.X_in
self.p_h_given_v = tf.nn.sigmoid(tf.matmul(visible_layer, self.W) + self.c)
r = tf.random_uniform(shape=tf.shape(self.p_h_given_v))
hidden_layer = tf.to_float(r < self.p_h_given_v)
p_v_given_h = tf.nn.sigmoid(tf.matmul(hidden_layer, tf.transpose(self.W)) + self.b)
r = tf.random_uniform(shape=tf.shape(p_v_given_h))
X_sample = tf.to_float(r < p_v_given_h)
# build the objective
objective = tf.reduce_mean(self.free_energy(self.X_in)) - tf.reduce_mean(self.free_energy(X_sample))
self.train_op = tf.train.AdamOptimizer(learning_rate).minimize(objective)
# build the cost (not used for optimization, just for output and verification during training)
Z = self.encode(self.X_in)
logits = self.decode_logits(Z)
self.cost = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(
labels=self.X_in,
logits=logits))
def fit(self, X, epochs, batch_size, show_fig=False):
num_examples, input_size = X.shape
n_batches = num_examples // batch_size
costs = []
print("training rbm: %s" % self.id)
for i in range(epochs):
print("epoch:", i)
X = shuffle(X)
for j in range(n_batches):
batch = X[j * batch_size:(j * batch_size + batch_size)]
_, c = self.session.run((self.train_op, self.cost), feed_dict={self.X_in: batch})
if j % 10 == 0:
print("j / n_batches:", j, "/", n_batches, "cost:", c)
costs.append(c)
if show_fig:
utils.show_costs(costs)
def free_energy(self, V):
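        # For a binary RBM this computes the standard free energy
        #     F(v) = -b^T v - sum_j log(1 + exp(c_j + (vW)_j)),
        # with log(1 + exp(.)) evaluated via tf.nn.softplus for numerical stability.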
b = tf.reshape(self.b, (self.num_input, 1))
first_term = -tf.matmul(V, b)
first_term = tf.reshape(first_term, (-1,))
second_term = -tf.reduce_sum(
# tf.log(1 + tf.exp(tf.matmul(V, self.W) + self.c)),
tf.nn.softplus(tf.matmul(V, self.W) + self.c),
axis=1)
return first_term + second_term
def encode(self, X):
Z = tf.nn.sigmoid(tf.matmul(X, self.W) + self.c)
return Z
def decode_logits(self, Z):
return tf.matmul(Z, tf.transpose(self.W)) + self.b
def transform(self, X):
return self.session.run(self.p_h_given_v, feed_dict={self.X_in: X})
class DNN(object):
""" Simple multi-layer neural network, which uses an unsupervised model for pre-training. """
def __init__(self, num_input, hidden_layer_sizes, num_classes,
learning_rate, unsupervised_model_fn):
self.hidden_layers = []
input_size = num_input
for i, output_size in enumerate(hidden_layer_sizes):
ae = unsupervised_model_fn(input_size, output_size, learning_rate, i)
self.hidden_layers.append(ae)
input_size = output_size
self.build_final_layer(num_input, hidden_layer_sizes[-1], num_classes, learning_rate)
def set_session(self, session):
self.session = session
for layer in self.hidden_layers:
layer.set_session(session)
def build_final_layer(self, num_input, num_hidden, num_classes, learning_rate):
# initialize logistic regression layer
self.W = tf.Variable(tf.random_normal(shape=(num_hidden, num_classes)))
self.b = tf.Variable(np.zeros(num_classes).astype(np.float32))
self.X = tf.placeholder(tf.float32, shape=(None, num_input), name='X')
labels = tf.placeholder(tf.int32, shape=(None,), name='labels')
self.Y = labels
logits = self.forward(self.X)
self.cost = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits,
labels=labels
)
)
self.train_op = tf.train.AdamOptimizer(learning_rate).minimize(self.cost)
self.prediction = tf.argmax(logits, 1)
def fit(self, X, Y, Xtest, Ytest, epochs, batch_size, pretrain=False, show_fig=False):
num_examples = len(X)
print("greedy layer-wise training of autoencoders...")
pretrain_epochs = 1
if not pretrain:
pretrain_epochs = 0
current_input = X
for ae in self.hidden_layers:
ae.fit(current_input, epochs=pretrain_epochs, batch_size=batch_size)
# create current_input for the next layer
current_input = ae.transform(current_input)
n_batches = num_examples // batch_size
costs = []
print("supervised training...")
for i in range(epochs):
print("epoch:", i)
X, Y = shuffle(X, Y)
for j in range(n_batches):
Xbatch = X[j * batch_size:(j * batch_size + batch_size)]
Ybatch = Y[j * batch_size:(j * batch_size + batch_size)]
self.session.run(
self.train_op,
feed_dict={self.X: Xbatch, self.Y: Ybatch}
)
if j % 10 == 0:
c, p = self.session.run(
(self.cost, self.prediction),
feed_dict={self.X: Xtest, self.Y: Ytest})
error_rate = np.mean(p != Ytest)
print("j / n_batches:", j, "/", n_batches, "cost:", c, "error:", error_rate)
costs.append(c)
if show_fig:
utils.show_costs(costs)
def forward(self, X):
current_input = X
for ae in self.hidden_layers:
Z = ae.encode(current_input)
current_input = Z
# logistic layer
logits = tf.matmul(current_input, self.W) + self.b
return logits
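# A minimal usage sketch for the pre-trained DNN (illustrative only; the data loading,
# the 784/10 sizes for flattened MNIST and the hyperparameters are assumptions):
#
#   Xtrain, Ytrain, Xtest, Ytest = ...   # e.g. flattened, normalized MNIST
#   dnn = DNN(784, [500, 300], 10, learning_rate=1e-3, unsupervised_model_fn=AutoEncoder)  # or RBM
#   with tf.Session() as session:
#       session.run(tf.global_variables_initializer())
#       dnn.set_session(session)
#       dnn.fit(Xtrain, Ytrain, Xtest, Ytest, epochs=3, batch_size=256, pretrain=True)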
class DenseLayer(object):
def __init__(self, m, n, activation=lambda x: x):
self.W = tf.Variable(tf.random_normal((m, n)) * 2 / np.sqrt(m))
self.b = tf.Variable(tf.zeros(n), dtype=tf.float32)
self.activation = activation
def forward(self, X):
return self.activation(tf.matmul(X, self.W) + self.b)
class VariationalAutoencoder(object):
"""
Simple variational autoencoder implementation.
This implementations is based on:
https://deeplearningcourses.com/c/deep-learning-gans-and-variational-autoencoders
"""
SMOOTHING_EPSILON = 1e-6 # to not get a number too close to zero, which would cause a singularity
def __init__(self, num_input, num_hiddens):
self.X = tf.placeholder(tf.float32, shape=[None, num_input])
#encoder
self.encoder_layers = []
current_input_size = num_input
for current_output_size in num_hiddens[:1]:
layer = DenseLayer(current_input_size, current_output_size,
activation=tf.nn.relu)
self.encoder_layers.append(layer)
current_input_size = current_output_size
num_z = num_hiddens[-1]
final_encoder_layer = DenseLayer(current_input_size, 2 * num_z) # 2 * num_z, because z_i = (mean, std-dev)
self.encoder_layers.append(final_encoder_layer)
current_layer_value = self.X
for layer in self.encoder_layers:
current_layer_value = layer.forward(current_layer_value)
self.means = current_layer_value[:, :num_z]
# use softplus to ensure std-dev is not negative
self.stddev = tf.nn.softplus(current_layer_value[:, num_z:]) + self.SMOOTHING_EPSILON
# @deprecated since TF r1.5
# with st.value_type(st.SampleValue()):
# # this returns q(Z), the distribution of the latent variable Z
# self.Z = st.StochasticTensor(tf.distributions.Normal(loc=self.means, scale=self.stddev))
self.Z = tf.distributions.Normal(loc=self.means, scale=self.stddev).sample()
        # alternative A: equivalent to the previous line, but using the "reparameterization trick"
#standard_normal = tf.distributions.Normal(
# loc=np.zeros(num_z, dtype=np.float32),
# scale=np.ones(num_z, dtype=np.float32)
#)
#e = standard_normal.sample(tf.shape(self.means)[0])
#self.Z = e * self.stddev + self.means
# alternative B:
#eps = tf.random_normal((tf.shape(self.X)[0], num_z), 0, 1,
# dtype=tf.float32)
# z = sigma*epsilon + mu
# self.Z = tf.sqrt(tf.exp(self.stddev)) * eps + self.means
# decoder
self.decoder_layers = []
current_input_size = num_z
for current_output_size in reversed(num_hiddens[:-1]):
layer = DenseLayer(current_input_size, current_output_size,
activation=tf.nn.relu)
self.decoder_layers.append(layer)
current_input_size = current_output_size
final_decoder_layer = DenseLayer(current_input_size, num_input)
self.decoder_layers.append(final_decoder_layer)
# logits
current_layer_value = self.Z
for layer in self.decoder_layers:
current_layer_value = layer.forward(current_layer_value)
logits = current_layer_value
posterior_predictive_logits = logits
self.X_hat_distribution = tf.distributions.Bernoulli(logits=logits)
# take a sample from X_hat, which is called the posterior predictive sample
self.posterior_predictive = self.X_hat_distribution.sample()
self.posterior_predictive_probs = tf.nn.sigmoid(logits)
# take a sample from a Z ~ N(0, 1) and feed it through the decoder, called the prior predictive sample
standard_normal = tf.distributions.Normal(
loc=np.zeros(num_z, dtype=np.float32),
scale=np.ones(num_z, dtype=np.float32)
)
z_std = standard_normal.sample(1)
current_layer_value = z_std
for layer in self.decoder_layers:
current_layer_value = layer.forward(current_layer_value)
logits = current_layer_value
prior_predictive_dist = tf.distributions.Bernoulli(logits=logits)
self.prior_predictive = prior_predictive_dist.sample()
self.prior_predictive_probs = tf.nn.sigmoid(logits)
        # prior predictive from a given input, used for generating visualizations
self.Z_input = tf.placeholder(tf.float32, shape=[None, num_z])
current_layer_value = self.Z_input
for layer in self.decoder_layers:
current_layer_value = layer.forward(current_layer_value)
logits = current_layer_value
self.prior_predictive_from_input_probs = tf.nn.sigmoid(logits)
# cost function
kl = -tf.log(self.stddev) + 0.5 * (self.stddev ** 2 + self.means ** 2) - 0.5
kl = tf.reduce_sum(kl, axis=1)
# equals (before TF r1.5):
# kl = tf.reduce_sum(tf.distributions.kl_divergence(self.Z, standard_normal), axis=1)
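        # Per-dimension closed form being summed above:
        #     KL(N(mu, sigma) || N(0, 1)) = -log(sigma) + (sigma^2 + mu^2) / 2 - 1/2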
expected_log_likelihood = tf.reduce_sum(-tf.nn.sigmoid_cross_entropy_with_logits(
labels=self.X,
logits=posterior_predictive_logits
), axis=1)
# equals:
# expected_log_likelihood = tf.reduce_sum(self.X_hat_distribution.log_prob(self.X), axis=1)
elbo = tf.reduce_mean(expected_log_likelihood - kl)
self.cost = -elbo
self.train_op = tf.train.AdamOptimizer(learning_rate=1e-3).minimize(self.cost)
# setup session
self.sess = tf.InteractiveSession()
self.sess.run(tf.global_variables_initializer())
def fit(self, X, epochs, batch_size):
costs = []
n_batches = len(X) // batch_size
print('n_batches: {}'.format(n_batches))
for epoch in range(epochs):
print('epoch: {}'.format(epoch))
np.random.shuffle(X)
for b in range(n_batches):
batch = X[b*batch_size:(b+1)*batch_size]
_, cost = self.sess.run([self.train_op, self.cost], feed_dict={
self.X: batch
})
costs.append(cost)
if b % 100 == 0:
print('@{:4d} > cost: {:.3f}'.format(b, cost))
utils.show_costs(costs)
def transform(self, X):
return self.sess.run(self.means, feed_dict={
self.X: X
})
def prior_predictive_with_input(self, Z):
return self.sess.run(
self.prior_predictive_from_input_probs, feed_dict={
self.Z_input: Z
}
)
def posterior_predictive_sample(self, X):
"""Returns a sample from p(x_new | X)."""
return self.sess.run(self.posterior_predictive, feed_dict={
self.X: X
})
def prior_predictive_sample_with_probs(self):
"""Returns a sample from p(x_new | z), where z ~ N(0, 1)."""
return self.sess.run([self.prior_predictive, self.prior_predictive_probs])
|
the-stack_0_20500 | import logging
from . import util
from . import SerialInterface, TCPInterface, BROADCAST_NUM
from pubsub import pub
import time
import sys
import threading
from dotmap import DotMap
"""The interfaces we are using for our tests"""
interfaces = None
"""A list of all packets we received while the current test was running"""
receivedPackets = None
testsRunning = False
testNumber = 0
sendingInterface = None
def onReceive(packet, interface):
"""Callback invoked when a packet arrives"""
if sendingInterface == interface:
pass
# print("Ignoring sending interface")
else:
# print(f"From {interface.stream.port}: {packet}")
p = DotMap(packet)
if p.decoded.portnum == "TEXT_MESSAGE_APP":
            # We only care about clear-text packets
receivedPackets.append(p)
def onNode(node):
"""Callback invoked when the node DB changes"""
print(f"Node changed: {node}")
def subscribe():
"""Subscribe to the topics the user probably wants to see, prints output to stdout"""
pub.subscribe(onNode, "meshtastic.node")
def testSend(fromInterface, toInterface, isBroadcast=False, asBinary=False, wantAck=False):
"""
Sends one test packet between two nodes and then returns success or failure
Arguments:
        fromInterface {[type]} -- interface that sends the test packet
        toInterface {[type]} -- interface expected to receive the packet
Returns:
boolean -- True for success
"""
global receivedPackets
receivedPackets = []
fromNode = fromInterface.myInfo.my_node_num
if isBroadcast:
toNode = BROADCAST_NUM
else:
toNode = toInterface.myInfo.my_node_num
logging.debug(
f"Sending test wantAck={wantAck} packet from {fromNode} to {toNode}")
global sendingInterface
sendingInterface = fromInterface
if not asBinary:
fromInterface.sendText(f"Test {testNumber}", toNode, wantAck=wantAck)
else:
fromInterface.sendData((f"Binary {testNumber}").encode(
"utf-8"), toNode, wantAck=wantAck)
for sec in range(60): # max of 60 secs before we timeout
time.sleep(1)
if (len(receivedPackets) >= 1):
return True
return False # Failed to send
def runTests(numTests=50, wantAck=False, maxFailures=0):
logging.info(f"Running {numTests} tests with wantAck={wantAck}")
numFail = 0
numSuccess = 0
for i in range(numTests):
global testNumber
testNumber = testNumber + 1
isBroadcast = True
# asBinary=(i % 2 == 0)
success = testSend(
interfaces[0], interfaces[1], isBroadcast, asBinary=False, wantAck=wantAck)
if not success:
numFail = numFail + 1
logging.error(
f"Test failed, expected packet not received ({numFail} failures so far)")
else:
numSuccess = numSuccess + 1
logging.info(
f"Test succeeded {numSuccess} successes {numFail} failures so far")
# if numFail >= 3:
# for i in interfaces:
# i.close()
# return
time.sleep(1)
if numFail > maxFailures:
logging.error("Too many failures! Test failed!")
return numFail
def testThread(numTests=50):
logging.info("Found devices, starting tests...")
runTests(numTests, wantAck=True)
# Allow a few dropped packets
runTests(numTests, wantAck=False, maxFailures=5)
def onConnection(topic=pub.AUTO_TOPIC):
"""Callback invoked when we connect/disconnect from a radio"""
print(f"Connection changed: {topic.getName()}")
def openDebugLog(portName):
debugname = "log" + portName.replace("/", "_")
logging.info(f"Writing serial debugging to {debugname}")
return open(debugname, 'w+', buffering=1)
def testAll():
"""
Run a series of tests using devices we can find.
Raises:
Exception: If not enough devices are found
"""
ports = util.findPorts()
if (len(ports) < 2):
raise Exception("Must have at least two devices connected to USB")
pub.subscribe(onConnection, "meshtastic.connection")
pub.subscribe(onReceive, "meshtastic.receive")
global interfaces
interfaces = list(map(lambda port: SerialInterface(
port, debugOut=openDebugLog(port), connectNow=True), ports))
logging.info("Ports opened, starting test")
testThread()
for i in interfaces:
i.close()
def testSimulator():
"""
Assume that someone has launched meshtastic-native as a simulated node.
Talk to that node over TCP, do some operations and if they are successful
exit the process with a success code, else exit with a non zero exit code.
Run with
python3 -c 'from meshtastic.test import testSimulator; testSimulator()'
"""
logging.basicConfig(level=logging.DEBUG if False else logging.INFO)
logging.info("Connecting to simulator on localhost!")
iface = TCPInterface("localhost")
iface.showInfo()
iface.localNode.showInfo()
iface.localNode.exitSimulator()
iface.close()
logging.info("Integration test successful!")
sys.exit(0)
|
the-stack_0_20501 | # Copyright 2015 Vladimir Rutsky <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AbstractRouterAdapter for aiohttp.web.UrlDispatcher.
"""
import collections
from typing import Union
from aiohttp import web
from aiohttp import hdrs
from .abc import AbstractRouterAdapter
from .mixin import CorsViewMixin
# There several usage patterns of routes which should be handled
# differently.
#
# 1. Using new Resources:
#
# resource = app.router.add_resource(path)
# cors.add(resource, resource_defaults=...)
# cors.add(resource.add_route(method1, handler1), config=...)
# cors.add(resource.add_route(method2, handler2), config=...)
# cors.add(resource.add_route(method3, handler3), config=...)
#
# Here all related Routes (i.e. routes with the same path) are in
# a single Resource.
#
# 2. Using `router.add_static()`:
#
# route1 = app.router.add_static(
# "/images", "/usr/share/app/images/")
# cors.add(route1, config=...)
#
# Here old-style `web.StaticRoute` is created and wrapped with
# `web.ResourceAdapter`.
#
# 3. Using old `router.add_route()`:
#
# cors.add(app.router.add_route(method1, path, hand1), config=...)
# cors.add(app.router.add_route(method2, path, hand2), config=...)
# cors.add(app.router.add_route(method3, path, hand3), config=...)
#
# This creates three Resources with single Route in each.
#
# 4. Using deprecated `register_route` with manually created
# `web.Route`:
#
# route1 = RouteSubclass(...)
# app.router.register_route(route1)
# cors.add(route1, config=...)
#
# Here old-style route is wrapped with `web.ResourceAdapter`.
#
# Preflight requests is roughly an OPTIONS request with query
# "is specific HTTP method is allowed".
# In order to properly handle preflight request we need to know which
# routes have enabled CORS on the request path and CORS configuration
# for requested HTTP method.
#
# In case of new usage pattern it's simple: we need to take a look at
# self._resource_config[resource][method] for the processing resource.
#
# In case of old usage pattern we need to iterate over routes with
# enabled CORS and check is requested path and HTTP method is accepted
# by a route.
class _ResourceConfig:
def __init__(self, default_config):
# Resource default config.
self.default_config = default_config
# HTTP method to route configuration.
self.method_config = {}
def _is_web_view(entity, strict=True):
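    """Return True if *entity* is a route whose handler is a class-based web.View.

    If the handler is a web.View that does not also derive from CorsViewMixin,
    a ValueError is raised when strict=True, otherwise False is returned.
    """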
webview = False
if isinstance(entity, web.AbstractRoute):
handler = entity.handler
if isinstance(handler, type) and issubclass(handler, web.View):
webview = True
if not issubclass(handler, CorsViewMixin):
if strict:
raise ValueError("web view should be derived from "
"aiohttp_cors.WebViewMixig for working "
"with the library")
else:
return False
return webview
class ResourcesUrlDispatcherRouterAdapter(AbstractRouterAdapter):
"""Adapter for `UrlDispatcher` for Resources-based routing only.
Should be used with routes added in the following way:
resource = app.router.add_resource(path)
cors.add(resource, resource_defaults=...)
cors.add(resource.add_route(method1, handler1), config=...)
cors.add(resource.add_route(method2, handler2), config=...)
cors.add(resource.add_route(method3, handler3), config=...)
"""
def __init__(self,
router: web.UrlDispatcher,
defaults):
"""
:param defaults:
Default CORS configuration.
"""
self._router = router
# Default configuration for all routes.
self._default_config = defaults
# Mapping from Resource to _ResourceConfig.
self._resource_config = {}
self._resources_with_preflight_handlers = set()
self._preflight_routes = set()
def add_preflight_handler(
self,
routing_entity: Union[web.Resource, web.StaticResource,
web.ResourceRoute],
handler):
"""Add OPTIONS handler for all routes defined by `routing_entity`.
Does nothing if CORS handler already handles routing entity.
Should fail if there are conflicting user-defined OPTIONS handlers.
"""
if isinstance(routing_entity, web.Resource):
resource = routing_entity
# Add preflight handler for Resource, if not yet added.
if resource in self._resources_with_preflight_handlers:
# Preflight handler already added for this resource.
return
for route_obj in resource:
if route_obj.method == hdrs.METH_OPTIONS:
if route_obj.handler is handler:
return # already added
else:
raise ValueError(
"{!r} already has OPTIONS handler {!r}"
.format(resource, route_obj.handler))
elif route_obj.method == hdrs.METH_ANY:
if _is_web_view(route_obj):
self._preflight_routes.add(route_obj)
self._resources_with_preflight_handlers.add(resource)
return
else:
raise ValueError("{!r} already has a '*' handler "
"for all methods".format(resource))
preflight_route = resource.add_route(hdrs.METH_OPTIONS, handler)
self._preflight_routes.add(preflight_route)
self._resources_with_preflight_handlers.add(resource)
elif isinstance(routing_entity, web.StaticResource):
resource = routing_entity
# Add preflight handler for Resource, if not yet added.
if resource in self._resources_with_preflight_handlers:
# Preflight handler already added for this resource.
return
resource.set_options_route(handler)
preflight_route = resource._routes[hdrs.METH_OPTIONS]
self._preflight_routes.add(preflight_route)
self._resources_with_preflight_handlers.add(resource)
elif isinstance(routing_entity, web.ResourceRoute):
route = routing_entity
if not self.is_cors_for_resource(route.resource):
self.add_preflight_handler(route.resource, handler)
else:
raise ValueError(
"Resource or ResourceRoute expected, got {!r}".format(
routing_entity))
def is_cors_for_resource(self, resource: web.Resource) -> bool:
"""Is CORS is configured for the resource"""
return resource in self._resources_with_preflight_handlers
def _request_route(self, request: web.Request) -> web.ResourceRoute:
match_info = request.match_info
assert isinstance(match_info, web.UrlMappingMatchInfo)
return match_info.route
def _request_resource(self, request: web.Request) -> web.Resource:
return self._request_route(request).resource
def is_preflight_request(self, request: web.Request) -> bool:
"""Is `request` is a CORS preflight request."""
route = self._request_route(request)
if _is_web_view(route, strict=False):
return request.method == 'OPTIONS'
return route in self._preflight_routes
def is_cors_enabled_on_request(self, request: web.Request) -> bool:
"""Is `request` is a request for CORS-enabled resource."""
return self._request_resource(request) in self._resource_config
def set_config_for_routing_entity(
self,
routing_entity: Union[web.Resource, web.StaticResource,
web.ResourceRoute],
config):
"""Record configuration for resource or it's route."""
if isinstance(routing_entity, (web.Resource, web.StaticResource)):
resource = routing_entity
# Add resource configuration or fail if it's already added.
if resource in self._resource_config:
raise ValueError(
"CORS is already configured for {!r} resource.".format(
resource))
self._resource_config[resource] = _ResourceConfig(
default_config=config)
elif isinstance(routing_entity, web.ResourceRoute):
route = routing_entity
# Add resource's route configuration or fail if it's already added.
if route.resource not in self._resource_config:
self.set_config_for_routing_entity(route.resource, config)
if route.resource not in self._resource_config:
raise ValueError(
"Can't setup CORS for {!r} request, "
"CORS must be enabled for route's resource first.".format(
route))
resource_config = self._resource_config[route.resource]
if route.method in resource_config.method_config:
raise ValueError(
"Can't setup CORS for {!r} route: CORS already "
"configured on resource {!r} for {} method".format(
route, route.resource, route.method))
resource_config.method_config[route.method] = config
else:
raise ValueError(
"Resource or ResourceRoute expected, got {!r}".format(
routing_entity))
async def get_preflight_request_config(
self,
preflight_request: web.Request,
origin: str,
requested_method: str):
assert self.is_preflight_request(preflight_request)
resource = self._request_resource(preflight_request)
resource_config = self._resource_config[resource]
defaulted_config = collections.ChainMap(
resource_config.default_config,
self._default_config)
options = defaulted_config.get(origin, defaulted_config.get("*"))
if options is not None and options.is_method_allowed(requested_method):
# Requested method enabled for CORS in defaults, override it with
# explicit route configuration (if any).
route_config = resource_config.method_config.get(
requested_method, {})
else:
# Requested method is not enabled in defaults.
# Enable CORS for it only if explicit configuration exists.
route_config = resource_config.method_config[requested_method]
defaulted_config = collections.ChainMap(route_config, defaulted_config)
return defaulted_config
def get_non_preflight_request_config(self, request: web.Request):
"""Get stored CORS configuration for routing entity that handles
specified request."""
assert self.is_cors_enabled_on_request(request)
resource = self._request_resource(request)
resource_config = self._resource_config[resource]
# Take Route config (if any) with defaults from Resource CORS
# configuration and global defaults.
route = request.match_info.route
if _is_web_view(route, strict=False):
method_config = request.match_info.handler.get_request_config(
request, request.method)
else:
method_config = resource_config.method_config.get(request.method,
{})
defaulted_config = collections.ChainMap(
method_config,
resource_config.default_config,
self._default_config)
return defaulted_config
|
the-stack_0_20505 | """
Author: Prateek Kumar Oraon
Copyright Prateek Kumar Oraon, free to use under MIT License
"""
import numpy as np
import pandas as pd
def build_var_matrix(data):
matrix = []
for item_list in data:
x = [1]
for item in item_list:
x.append(item)
matrix.append(x)
return matrix
def matrix_mul(x, y):
rows = len(x)
cols = len(y[0])
result = [[0 for x in range(cols)] for y in range(rows)]
for i in range(rows):
for j in range(cols):
for k in range(len(y)):
result[i][j] += x[i][k] * y[k][j]
return result
def transpose(matrix):
rows, cols = np.shape(matrix)
new_matrix = [[0 for x in range(rows)] for y in range(cols)]
for i in range(cols):
for j in range(rows):
new_matrix[i][j] = matrix[j][i]
return new_matrix
class Matrix(object):
def __init__(self, matrix):
self.matrix = matrix
def cofactor(self, mat, p, q, n):
# mat = self.matrix
i = 0
j = 0
temp = [[0 for x in range(n)] for y in range(n)]
for row in range(n):
for col in range(n):
if row != p and col != q:
temp[i][j] = mat[row][col]
j += 1
if j == n - 1:
j = 0
i += 1
return temp
def determinant(self, mat, n):
if mat is None:
mat = self.matrix
n = len(mat)
d = 0
if n == 1:
return mat[0][0]
multiplier = 1
for i in range(n):
temp = self.cofactor(mat, 0, i, n)
d += multiplier * mat[0][i] * self.determinant(temp, n - 1)
multiplier = -multiplier
return d
def adjoint(self):
mat = self.matrix
n = len(mat)
if n == 1:
return [[1]]
multiplier = 1
adj = [[0 for x in range(n)] for y in range(n)]
for i in range(n):
for j in range(n):
temp = self.cofactor(mat, i, j, n)
if (i + j) % 2 == 0:
multiplier = 1
else:
multiplier = -1
adj[j][i] = multiplier * self.determinant(temp, n - 1)
return adj
def inverse(self):
mat = self.matrix
n = len(mat)
det = self.determinant(mat, n)
if det == 0:
print("Inverse does not exist")
return False
adj = self.adjoint()
inv = [[0 for x in range(n)] for y in range(n)]
for i in range(n):
for j in range(n):
inv[i][j] = adj[i][j] / det
return inv
class LinearRegressionMul(object):
def __init__(self, x, y):
self.x = x
self.y = y
self.coefficients = []
def fit(self):
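        # Ordinary least squares via the normal equation:
        #     beta = (X^T X)^(-1) X^T y
        # where X is the design matrix with a leading column of ones for the intercept.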
x = self.x
y = self.y
        x_mat = build_var_matrix(x)
        x_trans = transpose(x_mat)
x_mult = matrix_mul(x_trans, x_mat)
mat = Matrix(x_mult)
x_mult_inv = mat.inverse()
y_mat = np.reshape(y, (len(y), 1))
y_mult = matrix_mul(x_trans, y_mat)
        self.coefficients = np.reshape(matrix_mul(x_mult_inv, y_mult), -1)
print("Coefficients")
print(self.coefficients)
def predict(self, x):
coeff = self.coefficients
y_pred = []
val = coeff[0]
j = 1
for x_val in x:
for i in range(len(coeff) - 1):
val += coeff[i + 1] * x_val[i]
y_pred.append(val)
val = coeff[0]
j += 1
return y_pred
def run():
dataset = pd.read_csv('regression_4.csv')
x = dataset.iloc[:, :-1].values
y = dataset.iloc[:, -1].values
model = LinearRegressionMul(x, y)
model.fit()
y_pred = model.predict([[35, 250], [40, 250], [40, 300]])
print("Predictions")
print(y_pred)
if __name__ == '__main__':
run()
|
the-stack_0_20507 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
shotlast
Watches clipboard and automatically saves any new images.
"""
# pylint: disable=missing-docstring
# pylint: disable=empty-docstring
# pylint: disable=invalid-name
# pylint: disable=line-too-long
import argparse
import datetime
import os
import pathlib
import platform
import time
import uuid
from PIL import ImageChops
from PIL import ImageGrab
import click
import PySimpleGUI as sg
sg.theme("DarkGrey7")
# for other themes:
# https://www.geeksforgeeks.org/themes-in-pysimplegui/
def get_datetime_stamp(sep_date="", sep_group="_", sep_time="", moment=None):
"""
Returns string representation of datetime objects.
By default, the value will look like "20121212_120102" and it
is safe to use it on file names.
sep_date:
string, separator between year, month and day
sep_group:
string, separator between the date and time.
sep_time:
string, separator between hour, minute and second
moment:
an instance of datetime.datetime.
if it is None(the default), now() will be used.
Requires:
import datetime
>>> import datetime
>>> date1 = datetime.datetime(2012, 12, 12, 12, 1, 2)
>>> print(get_datetime_stamp(moment=date1))
20121212_120102
>>> print(get_datetime_stamp(sep_date="/", sep_group="-", moment=date1))
2012/12/12-120102
>>> print(get_datetime_stamp(sep_group="-", sep_time=":", moment=date1))
20121212-12:01:02
"""
date_format = (sep_date.join(["%Y", "%m", "%d"]) +
sep_group +
sep_time.join(["%H", "%M", "%S"]))
if moment is None:
moment = datetime.datetime.now()
stamp = moment.strftime(date_format)
return stamp
def build_only_file_name(prefix="clip"):
"""
Returns a string like "clip_20121212_120102" without file extension.
"""
file_name = prefix + "_" + get_datetime_stamp()
return file_name
def build_full_file_name(target_dir, file_format="png"):
only_file_name = build_only_file_name()
full_file_name = os.path.join(
target_dir, only_file_name) + "." + file_format
if os.path.isfile(full_file_name):
# there is a file name clash.
# add some UUID to avoid it.
uuid1 = str(uuid.uuid1())
full_file_name = os.path.join(
target_dir, only_file_name) + "_" + uuid1 + "." + file_format
return full_file_name
def is_same_image(image1, image2):
"""
Returns True if 2 images are the same, False otherwise.
https://stackoverflow.com/questions/35176639/compare-images-python-pil/56280735
requires:
from PIL import ImageChops
"""
    if image1 is None and image2 is None:  # pylint: disable=no-else-return
        # both are None, so the two "images" are considered equal.
        return True
    elif image1 is None:
        return False
    elif image2 is None:
        return False
same = True
image1_rgb = image1.convert('RGB')
image2_rgb = image2.convert('RGB')
diff = ImageChops.difference(image1_rgb, image2_rgb)
if diff.getbbox():
# there is a difference.
same = False
return same
def start_shots(target_dir, sleep_duration=2):
click.secho("started shotlast.")
click.secho("target_dir: ", nl=False)
click.secho(str(target_dir), fg="yellow")
click.secho("sleep_duration: ", nl=False)
click.secho(str(sleep_duration), fg="yellow")
click.secho("press ", nl=False)
click.secho("ctrl c", fg="magenta", nl=False)
click.secho(" to end.")
image0 = None # previous
file_format = "png"
while True:
time.sleep(sleep_duration)
image1 = ImageGrab.grabclipboard()
# <class 'PIL.BmpImagePlugin.DibImageFile'>
if image1 is None:
# could not find an image, possibly we have text.
continue
if not is_same_image(image0, image1):
full_file_name = build_full_file_name(target_dir, file_format)
full_file_name = os.path.normpath(full_file_name)
# the line above is required since PySimpleGUI uses / on Windows.
# C:/Users/caglar/Desktop/gun05\clip_20201204_142219.png
image1.save(full_file_name, file_format.upper())
click.secho("saved image: ", nl=False)
click.secho(str(full_file_name), fg="green")
image0 = image1
def get_candidate_dir():
"""
Returns a valid directory name to store the pictures.
If it can not be determined, "" is returned.
requires:
import os
import pathlib
import platform
https://docs.python.org/3/library/pathlib.html#pathlib.Path.home
New in version 3.5.
https://docs.python.org/3.8/library/platform.html#platform.system
Returns the system/OS name, such as 'Linux', 'Darwin', 'Java', 'Windows'.
An empty string is returned if the value cannot be determined.
"""
home_dir = pathlib.Path().home()
target_dir = home_dir
system = platform.system()
if system == "Windows":
target_dir = os.path.join(home_dir, "Pictures")
elif system == "Darwin":
target_dir = os.path.join(home_dir, "Pictures")
elif system == "Linux":
target_dir = os.path.join(home_dir, "Pictures")
if os.path.isdir(target_dir): # pylint: disable=no-else-return
return target_dir
elif os.path.isdir(home_dir):
return home_dir
else:
return ""
def choose_target_dir_with_click(default_dir):
"""
Make the user to type a directory using click package.
requires:
import click
"""
chosen_dir = default_dir
marker = '# Everything below is ignored\n'
message = click.edit(default_dir + '\n\n' + marker)
if message is not None:
chosen_dir = message.split(marker, 1)[0].rstrip('\n')
return chosen_dir
def choose_target_dir_with_sg(default_dir):
"""
    Let the user choose a directory using the PySimpleGUI package.
requires:
import PySimpleGUI as sg
"""
layout = [
[sg.T("")],
[sg.Text("Choose a directory to store the captured clipboard items:")],
[sg.Input(default_dir, key="__directory"),
sg.FolderBrowse()],
[sg.Button("Submit")]
]
window = sg.Window('shotlast', layout, size=(500, 150))
chosen_dir = None
while True:
event, values = window.read()
if event in {sg.WIN_CLOSED, "Exit"}:
break
elif event == "Submit":
chosen_dir = values["__directory"]
break
window.close()
return chosen_dir
def choose_target_dir(default_dir):
    # chosen_dir = choose_target_dir_with_click(default_dir)
    chosen_dir = choose_target_dir_with_sg(default_dir)
    if chosen_dir is None:
        # The dialog was closed without a selection; normpath(None) would raise.
        return None
    chosen_dir = os.path.normpath(chosen_dir)
    return chosen_dir
def get_settings():
"""
requires:
import argparse
"""
parser = argparse.ArgumentParser()
help1 = "Target directory to store the saved clipboard files."
parser.add_argument('target_dir', nargs='?', help=help1)
help1 = 'Sleep duration (in seconds) between two clipboard checks.'
parser.add_argument('--period', nargs='?', help=help1, default="2")
# help1 = 'If provided, automatically confirms overwrite.'
# parser.add_argument('--overwrite', action='store_true', help=help1)
args = parser.parse_args()
settings = {}
settings["sleep_duration"] = args.period
settings["target_dir"] = args.target_dir
# for file_name in args.target_dir:
# if os.path.isfile(file_name):
# settings["input"] = file_name
# convert_file(file_name, settings)
# else:
# print("NOT a file: ", file_name)
return settings
def main():
"""
entry point of the module.
"""
# os.chdir(os.path.abspath(os.path.dirname(__file__)))
settings = get_settings()
sleep_duration = int(settings["sleep_duration"])
if settings["target_dir"]:
target_dir = settings["target_dir"]
else:
candidate_target_dir = get_candidate_dir()
target_dir = choose_target_dir(default_dir=candidate_target_dir)
settings["target_dir"] = target_dir
if target_dir is None:
click.secho("A target directory is not selected.", fg="red")
return
if not os.path.isdir(target_dir):
click.secho("Target is not a valid directory:", fg="red")
click.secho(str(target_dir), fg="yellow")
return
click.launch(target_dir)
start_shots(target_dir=target_dir, sleep_duration=sleep_duration)
if __name__ == '__main__':
main()
|
the-stack_0_20509 | import tcod as libtcod
from components.ai import ConfusedMonster
from game_messages import Message
def heal(*args, **kwargs):
entity = args[0]
amount = kwargs.get('amount')
results = []
if entity.fighter.hp == entity.fighter.max_hp:
results.append({'consumed': False, 'message': Message('You are already at full health', libtcod.yellow)})
else:
entity.fighter.heal(amount)
results.append({'consumed': True, 'message': Message('Your wounds start to feel better!', libtcod.green)})
return results
def cast_lightning(*args, **kwargs):
caster = args[0]
entities = kwargs.get('entities')
fov_map = kwargs.get('fov_map')
damage = kwargs.get('damage')
maximum_range = kwargs.get('maximum_range')
results = []
target = None
closest_distance = maximum_range + 1
for entity in entities:
if entity.fighter and entity != caster and libtcod.map_is_in_fov(fov_map, entity.x, entity.y):
distance = caster.distance_to(entity)
if distance < closest_distance:
target = entity
closest_distance = distance
if target:
results.append({'consumed': True, 'target': target, 'message': Message('A lighting bolt strikes the {0} with a loud thunder! The damage is {1}'.format(target.first_name, damage))})
results.extend(target.fighter.take_damage(damage))
else:
results.append({'consumed': False, 'target': None, 'message': Message('No enemy is close enough to strike.', libtcod.red)})
return results
def cast_fireball(*args, **kwargs):
entities = kwargs.get('entities')
fov_map = kwargs.get('fov_map')
damage = kwargs.get('damage')
radius = kwargs.get('radius')
target_x = kwargs.get('target_x')
target_y = kwargs.get('target_y')
results = []
if not libtcod.map_is_in_fov(fov_map, target_x, target_y):
results.append({'consumed': False, 'message': Message('You cannot target a tile outside your field of view.', libtcod.yellow)})
return results
results.append({'consumed': True, 'message': Message('The fireball explodes, burning everything within {0} tiles!'.format(radius), libtcod.orange)})
for entity in entities:
if entity.distance(target_x, target_y) <= radius and entity.fighter:
results.append({'message': Message('The {0} gets burned for {1} hit points.'.format(entity.first_name, damage), libtcod.orange)})
results.extend(entity.fighter.take_damage(damage))
return results
def cast_confuse(*args, **kwargs):
entities = kwargs.get('entities')
fov_map = kwargs.get('fov_map')
target_x = kwargs.get('target_x')
target_y = kwargs.get('target_y')
results = []
if not libtcod.map_is_in_fov(fov_map, target_x, target_y):
results.append({'consumed': False, 'message': Message('You cannot target a tile outside your field of view.', libtcod.yellow)})
return results
for entity in entities:
if entity.x == target_x and entity.y == target_y and entity.ai:
confused_ai = ConfusedMonster(entity.ai, 10)
confused_ai.owner = entity
entity.ai = confused_ai
results.append({'consumed': True, 'message': Message('The eyes of the {0} look vacant, as he starts to stumble around!'.format(entity.first_name), libtcod.light_green)})
break
else:
results.append({'consumed': False, 'message': Message('There is no targetable enemy at that location.', libtcod.yellow)})
return results |
the-stack_0_20510 | import logging
import json
import os
import stripe
import sys
from logging.handlers import RotatingFileHandler
from flask import Flask, render_template, redirect, request, send_from_directory, jsonify
from flask_api import status
COLORS = ["black", "white"]
COLOR = "white"
LOGDIR = '/var/log/ruthgracewong/'
LOGFILE = 'app.log'
IPHONE_SHIPPING_COST = 10
CARD = "card"
# NOTE: assumed configuration, not part of the original snippet. The order
# handler below references SKUS and the Stripe client needs an API key; both
# are read from the environment here as clearly labelled placeholders.
stripe.api_key = os.environ.get("STRIPE_SECRET_KEY", "")
SKUS = {color: os.environ.get("SKU_" + color.upper(), "") for color in COLORS}
app = Flask(__name__)
if not os.path.exists(LOGDIR):
os.makedirs(LOGDIR)
handler = RotatingFileHandler(LOGDIR + LOGFILE, maxBytes=10000, backupCount=1)
handler.setLevel(logging.DEBUG)
app.logger.addHandler(handler)
def get_numbers(quantity, cost):
numbers = {}
numbers['quantity'] = quantity
numbers['cost'] = cost * numbers['quantity']
numbers['totalcents'] = 0
numbers['totaldollars'] = numbers['cost']
numbers['stripetotal'] = numbers['totaldollars'] * 100 + numbers['totalcents']
return numbers
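# Illustrative: get_numbers(2, 15) returns {'quantity': 2, 'cost': 30,
# 'totalcents': 0, 'totaldollars': 30, 'stripetotal': 3000}; Stripe expects
# amounts in cents, hence the final * 100.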
@app.route('/')
def home():
return render_template('index.html')
@app.route('/breathableunderwear')
def breathable():
return render_template('breathableunderwear.html')
@app.route('/thankyou', methods = ['GET', 'POST'])
def thankyou():
if request.method == 'POST':
data = request.data
order_data = json.loads(data)['token']
try:
order = stripe.Order.create(
currency='usd',
items=[
{
"type": 'sku',
"parent": SKUS[order_data['color']]
}
],
shipping={
"name": order_data[CARD]['name'],
"address":{
"line1": order_data[CARD]['address_line1'],
"city": order_data[CARD]['address_city'],
"state": order_data[CARD]['address_state'],
"country": order_data[CARD]['address_country'],
"postal_code": order_data[CARD]['address_zip']
},
},
email = order_data['email']
)
email = order_data['email']
del order_data['color']
del order_data['email']
charge = stripe.Charge.create(
amount=str(IPHONE_SHIPPING_COST * 100),
description="Shipping for iPhone toy",
currency="usd",
receipt_email=email,
source=order_data['id']
)
order.pay(source=order_data['id'])
return "success"
except stripe.error.InvalidRequestError as err:
            app.logger.warning("INVALID REQUEST ERROR: {0}".format(err))
return jsonify({"error": str(err)}), status.HTTP_500_INTERNAL_SERVER_ERROR
if request.method == 'GET':
return render_template('thankyou.html')
|
the-stack_0_20513 | import pandas as pd
import json
import sys
import os
def load_curr(path):
df = pd.read_csv(path, sep=',', quotechar='"', index_col='index')
return df
def create_curr():
df = pd.DataFrame(columns=['index', 'hours', 'title', 'link', 'content']).set_index('index')
return df
def save_curr(df, path):
df.to_csv(path, sep=',', quotechar='"')
def add_curr(df, hours, title, link, content):
"""
add new row to the curriculum
:df: pd.DataFrame
:hours: str of float
:title: str, title of course
:link: str, link to course
:content: str, content category
"""
if len(df):
index = max(df.index) + 1
else:
index = 0
return df.append(pd.DataFrame([[index, float(hours), title, link, content]], columns=['index', 'hours', 'title', 'link', 'content']).set_index('index'))
def remove_curr(df, index):
"""
remove row with index index
:df: pd.DataFrame
:index: str of integer
"""
return df.drop(index=int(index))
def move_curr(df, index, dir):
"""
move row up or down by one
:df: pd.DataFrame
:index: str of int
:dir: one of ['up', 'down']
"""
row_ind = list(df.index).index(int(index))
if (row_ind == 0 and dir == 'up') or (row_ind == len(df)-1 and dir == 'down'):
return df
if dir == 'up':
new_order = list(df.index)[:row_ind-1] + [df.index[row_ind]] + [df.index[row_ind-1]] + list(df.index)[row_ind+1:]
elif dir == 'down':
new_order = list(df.index)[:row_ind] + [df.index[row_ind+1]] + [df.index[row_ind]] + list(df.index)[row_ind+2:]
else:
raise ValueError('Invalid dir')
return df.reindex(new_order)
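# Illustrative: with df.index == [0, 1, 2], move_curr(df, 2, 'up') reorders the
# rows to [0, 2, 1]; moving the first row 'up' (or the last row 'down') is a
# no-op and returns df unchanged.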
def get_curr(df, printOut=True):
l = []
for i, row in df.iterrows():
d = dict(row)
d['index'] = i
if printOut:
print(json.dumps(d))
else:
l.append(d)
if not printOut:
return l
def path_curr(name):
return os.path.join(os.path.split(__file__)[0], 'curricula', f'{name}.csv')
if __name__ == "__main__":
cmd, name = sys.argv[1:3]
cmd = cmd.strip("'")
name = name.strip("'")
path = path_curr(name)
if not cmd == 'create':
df = load_curr(path)
if cmd == 'create':
df = create_curr()
elif cmd == 'get':
get_curr(df)
elif cmd == 'add':
df = add_curr(df, *[x.strip("'") for x in sys.argv[3:]])
elif cmd == 'remove':
df = remove_curr(df, *[x.strip("'") for x in sys.argv[3:]])
elif cmd == 'move':
df = move_curr(df, *[x.strip("'") for x in sys.argv[3:]])
else:
        raise ValueError('Unknown Command')
if cmd != 'get':
save_curr(df, path) |
the-stack_0_20514 |
#* Asked in Google
#? You are given a list of tuples representing the time intervals
#? for the lectures; the intervals may overlap.
#? Return the number of rooms required.
#! Example
#? Input: [(30,75),(0,50),(60,150)]
#? Output: 2
#! Two rooms will be required
def findRooms(lst):
    # Sweep over the sorted start and end times and track how many lectures
    # are running at once; the peak overlap is the number of rooms required.
    starts = sorted(s for s, _ in lst)
    ends = sorted(e for _, e in lst)
    rooms = 0
    max_rooms = 0
    i = j = 0
    while i < len(starts):
        if starts[i] <= ends[j]:
            rooms += 1
            max_rooms = max(max_rooms, rooms)
            i += 1
        else:
            rooms -= 1
            j += 1
    return max_rooms
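# Worked example for the call below: sorted starts are [0, 30, 60] and sorted
# ends are [50, 75, 150]; at most two lectures overlap at any time (e.g. (0,50)
# with (30,75)), so two rooms are required.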
print(findRooms([(30,75),(0,50),(60,150)])) |
the-stack_0_20515 | class Solution:
def lengthOfLongestSubstring(self, s: str) -> int:
        # Classic sliding-window solution
if not s: return 0
lookup = list()
max_len = 0
cur_len = 0
for i in range(len(s)):
temp = s[i]
if not temp in lookup:
lookup.append(temp)
cur_len += 1
            # The character already exists inside the window
else:
                # Find its position in the window
index = lookup.index(temp)
                # Drop everything up to and including that position
lookup = lookup[index + 1:]
lookup.append(temp)
cur_len = len(lookup)
            # Update the maximum length
if cur_len > max_len:
max_len = cur_len
return max_len |
the-stack_0_20521 | # Create your views here.
from django.contrib.auth import get_user_model
from django.db import transaction
from rest_framework import status
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from openbook_moderation.permissions import IsNotSuspended
from openbook_common.utils.helpers import normalise_request_data
from openbook_connections.serializers import ConnectWithUserSerializer, ConnectionSerializer, \
DisconnectFromUserSerializer, UpdateConnectionSerializer, ConfirmConnectionSerializer, ConnectionUserSerializer
class Connections(APIView):
permission_classes = (IsAuthenticated,)
def get(self, request):
user = request.user
response_serializer = ConnectionSerializer(user.connections, many=True, context={"request": request})
return Response(response_serializer.data, status=status.HTTP_200_OK)
class ConnectWithUser(APIView):
permission_classes = (IsAuthenticated, IsNotSuspended)
def post(self, request):
request_data = _prepare_request_data_for_validation(request.data)
serializer = ConnectWithUserSerializer(data=request_data)
serializer.is_valid(raise_exception=True)
data = serializer.validated_data
username = data.get('username')
circles_ids = data.get('circles_ids')
user = request.user
User = get_user_model()
user_to_connect_with = User.objects.get(username=username)
with transaction.atomic():
connection = user.connect_with_user_with_id(user_to_connect_with.pk, circles_ids=circles_ids)
response_serializer = ConnectionSerializer(connection, context={"request": request})
return Response(response_serializer.data, status=status.HTTP_201_CREATED)
class DisconnectFromUser(APIView):
permission_classes = (IsAuthenticated, IsNotSuspended)
def post(self, request):
serializer = DisconnectFromUserSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
data = serializer.validated_data
username = data.get('username')
user = request.user
User = get_user_model()
user_to_disconnect_from = User.objects.get(username=username)
with transaction.atomic():
user.disconnect_from_user_with_id(user_to_disconnect_from.pk)
response_serializer = ConnectionUserSerializer(user_to_disconnect_from, context={'request': request})
return Response(response_serializer.data, status=status.HTTP_200_OK)
class UpdateConnection(APIView):
permission_classes = (IsAuthenticated, IsNotSuspended)
def post(self, request):
request_data = _prepare_request_data_for_validation(request.data)
serializer = UpdateConnectionSerializer(data=request_data)
serializer.is_valid(raise_exception=True)
data = serializer.validated_data
username = data.get('username')
circles_ids = data.get('circles_ids')
user = request.user
User = get_user_model()
user_to_update_connection_from = User.objects.get(username=username)
with transaction.atomic():
connection = user.update_connection_with_user_with_id(user_to_update_connection_from.pk,
circles_ids=circles_ids)
response_serializer = ConnectionSerializer(connection, context={'request': request})
return Response(response_serializer.data, status=status.HTTP_200_OK)
class ConfirmConnection(APIView):
permission_classes = (IsAuthenticated, IsNotSuspended)
def post(self, request):
request_data = _prepare_request_data_for_validation(request.data)
serializer = ConfirmConnectionSerializer(data=request_data)
serializer.is_valid(raise_exception=True)
data = serializer.validated_data
username = data.get('username')
circles_ids = data.get('circles_ids')
user = request.user
User = get_user_model()
user_to_confirm_connection_with = User.objects.get(username=username)
with transaction.atomic():
connection = user.confirm_connection_with_user_with_id(user_to_confirm_connection_with.pk,
circles_ids=circles_ids)
response_serializer = ConnectionSerializer(connection, context={'request': request})
return Response(response_serializer.data, status=status.HTTP_200_OK)
def _prepare_request_data_for_validation(request_data):
request_data_copy = normalise_request_data(request_data)
circles_ids = request_data_copy.get('circles_ids', None)
if isinstance(circles_ids, str):
circles_ids = circles_ids.split(',')
request_data_copy['circles_ids'] = circles_ids
return request_data_copy
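# Illustrative: multipart/form-data sends circles_ids as a comma separated
# string, so request data like {'username': 'joe', 'circles_ids': '3,7'} is
# normalised to {'username': 'joe', 'circles_ids': ['3', '7']} before the
# serializers validate it.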
|
the-stack_0_20522 | #!/usr/bin/env python
import numpy
from shogun import MSG_DEBUG
numpy.random.seed(17)
traindat = numpy.random.random_sample((10,10))
testdat = numpy.random.random_sample((10,10))
parameter_list=[[traindat,testdat,1.2],[traindat,testdat,1.4]]
def distance_director_euclidean (fm_train_real=traindat,fm_test_real=testdat,scale=1.2):
try:
from shogun import DirectorDistance
except ImportError:
print("recompile shogun with --enable-swig-directors")
return
import shogun as sg
class DirectorEuclideanDistance(DirectorDistance):
def __init__(self):
DirectorDistance.__init__(self, True)
def distance_function(self, idx_a, idx_b):
seq1 = self.get_lhs().get_feature_vector(idx_a)
seq2 = self.get_rhs().get_feature_vector(idx_b)
return numpy.linalg.norm(seq1-seq2)
from shogun import Time
feats_train=sg.features(fm_train_real)
#feats_train.io.set_loglevel(MSG_DEBUG)
feats_train.get_global_parallel().set_num_threads(1)
feats_test=sg.features(fm_test_real)
distance=sg.distance("EuclideanDistance")
distance.init(feats_train, feats_test)
ddistance=DirectorEuclideanDistance()
ddistance.init(feats_train, feats_test)
#print "dm_train"
t=Time()
dm_train=distance.get_distance_matrix()
#t1=t.cur_time_diff(True)
#print "ddm_train"
t=Time()
ddm_train=ddistance.get_distance_matrix()
#t2=t.cur_time_diff(True)
#print "dm_train", dm_train
#print "ddm_train", ddm_train
return dm_train, ddm_train
if __name__=='__main__':
print('DirectorEuclideanDistance')
distance_director_euclidean(*parameter_list[0])
|
the-stack_0_20524 | # Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
import logging as log
import os
import re
import shlex
import subprocess
import tarfile
from pathlib import Path
from Launcher import Launcher, LauncherError
from utils import VERBOSE, clean_odirs
class LsfLauncher(Launcher):
# A hidden directory specific to a cfg, where we put individual 'job'
# scripts.
jobs_dir = {}
# All launcher instances available for lookup.
jobs = {}
# When the job completes, we try to read the job script output to determine
# the outcome. It may not have been completely written the first time we
# read it so we retry on the next poll, no more than 10 times.
max_poll_retries = 10
# TODO: Add support for build/run/cov job specific resource requirements:
# cpu, mem, disk, stack.
# TODO: Allow site-specific job resource usage setting using
# `DVSIM_LSF_CFG` environment variable.
def __init__(self, deploy):
super().__init__(deploy)
# Set the status. Only update after the job is done - i.e. status will
# transition from None to P/F/K.
self.status = None
# Maintain the job script output as an instance variables for polling
# and cleanup.
self.job_script_out = None
# If we already opened the job script output file (but have not
# determined the outcome), then we maintain the file descriptor rather
# then reopening it and starting all over again on the next poll.
self.job_script_out_fd = None
self.job_script_out_err_msg = []
self.job_script_out_err_msg_found = False
# Set the job id.
self.job_id = None
# Polling retry counter..
self.num_poll_retries = 0
# Add self to the list of jobs.
cfg_dict = LsfLauncher.jobs.setdefault(deploy.sim_cfg, {})
job_name_list = cfg_dict.setdefault(deploy.job_name, [])
job_name_list.append(self)
# Job's index in the array.
self.index = len(job_name_list)
@staticmethod
def prepare_workspace(project, repo_top, args):
'''Overrides Launcher.prepare_workspace.'''
        # Since we dispatch to remote machines, a project-specific python
        # virtualenv, if one exists, needs to be activated when launching the job.
Launcher.set_python_venv(project)
if Launcher.python_venv is None:
return
# Python_venv needs to be a valid tarfile. Extract it in the scratch
        # area if it does not exist. It is up to the user to delete it if it is
# stale.
if tarfile.is_tarfile(Launcher.python_venv):
path = Path(args.scratch_root, Path(Launcher.python_venv).stem)
if not path.is_dir():
with tarfile.open(Launcher.python_venv, mode='r') as tar:
tar.extractall(path=args.scratch_root)
Launcher.python_venv = path
else:
raise LauncherError("{} is not a valid tar file".format(
Launcher.python_venv))
@staticmethod
def prepare_workspace_for_cfg(cfg):
'''Overrides Launcher.prepare_workspace_for_cfg.'''
# Create the job dir.
LsfLauncher.jobs_dir[cfg] = Path(cfg.scratch_path, "lsf",
cfg.timestamp)
clean_odirs(odir=LsfLauncher.jobs_dir[cfg], max_odirs=2)
os.makedirs(Path(LsfLauncher.jobs_dir[cfg]), exist_ok=True)
@staticmethod
def make_job_array_script_text(cfg, job_name):
"""Creates the job array script text.
Once all jobs in the array are dispatched, the job array script is
constructed. It is a bash script that takes the job index as a single
argument. This index is set in the bsub command as '$LSB_JOBINDEX',
which bsub sets as the actual index when launching that job in the
array. This script is super simple - it is just a giant case statement
        that switches on the job index to run that specific job. This is preferred
over creating individual scripts for each job which incurs additional
file I/O overhead when the scratch area is on NFS, causing a slowdown.
Returns an iterable representing the lines of the script.
"""
lines = ["#!/usr/bin/env bash\nset -e\n"]
# Activate the python virtualenv if it exists.
if Launcher.python_venv:
lines += ["source {}/bin/activate\n".format(Launcher.python_venv)]
lines += ["case $1 in\n"]
for job in LsfLauncher.jobs[cfg][job_name]:
# Redirect the job's stdout and stderr to its log file.
cmd = "{} > {} 2>&1".format(job.deploy.cmd,
job.deploy.get_log_path())
lines += [" {})\n".format(job.index), " {};;\n".format(cmd)]
# Throw error as a sanity check if the job index is invalid.
lines += [
" *)\n",
" echo \"ERROR: Illegal job index: $1\" 1>&2; exit 1;;\n",
"esac\n"
]
if Launcher.python_venv:
lines += ["deactivate\n"]
return lines
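    # Illustrative output of make_job_array_script_text for two jobs (paths and
    # commands are placeholders, and the virtualenv lines only appear when one
    # is configured):
    #
    #   #!/usr/bin/env bash
    #   set -e
    #   case $1 in
    #     1)
    #       <cmd for job 1> > <log 1> 2>&1;;
    #     2)
    #       <cmd for job 2> > <log 2> 2>&1;;
    #     *)
    #       echo "ERROR: Illegal job index: $1" 1>&2; exit 1;;
    #   esac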
def launch(self):
self._pre_launch()
# Add self to the list of jobs.
job_name = self.deploy.job_name
cfg = self.deploy.sim_cfg
job_total = len(LsfLauncher.jobs[cfg][job_name])
# The actual launching of the bsub command cannot happen until the
# Scheduler has dispatched ALL jobs in the array.
if self.index < job_total:
return
# Write the job array script.
job_script_wo_idx = Path(LsfLauncher.jobs_dir[cfg], job_name)
try:
with open(job_script_wo_idx, "w", encoding='utf-8') as f:
f.writelines(self.make_job_array_script_text(cfg, job_name))
except IOError as e:
err_msg = "ERROR: Failed to write job script {}:\n{}".format(
job_script_wo_idx, e)
self._kill_job_array(err_msg)
raise LauncherError(err_msg)
# Update the shell's env vars with self.exports. Values in exports must
# replace the values in the shell's env vars if the keys match.
exports = os.environ.copy()
if self.deploy.exports:
exports.update(self.deploy.exports)
# Clear the magic MAKEFLAGS variable from exports if necessary. This
# variable is used by recursive Make calls to pass variables from one
# level to the next. Here, self.cmd is a call to Make but it's
# logically a top-level invocation: we don't want to pollute the flow's
# Makefile with Make variables from any wrapper that called dvsim.
if 'MAKEFLAGS' in exports:
del exports['MAKEFLAGS']
self._dump_env_vars(exports)
# TODO: Arbitrarily set the max slot-limit to 100.
job_array = "{}[1-{}]".format(job_name, job_total)
if job_total > 100:
job_array += "%100"
# TODO: This needs to be moved to a HJson.
if self.deploy.sim_cfg.tool == "vcs":
job_rusage = "\'rusage[vcssim=1,vcssim_dynamic=1:duration=1]\'"
elif self.deploy.sim_cfg.tool == "xcelium":
job_rusage = "\'rusage[xcelium=1,xcelium_dynamic=1:duration=1]\'"
else:
job_rusage = None
# Launch the job array.
cmd = [
"bsub",
# TODO: LSF project name could be site specific!
"-P",
cfg.project,
"-J",
job_array,
"-oo",
"{}.%I.out".format(job_script_wo_idx),
"-eo",
"{}.%I.out".format(job_script_wo_idx)
]
if job_rusage:
cmd += ["-R", job_rusage]
cmd.append(
shlex.quote(
"/usr/bin/bash {} $LSB_JOBINDEX".format(job_script_wo_idx)))
try:
p = subprocess.run(' '.join(cmd),
check=True,
shell=True,
timeout=60,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=exports)
except subprocess.CalledProcessError as e:
# Need to mark all jobs in this range with this fail pattern.
err_msg = e.stderr.decode("utf-8").strip()
self._kill_job_array(err_msg)
raise LauncherError(err_msg)
# Extract the job ID.
result = p.stdout.decode("utf-8").strip()
job_id = result.split('Job <')[1].split('>')[0]
        if not job_id:
            err_msg = "Job ID not found in bsub output: {}".format(result)
            self._kill_job_array(err_msg)
            raise LauncherError(err_msg)
for job in LsfLauncher.jobs[cfg][job_name]:
job.job_script_out = Path("{}.{}.out".format(
job_script_wo_idx, job.index))
job.job_id = "{}[{}]".format(job_id, job.index)
job._link_odir("D")
def _finish(self, status, err_msg=None):
'''Helper function that sets the status, exit code and err msg.'''
assert status in ['P', 'F', 'K']
if self.job_script_out_fd:
self.job_script_out_fd.close()
self.status = status
if self.exit_code is None:
self.exit_code = 0 if status == 'P' else 1
if err_msg:
self.fail_msg += err_msg
log.log(VERBOSE, err_msg)
self._post_finish(status)
return status
def poll(self):
# It is possible we may have determined the status already.
if self.status:
return self.status
if not self.job_script_out_fd:
# If job id is not set, the bsub command has not been sent yet.
if not self.job_id:
return 'D'
# If the bsub output file is not created, we are still in
# dispatched state.
if not self.job_script_out.is_file():
return "D"
# We redirect the job's output to the log file, so the job script
# output remains empty until the point it finishes. This is a very
# quick way to check if the job has completed. If nothing has been
# written to the job script output yet, then the job is still
# running.
if not self.job_script_out.stat().st_size:
return "D"
# If we got to this point, we can now open the job script output
# file for reading.
try:
self.job_script_out_fd = open(self.job_script_out, "r")
except IOError as e:
return self._finish(
status="F",
err_msg="ERROR: Failed to open {}\n{}.".format(
self.job_script_out, e))
# Now that the job has completed, we need to determine its status.
#
# If the job successfully launched and it failed, the failure message
# will appear in its log file (because of the stderr redirection).
# But, in some cases, if there is something wrong with the command
# itself, bsub might return immediately with an error message, which
# will appear in the job script output file. We want to retrieve that
# so that we can report the status accurately.
#
# At this point, we could run bjobs or bhist to determine the status,
        # but it has been found to be too slow, especially when running 1000s
# of jobs. Plus, we have to read the job script output anyway to look
# for those error messages.
#
# So we just read this file to determine both, the status and extract
# the error message, rather than running bjobs or bhist. But there is
# one more complication to deal with - if we read the file now, it is
# possible that it may not have been fully updated. We will try reading
# it anyway. If we are unable to find what we are looking for, then we
# will resume reading it again at the next poll. We will do this upto
# max_poll_retries times before giving up and flagging an error.
#
        # TODO: Consider using the IBM Platform LSF Python APIs instead.
# (deferred due to shortage of time / resources).
# TODO: Parse job telemetry data for performance insights.
exit_code = self._get_job_exit_code()
if exit_code is not None:
self.exit_code = exit_code
status = "F" if exit_code else "P" if self._has_passed() else "F"
return self._finish(status=status)
else:
self.num_poll_retries += 1
# Fail the test if we have reached the max polling retries.
if self.num_poll_retries == LsfLauncher.max_poll_retries:
return self._finish(status="F",
err_msg="ERROR: Reached max retries while "
"reading job script output {} to determine"
" the outcome.".format(
self.job_script_out))
return "D"
def _get_job_exit_code(self):
'''Read the job script output to retrieve the exit code.
Also read the error message if any, which will appear at the beginning
of the log file followed by bsub's standard 'email' format output. It
looks something like this:
<stderr messages>
------------------------------------------------------------
Sender: LSF System <...>
Subject: ...
...
Successfully completed.
<OR>
Exited with exit code 1.
...
The extracted stderr messages are logged to self.fail_msg. The line
indicating whether it was successful or it failed with an exit code
is used to return the exit code.
Returns the exit code if found, else None.
'''
# Job script output must have been opened already.
assert self.job_script_out_fd
for line in self.job_script_out_fd:
if not self.job_script_out_err_msg_found:
m = re.match("^Sender", line)
if m:
self.job_script_out_err_msg = "".join(
self.job_script_out_err_msg[:-1]).strip()
self.job_script_out_err_msg_found = True
else:
self.job_script_out_err_msg.append(line)
else:
m = re.match(r"^Exited with exit code (\d+).\n$", line)
if m:
self.fail_msg += self.job_script_out_err_msg
                    return int(m.group(1))
if not self.job_script_out_err_msg:
m = re.match(r"^Successfully completed.\n$", line)
if m:
return 0
return None
def _kill_job_array(self, err_msg):
'''If there is an LSF error, then kill all jobs in the array this job
belongs to.'''
for job in LsfLauncher.jobs[self.deploy.sim_cfg][self.deploy.job_name]:
job._finish("K", err_msg)
def kill(self):
if self.job_id:
try:
subprocess.run(["bkill", "-s", "SIGTERM", self.job_id],
check=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except subprocess.CalledProcessError as e:
log.error("Failed to kill job: {}".format(
e.stderr.decode("utf-8").strip()))
else:
log.error("Job ID for %s not found", self.name)
self._post_finish('K')
|
the-stack_0_20525 | import json
import os
import sys
import requests
import requests.auth
ENV_VARS = ["CLIENT_ID", "CLIENT_SECRET", "USERNAME", "PASSWORD"]
API_BASE = "oauth.reddit.com"
WWW_BASE = "www.reddit.com"
USER_AGENT = "Cargo-Crates v0.0.1"
def get(url, access_token, params={}):
return requests.get(
url,
params=params,
headers={"User-Agent": USER_AGENT, "Authorization": f"bearer {access_token}"},
)
def get_access_token() -> str:
client_auth = requests.auth.HTTPBasicAuth(
os.getenv("CLIENT_ID"), os.getenv("CLIENT_SECRET")
)
post_data = {
"grant_type": "password",
"username": os.getenv("USERNAME"),
"password": os.getenv("PASSWORD"),
}
headers = {"User-Agent": USER_AGENT}
response = requests.post(
"https://www.reddit.com/api/v1/access_token",
auth=client_auth,
data=post_data,
headers=headers,
)
return response.json().get("access_token")
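# Illustrative: the password-grant token endpoint responds with JSON such as
#   {"access_token": "<token>", "token_type": "bearer",
#    "expires_in": 3600, "scope": "*"}
# so get_access_token() returns only the "access_token" string.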
def show_bearer_token():
"""
Simply generates and returns a bearer token with the provided credentials.
"""
access_token = get_access_token()
return f"{access_token}"
def saved(username: str, start_date: str = None):
"""
Returns the most recent saved posts of the provided user.
"""
access_token = get_access_token()
params = {"limit": 100}
url = f"https://{API_BASE}/user/{username}/saved"
r = get(url, access_token, params)
return r.json().get("data", {}).get("children", [])
def search(keyword: str, subreddit_path: str = None):
access_token = get_access_token()
params = {"type": "link", "sort": "new", "limit": 100, "q": keyword}
url = f"https://{API_BASE}/search.json"
if subreddit_path is not None:
params["restrict_sr"] = "true"
url = f"https://{API_BASE}/{subreddit_path}/search.json"
r = get(url, access_token, params)
print(url, params)
return r.json().get("data", {}).get("children", [])
SUPPORTED_CMDS = ["saved", "search", "show_bearer_token"]
if __name__ == "__main__":
if len(sys.argv) < 2:
print("ERR: Expected 2 arguments")
exit(1)
cmd = sys.argv[1]
if cmd not in SUPPORTED_CMDS:
print(f"ERR: '{cmd}' is not a supported commmand.")
exit(1)
if cmd == "saved":
username = sys.argv[2]
start_date = None
if os.getenv("start"):
start_date = os.getenv("start")
result = saved(username)
if cmd == "search":
# We (optionally) take the subreddit as the first argument because the search term can be arbitrarily long
search_term = " ".join(sys.argv[2:])
subreddit = None
if sys.argv[2].startswith("r/"):
subreddit = sys.argv[2]
search_term = " ".join(sys.argv[3:])
result = search(search_term, subreddit)
if cmd == "show_bearer_token":
result = [show_bearer_token()]
for r in result:
try:
print(json.dumps(r))
except BrokenPipeError:
# We both catch the error *and* close stderr (stdout is already closed)
# Reference: https://stackoverflow.com/a/26738736
sys.stderr.close()
exit(0) |
the-stack_0_20526 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
import time
"""
info:
author:CriseLYJ
github:https://github.com/CriseLYJ/
update_time:2019-3-7
"""
class loginTB(object):
def __init__(self):
self.driver = webdriver.Chrome()
self.driver.maximize_window()
        # Set up an explicit wait
self.wait = WebDriverWait(self.driver,5)
def login(self,key,pw):
url = 'https://login.taobao.com/member/login.jhtml'
self.driver.get(url)
try:
            # Locate the "login with password" link
login_links = self.wait.until(
EC.presence_of_element_located((By.XPATH,"//a[text()='密码登录']"))
)
login_links.click()
except TimeoutException as e:
print("找不到登陆入口,原因是:",e)
else:
            # Enter the username and password
input_key = self.wait.until(
EC.presence_of_element_located((By.XPATH,"//input[@name='TPL_username']"))
)
input_pw = self.wait.until(
EC.presence_of_element_located((By.XPATH,"//input[@name='TPL_password']"))
)
input_key.clear()
input_pw.clear()
input_key.send_keys(key)
input_pw.send_keys(pw)
self.driver.find_element_by_xpath('//*[@id="J_SubmitStatic"]').click()
try:
                # Probe for the user info panel; if it cannot be found the login failed
user_info = self.wait.until(
EC.presence_of_element_located((By.XPATH,"//div[@class='m-userinfo']"))
)
                print('Login successful, landed on the personal center page')
except TimeoutException:
try:
self.driver.find_element_by_xpath("//div[@class='avatar-wrapper']")
                    print('Login successful, landed on the Taobao homepage')
except:
try:
                        # Look for the phone-verification frame; if it is found, SMS verification is required
frame = self.wait.until(
EC.presence_of_element_located((By.XPATH,'//div[@class="login-check-left"]/iframe'))
)
                        print('This login requires phone verification...')
except TimeoutException:
                        # No phone-verification frame means the username or password is wrong; ask again
                        print('Login failed, the username or password looks wrong, please check and log in again...')
                        key = input('Please re-enter the username: ').strip()
                        pw = input('Please re-enter the password: ').strip()
self.login(key,pw)
else:
self.driver.switch_to.frame(frame)
phone_num = self.wait.until(
EC.presence_of_element_located((By.XPATH,'//button[@id="J_GetCode"]'))
)
phone_num.click()
                        phone_key = input('Please enter the SMS verification code: ').strip()
key_send = self.wait.until(
EC.presence_of_element_located((By.XPATH,'//input[@id="J_Phone_Checkcode"]'))
)
key_send.send_keys(phone_key)
go_button = self.wait.until(
EC.presence_of_element_located((By.XPATH,'//input[@type="submit"]'))
)
go_button.click()
user_info = self.wait.until(
EC.presence_of_element_located((By.XPATH, "//div[@class='m-userinfo']"))
)
                        print('Phone verification login successful!!!')
if __name__ == '__main__':
t = time.time()
l = loginTB()
l.login('username','password')
    print('Login finished in {:.2f} seconds'.format(float(time.time()-t))) |
the-stack_0_20528 | import logging
import os
import pathlib
import subprocess
from typing import Iterable
from septentrion import configuration
logger = logging.getLogger(__name__)
class SQLRunnerException(Exception):
pass
class Script:
def __init__(
self,
settings: configuration.Settings,
file_handler: Iterable[str],
path: pathlib.Path,
):
self.settings = settings
self.file_lines = list(file_handler)
self.path = path
def run(self):
if any("--meta-psql:" in line for line in self.file_lines):
self._run_with_meta_loop()
else:
self._run_simple()
def _env(self):
environment = {
"PGHOST": self.settings.HOST,
"PGPORT": self.settings.PORT,
"PGDATABASE": self.settings.DBNAME,
"PGUSER": self.settings.USERNAME,
"PGPASSWORD": self.settings.PASSWORD,
}
return {key: str(value) for key, value in environment.items() if value}
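    # Illustrative: with HOST="db.example.com", PORT=5432, DBNAME="app" and no
    # credentials set, _env() returns
    #   {"PGHOST": "db.example.com", "PGPORT": "5432", "PGDATABASE": "app"};
    # falsy settings are dropped so psql falls back to its own defaults.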
def _run_simple(self):
try:
cmd = subprocess.run(
["psql", "--set", "ON_ERROR_STOP=on", "-f", str(self.path)],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
check=True,
# environment has precedence over os.environ
env={**os.environ, **self._env()},
)
except FileNotFoundError:
raise RuntimeError(
"Septentrion requires the 'psql' executable to be present in "
"the PATH."
)
except subprocess.CalledProcessError as e:
msg = "Error during migration: {}".format(e.stderr.decode("utf-8"))
raise SQLRunnerException(msg) from e
return cmd.stdout.decode("utf-8")
def _run_with_meta_loop(self):
KEYWORDS = ["INSERT", "UPDATE", "DELETE"]
rows_remaining = True
while rows_remaining:
out = self._run_simple()
# we can stop once all the write operations return 0 rows
for line in out.split("\n"):
rows_remaining = any(
keyword in line and keyword + " 0" not in line
for keyword in KEYWORDS
)
# we still have work to do, we can go back to the main loop
if rows_remaining:
break
|
the-stack_0_20529 | from __future__ import absolute_import
from __future__ import unicode_literals
import logging
from tron.core.actionrun import ActionRun
from tron.core.actionrun import MesosActionRun
from tron.core.actionrun import SSHActionRun
log = logging.getLogger(__name__)
def filter_action_runs_needing_recovery(action_runs):
ssh_runs = []
mesos_runs = []
for action_run in action_runs:
if isinstance(action_run, SSHActionRun):
if action_run.state == ActionRun.UNKNOWN:
ssh_runs.append(action_run)
elif isinstance(action_run, MesosActionRun):
if action_run.state == ActionRun.UNKNOWN and action_run.end_time is None:
mesos_runs.append(action_run)
return ssh_runs, mesos_runs
def launch_recovery_actionruns_for_job_runs(job_runs, master_action_runner):
for run in job_runs:
if not run._action_runs:
log.info(f'Skipping recovery of {run} with no action runs (may have been cleaned up)')
continue
ssh_runs, mesos_runs = filter_action_runs_needing_recovery(run._action_runs)
for action_run in ssh_runs:
action_run.recover()
for action_run in mesos_runs:
action_run.recover()
|
the-stack_0_20530 | import re
import os
from conans import ConanFile, CMake, tools
from conans.errors import ConanInvalidConfiguration
class ScopeguardConan(ConanFile):
name = "scope-guard"
license = "MIT"
author = "offa <offa@github>"
url = "https://github.com.offa/scope-guard"
description = "Implementation of Scoped Guards and Unique Resource as proposed in P0052."
homepage = "https://github.com/offa/scope-guard"
topics = ("cpp", "cpp17", "p0052", "scope-guard",
"scope-exit", "scope-fail", "scope-success", "unique-resource", "cmake")
generators = ("cmake_find_package", "cmake_paths")
exports = ["LICENSE"]
exports_sources = ("CMakeLists.txt", "include/*", "test/*", "cmake/*")
options = {
"unittest": ["ON", "OFF"],
"enable_compat_header": ["ON", "OFF"]
}
default_options = (
"unittest=ON",
"enable_compat_header=OFF"
)
__requirements = [
"catch2/2.13.6",
"trompeloeil/41"
]
def set_version(self):
cmake_lists_content = tools.load(os.path.join(self.recipe_folder, "CMakeLists.txt"))
project_match = re.search(r'project\s*\((.+?)\)', cmake_lists_content, re.DOTALL)
if not project_match:
raise ConanInvalidConfiguration("No valid project() statement found in CMakeLists.txt")
project_params = project_match.group(1).split()
version_string = project_params[project_params.index("VERSION") + 1]
if not re.search(r'\d+\.\d+\.\d+(?:\.\d)?', version_string):
raise ConanInvalidConfiguration("No valid version found in CMakeLists.txt")
self.version = version_string
self.output.info("Project version from CMakeLists.txt: '{}'".format(self.version))
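    # Illustrative: a CMakeLists.txt containing
    #   project(scope-guard VERSION 0.3.8 LANGUAGES CXX)
    # yields project_params == ['scope-guard', 'VERSION', '0.3.8', 'LANGUAGES',
    # 'CXX'], so self.version becomes '0.3.8' (version number is a placeholder).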
def requirements(self):
if self.options.unittest:
for req in self.__requirements:
self.requires(req)
def package(self):
self.copy("LICENSE", dst="license")
cmake = self._configure_cmake()
cmake.install()
def _configure_cmake(self):
cmake = CMake(self)
cmake.definitions["UNITTEST"] = self.options.unittest
cmake.definitions["ENABLE_COMPAT_HEADER"] = self.options.enable_compat_header
cmake.configure(build_folder="build")
return cmake
|
the-stack_0_20533 | # IMPORTANT: the same tests are run from "test_xml_etree_c" in order
# to ensure consistency between the C implementation and the Python
# implementation.
#
# For this purpose, the module-level "ET" symbol is temporarily
# monkey-patched when running the "test_xml_etree_c" test suite.
import copy
import functools
import html
import io
import itertools
import locale
import operator
import os
import pickle
import sys
import textwrap
import types
import unittest
import warnings
import weakref
from functools import partial
from itertools import product, islice
from test import support
from test.support import TESTFN, findfile, import_fresh_module, gc_collect, swap_attr
# pyET is the pure-Python implementation.
#
# ET is pyET in test_xml_etree and is the C accelerated version in
# test_xml_etree_c.
pyET = None
ET = None
SIMPLE_XMLFILE = findfile("simple.xml", subdir="xmltestdata")
try:
SIMPLE_XMLFILE.encode("utf-8")
except UnicodeEncodeError:
raise unittest.SkipTest("filename is not encodable to utf8")
SIMPLE_NS_XMLFILE = findfile("simple-ns.xml", subdir="xmltestdata")
UTF8_BUG_XMLFILE = findfile("expat224_utf8_bug.xml", subdir="xmltestdata")
SAMPLE_XML = """\
<body>
<tag class='a'>text</tag>
<tag class='b' />
<section>
<tag class='b' id='inner'>subtext</tag>
</section>
</body>
"""
SAMPLE_SECTION = """\
<section>
<tag class='b' id='inner'>subtext</tag>
<nexttag />
<nextsection>
<tag />
</nextsection>
</section>
"""
SAMPLE_XML_NS = """
<body xmlns="http://effbot.org/ns">
<tag>text</tag>
<tag />
<section>
<tag>subtext</tag>
</section>
</body>
"""
SAMPLE_XML_NS_ELEMS = """
<root>
<h:table xmlns:h="hello">
<h:tr>
<h:td>Apples</h:td>
<h:td>Bananas</h:td>
</h:tr>
</h:table>
<f:table xmlns:f="foo">
<f:name>African Coffee Table</f:name>
<f:width>80</f:width>
<f:length>120</f:length>
</f:table>
</root>
"""
ENTITY_XML = """\
<!DOCTYPE points [
<!ENTITY % user-entities SYSTEM 'user-entities.xml'>
%user-entities;
]>
<document>&entity;</document>
"""
EXTERNAL_ENTITY_XML = """\
<!DOCTYPE points [
<!ENTITY entity SYSTEM "file:///non-existing-file.xml">
]>
<document>&entity;</document>
"""
def checkwarnings(*filters, quiet=False):
def decorator(test):
def newtest(*args, **kwargs):
with support.check_warnings(*filters, quiet=quiet):
test(*args, **kwargs)
functools.update_wrapper(newtest, test)
return newtest
return decorator
class ModuleTest(unittest.TestCase):
def test_sanity(self):
# Import sanity.
from xml.etree import ElementTree
from xml.etree import ElementInclude
from xml.etree import ElementPath
def test_all(self):
names = ("xml.etree.ElementTree", "_elementtree")
support.check__all__(self, ET, names, blacklist=("HTML_EMPTY",))
def serialize(elem, to_string=True, encoding='unicode', **options):
if encoding != 'unicode':
file = io.BytesIO()
else:
file = io.StringIO()
tree = ET.ElementTree(elem)
tree.write(file, encoding=encoding, **options)
if to_string:
return file.getvalue()
else:
file.seek(0)
return file
def summarize_list(seq):
return [elem.tag for elem in seq]
class ElementTestCase:
@classmethod
def setUpClass(cls):
cls.modules = {pyET, ET}
def pickleRoundTrip(self, obj, name, dumper, loader, proto):
save_m = sys.modules[name]
try:
sys.modules[name] = dumper
temp = pickle.dumps(obj, proto)
sys.modules[name] = loader
result = pickle.loads(temp)
except pickle.PicklingError as pe:
# pyET must be second, because pyET may be (equal to) ET.
human = dict([(ET, "cET"), (pyET, "pyET")])
raise support.TestFailed("Failed to round-trip %r from %r to %r"
% (obj,
human.get(dumper, dumper),
human.get(loader, loader))) from pe
finally:
sys.modules[name] = save_m
return result
def assertEqualElements(self, alice, bob):
self.assertIsInstance(alice, (ET.Element, pyET.Element))
self.assertIsInstance(bob, (ET.Element, pyET.Element))
self.assertEqual(len(list(alice)), len(list(bob)))
for x, y in zip(alice, bob):
self.assertEqualElements(x, y)
properties = operator.attrgetter('tag', 'tail', 'text', 'attrib')
self.assertEqual(properties(alice), properties(bob))
# --------------------------------------------------------------------
# element tree tests
class ElementTreeTest(unittest.TestCase):
def serialize_check(self, elem, expected):
self.assertEqual(serialize(elem), expected)
def test_interface(self):
# Test element tree interface.
def check_string(string):
len(string)
for char in string:
self.assertEqual(len(char), 1,
msg="expected one-character string, got %r" % char)
new_string = string + ""
new_string = string + " "
string[:0]
def check_mapping(mapping):
len(mapping)
keys = mapping.keys()
items = mapping.items()
for key in keys:
item = mapping[key]
mapping["key"] = "value"
self.assertEqual(mapping["key"], "value",
msg="expected value string, got %r" % mapping["key"])
def check_element(element):
self.assertTrue(ET.iselement(element), msg="not an element")
direlem = dir(element)
for attr in 'tag', 'attrib', 'text', 'tail':
self.assertTrue(hasattr(element, attr),
msg='no %s member' % attr)
self.assertIn(attr, direlem,
msg='no %s visible by dir' % attr)
check_string(element.tag)
check_mapping(element.attrib)
if element.text is not None:
check_string(element.text)
if element.tail is not None:
check_string(element.tail)
for elem in element:
check_element(elem)
element = ET.Element("tag")
check_element(element)
tree = ET.ElementTree(element)
check_element(tree.getroot())
element = ET.Element("t\xe4g", key="value")
tree = ET.ElementTree(element)
self.assertRegex(repr(element), r"^<Element 't\xe4g' at 0x.*>$")
element = ET.Element("tag", key="value")
# Make sure all standard element methods exist.
def check_method(method):
self.assertTrue(hasattr(method, '__call__'),
msg="%s not callable" % method)
check_method(element.append)
check_method(element.extend)
check_method(element.insert)
check_method(element.remove)
check_method(element.getchildren)
check_method(element.find)
check_method(element.iterfind)
check_method(element.findall)
check_method(element.findtext)
check_method(element.clear)
check_method(element.get)
check_method(element.set)
check_method(element.keys)
check_method(element.items)
check_method(element.iter)
check_method(element.itertext)
check_method(element.getiterator)
# These methods return an iterable. See bug 6472.
def check_iter(it):
check_method(it.__next__)
check_iter(element.iterfind("tag"))
check_iter(element.iterfind("*"))
check_iter(tree.iterfind("tag"))
check_iter(tree.iterfind("*"))
# These aliases are provided:
self.assertEqual(ET.XML, ET.fromstring)
self.assertEqual(ET.PI, ET.ProcessingInstruction)
def test_set_attribute(self):
element = ET.Element('tag')
self.assertEqual(element.tag, 'tag')
element.tag = 'Tag'
self.assertEqual(element.tag, 'Tag')
element.tag = 'TAG'
self.assertEqual(element.tag, 'TAG')
self.assertIsNone(element.text)
element.text = 'Text'
self.assertEqual(element.text, 'Text')
element.text = 'TEXT'
self.assertEqual(element.text, 'TEXT')
self.assertIsNone(element.tail)
element.tail = 'Tail'
self.assertEqual(element.tail, 'Tail')
element.tail = 'TAIL'
self.assertEqual(element.tail, 'TAIL')
self.assertEqual(element.attrib, {})
element.attrib = {'a': 'b', 'c': 'd'}
self.assertEqual(element.attrib, {'a': 'b', 'c': 'd'})
element.attrib = {'A': 'B', 'C': 'D'}
self.assertEqual(element.attrib, {'A': 'B', 'C': 'D'})
def test_simpleops(self):
# Basic method sanity checks.
elem = ET.XML("<body><tag/></body>")
self.serialize_check(elem, '<body><tag /></body>')
e = ET.Element("tag2")
elem.append(e)
self.serialize_check(elem, '<body><tag /><tag2 /></body>')
elem.remove(e)
self.serialize_check(elem, '<body><tag /></body>')
elem.insert(0, e)
self.serialize_check(elem, '<body><tag2 /><tag /></body>')
elem.remove(e)
elem.extend([e])
self.serialize_check(elem, '<body><tag /><tag2 /></body>')
elem.remove(e)
element = ET.Element("tag", key="value")
self.serialize_check(element, '<tag key="value" />') # 1
subelement = ET.Element("subtag")
element.append(subelement)
self.serialize_check(element, '<tag key="value"><subtag /></tag>') # 2
element.insert(0, subelement)
self.serialize_check(element,
'<tag key="value"><subtag /><subtag /></tag>') # 3
element.remove(subelement)
self.serialize_check(element, '<tag key="value"><subtag /></tag>') # 4
element.remove(subelement)
self.serialize_check(element, '<tag key="value" />') # 5
with self.assertRaises(ValueError) as cm:
element.remove(subelement)
self.assertEqual(str(cm.exception), 'list.remove(x): x not in list')
self.serialize_check(element, '<tag key="value" />') # 6
element[0:0] = [subelement, subelement, subelement]
self.serialize_check(element[1], '<subtag />')
self.assertEqual(element[1:9], [element[1], element[2]])
self.assertEqual(element[:9:2], [element[0], element[2]])
del element[1:2]
self.serialize_check(element,
'<tag key="value"><subtag /><subtag /></tag>')
def test_cdata(self):
# Test CDATA handling (etc).
self.serialize_check(ET.XML("<tag>hello</tag>"),
'<tag>hello</tag>')
self.serialize_check(ET.XML("<tag>hello</tag>"),
'<tag>hello</tag>')
self.serialize_check(ET.XML("<tag><![CDATA[hello]]></tag>"),
'<tag>hello</tag>')
def test_file_init(self):
stringfile = io.BytesIO(SAMPLE_XML.encode("utf-8"))
tree = ET.ElementTree(file=stringfile)
self.assertEqual(tree.find("tag").tag, 'tag')
self.assertEqual(tree.find("section/tag").tag, 'tag')
tree = ET.ElementTree(file=SIMPLE_XMLFILE)
self.assertEqual(tree.find("element").tag, 'element')
self.assertEqual(tree.find("element/../empty-element").tag,
'empty-element')
def test_path_cache(self):
# Check that the path cache behaves sanely.
from xml.etree import ElementPath
elem = ET.XML(SAMPLE_XML)
for i in range(10): ET.ElementTree(elem).find('./'+str(i))
cache_len_10 = len(ElementPath._cache)
for i in range(10): ET.ElementTree(elem).find('./'+str(i))
self.assertEqual(len(ElementPath._cache), cache_len_10)
for i in range(20): ET.ElementTree(elem).find('./'+str(i))
self.assertGreater(len(ElementPath._cache), cache_len_10)
for i in range(600): ET.ElementTree(elem).find('./'+str(i))
self.assertLess(len(ElementPath._cache), 500)
def test_copy(self):
# Test copy handling (etc).
import copy
e1 = ET.XML("<tag>hello<foo/></tag>")
e2 = copy.copy(e1)
e3 = copy.deepcopy(e1)
e1.find("foo").tag = "bar"
self.serialize_check(e1, '<tag>hello<bar /></tag>')
self.serialize_check(e2, '<tag>hello<bar /></tag>')
self.serialize_check(e3, '<tag>hello<foo /></tag>')
def test_attrib(self):
# Test attribute handling.
elem = ET.Element("tag")
elem.get("key") # 1.1
self.assertEqual(elem.get("key", "default"), 'default') # 1.2
elem.set("key", "value")
self.assertEqual(elem.get("key"), 'value') # 1.3
elem = ET.Element("tag", key="value")
self.assertEqual(elem.get("key"), 'value') # 2.1
self.assertEqual(elem.attrib, {'key': 'value'}) # 2.2
attrib = {"key": "value"}
elem = ET.Element("tag", attrib)
attrib.clear() # check for aliasing issues
self.assertEqual(elem.get("key"), 'value') # 3.1
self.assertEqual(elem.attrib, {'key': 'value'}) # 3.2
attrib = {"key": "value"}
elem = ET.Element("tag", **attrib)
attrib.clear() # check for aliasing issues
self.assertEqual(elem.get("key"), 'value') # 4.1
self.assertEqual(elem.attrib, {'key': 'value'}) # 4.2
elem = ET.Element("tag", {"key": "other"}, key="value")
self.assertEqual(elem.get("key"), 'value') # 5.1
self.assertEqual(elem.attrib, {'key': 'value'}) # 5.2
elem = ET.Element('test')
elem.text = "aa"
elem.set('testa', 'testval')
elem.set('testb', 'test2')
self.assertEqual(ET.tostring(elem),
b'<test testa="testval" testb="test2">aa</test>')
self.assertEqual(sorted(elem.keys()), ['testa', 'testb'])
self.assertEqual(sorted(elem.items()),
[('testa', 'testval'), ('testb', 'test2')])
self.assertEqual(elem.attrib['testb'], 'test2')
elem.attrib['testb'] = 'test1'
elem.attrib['testc'] = 'test2'
self.assertEqual(ET.tostring(elem),
b'<test testa="testval" testb="test1" testc="test2">aa</test>')
elem = ET.Element('test')
elem.set('a', '\r')
elem.set('b', '\r\n')
elem.set('c', '\t\n\r ')
elem.set('d', '\n\n')
self.assertEqual(ET.tostring(elem),
b'<test a=" " b=" " c="	 " d=" " />')
def test_makeelement(self):
# Test makeelement handling.
elem = ET.Element("tag")
attrib = {"key": "value"}
subelem = elem.makeelement("subtag", attrib)
self.assertIsNot(subelem.attrib, attrib, msg="attrib aliasing")
elem.append(subelem)
self.serialize_check(elem, '<tag><subtag key="value" /></tag>')
elem.clear()
self.serialize_check(elem, '<tag />')
elem.append(subelem)
self.serialize_check(elem, '<tag><subtag key="value" /></tag>')
elem.extend([subelem, subelem])
self.serialize_check(elem,
'<tag><subtag key="value" /><subtag key="value" /><subtag key="value" /></tag>')
elem[:] = [subelem]
self.serialize_check(elem, '<tag><subtag key="value" /></tag>')
elem[:] = tuple([subelem])
self.serialize_check(elem, '<tag><subtag key="value" /></tag>')
def test_parsefile(self):
# Test parsing from file.
tree = ET.parse(SIMPLE_XMLFILE)
stream = io.StringIO()
tree.write(stream, encoding='unicode')
self.assertEqual(stream.getvalue(),
'<root>\n'
' <element key="value">text</element>\n'
' <element>text</element>tail\n'
' <empty-element />\n'
'</root>')
tree = ET.parse(SIMPLE_NS_XMLFILE)
stream = io.StringIO()
tree.write(stream, encoding='unicode')
self.assertEqual(stream.getvalue(),
'<ns0:root xmlns:ns0="namespace">\n'
' <ns0:element key="value">text</ns0:element>\n'
' <ns0:element>text</ns0:element>tail\n'
' <ns0:empty-element />\n'
'</ns0:root>')
with open(SIMPLE_XMLFILE) as f:
data = f.read()
parser = ET.XMLParser()
self.assertRegex(parser.version, r'^Expat ')
parser.feed(data)
self.serialize_check(parser.close(),
'<root>\n'
' <element key="value">text</element>\n'
' <element>text</element>tail\n'
' <empty-element />\n'
'</root>')
target = ET.TreeBuilder()
parser = ET.XMLParser(target=target)
parser.feed(data)
self.serialize_check(parser.close(),
'<root>\n'
' <element key="value">text</element>\n'
' <element>text</element>tail\n'
' <empty-element />\n'
'</root>')
def test_parseliteral(self):
element = ET.XML("<html><body>text</body></html>")
self.assertEqual(ET.tostring(element, encoding='unicode'),
'<html><body>text</body></html>')
element = ET.fromstring("<html><body>text</body></html>")
self.assertEqual(ET.tostring(element, encoding='unicode'),
'<html><body>text</body></html>')
sequence = ["<html><body>", "text</bo", "dy></html>"]
element = ET.fromstringlist(sequence)
self.assertEqual(ET.tostring(element),
b'<html><body>text</body></html>')
self.assertEqual(b"".join(ET.tostringlist(element)),
b'<html><body>text</body></html>')
self.assertEqual(ET.tostring(element, "ascii"),
b"<?xml version='1.0' encoding='ascii'?>\n"
b"<html><body>text</body></html>")
_, ids = ET.XMLID("<html><body>text</body></html>")
self.assertEqual(len(ids), 0)
_, ids = ET.XMLID("<html><body id='body'>text</body></html>")
self.assertEqual(len(ids), 1)
self.assertEqual(ids["body"].tag, 'body')
def test_iterparse(self):
# Test iterparse interface.
iterparse = ET.iterparse
context = iterparse(SIMPLE_XMLFILE)
action, elem = next(context)
self.assertEqual((action, elem.tag), ('end', 'element'))
self.assertEqual([(action, elem.tag) for action, elem in context], [
('end', 'element'),
('end', 'empty-element'),
('end', 'root'),
])
self.assertEqual(context.root.tag, 'root')
context = iterparse(SIMPLE_NS_XMLFILE)
self.assertEqual([(action, elem.tag) for action, elem in context], [
('end', '{namespace}element'),
('end', '{namespace}element'),
('end', '{namespace}empty-element'),
('end', '{namespace}root'),
])
events = ()
context = iterparse(SIMPLE_XMLFILE, events)
self.assertEqual([(action, elem.tag) for action, elem in context], [])
events = ()
context = iterparse(SIMPLE_XMLFILE, events=events)
self.assertEqual([(action, elem.tag) for action, elem in context], [])
events = ("start", "end")
context = iterparse(SIMPLE_XMLFILE, events)
self.assertEqual([(action, elem.tag) for action, elem in context], [
('start', 'root'),
('start', 'element'),
('end', 'element'),
('start', 'element'),
('end', 'element'),
('start', 'empty-element'),
('end', 'empty-element'),
('end', 'root'),
])
events = ("start", "end", "start-ns", "end-ns")
context = iterparse(SIMPLE_NS_XMLFILE, events)
self.assertEqual([(action, elem.tag) if action in ("start", "end")
else (action, elem)
for action, elem in context], [
('start-ns', ('', 'namespace')),
('start', '{namespace}root'),
('start', '{namespace}element'),
('end', '{namespace}element'),
('start', '{namespace}element'),
('end', '{namespace}element'),
('start', '{namespace}empty-element'),
('end', '{namespace}empty-element'),
('end', '{namespace}root'),
('end-ns', None),
])
events = ('start-ns', 'end-ns')
context = iterparse(io.StringIO(r"<root xmlns=''/>"), events)
res = [action for action, elem in context]
self.assertEqual(res, ['start-ns', 'end-ns'])
events = ("start", "end", "bogus")
with open(SIMPLE_XMLFILE, "rb") as f:
with self.assertRaises(ValueError) as cm:
iterparse(f, events)
self.assertFalse(f.closed)
self.assertEqual(str(cm.exception), "unknown event 'bogus'")
with support.check_no_resource_warning(self):
with self.assertRaises(ValueError) as cm:
iterparse(SIMPLE_XMLFILE, events)
self.assertEqual(str(cm.exception), "unknown event 'bogus'")
del cm
source = io.BytesIO(
b"<?xml version='1.0' encoding='iso-8859-1'?>\n"
b"<body xmlns='http://éffbot.org/ns'\n"
b" xmlns:cl\xe9='http://effbot.org/ns'>text</body>\n")
events = ("start-ns",)
context = iterparse(source, events)
self.assertEqual([(action, elem) for action, elem in context], [
('start-ns', ('', 'http://\xe9ffbot.org/ns')),
('start-ns', ('cl\xe9', 'http://effbot.org/ns')),
])
source = io.StringIO("<document />junk")
it = iterparse(source)
action, elem = next(it)
self.assertEqual((action, elem.tag), ('end', 'document'))
with self.assertRaises(ET.ParseError) as cm:
next(it)
self.assertEqual(str(cm.exception),
'junk after document element: line 1, column 12')
self.addCleanup(support.unlink, TESTFN)
with open(TESTFN, "wb") as f:
f.write(b"<document />junk")
it = iterparse(TESTFN)
action, elem = next(it)
self.assertEqual((action, elem.tag), ('end', 'document'))
with support.check_no_resource_warning(self):
with self.assertRaises(ET.ParseError) as cm:
next(it)
self.assertEqual(str(cm.exception),
'junk after document element: line 1, column 12')
del cm, it
def test_writefile(self):
elem = ET.Element("tag")
elem.text = "text"
self.serialize_check(elem, '<tag>text</tag>')
ET.SubElement(elem, "subtag").text = "subtext"
self.serialize_check(elem, '<tag>text<subtag>subtext</subtag></tag>')
# Test tag suppression
elem.tag = None
self.serialize_check(elem, 'text<subtag>subtext</subtag>')
elem.insert(0, ET.Comment("comment"))
self.serialize_check(elem,
'text<!--comment--><subtag>subtext</subtag>') # assumes 1.3
elem[0] = ET.PI("key", "value")
self.serialize_check(elem, 'text<?key value?><subtag>subtext</subtag>')
def test_custom_builder(self):
# Test parser w. custom builder.
with open(SIMPLE_XMLFILE) as f:
data = f.read()
class Builder(list):
def start(self, tag, attrib):
self.append(("start", tag))
def end(self, tag):
self.append(("end", tag))
def data(self, text):
pass
builder = Builder()
parser = ET.XMLParser(target=builder)
parser.feed(data)
self.assertEqual(builder, [
('start', 'root'),
('start', 'element'),
('end', 'element'),
('start', 'element'),
('end', 'element'),
('start', 'empty-element'),
('end', 'empty-element'),
('end', 'root'),
])
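# A target that also defines pi, comment, start_ns and end_ns receives those callbacks too.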
with open(SIMPLE_NS_XMLFILE) as f:
data = f.read()
class Builder(list):
def start(self, tag, attrib):
self.append(("start", tag))
def end(self, tag):
self.append(("end", tag))
def data(self, text):
pass
def pi(self, target, data):
self.append(("pi", target, data))
def comment(self, data):
self.append(("comment", data))
def start_ns(self, prefix, uri):
self.append(("start-ns", prefix, uri))
def end_ns(self, prefix):
self.append(("end-ns", prefix))
builder = Builder()
parser = ET.XMLParser(target=builder)
parser.feed(data)
self.assertEqual(builder, [
('pi', 'pi', 'data'),
('comment', ' comment '),
('start-ns', '', 'namespace'),
('start', '{namespace}root'),
('start', '{namespace}element'),
('end', '{namespace}element'),
('start', '{namespace}element'),
('end', '{namespace}element'),
('start', '{namespace}empty-element'),
('end', '{namespace}empty-element'),
('end', '{namespace}root'),
('end-ns', ''),
])
def test_custom_builder_only_end_ns(self):
class Builder(list):
def end_ns(self, prefix):
self.append(("end-ns", prefix))
builder = Builder()
parser = ET.XMLParser(target=builder)
parser.feed(textwrap.dedent("""\
<?pi data?>
<!-- comment -->
<root xmlns='namespace' xmlns:p='pns' xmlns:a='ans'>
<a:element key='value'>text</a:element>
<p:element>text</p:element>tail
<empty-element/>
</root>
"""))
self.assertEqual(builder, [
('end-ns', 'a'),
('end-ns', 'p'),
('end-ns', ''),
])
# Element.getchildren() and ElementTree.getiterator() are deprecated.
@checkwarnings(("This method will be removed in future versions. "
"Use .+ instead.",
DeprecationWarning))
def test_getchildren(self):
# Test Element.getchildren()
with open(SIMPLE_XMLFILE, "rb") as f:
tree = ET.parse(f)
self.assertEqual([summarize_list(elem.getchildren())
for elem in tree.getroot().iter()], [
['element', 'element', 'empty-element'],
[],
[],
[],
])
self.assertEqual([summarize_list(elem.getchildren())
for elem in tree.getiterator()], [
['element', 'element', 'empty-element'],
[],
[],
[],
])
elem = ET.XML(SAMPLE_XML)
self.assertEqual(len(elem.getchildren()), 3)
self.assertEqual(len(elem[2].getchildren()), 1)
self.assertEqual(elem[:], elem.getchildren())
child1 = elem[0]
child2 = elem[2]
del elem[1:2]
self.assertEqual(len(elem.getchildren()), 2)
self.assertEqual(child1, elem[0])
self.assertEqual(child2, elem[1])
elem[0:2] = [child2, child1]
self.assertEqual(child2, elem[0])
self.assertEqual(child1, elem[1])
self.assertNotEqual(child1, elem[0])
elem.clear()
self.assertEqual(elem.getchildren(), [])
def test_writestring(self):
elem = ET.XML("<html><body>text</body></html>")
self.assertEqual(ET.tostring(elem), b'<html><body>text</body></html>')
elem = ET.fromstring("<html><body>text</body></html>")
self.assertEqual(ET.tostring(elem), b'<html><body>text</body></html>')
def test_tostring_default_namespace(self):
elem = ET.XML('<body xmlns="http://effbot.org/ns"><tag/></body>')
self.assertEqual(
ET.tostring(elem, encoding='unicode'),
'<ns0:body xmlns:ns0="http://effbot.org/ns"><ns0:tag /></ns0:body>'
)
self.assertEqual(
ET.tostring(elem, encoding='unicode', default_namespace='http://effbot.org/ns'),
'<body xmlns="http://effbot.org/ns"><tag /></body>'
)
def test_tostring_default_namespace_different_namespace(self):
elem = ET.XML('<body xmlns="http://effbot.org/ns"><tag/></body>')
self.assertEqual(
ET.tostring(elem, encoding='unicode', default_namespace='foobar'),
'<ns1:body xmlns="foobar" xmlns:ns1="http://effbot.org/ns"><ns1:tag /></ns1:body>'
)
def test_tostring_default_namespace_original_no_namespace(self):
elem = ET.XML('<body><tag/></body>')
EXPECTED_MSG = '^cannot use non-qualified names with default_namespace option$'
with self.assertRaisesRegex(ValueError, EXPECTED_MSG):
ET.tostring(elem, encoding='unicode', default_namespace='foobar')
def test_tostring_no_xml_declaration(self):
elem = ET.XML('<body><tag/></body>')
self.assertEqual(
ET.tostring(elem, encoding='unicode'),
'<body><tag /></body>'
)
def test_tostring_xml_declaration(self):
elem = ET.XML('<body><tag/></body>')
self.assertEqual(
ET.tostring(elem, encoding='utf8', xml_declaration=True),
b"<?xml version='1.0' encoding='utf8'?>\n<body><tag /></body>"
)
def test_tostring_xml_declaration_unicode_encoding(self):
elem = ET.XML('<body><tag/></body>')
preferredencoding = locale.getpreferredencoding()
self.assertEqual(
f"<?xml version='1.0' encoding='{preferredencoding}'?>\n<body><tag /></body>",
ET.tostring(elem, encoding='unicode', xml_declaration=True)
)
def test_tostring_xml_declaration_cases(self):
elem = ET.XML('<body><tag>ø</tag></body>')
preferredencoding = locale.getpreferredencoding()
TESTCASES = [
# (expected_retval, encoding, xml_declaration)
# ... xml_declaration = None
(b'<body><tag>&#248;</tag></body>', None, None),
(b'<body><tag>\xc3\xb8</tag></body>', 'UTF-8', None),
(b'<body><tag>&#248;</tag></body>', 'US-ASCII', None),
(b"<?xml version='1.0' encoding='ISO-8859-1'?>\n"
b"<body><tag>\xf8</tag></body>", 'ISO-8859-1', None),
('<body><tag>ø</tag></body>', 'unicode', None),
# ... xml_declaration = False
(b"<body><tag>ø</tag></body>", None, False),
(b"<body><tag>\xc3\xb8</tag></body>", 'UTF-8', False),
(b"<body><tag>ø</tag></body>", 'US-ASCII', False),
(b"<body><tag>\xf8</tag></body>", 'ISO-8859-1', False),
("<body><tag>ø</tag></body>", 'unicode', False),
# ... xml_declaration = True
(b"<?xml version='1.0' encoding='us-ascii'?>\n"
b"<body><tag>ø</tag></body>", None, True),
(b"<?xml version='1.0' encoding='UTF-8'?>\n"
b"<body><tag>\xc3\xb8</tag></body>", 'UTF-8', True),
(b"<?xml version='1.0' encoding='US-ASCII'?>\n"
b"<body><tag>ø</tag></body>", 'US-ASCII', True),
(b"<?xml version='1.0' encoding='ISO-8859-1'?>\n"
b"<body><tag>\xf8</tag></body>", 'ISO-8859-1', True),
(f"<?xml version='1.0' encoding='{preferredencoding}'?>\n"
"<body><tag>ø</tag></body>", 'unicode', True),
]
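# subTest keeps one failing encoding/xml_declaration combination from masking the others.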
for expected_retval, encoding, xml_declaration in TESTCASES:
with self.subTest(f'encoding={encoding} '
f'xml_declaration={xml_declaration}'):
self.assertEqual(
ET.tostring(
elem,
encoding=encoding,
xml_declaration=xml_declaration
),
expected_retval
)
def test_tostringlist_default_namespace(self):
elem = ET.XML('<body xmlns="http://effbot.org/ns"><tag/></body>')
self.assertEqual(
''.join(ET.tostringlist(elem, encoding='unicode')),
'<ns0:body xmlns:ns0="http://effbot.org/ns"><ns0:tag /></ns0:body>'
)
self.assertEqual(
''.join(ET.tostringlist(elem, encoding='unicode', default_namespace='http://effbot.org/ns')),
'<body xmlns="http://effbot.org/ns"><tag /></body>'
)
def test_tostringlist_xml_declaration(self):
elem = ET.XML('<body><tag/></body>')
self.assertEqual(
''.join(ET.tostringlist(elem, encoding='unicode')),
'<body><tag /></body>'
)
self.assertEqual(
b''.join(ET.tostringlist(elem, xml_declaration=True)),
b"<?xml version='1.0' encoding='us-ascii'?>\n<body><tag /></body>"
)
preferredencoding = locale.getpreferredencoding()
stringlist = ET.tostringlist(elem, encoding='unicode', xml_declaration=True)
self.assertEqual(
''.join(stringlist),
f"<?xml version='1.0' encoding='{preferredencoding}'?>\n<body><tag /></body>"
)
self.assertRegex(stringlist[0], r"^<\?xml version='1.0' encoding='.+'?>")
self.assertEqual(['<body', '>', '<tag', ' />', '</body>'], stringlist[1:])
def test_encoding(self):
def check(encoding, body=''):
xml = ("<?xml version='1.0' encoding='%s'?><xml>%s</xml>" %
(encoding, body))
self.assertEqual(ET.XML(xml.encode(encoding)).text, body)
self.assertEqual(ET.XML(xml).text, body)
check("ascii", 'a')
check("us-ascii", 'a')
check("iso-8859-1", '\xbd')
check("iso-8859-15", '\u20ac')
check("cp437", '\u221a')
check("mac-roman", '\u02da')
def xml(encoding):
return "<?xml version='1.0' encoding='%s'?><xml />" % encoding
def bxml(encoding):
return xml(encoding).encode(encoding)
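# ASCII-compatible codecs below are expected to parse; the two groups after them must fail with ValueError or ParseError respectively.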
supported_encodings = [
'ascii', 'utf-8', 'utf-8-sig', 'utf-16', 'utf-16be', 'utf-16le',
'iso8859-1', 'iso8859-2', 'iso8859-3', 'iso8859-4', 'iso8859-5',
'iso8859-6', 'iso8859-7', 'iso8859-8', 'iso8859-9', 'iso8859-10',
'iso8859-13', 'iso8859-14', 'iso8859-15', 'iso8859-16',
'cp437', 'cp720', 'cp737', 'cp775', 'cp850', 'cp852',
'cp855', 'cp856', 'cp857', 'cp858', 'cp860', 'cp861', 'cp862',
'cp863', 'cp865', 'cp866', 'cp869', 'cp874', 'cp1006', 'cp1125',
'cp1250', 'cp1251', 'cp1252', 'cp1253', 'cp1254', 'cp1255',
'cp1256', 'cp1257', 'cp1258',
'mac-cyrillic', 'mac-greek', 'mac-iceland', 'mac-latin2',
'mac-roman', 'mac-turkish',
'iso2022-jp', 'iso2022-jp-1', 'iso2022-jp-2', 'iso2022-jp-2004',
'iso2022-jp-3', 'iso2022-jp-ext',
'koi8-r', 'koi8-t', 'koi8-u', 'kz1048',
'hz', 'ptcp154',
]
for encoding in supported_encodings:
self.assertEqual(ET.tostring(ET.XML(bxml(encoding))), b'<xml />')
unsupported_ascii_compatible_encodings = [
'big5', 'big5hkscs',
'cp932', 'cp949', 'cp950',
'euc-jp', 'euc-jis-2004', 'euc-jisx0213', 'euc-kr',
'gb2312', 'gbk', 'gb18030',
'iso2022-kr', 'johab',
'shift-jis', 'shift-jis-2004', 'shift-jisx0213',
'utf-7',
]
for encoding in unsupported_ascii_compatible_encodings:
self.assertRaises(ValueError, ET.XML, bxml(encoding))
unsupported_ascii_incompatible_encodings = [
'cp037', 'cp424', 'cp500', 'cp864', 'cp875', 'cp1026', 'cp1140',
'utf_32', 'utf_32_be', 'utf_32_le',
]
for encoding in unsupported_ascii_incompatible_encodings:
self.assertRaises(ET.ParseError, ET.XML, bxml(encoding))
self.assertRaises(ValueError, ET.XML, xml('undefined').encode('ascii'))
self.assertRaises(LookupError, ET.XML, xml('xxx').encode('ascii'))
def test_methods(self):
# Test serialization methods.
e = ET.XML("<html><link/><script>1 < 2</script></html>")
e.tail = "\n"
self.assertEqual(serialize(e),
'<html><link /><script>1 &lt; 2</script></html>\n')
self.assertEqual(serialize(e, method=None),
'<html><link /><script>1 &lt; 2</script></html>\n')
self.assertEqual(serialize(e, method="xml"),
'<html><link /><script>1 &lt; 2</script></html>\n')
self.assertEqual(serialize(e, method="html"),
'<html><link><script>1 < 2</script></html>\n')
self.assertEqual(serialize(e, method="text"), '1 < 2\n')
def test_issue18347(self):
e = ET.XML('<html><CamelCase>text</CamelCase></html>')
self.assertEqual(serialize(e),
'<html><CamelCase>text</CamelCase></html>')
self.assertEqual(serialize(e, method="html"),
'<html><CamelCase>text</CamelCase></html>')
def test_entity(self):
# Test entity handling.
# 1) good entities
e = ET.XML("<document title='舰'>test</document>")
self.assertEqual(serialize(e, encoding="us-ascii"),
b'<document title="舰">test</document>')
self.serialize_check(e, '<document title="\u8230">test</document>')
# 2) bad entities
with self.assertRaises(ET.ParseError) as cm:
ET.XML("<document>&entity;</document>")
self.assertEqual(str(cm.exception),
'undefined entity: line 1, column 10')
with self.assertRaises(ET.ParseError) as cm:
ET.XML(ENTITY_XML)
self.assertEqual(str(cm.exception),
'undefined entity &entity;: line 5, column 10')
# 3) custom entity
parser = ET.XMLParser()
parser.entity["entity"] = "text"
parser.feed(ENTITY_XML)
root = parser.close()
self.serialize_check(root, '<document>text</document>')
# 4) external (SYSTEM) entity
with self.assertRaises(ET.ParseError) as cm:
ET.XML(EXTERNAL_ENTITY_XML)
self.assertEqual(str(cm.exception),
'undefined entity &entity;: line 4, column 10')
def test_namespace(self):
# Test namespace issues.
# 1) xml namespace
elem = ET.XML("<tag xml:lang='en' />")
self.serialize_check(elem, '<tag xml:lang="en" />') # 1.1
# 2) other "well-known" namespaces
elem = ET.XML("<rdf:RDF xmlns:rdf='http://www.w3.org/1999/02/22-rdf-syntax-ns#' />")
self.serialize_check(elem,
'<rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" />') # 2.1
elem = ET.XML("<html:html xmlns:html='http://www.w3.org/1999/xhtml' />")
self.serialize_check(elem,
'<html:html xmlns:html="http://www.w3.org/1999/xhtml" />') # 2.2
elem = ET.XML("<soap:Envelope xmlns:soap='http://schemas.xmlsoap.org/soap/envelope' />")
self.serialize_check(elem,
'<ns0:Envelope xmlns:ns0="http://schemas.xmlsoap.org/soap/envelope" />') # 2.3
# 3) unknown namespaces
elem = ET.XML(SAMPLE_XML_NS)
self.serialize_check(elem,
'<ns0:body xmlns:ns0="http://effbot.org/ns">\n'
' <ns0:tag>text</ns0:tag>\n'
' <ns0:tag />\n'
' <ns0:section>\n'
' <ns0:tag>subtext</ns0:tag>\n'
' </ns0:section>\n'
'</ns0:body>')
def test_qname(self):
# Test QName handling.
# 1) decorated tags
elem = ET.Element("{uri}tag")
self.serialize_check(elem, '<ns0:tag xmlns:ns0="uri" />') # 1.1
elem = ET.Element(ET.QName("{uri}tag"))
self.serialize_check(elem, '<ns0:tag xmlns:ns0="uri" />') # 1.2
elem = ET.Element(ET.QName("uri", "tag"))
self.serialize_check(elem, '<ns0:tag xmlns:ns0="uri" />') # 1.3
elem = ET.Element(ET.QName("uri", "tag"))
subelem = ET.SubElement(elem, ET.QName("uri", "tag1"))
subelem = ET.SubElement(elem, ET.QName("uri", "tag2"))
self.serialize_check(elem,
'<ns0:tag xmlns:ns0="uri"><ns0:tag1 /><ns0:tag2 /></ns0:tag>') # 1.4
# 2) decorated attributes
elem.clear()
elem.attrib["{uri}key"] = "value"
self.serialize_check(elem,
'<ns0:tag xmlns:ns0="uri" ns0:key="value" />') # 2.1
elem.clear()
elem.attrib[ET.QName("{uri}key")] = "value"
self.serialize_check(elem,
'<ns0:tag xmlns:ns0="uri" ns0:key="value" />') # 2.2
# 3) decorated values are not converted by default, but the
# QName wrapper can be used for values
elem.clear()
elem.attrib["{uri}key"] = "{uri}value"
self.serialize_check(elem,
'<ns0:tag xmlns:ns0="uri" ns0:key="{uri}value" />') # 3.1
elem.clear()
elem.attrib["{uri}key"] = ET.QName("{uri}value")
self.serialize_check(elem,
'<ns0:tag xmlns:ns0="uri" ns0:key="ns0:value" />') # 3.2
elem.clear()
subelem = ET.Element("tag")
subelem.attrib["{uri1}key"] = ET.QName("{uri2}value")
elem.append(subelem)
elem.append(subelem)
self.serialize_check(elem,
'<ns0:tag xmlns:ns0="uri" xmlns:ns1="uri1" xmlns:ns2="uri2">'
'<tag ns1:key="ns2:value" />'
'<tag ns1:key="ns2:value" />'
'</ns0:tag>') # 3.3
# 4) Direct QName tests
self.assertEqual(str(ET.QName('ns', 'tag')), '{ns}tag')
self.assertEqual(str(ET.QName('{ns}tag')), '{ns}tag')
q1 = ET.QName('ns', 'tag')
q2 = ET.QName('ns', 'tag')
self.assertEqual(q1, q2)
q2 = ET.QName('ns', 'other-tag')
self.assertNotEqual(q1, q2)
self.assertNotEqual(q1, 'ns:tag')
self.assertEqual(q1, '{ns}tag')
def test_doctype_public(self):
# Test PUBLIC doctype.
elem = ET.XML('<!DOCTYPE html PUBLIC'
' "-//W3C//DTD XHTML 1.0 Transitional//EN"'
' "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">'
'<html>text</html>')
def test_xpath_tokenizer(self):
# Test the XPath tokenizer.
from xml.etree import ElementPath
def check(p, expected, namespaces=None):
self.assertEqual([op or tag
for op, tag in ElementPath.xpath_tokenizer(p, namespaces)],
expected)
# tests from the xml specification
check("*", ['*'])
check("text()", ['text', '()'])
check("@name", ['@', 'name'])
check("@*", ['@', '*'])
check("para[1]", ['para', '[', '1', ']'])
check("para[last()]", ['para', '[', 'last', '()', ']'])
check("*/para", ['*', '/', 'para'])
check("/doc/chapter[5]/section[2]",
['/', 'doc', '/', 'chapter', '[', '5', ']',
'/', 'section', '[', '2', ']'])
check("chapter//para", ['chapter', '//', 'para'])
check("//para", ['//', 'para'])
check("//olist/item", ['//', 'olist', '/', 'item'])
check(".", ['.'])
check(".//para", ['.', '//', 'para'])
check("..", ['..'])
check("../@lang", ['..', '/', '@', 'lang'])
check("chapter[title]", ['chapter', '[', 'title', ']'])
check("employee[@secretary and @assistant]", ['employee',
'[', '@', 'secretary', '', 'and', '', '@', 'assistant', ']'])
# additional tests
check("@{ns}attr", ['@', '{ns}attr'])
check("{http://spam}egg", ['{http://spam}egg'])
check("./spam.egg", ['.', '/', 'spam.egg'])
check(".//{http://spam}egg", ['.', '//', '{http://spam}egg'])
# wildcard tags
check("{ns}*", ['{ns}*'])
check("{}*", ['{}*'])
check("{*}tag", ['{*}tag'])
check("{*}*", ['{*}*'])
check(".//{*}tag", ['.', '//', '{*}tag'])
# namespace prefix resolution
check("./xsd:type", ['.', '/', '{http://www.w3.org/2001/XMLSchema}type'],
{'xsd': 'http://www.w3.org/2001/XMLSchema'})
check("type", ['{http://www.w3.org/2001/XMLSchema}type'],
{'': 'http://www.w3.org/2001/XMLSchema'})
check("@xsd:type", ['@', '{http://www.w3.org/2001/XMLSchema}type'],
{'xsd': 'http://www.w3.org/2001/XMLSchema'})
check("@type", ['@', 'type'],
{'': 'http://www.w3.org/2001/XMLSchema'})
check("@{*}type", ['@', '{*}type'],
{'': 'http://www.w3.org/2001/XMLSchema'})
check("@{ns}attr", ['@', '{ns}attr'],
{'': 'http://www.w3.org/2001/XMLSchema',
'ns': 'http://www.w3.org/2001/XMLSchema'})
def test_processinginstruction(self):
# Test ProcessingInstruction directly
self.assertEqual(ET.tostring(ET.ProcessingInstruction('test', 'instruction')),
b'<?test instruction?>')
self.assertEqual(ET.tostring(ET.PI('test', 'instruction')),
b'<?test instruction?>')
# Issue #2746
self.assertEqual(ET.tostring(ET.PI('test', '<testing&>')),
b'<?test <testing&>?>')
self.assertEqual(ET.tostring(ET.PI('test', '<testing&>\xe3'), 'latin-1'),
b"<?xml version='1.0' encoding='latin-1'?>\n"
b"<?test <testing&>\xe3?>")
def test_html_empty_elems_serialization(self):
# issue 15970
# from http://www.w3.org/TR/html401/index/elements.html
for element in ['AREA', 'BASE', 'BASEFONT', 'BR', 'COL', 'FRAME', 'HR',
'IMG', 'INPUT', 'ISINDEX', 'LINK', 'META', 'PARAM']:
for elem in [element, element.lower()]:
expected = '<%s>' % elem
serialized = serialize(ET.XML('<%s />' % elem), method='html')
self.assertEqual(serialized, expected)
serialized = serialize(ET.XML('<%s></%s>' % (elem,elem)),
method='html')
self.assertEqual(serialized, expected)
def test_dump_attribute_order(self):
# See BPO 34160
e = ET.Element('curriculum', status='public', company='example')
with support.captured_stdout() as stdout:
ET.dump(e)
self.assertEqual(stdout.getvalue(),
'<curriculum status="public" company="example" />\n')
def test_tree_write_attribute_order(self):
# See BPO 34160
root = ET.Element('curriculum', status='public', company='example')
self.assertEqual(serialize(root),
'<curriculum status="public" company="example" />')
self.assertEqual(serialize(root, method='html'),
'<curriculum status="public" company="example"></curriculum>')
class XMLPullParserTest(unittest.TestCase):
def _feed(self, parser, data, chunk_size=None):
if chunk_size is None:
parser.feed(data)
else:
for i in range(0, len(data), chunk_size):
parser.feed(data[i:i+chunk_size])
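# The helpers below pull buffered events from read_events() and compare them as raw tuples, (event, tag) pairs, or (event, (tag, text)) pairs.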
def assert_events(self, parser, expected, max_events=None):
self.assertEqual(
[(event, (elem.tag, elem.text))
for event, elem in islice(parser.read_events(), max_events)],
expected)
def assert_event_tuples(self, parser, expected, max_events=None):
self.assertEqual(
list(islice(parser.read_events(), max_events)),
expected)
def assert_event_tags(self, parser, expected, max_events=None):
events = islice(parser.read_events(), max_events)
self.assertEqual([(action, elem.tag) for action, elem in events],
expected)
def test_simple_xml(self):
for chunk_size in (None, 1, 5):
with self.subTest(chunk_size=chunk_size):
parser = ET.XMLPullParser()
self.assert_event_tags(parser, [])
self._feed(parser, "<!-- comment -->\n", chunk_size)
self.assert_event_tags(parser, [])
self._feed(parser,
"<root>\n <element key='value'>text</element",
chunk_size)
self.assert_event_tags(parser, [])
self._feed(parser, ">\n", chunk_size)
self.assert_event_tags(parser, [('end', 'element')])
self._feed(parser, "<element>text</element>tail\n", chunk_size)
self._feed(parser, "<empty-element/>\n", chunk_size)
self.assert_event_tags(parser, [
('end', 'element'),
('end', 'empty-element'),
])
self._feed(parser, "</root>\n", chunk_size)
self.assert_event_tags(parser, [('end', 'root')])
self.assertIsNone(parser.close())
def test_feed_while_iterating(self):
parser = ET.XMLPullParser()
it = parser.read_events()
self._feed(parser, "<root>\n <element key='value'>text</element>\n")
action, elem = next(it)
self.assertEqual((action, elem.tag), ('end', 'element'))
self._feed(parser, "</root>\n")
action, elem = next(it)
self.assertEqual((action, elem.tag), ('end', 'root'))
with self.assertRaises(StopIteration):
next(it)
def test_simple_xml_with_ns(self):
parser = ET.XMLPullParser()
self.assert_event_tags(parser, [])
self._feed(parser, "<!-- comment -->\n")
self.assert_event_tags(parser, [])
self._feed(parser, "<root xmlns='namespace'>\n")
self.assert_event_tags(parser, [])
self._feed(parser, "<element key='value'>text</element")
self.assert_event_tags(parser, [])
self._feed(parser, ">\n")
self.assert_event_tags(parser, [('end', '{namespace}element')])
self._feed(parser, "<element>text</element>tail\n")
self._feed(parser, "<empty-element/>\n")
self.assert_event_tags(parser, [
('end', '{namespace}element'),
('end', '{namespace}empty-element'),
])
self._feed(parser, "</root>\n")
self.assert_event_tags(parser, [('end', '{namespace}root')])
self.assertIsNone(parser.close())
def test_ns_events(self):
parser = ET.XMLPullParser(events=('start-ns', 'end-ns'))
self._feed(parser, "<!-- comment -->\n")
self._feed(parser, "<root xmlns='namespace'>\n")
self.assertEqual(
list(parser.read_events()),
[('start-ns', ('', 'namespace'))])
self._feed(parser, "<element key='value'>text</element")
self._feed(parser, ">\n")
self._feed(parser, "<element>text</element>tail\n")
self._feed(parser, "<empty-element/>\n")
self._feed(parser, "</root>\n")
self.assertEqual(list(parser.read_events()), [('end-ns', None)])
self.assertIsNone(parser.close())
def test_ns_events_start(self):
parser = ET.XMLPullParser(events=('start-ns', 'start', 'end'))
self._feed(parser, "<tag xmlns='abc' xmlns:p='xyz'>\n")
self.assert_event_tuples(parser, [
('start-ns', ('', 'abc')),
('start-ns', ('p', 'xyz')),
], max_events=2)
self.assert_event_tags(parser, [
('start', '{abc}tag'),
], max_events=1)
self._feed(parser, "<child />\n")
self.assert_event_tags(parser, [
('start', '{abc}child'),
('end', '{abc}child'),
])
self._feed(parser, "</tag>\n")
parser.close()
self.assert_event_tags(parser, [
('end', '{abc}tag'),
])
def test_ns_events_start_end(self):
parser = ET.XMLPullParser(events=('start-ns', 'start', 'end', 'end-ns'))
self._feed(parser, "<tag xmlns='abc' xmlns:p='xyz'>\n")
self.assert_event_tuples(parser, [
('start-ns', ('', 'abc')),
('start-ns', ('p', 'xyz')),
], max_events=2)
self.assert_event_tags(parser, [
('start', '{abc}tag'),
], max_events=1)
self._feed(parser, "<child />\n")
self.assert_event_tags(parser, [
('start', '{abc}child'),
('end', '{abc}child'),
])
self._feed(parser, "</tag>\n")
parser.close()
self.assert_event_tags(parser, [
('end', '{abc}tag'),
], max_events=1)
self.assert_event_tuples(parser, [
('end-ns', None),
('end-ns', None),
])
def test_events(self):
parser = ET.XMLPullParser(events=())
self._feed(parser, "<root/>\n")
self.assert_event_tags(parser, [])
parser = ET.XMLPullParser(events=('start', 'end'))
self._feed(parser, "<!-- text here -->\n")
self.assert_events(parser, [])
parser = ET.XMLPullParser(events=('start', 'end'))
self._feed(parser, "<root>\n")
self.assert_event_tags(parser, [('start', 'root')])
self._feed(parser, "<element key='value'>text</element")
self.assert_event_tags(parser, [('start', 'element')])
self._feed(parser, ">\n")
self.assert_event_tags(parser, [('end', 'element')])
self._feed(parser,
"<element xmlns='foo'>text<empty-element/></element>tail\n")
self.assert_event_tags(parser, [
('start', '{foo}element'),
('start', '{foo}empty-element'),
('end', '{foo}empty-element'),
('end', '{foo}element'),
])
self._feed(parser, "</root>")
self.assertIsNone(parser.close())
self.assert_event_tags(parser, [('end', 'root')])
parser = ET.XMLPullParser(events=('start',))
self._feed(parser, "<!-- comment -->\n")
self.assert_event_tags(parser, [])
self._feed(parser, "<root>\n")
self.assert_event_tags(parser, [('start', 'root')])
self._feed(parser, "<element key='value'>text</element")
self.assert_event_tags(parser, [('start', 'element')])
self._feed(parser, ">\n")
self.assert_event_tags(parser, [])
self._feed(parser,
"<element xmlns='foo'>text<empty-element/></element>tail\n")
self.assert_event_tags(parser, [
('start', '{foo}element'),
('start', '{foo}empty-element'),
])
self._feed(parser, "</root>")
self.assertIsNone(parser.close())
def test_events_comment(self):
parser = ET.XMLPullParser(events=('start', 'comment', 'end'))
self._feed(parser, "<!-- text here -->\n")
self.assert_events(parser, [('comment', (ET.Comment, ' text here '))])
self._feed(parser, "<!-- more text here -->\n")
self.assert_events(parser, [('comment', (ET.Comment, ' more text here '))])
self._feed(parser, "<root-tag>text")
self.assert_event_tags(parser, [('start', 'root-tag')])
self._feed(parser, "<!-- inner comment-->\n")
self.assert_events(parser, [('comment', (ET.Comment, ' inner comment'))])
self._feed(parser, "</root-tag>\n")
self.assert_event_tags(parser, [('end', 'root-tag')])
self._feed(parser, "<!-- outer comment -->\n")
self.assert_events(parser, [('comment', (ET.Comment, ' outer comment '))])
parser = ET.XMLPullParser(events=('comment',))
self._feed(parser, "<!-- text here -->\n")
self.assert_events(parser, [('comment', (ET.Comment, ' text here '))])
def test_events_pi(self):
parser = ET.XMLPullParser(events=('start', 'pi', 'end'))
self._feed(parser, "<?pitarget?>\n")
self.assert_events(parser, [('pi', (ET.PI, 'pitarget'))])
parser = ET.XMLPullParser(events=('pi',))
self._feed(parser, "<?pitarget some text ?>\n")
self.assert_events(parser, [('pi', (ET.PI, 'pitarget some text '))])
def test_events_sequence(self):
# Test that events can be some sequence that's not just a tuple or list
eventset = {'end', 'start'}
parser = ET.XMLPullParser(events=eventset)
self._feed(parser, "<foo>bar</foo>")
self.assert_event_tags(parser, [('start', 'foo'), ('end', 'foo')])
class DummyIter:
def __init__(self):
self.events = iter(['start', 'end', 'start-ns'])
def __iter__(self):
return self
def __next__(self):
return next(self.events)
parser = ET.XMLPullParser(events=DummyIter())
self._feed(parser, "<foo>bar</foo>")
self.assert_event_tags(parser, [('start', 'foo'), ('end', 'foo')])
def test_unknown_event(self):
with self.assertRaises(ValueError):
ET.XMLPullParser(events=('start', 'end', 'bogus'))
#
# xinclude tests (samples from appendix C of the xinclude specification)
XINCLUDE = {}
XINCLUDE["C1.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>120 Mz is adequate for an average home user.</p>
<xi:include href="disclaimer.xml"/>
</document>
"""
XINCLUDE["disclaimer.xml"] = """\
<?xml version='1.0'?>
<disclaimer>
<p>The opinions represented herein represent those of the individual
and should not be interpreted as official policy endorsed by this
organization.</p>
</disclaimer>
"""
XINCLUDE["C2.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>This document has been accessed
<xi:include href="count.txt" parse="text"/> times.</p>
</document>
"""
XINCLUDE["count.txt"] = "324387"
XINCLUDE["C2b.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>This document has been <em>accessed</em>
<xi:include href="count.txt" parse="text"/> times.</p>
</document>
"""
XINCLUDE["C3.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>The following is the source of the "data.xml" resource:</p>
<example><xi:include href="data.xml" parse="text"/></example>
</document>
"""
XINCLUDE["data.xml"] = """\
<?xml version='1.0'?>
<data>
<item><![CDATA[Brooks & Shields]]></item>
</data>
"""
XINCLUDE["C5.xml"] = """\
<?xml version='1.0'?>
<div xmlns:xi="http://www.w3.org/2001/XInclude">
<xi:include href="example.txt" parse="text">
<xi:fallback>
<xi:include href="fallback-example.txt" parse="text">
<xi:fallback><a href="mailto:[email protected]">Report error</a></xi:fallback>
</xi:include>
</xi:fallback>
</xi:include>
</div>
"""
XINCLUDE["default.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>Example.</p>
<xi:include href="{}"/>
</document>
""".format(html.escape(SIMPLE_XMLFILE, True))
#
# badly formatted xi:include tags
XINCLUDE_BAD = {}
XINCLUDE_BAD["B1.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>120 Mz is adequate for an average home user.</p>
<xi:include href="disclaimer.xml" parse="BAD_TYPE"/>
</document>
"""
XINCLUDE_BAD["B2.xml"] = """\
<?xml version='1.0'?>
<div xmlns:xi="http://www.w3.org/2001/XInclude">
<xi:fallback></xi:fallback>
</div>
"""
class XIncludeTest(unittest.TestCase):
def xinclude_loader(self, href, parse="xml", encoding=None):
try:
data = XINCLUDE[href]
except KeyError:
raise OSError("resource not found")
if parse == "xml":
data = ET.XML(data)
return data
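# A loader that resolves nothing, used by test_xinclude_failures to exercise the FatalIncludeError paths.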
def none_loader(self, href, parser, encoding=None):
return None
def _my_loader(self, href, parse):
# Used to avoid a test-dependency problem where the default loader
# of ElementInclude uses the pyET parser for cET tests.
if parse == 'xml':
with open(href, 'rb') as f:
return ET.parse(f).getroot()
else:
return None
def test_xinclude_default(self):
from xml.etree import ElementInclude
doc = self.xinclude_loader('default.xml')
ElementInclude.include(doc, self._my_loader)
self.assertEqual(serialize(doc),
'<document>\n'
' <p>Example.</p>\n'
' <root>\n'
' <element key="value">text</element>\n'
' <element>text</element>tail\n'
' <empty-element />\n'
'</root>\n'
'</document>')
def test_xinclude(self):
from xml.etree import ElementInclude
# Basic inclusion example (XInclude C.1)
document = self.xinclude_loader("C1.xml")
ElementInclude.include(document, self.xinclude_loader)
self.assertEqual(serialize(document),
'<document>\n'
' <p>120 Mz is adequate for an average home user.</p>\n'
' <disclaimer>\n'
' <p>The opinions represented herein represent those of the individual\n'
' and should not be interpreted as official policy endorsed by this\n'
' organization.</p>\n'
'</disclaimer>\n'
'</document>') # C1
# Textual inclusion example (XInclude C.2)
document = self.xinclude_loader("C2.xml")
ElementInclude.include(document, self.xinclude_loader)
self.assertEqual(serialize(document),
'<document>\n'
' <p>This document has been accessed\n'
' 324387 times.</p>\n'
'</document>') # C2
# Textual inclusion after sibling element (based on modified XInclude C.2)
document = self.xinclude_loader("C2b.xml")
ElementInclude.include(document, self.xinclude_loader)
self.assertEqual(serialize(document),
'<document>\n'
' <p>This document has been <em>accessed</em>\n'
' 324387 times.</p>\n'
'</document>') # C2b
# Textual inclusion of XML example (XInclude C.3)
document = self.xinclude_loader("C3.xml")
ElementInclude.include(document, self.xinclude_loader)
self.assertEqual(serialize(document),
'<document>\n'
' <p>The following is the source of the "data.xml" resource:</p>\n'
" <example><?xml version='1.0'?>\n"
'<data>\n'
' <item><![CDATA[Brooks &amp; Shields]]></item>\n'
'</data>\n'
'</example>\n'
'</document>') # C3
# Fallback example (XInclude C.5)
# Note! Fallback support is not yet implemented
document = self.xinclude_loader("C5.xml")
with self.assertRaises(OSError) as cm:
ElementInclude.include(document, self.xinclude_loader)
self.assertEqual(str(cm.exception), 'resource not found')
self.assertEqual(serialize(document),
'<div xmlns:ns0="http://www.w3.org/2001/XInclude">\n'
' <ns0:include href="example.txt" parse="text">\n'
' <ns0:fallback>\n'
' <ns0:include href="fallback-example.txt" parse="text">\n'
' <ns0:fallback><a href="mailto:[email protected]">Report error</a></ns0:fallback>\n'
' </ns0:include>\n'
' </ns0:fallback>\n'
' </ns0:include>\n'
'</div>') # C5
def test_xinclude_failures(self):
from xml.etree import ElementInclude
# Test failure to locate included XML file.
document = ET.XML(XINCLUDE["C1.xml"])
with self.assertRaises(ElementInclude.FatalIncludeError) as cm:
ElementInclude.include(document, loader=self.none_loader)
self.assertEqual(str(cm.exception),
"cannot load 'disclaimer.xml' as 'xml'")
# Test failure to locate included text file.
document = ET.XML(XINCLUDE["C2.xml"])
with self.assertRaises(ElementInclude.FatalIncludeError) as cm:
ElementInclude.include(document, loader=self.none_loader)
self.assertEqual(str(cm.exception),
"cannot load 'count.txt' as 'text'")
# Test bad parse type.
document = ET.XML(XINCLUDE_BAD["B1.xml"])
with self.assertRaises(ElementInclude.FatalIncludeError) as cm:
ElementInclude.include(document, loader=self.none_loader)
self.assertEqual(str(cm.exception),
"unknown parse type in xi:include tag ('BAD_TYPE')")
# Test xi:fallback outside xi:include.
document = ET.XML(XINCLUDE_BAD["B2.xml"])
with self.assertRaises(ElementInclude.FatalIncludeError) as cm:
ElementInclude.include(document, loader=self.none_loader)
self.assertEqual(str(cm.exception),
"xi:fallback tag must be child of xi:include "
"('{http://www.w3.org/2001/XInclude}fallback')")
# --------------------------------------------------------------------
# reported bugs
class BugsTest(unittest.TestCase):
def test_bug_xmltoolkit21(self):
# marshaller gives obscure errors for non-string values
def check(elem):
with self.assertRaises(TypeError) as cm:
serialize(elem)
self.assertEqual(str(cm.exception),
'cannot serialize 123 (type int)')
elem = ET.Element(123)
check(elem) # tag
elem = ET.Element("elem")
elem.text = 123
check(elem) # text
elem = ET.Element("elem")
elem.tail = 123
check(elem) # tail
elem = ET.Element("elem")
elem.set(123, "123")
check(elem) # attribute key
elem = ET.Element("elem")
elem.set("123", 123)
check(elem) # attribute value
def test_bug_xmltoolkit25(self):
# typo in ElementTree.findtext
elem = ET.XML(SAMPLE_XML)
tree = ET.ElementTree(elem)
self.assertEqual(tree.findtext("tag"), 'text')
self.assertEqual(tree.findtext("section/tag"), 'subtext')
def test_bug_xmltoolkit28(self):
# .//tag causes exceptions
tree = ET.XML("<doc><table><tbody/></table></doc>")
self.assertEqual(summarize_list(tree.findall(".//thead")), [])
self.assertEqual(summarize_list(tree.findall(".//tbody")), ['tbody'])
def test_bug_xmltoolkitX1(self):
# dump() doesn't flush the output buffer
tree = ET.XML("<doc><table><tbody/></table></doc>")
with support.captured_stdout() as stdout:
ET.dump(tree)
self.assertEqual(stdout.getvalue(), '<doc><table><tbody /></table></doc>\n')
def test_bug_xmltoolkit39(self):
# non-ascii element and attribute names doesn't work
tree = ET.XML(b"<?xml version='1.0' encoding='iso-8859-1'?><t\xe4g />")
self.assertEqual(ET.tostring(tree, "utf-8"), b'<t\xc3\xa4g />')
tree = ET.XML(b"<?xml version='1.0' encoding='iso-8859-1'?>"
b"<tag \xe4ttr='välue' />")
self.assertEqual(tree.attrib, {'\xe4ttr': 'v\xe4lue'})
self.assertEqual(ET.tostring(tree, "utf-8"),
b'<tag \xc3\xa4ttr="v\xc3\xa4lue" />')
tree = ET.XML(b"<?xml version='1.0' encoding='iso-8859-1'?>"
b'<t\xe4g>text</t\xe4g>')
self.assertEqual(ET.tostring(tree, "utf-8"),
b'<t\xc3\xa4g>text</t\xc3\xa4g>')
tree = ET.Element("t\u00e4g")
self.assertEqual(ET.tostring(tree, "utf-8"), b'<t\xc3\xa4g />')
tree = ET.Element("tag")
tree.set("\u00e4ttr", "v\u00e4lue")
self.assertEqual(ET.tostring(tree, "utf-8"),
b'<tag \xc3\xa4ttr="v\xc3\xa4lue" />')
def test_bug_xmltoolkit54(self):
# problems handling internally defined entities
e = ET.XML("<!DOCTYPE doc [<!ENTITY ldots '舰'>]>"
'<doc>&ldots;</doc>')
self.assertEqual(serialize(e, encoding="us-ascii"),
b'<doc>&#33328;</doc>')
self.assertEqual(serialize(e), '<doc>\u8230</doc>')
def test_bug_xmltoolkit55(self):
# make sure we're reporting the first error, not the last
with self.assertRaises(ET.ParseError) as cm:
ET.XML(b"<!DOCTYPE doc SYSTEM 'doc.dtd'>"
b'<doc>&ldots;&ndots;&rdots;</doc>')
self.assertEqual(str(cm.exception),
'undefined entity &ldots;: line 1, column 36')
def test_bug_xmltoolkit60(self):
# Handle crash in stream source.
class ExceptionFile:
def read(self, x):
raise OSError
self.assertRaises(OSError, ET.parse, ExceptionFile())
def test_bug_xmltoolkit62(self):
# Don't crash when using custom entities.
ENTITIES = {'rsquo': '\u2019', 'lsquo': '\u2018'}
parser = ET.XMLParser()
parser.entity.update(ENTITIES)
parser.feed("""<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE patent-application-publication SYSTEM "pap-v15-2001-01-31.dtd" []>
<patent-application-publication>
<subdoc-abstract>
<paragraph id="A-0001" lvl="0">A new cultivar of Begonia plant named ‘BCT9801BEG’.</paragraph>
</subdoc-abstract>
</patent-application-publication>""")
t = parser.close()
self.assertEqual(t.find('.//paragraph').text,
'A new cultivar of Begonia plant named \u2018BCT9801BEG\u2019.')
@unittest.skipIf(sys.gettrace(), "Skips under coverage.")
def test_bug_xmltoolkit63(self):
# Check reference leak.
def xmltoolkit63():
tree = ET.TreeBuilder()
tree.start("tag", {})
tree.data("text")
tree.end("tag")
xmltoolkit63()
count = sys.getrefcount(None)
for i in range(1000):
xmltoolkit63()
self.assertEqual(sys.getrefcount(None), count)
def test_bug_200708_newline(self):
# Preserve newlines in attributes.
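# ElementTree serializes newlines in attribute values as &#10; character references so they survive reparsing.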
e = ET.Element('SomeTag', text="def _f():\n return 3\n")
self.assertEqual(ET.tostring(e),
b'<SomeTag text="def _f(): return 3 " />')
self.assertEqual(ET.XML(ET.tostring(e)).get("text"),
'def _f():\n return 3\n')
self.assertEqual(ET.tostring(ET.XML(ET.tostring(e))),
b'<SomeTag text="def _f(): return 3 " />')
def test_bug_200708_close(self):
# Test default builder.
parser = ET.XMLParser() # default
parser.feed("<element>some text</element>")
self.assertEqual(parser.close().tag, 'element')
# Test custom builder.
class EchoTarget:
def close(self):
return ET.Element("element") # simulate root
parser = ET.XMLParser(target=EchoTarget())
parser.feed("<element>some text</element>")
self.assertEqual(parser.close().tag, 'element')
def test_bug_200709_default_namespace(self):
e = ET.Element("{default}elem")
s = ET.SubElement(e, "{default}elem")
self.assertEqual(serialize(e, default_namespace="default"), # 1
'<elem xmlns="default"><elem /></elem>')
e = ET.Element("{default}elem")
s = ET.SubElement(e, "{default}elem")
s = ET.SubElement(e, "{not-default}elem")
self.assertEqual(serialize(e, default_namespace="default"), # 2
'<elem xmlns="default" xmlns:ns1="not-default">'
'<elem />'
'<ns1:elem />'
'</elem>')
e = ET.Element("{default}elem")
s = ET.SubElement(e, "{default}elem")
s = ET.SubElement(e, "elem") # unprefixed name
with self.assertRaises(ValueError) as cm:
serialize(e, default_namespace="default") # 3
self.assertEqual(str(cm.exception),
'cannot use non-qualified names with default_namespace option')
def test_bug_200709_register_namespace(self):
e = ET.Element("{http://namespace.invalid/does/not/exist/}title")
self.assertEqual(ET.tostring(e),
b'<ns0:title xmlns:ns0="http://namespace.invalid/does/not/exist/" />')
ET.register_namespace("foo", "http://namespace.invalid/does/not/exist/")
e = ET.Element("{http://namespace.invalid/does/not/exist/}title")
self.assertEqual(ET.tostring(e),
b'<foo:title xmlns:foo="http://namespace.invalid/does/not/exist/" />')
# And the Dublin Core namespace is in the default list:
e = ET.Element("{http://purl.org/dc/elements/1.1/}title")
self.assertEqual(ET.tostring(e),
b'<dc:title xmlns:dc="http://purl.org/dc/elements/1.1/" />')
def test_bug_200709_element_comment(self):
# Not sure if this can be fixed, really (since the serializer needs
# ET.Comment, not cET.comment).
a = ET.Element('a')
a.append(ET.Comment('foo'))
self.assertEqual(a[0].tag, ET.Comment)
a = ET.Element('a')
a.append(ET.PI('foo'))
self.assertEqual(a[0].tag, ET.PI)
def test_bug_200709_element_insert(self):
a = ET.Element('a')
b = ET.SubElement(a, 'b')
c = ET.SubElement(a, 'c')
d = ET.Element('d')
a.insert(0, d)
self.assertEqual(summarize_list(a), ['d', 'b', 'c'])
a.insert(-1, d)
self.assertEqual(summarize_list(a), ['d', 'b', 'd', 'c'])
def test_bug_200709_iter_comment(self):
a = ET.Element('a')
b = ET.SubElement(a, 'b')
comment_b = ET.Comment("TEST-b")
b.append(comment_b)
self.assertEqual(summarize_list(a.iter(ET.Comment)), [ET.Comment])
# --------------------------------------------------------------------
# reported on bugs.python.org
def test_bug_1534630(self):
bob = ET.TreeBuilder()
e = bob.data("data")
e = bob.start("tag", {})
e = bob.end("tag")
e = bob.close()
self.assertEqual(serialize(e), '<tag />')
def test_issue6233(self):
e = ET.XML(b"<?xml version='1.0' encoding='utf-8'?>"
b'<body>t\xc3\xa3g</body>')
self.assertEqual(ET.tostring(e, 'ascii'),
b"<?xml version='1.0' encoding='ascii'?>\n"
b'<body>t&#227;g</body>')
e = ET.XML(b"<?xml version='1.0' encoding='iso-8859-1'?>"
b'<body>t\xe3g</body>')
self.assertEqual(ET.tostring(e, 'ascii'),
b"<?xml version='1.0' encoding='ascii'?>\n"
b'<body>t&#227;g</body>')
def test_issue3151(self):
e = ET.XML('<prefix:localname xmlns:prefix="${stuff}"/>')
self.assertEqual(e.tag, '{${stuff}}localname')
t = ET.ElementTree(e)
self.assertEqual(ET.tostring(e), b'<ns0:localname xmlns:ns0="${stuff}" />')
def test_issue6565(self):
elem = ET.XML("<body><tag/></body>")
self.assertEqual(summarize_list(elem), ['tag'])
newelem = ET.XML(SAMPLE_XML)
elem[:] = newelem[:]
self.assertEqual(summarize_list(elem), ['tag', 'tag', 'section'])
def test_issue10777(self):
# Registering a namespace twice caused a "dictionary changed size during
# iteration" bug.
ET.register_namespace('test10777', 'http://myuri/')
ET.register_namespace('test10777', 'http://myuri/')
def test_lost_text(self):
# Issue #25902: Borrowed text can disappear
class Text:
def __bool__(self):
e.text = 'changed'
return True
e = ET.Element('tag')
e.text = Text()
i = e.itertext()
t = next(i)
self.assertIsInstance(t, Text)
self.assertIsInstance(e.text, str)
self.assertEqual(e.text, 'changed')
def test_lost_tail(self):
# Issue #25902: Borrowed tail can disappear
class Text:
def __bool__(self):
e[0].tail = 'changed'
return True
e = ET.Element('root')
e.append(ET.Element('tag'))
e[0].tail = Text()
i = e.itertext()
t = next(i)
self.assertIsInstance(t, Text)
self.assertIsInstance(e[0].tail, str)
self.assertEqual(e[0].tail, 'changed')
def test_lost_elem(self):
# Issue #25902: Borrowed element can disappear
class Tag:
def __eq__(self, other):
e[0] = ET.Element('changed')
next(i)
return True
e = ET.Element('root')
e.append(ET.Element(Tag()))
e.append(ET.Element('tag'))
i = e.iter('tag')
try:
t = next(i)
except ValueError:
self.skipTest('generators are not reentrant')
self.assertIsInstance(t.tag, Tag)
self.assertIsInstance(e[0].tag, str)
self.assertEqual(e[0].tag, 'changed')
def check_expat224_utf8_bug(self, text):
xml = b'<a b="%s"/>' % text
root = ET.XML(xml)
self.assertEqual(root.get('b'), text.decode('utf-8'))
def test_expat224_utf8_bug(self):
# bpo-31170: Expat 2.2.3 had a bug in its UTF-8 decoder.
# Check that Expat 2.2.4 fixed the bug.
#
# Test buffer bounds at odd and even positions.
text = b'\xc3\xa0' * 1024
self.check_expat224_utf8_bug(text)
text = b'x' + b'\xc3\xa0' * 1024
self.check_expat224_utf8_bug(text)
def test_expat224_utf8_bug_file(self):
with open(UTF8_BUG_XMLFILE, 'rb') as fp:
raw = fp.read()
root = ET.fromstring(raw)
xmlattr = root.get('b')
# "Parse" manually the XML file to extract the value of the 'b'
# attribute of the <a b='xxx' /> XML element
text = raw.decode('utf-8').strip()
text = text.replace('\r\n', ' ')
text = text[6:-4]
self.assertEqual(root.get('b'), text)
# --------------------------------------------------------------------
class BasicElementTest(ElementTestCase, unittest.TestCase):
def test___init__(self):
tag = "foo"
attrib = { "zix": "wyp" }
element_foo = ET.Element(tag, attrib)
# traits of an element
self.assertIsInstance(element_foo, ET.Element)
self.assertIn("tag", dir(element_foo))
self.assertIn("attrib", dir(element_foo))
self.assertIn("text", dir(element_foo))
self.assertIn("tail", dir(element_foo))
# string attributes have expected values
self.assertEqual(element_foo.tag, tag)
self.assertIsNone(element_foo.text)
self.assertIsNone(element_foo.tail)
# attrib is a copy
self.assertIsNot(element_foo.attrib, attrib)
self.assertEqual(element_foo.attrib, attrib)
# attrib isn't linked
attrib["bar"] = "baz"
self.assertIsNot(element_foo.attrib, attrib)
self.assertNotEqual(element_foo.attrib, attrib)
def test___copy__(self):
element_foo = ET.Element("foo", { "zix": "wyp" })
element_foo.append(ET.Element("bar", { "baz": "qix" }))
element_foo2 = copy.copy(element_foo)
# elements are not the same
self.assertIsNot(element_foo2, element_foo)
# string attributes are equal
self.assertEqual(element_foo2.tag, element_foo.tag)
self.assertEqual(element_foo2.text, element_foo.text)
self.assertEqual(element_foo2.tail, element_foo.tail)
# number of children is the same
self.assertEqual(len(element_foo2), len(element_foo))
# children are the same
for (child1, child2) in itertools.zip_longest(element_foo, element_foo2):
self.assertIs(child1, child2)
# attrib is a copy
self.assertEqual(element_foo2.attrib, element_foo.attrib)
def test___deepcopy__(self):
element_foo = ET.Element("foo", { "zix": "wyp" })
element_foo.append(ET.Element("bar", { "baz": "qix" }))
element_foo2 = copy.deepcopy(element_foo)
# elements are not the same
self.assertIsNot(element_foo2, element_foo)
# string attributes are equal
self.assertEqual(element_foo2.tag, element_foo.tag)
self.assertEqual(element_foo2.text, element_foo.text)
self.assertEqual(element_foo2.tail, element_foo.tail)
# number of children is the same
self.assertEqual(len(element_foo2), len(element_foo))
# children are not the same
for (child1, child2) in itertools.zip_longest(element_foo, element_foo2):
self.assertIsNot(child1, child2)
# attrib is a copy
self.assertIsNot(element_foo2.attrib, element_foo.attrib)
self.assertEqual(element_foo2.attrib, element_foo.attrib)
# attrib isn't linked
element_foo.attrib["bar"] = "baz"
self.assertIsNot(element_foo2.attrib, element_foo.attrib)
self.assertNotEqual(element_foo2.attrib, element_foo.attrib)
def test_augmentation_type_errors(self):
e = ET.Element('joe')
self.assertRaises(TypeError, e.append, 'b')
self.assertRaises(TypeError, e.extend, [ET.Element('bar'), 'foo'])
self.assertRaises(TypeError, e.insert, 0, 'foo')
e[:] = [ET.Element('bar')]
with self.assertRaises(TypeError):
e[0] = 'foo'
with self.assertRaises(TypeError):
e[:] = [ET.Element('bar'), 'foo']
if hasattr(e, '__setstate__'):
state = {
'tag': 'tag',
'_children': [None], # non-Element
'attrib': 'attr',
'tail': 'tail',
'text': 'text',
}
self.assertRaises(TypeError, e.__setstate__, state)
if hasattr(e, '__deepcopy__'):
class E(ET.Element):
def __deepcopy__(self, memo):
return None # non-Element
e[:] = [E('bar')]
self.assertRaises(TypeError, copy.deepcopy, e)
def test_cyclic_gc(self):
class Dummy:
pass
# Test the shortest cycle: d->element->d
d = Dummy()
d.dummyref = ET.Element('joe', attr=d)
wref = weakref.ref(d)
del d
gc_collect()
self.assertIsNone(wref())
# A longer cycle: d->e->e2->d
e = ET.Element('joe')
d = Dummy()
d.dummyref = e
wref = weakref.ref(d)
e2 = ET.SubElement(e, 'foo', attr=d)
del d, e, e2
gc_collect()
self.assertIsNone(wref())
# A cycle between Element objects as children of one another
# e1->e2->e3->e1
e1 = ET.Element('e1')
e2 = ET.Element('e2')
e3 = ET.Element('e3')
e3.append(e1)
e2.append(e3)
e1.append(e2)
wref = weakref.ref(e1)
del e1, e2, e3
gc_collect()
self.assertIsNone(wref())
def test_weakref(self):
flag = False
def wref_cb(w):
nonlocal flag
flag = True
e = ET.Element('e')
wref = weakref.ref(e, wref_cb)
self.assertEqual(wref().tag, 'e')
del e
self.assertEqual(flag, True)
self.assertEqual(wref(), None)
def test_get_keyword_args(self):
e1 = ET.Element('foo', x=1, y=2, z=3)
self.assertEqual(e1.get('x', default=7), 1)
self.assertEqual(e1.get('w', default=7), 7)
def test_pickle(self):
# issue #16076: the C implementation wasn't pickleable.
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
for dumper, loader in product(self.modules, repeat=2):
e = dumper.Element('foo', bar=42)
e.text = "text goes here"
e.tail = "opposite of head"
dumper.SubElement(e, 'child').append(dumper.Element('grandchild'))
e.append(dumper.Element('child'))
e.findall('.//grandchild')[0].set('attr', 'other value')
e2 = self.pickleRoundTrip(e, 'xml.etree.ElementTree',
dumper, loader, proto)
self.assertEqual(e2.tag, 'foo')
self.assertEqual(e2.attrib['bar'], 42)
self.assertEqual(len(e2), 2)
self.assertEqualElements(e, e2)
def test_pickle_issue18997(self):
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
for dumper, loader in product(self.modules, repeat=2):
XMLTEXT = """<?xml version="1.0"?>
<group><dogs>4</dogs>
</group>"""
e1 = dumper.fromstring(XMLTEXT)
if hasattr(e1, '__getstate__'):
self.assertEqual(e1.__getstate__()['tag'], 'group')
e2 = self.pickleRoundTrip(e1, 'xml.etree.ElementTree',
dumper, loader, proto)
self.assertEqual(e2.tag, 'group')
self.assertEqual(e2[0].tag, 'dogs')
class BadElementTest(ElementTestCase, unittest.TestCase):
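# These tests mutate the tree from user-controlled hooks (__class__, __eq__, __index__, __del__) and mainly check that nothing crashes.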
def test_extend_mutable_list(self):
class X:
@property
def __class__(self):
L[:] = [ET.Element('baz')]
return ET.Element
L = [X()]
e = ET.Element('foo')
try:
e.extend(L)
except TypeError:
pass
class Y(X, ET.Element):
pass
L = [Y('x')]
e = ET.Element('foo')
e.extend(L)
def test_extend_mutable_list2(self):
class X:
@property
def __class__(self):
del L[:]
return ET.Element
L = [X(), ET.Element('baz')]
e = ET.Element('foo')
try:
e.extend(L)
except TypeError:
pass
class Y(X, ET.Element):
pass
L = [Y('bar'), ET.Element('baz')]
e = ET.Element('foo')
e.extend(L)
def test_remove_with_mutating(self):
class X(ET.Element):
def __eq__(self, o):
del e[:]
return False
e = ET.Element('foo')
e.extend([X('bar')])
self.assertRaises(ValueError, e.remove, ET.Element('baz'))
e = ET.Element('foo')
e.extend([ET.Element('bar')])
self.assertRaises(ValueError, e.remove, X('baz'))
def test_recursive_repr(self):
# Issue #25455
e = ET.Element('foo')
with swap_attr(e, 'tag', e):
with self.assertRaises(RuntimeError):
repr(e) # Should not crash
def test_element_get_text(self):
# Issue #27863
class X(str):
def __del__(self):
try:
elem.text
except NameError:
pass
b = ET.TreeBuilder()
b.start('tag', {})
b.data('ABCD')
b.data(X('EFGH'))
b.data('IJKL')
b.end('tag')
elem = b.close()
self.assertEqual(elem.text, 'ABCDEFGHIJKL')
def test_element_get_tail(self):
# Issue #27863
class X(str):
def __del__(self):
try:
elem[0].tail
except NameError:
pass
b = ET.TreeBuilder()
b.start('root', {})
b.start('tag', {})
b.end('tag')
b.data('ABCD')
b.data(X('EFGH'))
b.data('IJKL')
b.end('root')
elem = b.close()
self.assertEqual(elem[0].tail, 'ABCDEFGHIJKL')
def test_subscr(self):
# Issue #27863
class X:
def __index__(self):
del e[:]
return 1
e = ET.Element('elem')
e.append(ET.Element('child'))
e[:X()] # shouldn't crash
e.append(ET.Element('child'))
e[0:10:X()] # shouldn't crash
def test_ass_subscr(self):
# Issue #27863
class X:
def __index__(self):
e[:] = []
return 1
e = ET.Element('elem')
for _ in range(10):
e.insert(0, ET.Element('child'))
e[0:10:X()] = [] # shouldn't crash
def test_treebuilder_start(self):
# Issue #27863
def element_factory(x, y):
return []
b = ET.TreeBuilder(element_factory=element_factory)
b.start('tag', {})
b.data('ABCD')
self.assertRaises(AttributeError, b.start, 'tag2', {})
del b
gc_collect()
def test_treebuilder_end(self):
# Issue #27863
def element_factory(x, y):
return []
b = ET.TreeBuilder(element_factory=element_factory)
b.start('tag', {})
b.data('ABCD')
self.assertRaises(AttributeError, b.end, 'tag')
del b
gc_collect()
class MutatingElementPath(str):
def __new__(cls, elem, *args):
self = str.__new__(cls, *args)
self.elem = elem
return self
def __eq__(self, o):
del self.elem[:]
return True
MutatingElementPath.__hash__ = str.__hash__
class BadElementPath(str):
def __eq__(self, o):
raise 1/0
BadElementPath.__hash__ = str.__hash__
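# BadElementPathTest swaps in an empty ElementPath cache so these lookups always recompile the path strings defined above.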
class BadElementPathTest(ElementTestCase, unittest.TestCase):
def setUp(self):
super().setUp()
from xml.etree import ElementPath
self.path_cache = ElementPath._cache
ElementPath._cache = {}
def tearDown(self):
from xml.etree import ElementPath
ElementPath._cache = self.path_cache
super().tearDown()
def test_find_with_mutating(self):
e = ET.Element('foo')
e.extend([ET.Element('bar')])
e.find(MutatingElementPath(e, 'x'))
def test_find_with_error(self):
e = ET.Element('foo')
e.extend([ET.Element('bar')])
try:
e.find(BadElementPath('x'))
except ZeroDivisionError:
pass
def test_findtext_with_mutating(self):
e = ET.Element('foo')
e.extend([ET.Element('bar')])
e.findtext(MutatingElementPath(e, 'x'))
def test_findtext_with_error(self):
e = ET.Element('foo')
e.extend([ET.Element('bar')])
try:
e.findtext(BadElementPath('x'))
except ZeroDivisionError:
pass
def test_findall_with_mutating(self):
e = ET.Element('foo')
e.extend([ET.Element('bar')])
e.findall(MutatingElementPath(e, 'x'))
def test_findall_with_error(self):
e = ET.Element('foo')
e.extend([ET.Element('bar')])
try:
e.findall(BadElementPath('x'))
except ZeroDivisionError:
pass
class ElementTreeTypeTest(unittest.TestCase):
def test_istype(self):
self.assertIsInstance(ET.ParseError, type)
self.assertIsInstance(ET.QName, type)
self.assertIsInstance(ET.ElementTree, type)
self.assertIsInstance(ET.Element, type)
self.assertIsInstance(ET.TreeBuilder, type)
self.assertIsInstance(ET.XMLParser, type)
def test_Element_subclass_trivial(self):
class MyElement(ET.Element):
pass
mye = MyElement('foo')
self.assertIsInstance(mye, ET.Element)
self.assertIsInstance(mye, MyElement)
self.assertEqual(mye.tag, 'foo')
# test that attribute assignment works (issue 14849)
mye.text = "joe"
self.assertEqual(mye.text, "joe")
def test_Element_subclass_constructor(self):
class MyElement(ET.Element):
def __init__(self, tag, attrib={}, **extra):
super(MyElement, self).__init__(tag + '__', attrib, **extra)
mye = MyElement('foo', {'a': 1, 'b': 2}, c=3, d=4)
self.assertEqual(mye.tag, 'foo__')
self.assertEqual(sorted(mye.items()),
[('a', 1), ('b', 2), ('c', 3), ('d', 4)])
def test_Element_subclass_new_method(self):
class MyElement(ET.Element):
def newmethod(self):
return self.tag
mye = MyElement('joe')
self.assertEqual(mye.newmethod(), 'joe')
def test_Element_subclass_find(self):
class MyElement(ET.Element):
pass
e = ET.Element('foo')
e.text = 'text'
sub = MyElement('bar')
sub.text = 'subtext'
e.append(sub)
self.assertEqual(e.findtext('bar'), 'subtext')
self.assertEqual(e.find('bar').tag, 'bar')
found = list(e.findall('bar'))
self.assertEqual(len(found), 1, found)
self.assertEqual(found[0].tag, 'bar')
class ElementFindTest(unittest.TestCase):
def test_find_simple(self):
e = ET.XML(SAMPLE_XML)
self.assertEqual(e.find('tag').tag, 'tag')
self.assertEqual(e.find('section/tag').tag, 'tag')
self.assertEqual(e.find('./tag').tag, 'tag')
e[2] = ET.XML(SAMPLE_SECTION)
self.assertEqual(e.find('section/nexttag').tag, 'nexttag')
self.assertEqual(e.findtext('./tag'), 'text')
self.assertEqual(e.findtext('section/tag'), 'subtext')
# section/nexttag is found but has no text
self.assertEqual(e.findtext('section/nexttag'), '')
self.assertEqual(e.findtext('section/nexttag', 'default'), '')
# tog doesn't exist and 'default' kicks in
self.assertIsNone(e.findtext('tog'))
self.assertEqual(e.findtext('tog', 'default'), 'default')
# Issue #16922
self.assertEqual(ET.XML('<tag><empty /></tag>').findtext('empty'), '')
def test_find_xpath(self):
LINEAR_XML = '''
<body>
<tag class='a'/>
<tag class='b'/>
<tag class='c'/>
<tag class='d'/>
</body>'''
e = ET.XML(LINEAR_XML)
# Test for numeric indexing and last()
self.assertEqual(e.find('./tag[1]').attrib['class'], 'a')
self.assertEqual(e.find('./tag[2]').attrib['class'], 'b')
self.assertEqual(e.find('./tag[last()]').attrib['class'], 'd')
self.assertEqual(e.find('./tag[last()-1]').attrib['class'], 'c')
self.assertEqual(e.find('./tag[last()-2]').attrib['class'], 'b')
self.assertRaisesRegex(SyntaxError, 'XPath', e.find, './tag[0]')
self.assertRaisesRegex(SyntaxError, 'XPath', e.find, './tag[-1]')
self.assertRaisesRegex(SyntaxError, 'XPath', e.find, './tag[last()-0]')
self.assertRaisesRegex(SyntaxError, 'XPath', e.find, './tag[last()+1]')
def test_findall(self):
e = ET.XML(SAMPLE_XML)
e[2] = ET.XML(SAMPLE_SECTION)
self.assertEqual(summarize_list(e.findall('.')), ['body'])
self.assertEqual(summarize_list(e.findall('tag')), ['tag', 'tag'])
self.assertEqual(summarize_list(e.findall('tog')), [])
self.assertEqual(summarize_list(e.findall('tog/foo')), [])
self.assertEqual(summarize_list(e.findall('*')),
['tag', 'tag', 'section'])
self.assertEqual(summarize_list(e.findall('.//tag')),
['tag'] * 4)
self.assertEqual(summarize_list(e.findall('section/tag')), ['tag'])
self.assertEqual(summarize_list(e.findall('section//tag')), ['tag'] * 2)
self.assertEqual(summarize_list(e.findall('section/*')),
['tag', 'nexttag', 'nextsection'])
self.assertEqual(summarize_list(e.findall('section//*')),
['tag', 'nexttag', 'nextsection', 'tag'])
self.assertEqual(summarize_list(e.findall('section/.//*')),
['tag', 'nexttag', 'nextsection', 'tag'])
self.assertEqual(summarize_list(e.findall('*/*')),
['tag', 'nexttag', 'nextsection'])
self.assertEqual(summarize_list(e.findall('*//*')),
['tag', 'nexttag', 'nextsection', 'tag'])
self.assertEqual(summarize_list(e.findall('*/tag')), ['tag'])
self.assertEqual(summarize_list(e.findall('*/./tag')), ['tag'])
self.assertEqual(summarize_list(e.findall('./tag')), ['tag'] * 2)
self.assertEqual(summarize_list(e.findall('././tag')), ['tag'] * 2)
self.assertEqual(summarize_list(e.findall('.//tag[@class]')),
['tag'] * 3)
self.assertEqual(summarize_list(e.findall('.//tag[@class="a"]')),
['tag'])
self.assertEqual(summarize_list(e.findall('.//tag[@class="b"]')),
['tag'] * 2)
self.assertEqual(summarize_list(e.findall('.//tag[@id]')),
['tag'])
self.assertEqual(summarize_list(e.findall('.//section[tag]')),
['section'])
self.assertEqual(summarize_list(e.findall('.//section[element]')), [])
self.assertEqual(summarize_list(e.findall('../tag')), [])
self.assertEqual(summarize_list(e.findall('section/../tag')),
['tag'] * 2)
self.assertEqual(e.findall('section//'), e.findall('section//*'))
self.assertEqual(summarize_list(e.findall(".//section[tag='subtext']")),
['section'])
self.assertEqual(summarize_list(e.findall(".//section[tag ='subtext']")),
['section'])
self.assertEqual(summarize_list(e.findall(".//section[tag= 'subtext']")),
['section'])
self.assertEqual(summarize_list(e.findall(".//section[tag = 'subtext']")),
['section'])
self.assertEqual(summarize_list(e.findall(".//section[ tag = 'subtext' ]")),
['section'])
self.assertEqual(summarize_list(e.findall(".//tag[.='subtext']")),
['tag'])
self.assertEqual(summarize_list(e.findall(".//tag[. ='subtext']")),
['tag'])
self.assertEqual(summarize_list(e.findall('.//tag[.= "subtext"]')),
['tag'])
self.assertEqual(summarize_list(e.findall('.//tag[ . = "subtext" ]')),
['tag'])
self.assertEqual(summarize_list(e.findall(".//tag[. = 'subtext']")),
['tag'])
self.assertEqual(summarize_list(e.findall(".//tag[. = 'subtext ']")),
[])
self.assertEqual(summarize_list(e.findall(".//tag[.= ' subtext']")),
[])
# duplicate section => 2x tag matches
e[1] = e[2]
self.assertEqual(summarize_list(e.findall(".//section[tag = 'subtext']")),
['section', 'section'])
self.assertEqual(summarize_list(e.findall(".//tag[. = 'subtext']")),
['tag', 'tag'])
def test_test_find_with_ns(self):
e = ET.XML(SAMPLE_XML_NS)
self.assertEqual(summarize_list(e.findall('tag')), [])
self.assertEqual(
summarize_list(e.findall("{http://effbot.org/ns}tag")),
['{http://effbot.org/ns}tag'] * 2)
self.assertEqual(
summarize_list(e.findall(".//{http://effbot.org/ns}tag")),
['{http://effbot.org/ns}tag'] * 3)
def test_findall_different_nsmaps(self):
root = ET.XML('''
<a xmlns:x="X" xmlns:y="Y">
<x:b><c/></x:b>
<b/>
<c><x:b/><b/></c><y:b/>
</a>''')
nsmap = {'xx': 'X'}
self.assertEqual(len(root.findall(".//xx:b", namespaces=nsmap)), 2)
self.assertEqual(len(root.findall(".//b", namespaces=nsmap)), 2)
nsmap = {'xx': 'Y'}
self.assertEqual(len(root.findall(".//xx:b", namespaces=nsmap)), 1)
self.assertEqual(len(root.findall(".//b", namespaces=nsmap)), 2)
nsmap = {'xx': 'X', '': 'Y'}
self.assertEqual(len(root.findall(".//xx:b", namespaces=nsmap)), 2)
self.assertEqual(len(root.findall(".//b", namespaces=nsmap)), 1)
def test_findall_wildcard(self):
root = ET.XML('''
<a xmlns:x="X" xmlns:y="Y">
<x:b><c/></x:b>
<b/>
<c><x:b/><b/></c><y:b/>
</a>''')
root.append(ET.Comment('test'))
self.assertEqual(summarize_list(root.findall("{*}b")),
['{X}b', 'b', '{Y}b'])
self.assertEqual(summarize_list(root.findall("{*}c")),
['c'])
self.assertEqual(summarize_list(root.findall("{X}*")),
['{X}b'])
self.assertEqual(summarize_list(root.findall("{Y}*")),
['{Y}b'])
self.assertEqual(summarize_list(root.findall("{}*")),
['b', 'c'])
self.assertEqual(summarize_list(root.findall("{}b")), # only for consistency
['b'])
self.assertEqual(summarize_list(root.findall("{}b")),
summarize_list(root.findall("b")))
self.assertEqual(summarize_list(root.findall("{*}*")),
['{X}b', 'b', 'c', '{Y}b'])
# This is an unfortunate difference, but that's how find('*') works.
self.assertEqual(summarize_list(root.findall("{*}*") + [root[-1]]),
summarize_list(root.findall("*")))
self.assertEqual(summarize_list(root.findall(".//{*}b")),
['{X}b', 'b', '{X}b', 'b', '{Y}b'])
self.assertEqual(summarize_list(root.findall(".//{*}c")),
['c', 'c'])
self.assertEqual(summarize_list(root.findall(".//{X}*")),
['{X}b', '{X}b'])
self.assertEqual(summarize_list(root.findall(".//{Y}*")),
['{Y}b'])
self.assertEqual(summarize_list(root.findall(".//{}*")),
['c', 'b', 'c', 'b'])
self.assertEqual(summarize_list(root.findall(".//{}b")), # only for consistency
['b', 'b'])
self.assertEqual(summarize_list(root.findall(".//{}b")),
summarize_list(root.findall(".//b")))
def test_bad_find(self):
e = ET.XML(SAMPLE_XML)
with self.assertRaisesRegex(SyntaxError, 'cannot use absolute path'):
e.findall('/tag')
def test_find_through_ElementTree(self):
e = ET.XML(SAMPLE_XML)
self.assertEqual(ET.ElementTree(e).find('tag').tag, 'tag')
self.assertEqual(ET.ElementTree(e).findtext('tag'), 'text')
self.assertEqual(summarize_list(ET.ElementTree(e).findall('tag')),
['tag'] * 2)
# this produces a warning
msg = ("This search is broken in 1.3 and earlier, and will be fixed "
"in a future version. If you rely on the current behaviour, "
"change it to '.+'")
with self.assertWarnsRegex(FutureWarning, msg):
it = ET.ElementTree(e).findall('//tag')
self.assertEqual(summarize_list(it), ['tag'] * 3)
class ElementIterTest(unittest.TestCase):
def _ilist(self, elem, tag=None):
return summarize_list(elem.iter(tag))
def test_basic(self):
doc = ET.XML("<html><body>this is a <i>paragraph</i>.</body>..</html>")
self.assertEqual(self._ilist(doc), ['html', 'body', 'i'])
self.assertEqual(self._ilist(doc.find('body')), ['body', 'i'])
self.assertEqual(next(doc.iter()).tag, 'html')
self.assertEqual(''.join(doc.itertext()), 'this is a paragraph...')
self.assertEqual(''.join(doc.find('body').itertext()),
'this is a paragraph.')
self.assertEqual(next(doc.itertext()), 'this is a ')
# iterparse should return an iterator
sourcefile = serialize(doc, to_string=False)
self.assertEqual(next(ET.iterparse(sourcefile))[0], 'end')
# With an explicit parser too (issue #9708)
sourcefile = serialize(doc, to_string=False)
parser = ET.XMLParser(target=ET.TreeBuilder())
self.assertEqual(next(ET.iterparse(sourcefile, parser=parser))[0],
'end')
tree = ET.ElementTree(None)
self.assertRaises(AttributeError, tree.iter)
# Issue #16913
doc = ET.XML("<root>a&<sub>b&</sub>c&</root>")
self.assertEqual(''.join(doc.itertext()), 'a&b&c&')
def test_corners(self):
# single root, no subelements
a = ET.Element('a')
self.assertEqual(self._ilist(a), ['a'])
# one child
b = ET.SubElement(a, 'b')
self.assertEqual(self._ilist(a), ['a', 'b'])
# one child and one grandchild
c = ET.SubElement(b, 'c')
self.assertEqual(self._ilist(a), ['a', 'b', 'c'])
# two children, only first with grandchild
d = ET.SubElement(a, 'd')
self.assertEqual(self._ilist(a), ['a', 'b', 'c', 'd'])
# replace first child by second
a[0] = a[1]
del a[1]
self.assertEqual(self._ilist(a), ['a', 'd'])
def test_iter_by_tag(self):
doc = ET.XML('''
<document>
<house>
<room>bedroom1</room>
<room>bedroom2</room>
</house>
<shed>nothing here
</shed>
<house>
<room>bedroom8</room>
</house>
</document>''')
self.assertEqual(self._ilist(doc, 'room'), ['room'] * 3)
self.assertEqual(self._ilist(doc, 'house'), ['house'] * 2)
# test that iter also accepts 'tag' as a keyword arg
self.assertEqual(
summarize_list(doc.iter(tag='room')),
['room'] * 3)
# make sure both tag=None and tag='*' return all tags
all_tags = ['document', 'house', 'room', 'room',
'shed', 'house', 'room']
self.assertEqual(summarize_list(doc.iter()), all_tags)
self.assertEqual(self._ilist(doc), all_tags)
self.assertEqual(self._ilist(doc, '*'), all_tags)
# Element.getiterator() is deprecated.
@checkwarnings(("This method will be removed in future versions. "
"Use .+ instead.", DeprecationWarning))
def test_getiterator(self):
doc = ET.XML('''
<document>
<house>
<room>bedroom1</room>
<room>bedroom2</room>
</house>
<shed>nothing here
</shed>
<house>
<room>bedroom8</room>
</house>
</document>''')
self.assertEqual(summarize_list(doc.getiterator('room')),
['room'] * 3)
self.assertEqual(summarize_list(doc.getiterator('house')),
['house'] * 2)
# test that getiterator also accepts 'tag' as a keyword arg
self.assertEqual(
summarize_list(doc.getiterator(tag='room')),
['room'] * 3)
# make sure both tag=None and tag='*' return all tags
all_tags = ['document', 'house', 'room', 'room',
'shed', 'house', 'room']
self.assertEqual(summarize_list(doc.getiterator()), all_tags)
self.assertEqual(summarize_list(doc.getiterator(None)), all_tags)
self.assertEqual(summarize_list(doc.getiterator('*')), all_tags)
@unittest.skipIf(support.stackless, "Stackless can copy iterators")
def test_copy(self):
a = ET.Element('a')
it = a.iter()
with self.assertRaises(TypeError):
copy.copy(it)
@unittest.skipIf(support.stackless, "Stackless can pickle iterators")
def test_pickle(self):
a = ET.Element('a')
it = a.iter()
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.assertRaises((TypeError, pickle.PicklingError)):
pickle.dumps(it, proto)
class TreeBuilderTest(unittest.TestCase):
sample1 = ('<!DOCTYPE html PUBLIC'
' "-//W3C//DTD XHTML 1.0 Transitional//EN"'
' "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">'
'<html>text<div>subtext</div>tail</html>')
sample2 = '''<toplevel>sometext</toplevel>'''
def _check_sample1_element(self, e):
self.assertEqual(e.tag, 'html')
self.assertEqual(e.text, 'text')
self.assertEqual(e.tail, None)
self.assertEqual(e.attrib, {})
children = list(e)
self.assertEqual(len(children), 1)
child = children[0]
self.assertEqual(child.tag, 'div')
self.assertEqual(child.text, 'subtext')
self.assertEqual(child.tail, 'tail')
self.assertEqual(child.attrib, {})
def test_dummy_builder(self):
class BaseDummyBuilder:
def close(self):
return 42
class DummyBuilder(BaseDummyBuilder):
data = start = end = lambda *a: None
parser = ET.XMLParser(target=DummyBuilder())
parser.feed(self.sample1)
self.assertEqual(parser.close(), 42)
parser = ET.XMLParser(target=BaseDummyBuilder())
parser.feed(self.sample1)
self.assertEqual(parser.close(), 42)
parser = ET.XMLParser(target=object())
parser.feed(self.sample1)
self.assertIsNone(parser.close())
def test_treebuilder_comment(self):
b = ET.TreeBuilder()
self.assertEqual(b.comment('ctext').tag, ET.Comment)
self.assertEqual(b.comment('ctext').text, 'ctext')
b = ET.TreeBuilder(comment_factory=ET.Comment)
self.assertEqual(b.comment('ctext').tag, ET.Comment)
self.assertEqual(b.comment('ctext').text, 'ctext')
b = ET.TreeBuilder(comment_factory=len)
self.assertEqual(b.comment('ctext'), len('ctext'))
def test_treebuilder_pi(self):
b = ET.TreeBuilder()
self.assertEqual(b.pi('target', None).tag, ET.PI)
self.assertEqual(b.pi('target', None).text, 'target')
b = ET.TreeBuilder(pi_factory=ET.PI)
self.assertEqual(b.pi('target').tag, ET.PI)
self.assertEqual(b.pi('target').text, "target")
self.assertEqual(b.pi('pitarget', ' text ').tag, ET.PI)
self.assertEqual(b.pi('pitarget', ' text ').text, "pitarget text ")
b = ET.TreeBuilder(pi_factory=lambda target, text: (len(target), text))
self.assertEqual(b.pi('target'), (len('target'), None))
self.assertEqual(b.pi('pitarget', ' text '), (len('pitarget'), ' text '))
def test_treebuilder_elementfactory_none(self):
parser = ET.XMLParser(target=ET.TreeBuilder(element_factory=None))
parser.feed(self.sample1)
e = parser.close()
self._check_sample1_element(e)
def test_subclass(self):
class MyTreeBuilder(ET.TreeBuilder):
def foobar(self, x):
return x * 2
tb = MyTreeBuilder()
self.assertEqual(tb.foobar(10), 20)
parser = ET.XMLParser(target=tb)
parser.feed(self.sample1)
e = parser.close()
self._check_sample1_element(e)
def test_subclass_comment_pi(self):
class MyTreeBuilder(ET.TreeBuilder):
def foobar(self, x):
return x * 2
tb = MyTreeBuilder(comment_factory=ET.Comment, pi_factory=ET.PI)
self.assertEqual(tb.foobar(10), 20)
parser = ET.XMLParser(target=tb)
parser.feed(self.sample1)
parser.feed('<!-- a comment--><?and a pi?>')
e = parser.close()
self._check_sample1_element(e)
def test_element_factory(self):
lst = []
def myfactory(tag, attrib):
nonlocal lst
lst.append(tag)
return ET.Element(tag, attrib)
tb = ET.TreeBuilder(element_factory=myfactory)
parser = ET.XMLParser(target=tb)
parser.feed(self.sample2)
parser.close()
self.assertEqual(lst, ['toplevel'])
def _check_element_factory_class(self, cls):
tb = ET.TreeBuilder(element_factory=cls)
parser = ET.XMLParser(target=tb)
parser.feed(self.sample1)
e = parser.close()
self.assertIsInstance(e, cls)
self._check_sample1_element(e)
def test_element_factory_subclass(self):
class MyElement(ET.Element):
pass
self._check_element_factory_class(MyElement)
def test_element_factory_pure_python_subclass(self):
# Mimick SimpleTAL's behaviour (issue #16089): both versions of
# TreeBuilder should be able to cope with a subclass of the
# pure Python Element class.
base = ET._Element_Py
# Not from a C extension
self.assertEqual(base.__module__, 'xml.etree.ElementTree')
# Force some multiple inheritance with a C class to make things
# more interesting.
class MyElement(base, ValueError):
pass
self._check_element_factory_class(MyElement)
def test_doctype(self):
class DoctypeParser:
_doctype = None
def doctype(self, name, pubid, system):
self._doctype = (name, pubid, system)
def close(self):
return self._doctype
parser = ET.XMLParser(target=DoctypeParser())
parser.feed(self.sample1)
self.assertEqual(parser.close(),
('html', '-//W3C//DTD XHTML 1.0 Transitional//EN',
'http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd'))
def test_builder_lookup_errors(self):
class RaisingBuilder:
def __init__(self, raise_in=None, what=ValueError):
self.raise_in = raise_in
self.what = what
def __getattr__(self, name):
if name == self.raise_in:
raise self.what(self.raise_in)
def handle(*args):
pass
return handle
ET.XMLParser(target=RaisingBuilder())
# cET also checks for 'close' and 'doctype', PyET does it only at need
for event in ('start', 'data', 'end', 'comment', 'pi'):
with self.assertRaisesRegex(ValueError, event):
ET.XMLParser(target=RaisingBuilder(event))
ET.XMLParser(target=RaisingBuilder(what=AttributeError))
for event in ('start', 'data', 'end', 'comment', 'pi'):
parser = ET.XMLParser(target=RaisingBuilder(event, what=AttributeError))
parser.feed(self.sample1)
self.assertIsNone(parser.close())
class XMLParserTest(unittest.TestCase):
sample1 = b'<file><line>22</line></file>'
sample2 = (b'<!DOCTYPE html PUBLIC'
b' "-//W3C//DTD XHTML 1.0 Transitional//EN"'
b' "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">'
b'<html>text</html>')
sample3 = ('<?xml version="1.0" encoding="iso-8859-1"?>\n'
'<money value="$\xa3\u20ac\U0001017b">$\xa3\u20ac\U0001017b</money>')
def _check_sample_element(self, e):
self.assertEqual(e.tag, 'file')
self.assertEqual(e[0].tag, 'line')
self.assertEqual(e[0].text, '22')
def test_constructor_args(self):
parser2 = ET.XMLParser(encoding='utf-8',
target=ET.TreeBuilder())
parser2.feed(self.sample1)
self._check_sample_element(parser2.close())
def test_subclass(self):
class MyParser(ET.XMLParser):
pass
parser = MyParser()
parser.feed(self.sample1)
self._check_sample_element(parser.close())
def test_doctype_warning(self):
with warnings.catch_warnings():
warnings.simplefilter('error', DeprecationWarning)
parser = ET.XMLParser()
parser.feed(self.sample2)
parser.close()
def test_subclass_doctype(self):
_doctype = None
class MyParserWithDoctype(ET.XMLParser):
def doctype(self, *args, **kwargs):
nonlocal _doctype
_doctype = (args, kwargs)
parser = MyParserWithDoctype()
with self.assertWarnsRegex(RuntimeWarning, 'doctype'):
parser.feed(self.sample2)
parser.close()
self.assertIsNone(_doctype)
_doctype = _doctype2 = None
with warnings.catch_warnings():
warnings.simplefilter('error', DeprecationWarning)
warnings.simplefilter('error', RuntimeWarning)
class DoctypeParser:
def doctype(self, name, pubid, system):
nonlocal _doctype2
_doctype2 = (name, pubid, system)
parser = MyParserWithDoctype(target=DoctypeParser())
parser.feed(self.sample2)
parser.close()
self.assertIsNone(_doctype)
self.assertEqual(_doctype2,
('html', '-//W3C//DTD XHTML 1.0 Transitional//EN',
'http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd'))
def test_inherited_doctype(self):
'''Ensure that ordinary usage is not deprecated (Issue 19176)'''
with warnings.catch_warnings():
warnings.simplefilter('error', DeprecationWarning)
warnings.simplefilter('error', RuntimeWarning)
class MyParserWithoutDoctype(ET.XMLParser):
pass
parser = MyParserWithoutDoctype()
parser.feed(self.sample2)
parser.close()
def test_parse_string(self):
parser = ET.XMLParser(target=ET.TreeBuilder())
parser.feed(self.sample3)
e = parser.close()
self.assertEqual(e.tag, 'money')
self.assertEqual(e.attrib['value'], '$\xa3\u20ac\U0001017b')
self.assertEqual(e.text, '$\xa3\u20ac\U0001017b')
class NamespaceParseTest(unittest.TestCase):
def test_find_with_namespace(self):
nsmap = {'h': 'hello', 'f': 'foo'}
doc = ET.fromstring(SAMPLE_XML_NS_ELEMS)
self.assertEqual(len(doc.findall('{hello}table', nsmap)), 1)
self.assertEqual(len(doc.findall('.//{hello}td', nsmap)), 2)
self.assertEqual(len(doc.findall('.//{foo}name', nsmap)), 1)
class ElementSlicingTest(unittest.TestCase):
def _elem_tags(self, elemlist):
return [e.tag for e in elemlist]
def _subelem_tags(self, elem):
return self._elem_tags(list(elem))
def _make_elem_with_children(self, numchildren):
"""Create an Element with a tag 'a', with the given amount of children
named 'a0', 'a1' ... and so on.
"""
e = ET.Element('a')
for i in range(numchildren):
ET.SubElement(e, 'a%s' % i)
return e
def test_getslice_single_index(self):
e = self._make_elem_with_children(10)
self.assertEqual(e[1].tag, 'a1')
self.assertEqual(e[-2].tag, 'a8')
self.assertRaises(IndexError, lambda: e[12])
self.assertRaises(IndexError, lambda: e[-12])
def test_getslice_range(self):
e = self._make_elem_with_children(6)
self.assertEqual(self._elem_tags(e[3:]), ['a3', 'a4', 'a5'])
self.assertEqual(self._elem_tags(e[3:6]), ['a3', 'a4', 'a5'])
self.assertEqual(self._elem_tags(e[3:16]), ['a3', 'a4', 'a5'])
self.assertEqual(self._elem_tags(e[3:5]), ['a3', 'a4'])
self.assertEqual(self._elem_tags(e[3:-1]), ['a3', 'a4'])
self.assertEqual(self._elem_tags(e[:2]), ['a0', 'a1'])
def test_getslice_steps(self):
e = self._make_elem_with_children(10)
self.assertEqual(self._elem_tags(e[8:10:1]), ['a8', 'a9'])
self.assertEqual(self._elem_tags(e[::3]), ['a0', 'a3', 'a6', 'a9'])
self.assertEqual(self._elem_tags(e[::8]), ['a0', 'a8'])
self.assertEqual(self._elem_tags(e[1::8]), ['a1', 'a9'])
self.assertEqual(self._elem_tags(e[3::sys.maxsize]), ['a3'])
self.assertEqual(self._elem_tags(e[3::sys.maxsize<<64]), ['a3'])
def test_getslice_negative_steps(self):
e = self._make_elem_with_children(4)
self.assertEqual(self._elem_tags(e[::-1]), ['a3', 'a2', 'a1', 'a0'])
self.assertEqual(self._elem_tags(e[::-2]), ['a3', 'a1'])
self.assertEqual(self._elem_tags(e[3::-sys.maxsize]), ['a3'])
self.assertEqual(self._elem_tags(e[3::-sys.maxsize-1]), ['a3'])
self.assertEqual(self._elem_tags(e[3::-sys.maxsize<<64]), ['a3'])
def test_delslice(self):
e = self._make_elem_with_children(4)
del e[0:2]
self.assertEqual(self._subelem_tags(e), ['a2', 'a3'])
e = self._make_elem_with_children(4)
del e[0:]
self.assertEqual(self._subelem_tags(e), [])
e = self._make_elem_with_children(4)
del e[::-1]
self.assertEqual(self._subelem_tags(e), [])
e = self._make_elem_with_children(4)
del e[::-2]
self.assertEqual(self._subelem_tags(e), ['a0', 'a2'])
e = self._make_elem_with_children(4)
del e[1::2]
self.assertEqual(self._subelem_tags(e), ['a0', 'a2'])
e = self._make_elem_with_children(2)
del e[::2]
self.assertEqual(self._subelem_tags(e), ['a1'])
def test_setslice_single_index(self):
e = self._make_elem_with_children(4)
e[1] = ET.Element('b')
self.assertEqual(self._subelem_tags(e), ['a0', 'b', 'a2', 'a3'])
e[-2] = ET.Element('c')
self.assertEqual(self._subelem_tags(e), ['a0', 'b', 'c', 'a3'])
with self.assertRaises(IndexError):
e[5] = ET.Element('d')
with self.assertRaises(IndexError):
e[-5] = ET.Element('d')
self.assertEqual(self._subelem_tags(e), ['a0', 'b', 'c', 'a3'])
def test_setslice_range(self):
e = self._make_elem_with_children(4)
e[1:3] = [ET.Element('b%s' % i) for i in range(2)]
self.assertEqual(self._subelem_tags(e), ['a0', 'b0', 'b1', 'a3'])
e = self._make_elem_with_children(4)
e[1:3] = [ET.Element('b')]
self.assertEqual(self._subelem_tags(e), ['a0', 'b', 'a3'])
e = self._make_elem_with_children(4)
e[1:3] = [ET.Element('b%s' % i) for i in range(3)]
self.assertEqual(self._subelem_tags(e), ['a0', 'b0', 'b1', 'b2', 'a3'])
def test_setslice_steps(self):
e = self._make_elem_with_children(6)
e[1:5:2] = [ET.Element('b%s' % i) for i in range(2)]
self.assertEqual(self._subelem_tags(e), ['a0', 'b0', 'a2', 'b1', 'a4', 'a5'])
e = self._make_elem_with_children(6)
with self.assertRaises(ValueError):
e[1:5:2] = [ET.Element('b')]
with self.assertRaises(ValueError):
e[1:5:2] = [ET.Element('b%s' % i) for i in range(3)]
with self.assertRaises(ValueError):
e[1:5:2] = []
self.assertEqual(self._subelem_tags(e), ['a0', 'a1', 'a2', 'a3', 'a4', 'a5'])
e = self._make_elem_with_children(4)
e[1::sys.maxsize] = [ET.Element('b')]
self.assertEqual(self._subelem_tags(e), ['a0', 'b', 'a2', 'a3'])
e[1::sys.maxsize<<64] = [ET.Element('c')]
self.assertEqual(self._subelem_tags(e), ['a0', 'c', 'a2', 'a3'])
def test_setslice_negative_steps(self):
e = self._make_elem_with_children(4)
e[2:0:-1] = [ET.Element('b%s' % i) for i in range(2)]
self.assertEqual(self._subelem_tags(e), ['a0', 'b1', 'b0', 'a3'])
e = self._make_elem_with_children(4)
with self.assertRaises(ValueError):
e[2:0:-1] = [ET.Element('b')]
with self.assertRaises(ValueError):
e[2:0:-1] = [ET.Element('b%s' % i) for i in range(3)]
with self.assertRaises(ValueError):
e[2:0:-1] = []
self.assertEqual(self._subelem_tags(e), ['a0', 'a1', 'a2', 'a3'])
e = self._make_elem_with_children(4)
e[1::-sys.maxsize] = [ET.Element('b')]
self.assertEqual(self._subelem_tags(e), ['a0', 'b', 'a2', 'a3'])
e[1::-sys.maxsize-1] = [ET.Element('c')]
self.assertEqual(self._subelem_tags(e), ['a0', 'c', 'a2', 'a3'])
e[1::-sys.maxsize<<64] = [ET.Element('d')]
self.assertEqual(self._subelem_tags(e), ['a0', 'd', 'a2', 'a3'])
class IOTest(unittest.TestCase):
def test_encoding(self):
# Test encoding issues.
elem = ET.Element("tag")
elem.text = "abc"
self.assertEqual(serialize(elem), '<tag>abc</tag>')
for enc in ("utf-8", "us-ascii"):
with self.subTest(enc):
self.assertEqual(serialize(elem, encoding=enc),
b'<tag>abc</tag>')
self.assertEqual(serialize(elem, encoding=enc.upper()),
b'<tag>abc</tag>')
for enc in ("iso-8859-1", "utf-16", "utf-32"):
with self.subTest(enc):
self.assertEqual(serialize(elem, encoding=enc),
("<?xml version='1.0' encoding='%s'?>\n"
"<tag>abc</tag>" % enc).encode(enc))
upper = enc.upper()
self.assertEqual(serialize(elem, encoding=upper),
("<?xml version='1.0' encoding='%s'?>\n"
"<tag>abc</tag>" % upper).encode(enc))
elem = ET.Element("tag")
elem.text = "<&\"\'>"
self.assertEqual(serialize(elem), '<tag><&"\'></tag>')
self.assertEqual(serialize(elem, encoding="utf-8"),
b'<tag><&"\'></tag>')
self.assertEqual(serialize(elem, encoding="us-ascii"),
b'<tag><&"\'></tag>')
for enc in ("iso-8859-1", "utf-16", "utf-32"):
self.assertEqual(serialize(elem, encoding=enc),
("<?xml version='1.0' encoding='%s'?>\n"
"<tag><&\"'></tag>" % enc).encode(enc))
elem = ET.Element("tag")
elem.attrib["key"] = "<&\"\'>"
self.assertEqual(serialize(elem), '<tag key="<&"\'>" />')
self.assertEqual(serialize(elem, encoding="utf-8"),
b'<tag key="<&"\'>" />')
self.assertEqual(serialize(elem, encoding="us-ascii"),
b'<tag key="<&"\'>" />')
for enc in ("iso-8859-1", "utf-16", "utf-32"):
self.assertEqual(serialize(elem, encoding=enc),
("<?xml version='1.0' encoding='%s'?>\n"
"<tag key=\"<&"'>\" />" % enc).encode(enc))
elem = ET.Element("tag")
elem.text = '\xe5\xf6\xf6<>'
self.assertEqual(serialize(elem), '<tag>\xe5\xf6\xf6<></tag>')
self.assertEqual(serialize(elem, encoding="utf-8"),
b'<tag>\xc3\xa5\xc3\xb6\xc3\xb6<></tag>')
self.assertEqual(serialize(elem, encoding="us-ascii"),
b'<tag>åöö<></tag>')
for enc in ("iso-8859-1", "utf-16", "utf-32"):
self.assertEqual(serialize(elem, encoding=enc),
("<?xml version='1.0' encoding='%s'?>\n"
"<tag>åöö<></tag>" % enc).encode(enc))
elem = ET.Element("tag")
elem.attrib["key"] = '\xe5\xf6\xf6<>'
self.assertEqual(serialize(elem), '<tag key="\xe5\xf6\xf6<>" />')
self.assertEqual(serialize(elem, encoding="utf-8"),
b'<tag key="\xc3\xa5\xc3\xb6\xc3\xb6<>" />')
self.assertEqual(serialize(elem, encoding="us-ascii"),
b'<tag key="åöö<>" />')
for enc in ("iso-8859-1", "utf-16", "utf-16le", "utf-16be", "utf-32"):
self.assertEqual(serialize(elem, encoding=enc),
("<?xml version='1.0' encoding='%s'?>\n"
"<tag key=\"åöö<>\" />" % enc).encode(enc))
def test_write_to_filename(self):
self.addCleanup(support.unlink, TESTFN)
tree = ET.ElementTree(ET.XML('''<site />'''))
tree.write(TESTFN)
with open(TESTFN, 'rb') as f:
self.assertEqual(f.read(), b'''<site />''')
def test_write_to_text_file(self):
self.addCleanup(support.unlink, TESTFN)
tree = ET.ElementTree(ET.XML('''<site />'''))
with open(TESTFN, 'w', encoding='utf-8') as f:
tree.write(f, encoding='unicode')
self.assertFalse(f.closed)
with open(TESTFN, 'rb') as f:
self.assertEqual(f.read(), b'''<site />''')
def test_write_to_binary_file(self):
self.addCleanup(support.unlink, TESTFN)
tree = ET.ElementTree(ET.XML('''<site />'''))
with open(TESTFN, 'wb') as f:
tree.write(f)
self.assertFalse(f.closed)
with open(TESTFN, 'rb') as f:
self.assertEqual(f.read(), b'''<site />''')
def test_write_to_binary_file_with_bom(self):
self.addCleanup(support.unlink, TESTFN)
tree = ET.ElementTree(ET.XML('''<site />'''))
# test BOM writing to buffered file
with open(TESTFN, 'wb') as f:
tree.write(f, encoding='utf-16')
self.assertFalse(f.closed)
with open(TESTFN, 'rb') as f:
self.assertEqual(f.read(),
'''<?xml version='1.0' encoding='utf-16'?>\n'''
'''<site />'''.encode("utf-16"))
# test BOM writing to non-buffered file
with open(TESTFN, 'wb', buffering=0) as f:
tree.write(f, encoding='utf-16')
self.assertFalse(f.closed)
with open(TESTFN, 'rb') as f:
self.assertEqual(f.read(),
'''<?xml version='1.0' encoding='utf-16'?>\n'''
'''<site />'''.encode("utf-16"))
def test_read_from_stringio(self):
tree = ET.ElementTree()
stream = io.StringIO('''<?xml version="1.0"?><site></site>''')
tree.parse(stream)
self.assertEqual(tree.getroot().tag, 'site')
def test_write_to_stringio(self):
tree = ET.ElementTree(ET.XML('''<site />'''))
stream = io.StringIO()
tree.write(stream, encoding='unicode')
self.assertEqual(stream.getvalue(), '''<site />''')
def test_read_from_bytesio(self):
tree = ET.ElementTree()
raw = io.BytesIO(b'''<?xml version="1.0"?><site></site>''')
tree.parse(raw)
self.assertEqual(tree.getroot().tag, 'site')
def test_write_to_bytesio(self):
tree = ET.ElementTree(ET.XML('''<site />'''))
raw = io.BytesIO()
tree.write(raw)
self.assertEqual(raw.getvalue(), b'''<site />''')
class dummy:
pass
def test_read_from_user_text_reader(self):
stream = io.StringIO('''<?xml version="1.0"?><site></site>''')
reader = self.dummy()
reader.read = stream.read
tree = ET.ElementTree()
tree.parse(reader)
self.assertEqual(tree.getroot().tag, 'site')
def test_write_to_user_text_writer(self):
tree = ET.ElementTree(ET.XML('''<site />'''))
stream = io.StringIO()
writer = self.dummy()
writer.write = stream.write
tree.write(writer, encoding='unicode')
self.assertEqual(stream.getvalue(), '''<site />''')
def test_read_from_user_binary_reader(self):
raw = io.BytesIO(b'''<?xml version="1.0"?><site></site>''')
reader = self.dummy()
reader.read = raw.read
tree = ET.ElementTree()
tree.parse(reader)
self.assertEqual(tree.getroot().tag, 'site')
tree = ET.ElementTree()
def test_write_to_user_binary_writer(self):
tree = ET.ElementTree(ET.XML('''<site />'''))
raw = io.BytesIO()
writer = self.dummy()
writer.write = raw.write
tree.write(writer)
self.assertEqual(raw.getvalue(), b'''<site />''')
def test_write_to_user_binary_writer_with_bom(self):
tree = ET.ElementTree(ET.XML('''<site />'''))
raw = io.BytesIO()
writer = self.dummy()
writer.write = raw.write
writer.seekable = lambda: True
writer.tell = raw.tell
tree.write(writer, encoding="utf-16")
self.assertEqual(raw.getvalue(),
'''<?xml version='1.0' encoding='utf-16'?>\n'''
'''<site />'''.encode("utf-16"))
def test_tostringlist_invariant(self):
root = ET.fromstring('<tag>foo</tag>')
self.assertEqual(
ET.tostring(root, 'unicode'),
''.join(ET.tostringlist(root, 'unicode')))
self.assertEqual(
ET.tostring(root, 'utf-16'),
b''.join(ET.tostringlist(root, 'utf-16')))
def test_short_empty_elements(self):
root = ET.fromstring('<tag>a<x />b<y></y>c</tag>')
self.assertEqual(
ET.tostring(root, 'unicode'),
'<tag>a<x />b<y />c</tag>')
self.assertEqual(
ET.tostring(root, 'unicode', short_empty_elements=True),
'<tag>a<x />b<y />c</tag>')
self.assertEqual(
ET.tostring(root, 'unicode', short_empty_elements=False),
'<tag>a<x></x>b<y></y>c</tag>')
class ParseErrorTest(unittest.TestCase):
def test_subclass(self):
self.assertIsInstance(ET.ParseError(), SyntaxError)
def _get_error(self, s):
try:
ET.fromstring(s)
except ET.ParseError as e:
return e
def test_error_position(self):
self.assertEqual(self._get_error('foo').position, (1, 0))
self.assertEqual(self._get_error('<tag>&foo;</tag>').position, (1, 5))
self.assertEqual(self._get_error('foobar<').position, (1, 6))
def test_error_code(self):
import xml.parsers.expat.errors as ERRORS
self.assertEqual(self._get_error('foo').code,
ERRORS.codes[ERRORS.XML_ERROR_SYNTAX])
class KeywordArgsTest(unittest.TestCase):
# Test various issues with keyword arguments passed to ET.Element
# constructor and methods
def test_issue14818(self):
x = ET.XML("<a>foo</a>")
self.assertEqual(x.find('a', None),
x.find(path='a', namespaces=None))
self.assertEqual(x.findtext('a', None, None),
x.findtext(path='a', default=None, namespaces=None))
self.assertEqual(x.findall('a', None),
x.findall(path='a', namespaces=None))
self.assertEqual(list(x.iterfind('a', None)),
list(x.iterfind(path='a', namespaces=None)))
self.assertEqual(ET.Element('a').attrib, {})
elements = [
ET.Element('a', dict(href="#", id="foo")),
ET.Element('a', attrib=dict(href="#", id="foo")),
ET.Element('a', dict(href="#"), id="foo"),
ET.Element('a', href="#", id="foo"),
ET.Element('a', dict(href="#", id="foo"), href="#", id="foo"),
]
for e in elements:
self.assertEqual(e.tag, 'a')
self.assertEqual(e.attrib, dict(href="#", id="foo"))
e2 = ET.SubElement(elements[0], 'foobar', attrib={'key1': 'value1'})
self.assertEqual(e2.attrib['key1'], 'value1')
with self.assertRaisesRegex(TypeError, 'must be dict, not str'):
ET.Element('a', "I'm not a dict")
with self.assertRaisesRegex(TypeError, 'must be dict, not str'):
ET.Element('a', attrib="I'm not a dict")
# --------------------------------------------------------------------
class NoAcceleratorTest(unittest.TestCase):
def setUp(self):
if not pyET:
raise unittest.SkipTest('only for the Python version')
# Test that the C accelerator was not imported for pyET
def test_correct_import_pyET(self):
# The type of methods defined in Python code is types.FunctionType,
# while the type of methods defined inside _elementtree is
# <class 'wrapper_descriptor'>
self.assertIsInstance(pyET.Element.__init__, types.FunctionType)
self.assertIsInstance(pyET.XMLParser.__init__, types.FunctionType)
# --------------------------------------------------------------------
def c14n_roundtrip(xml, **options):
return pyET.canonicalize(xml, **options)
class C14NTest(unittest.TestCase):
maxDiff = None
#
# simple roundtrip tests (from c14n.py)
def test_simple_roundtrip(self):
# Basics
self.assertEqual(c14n_roundtrip("<doc/>"), '<doc></doc>')
self.assertEqual(c14n_roundtrip("<doc xmlns='uri'/>"), # FIXME
'<doc xmlns="uri"></doc>')
self.assertEqual(c14n_roundtrip("<prefix:doc xmlns:prefix='uri'/>"),
'<prefix:doc xmlns:prefix="uri"></prefix:doc>')
self.assertEqual(c14n_roundtrip("<doc xmlns:prefix='uri'><prefix:bar/></doc>"),
'<doc><prefix:bar xmlns:prefix="uri"></prefix:bar></doc>')
self.assertEqual(c14n_roundtrip("<elem xmlns:wsu='http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd' xmlns:SOAP-ENV='http://schemas.xmlsoap.org/soap/envelope/' />"),
'<elem></elem>')
# C14N spec
self.assertEqual(c14n_roundtrip("<doc>Hello, world!<!-- Comment 1 --></doc>"),
'<doc>Hello, world!</doc>')
self.assertEqual(c14n_roundtrip("<value>2</value>"),
'<value>2</value>')
self.assertEqual(c14n_roundtrip('<compute><![CDATA[value>"0" && value<"10" ?"valid":"error"]]></compute>'),
'<compute>value>"0" && value<"10" ?"valid":"error"</compute>')
self.assertEqual(c14n_roundtrip('''<compute expr='value>"0" && value<"10" ?"valid":"error"'>valid</compute>'''),
'<compute expr="value>"0" && value<"10" ?"valid":"error"">valid</compute>')
self.assertEqual(c14n_roundtrip("<norm attr=' '   
	 ' '/>"),
'<norm attr=" \' 
	 \' "></norm>')
self.assertEqual(c14n_roundtrip("<normNames attr=' A   
	 B '/>"),
'<normNames attr=" A 
	 B "></normNames>')
self.assertEqual(c14n_roundtrip("<normId id=' '   
	 ' '/>"),
'<normId id=" \' 
	 \' "></normId>')
# fragments from PJ's tests
#self.assertEqual(c14n_roundtrip("<doc xmlns:x='http://example.com/x' xmlns='http://example.com/default'><b y:a1='1' xmlns='http://example.com/default' a3='3' xmlns:y='http://example.com/y' y:a2='2'/></doc>"),
#'<doc xmlns:x="http://example.com/x"><b xmlns:y="http://example.com/y" a3="3" y:a1="1" y:a2="2"></b></doc>')
def test_c14n_exclusion(self):
xml = textwrap.dedent("""\
<root xmlns:x="http://example.com/x">
<a x:attr="attrx">
<b>abtext</b>
</a>
<b>btext</b>
<c>
<x:d>dtext</x:d>
</c>
</root>
""")
self.assertEqual(
c14n_roundtrip(xml, strip_text=True),
'<root>'
'<a xmlns:x="http://example.com/x" x:attr="attrx"><b>abtext</b></a>'
'<b>btext</b>'
'<c><x:d xmlns:x="http://example.com/x">dtext</x:d></c>'
'</root>')
self.assertEqual(
c14n_roundtrip(xml, strip_text=True, exclude_attrs=['{http://example.com/x}attr']),
'<root>'
'<a><b>abtext</b></a>'
'<b>btext</b>'
'<c><x:d xmlns:x="http://example.com/x">dtext</x:d></c>'
'</root>')
self.assertEqual(
c14n_roundtrip(xml, strip_text=True, exclude_tags=['{http://example.com/x}d']),
'<root>'
'<a xmlns:x="http://example.com/x" x:attr="attrx"><b>abtext</b></a>'
'<b>btext</b>'
'<c></c>'
'</root>')
self.assertEqual(
c14n_roundtrip(xml, strip_text=True, exclude_attrs=['{http://example.com/x}attr'],
exclude_tags=['{http://example.com/x}d']),
'<root>'
'<a><b>abtext</b></a>'
'<b>btext</b>'
'<c></c>'
'</root>')
self.assertEqual(
c14n_roundtrip(xml, strip_text=True, exclude_tags=['a', 'b']),
'<root>'
'<c><x:d xmlns:x="http://example.com/x">dtext</x:d></c>'
'</root>')
self.assertEqual(
c14n_roundtrip(xml, exclude_tags=['a', 'b']),
'<root>\n'
' \n'
' \n'
' <c>\n'
' <x:d xmlns:x="http://example.com/x">dtext</x:d>\n'
' </c>\n'
'</root>')
self.assertEqual(
c14n_roundtrip(xml, strip_text=True, exclude_tags=['{http://example.com/x}d', 'b']),
'<root>'
'<a xmlns:x="http://example.com/x" x:attr="attrx"></a>'
'<c></c>'
'</root>')
self.assertEqual(
c14n_roundtrip(xml, exclude_tags=['{http://example.com/x}d', 'b']),
'<root>\n'
' <a xmlns:x="http://example.com/x" x:attr="attrx">\n'
' \n'
' </a>\n'
' \n'
' <c>\n'
' \n'
' </c>\n'
'</root>')
#
# basic method=c14n tests from the c14n 2.0 specification. uses
# test files under xmltestdata/c14n-20.
# note that this uses generated C14N versions of the standard ET.write
# output, not roundtripped C14N (see above).
def test_xml_c14n2(self):
datadir = findfile("c14n-20", subdir="xmltestdata")
full_path = partial(os.path.join, datadir)
files = [filename[:-4] for filename in sorted(os.listdir(datadir))
if filename.endswith('.xml')]
input_files = [
filename for filename in files
if filename.startswith('in')
]
configs = {
filename: {
# <c14n2:PrefixRewrite>sequential</c14n2:PrefixRewrite>
option.tag.split('}')[-1]: ((option.text or '').strip(), option)
for option in ET.parse(full_path(filename) + ".xml").getroot()
}
for filename in files
if filename.startswith('c14n')
}
tests = {
input_file: [
(filename, configs[filename.rsplit('_', 1)[-1]])
for filename in files
if filename.startswith(f'out_{input_file}_')
and filename.rsplit('_', 1)[-1] in configs
]
for input_file in input_files
}
# Make sure we found all test cases.
self.assertEqual(30, len([
output_file for output_files in tests.values()
for output_file in output_files]))
def get_option(config, option_name, default=None):
return config.get(option_name, (default, ()))[0]
for input_file, output_files in tests.items():
for output_file, config in output_files:
keep_comments = get_option(
config, 'IgnoreComments') == 'true' # no, it's right :)
strip_text = get_option(
config, 'TrimTextNodes') == 'true'
rewrite_prefixes = get_option(
config, 'PrefixRewrite') == 'sequential'
if 'QNameAware' in config:
qattrs = [
f"{{{el.get('NS')}}}{el.get('Name')}"
for el in config['QNameAware'][1].findall(
'{http://www.w3.org/2010/xml-c14n2}QualifiedAttr')
]
qtags = [
f"{{{el.get('NS')}}}{el.get('Name')}"
for el in config['QNameAware'][1].findall(
'{http://www.w3.org/2010/xml-c14n2}Element')
]
else:
qtags = qattrs = None
# Build subtest description from config.
config_descr = ','.join(
f"{name}={value or ','.join(c.tag.split('}')[-1] for c in children)}"
for name, (value, children) in sorted(config.items())
)
with self.subTest(f"{output_file}({config_descr})"):
if input_file == 'inNsRedecl' and not rewrite_prefixes:
self.skipTest(
f"Redeclared namespace handling is not supported in {output_file}")
if input_file == 'inNsSuperfluous' and not rewrite_prefixes:
self.skipTest(
f"Redeclared namespace handling is not supported in {output_file}")
if 'QNameAware' in config and config['QNameAware'][1].find(
'{http://www.w3.org/2010/xml-c14n2}XPathElement') is not None:
self.skipTest(
f"QName rewriting in XPath text is not supported in {output_file}")
f = full_path(input_file + ".xml")
if input_file == 'inC14N5':
# Hack: avoid setting up external entity resolution in the parser.
with open(full_path('world.txt'), 'rb') as entity_file:
with open(f, 'rb') as f:
f = io.BytesIO(f.read().replace(b'&ent2;', entity_file.read()))
text = ET.canonicalize(
from_file=f,
with_comments=keep_comments,
strip_text=strip_text,
rewrite_prefixes=rewrite_prefixes,
qname_aware_tags=qtags, qname_aware_attrs=qattrs)
with open(full_path(output_file + ".xml"), 'r', encoding='utf8') as f:
expected = f.read()
if input_file == 'inC14N3':
# FIXME: cET resolves default attributes but ET does not!
expected = expected.replace(' attr="default"', '')
text = text.replace(' attr="default"', '')
self.assertEqual(expected, text)
# --------------------------------------------------------------------
def test_main(module=None):
# When invoked without a module, runs the Python ET tests by loading pyET.
# Otherwise, uses the given module as the ET.
global pyET
pyET = import_fresh_module('xml.etree.ElementTree',
blocked=['_elementtree'])
if module is None:
module = pyET
global ET
ET = module
test_classes = [
ModuleTest,
ElementSlicingTest,
BasicElementTest,
BadElementTest,
BadElementPathTest,
ElementTreeTest,
IOTest,
ParseErrorTest,
XIncludeTest,
ElementTreeTypeTest,
ElementFindTest,
ElementIterTest,
TreeBuilderTest,
XMLParserTest,
XMLPullParserTest,
BugsTest,
KeywordArgsTest,
C14NTest,
]
# These tests will only run for the pure-Python version that doesn't import
# _elementtree. We can't use skipUnless here, because pyET is filled in only
# after the module is loaded.
if pyET is not ET:
test_classes.extend([
NoAcceleratorTest,
])
# Provide default namespace mapping and path cache.
from xml.etree import ElementPath
nsmap = ET.register_namespace._namespace_map
# Copy the default namespace mapping
nsmap_copy = nsmap.copy()
# Copy the path cache (should be empty)
path_cache = ElementPath._cache
ElementPath._cache = path_cache.copy()
# Align the Comment/PI factories.
if hasattr(ET, '_set_factories'):
old_factories = ET._set_factories(ET.Comment, ET.PI)
else:
old_factories = None
try:
support.run_unittest(*test_classes)
finally:
from xml.etree import ElementPath
# Restore mapping and path cache
nsmap.clear()
nsmap.update(nsmap_copy)
ElementPath._cache = path_cache
if old_factories is not None:
ET._set_factories(*old_factories)
# don't interfere with subsequent tests
ET = pyET = None
if __name__ == '__main__':
test_main()
|
the-stack_0_20534 | from typing import List


class Solution:
def minPathSum(self, grid: List[List[int]]) -> int:
"""
Since all elements are non-negative, we should never move to left or top.
These movements increase the path length and also the path value.
"""
if not grid or not grid[0]:
raise Exception("Empty Grid")
dp = [[0] * len(grid[0]) for _ in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
if i == 0 and j == 0:
dp[i][j] = grid[i][j]
elif i == 0 and j != 0:
dp[i][j] = dp[i][j - 1] + grid[i][j]
elif i != 0 and j == 0:
dp[i][j] = dp[i - 1][j] + grid[i][j]
else:
dp[i][j] = min(dp[i][j - 1], dp[i - 1][j]) + grid[i][j]
return dp[-1][-1]
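

# Minimal usage sketch (added for illustration; the grid below is an assumed example,
# not part of the original solution). The cheapest path 1 -> 3 -> 1 -> 1 -> 1 sums to 7.
if __name__ == "__main__":
    demo_grid = [[1, 3, 1],
                 [1, 5, 1],
                 [4, 2, 1]]
    print(Solution().minPathSum(demo_grid))  # expected output: 7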
|
the-stack_0_20535 |
from bs4 import BeautifulSoup
import requests
def scraperSongtexts(website: str):
    """Collect the songtext page URLs linked from a songtexte.com page."""
sWebsiteContent = requests.get(website)
soup = BeautifulSoup(sWebsiteContent.content, 'html.parser')
links_with_text = []
for a in soup.find_all('a', href=True):
if a.text:
links_with_text.append(a['href'])
links =[link for link in links_with_text if "/songtext/" in link]
    websites = []
    for link in links:
        link = link.replace('../../', '')
        websites.append('https://www.songtexte.com/' + link)
    return websites
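

# Minimal usage sketch (the URL below is a placeholder assumption, not taken from the
# original script): prints every songtext link discovered on the given page.
if __name__ == "__main__":
    example_page = "https://www.songtexte.com/artist/example.html"  # hypothetical URL
    for songtext_url in scraperSongtexts(example_page):
        print(songtext_url)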
|
the-stack_0_20536 | import torch
import torch.nn as nn
import torchvision.transforms as transforms
import torchvision.datasets as dsets
from torch.autograd import Variable
'''
STEP 1: LOADING DATASET
'''
train_dataset = dsets.MNIST(root='./data',
train=True,
transform=transforms.ToTensor(),
download=True)
test_dataset = dsets.MNIST(root='./data',
train=False,
transform=transforms.ToTensor())
'''
STEP 2: MAKING DATASET ITERABLE
'''
batch_size = 100
n_iters = 3000
num_epochs = n_iters / (len(train_dataset) / batch_size)
num_epochs = int(num_epochs)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
batch_size=batch_size,
shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
batch_size=batch_size,
shuffle=False)
'''
STEP 3: CREATE MODEL CLASS
'''
class LogisticRegressionModel(nn.Module):
def __init__(self, input_dim, output_dim):
super(LogisticRegressionModel, self).__init__()
self.linear = nn.Linear(input_dim, output_dim)
def forward(self, x):
out = self.linear(x)
return out
'''
STEP 4: INSTANTIATE MODEL CLASS
'''
input_dim = 28*28
output_dim = 10
model = LogisticRegressionModel(input_dim, output_dim)
#######################
# USE GPU FOR MODEL #
#######################
if torch.cuda.is_available():
model.cuda()
'''
STEP 5: INSTANTIATE LOSS CLASS
'''
criterion = nn.CrossEntropyLoss()
'''
STEP 6: INSTANTIATE OPTIMIZER CLASS
'''
learning_rate = 0.001
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
'''
STEP 7: TRAIN THE MODEL
'''
iter = 0
for epoch in range(num_epochs):
for i, (images, labels) in enumerate(train_loader):
#######################
# USE GPU FOR MODEL #
#######################
if torch.cuda.is_available():
images = Variable(images.view(-1, 28*28).cuda())
labels = Variable(labels.cuda())
else:
images = Variable(images.view(-1, 28*28))
labels = Variable(labels)
# Clear gradients w.r.t. parameters
optimizer.zero_grad()
# Forward pass to get output/logits
outputs = model(images)
# Calculate Loss: softmax --> cross entropy loss
loss = criterion(outputs, labels)
# Getting gradients w.r.t. parameters
loss.backward()
# Updating parameters
optimizer.step()
iter += 1
if iter % 500 == 0:
# Calculate Accuracy
correct = 0
total = 0
# Iterate through test dataset
for images, labels in test_loader:
                #######################
                #  USE GPU FOR MODEL  #
                #######################
                if torch.cuda.is_available():
                    images = Variable(images.view(-1, 28*28).cuda())
                else:
                    images = Variable(images.view(-1, 28*28))
# Forward pass only to get logits/output
outputs = model(images)
# Get predictions from the maximum value
_, predicted = torch.max(outputs.data, 1)
# Total number of labels
total += labels.size(0)
#######################
# USE GPU FOR MODEL #
#######################
# Total correct predictions
correct += (predicted.cpu() == labels.cpu()).sum()
accuracy = 100 * correct / total
# Print Loss
            print('Iteration: {}. Loss: {}. Accuracy: {}'.format(iter, loss.item(), accuracy))  # .item() avoids indexing loss.data[0], which fails on 0-dim loss tensors in newer PyTorch
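
'''
EXTRA (illustrative sketch, not part of the original tutorial):
quick sanity-check inference on a single test batch after training,
reusing the model and test_loader defined above.
'''
sample_images, sample_labels = next(iter(test_loader))
sample_images = Variable(sample_images.view(-1, 28*28))
if torch.cuda.is_available():
    sample_images = sample_images.cuda()
_, sample_predictions = torch.max(model(sample_images).data, 1)
print('Predicted digits:', sample_predictions[:10].cpu().tolist())
print('True digits:     ', sample_labels[:10].tolist())
|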
the-stack_0_20537 | import os
import sys
def make_new_path(directory):
    # os is already imported at module level; create the directory if it is missing
    if not os.path.exists(directory):
        os.makedirs(directory)
def merge_file(input1, input2, output_path):
    """Merge the lines of two label files, drop duplicates, and write them to output_path."""
with open(input1, 'r') as f1, open(input2, 'r') as f2:
input1_list = f1.readlines()
input2_list = f2.readlines()
input_list = input1_list + input2_list
output_list = list(set(input_list))
with open(output_path, 'w') as f:
for label in output_list:
print('label:', label)
if label in ['\n', '\t']:
pass
else:
f.write("%s" % label)
return
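
# Illustration of the merge semantics (values are made up): if input1 holds the lines
# "B-ORG\n" and "I-ORG\n" and input2 holds "I-ORG\n" and "B-DATE\n", the output file
# contains the three unique labels, in arbitrary order because a set() is used.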
def main():
PATH_DIR = os.getcwd()
print(PATH_DIR)
#
output_dataset_dir = 'jpner_1128_split'
dataset_dir_path = os.path.join(PATH_DIR, output_dataset_dir)
make_new_path(dataset_dir_path)
#
input_file_1 = "label_invoice_ner_1.txt"
input_file_2 = "label_invoice_ner_2.txt"
input1_path = os.path.join(dataset_dir_path, input_file_1)
input2_path = os.path.join(dataset_dir_path, input_file_2)
output_file = "label_invoice_ner_1128.txt"
output_path = os.path.join(dataset_dir_path, output_file)
#
merge_file(input1_path, input2_path, output_path)
if __name__ == "__main__":
main()
|
the-stack_0_20539 | from desdeo_problem.Problem import DataProblem
from desdeo_problem.surrogatemodels.SurrogateModels import GaussianProcessRegressor
from desdeo_problem.surrogatemodels.SurrogateKriging import SurrogateKriging
from desdeo_problem.testproblems.TestProblems import test_problem_builder
from pyDOE import lhs
import plotly.graph_objects as go
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from desdeo_emo.EAs.ProbRVEA import RVEA
from desdeo_emo.EAs.ProbRVEA import ProbRVEA
from desdeo_emo.EAs.ProbRVEA import ProbRVEA_v3
from desdeo_emo.EAs.ProbRVEA import HybRVEA
from desdeo_emo.EAs.ProbRVEA import HybRVEA_v3
from desdeo_emo.EAs.ProbMOEAD import MOEA_D, ProbMOEAD_TCH, ProbMOEAD_WS
from desdeo_emo.EAs.ProbMOEAD import ProbMOEAD
from desdeo_emo.EAs.ProbMOEAD import ProbMOEAD_v3
from desdeo_emo.EAs.ProbMOEAD import HybMOEAD
from desdeo_emo.EAs.ProbMOEAD import HybMOEAD_v3
#from pygmo import non_dominated_front_2d as nd2
#from non_domx import ndx
import scipy.io
from sklearn.neighbors import NearestNeighbors
import time
gen_per_iter_set = 10
max_func_evals = 40000
nsamples = 109
def build_surrogates(problem_testbench, problem_name, nobjs, nvars, is_data, x_data, y_data):
x_names = [f'x{i}' for i in range(1,nvars+1)]
y_names = [f'f{i}' for i in range(1,nobjs+1)]
row_names = ['lower_bound','upper_bound']
if is_data is False:
prob = test_problem_builder(problem_name, nvars, nobjs)
x = lhs(nvars, nsamples)
y = prob.evaluate(x)
data = pd.DataFrame(np.hstack((x,y.objectives)), columns=x_names+y_names)
else:
data = pd.DataFrame(np.hstack((x_data,y_data)), columns=x_names+y_names)
if problem_testbench == 'DDMOPP':
x_low = np.ones(nvars)*-1
x_high = np.ones(nvars)
elif problem_testbench == 'DTLZ':
x_low = np.ones(nvars)*0
x_high = np.ones(nvars)
bounds = pd.DataFrame(np.vstack((x_low,x_high)), columns=x_names, index=row_names)
problem = DataProblem(data=data, variable_names=x_names, objective_names=y_names,bounds=bounds)
start = time.time()
problem.train(SurrogateKriging)
end = time.time()
time_taken = end - start
return problem, time_taken
def read_dataset(problem_testbench, folder_data, problem_name, nobjs, nvars, sampling, run):
if problem_testbench == "DDMOPP":
mat = scipy.io.loadmat(folder_data + '/Initial_Population_' + problem_testbench + '_' + sampling +
'_AM_' + str(nvars) + '_109.mat')
x = ((mat['Initial_Population_'+problem_testbench])[0][run])[0]
mat = scipy.io.loadmat(folder_data+'/Obj_vals_DDMOPP_'+sampling+'_AM_'+problem_name+'_'
+ str(nobjs) + '_' + str(nvars) + '_109.mat')
y = ((mat['Obj_vals_DDMOPP'])[0][run])[0]
else:
mat = scipy.io.loadmat(folder_data + '/Initial_Population_DTLZ_'+sampling+'_AM_' + str(nvars) + '_109.mat')
prob = test_problem_builder(
name=problem_name, n_of_objectives=nobjs, n_of_variables=nvars
)
x = ((mat['Initial_Population_DTLZ'])[0][run])[0]
y = prob.evaluate(x)[0]
return x, y
def optimize_surrogates_1(problem,x):
print("Optimizing...")
evolver_opt = RVEA(problem, use_surrogates=True, n_gen_per_iter=gen_per_iter_set, total_function_evaluations=max_func_evals) #, population_params={'design':'InitSamples','init_pop':x}, population_size=109)
while evolver_opt.continue_evolution():
evolver_opt.iterate()
print("FE count:",evolver_opt._function_evaluation_count)
#front_true = evolver_opt.population.objectives
#evolver_opt.population.
#print(front_true)
return evolver_opt.population
def optimize_surrogates_7(problem,x):
print("Optimizing...")
evolver_opt = ProbRVEA(problem, use_surrogates=True, n_gen_per_iter=gen_per_iter_set, total_function_evaluations=max_func_evals) #, population_params={'design':'InitSamples','init_pop':x}, population_size=109)
while evolver_opt.continue_evolution():
evolver_opt.iterate()
print("FE count:",evolver_opt._function_evaluation_count)
#front_true = evolver_opt.population.objectives
#evolver_opt.population.
#print(front_true)
return evolver_opt.population
def optimize_surrogates_8(problem,x):
print("Optimizing...")
evolver_opt = HybRVEA_v3(problem, use_surrogates=True, n_gen_per_iter=gen_per_iter_set, total_function_evaluations=max_func_evals) #, population_params={'design':'InitSamples','init_pop':x}, population_size=109)
while evolver_opt.continue_evolution():
evolver_opt.iterate()
print("FE count:",evolver_opt._function_evaluation_count)
#front_true = evolver_opt.population.objectives
#evolver_opt.population.
#print(front_true)
return evolver_opt.population
def optimize_surrogates_12(problem,x):
print("Optimizing...")
evolver_opt = MOEA_D(problem, use_surrogates=True, n_gen_per_iter=gen_per_iter_set, total_function_evaluations=max_func_evals) #, population_params={'design':'InitSamples','init_pop':x}, population_size=109)
while evolver_opt.continue_evolution():
evolver_opt.iterate()
print("FE count:",evolver_opt._function_evaluation_count)
#front_true = evolver_opt.population.objectives
#evolver_opt.population.
#print(front_true)
return evolver_opt.population
def optimize_surrogates_72(problem,x):
print("Optimizing...")
evolver_opt = ProbMOEAD(problem, use_surrogates=True, n_gen_per_iter=gen_per_iter_set, total_function_evaluations=max_func_evals) #, population_params={'design':'InitSamples','init_pop':x}) #, population_size=109)
while evolver_opt.continue_evolution():
evolver_opt.iterate()
print("FE count:",evolver_opt._function_evaluation_count)
#front_true = evolver_opt.population.objectives
#evolver_opt.population.
#print(front_true)
return evolver_opt.population
def optimize_surrogates_722(problem,x):
print("Optimizing...")
evolver_opt = ProbMOEAD_WS(problem, SF_type='WS',use_surrogates=True, n_gen_per_iter=gen_per_iter_set, total_function_evaluations=max_func_evals) #, population_params={'design':'InitSamples','init_pop':x}) #, population_size=109)
while evolver_opt.continue_evolution():
evolver_opt.iterate()
print("FE count:",evolver_opt._function_evaluation_count)
#front_true = evolver_opt.population.objectives
#evolver_opt.population.
#print(front_true)
return evolver_opt.population
def optimize_surrogates_723(problem,x):
print("Optimizing...")
evolver_opt = ProbMOEAD_TCH(problem, SF_type='TCH',use_surrogates=True, n_gen_per_iter=gen_per_iter_set, total_function_evaluations=max_func_evals) #, population_params={'design':'InitSamples','init_pop':x}) #, population_size=109)
while evolver_opt.continue_evolution():
evolver_opt.iterate()
print("FE count:",evolver_opt._function_evaluation_count)
#front_true = evolver_opt.population.objectives
#evolver_opt.population.
#print(front_true)
return evolver_opt.population
def optimize_surrogates_82(problem,x):
print("Optimizing...")
evolver_opt = HybMOEAD_v3(problem, use_surrogates=True, n_gen_per_iter=gen_per_iter_set, total_function_evaluations=max_func_evals) #, population_params={'design':'InitSamples','init_pop':x}, population_size=109)
while evolver_opt.continue_evolution():
evolver_opt.iterate()
print("FE count:",evolver_opt._function_evaluation_count)
#front_true = evolver_opt.population.objectives
#evolver_opt.population.
#print(front_true)
return evolver_opt.population
def run_optimizer(problem_testbench, folder_data, problem_name, nobjs, nvars, sampling, is_data, run, approach):
    if is_data is True:
        x, y = read_dataset(problem_testbench, folder_data, problem_name, nobjs, nvars, sampling, run)
    else:
        # build_surrogates draws its own LHS sample when is_data is False, so no
        # pre-sampled decision/objective data is needed (also avoids a NameError below)
        x, y = None, None
surrogate_problem, time_taken = build_surrogates(problem_testbench,problem_name, nobjs, nvars, is_data, x, y)
print(time_taken)
if approach == 1:
population = optimize_surrogates_1(surrogate_problem,x)
elif approach == 7:
population = optimize_surrogates_7(surrogate_problem,x)
elif approach == 8:
population = optimize_surrogates_8(surrogate_problem,x)
elif approach == 12:
population = optimize_surrogates_12(surrogate_problem,x)
elif approach == 72:
population = optimize_surrogates_72(surrogate_problem,x)
elif approach == 722:
population = optimize_surrogates_722(surrogate_problem,x)
elif approach == 723:
population = optimize_surrogates_723(surrogate_problem,x)
elif approach == 82:
population = optimize_surrogates_82(surrogate_problem,x)
results_dict = {
'individual_archive': population.individuals_archive,
'objectives_archive': population.objectives_archive,
'uncertainty_archive': population.uncertainty_archive,
'individuals_solutions': population.individuals,
'obj_solutions': population.objectives,
'uncertainty_solutions': population.uncertainity,
'time_taken': time_taken
}
return results_dict
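

# Minimal usage sketch (assumptions: the pre-generated .mat files live under "./data"
# in the layout read_dataset() expects, and "LHS" is a valid sampling label for them).
# Approach 1 runs the plain Kriging + RVEA combination defined above.
if __name__ == "__main__":
    results = run_optimizer(problem_testbench="DTLZ", folder_data="./data",
                            problem_name="DTLZ2", nobjs=3, nvars=10,
                            sampling="LHS", is_data=True, run=0, approach=1)
    print("Surrogate training time (s):", results["time_taken"])
    print("Returned solutions:", results["obj_solutions"].shape[0])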
|
the-stack_0_20540 | # Copyright 2015-2017 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" DUT for IDF applications """
import os
import sys
import re
import subprocess
import functools
import random
import tempfile
from serial.tools import list_ports
import DUT
class IDFToolError(OSError):
pass
def _tool_method(func):
""" close port, execute tool method and then reopen port """
@functools.wraps(func)
def handler(self, *args, **kwargs):
self.close()
ret = func(self, *args, **kwargs)
self.open()
return ret
return handler
class IDFDUT(DUT.SerialDUT):
""" IDF DUT, extends serial with ESPTool methods """
CHIP_TYPE_PATTERN = re.compile(r"Detecting chip type[.:\s]+(.+)")
# /dev/ttyAMA0 port is listed in Raspberry Pi
# /dev/tty.Bluetooth-Incoming-Port port is listed in Mac
INVALID_PORT_PATTERN = re.compile(r"AMA|Bluetooth")
# if need to erase NVS partition in start app
ERASE_NVS = True
def __init__(self, name, port, log_file, app, **kwargs):
self.download_config, self.partition_table = app.process_app_info()
super(IDFDUT, self).__init__(name, port, log_file, app, **kwargs)
@classmethod
def get_chip(cls, app, port):
"""
get chip id via esptool
:param app: application instance (to get tool)
:param port: comport
:return: chip ID or None
"""
try:
output = subprocess.check_output(["python", app.esptool, "--port", port, "chip_id"])
except subprocess.CalledProcessError:
output = bytes()
if isinstance(output, bytes):
output = output.decode()
chip_type = cls.CHIP_TYPE_PATTERN.search(output)
return chip_type.group(1) if chip_type else None
@classmethod
def confirm_dut(cls, port, app, **kwargs):
return cls.get_chip(app, port) is not None
@_tool_method
def start_app(self, erase_nvs=ERASE_NVS):
"""
download and start app.
:param: erase_nvs: whether erase NVS partition during flash
:return: None
"""
if erase_nvs:
address = self.partition_table["nvs"]["offset"]
size = self.partition_table["nvs"]["size"]
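            # Note: an NVS partition whose bytes are all 0xFF looks like freshly
            # erased flash to the ESP-IDF NVS library, so flashing a file of 0xFF
            # bytes over the partition effectively wipes its contents.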
nvs_file = tempfile.NamedTemporaryFile()
nvs_file.write(b'\xff' * size)
nvs_file.flush()
download_config = self.download_config + [address, nvs_file.name]
else:
download_config = self.download_config
retry_baud_rates = ["921600", "115200"]
error = IDFToolError()
try:
for baud_rate in retry_baud_rates:
try:
subprocess.check_output(["python", self.app.esptool,
"--port", self.port, "--baud", baud_rate]
+ download_config)
break
                except subprocess.CalledProcessError as flash_error:
                    error = flash_error  # keep a reference; ``as`` names are unbound after this block
                    continue
else:
raise error
finally:
if erase_nvs:
nvs_file.close()
@_tool_method
def reset(self):
"""
reset DUT with esptool
:return: None
"""
subprocess.check_output(["python", self.app.esptool, "--port", self.port, "run"])
@_tool_method
def erase_partition(self, partition):
"""
:param partition: partition name to erase
:return: None
"""
address = self.partition_table[partition]["offset"]
size = self.partition_table[partition]["size"]
with open(".erase_partition.tmp", "wb") as f:
            f.write(b'\xff' * size)  # must be bytes, the file is opened in binary mode
@_tool_method
def dump_flush(self, output_file, **kwargs):
"""
        dump flash
:param output_file: output file name, if relative path, will use sdk path as base path.
:keyword partition: partition name, dump the partition.
``partition`` is preferred than using ``address`` and ``size``.
:keyword address: dump from address (need to be used with size)
:keyword size: dump size (need to be used with address)
:return: None
"""
if os.path.isabs(output_file) is False:
output_file = os.path.relpath(output_file, self.app.get_log_folder())
if "partition" in kwargs:
partition = self.partition_table[kwargs["partition"]]
_address = partition["offset"]
_size = partition["size"]
elif "address" in kwargs and "size" in kwargs:
_address = kwargs["address"]
_size = kwargs["size"]
else:
raise IDFToolError("You must specify 'partition' or ('address' and 'size') to dump flash")
subprocess.check_output(
["python", self.app.esptool, "--port", self.port, "--baud", "921600",
"--before", "default_reset", "--after", "hard_reset", "read_flash",
_address, _size, output_file]
)
@classmethod
def list_available_ports(cls):
ports = [x.device for x in list_ports.comports()]
espport = os.getenv('ESPPORT')
if not espport:
            # It's a little hard to filter out invalid ports with `serial.tools.list_ports.grep()`:
            # the check condition in `grep` is `if r.search(port) or r.search(desc) or r.search(hwid)`,
            # which means all 3 conditions must fail for a port to be filtered out.
            # Some of the resulting filters would not be straightforward to users,
            # and a negative regular expression (`^((?!aa|bb|cc).)*$`) is not easy to understand.
            # Filtering out invalid ports ourselves is much simpler.
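            # For illustration only (not used here), a grep-based equivalent would
            # look roughly like: list_ports.grep(r'^((?!AMA|Bluetooth).)*$')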
return [x for x in ports if not cls.INVALID_PORT_PATTERN.search(x)]
# On MacOs with python3.6: type of espport is already utf8
if type(espport) is type(u''):
port_hint = espport
else:
port_hint = espport.decode('utf8')
# If $ESPPORT is a valid port, make it appear first in the list
if port_hint in ports:
ports.remove(port_hint)
return [port_hint] + ports
# On macOS, user may set ESPPORT to /dev/tty.xxx while
# pySerial lists only the corresponding /dev/cu.xxx port
if sys.platform == 'darwin' and 'tty.' in port_hint:
port_hint = port_hint.replace('tty.', 'cu.')
if port_hint in ports:
ports.remove(port_hint)
return [port_hint] + ports
return ports
|
the-stack_0_20543 | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
'''
Author: Luiz Yao ([email protected])
Created Date: 2019-11-05 14:43:53
-----
Last Modified: 2019-11-05 16:17:16
Modified By: Luiz Yao ([email protected])
-----
THIS PROGRAM IS FREE SOFTWARE, IS LICENSED UNDER MIT.
A short and simple permissive license with conditions
only requiring preservation of copyright and license notices.
Copyright © 2019 Yao Meng
-----
HISTORY:
Date By Comments
---------- -------- ---------------------------------------------------------
'''
import sys
import pytest
minversion = pytest.mark.skipif(sys.version_info < (3, 8),
                                reason='Please use Python 3.8 or a newer version.')
@minversion
def test_one():
assert True
|
the-stack_0_20544 | """creating a table for coaches
Revision ID: f4d17b82bfed
Revises: ae34c861c0cc
Create Date: 2018-09-19 11:19:14.100523
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'f4d17b82bfed'
down_revision = 'ae34c861c0cc'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('coaches',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(), nullable=True),
sa.Column('support_to_provide', sa.String(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('coaches')
# ### end Alembic commands ###
|
the-stack_0_20545 | from pylab import *
import postgkyl as pg
style.use('../code/postgkyl.mplstyle')
w = 0.4567
tper = 2*pi/w
def getData(i):
#print("Working on %d ..." % i)
data = pg.GData("s4-oscc-E_ions_%d.bp" % i)
dg = pg.data.GInterpModal(data, 2, "ms")
XX, q = dg.interpolate()
nx, nvx, nvy = q.shape[0], q.shape[1], q.shape[2]
qMid = q[int(nx/2),:,int(nvy/2)]
tm = data.time
tm = tm - tper*floor(tm/tper)
return tm, qMid
nFrame = 100
qFull = zeros((nFrame+1,48), float)
T = zeros((nFrame+1,), float)
for i in range(nFrame+1):
tm, qt = getData(i)
T[i] = tm
qFull[i,:] = squeeze(qt)
# sort to enforce periodicity in time
idx = T.argsort()
Ts = T[idx]
qS = 0.0*qFull
for i in range(idx.shape[0]):
qS[i,:] = qFull[idx[i],:]
def maxwellian2D(n, vx, vy, ux, uy, vth):
v2 = (vx - ux)**2 + (vy - uy)**2
return n/(2*pi*vth**2)*exp(-v2/(2*vth**2))
Vx = linspace(-6, 6, 48)
contour(Ts, Vx, transpose(qS), colors='k', linestyles='dotted')
savefig("s4-pc.png", dpi=150)
show()
|
the-stack_0_20547 | import nltk
from nltk.stem.lancaster import LancasterStemmer
from nltk.stem.porter import PorterStemmer
from nltk.stem.snowball import EnglishStemmer
from nltk.stem import WordNetLemmatizer
from flask import abort
from .helpers import ret_success
from .helpers import ret_failure
from .helpers import parse_input
from .helpers import penn_to_wn
LancasterSt = LancasterStemmer()
PorterSt = PorterStemmer()
SnowballSt = EnglishStemmer()
WordnetLm = WordNetLemmatizer()
def stemmer(method,data):
"""
Takes an array of words in JSON format.
"""
data = parse_input(data)
if data == False:
return ret_failure(703)
else:
res=[]
if method == "lancaster":
for word in data:
try:
res.append([word,LancasterSt.stem(word)])
except:
return ret_failure(702)
elif method == "porter":
for word in data:
try:
res.append([word,PorterSt.stem(word)])
except:
return ret_failure(702)
elif method == 'snowball':
for word in data:
try:
res.append([word,SnowballSt.stem(word)])
except:
return ret_failure(702)
else:
abort(404)
return ret_success(res)
def lemmatize(method,data):
"""
    Takes an array of words or an array of tuples containing words and POS tags.
    Both Penn and WordNet tags are supported.
"""
data = parse_input(data)
if data == False:
return ret_failure(703)
else:
res=[]
if method == "wordnet":
for word in data:
try:
if type(word) is list:
res.append([word[0],WordnetLm.lemmatize(word[0],penn_to_wn(word[1]))])
else:
res.append([word,WordnetLm.lemmatize(word)])
except LookupError:
return ret_failure(704)
except:
return ret_failure(702)
else:
abort(404)
return ret_success(res)
|
the-stack_0_20549 | import asyncio
import inspect
from contextlib import contextmanager
from copy import deepcopy
from typing import (
Any,
Callable,
Dict,
List,
Mapping,
Optional,
Sequence,
Tuple,
Type,
Union,
cast,
)
from fastapi import params
from fastapi.concurrency import (
AsyncExitStack,
_fake_asynccontextmanager,
asynccontextmanager,
contextmanager_in_threadpool,
)
from fastapi.dependencies.models import Dependant, SecurityRequirement
from fastapi.security.base import SecurityBase
from fastapi.security.oauth2 import OAuth2, SecurityScopes
from fastapi.security.open_id_connect_url import OpenIdConnect
from fastapi.utils import (
PYDANTIC_1,
create_response_field,
get_field_info,
get_path_param_names,
)
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.error_wrappers import ErrorWrapper
from pydantic.errors import MissingError
from pydantic.utils import lenient_issubclass
from starlette.background import BackgroundTasks
from starlette.concurrency import run_in_threadpool
from starlette.datastructures import FormData, Headers, QueryParams, UploadFile
from starlette.requests import Request
from starlette.responses import Response
from starlette.websockets import WebSocket
try:
from pydantic.fields import (
SHAPE_LIST,
SHAPE_SEQUENCE,
SHAPE_SET,
SHAPE_SINGLETON,
SHAPE_TUPLE,
SHAPE_TUPLE_ELLIPSIS,
FieldInfo,
ModelField,
Required,
)
from pydantic.schema import get_annotation_from_field_info
from pydantic.typing import ForwardRef, evaluate_forwardref
except ImportError: # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
from pydantic.fields import Field as ModelField # type: ignore
from pydantic.fields import Required, Shape # type: ignore
from pydantic import Schema as FieldInfo # type: ignore
from pydantic.schema import get_annotation_from_schema # type: ignore
from pydantic.utils import ForwardRef, evaluate_forwardref # type: ignore
SHAPE_LIST = Shape.LIST
SHAPE_SEQUENCE = Shape.SEQUENCE
SHAPE_SET = Shape.SET
SHAPE_SINGLETON = Shape.SINGLETON
SHAPE_TUPLE = Shape.TUPLE
SHAPE_TUPLE_ELLIPSIS = Shape.TUPLE_ELLIPS
def get_annotation_from_field_info(
annotation: Any, field_info: FieldInfo, field_name: str
) -> Type[Any]:
return get_annotation_from_schema(annotation, field_info)
sequence_shapes = {
SHAPE_LIST,
SHAPE_SET,
SHAPE_TUPLE,
SHAPE_SEQUENCE,
SHAPE_TUPLE_ELLIPSIS,
}
sequence_types = (list, set, tuple)
sequence_shape_to_type = {
SHAPE_LIST: list,
SHAPE_SET: set,
SHAPE_TUPLE: tuple,
SHAPE_SEQUENCE: list,
SHAPE_TUPLE_ELLIPSIS: list,
}
def get_param_sub_dependant(
*, param: inspect.Parameter, path: str, security_scopes: List[str] = None
) -> Dependant:
depends: params.Depends = param.default
if depends.dependency:
dependency = depends.dependency
else:
dependency = param.annotation
return get_sub_dependant(
depends=depends,
dependency=dependency,
path=path,
name=param.name,
security_scopes=security_scopes,
)
def get_parameterless_sub_dependant(*, depends: params.Depends, path: str) -> Dependant:
assert callable(
depends.dependency
), "A parameter-less dependency must have a callable dependency"
return get_sub_dependant(depends=depends, dependency=depends.dependency, path=path)
def get_sub_dependant(
*,
depends: params.Depends,
dependency: Callable,
path: str,
name: str = None,
security_scopes: List[str] = None,
) -> Dependant:
security_requirement = None
security_scopes = security_scopes or []
if isinstance(depends, params.Security):
dependency_scopes = depends.scopes
security_scopes.extend(dependency_scopes)
if isinstance(dependency, SecurityBase):
use_scopes: List[str] = []
if isinstance(dependency, (OAuth2, OpenIdConnect)):
use_scopes = security_scopes
security_requirement = SecurityRequirement(
security_scheme=dependency, scopes=use_scopes
)
sub_dependant = get_dependant(
path=path,
call=dependency,
name=name,
security_scopes=security_scopes,
use_cache=depends.use_cache,
)
if security_requirement:
sub_dependant.security_requirements.append(security_requirement)
sub_dependant.security_scopes = security_scopes
return sub_dependant
CacheKey = Tuple[Optional[Callable], Tuple[str, ...]]
def get_flat_dependant(
dependant: Dependant, *, skip_repeats: bool = False, visited: List[CacheKey] = None
) -> Dependant:
if visited is None:
visited = []
visited.append(dependant.cache_key)
flat_dependant = Dependant(
path_params=dependant.path_params.copy(),
query_params=dependant.query_params.copy(),
header_params=dependant.header_params.copy(),
cookie_params=dependant.cookie_params.copy(),
body_params=dependant.body_params.copy(),
security_schemes=dependant.security_requirements.copy(),
use_cache=dependant.use_cache,
path=dependant.path,
)
for sub_dependant in dependant.dependencies:
if skip_repeats and sub_dependant.cache_key in visited:
continue
flat_sub = get_flat_dependant(
sub_dependant, skip_repeats=skip_repeats, visited=visited
)
flat_dependant.path_params.extend(flat_sub.path_params)
flat_dependant.query_params.extend(flat_sub.query_params)
flat_dependant.header_params.extend(flat_sub.header_params)
flat_dependant.cookie_params.extend(flat_sub.cookie_params)
flat_dependant.body_params.extend(flat_sub.body_params)
flat_dependant.security_requirements.extend(flat_sub.security_requirements)
return flat_dependant
def get_flat_params(dependant: Dependant) -> List[ModelField]:
flat_dependant = get_flat_dependant(dependant, skip_repeats=True)
return (
flat_dependant.path_params
+ flat_dependant.query_params
+ flat_dependant.header_params
+ flat_dependant.cookie_params
)
def is_scalar_field(field: ModelField) -> bool:
field_info = get_field_info(field)
if not (
field.shape == SHAPE_SINGLETON
and not lenient_issubclass(field.type_, BaseModel)
and not lenient_issubclass(field.type_, sequence_types + (dict,))
and not isinstance(field_info, params.Body)
):
return False
if field.sub_fields:
if not all(is_scalar_field(f) for f in field.sub_fields):
return False
return True
def is_scalar_sequence_field(field: ModelField) -> bool:
if (field.shape in sequence_shapes) and not lenient_issubclass(
field.type_, BaseModel
):
if field.sub_fields is not None:
for sub_field in field.sub_fields:
if not is_scalar_field(sub_field):
return False
return True
if lenient_issubclass(field.type_, sequence_types):
return True
return False
def get_typed_signature(call: Callable) -> inspect.Signature:
signature = inspect.signature(call)
globalns = getattr(call, "__globals__", {})
typed_params = [
inspect.Parameter(
name=param.name,
kind=param.kind,
default=param.default,
annotation=get_typed_annotation(param, globalns),
)
for param in signature.parameters.values()
]
typed_signature = inspect.Signature(typed_params)
return typed_signature
def get_typed_annotation(param: inspect.Parameter, globalns: Dict[str, Any]) -> Any:
annotation = param.annotation
if isinstance(annotation, str):
annotation = ForwardRef(annotation)
annotation = evaluate_forwardref(annotation, globalns, globalns)
return annotation
async_contextmanager_dependencies_error = """
FastAPI dependencies with yield require Python 3.7 or above,
or the backports for Python 3.6, installed with:
pip install async-exit-stack async-generator
"""
def check_dependency_contextmanagers() -> None:
if AsyncExitStack is None or asynccontextmanager == _fake_asynccontextmanager:
raise RuntimeError(async_contextmanager_dependencies_error) # pragma: no cover
def get_dependant(
*,
path: str,
call: Callable,
name: str = None,
security_scopes: List[str] = None,
use_cache: bool = True,
) -> Dependant:
path_param_names = get_path_param_names(path)
endpoint_signature = get_typed_signature(call)
signature_params = endpoint_signature.parameters
if inspect.isgeneratorfunction(call) or inspect.isasyncgenfunction(call):
check_dependency_contextmanagers()
dependant = Dependant(call=call, name=name, path=path, use_cache=use_cache)
for param_name, param in signature_params.items():
if isinstance(param.default, params.Depends):
sub_dependant = get_param_sub_dependant(
param=param, path=path, security_scopes=security_scopes
)
dependant.dependencies.append(sub_dependant)
for param_name, param in signature_params.items():
if isinstance(param.default, params.Depends):
continue
if add_non_field_param_to_dependency(param=param, dependant=dependant):
continue
param_field = get_param_field(
param=param, default_field_info=params.Query, param_name=param_name
)
if param_name in path_param_names:
assert is_scalar_field(
field=param_field
), f"Path params must be of one of the supported types"
if isinstance(param.default, params.Path):
ignore_default = False
else:
ignore_default = True
param_field = get_param_field(
param=param,
param_name=param_name,
default_field_info=params.Path,
force_type=params.ParamTypes.path,
ignore_default=ignore_default,
)
add_param_to_fields(field=param_field, dependant=dependant)
elif is_scalar_field(field=param_field):
add_param_to_fields(field=param_field, dependant=dependant)
elif isinstance(
param.default, (params.Query, params.Header)
) and is_scalar_sequence_field(param_field):
add_param_to_fields(field=param_field, dependant=dependant)
else:
field_info = get_field_info(param_field)
assert isinstance(
field_info, params.Body
), f"Param: {param_field.name} can only be a request body, using Body(...)"
dependant.body_params.append(param_field)
return dependant
def add_non_field_param_to_dependency(
*, param: inspect.Parameter, dependant: Dependant
) -> Optional[bool]:
if lenient_issubclass(param.annotation, Request):
dependant.request_param_name = param.name
return True
elif lenient_issubclass(param.annotation, WebSocket):
dependant.websocket_param_name = param.name
return True
elif lenient_issubclass(param.annotation, Response):
dependant.response_param_name = param.name
return True
elif lenient_issubclass(param.annotation, BackgroundTasks):
dependant.background_tasks_param_name = param.name
return True
elif lenient_issubclass(param.annotation, SecurityScopes):
dependant.security_scopes_param_name = param.name
return True
return None
def get_param_field(
*,
param: inspect.Parameter,
param_name: str,
default_field_info: Type[params.Param] = params.Param,
force_type: params.ParamTypes = None,
ignore_default: bool = False,
) -> ModelField:
default_value = Required
had_schema = False
if not param.default == param.empty and ignore_default is False:
default_value = param.default
if isinstance(default_value, FieldInfo):
had_schema = True
field_info = default_value
default_value = field_info.default
if (
isinstance(field_info, params.Param)
and getattr(field_info, "in_", None) is None
):
field_info.in_ = default_field_info.in_
if force_type:
field_info.in_ = force_type # type: ignore
else:
field_info = default_field_info(default_value)
required = default_value == Required
annotation: Any = Any
if not param.annotation == param.empty:
annotation = param.annotation
annotation = get_annotation_from_field_info(annotation, field_info, param_name)
if not field_info.alias and getattr(field_info, "convert_underscores", None):
alias = param.name.replace("_", "-")
else:
alias = field_info.alias or param.name
field = create_response_field(
name=param.name,
type_=annotation,
default=None if required else default_value,
alias=alias,
required=required,
field_info=field_info,
)
field.required = required
if not had_schema and not is_scalar_field(field=field):
if PYDANTIC_1:
field.field_info = params.Body(field_info.default)
else:
field.schema = params.Body(field_info.default) # type: ignore # pragma: nocover
return field
def add_param_to_fields(*, field: ModelField, dependant: Dependant) -> None:
field_info = cast(params.Param, get_field_info(field))
if field_info.in_ == params.ParamTypes.path:
dependant.path_params.append(field)
elif field_info.in_ == params.ParamTypes.query:
dependant.query_params.append(field)
elif field_info.in_ == params.ParamTypes.header:
dependant.header_params.append(field)
else:
assert (
field_info.in_ == params.ParamTypes.cookie
), f"non-body parameters must be in path, query, header or cookie: {field.name}"
dependant.cookie_params.append(field)
def is_coroutine_callable(call: Callable) -> bool:
if inspect.isroutine(call):
return asyncio.iscoroutinefunction(call)
if inspect.isclass(call):
return False
call = getattr(call, "__call__", None)
return asyncio.iscoroutinefunction(call)
async def solve_generator(
*, call: Callable, stack: AsyncExitStack, sub_values: Dict[str, Any]
) -> Any:
if inspect.isgeneratorfunction(call):
cm = contextmanager_in_threadpool(contextmanager(call)(**sub_values))
elif inspect.isasyncgenfunction(call):
cm = asynccontextmanager(call)(**sub_values)
return await stack.enter_async_context(cm)
async def solve_dependencies(
*,
request: Union[Request, WebSocket],
dependant: Dependant,
body: Optional[Union[Dict[str, Any], FormData]] = None,
background_tasks: BackgroundTasks = None,
response: Response = None,
dependency_overrides_provider: Any = None,
dependency_cache: Dict[Tuple[Callable, Tuple[str]], Any] = None,
) -> Tuple[
Dict[str, Any],
List[ErrorWrapper],
Optional[BackgroundTasks],
Response,
Dict[Tuple[Callable, Tuple[str]], Any],
]:
values: Dict[str, Any] = {}
errors: List[ErrorWrapper] = []
response = response or Response(
content=None,
status_code=None, # type: ignore
headers=None,
media_type=None,
background=None,
)
dependency_cache = dependency_cache or {}
sub_dependant: Dependant
for sub_dependant in dependant.dependencies:
sub_dependant.call = cast(Callable, sub_dependant.call)
sub_dependant.cache_key = cast(
Tuple[Callable, Tuple[str]], sub_dependant.cache_key
)
call = sub_dependant.call
use_sub_dependant = sub_dependant
if (
dependency_overrides_provider
and dependency_overrides_provider.dependency_overrides
):
original_call = sub_dependant.call
call = getattr(
dependency_overrides_provider, "dependency_overrides", {}
).get(original_call, original_call)
use_path: str = sub_dependant.path # type: ignore
use_sub_dependant = get_dependant(
path=use_path,
call=call,
name=sub_dependant.name,
security_scopes=sub_dependant.security_scopes,
)
solved_result = await solve_dependencies(
request=request,
dependant=use_sub_dependant,
body=body,
background_tasks=background_tasks,
response=response,
dependency_overrides_provider=dependency_overrides_provider,
dependency_cache=dependency_cache,
)
(
sub_values,
sub_errors,
background_tasks,
sub_response,
sub_dependency_cache,
) = solved_result
sub_response = cast(Response, sub_response)
response.headers.raw.extend(sub_response.headers.raw)
if sub_response.status_code:
response.status_code = sub_response.status_code
dependency_cache.update(sub_dependency_cache)
if sub_errors:
errors.extend(sub_errors)
continue
if sub_dependant.use_cache and sub_dependant.cache_key in dependency_cache:
solved = dependency_cache[sub_dependant.cache_key]
elif inspect.isgeneratorfunction(call) or inspect.isasyncgenfunction(call):
stack = request.scope.get("fastapi_astack")
if stack is None:
raise RuntimeError(
async_contextmanager_dependencies_error
) # pragma: no cover
solved = await solve_generator(
call=call, stack=stack, sub_values=sub_values
)
elif is_coroutine_callable(call):
solved = await call(**sub_values)
else:
solved = await run_in_threadpool(call, **sub_values)
if sub_dependant.name is not None:
values[sub_dependant.name] = solved
if sub_dependant.cache_key not in dependency_cache:
dependency_cache[sub_dependant.cache_key] = solved
path_values, path_errors = request_params_to_args(
dependant.path_params, request.path_params
)
query_values, query_errors = request_params_to_args(
dependant.query_params, request.query_params
)
header_values, header_errors = request_params_to_args(
dependant.header_params, request.headers
)
cookie_values, cookie_errors = request_params_to_args(
dependant.cookie_params, request.cookies
)
values.update(path_values)
values.update(query_values)
values.update(header_values)
values.update(cookie_values)
errors += path_errors + query_errors + header_errors + cookie_errors
if dependant.body_params:
(
body_values,
body_errors,
) = await request_body_to_args( # body_params checked above
required_params=dependant.body_params, received_body=body
)
values.update(body_values)
errors.extend(body_errors)
if dependant.request_param_name and isinstance(request, Request):
values[dependant.request_param_name] = request
elif dependant.websocket_param_name and isinstance(request, WebSocket):
values[dependant.websocket_param_name] = request
if dependant.background_tasks_param_name:
if background_tasks is None:
background_tasks = BackgroundTasks()
values[dependant.background_tasks_param_name] = background_tasks
if dependant.response_param_name:
values[dependant.response_param_name] = response
if dependant.security_scopes_param_name:
values[dependant.security_scopes_param_name] = SecurityScopes(
scopes=dependant.security_scopes
)
return values, errors, background_tasks, response, dependency_cache
def request_params_to_args(
required_params: Sequence[ModelField],
received_params: Union[Mapping[str, Any], QueryParams, Headers],
) -> Tuple[Dict[str, Any], List[ErrorWrapper]]:
values = {}
errors = []
for field in required_params:
if is_scalar_sequence_field(field) and isinstance(
received_params, (QueryParams, Headers)
):
value = received_params.getlist(field.alias) or field.default
else:
value = received_params.get(field.alias)
field_info = get_field_info(field)
assert isinstance(
field_info, params.Param
), "Params must be subclasses of Param"
if value is None:
if field.required:
if PYDANTIC_1:
errors.append(
ErrorWrapper(
MissingError(), loc=(field_info.in_.value, field.alias)
)
)
else: # pragma: nocover
errors.append(
ErrorWrapper( # type: ignore
MissingError(),
loc=(field_info.in_.value, field.alias),
config=BaseConfig,
)
)
else:
values[field.name] = deepcopy(field.default)
continue
v_, errors_ = field.validate(
value, values, loc=(field_info.in_.value, field.alias)
)
if isinstance(errors_, ErrorWrapper):
errors.append(errors_)
elif isinstance(errors_, list):
errors.extend(errors_)
else:
values[field.name] = v_
return values, errors
async def request_body_to_args(
required_params: List[ModelField],
received_body: Optional[Union[Dict[str, Any], FormData]],
) -> Tuple[Dict[str, Any], List[ErrorWrapper]]:
values = {}
errors = []
if required_params:
field = required_params[0]
field_info = get_field_info(field)
embed = getattr(field_info, "embed", None)
if len(required_params) == 1 and not embed:
received_body = {field.alias: received_body}
for field in required_params:
value: Any = None
if received_body is not None:
if (
field.shape in sequence_shapes or field.type_ in sequence_types
) and isinstance(received_body, FormData):
value = received_body.getlist(field.alias)
else:
try:
value = received_body.get(field.alias)
except AttributeError:
errors.append(get_missing_field_error(field.alias))
continue
if (
value is None
or (isinstance(field_info, params.Form) and value == "")
or (
isinstance(field_info, params.Form)
and field.shape in sequence_shapes
and len(value) == 0
)
):
if field.required:
errors.append(get_missing_field_error(field.alias))
else:
values[field.name] = deepcopy(field.default)
continue
if (
isinstance(field_info, params.File)
and lenient_issubclass(field.type_, bytes)
and isinstance(value, UploadFile)
):
value = await value.read()
elif (
field.shape in sequence_shapes
and isinstance(field_info, params.File)
and lenient_issubclass(field.type_, bytes)
and isinstance(value, sequence_types)
):
awaitables = [sub_value.read() for sub_value in value]
contents = await asyncio.gather(*awaitables)
value = sequence_shape_to_type[field.shape](contents)
v_, errors_ = field.validate(value, values, loc=("body", field.alias))
if isinstance(errors_, ErrorWrapper):
errors.append(errors_)
elif isinstance(errors_, list):
errors.extend(errors_)
else:
values[field.name] = v_
return values, errors
def get_missing_field_error(field_alias: str) -> ErrorWrapper:
if PYDANTIC_1:
missing_field_error = ErrorWrapper(MissingError(), loc=("body", field_alias))
else: # pragma: no cover
missing_field_error = ErrorWrapper( # type: ignore
MissingError(), loc=("body", field_alias), config=BaseConfig,
)
return missing_field_error
def get_schema_compatible_field(*, field: ModelField) -> ModelField:
out_field = field
if lenient_issubclass(field.type_, UploadFile):
use_type: type = bytes
if field.shape in sequence_shapes:
use_type = List[bytes]
out_field = create_response_field(
name=field.name,
type_=use_type,
class_validators=field.class_validators,
model_config=field.model_config,
default=field.default,
required=field.required,
alias=field.alias,
field_info=field.field_info if PYDANTIC_1 else field.schema, # type: ignore
)
return out_field
def get_body_field(*, dependant: Dependant, name: str) -> Optional[ModelField]:
flat_dependant = get_flat_dependant(dependant)
if not flat_dependant.body_params:
return None
first_param = flat_dependant.body_params[0]
field_info = get_field_info(first_param)
embed = getattr(field_info, "embed", None)
body_param_names_set = set([param.name for param in flat_dependant.body_params])
if len(body_param_names_set) == 1 and not embed:
return get_schema_compatible_field(field=first_param)
# If one field requires to embed, all have to be embedded
# in case a sub-dependency is evaluated with a single unique body field
# That is combined (embedded) with other body fields
for param in flat_dependant.body_params:
setattr(get_field_info(param), "embed", True)
model_name = "Body_" + name
BodyModel = create_model(model_name)
for f in flat_dependant.body_params:
BodyModel.__fields__[f.name] = get_schema_compatible_field(field=f)
required = any(True for f in flat_dependant.body_params if f.required)
BodyFieldInfo_kwargs: Dict[str, Any] = dict(default=None)
if any(
isinstance(get_field_info(f), params.File) for f in flat_dependant.body_params
):
BodyFieldInfo: Type[params.Body] = params.File
elif any(
isinstance(get_field_info(f), params.Form) for f in flat_dependant.body_params
):
BodyFieldInfo = params.Form
else:
BodyFieldInfo = params.Body
body_param_media_types = [
getattr(get_field_info(f), "media_type")
for f in flat_dependant.body_params
if isinstance(get_field_info(f), params.Body)
]
if len(set(body_param_media_types)) == 1:
BodyFieldInfo_kwargs["media_type"] = body_param_media_types[0]
return create_response_field(
name="body",
type_=BodyModel,
required=required,
alias="body",
field_info=BodyFieldInfo(**BodyFieldInfo_kwargs),
)
|
the-stack_0_20550 | """
https://en.wikipedia.org/wiki/Burrows%E2%80%93Wheeler_transform
The Burrows–Wheeler transform (BWT, also called block-sorting compression)
rearranges a character string into runs of similar characters. This is useful
for compression, since it tends to be easy to compress a string that has runs
of repeated characters by techniques such as move-to-front transform and
run-length encoding. More importantly, the transformation is reversible,
without needing to store any additional data except the position of the first
original character. The BWT is thus a "free" method of improving the efficiency
of text compression algorithms, costing only some extra computation.
"""
from __future__ import annotations
def all_rotations(s: str) -> list[str]:
"""
:param s: The string that will be rotated len(s) times.
:return: A list with the rotations.
:raises TypeError: If s is not an instance of str.
Examples:
>>> all_rotations("^BANANA|") # doctest: +NORMALIZE_WHITESPACE
['^BANANA|', 'BANANA|^', 'ANANA|^B', 'NANA|^BA', 'ANA|^BAN', 'NA|^BANA',
'A|^BANAN', '|^BANANA']
>>> all_rotations("a_asa_da_casa") # doctest: +NORMALIZE_WHITESPACE
['a_asa_da_casa', '_asa_da_casaa', 'asa_da_casaa_', 'sa_da_casaa_a',
'a_da_casaa_as', '_da_casaa_asa', 'da_casaa_asa_', 'a_casaa_asa_d',
'_casaa_asa_da', 'casaa_asa_da_', 'asaa_asa_da_c', 'saa_asa_da_ca',
'aa_asa_da_cas']
>>> all_rotations("panamabanana") # doctest: +NORMALIZE_WHITESPACE
['panamabanana', 'anamabananap', 'namabananapa', 'amabananapan',
'mabananapana', 'abananapanam', 'bananapanama', 'ananapanamab',
'nanapanamaba', 'anapanamaban', 'napanamabana', 'apanamabanan']
>>> all_rotations(5)
Traceback (most recent call last):
...
TypeError: The parameter s type must be str.
"""
if not isinstance(s, str):
raise TypeError("The parameter s type must be str.")
return [s[i:] + s[:i] for i in range(len(s))]
def bwt_transform(s: str) -> dict:
"""
:param s: The string that will be used at bwt algorithm
:return: the string composed of the last char of each row of the ordered
rotations and the index of the original string at ordered rotations list
:raises TypeError: If the s parameter type is not str
:raises ValueError: If the s parameter is empty
Examples:
>>> bwt_transform("^BANANA")
{'bwt_string': 'BNN^AAA', 'idx_original_string': 6}
>>> bwt_transform("a_asa_da_casa")
{'bwt_string': 'aaaadss_c__aa', 'idx_original_string': 3}
>>> bwt_transform("panamabanana")
{'bwt_string': 'mnpbnnaaaaaa', 'idx_original_string': 11}
>>> bwt_transform(4)
Traceback (most recent call last):
...
TypeError: The parameter s type must be str.
>>> bwt_transform('')
Traceback (most recent call last):
...
ValueError: The parameter s must not be empty.
"""
if not isinstance(s, str):
raise TypeError("The parameter s type must be str.")
if not s:
raise ValueError("The parameter s must not be empty.")
rotations = all_rotations(s)
rotations.sort() # sort the list of rotations in alphabetically order
# make a string composed of the last char of each rotation
return {
"bwt_string": "".join([word[-1] for word in rotations]),
"idx_original_string": rotations.index(s),
}
def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
"""
:param bwt_string: The string returned from bwt algorithm execution
:param idx_original_string: A 0-based index of the string that was used to
generate bwt_string at ordered rotations list
:return: The string used to generate bwt_string when bwt was executed
:raises TypeError: If the bwt_string parameter type is not str
:raises ValueError: If the bwt_string parameter is empty
:raises TypeError: If the idx_original_string type is not int or if not
possible to cast it to int
:raises ValueError: If the idx_original_string value is lower than 0 or
greater than len(bwt_string) - 1
>>> reverse_bwt("BNN^AAA", 6)
'^BANANA'
>>> reverse_bwt("aaaadss_c__aa", 3)
'a_asa_da_casa'
>>> reverse_bwt("mnpbnnaaaaaa", 11)
'panamabanana'
>>> reverse_bwt(4, 11)
Traceback (most recent call last):
...
TypeError: The parameter bwt_string type must be str.
>>> reverse_bwt("", 11)
Traceback (most recent call last):
...
ValueError: The parameter bwt_string must not be empty.
>>> reverse_bwt("mnpbnnaaaaaa", "asd") # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
TypeError: The parameter idx_original_string type must be int or passive
of cast to int.
>>> reverse_bwt("mnpbnnaaaaaa", -1)
Traceback (most recent call last):
...
ValueError: The parameter idx_original_string must not be lower than 0.
>>> reverse_bwt("mnpbnnaaaaaa", 12) # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
ValueError: The parameter idx_original_string must be lower than
len(bwt_string).
>>> reverse_bwt("mnpbnnaaaaaa", 11.0)
'panamabanana'
>>> reverse_bwt("mnpbnnaaaaaa", 11.4)
'panamabanana'
"""
if not isinstance(bwt_string, str):
raise TypeError("The parameter bwt_string type must be str.")
if not bwt_string:
raise ValueError("The parameter bwt_string must not be empty.")
try:
idx_original_string = int(idx_original_string)
except ValueError:
raise TypeError(
"The parameter idx_original_string type must be int or passive"
" of cast to int."
)
if idx_original_string < 0:
raise ValueError("The parameter idx_original_string must not be lower than 0.")
if idx_original_string >= len(bwt_string):
raise ValueError(
"The parameter idx_original_string must be lower than" " len(bwt_string)."
)
ordered_rotations = [""] * len(bwt_string)
for x in range(len(bwt_string)):
for i in range(len(bwt_string)):
ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
ordered_rotations.sort()
return ordered_rotations[idx_original_string]
if __name__ == "__main__":
entry_msg = "Provide a string that I will generate its BWT transform: "
s = input(entry_msg).strip()
result = bwt_transform(s)
bwt_output_msg = "Burrows Wheeler transform for string '{}' results in '{}'"
print(bwt_output_msg.format(s, result["bwt_string"]))
original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"])
fmt = (
"Reversing Burrows Wheeler transform for entry '{}' we get original"
" string '{}'"
)
print(fmt.format(result["bwt_string"], original_string))
|
the-stack_0_20551 | # Absorb linear operators into proximal operators.
import copy as cp
import numpy as np
from proximal.lin_ops import Variable, scale, mul_elemwise, Constant
from proximal.prox_fns import (nonneg, weighted_nonneg, norm1, weighted_norm1, poisson_norm,
weighted_poisson_norm, sum_squares, weighted_sum_squares,
group_norm1, weighted_group_norm1, zero_prox)
WEIGHTED = {nonneg: weighted_nonneg,
norm1: weighted_norm1,
sum_squares: weighted_sum_squares,
poisson_norm: weighted_poisson_norm,
group_norm1: weighted_group_norm1}
def absorb_all_lin_ops(prox_funcs):
"""Repeatedy absorb lin ops.
"""
new_proxes = []
ready = prox_funcs[:]
while len(ready) > 0:
curr = ready.pop(0)
absorbed = absorb_lin_op(curr)
if len(absorbed) == 1 and absorbed[0] == curr:
new_proxes.append(absorbed[0])
else:
ready += absorbed
return new_proxes
def absorb_lin_op(prox_fn):
"""If possible moves the top level lin op argument
into the prox operator.
For example, elementwise multiplication can be folded into
a separable function's prox.
"""
# Never eliminate variables.
if isinstance(prox_fn.lin_op, Variable):
return [prox_fn]
# Absorb a lin op into sum_entries/zero.
if type(prox_fn) == zero_prox and prox_fn.gamma == 0:
outputs = []
inputs = [prox_fn.c]
for arg in prox_fn.lin_op.input_nodes:
outputs.append(np.zeros(arg.shape))
prox_fn.lin_op.adjoint(inputs, outputs)
new_proxes = []
for output, arg in zip(outputs, prox_fn.lin_op.input_nodes):
new_proxes.append(prox_fn.copy(arg, c=output))
return new_proxes
# Fold scaling into the function.
if isinstance(prox_fn.lin_op, mul_elemwise):
op_weight = prox_fn.lin_op.weight
def get_new_prox(prox_type, args):
new_prox = prox_type(*args)
copy_prox_fn(new_prox, prox_fn)
idxs = op_weight != 0
new_prox.b[idxs] = prox_fn.b[idxs] / op_weight[idxs]
new_prox.c = prox_fn.c * op_weight
return [new_prox]
if type(prox_fn) in list(WEIGHTED.keys()) and prox_fn.gamma == 0:
args = [prox_fn.lin_op.input_nodes[0]] + prox_fn.get_data() + \
[op_weight]
return get_new_prox(WEIGHTED[type(prox_fn)], args)
elif type(prox_fn) in list(WEIGHTED.values()) and prox_fn.gamma == 0:
args = [prox_fn.lin_op.input_nodes[0]] + prox_fn.get_data()
args[-1] = args[-1] * op_weight
return get_new_prox(type(prox_fn), args)
# Fold scalar into the function.
if isinstance(prox_fn.lin_op, scale):
scalar = prox_fn.lin_op.scalar
new_prox = prox_fn.copy(prox_fn.lin_op.input_nodes[0],
beta=prox_fn.beta * scalar, b=prox_fn.b / scalar,
c=prox_fn.c * scalar,
gamma=prox_fn.gamma * scalar ** 2)
return [new_prox]
# No change.
return [prox_fn]
def copy_prox_fn(dst_prox, src_prox):
"""Copy the optional parameters from src_prox to dst_prox.
"""
dst_prox.alpha = src_prox.alpha
dst_prox.beta = src_prox.beta
dst_prox.gamma = src_prox.gamma
dst_prox.b = src_prox.b
dst_prox.c = src_prox.c
dst_prox.d = src_prox.d
def copy_non_var(lin_op):
"""If not a variable, returns a shallow copy.
"""
if isinstance(lin_op, Variable):
return lin_op
else:
return cp.copy(lin_op)
def absorb_offset(prox_fn):
"""Absorb the constant offset into the b term and zero out constants in lin op.
"""
# Short circuit if no constant leaves.
if len(prox_fn.lin_op.constants()) == 0:
return prox_fn
new_b = -prox_fn.lin_op.get_offset()
# Zero out constants.
new_lin_op = copy_non_var(prox_fn.lin_op)
ready = [new_lin_op]
while len(ready) > 0:
curr = ready.pop(0)
for idx, arg in enumerate(curr.input_nodes):
if isinstance(arg, Constant):
curr.input_nodes[idx] = Constant(np.zeros(arg.shape))
# Don't copy variables.
else:
curr.input_nodes[idx] = copy_non_var(arg)
ready.append(curr.input_nodes[idx])
return prox_fn.copy(new_lin_op, b=new_b + prox_fn.b)
|
the-stack_0_20553 | from selenium.webdriver import Chrome
url = 'https://curso-python-selenium.netlify.app/aula_09_a.html'
browser = Chrome(executable_path=r'./drivers/chromedriver.exe')
browser.get(url)
browser.implicitly_wait(30)
btn = browser.find_element_by_css_selector('button')
btn.click()
sucesso = browser.find_element_by_css_selector('#finished')
assert sucesso.text == 'Carregamento concluido'
|
the-stack_0_20555 | """
Utility functions used by the jingjidic (经济词典, "economic dictionary") project. Includes:
errlog: write entries to the error log
getdic: download pre-compiled data files from GitHub
"""
def errlog(message):
"""
    Write an error log entry to logfile.txt.
    Required input: the error message ``message``.
"""
import os
import time
with open(
os.path.dirname(os.path.realpath(__file__)) + "/logfile.txt",
'a') as f:
f.write(
time.strftime("%Y-%m-%d %H:%M:%S ", time.localtime()) + message +
'\n')
def getdic(filename):
"""
    Download a pre-compiled dictionary file from GitHub.
    Required input: the file name ``filename``.
"""
import os
import requests
import sys
url = "https://raw.githubusercontent.com/sijichun/jingjidic/master/sub_dics/" + filename
PWD = os.path.dirname(os.path.realpath(__file__))
try:
html = requests.get(url).text
with open(PWD + '/sub_dics/' + filename, 'w') as f:
f.write(html)
except Exception as e:
print("错误:" + str(e))
sys.exit(1)
def remove_duplicates(filename, csv=False):
"""
    Remove duplicate lines (or CSV rows, when csv=True) from the file ``filename``.
"""
with open(filename, 'r') as f:
if csv:
import csv
rows = []
csv_file = csv.DictReader(f)
for r in csv_file:
rows.append(r)
            # dicts are unhashable, so deduplicate on their sorted item tuples instead of set()
            rows = list({tuple(sorted(r.items())): r for r in rows}.values())
else:
content = f.readlines()
content = list(set(content))
content.sort()
with open(filename, 'w') as f:
if csv:
csv_file = csv.DictWriter(f, list(rows[0].keys()))
csv_file.writeheader()
csv_file.writerows(rows)
else:
for w in content:
f.write(w.strip() + '\n')
|
the-stack_0_20559 | import os
import random
import re
import attr
import pexpect
from loguru import logger
from typing import List, Union
from pexpect.exceptions import EOF, ExceptionPexpect
from .exceptions import (
UnknownVersionException,
WindscribeNotRunningException,
WindscribeNotFoundException,
UnsupportedVersionException,
NotLoggedInException,
InvalidLocationException,
InvalidPasswordException,
InvalidUsernameException,
InvalidCredentialsException,
ProAccountRequiredException,
)
WINDSCRIBE_NOT_RUNNING = "The Windscribe service is not running."
WINDSCRIBE_NOT_FOUND = "The Windscribe CLI cannot be found."
NOT_LOGGED_IN = "Not logged in."
NOT_CONNECTED_TO_INTERNET = "Not connected to internet."
UNSUPPORTED_VERSION = "This module is incompatible with your Windscribe CLI version."
ANSI_SEQUENCES = r"(\x9B|\x1B\[)[0-?]*[ -\/]*[@-~]"
@attr.s
class WindscribeLocation:
""" Represents a Windscribe location. """
name = attr.ib(type=str)
abbrev = attr.ib(type=str)
city = attr.ib(type=str)
label = attr.ib(type=str)
pro = attr.ib(type=bool)
@attr.s
class WindscribeStatus:
""" Represents a Windscribe status. """
pid = attr.ib(type=int)
status = attr.ib(type=str)
uptime = attr.ib(type=str)
cpu_usage = attr.ib(type=float)
mem_usage = attr.ib(type=float)
ip = attr.ib(type=str)
connected = attr.ib(type=bool)
@attr.s
class WindscribeAccount:
""" Represents a Windscribe account. """
username = attr.ib(type=str)
current_usage = attr.ib(type=float)
current_usage_unit = attr.ib(type=str)
max_usage = attr.ib(type=float)
max_usage_unit = attr.ib(type=str)
plan = attr.ib(type=str)
def remove_ansi_sequences(text: str) -> str:
"""Remove ansi sequences from a string.
Args:
-----
`text (str)`: The string.
Returns:
--------
`str`: The string without the ansi sequences.
"""
return re.sub(ANSI_SEQUENCES, "", text)
def execute_command(cmd: str) -> pexpect.spawn:
"""Execute a command.
Args:
-----
`cmd (str)`: The command.
Raises:
-------
`WindscribeNotFoundException`: if the Windscribe CLI cannot be found.
Returns:
--------
`pexpect.spawn`: The child process.
"""
try:
logger.info(f"Execute command : {cmd}")
return pexpect.spawn(cmd, encoding="utf-8", timeout=None)
except ExceptionPexpect:
logger.error("Windscribe CLI is not found.")
raise WindscribeNotFoundException(WINDSCRIBE_NOT_FOUND)
def version() -> str:
"""Gets the version of the Windscribe CLI.
Raises:
-------
`WindscribeNotFoundException`: if the Windscribe CLI cannot be found.
`UnknownVersionException`: if the Windscribe version cannot be found.
Returns:
--------
`str`: The Windscribe CLI version.
"""
# Execute the command
child = execute_command("windscribe")
# Try to find the version in the output
match = child.expect([EOF, "Windscribe CLI client"])
# Wait until the command returns
child.wait()
# The version is found
if match == 1:
version = re.search(r"v[0-9]{1,}\.[0-9]{1,}",
child.readline()).group(0)
logger.info(f"Windscribe CLI client {version}.")
return version
# The version is not found
else:
raise UnknownVersionException(
"The Windscribe version cannot be found.")
def locations() -> List[WindscribeLocation]:
"""Get the list of server locations.
Raises:
-------
`WindscribeNotFoundException`: if the Windscribe CLI cannot be found.
`WindscribeNotRunningException`: if the Windscribe service is not running.
`UnsupportedVersionException`: if this module is incompatible with your Windscribe CLI version.
`NotLoggedInException` : if the user is not logged in.
Returns:
--------
`List[WindscribeLocation]`: The list of server locations.
"""
# Execute the command
child = execute_command("windscribe locations")
# Read the output
match = child.expect(
[
re.compile(
"Location[ ]{2,}Short Name[ ]{2,}City Name[ ]{2,}Label[ ]{2,}Pro[ ]{0,}(%s){0,}\\r\\n" % ANSI_SEQUENCES
),
"Please login to use Windscribe",
"Windscribe is not running",
EOF,
]
)
# Unsupported version
if match == 3:
logger.error(UNSUPPORTED_VERSION)
raise UnsupportedVersionException(UNSUPPORTED_VERSION)
# The service is not running
elif match == 2:
logger.error(WINDSCRIBE_NOT_RUNNING)
raise WindscribeNotRunningException(WINDSCRIBE_NOT_RUNNING)
# The user is not logged in
elif match == 1:
logger.warning(NOT_LOGGED_IN)
raise NotLoggedInException(NOT_LOGGED_IN)
# Get the locations
else:
# Loop for each lines
locations = []
for location in child.readlines():
location_info = re.split(
"[ ]{2,}", remove_ansi_sequences(location).strip())
# Check if the location info are valid
if (len(location_info) != 5 or location_info[4] != "*") and len(location_info) != 4:
raise UnsupportedVersionException(UNSUPPORTED_VERSION)
# Add the location
if len(location_info) == 5:
location_info[4] = True
else:
location_info.append(False)
locations.append(WindscribeLocation(*location_info))
return locations
def random_connect():
"""Connect to a random Windscribe server.
Raises:
-------
`WindscribeNotFoundException`: if the Windscribe CLI cannot be found.
`WindscribeNotRunningException`: if the Windscribe service is not running.
`UnsupportedVersionException`: if this module is incompatible with your Windscribe CLI version.
`NotLoggedInException` : if the user is not logged in.
`InvalidLocationException` : if the location is not valid.
`ProAccountRequiredException` : if a pro account is required.
`ConnectionError` : if the user is not connected to internet.
"""
connect(random.choice(locations()))
def connect(location: Union[str, WindscribeLocation] = "best"):
"""Connect to a Windscribe server.
Args:
-----
`location ([str, WindscribeLocation], optional)`: The Windscribe server location. Defaults to "best".
Raises:
-------
    `TypeError`: if location is neither a WindscribeLocation nor a str.
`WindscribeNotFoundException`: if the Windscribe CLI cannot be found.
`WindscribeNotRunningException`: if the Windscribe service is not running.
`UnsupportedVersionException`: if this module is incompatible with your Windscribe CLI version.
`NotLoggedInException` : if the user is not logged in.
`InvalidLocationException` : if the location is not valid.
`ProAccountRequiredException` : if a pro account is required.
`ConnectionError` : if the user is not connected to internet.
"""
# Check the parameter value
if isinstance(location, WindscribeLocation):
location = location.label
elif type(location) is not str:
raise TypeError(
"The location parameter must be a str or a WindscribeLocation object.")
# Execute the command
child = execute_command(f'windscribe connect "{location}"')
# Read the output
match = child.expect(
[
"Your IP changed from",
"IP check after connection failed, you may not have internet",
"is not a valid location",
"requires a Pro account",
"Please login to use Windscribe",
"Failed to connect",
"Windscribe is not running",
EOF,
]
)
# Unsupported version
if match == 7:
logger.error(UNSUPPORTED_VERSION)
raise UnsupportedVersionException(UNSUPPORTED_VERSION)
# The service is not running
elif match == 6:
logger.error(WINDSCRIBE_NOT_RUNNING)
raise WindscribeNotRunningException(WINDSCRIBE_NOT_RUNNING)
# The user needs to be connected to internet
elif match == 5:
logger.error(NOT_CONNECTED_TO_INTERNET)
raise ConnectionError(NOT_CONNECTED_TO_INTERNET)
# The user is not logged in
elif match == 4:
logger.warning(NOT_LOGGED_IN)
raise NotLoggedInException(NOT_LOGGED_IN)
# The user needs to be a pro account
elif match == 3:
logger.warning(f"{location} requires a Pro account.")
raise ProAccountRequiredException(
f"{location} requires a Pro account.")
# The location is invalid
elif match == 2:
logger.warning(f"{location} is not a valid location.")
raise InvalidLocationException(f"{location} is not a valid location.")
# The user may not have internet
elif match == 1:
# Wait until the command returns
child.wait()
logger.info("Connected.")
logger.warning(
"IP check after connection failed, you may not have internet")
# The connection succeed
else:
# Wait until the command returns
child.wait()
logger.info("Connected.")
def disconnect():
"""Disconnect from the Windscribe server.
Raises:
-------
`WindscribeNotFoundException`: if the Windscribe CLI cannot be found.
`WindscribeNotRunningException`: if the Windscribe service is not running.
`UnsupportedVersionException`: if this module is incompatible with your Windscribe CLI version.
`ConnectionError` : if the user is not connected to internet.
"""
# Execute the command
child = execute_command("windscribe disconnect")
# Read the output
match = child.expect(
[
"DISCONNECTED",
"Service communication error",
"Windscribe is not running",
EOF,
]
)
# Unsupported version
if match == 3:
logger.error(UNSUPPORTED_VERSION)
raise UnsupportedVersionException(UNSUPPORTED_VERSION)
# The service is not running
elif match == 2:
logger.error(WINDSCRIBE_NOT_RUNNING)
raise WindscribeNotRunningException(WINDSCRIBE_NOT_RUNNING)
# The user needs to be connected to internet
elif match == 1:
logger.error(NOT_CONNECTED_TO_INTERNET)
raise ConnectionError(NOT_CONNECTED_TO_INTERNET)
# Disconnected
else:
# Wait until the command returns
child.wait()
logger.info("Disconnected.")
def login(user: str = None, pw: str = None) -> bool:
"""Login to the Windscribe CLI.
Args:
-----
`user (str, optional)`: The username. Defaults to None.
`pw (str, optional)`: The password. Defaults to None.
Raises:
-------
`ValueError`: if the password is less than 4 characters long.
`ConnectionError` : if the user is not connected to internet.
    `InvalidUsernameException`: if the Windscribe username is not in the environment.
    `InvalidPasswordException`: if the Windscribe password is not in the environment.
`InvalidCredentialsException`: if the credentials are invalid.
`WindscribeNotFoundException`: if the Windscribe CLI cannot be found.
`WindscribeNotRunningException`: if the Windscribe service is not running.
`UnsupportedVersionException`: if this module is incompatible with your Windscribe CLI version.
Returns:
--------
`bool`: True if the user has been logged in, False if the user was already logged in.
"""
# Get the user
if user is None:
user = os.environ.get("WINDSCRIBE_USER")
if user is None:
raise InvalidUsernameException(
"Could not found Windscribe username in environement.")
user += "\n"
# Get the password
if pw is None:
pw = os.environ.get("WINDSCRIBE_PW")
if pw is None:
raise InvalidPasswordException(
"Could not found Windscribe password in environement.")
pw += "\n"
# Value checking
if len(pw) <= 4:
raise ValueError(
"Windscribe password must be at least 4 characters long.")
# Execute the command
child = execute_command("windscribe login")
# Read the output
match = child.expect(["Windscribe Username:", "Already Logged in", EOF])
# Unsupported version
if match == 2:
logger.error(UNSUPPORTED_VERSION)
raise UnsupportedVersionException(UNSUPPORTED_VERSION)
# Already logged in
elif match == 1:
logger.warning("Already logged in.")
return False
# Not logged in
else:
# Send username
child.sendline(user)
# Send password
child.expect("Windscribe Password:")
child.sendline(pw)
# Check if the connection was successful
match = child.expect(
[
"Logged In",
"API Error: Could not log in with provided credentials",
"Windscribe is not running",
"API Error: No API Connectivity",
EOF,
]
)
# Unsupported version
if match == 4:
logger.error(UNSUPPORTED_VERSION)
raise UnsupportedVersionException(UNSUPPORTED_VERSION)
# The user is not connected to internet
elif match == 3:
logger.error(NOT_CONNECTED_TO_INTERNET)
raise ConnectionError(NOT_CONNECTED_TO_INTERNET)
# The service is not running
elif match == 2:
logger.error(WINDSCRIBE_NOT_RUNNING)
raise WindscribeNotRunningException(WINDSCRIBE_NOT_RUNNING)
# Invalid credentials
elif match == 1:
logger.warning("Could not log in with provided credentials.")
raise InvalidCredentialsException(
"Could not log in with provided credentials.")
# Logged in
else:
# Wait the end of the commands
child.wait()
logger.info("Logged in.")
return True
def logout() -> bool:
"""Logout from the Windscribe CLI.
Raises:
-------
`WindscribeNotFoundException`: if the Windscribe CLI cannot be found.
`WindscribeNotRunningException`: if the Windscribe service is not running.
`UnsupportedVersionException`: if this module is incompatible with your Windscribe CLI version.
Returns:
--------
`bool`: True if the user has been logged out, False if the user was already logged out.
"""
# Execute the command
child = execute_command("windscribe logout")
# Read the output
match = child.expect(
[
"DISCONNECTED",
"Not logged in",
"Windscribe is not running",
EOF,
]
)
# Unsupported version
if match == 3:
logger.error(UNSUPPORTED_VERSION)
raise UnsupportedVersionException(UNSUPPORTED_VERSION)
# The service is not running
elif match == 2:
logger.error(WINDSCRIBE_NOT_RUNNING)
raise WindscribeNotRunningException(WINDSCRIBE_NOT_RUNNING)
# The user is not logged in
elif match == 1:
# Wait until the command returns
child.wait()
logger.warning(NOT_LOGGED_IN)
return False
# The user was logged in
else:
# Wait until the command returns
child.wait()
logger.info("Logged Out.")
return True
def status() -> WindscribeStatus:
"""Gets the Windscribe CLI status.
Raises:
-------
`WindscribeNotFoundException`: if the Windscribe CLI cannot be found.
`WindscribeNotRunningException`: if the Windscribe service is not running.
`UnsupportedVersionException`: if this module is incompatible with your Windscribe CLI version.
`ConnectionError` : if the user is not connected to internet.
Returns:
--------
`WindscribeStatus`: The Windscribe status.
"""
# Execute the command
child = execute_command("windscribe status")
# Read the output
match = child.expect(
[
"windscribe --",
"Windscribe is not running",
EOF,
]
)
# Unsupported version
if match == 2:
logger.error(UNSUPPORTED_VERSION)
raise UnsupportedVersionException(UNSUPPORTED_VERSION)
# The service is not running
elif match == 1:
logger.error(WINDSCRIBE_NOT_RUNNING)
raise WindscribeNotRunningException(WINDSCRIBE_NOT_RUNNING)
# Get status info
else:
line_info = re.split(
r"[,| ]{0,}[^ ]+: ", remove_ansi_sequences(child.readline()).strip())
# Check if unsupported version
if len(line_info) != 6:
logger.error(UNSUPPORTED_VERSION)
raise UnsupportedVersionException(UNSUPPORTED_VERSION)
# Parse info
pid = int(line_info[1])
status = line_info[2]
uptime = line_info[3]
cpu_usage = float(line_info[4])
mem_usage = float(line_info[5])
# Read output
match = child.expect(["IP:", "Service communication error", EOF])
# Unsupported version
if match == 2:
logger.error(UNSUPPORTED_VERSION)
raise UnsupportedVersionException(UNSUPPORTED_VERSION)
# The user needs to be connected to internet
elif match == 1:
logger.error(NOT_CONNECTED_TO_INTERNET)
raise ConnectionError(NOT_CONNECTED_TO_INTERNET)
# Get ip
else:
match = re.findall(
r"\b((?:[0-9]{1,3}\.){3}[0-9]{1,3})\b", remove_ansi_sequences(child.readline()).strip())
# Check if unsupported version
if len(match) == 0:
logger.error(UNSUPPORTED_VERSION)
raise UnsupportedVersionException(UNSUPPORTED_VERSION)
# Parse info
ip = match[0]
# Get connected status
connected = "DISCONNECTED" not in remove_ansi_sequences(
child.readline())
# Log
logger.info(
f"pid : {pid}, status: {status}, uptime: {uptime}, %cpu: {cpu_usage}, %mem: {mem_usage}")
logger.info(f"IP : {ip}")
logger.info("CONNECTED" if connected else "DISCONNECTED")
# Return status
return WindscribeStatus(*(pid, status, uptime, cpu_usage, mem_usage, ip, connected))
def account() -> WindscribeAccount:
"""Get Windscribe account informations.
Raises:
-------
`WindscribeNotFoundException`: if the Windscribe CLI cannot be found.
`WindscribeNotRunningException`: if the Windscribe service is not running.
`UnsupportedVersionException`: if this module is incompatible with your Windscribe CLI version.
`ConnectionError` : if the user is not connected to internet.
`NotLoggedInException` : if the user is not logged in.
"""
# Execute the command
child = execute_command("windscribe account")
# Read the output
match = child.expect(
[
re.compile(
"[ |-]{0,}My Account[ |-]{0,}(%s){0,}\\r\\n" % ANSI_SEQUENCES),
"Please login to use Windscribe",
"Failed to connect",
"Windscribe is not running",
EOF,
]
)
# Unsupported version
if match == 4:
logger.error(UNSUPPORTED_VERSION)
raise UnsupportedVersionException(UNSUPPORTED_VERSION)
# The service is not running
elif match == 3:
logger.error(WINDSCRIBE_NOT_RUNNING)
raise WindscribeNotRunningException(WINDSCRIBE_NOT_RUNNING)
# The user needs to be connected to internet
elif match == 2:
logger.error(NOT_CONNECTED_TO_INTERNET)
raise ConnectionError(NOT_CONNECTED_TO_INTERNET)
# The user is not logged in
elif match == 1:
logger.warning(NOT_LOGGED_IN)
raise NotLoggedInException(NOT_LOGGED_IN)
# Get account info
else:
# Username
child.expect("Username:")
username = remove_ansi_sequences(child.readline()).strip()
# Data usage
child.expect("Data Usage:")
line_info = remove_ansi_sequences(child.readline()).strip()
data_usage = line_info.split(" ")
if len(data_usage) != 5:
raise UnsupportedVersionException(UNSUPPORTED_VERSION)
# Plan
child.expect("Plan:")
plan = remove_ansi_sequences(child.readline()).strip()
# Log
logger.info(
f"Username: {username}, Data Usage: {data_usage[0]}{data_usage[1]}/{data_usage[3]}{data_usage[4]}, Plan: {plan}"
)
# Return account
return WindscribeAccount(
*(username, float(data_usage[0]), data_usage[1], float(data_usage[3]), data_usage[4], plan)
)
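# --- Usage sketch (illustrative only; not part of the original module) ---
# Assumes the Windscribe CLI is installed and the service is running. The field
# names on WindscribeStatus/WindscribeAccount (ip, connected, username, plan) are
# assumed from the constructor calls above; their definitions are not shown here.
#
# try:
#     st = status()
#     print(st.ip, st.connected)
#     acct = account()              # requires being logged in
#     print(acct.username, acct.plan)
# except WindscribeNotRunningException:
#     print("Start the Windscribe service first.")
# finally:
#     logout()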
|
the-stack_0_20561 | # -*- coding: utf-8 -*-
# @Author: 何睿
# @Create Date: 2018-12-13 16:05:46
# @Last Modified by: 何睿
# @Last Modified time: 2018-12-13 16:32:08
class Solution:
def groupAnagrams(self, strs):
"""
:type strs: List[str]
:rtype: List[List[str]]
"""
res = []
strdict = {}
for item in strs:
key = ''.join(sorted(item))
            strdict.setdefault(key, []).append(item)
for key in strdict:
res.append(strdict[key])
return res
if __name__ == "__main__":
so = Solution()
res = so.groupAnagrams(["eat", "tea", "tan", "ate", "nat", "bat"])
print(res) |
the-stack_0_20562 | # Copyright (c) 2019-present, Francesco Croce
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import torch
from autoattack.fab_base import FABAttack
class FABAttack_TF(FABAttack):
"""
Fast Adaptive Boundary Attack (Linf, L2, L1)
https://arxiv.org/abs/1907.02044
:param model: TF_model
:param norm: Lp-norm to minimize ('Linf', 'L2', 'L1' supported)
:param n_restarts: number of random restarts
:param n_iter: number of iterations
:param eps: epsilon for the random restarts
:param alpha_max: alpha_max
:param eta: overshooting
:param beta: backward step
"""
def __init__(
self,
model,
norm='Linf',
n_restarts=1,
n_iter=100,
eps=None,
alpha_max=0.1,
eta=1.05,
beta=0.9,
loss_fn=None,
verbose=False,
seed=0,
targeted=False,
device=None,
n_target_classes=9):
""" FAB-attack implementation in TF2 """
self.model = model
super().__init__(norm,
n_restarts,
n_iter,
eps,
alpha_max,
eta,
beta,
loss_fn,
verbose,
seed,
targeted,
device,
n_target_classes)
def _predict_fn(self, x):
return self.model.predict(x)
def _get_predicted_label(self, x):
with torch.no_grad():
outputs = self._predict_fn(x)
_, y = torch.max(outputs, dim=1)
return y
def get_diff_logits_grads_batch(self, imgs, la):
y2, g2 = self.model.grad_logits(imgs)
df = y2 - y2[torch.arange(imgs.shape[0]), la].unsqueeze(1)
dg = g2 - g2[torch.arange(imgs.shape[0]), la].unsqueeze(1)
df[torch.arange(imgs.shape[0]), la] = 1e10
return df, dg
def get_diff_logits_grads_batch_targeted(self, imgs, la, la_target):
df, dg = self.model.get_grad_diff_logits_target(imgs, la, la_target)
df.unsqueeze_(1)
dg.unsqueeze_(1)
return df, dg
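# --- Usage sketch (illustrative only; not part of the original file) ---
# The wrapped `model` is assumed to expose predict(x), grad_logits(x) and
# get_grad_diff_logits_target(x, la, la_target), exactly as used by the methods
# above. perturb() is assumed to be provided by the FABAttack base class.
#
# attack = FABAttack_TF(model, norm='Linf', n_restarts=1, n_iter=100, eps=8 / 255)
# x_adv = attack.perturb(x, y)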
|
the-stack_0_20563 | import asyncio
import ipaddress
import os.path
import random
import socket
from collections import namedtuple
import aiodns
import aiohttp
import maxminddb
from .errors import ResolveError
from .utils import DATA_DIR, log
GeoData = namedtuple(
'GeoData', ['code', 'name', 'region_code', 'region_name', 'city_name']
)
_countrydb = os.path.join(DATA_DIR, 'GeoLite2-Country.mmdb')
_citydb = os.path.join(DATA_DIR, 'GeoLite2-City.mmdb')
_geo_db = _citydb if os.path.exists(_citydb) else _countrydb
_mmdb_reader = maxminddb.open_database(_geo_db)
class Resolver:
"""Async host resolver based on aiodns."""
_cached_hosts = {}
_ip_hosts = [
'https://wtfismyip.com/text',
'http://api.ipify.org/',
'http://ipinfo.io/ip',
'http://ipv4.icanhazip.com/',
'http://myexternalip.com/raw',
'http://ifconfig.io/ip',
]
def __init__(self, timeout=5, loop=None):
self._timeout = timeout
self._loop = loop or asyncio.get_event_loop()
self._resolver = aiodns.DNSResolver(loop=self._loop)
@staticmethod
def host_is_ip(host):
"""Check a host is IP address."""
# TODO: add IPv6 support
try:
ipaddress.IPv4Address(host)
except ipaddress.AddressValueError:
return False
else:
return True
@staticmethod
def get_ip_info(ip):
"""Return geo information about IP address.
`code` - ISO country code
`name` - Full name of country
`region_code` - ISO region code
`region_name` - Full name of region
`city_name` - Full name of city
"""
# from pprint import pprint
try:
ipInfo = _mmdb_reader.get(ip) or {}
except (maxminddb.errors.InvalidDatabaseError, ValueError):
ipInfo = {}
code, name = '--', 'Unknown'
city_name, region_code, region_name = ('Unknown',) * 3
if 'country' in ipInfo:
code = ipInfo['country']['iso_code']
name = ipInfo['country']['names']['en']
elif 'continent' in ipInfo:
code = ipInfo['continent']['code']
name = ipInfo['continent']['names']['en']
if 'city' in ipInfo:
city_name = ipInfo['city']['names']['en']
if 'subdivisions' in ipInfo:
region_code = ipInfo['subdivisions'][0]['iso_code']
region_name = ipInfo['subdivisions'][0]['names']['en']
return GeoData(code, name, region_code, region_name, city_name)
def _pop_random_ip_host(self):
host = random.choice(self._ip_hosts)
self._ip_hosts.remove(host)
return host
async def get_real_ext_ip(self):
"""Return real external IP address."""
while self._ip_hosts:
try:
timeout = aiohttp.ClientTimeout(total=self._timeout)
async with aiohttp.ClientSession(
timeout=timeout, loop=self._loop
) as session, session.get(self._pop_random_ip_host()) as resp:
ip = await resp.text()
except asyncio.TimeoutError:
pass
else:
ip = ip.strip()
if self.host_is_ip(ip):
log.debug('Real external IP: %s', ip)
break
else:
raise RuntimeError('Could not get the external IP')
return ip
async def resolve(self, host, port=80, family=None, qtype='A', logging=True):
"""Return resolving IP address(es) from host name."""
if self.host_is_ip(host):
return host
_host = self._cached_hosts.get(host)
if _host:
return _host
resp = await self._resolve(host, qtype)
if resp:
hosts = [
{
'hostname': host,
'host': r.host,
'port': port,
'family': family,
'proto': socket.IPPROTO_IP,
'flags': socket.AI_NUMERICHOST,
}
for r in resp
]
if family:
self._cached_hosts[host] = hosts
else:
self._cached_hosts[host] = hosts[0]['host']
if logging:
log.debug('%s: Host resolved: %s' % (host, self._cached_hosts[host]))
else:
if logging:
log.warning('%s: Could not resolve host' % host)
return self._cached_hosts.get(host)
async def _resolve(self, host, qtype):
try:
resp = await asyncio.wait_for(
self._resolver.query(host, qtype), timeout=self._timeout
)
except (aiodns.error.DNSError, asyncio.TimeoutError):
raise ResolveError
else:
return resp
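# --- Usage sketch (illustrative only; assumes an event loop, network access and
# the GeoLite2 database shipped under DATA_DIR) ---
#
# async def _demo():
#     resolver = Resolver(timeout=5)
#     ip = await resolver.get_real_ext_ip()
#     print(ip, Resolver.get_ip_info(ip))
#     print(await resolver.resolve('example.com'))
#
# if __name__ == '__main__':
#     asyncio.get_event_loop().run_until_complete(_demo())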
|
the-stack_0_20564 | # coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Const Class
# this is an auto-generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.awt
from enum import IntEnum
from typing import TYPE_CHECKING
from ooo.oenv.env_const import UNO_ENVIRONMENT, UNO_RUNTIME
_DYNAMIC = False
if (not TYPE_CHECKING) and UNO_RUNTIME and UNO_ENVIRONMENT:
_DYNAMIC = True
if not TYPE_CHECKING and _DYNAMIC:
from com.sun.star.awt import FontEmphasisMark as FontEmphasisMark
if hasattr(FontEmphasisMark, '_constants') and isinstance(FontEmphasisMark._constants, dict):
FontEmphasisMark._constants['__ooo_ns__'] = 'com.sun.star.awt'
FontEmphasisMark._constants['__ooo_full_ns__'] = 'com.sun.star.awt.FontEmphasisMark'
FontEmphasisMark._constants['__ooo_type_name__'] = 'const'
def build_enum():
global FontEmphasisMarkEnum
ls = [f for f in dir(FontEmphasisMark) if not callable(getattr(FontEmphasisMark, f)) and not f.startswith('__')]
_dict = {}
for name in ls:
_dict[name] = getattr(FontEmphasisMark, name)
FontEmphasisMarkEnum = IntEnum('FontEmphasisMarkEnum', _dict)
build_enum()
else:
from ...lo.awt.font_emphasis_mark import FontEmphasisMark as FontEmphasisMark
class FontEmphasisMarkEnum(IntEnum):
"""
Enum of Const Class FontEmphasisMark
These values are used to specify the kind of emphasis mark.
They may be expanded in future versions.
"""
NONE = FontEmphasisMark.NONE
"""
specifies no emphasis mark.
"""
DOT = FontEmphasisMark.DOT
"""
specifies emphasis mark dot.
"""
CIRCLE = FontEmphasisMark.CIRCLE
"""
specifies emphasis mark circle.
"""
DISC = FontEmphasisMark.DISC
"""
specifies emphasis mark disc.
"""
ACCENT = FontEmphasisMark.ACCENT
"""
specifies emphasis mark accent.
"""
ABOVE = FontEmphasisMark.ABOVE
"""
specifies that the emphasis mark should be positioned above the characters.
"""
BELOW = FontEmphasisMark.BELOW
"""
specifies that the emphasis mark should be positioned below the characters.
"""
__all__ = ['FontEmphasisMark', 'FontEmphasisMarkEnum']
|
the-stack_0_20565 | import collections
import dataclasses
import heapq
import json
import logging
import re
from dataclasses import dataclass
from datetime import datetime, timedelta
from typing import (
Any,
Counter,
Dict,
Iterable,
List,
MutableMapping,
Optional,
Union,
cast,
)
import cachetools
import pydantic
from google.cloud.logging_v2.client import Client as GCPLoggingClient
from more_itertools import partition
import datahub.emitter.mce_builder as builder
from datahub.configuration.common import AllowDenyPattern
from datahub.configuration.time_window_config import get_time_bucket
from datahub.emitter.mcp import MetadataChangeProposalWrapper
from datahub.ingestion.api.common import PipelineContext
from datahub.ingestion.api.source import Source, SourceReport
from datahub.ingestion.api.workunit import MetadataWorkUnit
from datahub.ingestion.source.usage.usage_common import (
BaseUsageConfig,
GenericAggregatedDataset,
)
from datahub.metadata.schema_classes import (
ChangeTypeClass,
OperationClass,
OperationTypeClass,
)
from datahub.utilities.delayed_iter import delayed_iter
logger = logging.getLogger(__name__)
# ProtobufEntry is generated dynamically using a namedtuple, so mypy
# can't really deal with it. As such, we short circuit mypy's typing
# but keep the code relatively clear by retaining dummy types.
#
# from google.cloud.logging_v2 import ProtobufEntry
# AuditLogEntry = ProtobufEntry
AuditLogEntry = Any
# BigQueryAuditMetadata is the v2 format in which audit logs are exported to BigQuery
BigQueryAuditMetadata = Any
DEBUG_INCLUDE_FULL_PAYLOADS = False
# Handle yearly, monthly, daily, or hourly partitioning.
# See https://cloud.google.com/bigquery/docs/partitioned-tables.
# This REGEX handles both Partitioned Tables ($ separator) and Sharded Tables (_ separator)
PARTITIONED_TABLE_REGEX = re.compile(
r"^(.+)[\$_](\d{4}|\d{6}|\d{8}|\d{10}|__PARTITIONS_SUMMARY__)$"
)
# Handle table snapshots
# See https://cloud.google.com/bigquery/docs/table-snapshots-intro.
SNAPSHOT_TABLE_REGEX = re.compile(r"^(.+)@(\d{13})$")
BQ_DATETIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
BQ_DATE_SHARD_FORMAT = "%Y%m%d"
BQ_FILTER_REGEX_ALLOW_TEMPLATE = """
protoPayload.serviceData.jobCompletedEvent.job.jobStatistics.referencedTables.tableId =~ "{allow_pattern}"
"""
BQ_FILTER_REGEX_DENY_TEMPLATE = """
{logical_operator}
protoPayload.serviceData.jobCompletedEvent.job.jobStatistics.referencedTables.tableId !~ "{deny_pattern}"
"""
BQ_FILTER_RULE_TEMPLATE = """
protoPayload.serviceName="bigquery.googleapis.com"
AND
(
(
protoPayload.methodName="jobservice.jobcompleted"
AND
protoPayload.serviceData.jobCompletedEvent.eventName="query_job_completed"
AND
protoPayload.serviceData.jobCompletedEvent.job.jobStatus.state="DONE"
AND
NOT protoPayload.serviceData.jobCompletedEvent.job.jobStatus.error.code:*
)
OR
(
protoPayload.metadata.tableDataRead:*
)
)
AND (
{allow_regex}
{deny_regex}
OR
protoPayload.metadata.tableDataRead.reason = "JOB"
)
AND
timestamp >= "{start_time}"
AND
timestamp < "{end_time}"
""".strip()
OPERATION_STATEMENT_TYPES = {
"INSERT": OperationTypeClass.INSERT,
"UPDATE": OperationTypeClass.UPDATE,
"DELETE": OperationTypeClass.DELETE,
"MERGE": OperationTypeClass.UPDATE,
"CREATE": OperationTypeClass.CREATE,
"CREATE_TABLE_AS_SELECT": OperationTypeClass.CREATE,
"CREATE_SCHEMA": OperationTypeClass.CREATE,
"DROP_TABLE": OperationTypeClass.DROP,
}
@dataclass(frozen=True, order=True)
class BigQueryTableRef:
project: str
dataset: str
table: str
@classmethod
def from_spec_obj(cls, spec: dict) -> "BigQueryTableRef":
return cls(spec["projectId"], spec["datasetId"], spec["tableId"])
@classmethod
def from_string_name(cls, ref: str) -> "BigQueryTableRef":
parts = ref.split("/")
if parts[0] != "projects" or parts[2] != "datasets" or parts[4] != "tables":
raise ValueError(f"invalid BigQuery table reference: {ref}")
return cls(parts[1], parts[3], parts[5])
def is_anonymous(self) -> bool:
# Temporary tables will have a dataset that begins with an underscore.
return self.dataset.startswith("_")
def remove_extras(self) -> "BigQueryTableRef":
# Handle partitioned and sharded tables.
matches = PARTITIONED_TABLE_REGEX.match(self.table)
if matches:
table_name = matches.group(1)
logger.debug(
f"Found partitioned table {self.table}. Using {table_name} as the table name."
)
return BigQueryTableRef(self.project, self.dataset, table_name)
# Handle table snapshots.
matches = SNAPSHOT_TABLE_REGEX.match(self.table)
if matches:
table_name = matches.group(1)
logger.debug(
f"Found table snapshot {self.table}. Using {table_name} as the table name."
)
return BigQueryTableRef(self.project, self.dataset, table_name)
# Handle exceptions
invalid_chars_in_table_name: List[str] = [
c for c in {"$", "@"} if c in self.table
]
if invalid_chars_in_table_name:
raise ValueError(
f"Cannot handle {self} - poorly formatted table name, contains {invalid_chars_in_table_name}"
)
return self
def __str__(self) -> str:
return f"projects/{self.project}/datasets/{self.dataset}/tables/{self.table}"
AggregatedDataset = GenericAggregatedDataset[BigQueryTableRef]
def _table_ref_to_urn(ref: BigQueryTableRef, env: str) -> str:
return builder.make_dataset_urn(
"bigquery", f"{ref.project}.{ref.dataset}.{ref.table}", env
)
def _job_name_ref(project: str, jobId: str) -> Optional[str]:
if project and jobId:
return f"projects/{project}/jobs/{jobId}"
else:
return None
@dataclass
class ReadEvent:
"""
A container class for data from a TableDataRead event.
See https://cloud.google.com/bigquery/docs/reference/auditlogs/rest/Shared.Types/BigQueryAuditMetadata#BigQueryAuditMetadata.TableDataRead.
"""
timestamp: datetime
actor_email: str
resource: BigQueryTableRef
fieldsRead: List[str]
readReason: Optional[str]
jobName: Optional[str]
payload: Any
# We really should use composition here since the query isn't actually
# part of the read event, but this solution is just simpler.
query: Optional[str] = None # populated via join
@classmethod
def can_parse_entry(cls, entry: AuditLogEntry) -> bool:
try:
entry.payload["metadata"]["tableDataRead"]
return True
except (KeyError, TypeError):
return False
@classmethod
def from_entry(cls, entry: AuditLogEntry) -> "ReadEvent":
user = entry.payload["authenticationInfo"]["principalEmail"]
resourceName = entry.payload["resourceName"]
readInfo = entry.payload["metadata"]["tableDataRead"]
fields = readInfo.get("fields", [])
# https://cloud.google.com/bigquery/docs/reference/auditlogs/rest/Shared.Types/BigQueryAuditMetadata.TableDataRead.Reason
readReason = readInfo.get("reason")
jobName = None
if readReason == "JOB":
jobName = readInfo.get("jobName")
readEvent = ReadEvent(
actor_email=user,
timestamp=entry.timestamp,
resource=BigQueryTableRef.from_string_name(resourceName),
fieldsRead=fields,
readReason=readReason,
jobName=jobName,
payload=entry.payload if DEBUG_INCLUDE_FULL_PAYLOADS else None,
)
if readReason == "JOB" and not jobName:
logger.debug(
"jobName from read events is absent when readReason is JOB. "
"Auditlog entry - {logEntry}".format(logEntry=entry)
)
return readEvent
@dataclass
class QueryEvent:
"""
A container class for a query job completion event.
See https://cloud.google.com/bigquery/docs/reference/auditlogs/rest/Shared.Types/AuditData#JobCompletedEvent.
"""
timestamp: datetime
actor_email: str
query: str
statementType: Optional[str]
destinationTable: Optional[BigQueryTableRef]
referencedTables: Optional[List[BigQueryTableRef]]
jobName: Optional[str]
payload: Any
@classmethod
def can_parse_entry(cls, entry: AuditLogEntry) -> bool:
try:
entry.payload["serviceData"]["jobCompletedEvent"]["job"]
return True
except (KeyError, TypeError):
return False
@classmethod
def from_entry(cls, entry: AuditLogEntry) -> "QueryEvent":
user = entry.payload["authenticationInfo"]["principalEmail"]
job = entry.payload["serviceData"]["jobCompletedEvent"]["job"]
jobName = _job_name_ref(
job.get("jobName", {}).get("projectId"), job.get("jobName", {}).get("jobId")
)
rawQuery = job["jobConfiguration"]["query"]["query"]
rawDestTable = job["jobConfiguration"]["query"]["destinationTable"]
destinationTable = None
if rawDestTable:
destinationTable = BigQueryTableRef.from_spec_obj(rawDestTable)
try:
statementType = job["jobConfiguration"]["query"]["statementType"]
except KeyError:
statementType = None
rawRefTables = job["jobStatistics"].get("referencedTables")
referencedTables = None
if rawRefTables:
referencedTables = [
BigQueryTableRef.from_spec_obj(spec) for spec in rawRefTables
]
queryEvent = QueryEvent(
timestamp=entry.timestamp,
actor_email=user,
query=rawQuery,
statementType=statementType,
destinationTable=destinationTable,
referencedTables=referencedTables,
jobName=jobName,
payload=entry.payload if DEBUG_INCLUDE_FULL_PAYLOADS else None,
)
if not jobName:
logger.debug(
"jobName from query events is absent. "
"Auditlog entry - {logEntry}".format(logEntry=entry)
)
return queryEvent
@classmethod
def can_parse_exported_bigquery_audit_metadata(
cls, row: BigQueryAuditMetadata
) -> bool:
try:
row["timestamp"]
row["protoPayload"]
row["metadata"]
return True
except (KeyError, TypeError):
return False
@classmethod
def from_exported_bigquery_audit_metadata(
cls, row: BigQueryAuditMetadata
) -> "QueryEvent":
timestamp = row["timestamp"]
payload = row["protoPayload"]
metadata = json.loads(row["metadata"])
user = payload["authenticationInfo"]["principalEmail"]
job = metadata["jobChange"]["job"]
job_name = job.get("jobName")
raw_query = job["jobConfig"]["queryConfig"]["query"]
raw_dest_table = job["jobConfig"]["queryConfig"].get("destinationTable")
destination_table = None
if raw_dest_table:
destination_table = BigQueryTableRef.from_string_name(raw_dest_table)
raw_ref_tables = job["jobStats"]["queryStats"].get("referencedTables")
referenced_tables = None
if raw_ref_tables:
referenced_tables = [
BigQueryTableRef.from_string_name(spec) for spec in raw_ref_tables
]
        try:
            statementType = job["jobConfig"]["queryConfig"]["statementType"]
        except KeyError:
            statementType = None
query_event = QueryEvent(
timestamp=timestamp,
actor_email=user,
query=raw_query,
statementType=statementType,
destinationTable=destination_table,
referencedTables=referenced_tables,
jobName=job_name,
payload=payload if DEBUG_INCLUDE_FULL_PAYLOADS else None,
)
if not job_name:
logger.debug(
"jobName from query events is absent. "
"BigQueryAuditMetadata entry - {logEntry}".format(logEntry=row)
)
return query_event
class BigQueryUsageConfig(BaseUsageConfig):
projects: Optional[List[str]] = None
project_id: Optional[str] = None # deprecated in favor of `projects`
extra_client_options: dict = {}
env: str = builder.DEFAULT_ENV
table_pattern: Optional[AllowDenyPattern] = None
log_page_size: Optional[pydantic.PositiveInt] = 1000
query_log_delay: Optional[pydantic.PositiveInt] = None
max_query_duration: timedelta = timedelta(minutes=15)
@pydantic.validator("project_id")
def note_project_id_deprecation(cls, v, values, **kwargs):
logger.warning(
"bigquery-usage project_id option is deprecated; use projects instead"
)
values["projects"] = [v]
return None
def get_allow_pattern_string(self) -> str:
return "|".join(self.table_pattern.allow) if self.table_pattern else ""
def get_deny_pattern_string(self) -> str:
return "|".join(self.table_pattern.deny) if self.table_pattern else ""
@dataclass
class BigQueryUsageSourceReport(SourceReport):
dropped_table: Counter[str] = dataclasses.field(default_factory=collections.Counter)
def report_dropped(self, key: str) -> None:
self.dropped_table[key] += 1
class BigQueryUsageSource(Source):
config: BigQueryUsageConfig
report: BigQueryUsageSourceReport
def __init__(self, config: BigQueryUsageConfig, ctx: PipelineContext):
super().__init__(ctx)
self.config = config
self.report = BigQueryUsageSourceReport()
@classmethod
def create(cls, config_dict: dict, ctx: PipelineContext) -> "BigQueryUsageSource":
config = BigQueryUsageConfig.parse_obj(config_dict)
return cls(config, ctx)
def get_workunits(self) -> Iterable[MetadataWorkUnit]:
clients = self._make_bigquery_clients()
bigquery_log_entries = self._get_bigquery_log_entries(clients)
parsed_bigquery_log_events = self._parse_bigquery_log_entries(
bigquery_log_entries
)
parsed_events_uncasted: Iterable[Union[ReadEvent, QueryEvent, MetadataWorkUnit]]
last_updated_work_units_uncasted: Iterable[
Union[ReadEvent, QueryEvent, MetadataWorkUnit]
]
parsed_events_uncasted, last_updated_work_units_uncasted = partition(
lambda x: isinstance(x, MetadataWorkUnit), parsed_bigquery_log_events
)
parsed_events: Iterable[Union[ReadEvent, QueryEvent]] = cast(
Iterable[Union[ReadEvent, QueryEvent]], parsed_events_uncasted
)
last_updated_work_units: Iterable[MetadataWorkUnit] = cast(
Iterable[MetadataWorkUnit], last_updated_work_units_uncasted
)
if self.config.include_operational_stats:
for wu in last_updated_work_units:
self.report.report_workunit(wu)
yield wu
hydrated_read_events = self._join_events_by_job_id(parsed_events)
aggregated_info = self._aggregate_enriched_read_events(hydrated_read_events)
for time_bucket in aggregated_info.values():
for aggregate in time_bucket.values():
wu = self._make_usage_stat(aggregate)
self.report.report_workunit(wu)
yield wu
def _make_bigquery_clients(self) -> List[GCPLoggingClient]:
# See https://github.com/googleapis/google-cloud-python/issues/2674 for
# why we disable gRPC here.
client_options = self.config.extra_client_options.copy()
client_options["_use_grpc"] = False
if self.config.projects is None:
return [
GCPLoggingClient(**client_options),
]
else:
return [
GCPLoggingClient(**client_options, project=project_id)
for project_id in self.config.projects
]
def _get_bigquery_log_entries(
self, clients: List[GCPLoggingClient]
) -> Iterable[AuditLogEntry]:
# We adjust the filter values a bit, since we need to make sure that the join
# between query events and read events is complete. For example, this helps us
# handle the case where the read happens within our time range but the query
# completion event is delayed and happens after the configured end time.
# Can safely access the first index of the allow list as it by default contains ".*"
use_allow_filter = self.config.table_pattern and (
len(self.config.table_pattern.allow) > 1
or self.config.table_pattern.allow[0] != ".*"
)
use_deny_filter = self.config.table_pattern and self.config.table_pattern.deny
allow_regex = (
BQ_FILTER_REGEX_ALLOW_TEMPLATE.format(
allow_pattern=self.config.get_allow_pattern_string()
)
if use_allow_filter
else ""
)
deny_regex = (
BQ_FILTER_REGEX_DENY_TEMPLATE.format(
deny_pattern=self.config.get_deny_pattern_string(),
logical_operator="AND" if use_allow_filter else "",
)
if use_deny_filter
else ("" if use_allow_filter else "FALSE")
)
logger.debug(
f"use_allow_filter={use_allow_filter}, use_deny_filter={use_deny_filter}, "
f"allow_regex={allow_regex}, deny_regex={deny_regex}"
)
filter = BQ_FILTER_RULE_TEMPLATE.format(
start_time=(
self.config.start_time - self.config.max_query_duration
).strftime(BQ_DATETIME_FORMAT),
end_time=(self.config.end_time + self.config.max_query_duration).strftime(
BQ_DATETIME_FORMAT
),
allow_regex=allow_regex,
deny_regex=deny_regex,
)
logger.debug(filter)
def get_entry_timestamp(entry: AuditLogEntry) -> datetime:
return entry.timestamp
list_entry_generators_across_clients: List[Iterable[AuditLogEntry]] = list()
for client in clients:
try:
list_entries: Iterable[AuditLogEntry] = client.list_entries(
filter_=filter, page_size=self.config.log_page_size
)
list_entry_generators_across_clients.append(list_entries)
except Exception as e:
                logger.warning(
                    f"Encountered exception retrieving AuditLogEntries for project {client.project}",
                    e,
                )
                self.report.report_failure(
                    f"{client.project}", f"unable to retrieve log entries {e}"
                )
i: int = 0
entry: AuditLogEntry
for i, entry in enumerate(
heapq.merge(
*list_entry_generators_across_clients,
key=get_entry_timestamp,
)
):
if i == 0:
logger.info("Starting log load from BigQuery")
yield entry
logger.info(f"Finished loading {i} log entries from BigQuery")
def _create_operation_aspect_work_unit(
self, event: QueryEvent
) -> Optional[MetadataWorkUnit]:
if event.statementType in OPERATION_STATEMENT_TYPES and event.destinationTable:
destination_table: BigQueryTableRef
try:
destination_table = event.destinationTable.remove_extras()
except Exception as e:
self.report.report_warning(
str(event.destinationTable),
f"Failed to clean up destination table, {e}",
)
return None
last_updated_timestamp: int = int(event.timestamp.timestamp() * 1000)
affected_datasets = []
if event.referencedTables:
for table in event.referencedTables:
try:
affected_datasets.append(
_table_ref_to_urn(table.remove_extras(), self.config.env)
)
except Exception as e:
self.report.report_warning(
str(table),
f"Failed to clean up table, {e}",
)
operation_aspect = OperationClass(
timestampMillis=last_updated_timestamp,
lastUpdatedTimestamp=last_updated_timestamp,
actor=builder.make_user_urn(event.actor_email.split("@")[0]),
operationType=OPERATION_STATEMENT_TYPES[event.statementType],
affectedDatasets=affected_datasets,
)
mcp = MetadataChangeProposalWrapper(
entityType="dataset",
aspectName="operation",
changeType=ChangeTypeClass.UPSERT,
entityUrn=_table_ref_to_urn(destination_table, self.config.env),
aspect=operation_aspect,
)
return MetadataWorkUnit(
id=f"operation-aspect-{destination_table}-{event.timestamp.isoformat()}",
mcp=mcp,
)
return None
def _parse_bigquery_log_entries(
self, entries: Iterable[AuditLogEntry]
) -> Iterable[Union[ReadEvent, QueryEvent, MetadataWorkUnit]]:
num_read_events: int = 0
num_query_events: int = 0
for entry in entries:
event: Optional[Union[ReadEvent, QueryEvent]] = None
try:
if ReadEvent.can_parse_entry(entry):
event = ReadEvent.from_entry(entry)
num_read_events += 1
elif QueryEvent.can_parse_entry(entry):
event = QueryEvent.from_entry(entry)
num_query_events += 1
wu = self._create_operation_aspect_work_unit(event)
if wu:
yield wu
else:
self.report.report_warning(
f"{entry.log_name}-{entry.insert_id}",
"Log entry cannot be parsed as either ReadEvent or QueryEvent.",
)
logger.warning(
f"Log entry cannot be parsed as either ReadEvent or QueryEvent: {entry!r}"
)
except Exception as e:
self.report.report_failure(
f"{entry.log_name}-{entry.insert_id}",
f"unable to parse log entry: {entry!r}, exception: {e}",
)
logger.error("Error while parsing GCP log entries", e)
if event:
yield event
logger.info(
f"Parsed {num_read_events} ReadEvents and {num_query_events} QueryEvents"
)
def _join_events_by_job_id(
self, events: Iterable[Union[ReadEvent, QueryEvent]]
) -> Iterable[ReadEvent]:
# If caching eviction is enabled, we only store the most recently used query events,
# which are used when resolving job information within the read events.
query_jobs: MutableMapping[str, QueryEvent]
if self.config.query_log_delay:
query_jobs = cachetools.LRUCache(maxsize=5 * self.config.query_log_delay)
else:
query_jobs = {}
def event_processor(
events: Iterable[Union[ReadEvent, QueryEvent]]
) -> Iterable[ReadEvent]:
for event in events:
if isinstance(event, QueryEvent):
if event.jobName:
query_jobs[event.jobName] = event
else:
yield event
# TRICKY: To account for the possibility that the query event arrives after
# the read event in the audit logs, we wait for at least `query_log_delay`
# additional events to be processed before attempting to resolve BigQuery
# job information from the logs. If `query_log_delay` is None, it gets treated
# as an unlimited delay, which prioritizes correctness at the expense of memory usage.
original_read_events = event_processor(events)
delayed_read_events = delayed_iter(
original_read_events, self.config.query_log_delay
)
num_joined: int = 0
for event in delayed_read_events:
if (
event.timestamp < self.config.start_time
or event.timestamp >= self.config.end_time
):
continue
if event.jobName:
if event.jobName in query_jobs:
# Join the query log event into the table read log event.
num_joined += 1
event.query = query_jobs[event.jobName].query
# TODO also join into the query itself for column references
else:
self.report.report_warning(
str(event.resource),
"failed to match table read event with job; try increasing `query_log_delay` or `max_query_duration`",
)
yield event
logger.info(f"Number of read events joined with query events: {num_joined}")
def _aggregate_enriched_read_events(
self, events: Iterable[ReadEvent]
) -> Dict[datetime, Dict[BigQueryTableRef, AggregatedDataset]]:
# TODO: handle partitioned tables
# TODO: perhaps we need to continuously prune this, rather than
# storing it all in one big object.
datasets: Dict[
datetime, Dict[BigQueryTableRef, AggregatedDataset]
] = collections.defaultdict(dict)
num_aggregated: int = 0
for event in events:
floored_ts = get_time_bucket(event.timestamp, self.config.bucket_duration)
resource: Optional[BigQueryTableRef] = None
try:
resource = event.resource.remove_extras()
except Exception as e:
self.report.report_warning(
str(event.resource), f"Failed to clean up resource, {e}"
)
logger.warning(f"Failed to process event {str(event.resource)}", e)
continue
if resource.is_anonymous():
logger.debug(f"Dropping temporary table {resource}")
self.report.report_dropped(str(resource))
continue
agg_bucket = datasets[floored_ts].setdefault(
resource,
AggregatedDataset(bucket_start_time=floored_ts, resource=resource),
)
agg_bucket.add_read_entry(event.actor_email, event.query, event.fieldsRead)
num_aggregated += 1
logger.info(f"Total number of events aggregated = {num_aggregated}.")
bucket_level_stats: str = "\n\t" + "\n\t".join(
[
f'bucket:{db.strftime("%m-%d-%Y:%H:%M:%S")}, size={len(ads)}'
for db, ads in datasets.items()
]
)
logger.debug(
f"Number of buckets created = {len(datasets)}. Per-bucket details:{bucket_level_stats}"
)
return datasets
def _make_usage_stat(self, agg: AggregatedDataset) -> MetadataWorkUnit:
return agg.make_usage_workunit(
self.config.bucket_duration,
lambda resource: _table_ref_to_urn(resource, self.config.env),
self.config.top_n_queries,
)
def get_report(self) -> SourceReport:
return self.report
|
the-stack_0_20568 | #! /usr/bin/env python3
import sqlite3
import os
from flask import Flask
from flask import render_template
from flask import redirect
app = Flask(__name__)
db_path = '{}links.db'.format(os.getenv('IRC_db_path', './'))
channel = os.getenv('IRC_channel', '#linkgrabber')
server = os.getenv('IRC_server', 'irc.libera.chat')
@app.route('/')
def index():
return redirect('/1')
@app.route('/<int:page_id>')
def page(page_id):
conn = sqlite3.connect(db_path)
c = conn.cursor()
links = []
limit = str(20)
offset = str((page_id * 20) - 20)
for row in c.execute('''SELECT * FROM links ORDER BY rowid
DESC LIMIT {} OFFSET {}'''.format(limit, offset)):
links.append(row)
c.close()
next_page = page_id + 1
if page_id - 1 < 1:
previous_page = 1
else:
previous_page = page_id - 1
return render_template('links.html',
links=links,
server=server,
channel=channel,
next_page=next_page,
previous_page=previous_page)
@app.route('/<nick>')
def nick_base_page(nick):
return redirect('/{}/1'.format(nick))
@app.route('/<nick>/<int:page_id>')
def nick_page(nick, page_id):
conn = sqlite3.connect(db_path)
c = conn.cursor()
links = []
    limit = 20
    offset = (page_id * 20) - 20
    # Parameterised query so the user-supplied nick cannot inject SQL
    for row in c.execute('''SELECT * FROM links WHERE nick = ? ORDER BY rowid
                         DESC LIMIT ? OFFSET ?''', (nick, limit, offset)):
        links.append(row)
c.close()
next_page = page_id + 1
if page_id - 1 < 1:
previous_page = 1
else:
previous_page = page_id - 1
return render_template('links.html',
links=links,
server=server,
channel=channel,
nick=nick,
next_page=next_page,
previous_page=previous_page)
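# --- Run sketch (illustrative only) ---
# The app is typically served via the Flask CLI (FLASK_APP=<this file> flask run);
# running it directly would look roughly like:
#
# if __name__ == '__main__':
#     app.run(host='0.0.0.0', port=5000)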
|
the-stack_0_20569 | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def hasCycle(self, head: ListNode) -> bool:
        # Marks visited nodes by overwriting val with a sentinel larger than any
        # value allowed by the problem constraints; note this destroys the list's
        # original values.
        while head is not None:
            if head.val > 100000:
                return True
            head.val = 1000000
            head = head.next
        return False
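# --- Alternative sketch (not part of the original solution) ---
# Floyd's tortoise-and-hare detection finds a cycle without mutating node values:
#
# def has_cycle_two_pointers(head: ListNode) -> bool:
#     slow = fast = head
#     while fast is not None and fast.next is not None:
#         slow = slow.next
#         fast = fast.next.next
#         if slow is fast:
#             return True
#     return False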
|
the-stack_0_20570 | import pytest
from web3._utils.toolz import (
identity,
)
from web3.exceptions import (
MismatchedABI,
NoABIFound,
NoABIFunctionsFound,
)
def deploy(web3, Contract, apply_func=identity, args=None):
args = args or []
deploy_txn = Contract.constructor(*args).transact()
deploy_receipt = web3.eth.waitForTransactionReceipt(deploy_txn)
assert deploy_receipt is not None
address = apply_func(deploy_receipt['contractAddress'])
contract = Contract(address=address)
assert contract.address == address
assert len(web3.eth.getCode(contract.address)) > 0
return contract
@pytest.fixture()
def address(web3):
return web3.eth.accounts[1]
@pytest.fixture()
def math_contract(web3, MathContract, address_conversion_func):
return deploy(web3, MathContract, address_conversion_func)
@pytest.fixture()
def caller_tester_contract(web3, CallerTesterContract, address_conversion_func):
return deploy(web3, CallerTesterContract, address_conversion_func)
@pytest.fixture()
def transaction_dict(web3, address):
return {
'from': address,
'gas': 210000,
'gasPrice': web3.toWei(.001, 'ether'),
'value': 12345,
}
def test_caller_default(math_contract):
result = math_contract.caller.add(3, 5)
assert result == 8
def test_caller_with_parens(math_contract):
result = math_contract.caller().add(3, 5)
assert result == 8
def test_caller_with_no_abi(web3):
contract = web3.eth.contract()
with pytest.raises(NoABIFound):
contract.caller.thisFunctionDoesNotExist()
def test_caller_with_no_abi_and_parens(web3):
contract = web3.eth.contract()
with pytest.raises(NoABIFound):
contract.caller().thisFunctionDoesNotExist()
def test_caller_with_empty_abi_and_parens(web3):
contract = web3.eth.contract(abi=[])
with pytest.raises(NoABIFunctionsFound):
contract.caller().thisFunctionDoesNotExist()
def test_caller_with_empty_abi(web3):
contract = web3.eth.contract(abi=[])
with pytest.raises(NoABIFunctionsFound):
contract.caller.thisFunctionDoesNotExist()
def test_caller_with_a_nonexistent_function(math_contract):
contract = math_contract
with pytest.raises(MismatchedABI):
contract.caller.thisFunctionDoesNotExist()
def test_caller_with_block_identifier(web3, math_contract):
start_num = web3.eth.getBlock('latest').number
assert math_contract.caller.counter() == 0
web3.provider.make_request(method='evm_mine', params=[5])
math_contract.functions.increment().transact()
math_contract.functions.increment().transact()
output1 = math_contract.caller(block_identifier=start_num + 6).counter()
output2 = math_contract.caller(block_identifier=start_num + 7).counter()
assert output1 == 1
assert output2 == 2
def test_caller_with_block_identifier_and_transaction_dict(web3,
caller_tester_contract,
transaction_dict,
address):
start_num = web3.eth.getBlock('latest').number
assert caller_tester_contract.caller.counter() == 0
web3.provider.make_request(method='evm_mine', params=[5])
caller_tester_contract.functions.increment().transact()
block_id = start_num + 6
contract = caller_tester_contract.caller(
transaction=transaction_dict,
block_identifier=block_id
)
sender, _, gasLeft, value, block_num = contract.returnMeta()
counter = contract.counter()
assert sender == address
assert gasLeft <= transaction_dict['gas']
assert value == transaction_dict['value']
assert block_num == block_id
assert counter == 1
def test_caller_with_transaction_keyword(web3,
caller_tester_contract,
transaction_dict,
address):
contract = caller_tester_contract.caller(transaction=transaction_dict)
sender, _, gasLeft, value, _ = contract.returnMeta()
assert address == sender
assert gasLeft <= transaction_dict['gas']
assert value == transaction_dict['value']
def test_caller_with_dict_but_no_transaction_keyword(web3,
caller_tester_contract,
transaction_dict,
address):
contract = caller_tester_contract.caller(transaction_dict)
sender, _, gasLeft, value, _ = contract.returnMeta()
assert address == sender
assert gasLeft <= transaction_dict['gas']
assert value == transaction_dict['value']
def test_caller_with_args_and_no_transaction_keyword(web3,
caller_tester_contract,
transaction_dict,
address):
contract = caller_tester_contract.caller(transaction_dict)
sender, _, gasLeft, value, _ = contract.returnMeta()
assert address == sender
assert gasLeft <= transaction_dict['gas']
assert value == transaction_dict['value']
add_result = contract.add(3, 5)
assert add_result == 8
|
the-stack_0_20571 | import numpy as np
from LVMvSSGP_model_SV2_IP import LVMvSSGP
import scipy.io as sio
from time import time
def extend(x, y, z = {}):
dictx=dict(x.items())
dicty=dict(y.items())
dictz=dict(z.items())
dictx.update(dicty)
dictx.update(dictz)
return dictx
pool, global_f, global_g = None, None, None
def eval_f_LL(MU, X, params):
return global_f['LL'](**extend({'X': X}, params))
def eval_g_LL(name, MU, X, params):
return global_g[name]['LL'](**extend({'X': X}, params))
class LVMvSSGP_opt():
def __init__(self, dataset, run, Q, D, N, M, lower_bound_values, save_iter, inputs, opt_params, fixed_params):
self.dataset = dataset
self.run = run
self.LVMvssgp, self.N, self.M, self.fixed_params, self.lower_bound_values, self.save_iter = LVMvSSGP(Q, D, N, M), N, M, fixed_params, lower_bound_values, save_iter
self.inputs = inputs
self.opt_param_names = [n for n,_ in opt_params.items()]
opt_param_values = [np.atleast_2d(opt_params[n]) for n in self.opt_param_names]
self.shapes = [v.shape for v in opt_param_values]
self.sizes = [sum([np.prod(x) for x in self.shapes[:i]]) for i in range(len(self.shapes)+1)]
self.callback_counter = [0]
def unpack(self, x):
x_param_values = [np.squeeze(x[self.sizes[i-1]:self.sizes[i]].reshape(self.shapes[i-1])) for i in range(1,len(self.shapes)+1)]
params = {n:v for (n,v) in zip(self.opt_param_names, x_param_values)}
return params
def func(self, x):
params = extend(self.fixed_params, self.unpack(x))
params = extend(self.inputs, params)
LL, KL = self.LVMvssgp.f['LL'](**params), self.LVMvssgp.f['KL'](**params)
return -(LL - KL)
def fprime(self, x):
grads, params = [], extend(self.fixed_params, self.unpack(x))
for n in self.opt_param_names:
params = extend(self.inputs, params)
dLL, dKL = self.LVMvssgp.g[n]['LL'](**params), self.LVMvssgp.g[n]['KL'](**params)
grads += [-(dLL - dKL)]
return np.concatenate([grad.flatten() for grad in grads])
def callback(self, x):
opt_params = self.unpack(x)
print(opt_params)
params = extend(self.inputs, self.fixed_params, opt_params)
if self.lower_bound_values == 1:
LL = self.LVMvssgp.f['LL'](**params)
KL = self.LVMvssgp.f['KL'](**params)
print(str(self.callback_counter) + '::' + str(-(LL - KL)))
if self.save_iter == 1:
sio.savemat('temp_SV2_' + self.dataset + self.run + str(self.callback_counter) + '.mat', {'opt_params': params, 'bound': str(-(LL - KL))})
else:
sio.savemat('temp_SV2' + self.dataset + self.run + '.mat', {'opt_params': params})
else:
print(str(self.callback_counter))
if self.save_iter == 1:
sio.savemat('temp_SV2_' + self.dataset + self.run + str(self.callback_counter) + '.mat', {'opt_params': params})
else:
sio.savemat('temp_SV2' + self.dataset + self.run + '.mat', {'opt_params': params})
self.callback_counter[0] += 1
x = np.concatenate([np.atleast_2d(opt_params[n]).flatten() for n in self.opt_param_names])
return x
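# --- Usage sketch (illustrative only; assumes scipy is installed and that the
# dataset/run identifiers and the inputs/opt_params/fixed_params dictionaries
# have already been built for the model) ---
#
# from scipy.optimize import minimize
# opt = LVMvSSGP_opt(dataset, run, Q, D, N, M, lower_bound_values=1, save_iter=0,
#                    inputs=inputs, opt_params=opt_params, fixed_params=fixed_params)
# x0 = np.concatenate([np.atleast_2d(opt_params[n]).flatten() for n in opt.opt_param_names])
# res = minimize(opt.func, x0, jac=opt.fprime, callback=opt.callback, method='L-BFGS-B')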
|
the-stack_0_20572 | import os
import twint
import datetime
import pathlib
from os import listdir
from os.path import isfile, join
import extractor
import counter
import drawer
import threading
import time
import asyncio
class GovernmentWorker(threading.Thread):
def __init__(self, event, loop, workspace=".", search="Elecciones2020", since="2019-10-01", interval_s=30*60):
threading.Thread.__init__(self)
self.loop = loop
self.stopped = event
self.workspace = workspace
self.pics_folder = join(self.workspace, "pics")
self.dataframes_folder = join(self.workspace, "dataframes")
self.search = search
self.since = since
self.interval_time = interval_s
self.freqs = []
pathlib.Path(self.pics_folder).mkdir(exist_ok=True)
pathlib.Path(self.dataframes_folder).mkdir(exist_ok=True)
onlyfiles = [f for f in listdir(self.dataframes_folder) if isfile(
join(self.dataframes_folder, f))]
if len(onlyfiles) > 0:
times = [datetime.datetime.strptime(file, "%d-%m-%Y_%H:%M.csv")
for file in onlyfiles]
times.sort(reverse=True)
self.last_dataframe = times[0].strftime("%d-%m-%Y_%H:%M.csv")
else:
self.save_new_snapshot()
print(self.last_dataframe)
self.perform_generators()
self.dfs = [f for f in listdir(self.dataframes_folder) if isfile(
join(self.dataframes_folder, f))]
def save_new_snapshot(self):
c = twint.Config()
c.Search = self.search
c.Since = self.since
c.Store_csv = True
c.Output = join(self.dataframes_folder, "temp.csv")
twint.run.Search(c)
name = datetime.datetime.now().strftime("%d-%m-%Y_%H:%M.csv")
os.rename(c.Output, join(self.dataframes_folder, name))
self.last_dataframe = name
self.dfs = [f for f in listdir(self.dataframes_folder) if isfile(
join(self.dataframes_folder, f))]
def perform_generators(self):
dataframe_filename = join(self.dataframes_folder, self.last_dataframe)
pic = self.last_dataframe.replace(".csv", ".png")
draw_filename = join(self.pics_folder, pic)
text = extractor.extract_tweets_from_csv(dataframe_filename)
self.freqs = counter.freq_str(text, minimal_counts=2)
drawer.draw_word_cloud(text, filename=draw_filename)
def last_update(self):
return datetime.datetime.strptime(self.last_dataframe, "%d-%m-%Y_%H:%M.csv")
def available_dataframes(self):
return self.dfs
def path_of_dataframe(self, df):
if not df in self.dfs:
return ""
return join(self.dataframes_folder, df)
def run(self):
asyncio.set_event_loop(self.loop)
self.save_new_snapshot()
while not self.stopped.wait(self.interval_time):
print("executing snapshot")
self.save_new_snapshot()
self.perform_generators()
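# --- Usage sketch (illustrative only; assumes network access for twint) ---
#
# stop_event = threading.Event()
# loop = asyncio.new_event_loop()
# worker = GovernmentWorker(stop_event, loop, workspace="./workspace",
#                           search="Elecciones2020", interval_s=30 * 60)
# worker.start()        # takes an initial snapshot, then repeats every interval
# ...
# stop_event.set()      # stop after the current wait expires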
|
the-stack_0_20575 | # coding: utf-8
from __future__ import unicode_literals
import re
from .Lemmatizer import Lemmatizer
from .WordTokenizer import WordTokenizer
from .utils import maketrans
compile_patterns = lambda patterns: [(re.compile(pattern), repl) for pattern, repl in patterns]
class Normalizer(object):
def __init__(self, remove_extra_spaces=True, persian_style=True, persian_numbers=True, remove_diacritics=True, affix_spacing=True, token_based=False, punctuation_spacing=True):
self._punctuation_spacing = punctuation_spacing
self._affix_spacing = affix_spacing
self._token_based = token_based
translation_src, translation_dst = ' ىكي“”', ' یکی""'
if persian_numbers:
translation_src += '0123456789%'
translation_dst += '۰۱۲۳۴۵۶۷۸۹٪'
self.translations = maketrans(translation_src, translation_dst)
if self._token_based:
lemmatizer = Lemmatizer()
self.words = lemmatizer.words
self.verbs = lemmatizer.verbs
self.tokenizer = WordTokenizer(join_verb_parts=False)
self.suffixes = {'ی', 'ای', 'ها', 'های', 'تر', 'تری', 'ترین', 'گر', 'گری', 'ام', 'ات', 'اش'}
self.character_refinement_patterns = []
if remove_extra_spaces:
self.character_refinement_patterns.extend([
(r' +', ' '), # remove extra spaces
(r'\n\n+', '\n\n'), # remove extra newlines
(r'[ـ\r]', ''), # remove keshide, carriage returns
])
if persian_style:
self.character_refinement_patterns.extend([
('"([^\n"]+)"', r'«\1»'), # replace quotation with gyoome
('([\d+])\.([\d+])', r'\1٫\2'), # replace dot with momayez
(r' ?\.\.\.', ' …'), # replace 3 dots
])
if remove_diacritics:
self.character_refinement_patterns.append(
('[\u064B\u064C\u064D\u064E\u064F\u0650\u0651\u0652]', ''), # remove FATHATAN, DAMMATAN, KASRATAN, FATHA, DAMMA, KASRA, SHADDA, SUKUN
)
self.character_refinement_patterns = compile_patterns(self.character_refinement_patterns)
punc_after, punc_before = r'\.:!،؛؟»\]\)\}', r'«\[\(\{'
if punctuation_spacing:
self.punctuation_spacing_patterns = compile_patterns([
('" ([^\n"]+) "', r'"\1"'), # remove space before and after quotation
(' (['+ punc_after +'])', r'\1'), # remove space before
('(['+ punc_before +']) ', r'\1'), # remove space after
('(['+ punc_after[:3] +'])([^ '+ punc_after +'\d۰۱۲۳۴۵۶۷۸۹])', r'\1 \2'), # put space after . and :
('(['+ punc_after[3:] +'])([^ '+ punc_after +'])', r'\1 \2'), # put space after
('([^ '+ punc_before +'])(['+ punc_before +'])', r'\1 \2'), # put space before
])
if affix_spacing:
self.affix_spacing_patterns = compile_patterns([
(r'([^ ]ه) ی ', r'\1ی '), # fix ی space
(r'(^| )(ن?می) ', r'\1\2'), # put zwnj after می, نمی
(r'(?<=[^\n\d '+ punc_after + punc_before +']{2}) (تر(ین?)?|گری?|های?)(?=[ \n'+ punc_after + punc_before +']|$)', r'\1'), # put zwnj before تر, تری, ترین, گر, گری, ها, های
(r'([^ ]ه) (ا(م|یم|ش|ند|ی|ید|ت))(?=[ \n'+ punc_after +']|$)', r'\1\2'), # join ام, ایم, اش, اند, ای, اید, ات
])
def normalize(self, text):
text = self.character_refinement(text)
if self._affix_spacing:
text = self.affix_spacing(text)
if self._token_based:
tokens = self.tokenizer.tokenize(text.translate(self.translations))
text = ' '.join(self.token_spacing(tokens))
if self._punctuation_spacing:
text = self.punctuation_spacing(text)
return text
def character_refinement(self, text):
"""
>>> normalizer = Normalizer()
>>> normalizer.character_refinement('اصلاح كاف و ياي عربي')
'اصلاح کاف و یای عربی'
>>> normalizer.character_refinement('عراق سال 2012 قراردادی به ارزش "4.2 میلیارد دلار" برای خرید تجهیزات نظامی با روسیه امضا کرد.')
'عراق سال ۲۰۱۲ قراردادی به ارزش «۴٫۲ میلیارد دلار» برای خرید تجهیزات نظامی با روسیه امضا کرد.'
>>> normalizer.character_refinement('رمــــان')
'رمان'
>>> normalizer.character_refinement('بُشقابِ مَن را بِگیر')
'بشقاب من را بگیر'
"""
text = text.translate(self.translations)
for pattern, repl in self.character_refinement_patterns:
text = pattern.sub(repl, text)
return text
def punctuation_spacing(self, text):
"""
>>> normalizer = Normalizer()
>>> normalizer.punctuation_spacing('اصلاح ( پرانتزها ) در متن .')
'اصلاح (پرانتزها) در متن.'
>>> normalizer.punctuation_spacing('نسخه 0.5 در ساعت 22:00 تهران،1396')
'نسخه 0.5 در ساعت 22:00 تهران، 1396'
>>> normalizer.punctuation_spacing('اتریش ۷.۹ میلیون.')
'اتریش ۷.۹ میلیون.'
"""
for pattern, repl in self.punctuation_spacing_patterns:
text = pattern.sub(repl, text)
return text
def affix_spacing(self, text):
"""
>>> normalizer = Normalizer()
>>> normalizer.affix_spacing('خانه ی پدری')
'خانهی پدری'
>>> normalizer.affix_spacing('فاصله میان پیشوند ها و پسوند ها را اصلاح می کند.')
'فاصله میان پیشوندها و پسوندها را اصلاح میکند.'
>>> normalizer.affix_spacing('می روم')
'میروم'
>>> normalizer.affix_spacing('حرفه ای')
'حرفهای'
>>> normalizer.affix_spacing('محبوب ترین ها')
'محبوبترینها'
"""
for pattern, repl in self.affix_spacing_patterns:
text = pattern.sub(repl, text)
return text
def token_spacing(self, tokens):
"""
>>> normalizer = Normalizer(token_based=True)
>>> normalizer.token_spacing(['کتاب', 'ها'])
['کتابها']
>>> normalizer.token_spacing(['او', 'می', 'رود'])
['او', 'میرود']
>>> normalizer.token_spacing(['ماه', 'می', 'سال', 'جدید'])
['ماه', 'می', 'سال', 'جدید']
>>> normalizer.token_spacing(['اخلال', 'گر'])
['اخلالگر']
>>> normalizer.token_spacing(['پرداخت', 'شده', 'است'])
['پرداخت', 'شده', 'است']
>>> normalizer.token_spacing(['زمین', 'لرزه', 'ای'])
['زمینلرزهای']
"""
result = []
for t, token in enumerate(tokens):
joined = False
if result:
token_pair = result[-1]+''+token
if token_pair in self.verbs or token_pair in self.words and self.words[token_pair][0] > 0:
joined = True
if t < len(tokens)-1 and token+'_'+tokens[t+1] in self.verbs:
joined = False
elif token in self.suffixes and result[-1] in self.words:
joined = True
if joined:
result.pop()
result.append(token_pair)
else:
result.append(token)
return result
|
the-stack_0_20577 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import cgi
#import cgitb; cgitb.enable() # for troubleshooting
import re, sqlite3, collections
import os, sys
import operator
from collections import defaultdict as dd
import warnings
import unicodedata
from ntumc_gatekeeper import placeholders_for as _placeholders_for
#############################################################
# Configuration
#############################################################
#tagcgi = 'tag-lex.cgi' # DEPRECATED - WILL BE REMOVED SOON
taglcgi="tag-lexs.cgi"
tagwcgi="tag-word.cgi"
showsentcgi="show-sent.cgi"
logincgi="login.cgi"
### reference to wn-grid (search .cgi)
omwcgi = "wn-gridx.cgi"
# wordnets
wncgi = "wn-gridx.cgi"
wndb = "../db/wn-ntumc.db"
#############################################################
# Utilities for debugging
#############################################################
# 2014-06-12 [Tuan Anh]
def jilog(msg):
sys.stderr.write((u"%s\n" % msg))
try:
with open("../log/ntumc.txt", "a", encoding='utf-8') as logfile:
logfile.write(u"%s\n" % msg)
except Exception as ex:
sys.stderr.write(str(ex))
pass
# I added a timer class here for performance optimisation
import time
class Timer:
def __init__(self):
self.start_time = time.time()
self.end_time = time.time()
def start(self):
self.start_time = time.time()
return self
def stop(self):
self.end_time = time.time()
return self
def __str__(self):
return "Execution time: %.2f sec(s)" % (self.end_time - self.start_time)
def log(self, task_note = ''):
jilog(u"%s - Note=[%s]\n" % (self, task_note))
return self
#############################################################
# NTU-MC shared functions
#############################################################
def expandlem (lemma): ### keep in sync with tag-lexs
lems=set()
lems.add(lemma)
### case
lems.add(lemma.lower())
lems.add(lemma.upper())
lems.add(lemma.title())
### hyphen, underbar, space
lems.add(lemma.replace('-',''))
lems.add(lemma.replace('-',' '))
lems.add(lemma.replace(' ','-'))
lems.add(lemma.replace('_',''))
lems.add(lemma.replace(' ',''))
### normalize unicode
lems.add(unicodedata.normalize('NFKC', lemma))
lems.add(unicodedata.normalize('NFKC', lemma).lower())
lems.add(unicodedata.normalize('NFKC', lemma).upper())
lems.add(unicodedata.normalize('NFKC', lemma).title())
# lems.add(lemma.replace('_',u'∥'))
# lems.add(lemma.replace('-',u'∥'))
# lems.add(lemma.replace(u'・',u'∥'))
# lems.add(lemma.replace(u'ー',u'∥'))
# lems.add(lemma.replace(' ',u'∥'))
return lems
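# Example (illustrative): expandlem('ice-cream') yields the case, separator and
# Unicode-normalised variants {'ice-cream', 'Ice-Cream', 'ICE-CREAM', 'icecream', 'ice cream'}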
def pos2wn (pos, lang, lemma=''):
### FIXME: check and document --- Change POS for VN?
if lang == 'jpn':
if pos in [u'名詞-形容動詞語幹', u"形容詞-自立", u"連体詞"]:
return 'a'
elif pos in [u"名詞-サ変接続", u"名詞-ナイ形容詞語幹",
u"名詞-一般", u"名詞-副詞可能",
u"名詞-接尾-一般", u"名詞-形容動詞語幹",
u"名詞-数", u"記号-アルファベット"]:
return 'n'
elif pos in [u"動詞-自立", u"動詞-サ変接続"]:
return 'v'
elif pos in [u"副詞-一般", u"副詞-助詞類接続"]:
return 'r'
else:
return 'x'
elif lang=='eng':
if pos == 'VAX': #local tag for auxiliaries
return 'x'
elif pos in ['CD', 'NN', 'NNS', 'NNP', 'NNPS', 'WP', 'PRP']:
# include proper nouns and pronouns
## fixme flag for proper nouns
return 'n'
elif pos.startswith('V'):
return('v')
elif pos.startswith('J') or pos in ['WDT', 'WP$', 'PRP$', 'PDT', 'PRP'] or \
(pos=='DT' and not lemma in ['a', 'an', 'the']): ### most determiners
return('a')
elif pos.startswith('RB') or pos == 'WRB':
return('r')
else:
return 'x'
elif lang=='cmn':
if pos in "NN NN2 CD DT PN PN2 LC M M2 NR NT".split():
return 'n'
elif pos in "VV VV2 VC VE".split():
return 'v'
elif pos in "JJ JJ2 OD VA VA2".split():
return 'a'
elif pos in "AD AD2 ETC ON".split():
return 'r'
else:
return 'x'
elif lang=='vie':
if pos in "N Np Nc Nu Ny B".split():
return 'n'
elif pos in "V".split():
return 'v'
elif pos in "A".split():
return 'a'
elif pos in "L R".split():
return 'r'
else:
return 'x'
elif lang in ('ind', 'zsm'):
if pos in "nn nn2 nnc nng nnp nnu nns2 prp wp prl vnb".split():
return 'n'
elif pos in "vbi vbt vbd vbb vbl".split():
return 'v'
elif pos in "jj jj2 jjs jjs2 jje jje2 dt".split():
return 'a'
elif pos in "rb".split():
return 'r'
else:
return 'x'
else:
return 'u'
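# Examples (illustrative):
#   pos2wn('NNS', 'eng')        # -> 'n'
#   pos2wn('JJ', 'cmn')         # -> 'a'
#   pos2wn('DT', 'eng', 'the')  # -> 'x' (articles are left untagged)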
#half = u' 0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~'
#full = u' 0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!゛#$%&()*+、ー。/:;〈=〉?@[\\]^_‘{|}~'
#half2full = dict((ord(x[0]), x[1]) for x in zip(half, full)
#full2half = dict((ord(x[0]), x[1]) for x in zip(full, half)
#print u'Hello, world!'.translate(half2full)
###
### tagging functions
###
# Add more tags in
mtags = [ 'e', 'x', 'w' ] + ["org", "loc", "per", "dat", "oth", "num", "dat:year"]
mtags_short = { "e":"e",
"x":"x",
"w":"w",
'org' : 'Org',
'loc': 'Loc',
'per': 'Per',
'dat': 'Dat',
'oth': 'Oth',
'num': 'Num',
'dat:year': 'Year',
'' : 'Not tagged',
None : 'Not tagged'
}
mtags_human = { "e":"Error in the Corpus",
"x":"No need to tag",
"w":"Wordnet needs improvement",
'org' : 'Organization',
'loc': 'Location',
'per': 'Person',
'dat': 'Date/Time',
'oth': 'Other (Name)',
'num': 'Number',
'dat:year': 'Date: Year',
'' : 'Not tagged',
None : 'Not tagged'
}
# LMC: THIS IS A TEST TO REDESIGN THE TAG BOX
def tbox(sss, cid, wp, tag, com):
"""Create the box for tagging entries: return a string"""
box = "<span>"
for i, t in enumerate(sss):
box +="""<nobr><span style="color:#4D99E0;font-size:13px;
border-radius: 10px; background: #ededed;"
onchange="document.getElementById('tagword').submit();
return false">
<label for="cid_%s">%s<sub><font size='-2'>%s</font>
</sub></label>
<input type="radio" name="cid_%s" id="cid_%s"
value="%s" %s > </span> </nobr>
""" % (cid+str(i), str(i+1), t[-1],
cid, cid+str(i), t,
" checked " if t==tag else "")
for tk in mtags:
tv = mtags_human[tk]
box +=""" <nobr><span title="%s" style="color:#4D99E0;font-size:13px;
border-radius: 10px; background: #ededed;">
<label for="cid_%s"> %s </label>
<input type="radio" name="cid_%s" id="cid_%s" value="%s"
onchange="document.getElementById('tagword').submit();
return false" %s > </span> </nobr>
""" % (tv,
cid+tk,
mtags_short[tk] if tk in mtags_short else tk,
cid, cid+tk, tk,
" checked " if tk==tag else "")
# COMMENT
comv = com if com is not None else '';
box += """ <textarea style='font-size:12px; height: 25px; width: 100px;'
placeholder='Comment' onblur="document.getElementById('tagword').submit();"
title= 'Comment' name='com_%s'>%s</textarea>""" % (cid, comv)
box += "</span>"
# box += """<span style="color: #4D99E0;"
# onclick="document.getElementById('tagword').submit();">
# <i class='icon-ok-sign'></i></span>"""
return box
################################################################################
# 2016.02.25 LMC -- Checking Meta of CorpusDBs
################################################################################
def check_corpusdb(corpusdb):
""" This function takes a corpusdb argument of form 'eng', 'eng1',
'engB' (etc.), and returns 4 statements:
1) whether it exists (self or False)
2) version, i.e. if it's a master or copy ('master','A','B',...)
3) the master db associated with it (can be self)
4) the language of the database
5) the db path
"""
exists = False
if corpusdb.endswith('.db'):
dbpath = '../db/' + corpusdb
else:
dbpath = '../db/' + corpusdb + '.db'
if os.path.isfile(dbpath):
exists = corpusdb
if exists:
conn = sqlite3.connect(dbpath)
c = conn.cursor()
c.execute("""SELECT lang, version, master FROM meta""")
(lang, version, master) = c.fetchone()
else:
(lang, version, master) = ('unknown', 'unknown', 'unknown')
return (exists, version, master, lang, dbpath)
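# Example (hypothetical database name), unpacking the five return values:
#
#   exists, version, master, lang, dbpath = check_corpusdb('eng')
#   if exists:
#       print(lang, version, master, dbpath)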
################################################################################
################################################################################
# 2016.02.25 LMC -- Listable CorpusDBs
################################################################################
def all_corpusdb():
corpusdb_list = [('eng','English DB'),
# ('eng2','English2DB'),
('cmn','Chinese DB'),
('jpn','Japanese DB'),
('ita','Italian DB'),
('ind','Indonesian DB'),
('zsm','Malay DB')]
return corpusdb_list
################################################################################
###
### get the synsets for a lemma
###
def lem2ss(c, lem, lang):
"""return a list of possible synsets for lang; backing off to lang1
TODO(Wilson): Migrate to ntumc_tagdb.py?
"""
lems = list(expandlem(lem))
query = """
SELECT DISTINCT synset
FROM word
LEFT JOIN sense ON word.wordid = sense.wordid
WHERE lemma in (%s)
AND sense.lang = ?
ORDER BY freq DESC
""" % placeholders_for(lems)
c.execute(query, list(lems) + [lang])
rows = c.fetchall()
# if not rows and lang != lang1:
# w.execute("""SELECT distinct synset
# FROM word LEFT JOIN sense ON word.wordid = sense.wordid
# WHERE lemma in (%s) AND sense.lang = ? and sense.status is not 'old'
# ORDER BY freq DESC""" % ','.join('?'*len(lems)), (lems + [lang1]))
# rows = w.fetchall()
# com_all='FW:eng'
### sort by POS
return sorted([s[0] for s in rows], key=lambda x: x[-1])
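# Illustrative use (assumes 'c' is an open sqlite3 cursor on a wordnet DB with
# the word/sense tables queried above; the synset IDs are made-up examples):
#
#   synsets = lem2ss(c, 'dog', 'eng')   # e.g. ['02084071-n', '01317541-n', ...]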
def set_rest_x(c, usrname, sid, cid):
query = """
UPDATE concept
SET tag='x', usrname=?
WHERE ROWID=(
SELECT bcon.ROWID
FROM cwl AS a
INNER JOIN cwl AS b ON a.sid=b.sid AND a.wid=b.wid
LEFT JOIN concept AS acon ON a.sid=acon.sid AND a.cid=acon.cid
LEFT JOIN concept AS bcon ON b.sid=bcon.sid AND b.cid=bcon.cid
WHERE a.sid=?
AND a.cid=?
AND acon.tag NOT IN ('x', 'e')
AND bcon.tag IS NULL)
"""
c.execute(query, (usrname, sid,cid))
def _deprecating(old, new):
"""This would look cooler as a decorator..."""
warnings.warn(f'{old} has migrated to {new}', DeprecationWarning)
def placeholders_for(*args, **kwargs):
"""Depreciation wrapper for database._placeholders_for()"""
_deprecating('ntumc_util.placeholders_for()', 'databases.placeholders_for()')
return _placeholders_for(*args, **kwargs)
|
the-stack_0_20579 | import numpy as np
from scipy.integrate import odeint
import porousmedialab.vg as vg
def thetaFun(psi, pars):
if psi >= 0.:
Se = 1.
else:
Se = (1 + abs(psi * pars['alpha'])**pars['n'])**(-pars['m'])
return pars['thetaR'] + (pars['thetaS'] - pars['thetaR']) * Se
def CFun(psi, pars):
if psi >= 0.:
Se = 1.
else:
Se = (1 + abs(psi * pars['alpha'])**pars['n'])**(-pars['m'])
dSedh = pars['alpha'] * pars['m'] / \
(1 - pars['m']) * Se**(1 / pars['m']) * \
(1 - Se**(1 / pars['m']))**pars['m']
return Se * pars['Ss'] + (pars['thetaS'] - pars['thetaR']) * dSedh
def KFun(psi, pars):
if psi >= 0.:
Se = 1.
else:
Se = (1 + abs(psi * pars['alpha'])**pars['n'])**(-pars['m'])
return pars['Ks'] * Se**pars['neta'] * (1 - (1 - Se**(1 / pars['m']))**pars['m'])**2
thetaFun = np.vectorize(thetaFun)
CFun = np.vectorize(CFun)
KFun = np.vectorize(KFun)
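# A small sketch of how these retention/conductivity functions can be used on
# their own. The parameter values below are illustrative (rough van Genuchten
# parameters for a sandy soil), not taken from porousmedialab.vg.
def _demo_retention_curve():
    pars = {'thetaR': 0.05, 'thetaS': 0.41, 'alpha': 1.9, 'n': 1.31,
            'Ss': 1e-6, 'Ks': 0.5, 'neta': 0.5}
    pars['m'] = 1.0 - 1.0 / pars['n']
    psi = np.linspace(-10.0, 0.0, 5)   # pressure heads [m]
    theta = thetaFun(psi, pars)        # volumetric water content
    K = KFun(psi, pars)                # hydraulic conductivity
    return psi, theta, K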
class RichardsModel:
"""Unsaturated transport model"""
    def __init__(self, z, t, psi0, qTop=-0.01, qBot=[], psiTop=[], psiBot=[]):
        # Boundary conditions (use the values passed in rather than hard-coded defaults)
        self.qTop = qTop
        self.qBot = qBot
        self.psiTop = psiTop
        self.psiBot = psiBot
# soil type
self.p = vg.HygieneSandstone()
# Grid in space
self.dz = 0.1
self.ProfileDepth = 5
self.z = z # np.arange(self.dz / 2.0, self.ProfileDepth, self.dz)
self.n = z.size
# Grid in time
self.t = np.linspace(0, t, 2)
# Initial conditions
self.psi0 = psi0
def solve(self):
self.psi = odeint(self.RichardsEquation, self.psi0, self.t, args=(
self.dz, self.n, self.p, self.qTop, self.qBot, self.psiTop, self.psiBot), mxstep=500)
self.psi0 = self.psi[-1, :]
def RichardsEquation(self, psi, t, dz, n, p, qTop, qBot, psiTop, psiBot):
# Basic properties:
C = CFun(psi, p)
# initialize vectors:
q = np.zeros(n + 1)
# Upper boundary
if qTop == []:
KTop = KFun(np.zeros(1) + psiTop, p)
q[n] = -KTop * ((psiTop - psi[n - 1]) / dz * 2 + 1)
else:
q[n] = qTop
# Lower boundary
if qBot == []:
if psiBot == []:
# Free drainage
KBot = KFun(np.zeros(1) + psi[0], p)
q[0] = -KBot
else:
# Type 1 boundary
KBot = KFun(np.zeros(1) + psiBot, p)
q[0] = -KBot * ((psi[0] - psiBot) / dz * 2 + 1.0)
else:
# Type 2 boundary
q[0] = qBot
# Internal nodes
i = np.arange(0, n - 1)
Knodes = KFun(psi, p)
Kmid = (Knodes[i + 1] + Knodes[i]) / 2.0
j = np.arange(1, n)
q[j] = -Kmid * ((psi[i + 1] - psi[i]) / dz + 1.0)
# Continuity
i = np.arange(0, n)
dpsidt = (-(q[i + 1] - q[i]) / dz) / C
return dpsidt
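# Minimal usage sketch (illustrative grid and initial condition; assumes
# porousmedialab.vg provides HygieneSandstone() as used in __init__ above):
def _demo_richards_run():
    z = np.arange(0.05, 5.0, 0.1)   # node depths [m], matching dz = 0.1
    psi0 = -z                       # simple initial pressure-head profile
    model = RichardsModel(z, t=1.0, psi0=psi0, qTop=-0.01)
    model.solve()                   # integrates one step; updates model.psi0
    return model.psi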
|
the-stack_0_20581 | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2018 Kyoto University (Hirofumi Inaguma)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Forward-backward attention decoding."""
import logging
logger = logging.getLogger(__name__)
def fwd_bwd_attention(nbest_hyps_fwd, aws_fwd, scores_fwd,
nbest_hyps_bwd, aws_bwd, scores_bwd,
eos, gnmt_decoding, lp_weight, idx2token, refs_id, flip=False):
"""Decoding with the forward and backward attention-based decoders.
Args:
nbest_hyps_fwd (list): A list of length `[B]`, which contains list of n hypotheses
aws_fwd (list): A list of length `[B]`, which contains arrays of size `[L, T]`
        scores_fwd (list): A list of length `[B]`, which contains arrays of cumulative log probabilities
        nbest_hyps_bwd (list): same as `nbest_hyps_fwd`, for the backward decoder
        aws_bwd (list): same as `aws_fwd`, for the backward decoder
        scores_bwd (list): same as `scores_fwd`, for the backward decoder
eos (int):
gnmt_decoding (float):
lp_weight (float):
idx2token (): converter from index to token
refs_id ():
flip (bool): flip the encoder indices
    Returns:
        best_hyps (list): A list of length `[B]`, containing the merged best hypothesis for each utterance

    """
bs = len(nbest_hyps_fwd)
nbest = len(nbest_hyps_fwd[0])
best_hyps = []
for b in range(bs):
max_time = len(aws_fwd[b][0])
merged = []
for n in range(nbest):
# forward
if len(nbest_hyps_fwd[b][n]) > 1:
if nbest_hyps_fwd[b][n][-1] == eos:
merged.append({'hyp': nbest_hyps_fwd[b][n][:-1],
'score': scores_fwd[b][n][-2]})
# NOTE: remove eos probability
else:
merged.append({'hyp': nbest_hyps_fwd[b][n],
'score': scores_fwd[b][n][-1]})
else:
# <eos> only
logger.info(nbest_hyps_fwd[b][n])
# backward
if len(nbest_hyps_bwd[b][n]) > 1:
if nbest_hyps_bwd[b][n][0] == eos:
merged.append({'hyp': nbest_hyps_bwd[b][n][1:],
'score': scores_bwd[b][n][1]})
# NOTE: remove eos probability
else:
merged.append({'hyp': nbest_hyps_bwd[b][n],
'score': scores_bwd[b][n][0]})
else:
# <eos> only
logger.info(nbest_hyps_bwd[b][n])
for n_f in range(nbest):
for n_b in range(nbest):
for i_f in range(len(aws_fwd[b][n_f]) - 1):
for i_b in range(len(aws_bwd[b][n_b]) - 1):
if flip:
# the encoder is not shared between forward and backward decoders
t_prev = max_time - aws_bwd[b][n_b][i_b + 1].argmax(-2)
t_curr = aws_fwd[b][n_f][i_f].argmax(-2)
t_next = max_time - aws_bwd[b][n_b][i_b - 1].argmax(-2)
else:
t_prev = aws_bwd[b][n_b][i_b + 1].argmax(-2)
t_curr = aws_fwd[b][n_f][i_f].argmax(-2)
t_next = aws_bwd[b][n_b][i_b - 1].argmax(-2)
# the same token at the same time
if t_curr >= t_prev and t_curr <= t_next and nbest_hyps_fwd[b][n_f][i_f] == nbest_hyps_bwd[b][n_b][i_b]:
new_hyp = nbest_hyps_fwd[b][n_f][:i_f + 1].tolist() + \
nbest_hyps_bwd[b][n_b][i_b + 1:].tolist()
score_curr_fwd = scores_fwd[b][n_f][i_f] - scores_fwd[b][n_f][i_f - 1]
score_curr_bwd = scores_bwd[b][n_b][i_b] - scores_bwd[b][n_b][i_b + 1]
score_curr = max(score_curr_fwd, score_curr_bwd)
new_score = scores_fwd[b][n_f][i_f - 1] + scores_bwd[b][n_b][i_b + 1] + score_curr
merged.append({'hyp': new_hyp, 'score': new_score})
logger.info('time matching')
if refs_id is not None:
logger.info('Ref: %s' % idx2token(refs_id[b]))
logger.info('hyp (fwd): %s' % idx2token(nbest_hyps_fwd[b][n_f]))
logger.info('hyp (bwd): %s' % idx2token(nbest_hyps_bwd[b][n_b]))
logger.info('hyp (fwd-bwd): %s' % idx2token(new_hyp))
logger.info('log prob (fwd): %.3f' % scores_fwd[b][n_f][-1])
logger.info('log prob (bwd): %.3f' % scores_bwd[b][n_b][0])
logger.info('log prob (fwd-bwd): %.3f' % new_score)
merged = sorted(merged, key=lambda x: x['score'], reverse=True)
best_hyps.append(merged[0]['hyp'])
return best_hyps
|
the-stack_0_20582 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import os
from ooi_data_explorations.common import inputs, m2m_collect, m2m_request, get_deployment_dates, \
get_vocabulary, update_dataset, ENCODINGS
# load configuration settings
ATTRS = dict({
'raw_backscatter': {
'long_name': 'Raw Optical Backscatter at 700 nm',
'units': 'counts',
'comment': 'Raw optical backscatter measurements at 700 nm.',
'data_product_identifier': 'FLUBSCT_L0'
},
'raw_chlorophyll': {
'long_name': 'Raw Chlorophyll Fluorescence',
'units': 'counts',
'comment': 'Raw chlorophyll fluorescence (470 nm excitation/ 695 nm emission) measurements.',
'data_product_identifier': 'CHLAFLO_L0'
},
'raw_cdom': {
'long_name': 'Raw CDOM Fluorescence',
'units': 'counts',
'comment': 'Raw CDOM fluorescence (370 nm excitation/ 460 nm emission) measurements.',
'data_product_identifier': 'CDOMFLO_L0'
},
'estimated_chlorophyll': {
'long_name': 'Estimated Chlorophyll Concentration',
'standard_name': 'mass_concentration_of_chlorophyll_in_sea_water',
'units': 'ug L-1',
'comment': ('Estimated chlorophyll concentration based upon a calibration curve derived from a fluorescent ' +
'proxy approximately equal to 25 ug/l of a Thalassiosira weissflogii phytoplankton culture. ' +
'This measurement is considered to be an estimate only of the true chlorophyll concentration.'),
'data_product_identifier': 'CHLAFLO_L1',
'ancillary_variables': 'raw_chlorophyll estimated_chlorophyll_qc_executed estimated_chlorophyll_qc_results'
},
'fluorometric_cdom': {
'long_name': 'Fluorometric CDOM Concentration',
'standard_name': ('concentration_of_colored_dissolved_organic_matter_in_sea_water_expressed_as_equivalent' +
'_mass_fraction_of_quinine_sulfate_dihydrate'),
'units': 'ppb',
'comment': ('More commonly referred to as Chromophoric Dissolved Organic Matter (CDOM). CDOM plays an ' +
'important role in the carbon cycling and biogeochemistry of coastal waters. It occurs ' +
'naturally in aquatic environments primarily as a result of tannins released from decaying ' +
'plant and animal matter, and can enter coastal areas in river run-off containing organic ' +
'materials leached from soils.'),
'data_product_identifier': 'CDOMFLO_L1',
'ancillary_variables': 'raw_cdom fluorometric_cdom_qc_executed fluorometric_cdom_qc_results'
},
'beta_700': {
'long_name': 'Volume Scattering Function at 700 nm',
'standard_name': 'volume_scattering_function_of_radiative_flux_in_sea_water',
'units': 'm-1 sr-1',
'comment': ('Radiative flux is the sum of shortwave and longwave radiative fluxes. Scattering of ' +
'radiation is its deflection from its incident path without loss of energy. The volume ' +
'scattering function is the intensity (flux per unit solid angle) of scattered radiation per ' +
'unit length of scattering medium, normalised by the incident radiation flux.'),
'data_product_identifier': 'FLUBSCT_L1',
'ancillary_variables': 'raw_backscatter beta_700_qc_executed beta_700_qc_results'
},
'bback': {
'long_name': 'Total Optical Backscatter at 700 nm',
'units': 'm-1',
'comment': ('Total (particulate + water) optical backscatter at 700 nm, derived from the Volume ' +
'Scattering Function and corrected for effects of temperature and salinity.'),
'data_product_identifier': 'FLUBSCT_L2',
'ancillary_variables': 'beta_700 temperature salinity bback_qc_executed bback_qc_results'
}
})
def flort_datalogger(ds, burst=True):
"""
Takes flort data recorded by the data loggers used in the CGSN/EA moorings
and cleans up the data set to make it more user-friendly. Primary task is
renaming parameters and dropping some that are of limited use. Additionally,
re-organize some of the variables to permit better assessments of the data.
:param ds: initial flort data set downloaded from OOI via the M2M system
:param burst: resample the data to the defined time interval
:return ds: cleaned up data set
"""
# drop some of the variables:
# internal_timestamp == superseded by time, redundant so can remove
# suspect_timestamp = not used
# measurement_wavelength_* == metadata, move into variable attributes.
# pressure_depth == variable assigned if this was a FLORT on a CSPP, not with moorings
# seawater_scattering_coefficient == not used
ds = ds.reset_coords()
ds = ds.drop(['internal_timestamp', 'suspect_timestamp', 'measurement_wavelength_beta',
'measurement_wavelength_cdom', 'measurement_wavelength_chl', 'seawater_scattering_coefficient'])
# check for data from a co-located CTD, if not present add with appropriate attributes
if 'temp' not in ds.variables:
ds['temp'] = ('time', ds['deployment'] * np.nan)
ds['temp'].attrs = {
'comment': ('Normally this would be seawater temperature data from a co-located CTD. However, data from ' +
'that sensor is unavailable. This value has been filled with NaNs to preserve the structure ' +
'of the data set.'),
'data_product_identifier': 'TEMPWAT_L1',
'long_name': 'Seawater Temperature',
'standard_name': 'sea_water_temperature',
'units': 'degree_Celsius'
}
ds['practical_salinity'] = ('time', ds['deployment'] * np.nan)
ds['practical_salinity'].attrs = {
'long_name': 'Practical Salinity',
'standard_name': 'sea_water_practical_salinity',
'units': '1',
'comment': ('Normally this would be seawater salinity data from a co-located CTD. However, data from ' +
'that sensor is unavailable. This value has been filled with NaNs to preserve the structure ' +
'of the data set.'),
'data_product_identifier': 'PRACSAL_L2'
}
# lots of renaming here to get a better defined data set with cleaner attributes
rename = {
'temp': 'seawater_temperature',
'raw_signal_chl': 'raw_chlorophyll',
'fluorometric_chlorophyll_a': 'estimated_chlorophyll',
'fluorometric_chlorophyll_a_qc_executed': 'estimated_chlorophyll_qc_executed',
'fluorometric_chlorophyll_a_qc_results': 'estimated_chlorophyll_qc_results',
'raw_signal_cdom': 'raw_cdom',
'raw_signal_beta': 'raw_backscatter',
'total_volume_scattering_coefficient': 'beta_700',
'total_volume_scattering_coefficient_qc_executed': 'beta_700_qc_executed',
'total_volume_scattering_coefficient_qc_results': 'beta_700_qc_results',
'optical_backscatter': 'bback',
'optical_backscatter_qc_executed': 'bback_qc_executed',
'optical_backscatter_qc_results': 'bback_qc_results',
}
ds = ds.rename(rename)
# reset some attributes
for key, value in ATTRS.items():
for atk, atv in value.items():
if key in ds.variables:
ds[key].attrs[atk] = atv
# add the original variable name as an attribute, if renamed
for key, value in rename.items():
ds[value].attrs['ooinet_variable_name'] = key
if burst: # re-sample the data to a defined time interval using a median average
# create the burst averaging
burst = ds
burst = burst.resample(time='900s', base=3150, loffset='450s', keep_attrs=True, skipna=True).median()
burst = burst.where(~np.isnan(burst.deployment), drop=True)
# reset the attributes...which keep_attrs should do...
burst.attrs = ds.attrs
for v in burst.variables:
burst[v].attrs = ds[v].attrs
# save the newly average data
ds = burst
return ds
def flort_instrument(ds):
"""
Takes flort data recorded by the Sea-Bird Electronics SBE16Plus used in the
CGSN/EA moorings and cleans up the data set to make it more user-friendly.
Primary task is renaming parameters and dropping some that are of limited
use. Additionally, re-organize some of the variables to permit better
assessments of the data.
:param ds: initial flort data set downloaded from OOI via the M2M system
:return ds: cleaned up data set
"""
# drop some of the variables:
# internal_timestamp == superseded by time, redundant so can remove
# suspect_timestamp = not used
# measurement_wavelength_* == metadata, move into variable attributes.
# pressure_depth == variable assigned if this was a FLORT on a CSPP, not with moorings
# seawater_scattering_coefficient == not used
ds = ds.reset_coords()
ds = ds.drop(['internal_timestamp', 'suspect_timestamp', 'measurement_wavelength_beta',
'measurement_wavelength_cdom', 'measurement_wavelength_chl', 'seawater_scattering_coefficient'])
# lots of renaming here to get a better defined data set with cleaner attributes
rename = {
'temp': 'seawater_temperature',
'raw_signal_chl': 'raw_chlorophyll',
'fluorometric_chlorophyll_a': 'estimated_chlorophyll',
'fluorometric_chlorophyll_a_qc_executed': 'estimated_chlorophyll_qc_executed',
'fluorometric_chlorophyll_a_qc_results': 'estimated_chlorophyll_qc_results',
'raw_signal_cdom': 'raw_cdom',
'raw_signal_beta': 'raw_backscatter',
'total_volume_scattering_coefficient': 'beta_700',
'total_volume_scattering_coefficient_qc_executed': 'beta_700_qc_executed',
'total_volume_scattering_coefficient_qc_results': 'beta_700_qc_results',
'optical_backscatter': 'bback',
'optical_backscatter_qc_executed': 'bback_qc_executed',
'optical_backscatter_qc_results': 'bback_qc_results',
}
ds = ds.rename(rename)
# reset some attributes
for key, value in ATTRS.items():
for atk, atv in value.items():
if key in ds.variables:
ds[key].attrs[atk] = atv
# add the original variable name as an attribute, if renamed
for key, value in rename.items():
ds[value].attrs['ooinet_variable_name'] = key
return ds
def main(argv=None):
args = inputs(argv)
site = args.site
node = args.node
sensor = args.sensor
method = args.method
stream = args.stream
deploy = args.deploy
start = args.start
stop = args.stop
burst = args.burst
# determine the start and stop times for the data request based on either the deployment number or user entered
# beginning and ending dates.
if not deploy or (start and stop):
return SyntaxError('You must specify either a deployment number or beginning and end dates of interest.')
else:
if deploy:
# Determine start and end dates based on the deployment number
start, stop = get_deployment_dates(site, node, sensor, deploy)
if not start or not stop:
exit_text = ('Deployment dates are unavailable for %s-%s-%s, deployment %02d.' % (site, node, sensor,
deploy))
raise SystemExit(exit_text)
# Request the data for download
r = m2m_request(site, node, sensor, method, stream, start, stop)
if not r:
exit_text = ('Request failed for %s-%s-%s. Check request.' % (site, node, sensor))
raise SystemExit(exit_text)
# Valid request, start downloading the data
if deploy:
flort = m2m_collect(r, ('.*deployment%04d.*FLORT.*\\.nc$' % deploy))
else:
flort = m2m_collect(r, '.*FLORT.*\\.nc$')
if not flort:
exit_text = ('Data unavailable for %s-%s-%s. Check request.' % (site, node, sensor))
raise SystemExit(exit_text)
# clean-up and reorganize
if method in ['telemetered', 'recovered_host']:
flort = flort_datalogger(flort, burst)
else:
flort = flort_instrument(flort)
vocab = get_vocabulary(site, node, sensor)[0]
flort = update_dataset(flort, vocab['maxdepth'])
# save the data to disk
out_file = os.path.abspath(args.outfile)
if not os.path.exists(os.path.dirname(out_file)):
os.makedirs(os.path.dirname(out_file))
flort.to_netcdf(out_file, mode='w', format='NETCDF4', engine='h5netcdf', encoding=ENCODINGS)
if __name__ == '__main__':
main()
|
the-stack_0_20583 | """
PyTorch implementation of Frechet Inception Distance (FID score)
Reference:
Martin Heusel et al. "GANs Trained by a Two Time-Scale Update Rule Converge to a Local Nash Equilibrium",
https://arxiv.org/abs/1706.08500
Credits:
https://github.com/hukkelas/pytorch-frechet-inception-distance/
https://github.com/mseitzer/pytorch-fid
"""
from typing import Tuple
import torch
from piq.base import BaseFeatureMetric
from piq.utils import _validate_input
def _approximation_error(matrix: torch.Tensor, s_matrix: torch.Tensor) -> torch.Tensor:
norm_of_matrix = torch.norm(matrix)
error = matrix - torch.mm(s_matrix, s_matrix)
error = torch.norm(error) / norm_of_matrix
return error
def _sqrtm_newton_schulz(matrix: torch.Tensor, num_iters: int = 100) -> Tuple[torch.Tensor, torch.Tensor]:
r"""
Square root of matrix using Newton-Schulz Iterative method
Source: https://github.com/msubhransu/matrix-sqrt/blob/master/matrix_sqrt.py
Args:
matrix: matrix or batch of matrices
num_iters: Number of iteration of the method
Returns:
Square root of matrix
Error
"""
dim = matrix.size(0)
norm_of_matrix = matrix.norm(p='fro')
Y = matrix.div(norm_of_matrix)
I = torch.eye(dim, dim, device=matrix.device, dtype=matrix.dtype)
Z = torch.eye(dim, dim, device=matrix.device, dtype=matrix.dtype)
s_matrix = torch.empty_like(matrix)
error = torch.empty(1, device=matrix.device, dtype=matrix.dtype)
for _ in range(num_iters):
T = 0.5 * (3.0 * I - Z.mm(Y))
Y = Y.mm(T)
Z = T.mm(Z)
s_matrix = Y * torch.sqrt(norm_of_matrix)
error = _approximation_error(matrix, s_matrix)
if torch.isclose(error, torch.tensor([0.], device=error.device, dtype=error.dtype), atol=1e-5):
break
return s_matrix, error
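# Quick illustrative sanity check of the iterative square root:
#
#   A = torch.eye(3) * 4.0
#   S, err = _sqrtm_newton_schulz(A)
#   # S is approximately 2 * I and err is close to 0.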
def _compute_fid(mu1: torch.Tensor, sigma1: torch.Tensor, mu2: torch.Tensor, sigma2: torch.Tensor,
eps=1e-6) -> torch.Tensor:
r"""
The Frechet Inception Distance between two multivariate Gaussians X_x ~ N(mu_1, sigm_1)
and X_y ~ N(mu_2, sigm_2) is
d^2 = ||mu_1 - mu_2||^2 + Tr(sigm_1 + sigm_2 - 2*sqrt(sigm_1*sigm_2)).
Args:
mu1: mean of activations calculated on predicted (x) samples
sigma1: covariance matrix over activations calculated on predicted (x) samples
mu2: mean of activations calculated on target (y) samples
sigma2: covariance matrix over activations calculated on target (y) samples
eps: offset constant. used if sigma_1 @ sigma_2 matrix is singular
Returns:
Scalar value of the distance between sets.
"""
diff = mu1 - mu2
covmean, _ = _sqrtm_newton_schulz(sigma1.mm(sigma2))
# Product might be almost singular
if not torch.isfinite(covmean).all():
print(f'FID calculation produces singular product; adding {eps} to diagonal of cov estimates')
offset = torch.eye(sigma1.size(0), device=mu1.device, dtype=mu1.dtype) * eps
covmean, _ = _sqrtm_newton_schulz((sigma1 + offset).mm(sigma2 + offset))
tr_covmean = torch.trace(covmean)
return diff.dot(diff) + torch.trace(sigma1) + torch.trace(sigma2) - 2 * tr_covmean
def _cov(m: torch.Tensor, rowvar: bool = True) -> torch.Tensor:
r"""Estimate a covariance matrix given data.
Covariance indicates the level to which two variables vary together.
If we examine N-dimensional samples, `X = [x_1, x_2, ... x_N]^T`,
then the covariance matrix element `C_{ij}` is the covariance of
`x_i` and `x_j`. The element `C_{ii}` is the variance of `x_i`.
Args:
m: A 1-D or 2-D array containing multiple variables and observations.
Each row of `m` represents a variable, and each column a single
observation of all those variables.
rowvar: If `rowvar` is True, then each row represents a
variable, with observations in the columns. Otherwise, the
relationship is transposed: each column represents a variable,
while the rows contain observations.
Returns:
The covariance matrix of the variables.
"""
if m.dim() > 2:
raise ValueError('Tensor for covariance computations has more than 2 dimensions. '
'Only 1 or 2 dimensional arrays are allowed')
if m.dim() < 2:
m = m.view(1, -1)
if not rowvar and m.size(0) != 1:
m = m.t()
fact = 1.0 / (m.size(1) - 1)
m = m - torch.mean(m, dim=1, keepdim=True)
mt = m.t()
return fact * m.matmul(mt).squeeze()
def _compute_statistics(samples: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
r"""Calculates the statistics used by FID
Args:
samples: Low-dimension representation of image set.
Shape (N_samples, dims) and dtype: np.float32 in range 0 - 1
Returns:
mu: mean over all activations from the encoder.
sigma: covariance matrix over all activations from the encoder.
"""
mu = torch.mean(samples, dim=0)
sigma = _cov(samples, rowvar=False)
return mu, sigma
class FID(BaseFeatureMetric):
r"""Interface of Frechet Inception Distance.
It's computed for a whole set of data and uses features from encoder instead of images itself to decrease
computation cost. FID can compare two data distributions with different number of samples.
But dimensionalities should match, otherwise it won't be possible to correctly compute statistics.
Examples:
>>> fid_metric = FID()
>>> x_feats = torch.rand(10000, 1024)
>>> y_feats = torch.rand(10000, 1024)
>>> fid: torch.Tensor = fid_metric(x_feats, y_feats)
References:
Heusel M. et al. (2017).
Gans trained by a two time-scale update rule converge to a local nash equilibrium.
Advances in neural information processing systems,
https://arxiv.org/abs/1706.08500
"""
def compute_metric(self, x_features: torch.Tensor, y_features: torch.Tensor) -> torch.Tensor:
r"""
Fits multivariate Gaussians: :math:`X \sim \mathcal{N}(\mu_x, \sigma_x)` and
:math:`Y \sim \mathcal{N}(\mu_y, \sigma_y)` to image stacks.
Then computes FID as :math:`d^2 = ||\mu_x - \mu_y||^2 + Tr(\sigma_x + \sigma_y - 2\sqrt{\sigma_x \sigma_y})`.
Args:
x_features: Samples from data distribution. Shape :math:`(N_x, D)`
y_features: Samples from data distribution. Shape :math:`(N_y, D)`
Returns:
The Frechet Distance.
"""
_validate_input([x_features, y_features], dim_range=(2, 2), size_range=(1, 2))
# GPU -> CPU
mu_x, sigma_x = _compute_statistics(x_features.detach().to(dtype=torch.float64))
mu_y, sigma_y = _compute_statistics(y_features.detach().to(dtype=torch.float64))
score = _compute_fid(mu_x, sigma_x, mu_y, sigma_y)
return score
|
the-stack_0_20584 | # Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import os
import sys
import unittest
from launch import LaunchDescription
from launch.actions import ExecuteProcess
from launch.actions import OpaqueFunction
from launch_ros.actions import Node
import launch_testing
import launch_testing.asserts
import launch_testing.markers
import launch_testing.tools
import launch_testing_ros.tools
import pytest
from rclpy.utilities import get_available_rmw_implementations
from ros2node.api import INFO_NONUNIQUE_WARNING_TEMPLATE
# Skip cli tests on Windows while they exhibit pathological behavior
# https://github.com/ros2/build_farmer/issues/248
if sys.platform.startswith('win'):
pytest.skip(
'CLI tests can block for a pathological amount of time on Windows.',
allow_module_level=True)
@pytest.mark.rostest
@launch_testing.parametrize('rmw_implementation', get_available_rmw_implementations())
def generate_test_description(rmw_implementation, ready_fn):
path_to_complex_node_script = os.path.join(
os.path.dirname(__file__), 'fixtures', 'complex_node.py'
)
additional_env = {'RMW_IMPLEMENTATION': rmw_implementation}
return LaunchDescription([
# Always restart daemon to isolate tests.
ExecuteProcess(
cmd=['ros2', 'daemon', 'stop'],
name='daemon-stop',
on_exit=[
ExecuteProcess(
cmd=['ros2', 'daemon', 'start'],
name='daemon-start',
on_exit=[
# Add test fixture actions.
Node(
executable=sys.executable,
arguments=[path_to_complex_node_script],
node_name='complex_node',
additional_env=additional_env,
),
Node(
executable=sys.executable,
arguments=[path_to_complex_node_script],
node_name='complex_node',
additional_env=additional_env,
),
Node(
executable=sys.executable,
arguments=[path_to_complex_node_script],
node_name='complex_node_2',
additional_env=additional_env,
),
OpaqueFunction(function=lambda context: ready_fn()),
],
additional_env=additional_env
)
]
),
])
class TestROS2NodeCLIWithDuplicateNodeNames(unittest.TestCase):
@classmethod
def setUpClass(
cls,
launch_service,
proc_info,
proc_output,
rmw_implementation
):
@contextlib.contextmanager
def launch_node_command(self, arguments):
node_command_action = ExecuteProcess(
cmd=['ros2', 'node', *arguments],
additional_env={
'RMW_IMPLEMENTATION': rmw_implementation,
'PYTHONUNBUFFERED': '1'
},
name='ros2node-cli',
output='screen'
)
with launch_testing.tools.launch_process(
launch_service, node_command_action, proc_info, proc_output,
output_filter=launch_testing_ros.tools.basic_output_filter(
# ignore launch_ros and ros2cli daemon nodes
filtered_patterns=['.*launch_ros.*', '.*ros2cli.*'],
filtered_rmw_implementation=rmw_implementation
)
) as node_command:
yield node_command
cls.launch_node_command = launch_node_command
@launch_testing.markers.retry_on_failure(times=5, delay=1)
def test_info_warning(self):
with self.launch_node_command(arguments=['info', '/complex_node']) as node_command:
assert node_command.wait_for_shutdown(timeout=20)
assert node_command.exit_code == launch_testing.asserts.EXIT_OK
assert launch_testing.tools.expect_output(
expected_lines=[
INFO_NONUNIQUE_WARNING_TEMPLATE.format(num_nodes='2', node_name='/complex_node')
],
text=node_command.output, strict=False
), 'Output does not match:\n' + node_command.output
|
the-stack_0_20585 | import pycurl
from urllib.parse import urlencode
from io import BytesIO
import sys
import json
def make_get_request(url, access_token="", headers=[], data=""):
bytes_obj = BytesIO()
curl_obj = pycurl.Curl()
curl_obj.setopt(curl_obj.URL, url)
curl_obj.setopt(curl_obj.WRITEDATA, bytes_obj)
    if len(data) > 1:
        # READDATA expects a file-like object, so wrap the encoded payload
        curl_obj.setopt(curl_obj.READDATA, BytesIO(data.encode('utf8')))
if len(access_token) > 1:
# Authenticate:
headers.append("Authorization: Token {:s}".format(access_token))
curl_obj.setopt(curl_obj.HTTPHEADER, headers)
try:
curl_obj.perform()
except pycurl.error as e:
print(e)
return {"Request failed."}
curl_obj.close()
return bytes_obj.getvalue().decode('utf8') # Body of the reply
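# Example call (hypothetical URL and token), returning the decoded reply body:
#
#   body = make_get_request('https://example.org/api/v2/courses/',
#                           access_token='abc123',
#                           headers=['Accept: application/json'])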
def make_put_request(url, data, header, json_data=""):
response_buffer = BytesIO()
curl_obj = pycurl.Curl()
curl_obj.setopt(curl_obj.URL, url)
curl_obj.setopt(curl_obj.UPLOAD, 1)
if json_data == "":
data_buffer = BytesIO(data.encode('utf8'))
else:
data_buffer = BytesIO(json.dumps(json_data).encode('utf-8'))
curl_obj.setopt(curl_obj.READDATA, data_buffer)
curl_obj.setopt(curl_obj.HTTPHEADER, header)
curl_obj.setopt(curl_obj.WRITEDATA, response_buffer)
curl_obj.perform()
curl_obj.close()
return response_buffer.getvalue().decode('utf8')
def get_arguments(api_names):
target_URLs = {}
access_tokens = {}
apis = []
for api_name in api_names:
apis.append([api_name, '', ''])
for i in [1, 3]:
if len(sys.argv) > i:
            target_URLs[apis[(i - 1) // 2][0]] = sys.argv[i]
else:
print("fetch_data.py: Warning: missing argument: target API URL!")
return False, False
for i in [2, 4]:
if len(sys.argv) > i:
            access_tokens[apis[(i - 2) // 2][0]] = sys.argv[i]
else:
print("fetch_data.py: Warning: missing argument: Access token for target API!")
return False, False
return target_URLs, access_tokens
def read_secrets():
print("fetch_data.py: Reading API URL and acces key from the secrets file.")
secrets = {}
with open("secrets", 'r') as file:
for row in file:
row = row.rstrip()
if row.startswith("#") or row == "":
continue
# Example: GITLAB_API_KEY = qwertyuiop1234567
row_split = row.split()
key_split = row_split[0].lower().split('_') # ['gitlab', 'api', 'key']
api_name = key_split[0] # 'gitlab'
key_name = "API {:s}s".format(key_split[2]) # 'API keys'
if len(key_split) > 3:
api_name_specifier = key_split[3]
else:
api_name_specifier = 'root'
whole_name = "{:s}".format(api_name)
if key_name == "API urls":
whole_name = "{:s}-{:s}".format(api_name, api_name_specifier)
if len(row_split) == 3:
if api_name not in secrets.keys():
secrets[api_name] = {}
if key_name in secrets[api_name]:
secrets[api_name][key_name][whole_name] = row_split[2]
else:
secrets[api_name][key_name] = {whole_name: row_split[2]}
if len(secrets) > 1:
print("fetch_data.py: read some API URL and access key succesfully from secrets!\n")
else:
print("fetch_data.py: Did not read any API url or access key from secrets!")
return secrets
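# The parser above expects a plain-text "secrets" file with "NAME = value"
# lines. An illustrative layout matching the keys read in main() below
# (all values are placeholders):
#
#   PLUSSA_API_KEY = xxxxxxxxxxxxxxxx
#   PLUSSA_API_URL_ROOT = https://plus.tuni.fi/api/v2/
#   GITLAB_API_KEY = xxxxxxxxxxxxxxxx
#   GITLAB_API_URL_PROJECTS = https://course-gitlab.tuni.fi/api/v4/projects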
def get_id():
document_id = '0'
try:
with open('id', 'r') as file:
document_id = file.read()
if document_id == "":
document_id = '0'
file.close()
except OSError:
document_id = '0'
with open('id', 'w') as file:
id_int = int(document_id)
id_int += 1
file.write(str(id_int))
file.close()
return document_id
def write_to_elasticsearch(data, index='testindex', data_type='_doc', document_id=None, json_data=""):
    header = ['Content-Type: application/json']
    if document_id is None:
        document_id = get_id()  # evaluated per call; a get_id() default would only run once, at definition time
    url = 'http://localhost:9200/{:s}/{:s}/{!s}'.format(index, data_type, document_id)
# curl -XPUT 'http://localhost:9200/testindex/_doc/1' -H 'Content-Type: application/json' -d '{"name":"John Doe"}'
return make_put_request(url, data, header, json_data=json_data)
def search_elasticsearch(index=''):
header = ['Content-Type: application/json']
data_type = '_search'
url = 'http://localhost:9200/{:s}/{:s}?pretty'.format(index, data_type)
return make_get_request(url, headers=header)
def copy_api_point_to_index(url, access_token, api_name):
reply_body = make_get_request(url, access_token, [])
reply_body_json = json.loads(reply_body)
if "error" in reply_body_json:
print("error occured in get request from url: {:s}".format(url))
print("Reply body:", reply_body)
return False
if isinstance(reply_body_json, list):
for item in reply_body_json:
id = get_id()
if "id" in item.keys():
id = str(item['id'])
reply_body = write_to_elasticsearch(json.dumps(item), api_name, '_doc', id)
reply_body_json = json.loads(reply_body)
if "error" in reply_body_json.keys():
print("Error reply from Elastic:\n", reply_body)
return False
else:
print("Wrote successfully to ElasticSearch.")
else:
reply_body = write_to_elasticsearch(reply_body, api_name, '_doc', get_id())
reply_body_json = json.loads(reply_body)
if "error" in reply_body_json.keys():
print("Error reply from Elastic:\n", reply_body)
return False
def fetch_api_to_ES(url, api_key, index_name, data_id, write_to_es=True):
# Get data from api
reply = make_get_request(url, api_key, [])
# Check that no error occured in getting course data and cast reply to json:
if reply == False or "error" in reply:
print("error occured in get request from plussa API!")
print("Reply body:", reply)
return False
else:
print("Got requested data from API: {:s}".format(url))
# Write details into ES cluster with given index name:
if write_to_es:
print("Writing data to ElasticSearch... ", end="")
write_to_elasticsearch(reply, index=index_name, document_id=data_id)
print("Done.")
return json.loads(reply)
def get_agreements(api_key):
agreement_url = "https://plus.tuni.fi/api/v2/courses/40/submissiondata/?exercise_id=4543&format=json"
agreements = fetch_api_to_ES(agreement_url, api_key, "", 0, write_to_es=False)
list_of_agreed = []
for submission in agreements:
if submission["field_0"] == "a":
list_of_agreed.append(submission["StudentID"])
return list_of_agreed
def parse_empty_fields(json_data):
for module in json_data:
old = module["points_by_difficulty"]
new_dict = {}
for key in old:
value = old[key]
if key != "":
new_dict[key] = value
else:
new_dict["empty"] = value
module["points_by_difficulty"] = new_dict
new_modules = []
for module in json_data:
if len(module["exercises"]) > 0 or module['id'] in [352, 570]:
new_modules.append(module)
return new_modules
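# For example, a module entry {"points_by_difficulty": {"": 10, "A": 25}, ...}
# becomes {"points_by_difficulty": {"empty": 10, "A": 25}, ...}, and modules
# with no exercises (other than ids 352 and 570) are dropped.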
from hashlib import blake2b
def write_students(course_students_url, plussa_api_key, course_id, course_instance, page_id):
student_list_index_key = "plussa-course-{:d}-students-anonymized".format(course_id)
# 7. Get student list details:
student_list_reply = fetch_api_to_ES(course_students_url, plussa_api_key, '', 0, write_to_es=False)
if student_list_reply == False:
return False
else:
if course_instance['instance_name'] in ['spring-2020', 'summer-2020']: ## Handling GDPR.
user_ids_of_agreed = get_agreements(plussa_api_key)
# Fetch point data for each student and write it to the student list:
for student in student_list_reply['results']:
if student['student_id'] in user_ids_of_agreed:
# Anonymize student data:
for key in ['username', 'student_id', 'email', 'full_name']:
hasher = blake2b(digest_size=10)
hasher.update(student[key].encode())
student[key] = hasher.hexdigest()
student.pop('tag_slugs', None)
# Fetch point data for the student:
student_points_reply = fetch_api_to_ES(student['points'], plussa_api_key, '', 0, write_to_es=False)
if student_points_reply == False:
return False
student_points_reply['modules'] = parse_empty_fields(student_points_reply['modules'])
# Remove unnecessary fields from points reply:
for key in ['username', 'student_id', 'email', 'full_name', 'points_by_difficulty', 'tag_slugs', 'id', 'url', 'is_external', 'tags']:
student_points_reply.pop(key, None)
student['points'] = student_points_reply
else:
# Redact and remove student data:
student['username'] = 'redacted_due_to_no_research_permission'
for key in ['student_id', 'email', 'full_name', 'data', 'is_external', 'points', 'tag_slugs']:
student.pop(key, None)
# 8. Write course student list with point data into the ES cluster:
print("Writing course student list with point data into the ES cluster...")
reply = write_to_elasticsearch(student_list_reply, student_list_index_key, document_id=page_id, json_data=student_list_reply)
print("Reply: ", reply)
return student_list_reply["next"]
def get_plussa_data(api_url, api_key, course_id_to_fetch):
# 2. Get Plussa API root contents:
plussa_root_reply = fetch_api_to_ES(api_url, api_key, "plussa-root", 0)
if plussa_root_reply == False:
return False
# Get course list api url:
plussa_courselist_api_url = plussa_root_reply["courses"]
# 3. Get list of course instances in Plussa:
plussa_courselist_reply = fetch_api_to_ES(plussa_courselist_api_url, api_key, "plussa-course-list", 0)
if plussa_courselist_reply == False:
return False
# 4. Fetch data for courses:
for course_instance in plussa_courselist_reply["results"]:
if course_instance['id'] == course_id_to_fetch:
print("course instance:", course_instance)
print()
# Get course API url and id:
course_instance_url = course_instance["url"]
course_id = course_id_to_fetch
# 5. Get course details from Plussa Course Instance API:
course_details_reply = fetch_api_to_ES(course_instance_url, api_key, "plussa-course-{:d}".format(course_id), 0)
if course_details_reply == False:
return False
# Get exercise list and student list API urls:
course_exercises_url = course_details_reply["exercises"]
# 6. Get exercise list details from Plussa API:
exercise_list_reply = make_get_request(course_exercises_url, api_key, [])
# Check that no error occured in getting course data and cast reply to json:
if exercise_list_reply == False:
return False
exercise_list_reply = json.loads(exercise_list_reply)
# Remove extra modules:
exercise_list_reply['results'] = [module for module in exercise_list_reply['results'] if len(module['exercises']) > 0 or module['id'] in [570]]
# Write details into ES cluster with given index name:
write_to_elasticsearch("", index="plussa-course-{:d}-exercises".format(course_id), document_id=0, json_data=exercise_list_reply)
next_url = course_details_reply["students"]
page_id = 0
while isinstance(next_url, str):
print("next_url is:", next_url)
next_url = write_students(next_url, api_key, course_id, course_instance, page_id)
page_id += 1
def find_git_url(student, access_token):
for module in student['points']['modules']:
if module['max_points'] > 0:
for exercise in module['exercises']:
submission_url = exercise['best_submission']
if submission_url is None:
continue
if len(submission_url) > 0:
submission_reply = make_get_request(submission_url, access_token, [], "")
if submission_reply == False:
return False
else:
submission_reply = json.loads(submission_reply)
submission_data = submission_reply['submission_data']
if submission_data is not None and len(submission_data) > 0 and len(submission_data[0]) > 1 and submission_data[0][0] == "git":
return submission_data[0][1] # Found a git url
return False
def get_commits_for_file(gitlab_api_url, encoded, full_path, gitlab_api_key):
encoded_full_path = full_path.replace("/", "%2F").replace(" ", "%20").replace("(", "%28").replace(")", "%29").replace("ä", "%C3%A4").replace("ö", "%C3%B6")
url = "{:s}/{:s}/repository/files/{:s}/blame?ref=master".format(gitlab_api_url, encoded, encoded_full_path)
blame_reply = make_get_request(url, gitlab_api_key, ["Private-Token: {:s}".format(gitlab_api_key)], "")
if blame_reply == False or blame_reply is None or "400 Bad Request" in blame_reply:
return []
else:
blame_reply = json.loads(blame_reply)
if "error" in blame_reply:
return []
commits = []
commit_ids = []
if "message" not in blame_reply:
for commit in blame_reply:
commit_id = commit['commit']['id']
if commit_id not in commit_ids:
commit_ids.append(commit_id)
commits.append(commit['commit'])
else:
commits = ["Could not fetch commit data for {:s}".format(url)]
return commits
def get_module_tree(git_url, gitlab_api_key, gitlab_api_url):
if git_url.startswith("http"):
ending = ".git" if git_url.endswith(".git") else ""
repo_name = git_url[len("https://course-gitlab.tuni.fi/"):len(git_url)-len(ending)]
elif git_url.startswith("git@"):
repo_name = git_url[len("[email protected]/"):len(git_url)-len(".git")]
max_hits = 100
page = 1
module_tree = {}
paths_reply = []
paths = []
fetch_next = True
while fetch_next:
encoded = repo_name.replace("/", "%2F")
tree_url = "{:s}/{:s}/repository/tree?ref=master&recursive=true&path=student&per_page={:d}&page={:d}".format(gitlab_api_url, encoded, max_hits, page)
page += 1
paths_reply = make_get_request(tree_url, gitlab_api_key, ["Private-Token: {:s}".format(gitlab_api_key)], "")
if paths_reply == False or "message" in paths_reply:
print("Could not fetch data for {:s}. Might be due to missing privileges.".format(tree_url))
return module_tree
else:
paths_reply = json.loads(paths_reply)
# Deduce if there's more data to consider:
if (len(paths_reply) < max_hits or paths_reply[0]['path'] in paths):
fetch_next = False
for path in paths_reply:
full_path = path['path']
paths.append(full_path)
if "build" in full_path or "Debug" in full_path or "READ" in full_path:
continue
# Week subdirectory name must start with a number:
if len(full_path.split("/")) > 1 and not full_path.split("/")[1][0].isdigit():
continue
name = path['name']
if name.startswith("."):
name = name[1:]
path['name'] = name
split = full_path.split("/")
if len(split) == 2:
if name not in module_tree:
module_tree[name] = {}
else:
folder = split[1]
if len(split) < 4:
module_tree[folder][name] = {}
else:
file_name = "/".join(split[3:])
#file_name = file_name.replace(".", "DOT")
project = split[2]
if "." in project:
continue
# TODO: Speed-optimization; fetch project-wise commit data by using urls of format:
# https://course-gitlab.tuni.fi/api/v4/projects/<ProjectID>/-/commits/master/student%2F<FolderName>%2F<ProjectName>
module_tree[folder][project][file_name] = get_commits_for_file(gitlab_api_url, encoded, full_path, gitlab_api_key)
return module_tree
def parse_commits(module_tree):
new_tree = []
for module_name in module_tree.keys():
new_module = {
'module_name': module_name,
'projects': []
}
for project_name in module_tree[module_name].keys():
project_data = {
'name': project_name
}
commits = []
commit_ids = []
for file_data in module_tree[module_name][project_name]:
for commit in module_tree[module_name][project_name][file_data]:
if isinstance(commit, str):
continue
if commit['id'] not in commit_ids:
commit_ids.append(commit['id'])
hasher = blake2b(digest_size=10)
hasher.update(commit['committer_email'].encode())
commits.append({
'hash': commit['id'],
'message': commit['message'],
'commit_date': commit['committed_date'],
'committer_email': hasher.hexdigest()
})
project_data['commit_count'] = len(commits)
project_data['commit_meta'] = commits
new_module['projects'].append(project_data)
new_tree.append(new_module)
return new_tree
def parse_no_commits(module_tree):
new_module_tree = []
for module in module_tree:
new_module = {}
new_module['module_name'] = "0{:s}".format(module['name'][0:1]) if '.' in module['name'][0:2] else module['name'][0:2]
if module['name'] == "01-14":
new_module['module_name'] = '14'
projects = []
for exercise in module['exercises']:
new_project = {}
new_project['name'] = exercise['name']
new_project['commit_count'] = 0
new_project['commit_meta'] = []
projects.append(new_project)
new_module['projects'] = projects
new_module_tree.append(new_module)
return new_module_tree
def parse_exercise_count(exercise_list):
passed_exercises = 0
for exercise in exercise_list:
if exercise['points'] > 0:
passed_exercises += 1
return passed_exercises
from itertools import accumulate
def aggregate_history_data_from_index(index_name):
# 1. fetch ES data from given index
# 2. Parse interesting results for each* student:
# 1) Calculate weekly point counts
# 2) Calculate course grade
# 3) Add weekly point sums into weekly point sums of given grade
# 4) Add +1 to student count of said grade
# *: If student is not dummy data point
# 1. Fetch student data from given ElasticSearch index:
url = 'http://localhost:9200/{:s}/_search?pretty&size=20'.format(index_name)
reply = make_get_request(url, headers=['Content-Type: application/json'])
if reply != False:
reply = json.loads(reply)
student_counts = [0 for x in range(0, 6)]
data = {}
for week in ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15"]:
weekData = {}
for key in ["points", "commits", "exercises", "submissions"]:
weekData[key] = [0 for x in range(0, 6)]
weekData["cum_{:s}".format(key)] = [0 for x in range(0, 6)]
data[week] = weekData
for hits in reply['hits']['hits']:
for student in hits['_source']['results']:
if "commits" in student.keys() and (len(student["commits"]) != 15 or student['commits'][0]['projects'][0]['name'] != "1.6.1.1 |fi:Eka palautus|en:First submission|"):
weekly_points = []
weekly_commits = []
weekly_exercises = []
weekly_submissions = []
course_grade = -1
# 1) Calculate weekly point counts:
weekly_points = [module['points'] for module in student['points']['modules']]
# Calculate weekly commit counts:
for module in student['commits']:
commit_count = 0
for project in module['projects']:
commit_count += project['commit_count']
weekly_commits.append(commit_count)
# TODO: Calculate weekly exercise count
weekly_exercises = [parse_exercise_count(module['exercises']) for module in student['points']['modules']]
# TODO: Calculate weekly submissions count
weekly_submissions = [module['submission_count'] for module in student['points']['modules']]
# Make lists of correct length:
for count_list in [weekly_points, weekly_commits, weekly_exercises, weekly_submissions]:
while len(count_list) < 16:
count_list.append(0)
# Calculate cumulative points, commits, exercises and submissions:
cumulative_points = list(accumulate(weekly_points))
cumulative_commits = list(accumulate(weekly_commits))
cumulative_exercises = list(accumulate(weekly_exercises))
cumulative_submissions = list(accumulate(weekly_submissions))
# 2) Calculate approximate course grade:
for treshold in [0, 401, 425, 575, 680, 785]:
course_grade += 0 if cumulative_points[-1] < treshold else 1
# 3) Add +1 to student count of said grade:
student_counts[course_grade] += 1
# 3) Add weekly sums into weekly sums of the given grade:
for week in data:
keys = [('points', weekly_points), ('commits', weekly_commits), ('exercises', weekly_exercises), \
('submissions', weekly_submissions), ('cum_points', cumulative_points), ('cum_commits', cumulative_commits), \
('cum_exercises', cumulative_exercises), ('cum_submissions', cumulative_submissions)]
for pair in keys:
data[week][pair[0]] += pair[1][int(week)-1]
avg_point_data = []
avg_commit_data = []
avg_exercise_data = []
avg_submission_data = []
avg_cum_point_data = []
avg_cum_commit_data = []
avg_cum_exercise_data = []
avg_cum_submission_data = []
# Calculate averages for each week and grade:
for week in data.values():
avg_points = [0 for x in range(0, 6)]
avg_commits = [0 for x in range(0, 6)]
avg_exercises = [0 for x in range(0, 6)]
avg_submissions = [0 for x in range(0, 6)]
avg_cum_points = [0 for x in range(0, 6)]
avg_cum_commits = [0 for x in range(0, 6)]
avg_cum_exercises = [0 for x in range(0, 6)]
avg_cum_submissions = [0 for x in range(0, 6)]
for i in range(0, 6):
avg_points[i] = week['points'][i] / student_counts[i]
avg_commits[i] = week['commits'][i] / student_counts[i]
avg_exercises[i] = week['exercises'][i] / student_counts[i]
avg_submissions[i] = week['submissions'][i] / student_counts[i]
avg_cum_points[i] = week['cum_points'][i] / student_counts[i]
avg_cum_commits[i] = week['cum_commits'][i] / student_counts[i]
avg_cum_exercises[i] = week['cum_exercises'][i] / student_counts[i]
avg_cum_submissions[i] = week['cum_submissions'][i] / student_counts[i]
avg_point_data.append(avg_points)
avg_commit_data.append(avg_commits)
avg_exercise_data.append(avg_exercises)
avg_submission_data.append(avg_submissions)
avg_cum_point_data.append(avg_cum_points)
avg_cum_commit_data.append(avg_cum_commits)
avg_cum_exercise_data.append(avg_cum_exercises)
avg_cum_submission_data.append(avg_cum_submissions)
return avg_point_data, avg_commit_data, avg_exercise_data, avg_submission_data, avg_cum_point_data, avg_cum_commit_data, avg_cum_exercise_data, avg_cum_submission_data, student_counts
def fetch_history_data(prev_course_id):
index_name = "gitlab-course-{:d}-commit-data".format(prev_course_id)
points, commits, exercises, submissions, cum_points, cum_commits, cum_exercises, cum_submissions, student_counts = aggregate_history_data_from_index(index_name)
data_by_weeks = {}
week = 1
for commit_counts in cum_commits:
data_by_weeks[week] = {'avg_cum_commits': commit_counts}
data_by_weeks[week]['avg_cum_points'] = cum_points[week-1]
data_by_weeks[week]['avg_cum_exercises'] = cum_exercises[week-1]
data_by_weeks[week]['avg_cum_submissions'] = cum_submissions[week-1]
data_by_weeks[week]['avg_points'] = points[week-1]
data_by_weeks[week]['avg_commits'] = commits[week-1]
data_by_weeks[week]['avg_exercises'] = exercises[week-1]
data_by_weeks[week]['avg_submissions'] = submissions[week-1]
data_by_weeks[week]['student_counts'] = student_counts
week += 1
data_by_grade = {"0": {}, "1": {}, "2": {}, "3": {}, "4": {}, "5": {}}
for grade in data_by_grade.keys():
data_by_grade[grade]['student_count'] = student_counts[int(grade)]
data_by_grade[grade]['avg_points'] = [x[int(grade)] for x in points]
data_by_grade[grade]['avg_commits'] = [x[int(grade)] for x in commits]
data_by_grade[grade]['avg_exercises'] = [x[int(grade)] for x in exercises]
data_by_grade[grade]['avg_submissions'] = [x[int(grade)] for x in submissions]
data_by_grade[grade]['avg_cum_points'] = [x[int(grade)] for x in cum_points]
data_by_grade[grade]['avg_cum_commits'] = [x[int(grade)] for x in cum_commits]
data_by_grade[grade]['avg_cum_exercises'] = [x[int(grade)] for x in cum_exercises]
data_by_grade[grade]['avg_cum_submissions'] = [x[int(grade)] for x in cum_submissions]
final_data = json.dumps({"data_by_weeks": data_by_weeks, "data_by_grades": data_by_grade})
es_index_name = "gitlab-course-{:s}-aggregate-data".format(prev_course_id)
reply = write_to_elasticsearch(final_data, es_index_name, document_id=0)
print(reply)
def fetch_anonymized_course_data(course_id, plussa_api_key, plussa_api_url, gitlab_api_key, gitlab_api_url):
# TODO: Remove excess modules from Plussa data before writing into ES-cluster
# Fetch data from plussa into ElasticSearch cluster:
get_plussa_data(plussa_api_url, plussa_api_key, course_id)
index_name = "plussa-course-{:d}-students-anonymized".format(course_id)
students_reply = search_elasticsearch(index_name)
if students_reply == False:
return False
else:
students_reply = json.loads(students_reply)
all_commit_data = []
# Fetch commit data for each student that has given a research permission:
for hits in students_reply['hits']['hits']:
for student in hits['_source']['results']:
if "redacted" not in student['username']:
git_url = find_git_url(student, plussa_api_key)
print("Fetching commit data for git repo:", git_url)
student_module_tree = get_module_tree(git_url, gitlab_api_key, gitlab_api_url)
student["commits"] = parse_commits(student_module_tree)
all_commit_data.append(student)
print("Writing data to ES:")
index_name = "gitlab-course-{:d}-commit-data-anonymized".format(course_id)
reply = write_to_elasticsearch(json.dumps({"results": all_commit_data}), index_name, '_doc', 0)
print(reply)
def main():
SPRING_COURSE_ID = 30
SUMMER_COURSE_ID = 40
# Read access tokens, api names and URLs:
secrets = read_secrets()
# Get root api parameters:
plussa_api_url = secrets["plussa"]["API urls"]["plussa-root"]
plussa_api_key = secrets["plussa"]["API keys"]["plussa"]
gitlab_api_url = secrets["gitlab"]["API urls"]["gitlab-projects"]
gitlab_api_key = secrets["gitlab"]["API keys"]["gitlab"]
fetch_anonymized_course_data(SUMMER_COURSE_ID, plussa_api_key, plussa_api_url, gitlab_api_key, gitlab_api_url)
fetch_anonymized_course_data(SPRING_COURSE_ID, plussa_api_key, plussa_api_url, gitlab_api_key, gitlab_api_url)
fetch_history_data(SPRING_COURSE_ID)
main()
|
the-stack_0_20586 | # coding=utf-8
# Copyright 2019 The TensorFlow GAN Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TF-GAN: A Generative Adversarial Networks library for TensorFlow.
TF-GAN is a lightweight library for training and evaluating Generative
Adversarial Networks (GANs).
See the README on GitHub for further documentation.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import io
import sys
import unittest
from setuptools import find_packages
from setuptools import setup
from setuptools.command.install import install as InstallCommandBase
from setuptools.command.test import test as TestCommandBase
from setuptools.dist import Distribution
project_name = 'tensorflow-gan'
# Get version from version module.
with open('tensorflow_gan/python/version.py') as fp:
globals_dict = {}
exec(fp.read(), globals_dict) # pylint: disable=exec-used
__version__ = globals_dict['__version__']
version = __version__
class StderrWrapper(io.IOBase):
def write(self, *args, **kwargs):
return sys.stderr.write(*args, **kwargs)
def writeln(self, *args, **kwargs):
if args or kwargs:
sys.stderr.write(*args, **kwargs)
sys.stderr.write('\n')
class Test(TestCommandBase):
def run_tests(self):
# Import absl inside run, where dependencies have been loaded already.
from absl import app # pylint: disable=g-import-not-at-top
def main(_):
test_loader = unittest.TestLoader()
test_suite = test_loader.discover('tensorflow_gan', pattern='*_test.py')
stderr = StderrWrapper()
result = unittest.TextTestResult(stderr, descriptions=True, verbosity=2)
test_suite.run(result)
result.printErrors()
final_output = ('Tests run: {}. Errors: {} Failures: {}.'.format(
result.testsRun, len(result.errors), len(result.failures)))
header = '=' * len(final_output)
stderr.writeln(header)
stderr.writeln(final_output)
stderr.writeln(header)
if result.wasSuccessful():
return 0
else:
return 1
# Run inside absl.app.run to ensure flags parsing is done.
return app.run(main)
class BinaryDistribution(Distribution):
"""This class is needed in order to create OS specific wheels."""
def has_ext_modules(self):
return False
# TODO(joelshor): Maybe someday, when TF-GAN grows up, we can have our
# description be a `README.md` like `tensorflow_probability`.
DOCLINES = __doc__.split('\n')
setup(
name=project_name,
version=version,
description=DOCLINES[0],
long_description='\n'.join(DOCLINES[2:]),
author='Google Inc.',
author_email='[email protected]',
url='http://github.com/tensorflow/gan',
license='Apache 2.0',
packages=find_packages(),
zip_safe=False,
distclass=BinaryDistribution,
cmdclass={
'test': Test,
'pip_pkg': InstallCommandBase,
},
install_requires=[
'tensorflow_hub>=0.2',
'tensorflow_probability>=0.7',
],
extras_require={
'tf': ['tensorflow>=1.12'],
'tensorflow-datasets': ['tensorflow-datasets>=0.5.0'],
},
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
keywords='tensorflow GAN generative model machine learning',
)
|
the-stack_0_20589 | #!/usr/bin/env python
import argparse
import colorama
from .config import Config
from .run import test, build, package, clean
def build_handler(args):
if args.debug:
print('target:', args.target)
print('config:', args.file)
config = Config(args.file)
build(config, args.target)
def bump_handler(args):
if args.debug:
print('bump:', args.part)
print('config:', args.file)
config = Config(args.file)
config.bump_version(args.part)
config.save()
# do other stuff
build(config, 'release')
def package_handler(args):
if args.debug:
print('package')
print('config:', args.file)
config = Config(args.file)
package(config)
def test_handler(args):
if args.debug:
print('test')
print('config:', args.file)
config = Config(args.file)
test(config)
build(config, 'debug')
def clean_handler(args):
if args.debug:
print('clean')
print('config:', args.file)
config = Config(args.file)
clean(config)
def new_handler(args):
if args.debug:
print('new')
print('config:', args.file)
def init_handler(args):
if args.debug:
print('init')
print('config:', args.file)
def main():
colorama.init()
parser = argparse.ArgumentParser(description='Tony Build System')
parser.add_argument('-v', '--verbose', help='increase output verbosity', action='store_true')
parser.add_argument('-d', '--debug', help='print debug info', action='store_true')
parser.add_argument('-f', '--file', help='specify configuration file', default='project.toml')
parser.set_defaults(handler=lambda args: parser.print_help())
subparser = parser.add_subparsers()
build = subparser.add_parser('build', help='build current project')
build.add_argument('target', nargs='?', choices=['debug', 'release'], default='debug')
build.set_defaults(handler=build_handler)
bump = subparser.add_parser('bump', help='bump this shit')
bump.add_argument('part', nargs='?', choices=['major', 'minor', 'bugfix'], default='bugfix')
bump.set_defaults(handler=bump_handler)
package = subparser.add_parser('package', help='pack your project into archive')
package.set_defaults(handler=package_handler)
clean = subparser.add_parser('clean', help='clean your project')
clean.set_defaults(handler=clean_handler)
test = subparser.add_parser('test', help='test your shitty code')
test.set_defaults(handler=test_handler)
new = subparser.add_parser('new', help='create empty project')
new.add_argument('name', nargs=1)
new.set_defaults(handler=new_handler)
init = subparser.add_parser('init', help='initialize project in current directory')
init.add_argument('name', nargs='?', default=None)
init.set_defaults(handler=init_handler)
args = parser.parse_args()
if args.debug:
print('args:', args)
args.handler(args)
if __name__ == '__main__':
main()
|
the-stack_0_20590 | from xml.etree import ElementTree
from shapely.geometry import Polygon, Point
import json
poly_table = []
def kml2coord(filename):
data = open(filename)
tree = ElementTree.parse(data)
namespace = tree.getroot().tag[1:].split("}")[0]
placemarks = tree.findall(".//{%s}Placemark" % namespace)
for p in placemarks:
for d in p.findall(".//{%s}Data" % namespace):
if d.attrib.get('name') == 'OBJECTID':
name = d.find('.//{%s}value' % namespace).text
coord_text = p.find('.//{%s}coordinates' % namespace).text
coord_pairs = coord_text.split(' ')
coords = [z.split(',')[0:2] for z in coord_pairs]
for x in coords:
try:
poly_table.append([float(x[0]),float(x[1])])
except:
pass
return poly_table
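# Minimal usage sketch (hypothetical KML file name, not part of this module). Note that
# poly_table is module-level state, so repeated calls keep appending to the same list:
#
#     coords = kml2coord('districts.kml')   # -> [[lon, lat], [lon, lat], ...]
#     boundary = Polygon(coords)            # shapely objects are imported above
#     inside = boundary.contains(Point(lon, lat))  # lon/lat of the point of interest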
|
the-stack_0_20591 | #!/usr/bin/env python
import os
import time
import platform
import argparse
from cmd_helpers import *
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("buildtype", default="Debug", help="build type",
choices=["Debug", "Release"], nargs="?")
parser.add_argument("-r", "--rpi", help="Set resolution for Raspberry Pi",
action="store_true")
return parser.parse_args()
def main():
options = parse_args()
# Fetch system info
system, node, release, version, machine, processor = platform.uname()
build_dir = os.path.join("build", "%s.%s.%s" % (system, machine, options.buildtype))
os.chdir(build_dir)
install_folder = "install"
os.chdir(install_folder)
if (options.rpi):
check_run_cmd('tvservice', ['-e', 'DMT 16']) # change to 1024x768
time.sleep(1) # need a little bit of time for graphics to be stable
game_path = os.path.join(".", "game")
check_run_cmd(game_path)
if (options.rpi):
check_run_cmd("tvservice", ["-p"]) # change to prefered(default) mode
time.sleep(1)
check_run_cmd("fbset", ["-depth", "24"]) # hack to make screen visible
check_run_cmd("fbset", ["-depth", "32"])
if __name__ == "__main__":
main()
|
the-stack_0_20593 | import re
import time
from azureml.core import Workspace
from azureml.core.compute import AmlCompute
from azureml.core.compute import ComputeTarget
from .exceptions import AzureException
from a2ml.api.utils.decorators import error_handler, authenticated
from .credentials import Credentials
class AzureProject(object):
def __init__(self, ctx):
super(AzureProject, self).__init__()
self.ctx = ctx
self.credentials = Credentials(self.ctx).load()
@error_handler
@authenticated
def list(self):
workspaces = Workspace.list(
self.credentials.subscription_id,
auth=self.credentials.get_serviceprincipal_auth())
for project in iter(workspaces.keys()):
self.ctx.log(project)
return {'projects': workspaces.keys()}
@error_handler
@authenticated
def create(self, name):
name = self._get_name(name)
region = self.ctx.config.get('cluster/region', 'eastus2')
resource_group = self._get_resource_group(name)
self.ctx.log('Creating %s' % name)
self.ws = Workspace.create(
name=name,
subscription_id=self.credentials.subscription_id,
resource_group=resource_group,
create_resource_group=True,
location=region,
auth=self.credentials.get_serviceprincipal_auth())
self._select(name)
self.ctx.log('%s created' % name)
return {'created': name}
@error_handler
@authenticated
def delete(self, name):
name = self._get_name(name)
ws = Workspace.get(
name,
subscription_id=self.credentials.subscription_id,
auth=self.credentials.get_serviceprincipal_auth())
self.ctx.log('Deleting %s' % name)
ws.delete(delete_dependent_resources=True, no_wait=False)
self._select(None)
self.ctx.log('%s deleted' % name)
return {'deleted': name}
@error_handler
@authenticated
def select(self, name = None):
self._select(name)
self.ctx.log('Selected project %s' % name)
return {'selected': name}
@error_handler
@authenticated
def get_cluster_config(self, name, local_config = True, ws = None):
result = {
'name': self._fix_cluster_name(self.ctx.config.get('cluster/name', 'cpucluster'))
}
if local_config:
result.update({
'min_nodes': int(self.ctx.config.get('cluster/min_nodes',1)),
'max_nodes': int(self.ctx.config.get('cluster/max_nodes',4)),
'vm_size': self.ctx.config.get('cluster/type','STANDARD_D2_V2')
})
if self.ctx.config.get('cluster/idle_seconds_before_scaledown'):
result['idle_seconds_before_scaledown'] = self.ctx.config.get('cluster/idle_seconds_before_scaledown')
else:
if ws is None:
ws = self._get_ws(name=name)
if result['name'] in ws.compute_targets:
compute_target = ws.compute_targets[result['name']]
if compute_target and type(compute_target) is AmlCompute:
#scale_settings: {'minimum_node_count': 0, 'maximum_node_count': 4, 'idle_seconds_before_scaledown': 120}
ct_status = compute_target.get_status()
if ct_status:
result.update({
'min_nodes': ct_status.scale_settings.minimum_node_count,
'max_nodes': ct_status.scale_settings.maximum_node_count,
'vm_size': ct_status.vm_size,
'idle_seconds_before_scaledown': ct_status.scale_settings.idle_seconds_before_scaledown
})
return result
@error_handler
@authenticated
def update_cluster_config(self, name, params, ws=None, allow_create=True):
cluster_name = self._fix_cluster_name(self.ctx.config.get('cluster/name', 'cpucluster'))
if ws is None:
ws = self._get_ws(name=name)
if 'type' in params:
params['vm_size'] = params['type']
if cluster_name in ws.compute_targets:
compute_target = ws.compute_targets[cluster_name]
remote_cluster = self.get_cluster_config(name=name, local_config=False, ws=ws)
update_properties = {}
props_to_update = ['min_nodes', 'max_nodes', 'vm_size', 'idle_seconds_before_scaledown']
for prop in props_to_update:
if params.get(prop) is not None and remote_cluster.get(prop, params.get(prop)) != params.get(prop):
update_properties[prop] = params.get(prop)
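            # Changing the VM size cannot be applied in place: the existing compute target is
            # deleted and recreated below, while the remaining properties are updated directly.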
if update_properties.get('vm_size'):
self.ctx.log('Delete existing AML compute context, since cluster type has been changed to %s.'%(update_properties.get('vm_size')))
compute_target.delete()
elif update_properties:
self.ctx.log('Update compute target %s: %s' % (cluster_name, update_properties))
compute_target.update(**update_properties)
try:
compute_target.wait_for_completion(show_output = True)
except Exception as e:
self.ctx.log_debug(str(e))
if not update_properties.get('vm_size'):
return cluster_name
if not allow_create:
raise AzureException("Compute target %s does not exist."%cluster_name)
self.ctx.log('Creating new AML compute context %s...'%cluster_name)
provisioning_config = AmlCompute.provisioning_configuration(
vm_size=params.get('vm_size'), min_nodes=params.get('min_nodes'),
max_nodes=params.get('max_nodes'), idle_seconds_before_scaledown=params.get('idle_seconds_before_scaledown'))
compute_target = ComputeTarget.create(
ws, cluster_name, provisioning_config)
compute_target.wait_for_completion(show_output = True)
return cluster_name
@staticmethod
def _fix_cluster_name(name):
# Name can include letters, digits and dashes.
# It must start with a letter, end with a letter or digit,
# and be between 2 and 16 characters in length.
#TODO check for all conditions
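        # Hypothetical example of the fix-up below: "1my cluster_2" -> "Cmy-cluster-C"
        # (runs of non-word characters and underscores become dashes, the name is truncated
        # to 16 characters, and a leading/trailing digit is replaced with 'C').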
name = re.sub(r'\W+', '-', name)
name = name.replace('_','-')[:16]
if name[0].isdigit():
test = list(name)
test[0] = 'C'
name = ''.join(test)
if name[-1].isdigit():
test = list(name)
test[-1] = 'C'
name = ''.join(test)
return name
def _select(self, name):
self.ctx.config.set('name', name)
self.ctx.config.write()
def _get_name(self, name = None):
if name is None:
name = self.ctx.config.get('name', None)
if name is None:
raise AzureException('Please provide project name...')
return name
def _get_ws(self, name = None, create_if_not_exist = False):
name = self._get_name(name)
nTry = 0
while True:
try:
self.ws = Workspace.get(
name,
subscription_id=self.credentials.subscription_id,
auth=self.credentials.get_serviceprincipal_auth(),
resource_group=self._get_resource_group(name)
)
break
except Exception as e:
message = str(e)
if ('Workspaces not found' in message or 'No workspaces found' in message) and create_if_not_exist:
self.create(name)
break
elif 'invalid_client' in message and nTry < 20:
self.ctx.log('Workspace.get failed with authentication error. Retry.')
nTry += 1
time.sleep(20)
else:
raise
return self.ws
def _get_resource_group(self, name):
resource_group = self.ctx.config.get('resource_group')
if not resource_group:
if name == "a2mlworkspacedev":
resource_group = "a2mldev"
elif name == "a2mlworkspacestaging":
resource_group = "a2mlstaging"
elif name == "a2mlworkspaceprod":
resource_group = "a2mlprod"
else:
resource_group = name+'-resources'
return resource_group
|
the-stack_0_20597 | from ..exceptions.log_msg import InfoMsg
from ..models import db
from ..models.flask_state_host import FlaskStateHost
from ..utils.date import get_current_ms, get_query_ms
from ..utils.logger import logger
ONE_DAY = 1 # Days
THIRTY_DAY = 30  # 30 Days
FIVE_MINUTES_MILLISECONDS = 300000 # Five minutes milliseconds
def retrieve_host_status(days) -> list:
"""
    Query the host status records within the given number of days, newest first
"""
target_time = get_current_ms() - get_query_ms(days)
result = (
FlaskStateHost.query.with_entities(
FlaskStateHost.cpu,
FlaskStateHost.memory,
FlaskStateHost.load_avg,
FlaskStateHost.disk_usage,
FlaskStateHost.ts,
)
.filter(FlaskStateHost.ts > target_time)
.order_by(FlaskStateHost.ts.desc())
.all()
)
return result
def retrieve_latest_host_status() -> dict:
"""
Query the latest status
"""
result = FlaskStateHost.query.with_entities(FlaskStateHost.__table__).order_by(FlaskStateHost.ts.desc()).first()
result = result._asdict() if result else {}
return result
def create_host_status(kwargs):
"""
Create a new record
"""
try:
flask_state_host = FlaskStateHost(**kwargs)
db.session.add(flask_state_host)
db.session.commit()
logger.info(InfoMsg.INSERT_SUCCESS.get_msg())
except Exception as e:
db.session.rollback()
raise e
def delete_thirty_days_status():
"""
    Delete records older than thirty days
"""
try:
        target_time = get_current_ms() - get_query_ms(THIRTY_DAY)
result = FlaskStateHost.query.filter(FlaskStateHost.ts < target_time).delete(synchronize_session=False)
if result:
db.session.commit()
logger.info(InfoMsg.DELETE_SUCCESS.get_msg())
except Exception as e:
db.session.rollback()
raise e
def retrieve_host_status_yesterday() -> FlaskStateHost:
"""
    Return the host status record closest to this time yesterday
"""
yesterday_ms = get_current_ms() - get_query_ms(ONE_DAY)
delta_ms = yesterday_ms - FIVE_MINUTES_MILLISECONDS
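    # Look only at the five-minute window ending exactly 24 hours ago and take the newest record inside it.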
yesterday_flask_state_host = (
FlaskStateHost.query.filter(FlaskStateHost.ts < yesterday_ms, FlaskStateHost.ts > delta_ms)
.order_by(FlaskStateHost.ts.desc())
.first()
)
return yesterday_flask_state_host
|
the-stack_0_20598 | import json
import os
import shutil
import numpy as np
import tensorflow as tf
from tensorflow.contrib.seq2seq import sequence_loss
from gpt.src import model
from gpt.src import beamsearch
import tensorflow.contrib.slim as slim
from datetime import timedelta
import time
from tensorflow.python.keras.preprocessing.sequence import pad_sequences
from gpt.src.single_gpu_serving import beam_search_generator
from utils.file_api import read_file_lines,write_file_lines
from gpt.src.model import positions_for,Encoder,Decoder
from utils.cat_files import cat_files
from gpt.config import *
class NMT_GPT():
def __init__(self,input_num,config_path):
self.hparams = model.default_hparams()
self.config_path = config_path
with open(os.path.join(self.config_path, 'hparams.json')) as f:
self.hparams.override_from_dict(json.load(f))
self.input_num=input_num
self.text_enc = encoder.get_encoder(self.config_path)
self.sos_id=self.text_enc.encode('\t')[0]
self.eos_id=self.text_enc.encode('\n')[0]
def def_placeholder_and_components(self):
# embeddings:
with tf.variable_scope('encoder'):
with tf.variable_scope('model'):
self.wpe = tf.get_variable('wpe', [self.hparams.n_ctx, self.hparams.n_embd],
initializer=tf.random_normal_initializer(stddev=0.01))
self.wte = tf.get_variable('wte', [self.hparams.n_vocab, self.hparams.n_embd],
initializer=tf.random_normal_initializer(stddev=0.02))
self.encoder = Encoder('encoder', self.hparams)
self.decoder = Decoder('encoder', self.hparams)
self.inputs = [tf.placeholder(tf.int32, [None, None], name='input_%d' % i) for i in range(0, self.input_num)]
self.input_lens = [tf.placeholder(tf.int32, [None, ], name='input_len_%d' % i) for i in
range(0, self.input_num)]
self.target_in = tf.placeholder(tf.int32, [None, None], name='target_in')
self.target_out = tf.placeholder(tf.int32, [None, None], name='target_out')
self.target_len = tf.placeholder(tf.int32, [None], name='target_len')
def build_training_model(self):
self.def_placeholder_and_components()
emb_out=[]
enc_h_out=[]
past_for_decoder=[]
for i in range(0,self.input_num):
past_length=0
h = tf.gather(self.wte, self.inputs[i]) + tf.gather(self.wpe, positions_for(self.inputs[i], past_length))
emb_out.append(h)
presents, h_enc=self.encoder.encode(h,self.input_lens[i])
enc_h_out.append(h_enc)
past_for_decoder.append(presents)
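        # Each input segment was encoded independently above; the decoder is given the cached
        # activations ("presents") and hidden states from all of them to predict the target tokens.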
all_logits=self.decoder.decode_all(tokens=self.target_in,past_list=past_for_decoder,enc_h_list=enc_h_out)['logits']
with tf.name_scope('loss'):
batch_max_seq_len = tf.shape(self.target_in)[1]
target_mask = tf.sequence_mask(self.target_len, maxlen=batch_max_seq_len, dtype=tf.float32)
cost = sequence_loss(logits=all_logits, targets=self.target_out,
weights=target_mask)
return cost
def build_beam_search_graph(self, beam_size, batch_size, max_decode_length, decode_alpha=0.6):
self.def_placeholder_and_components()
emb_out = []
enc_h_out = []
past_for_decoder = []
for i in range(0, self.input_num):
past_length = 0
h = tf.gather(self.wte, self.inputs[i]) + tf.gather(self.wpe, positions_for(self.inputs[i], past_length))
emb_out.append(h)
presents, h_enc = self.encoder.encode(h, self.input_lens[i])
enc_h_out.append(h_enc)
past_for_decoder.append(presents)
past_length = 0 if enc_h_out[0] is None else tf.shape(enc_h_out[0])[-2]
self.decoder.sef_var_for_beam_search(past_length,enc_h_out,beam_size=beam_size)
with tf.name_scope('beam_search'):
init_seq = tf.fill(dims=(batch_size, 1), value=self.sos_id)
seqs, scores = beamsearch.create_inference_graph(init_seqs=init_seq, state=past_for_decoder,
step_fn=self.decoder.decode_one_step, hparams=self.hparams,
decode_length=max_decode_length,
batch_size=batch_size, beam_size=beam_size,
decode_alpha=decode_alpha, eos_id=self.eos_id,
ensemble=False, concat_state_dim=None)
return seqs, scores
class NMT_GPT_Trainer():
def __init__(self,model_fn:NMT_GPT):
self.model_fn=model_fn
self.learning_rate=1e-4
self.sep_flag='\t'
self.graph=tf.Graph()
self.vars_for_infer = []
self.vars_for_train = []
self.losses=[]
self.only_predict_target=True
tf.logging.set_verbosity(tf.logging.INFO)
self.is_hierarchical=True
self.hier_enc_end_token=self.model_fn.text_enc.encode('\t')
def average_gradients(self,tower_grads):
average_grads = []
for grad_and_vars in zip(*tower_grads):
grads = [tf.expand_dims(g, 0) for g, _ in grad_and_vars]
grads = tf.concat(grads, 0)
grad = tf.reduce_mean(grads, 0)
grad_and_var = (grad, grad_and_vars[0][1])
# [(grad0, var0),(grad1, var1),...]
average_grads.append(grad_and_var)
return average_grads
def build_graph(self):
with self.graph.as_default():
self.opt = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
self.tower_grads = []
loss = self.model_fn.build_training_model()
self.losses.append(loss)
grads = self.opt.compute_gradients(loss)
tvs = tf.trainable_variables()
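            # Gradient accumulation: `zero_ops` clears the accumulators, `accum_grad_ops` adds each
            # mini-batch's gradients into `accum_vars`, and `train_step` applies the accumulated
            # gradients divided by `accum_steps` (the number of mini-batches in the logical batch).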
self.accum_vars = [tf.Variable(tf.zeros_like(tv.initialized_value()), trainable=False)
for tv in
tvs]
self.zero_ops = [tv.assign(tf.zeros_like(tv)) for tv in self.accum_vars]
self.accum_grad_ops = [self.accum_vars[j].assign_add(gv[0]) for j, gv in
enumerate(grads) if gv[0] is not None]
self.tower_grads.append([(self.accum_vars[j], gv[1]) for j, gv in enumerate(grads) ])
grads = self.average_gradients(self.tower_grads)
with tf.device('/gpu:0'):
self.accum_steps=tf.placeholder(tf.float32, [], name='accum_stpes')
self.train_step = self.opt.apply_gradients([(g/self.accum_steps, v) for g,v in grads])
self.avg_loss=tf.stack(self.losses,axis=0)
self.avg_loss=tf.reduce_mean(self.avg_loss)
def create_session_init_and_print_all_trainable_vars(self, max_to_save, ori_gpt_model_path=None):
# Print parameters
with self.graph.as_default():
all_weights = {v.name: v for v in tf.trainable_variables()}
total_size = 0
for v_name in sorted(list(all_weights)):
v = all_weights[v_name]
tf.logging.info("%s\tshape %s", v.name[:-2].ljust(80),
str(v.shape).ljust(20))
v_size = np.prod(np.array(v.shape.as_list())).tolist()
total_size += v_size
tf.logging.info("Total trainable variables size: %d", total_size)
all_var_list = slim.get_variables_to_restore()
for v in all_var_list:
if 'Adam' in v.name:
self.vars_for_train.append(v)
elif v.name.startswith('beta'):
self.vars_for_train.append(v)
elif v.name.startswith('parallel'):
pass
elif v.name.startswith('Variable'):
pass
else:
self.vars_for_infer.append(v)
if len(self.vars_for_infer) > 0:
self.saver_infer = tf.train.Saver(self.vars_for_infer, max_to_keep=max_to_save)
if len(self.vars_for_train) > 0:
self.saver_train = tf.train.Saver(self.vars_for_train, max_to_keep=max_to_save)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(graph=self.graph, config=config)
init_op = tf.global_variables_initializer()
sess.run(init_op)
restore_ops=[]
if ori_gpt_model_path is not None:
ckpt = tf.train.latest_checkpoint(ori_gpt_model_path)
tf.logging.info("Loading %s" % ckpt)
var_list = tf.train.list_variables(ckpt)
values = {}
reader = tf.train.load_checkpoint(ckpt)
for (name, shape) in var_list:
if not name.startswith('model/'): # ignore global_step
continue
tensor = reader.get_tensor(name)
values[name] = tensor
for v in self.vars_for_infer:
#print(v.name)
tmp = '/'.join(v.name.split('/')[1:])
v_name = tmp.split(':')[0]
if v_name!='model/sen_attn_w':
op = tf.assign(v, values[v_name])
restore_ops.append(op)
sess.run(restore_ops)
return sess
def padding_batch(self, input_list):
in_len = [len(i) for i in input_list]
new_in = pad_sequences(input_list, padding='post')
return new_in, in_len
def train_or_eval_batch_with_raw_text(self, sess, input_text, mini_batch, is_train=True,
run_options=None):
batch_size = len(input_text)
batch_input = {}
batch_target_in = []
batch_target_out =[]
batch_target_len =[]
batch_input_len = {}
for text in input_text:
strs=text.split(self.sep_flag)
inputs=strs[:-1]
target=strs[-1]
if self.is_hierarchical:
inputs_tokens = [self.model_fn.text_enc.encode(item)+self.hier_enc_end_token for item in inputs]
else:
inputs_tokens = [self.model_fn.text_enc.encode(item) for item in inputs]
target_tokens=self.model_fn.text_enc.encode(target)
for i in range(0,len(inputs_tokens)):
if i not in batch_input:
batch_input[i]=[]
batch_input[i].append(inputs_tokens[i])
if i not in batch_input_len:
batch_input_len[i]=[len(inputs_tokens[i])]
else:
batch_input_len[i].append(len(inputs_tokens[i]))
tar_in=[self.model_fn.sos_id]+target_tokens
tar_out=target_tokens+[self.model_fn.eos_id]
batch_target_len.append(len(tar_out))
batch_target_in.append(tar_in)
batch_target_out.append(tar_out)
# gradient accum and update
#assert batch_size%mini_batch==0
with self.graph.as_default():
data_num = batch_size
losses = []
low = 0
if is_train:
sess.run(self.zero_ops)
while low < data_num:
n_samples = min([mini_batch, data_num - low])
mini_batch_input = [batch_input[i][low:low + n_samples] for i in range(0,len(batch_input))]
mini_batch_input_len = [batch_input_len[i][low:low + n_samples] for i in range(0, len(batch_input))]
mini_batch_target_in = batch_target_in[low:low + n_samples]
mini_batch_target_out = batch_target_out[low:low + n_samples]
mini_batch_target_len = batch_target_len[low:low + n_samples]
mini_batch_target_in_padded, _ = self.padding_batch(mini_batch_target_in)
mini_batch_target_out_padded, _ = self.padding_batch(mini_batch_target_out)
feed_dict={}
for i in range(0,self.model_fn.input_num):
p,_ = self.padding_batch(mini_batch_input[i])
feed_dict[self.model_fn.inputs[i]]=p
feed_dict[self.model_fn.input_lens[i]]=mini_batch_input_len[i]
feed_dict[self.model_fn.target_in] = mini_batch_target_in_padded
feed_dict[self.model_fn.target_out] = mini_batch_target_out_padded
feed_dict[self.model_fn.target_len] = mini_batch_target_len
if is_train:
result = sess.run([self.accum_grad_ops, self.avg_loss], feed_dict=feed_dict, options=run_options)
loss=result[-1]
else:
loss = sess.run(self.avg_loss, feed_dict=feed_dict)
low += n_samples
losses.append(loss*n_samples)
if is_train:
sess.run(self.train_step,feed_dict={self.accum_steps:batch_size/mini_batch})
return sum(losses) / batch_size
def training(self, eos_id=None, train_corpus='./story/story.train', dev_corpus='./story/story.dev',
init_step_num=1, learning_rate=1e-4, batch_size=64, mini_batch=16, total_steps=100000,
train_ckpt_path='./models/117M/model_train_1/', infer_ckpt_path='./models/117M/',
eval_per_n_steps=1, max_to_save=3, early_stop_steps=6000,append_eos=True,ori_gpt_model_path=None):
self.learning_rate=learning_rate
sess=self.create_session_init_and_print_all_trainable_vars(max_to_save,ori_gpt_model_path=ori_gpt_model_path)
if ori_gpt_model_path is None:
self.restore_model_and_init(sess, infer_ckpt_path, train_ckpt_path)
train = load_corpus(train_corpus)
# train=[' '.join(['you' for j in range(0,512)]) for i in range(0,512)]
dev = load_corpus(dev_corpus)
step = init_step_num
low = 0
epoch_num = 1
train_data_num = len(train)
eval_data_num = len(dev)
last_improvement_step = init_step_num
best_loss = 100000
saved_steps = []
tf.logging.info('start training...')
self.graph.finalize()
start_time = time.time()
while step < total_steps:
run_options = tf.RunOptions(report_tensor_allocations_upon_oom=True)
n_samples = min([batch_size, train_data_num - low])
train_loss = self.train_or_eval_batch_with_raw_text(sess, train[low:low + n_samples],
run_options=run_options,
mini_batch=mini_batch,
)
###eval:
if step % eval_per_n_steps == 0:
eval_low = 0
eval_losses = []
while eval_low < eval_data_num:
eval_n_samples = min([batch_size, eval_data_num - eval_low])
eval_losses.append(self.train_or_eval_batch_with_raw_text(
sess, dev[eval_low:eval_low + eval_n_samples], is_train=False, mini_batch=mini_batch))
eval_low += eval_n_samples
eval_avg_loss = sum(eval_losses) / len(eval_losses)
time_dif = get_time_dif(start_time)
if eval_avg_loss < best_loss:
best_loss = eval_avg_loss
last_improvement_step = step
tf.logging.info('save step %d', last_improvement_step)
self.save_model(sess, infer_ckpt_path, train_ckpt_path, step=step)
saved_steps.append(last_improvement_step)
tf.logging.info("%s: step %d: train loss %f; eval loss %f *", time_dif, step, train_loss,
eval_avg_loss)
if len(saved_steps) > max_to_save:
saved_steps = saved_steps[1:]
else:
tf.logging.info("%s: step %d: train loss %f; eval loss %f", time_dif, step, train_loss,
eval_avg_loss)
if step - last_improvement_step > early_stop_steps:
tf.logging.info("early stopping...")
break
###
step += 1
low += n_samples
if low == train_data_num:
low = 0
epoch_num += 1
sess.close()
print('all work has finished')
def restore_model_and_init(self, sess, ckpt_for_infer, ckpt_for_train):
with self.graph.as_default():
if ckpt_for_infer is not None:
ckpt = tf.train.latest_checkpoint(ckpt_for_infer)
if ckpt is not None:
self.saver_infer.restore(sess, ckpt)
tf.logging.info('restored inferring params from %s',ckpt)
if ckpt_for_train is not None:
ckpt = tf.train.latest_checkpoint(ckpt_for_train)
if ckpt is not None:
self.saver_train.restore(sess, ckpt)
tf.logging.info('restored training params from %s', ckpt)
def save_model(self, sess, infer_ckpt_path, train_ckpt_path, step):
with self.graph.as_default():
if infer_ckpt_path is not None and len(self.vars_for_infer) > 0:
self.saver_infer.save(sess, os.path.join(infer_ckpt_path,'model'), global_step=step)
if train_ckpt_path is not None and len(self.vars_for_train) > 0:
self.saver_train.save(sess, os.path.join(train_ckpt_path,'model'), global_step=step)
def padding_for_target_mask(self,mask_list,input_len):
batch_size= len(mask_list)
assert batch_size==len(input_len)
max_len=max(input_len)
for i in range(0,batch_size):
l=input_len[i]
mask_list[i]=mask_list[i]+[0.0]*(max_len-l)
def load_corpus(path):
lines = []
with open(path, 'r', encoding='utf-8') as f:
for line in f:
lines.append(line.strip())
return lines
def get_time_dif(start_time):
end_time = time.time()
time_dif = end_time - start_time
return timedelta(seconds=int(round(time_dif)))
def test(config_path,input_num,model_dir='./models/ori_rule/formality_infer/',input_path='../training_data/dif_models/eval.ori_rule',
output_path='../evaluate/gyafc_model_outputs/fr_out/formal.gpt.cat_ori_rule.old',beam_size=4,max_dec_len=60,dec_alpha=0.6):
gpt2 = NMT_GPT(config_path=config_path,input_num=input_num)
generator = beam_search_generator(gpt2, beam_size=beam_size,
model_directory=model_dir, max_dec_len=max_dec_len,
dec_alpha=dec_alpha)
sess=generator.build_graph_and_restore(eos_id=gpt2.text_enc.encode('\n')[0])
lines=read_file_lines(input_path)
result=[]
for line in lines:
result.append(generator.generate(sess,line,multi_pls=True))
print(line+' ||| '+result[-1].strip())
sess.close()
write_file_lines(output_path, result)
def train(config_path,input_num,ori_gpt_model=None,sep_flag='\t',
train_corpus='../training_data/preprocessed/Family_Relationships/train.ori.txt',
dev_corpus='../training_data/preprocessed/Family_Relationships/val.ori.txt',
infer_ckpt_path='./models/ori_data_fr/formality_infer/',
train_ckpt_path='./models/ori_data_fr/formality_train/'):
gpt2 = NMT_GPT(input_num,config_path)
trainer = NMT_GPT_Trainer(gpt2)
trainer.build_graph()
trainer.sep_flag=sep_flag
trainer.training(train_corpus=train_corpus,
dev_corpus=dev_corpus,
infer_ckpt_path=infer_ckpt_path, train_ckpt_path=train_ckpt_path,
learning_rate=1e-4, init_step_num=1,
batch_size=128, mini_batch=16,
eval_per_n_steps=100,
total_steps=3000,
early_stop_steps=200,
max_to_save=2,
append_eos=True,
eos_id=gpt2.text_enc.encode('\n')[0],ori_gpt_model_path=ori_gpt_model)
def HA(domain='fr',max_len_limit=220,only_test=False):
methods = ['ori', 'rule']
model_path='./models_hie_'+domain+'/'+'_'.join(methods)
init_model_path = './models/formality_infer'
if not os.path.exists('./models_hie_'+domain):
os.mkdir('./models_hie_'+domain)
if not os.path.exists(model_path):
os.mkdir(model_path)
os.mkdir(model_path+'/formality_train')
shutil.copytree(init_model_path, model_path+'/formality_infer')
data_path = '../training_data/dif_models_'+domain+'/'
cat_files([data_path + 'informal.train.'+m for m in methods]+ [ data_path + 'formal.train.rule', ],
data_path + 'train.'+'_'.join(methods),
tokenizer=text_enc, max_len=max_len_limit)
cat_files([data_path + 'informal.val.' + m for m in methods] + [data_path + 'formal.val.rule', ],
data_path + 'val.' + '_'.join(methods),
tokenizer=text_enc, max_len=max_len_limit)
lp = cat_files([data_path + 'informal.test.' + m for m in methods],
data_path + 'eval.' + '_'.join(methods),
tokenizer=text_enc, max_len=max_len_limit)
if lp:
        print('_'.join(methods)+' data dropped')
if not only_test:
train(config_path=config_path,input_num=len(methods),sep_flag='\t', ori_gpt_model=init_model_path,
train_corpus=data_path + 'train.'+'_'.join(methods),
dev_corpus=data_path + 'val.'+'_'.join(methods),
infer_ckpt_path=model_path+'/formality_infer',
train_ckpt_path=model_path+'/formality_train')
test(config_path=config_path,input_num=len(methods),
model_dir=model_path+'/formality_infer',
input_path=data_path + 'eval.'+'_'.join(methods),
output_path='../evaluate/gyafc_model_outputs/' + domain + '_out/formal.gpt.hie'+'_'.join(methods))
|
the-stack_0_20599 | # Copyright (C) 2020 GreenWaves Technologies, SAS
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from collections import namedtuple
from .utils import at_bits, at_q
from .cnn_convolution_pool_relu import gen_activation_op
GEN_MATADD = "CNN_MatAdd"
GEN_MATADDDYN = "CNN_MatAddDynAdjust"
GEN_MATSCALE = "CNN_MatScale"
MatrixAddATParam = namedtuple('MatrixAddATParam', [
"MatrixAddOper"
])
def gen_matrixadd_at_params(_):
return MatrixAddATParam(
MatrixAddOper="KOP_MATADD"
)
def gen_at_matrixadd(code_block, name, in_q1, in_q2, out_q,
in_dim, out_dim, at_matrixadd, gen_ctrl=None, at_ver=3):
if gen_ctrl is None:
gen_ctrl = "0"
else:
raise NotImplementedError("genctrl is not yet implemented")
if at_ver < 3:
code_block.write('{}("{}", {}, {}, {}, {}, 1, 1, 1, {}, {}, {}, {}, {});',
GEN_MATADD, name, gen_ctrl,
at_bits(in_q1), at_bits(in_q2), at_bits(
out_q), in_dim.shape[0], out_dim.shape[0],
in_dim.shape[1], in_dim.shape[2], at_matrixadd.MatrixAddOper)
else:
code_block.write('{}("{}", {}, {}, {}, {}, {}, {}, {}, 1, 1, 1, {}, {}, {}, {}, {});',
GEN_MATADD, name, gen_ctrl,
at_bits(in_q1), at_bits(in_q2), at_bits(out_q),
in_q1.q, in_q2.q, out_q.q, in_dim.shape[0], out_dim.shape[0],
in_dim.shape[1], in_dim.shape[2], at_matrixadd.MatrixAddOper)
# pylint: disable=too-many-arguments
def gen_matrixadddyn_at_params(_):
return MatrixAddATParam(
MatrixAddOper="KOP_MATADD_DYNADJUST"
)
def gen_at_matrixadddyn(code_block, name, in_q1, in_q2, out_q,
in_dim, out_dim, at_matrixadd, gen_ctrl=None):
if gen_ctrl is None:
gen_ctrl = "0"
else:
raise NotImplementedError("genctrl is not yet implemented")
code_block.write('{}("{}", {}, {}, {}, {}, {}, {}, {}, 1, 1, 1, {}, {}, {}, {}, {});',
GEN_MATADDDYN, name, gen_ctrl,
at_bits(in_q1), at_bits(in_q2), at_bits(out_q),
in_q1.q, in_q2.q, out_q.q,
in_dim.shape[0], out_dim.shape[0],
in_dim.shape[1], in_dim.shape[2], at_matrixadd.MatrixAddOper)
# pylint: disable=too-many-arguments
MatScaleATParam = namedtuple('MatScaleATParam', [
"ScaleOper",
"ReLUOper"
])
def gen_matscale_at_params(params):
# KOP_MATSCALE_VECTOR, KOP_MATSCALE_SCALAR or KOP_MATSCALE_VECTOR_SCALAR
if params.fusion_type == "vec_scalar":
ms_op = 'KOP_MATSCALE_VECTOR_SCALAR'
elif params.fusion_type == "vector":
ms_op = 'KOP_MATSCALE_VECTOR'
elif params.fusion_type == "scalar":
ms_op = 'KOP_MATSCALE_SCALAR'
else:
raise NotImplementedError("unknown fusion type %s" % params.fusion_type)
return MatScaleATParam(
ScaleOper=ms_op,
ReLUOper=gen_activation_op(params.activation)
)
def gen_at_matscale(code_block, name, other_q, vector_q, scalar_q, out_q,
in_dim, out_dim, at_matscale, gen_ctrl=None):
if gen_ctrl is None:
gen_ctrl = "0"
else:
raise NotImplementedError("genctrl is not yet implemented")
code_block.write('{}("{}", {}, {}, {}, {}, {}, {}, {}, {}, {}, 1, 1, 1, 1, {}, {}, {}, {}, {}, {});',
GEN_MATSCALE, name, gen_ctrl,
at_bits(other_q), at_bits(vector_q), at_bits(scalar_q), at_bits(out_q),
at_q(other_q), at_q(vector_q), at_q(scalar_q), at_q(out_q),
in_dim.shape[0], out_dim.shape[0],
in_dim.shape[2], in_dim.shape[1], at_matscale.ScaleOper, at_matscale.ReLUOper) |
the-stack_0_20600 | # Game of Nim
# Developed by
# Compatible with Windows only.
import functions # Helper function library
import consts # Constant values
from game import Game # Game class
# Object that stores the current match state.
game = Game()
# Main program loop.
# If you need to quit the program before the end of the round, press CTRL+C
while True:
    # Clear the screen completely
    functions.clearScreen()
    # If an error happened in the previous iteration, display it
    functions.showError()
    # Display the program header
    functions.showHeader()
    # Display the straws available in the game, or the result of the previous round.
    game.showStraws()
    functions.horizontalLine()
    try :
        if(game.availablePlays() > 0) :
            if(game.computerPlay == False):
                game.waitPlayerPlay()
                game.computerPlay = True # Computer plays next
            else :
                game.waitComputerPlay()
                game.computerPlay = False # Player plays next
        else :
            # End of the round. Ask the player whether they want to play again.
            game.showPlayAgainInput()
    except ValueError as e :
        # Record the error so it is shown on the next iteration
        functions.registerError(e)
|
the-stack_0_20601 | __all__ = ["ProcessState", "TaskWindow"]
import time
from math import sqrt
from typing import Dict, Iterable, List, Tuple
from uuid import UUID, uuid4
import numpy as np
from PySide2.QtCore import Qt, Signal
from PySide2.QtWidgets import (QComboBox, QDialog, QGridLayout, QLabel,
QMessageBox, QProgressBar, QPushButton,
QSpinBox, QTableWidget, QWidget)
from QGrain.algorithms import DistributionType
from QGrain.models.FittingResult import FittingResult
from QGrain.models.SampleDataset import SampleDataset
from QGrain.resolvers.HeadlessResolver import FittingTask
from QGrain.resolvers.MultiprocessingResolver import (MultiProcessingResolver,
ProcessState)
from QGrain.ui.AlgorithmSettingWidget import AlgorithmSettingWidget
class TaskWindow(QDialog):
task_generated_signal = Signal(list)
fitting_started_signal = Signal()
fitting_finished_signal = Signal(list)
def __init__(self, parent=None,
multiprocessing_resolver: MultiProcessingResolver = None):
super().__init__(parent=parent)
self.multiprocessing_resolver = multiprocessing_resolver
self.grain_size_data = None # type: SampleDataset
self.running_flag = False
self.tasks = None
self.states = None
self.succeeded_results = None
self.staging_tasks = None
# to calculate the residual time
self.task_start_time = None
self.task_accumulative_time = 0.0
self.init_ui()
def init_ui(self):
self.main_layout = QGridLayout(self)
self.task_initialization_label = QLabel(self.tr("Task Initialization:"))
self.task_initialization_label.setStyleSheet("QLabel {font: bold;}")
self.main_layout.addWidget(self.task_initialization_label, 0, 0)
self.sample_from_label = QLabel(self.tr("From"))
self.sample_from_label.setToolTip(self.tr("Select the first sample you want to perform."))
self.start_sample_combo_box = QComboBox()
self.sample_to_label = QLabel(self.tr("To"))
self.sample_to_label.setToolTip(self.tr("Select the last sample you want to perform."))
self.end_sample_combo_box = QComboBox()
self.main_layout.addWidget(self.sample_from_label, 1, 0)
self.main_layout.addWidget(self.start_sample_combo_box, 1, 1)
self.main_layout.addWidget(self.sample_to_label, 2, 0)
self.main_layout.addWidget(self.end_sample_combo_box, 2, 1)
self.interval_label = QLabel(self.tr("Interval"))
self.interval_label.setToolTip(self.tr("Select the interval of each sample you want to perform."))
self.interval_input = QSpinBox()
self.interval_input.setRange(0, 9999)
self.main_layout.addWidget(self.interval_label, 3, 0)
self.main_layout.addWidget(self.interval_input, 3, 1)
self.minimum_component_number_label = QLabel(self.tr("Minimum Component Number"))
self.minimum_component_number_label.setToolTip(self.tr("Select the minimum component number you want to perform."))
self.minimum_component_number = QSpinBox()
self.minimum_component_number.setRange(1, 10)
self.maximum_component_number_label = QLabel(self.tr("Maximum Component Number"))
self.maximum_component_number_label.setToolTip(self.tr("Select the maximum component number you want to perform."))
self.maximum_component_number = QSpinBox()
self.maximum_component_number.setRange(1, 10)
self.main_layout.addWidget(self.minimum_component_number_label, 4, 0)
self.main_layout.addWidget(self.minimum_component_number, 4, 1)
self.main_layout.addWidget(self.maximum_component_number_label, 5, 0)
self.main_layout.addWidget(self.maximum_component_number, 5, 1)
self.distribution_type_label = QLabel(self.tr("Distribution Type"))
self.distribution_type_label.setToolTip(self.tr("Select the base distribution function of each component."))
self.distribution_type_combo_box = QComboBox()
self.distribution_type_options = {self.tr("Normal"): DistributionType.Normal,
self.tr("Weibull"): DistributionType.Weibull,
self.tr("Gen. Weibull"): DistributionType.GeneralWeibull}
self.distribution_type_combo_box.addItems(self.distribution_type_options.keys())
self.distribution_type_combo_box.setCurrentIndex(2)
self.main_layout.addWidget(self.distribution_type_label, 6, 0)
self.main_layout.addWidget(self.distribution_type_combo_box, 6, 1)
self.algorithm_setting_widget = AlgorithmSettingWidget()
self.algorithm_setting_widget.main_layout.setContentsMargins(0, 0, 0, 0)
self.main_layout.addWidget(self.algorithm_setting_widget, 7, 0, 1, 2)
self.generate_task_button = QPushButton(self.tr("Generate Tasks"))
self.generate_task_button.setToolTip(self.tr("Click to generate the fitting tasks."))
self.main_layout.addWidget(self.generate_task_button, 8, 0, 1, 2)
self.process_state_label = QLabel(self.tr("Process State:"))
self.process_state_label.setStyleSheet("QLabel {font: bold;}")
self.main_layout.addWidget(self.process_state_label, 9, 0)
self.not_started_label = QLabel(self.tr("Not Started"))
self.not_started_label.setToolTip(self.tr("The number of not started tasks."))
self.not_started_display = QLabel("0")
self.main_layout.addWidget(self.not_started_label, 10, 0)
self.main_layout.addWidget(self.not_started_display, 10, 1)
self.succeeded_label = QLabel(self.tr("Succeeded"))
self.succeeded_label.setToolTip(self.tr("The number of succeeded tasks."))
self.succeeded_display = QLabel("0")
self.main_layout.addWidget(self.succeeded_label, 11, 0)
self.main_layout.addWidget(self.succeeded_display, 11, 1)
self.failed_label = QLabel(self.tr("Failed"))
self.failed_label.setToolTip(self.tr("The number of failed tasks."))
self.failed_display = QLabel("0")
self.main_layout.addWidget(self.failed_label, 12, 0)
self.main_layout.addWidget(self.failed_display, 12, 1)
self.time_spent_label = QLabel(self.tr("Time Spent"))
        self.time_spent_label.setToolTip(self.tr("The time already spent on these fitting tasks."))
        self.time_spent_display = QLabel("0:00:00")
        self.time_left_label = QLabel(self.tr("Time Left"))
        self.time_left_label.setToolTip(self.tr("The estimated time left for these fitting tasks."))
self.time_left_display = QLabel("99:59:59")
self.main_layout.addWidget(self.time_spent_label, 13, 0)
        self.main_layout.addWidget(self.time_spent_display, 13, 1)
self.main_layout.addWidget(self.time_left_label, 14, 0)
self.main_layout.addWidget(self.time_left_display, 14, 1)
self.progress_bar = QProgressBar()
self.progress_bar.setMaximum(0)
self.progress_bar.setValue(0)
self.main_layout.addWidget(self.progress_bar, 15, 0, 1, 2)
self.run_button = QPushButton(self.tr("Run"))
self.run_button.setToolTip(self.tr("Click to run / pause these fitting tasks."))
self.run_button.setEnabled(False)
self.finish_button = QPushButton(self.tr("Finish"))
        self.finish_button.setToolTip(self.tr("Click to finish the fitting process and record the succeeded results."))
self.finish_button.setEnabled(False)
self.main_layout.addWidget(self.run_button, 16, 0)
self.main_layout.addWidget(self.finish_button, 16, 1)
self.generate_task_button.clicked.connect(self.on_generate_task_button_clicked)
self.run_button.clicked.connect(self.on_run_button_clicked)
self.finish_button.clicked.connect(self.on_finish_button_clicked)
self.setWindowTitle(self.tr("The States of Fitting Tasks"))
self.setWindowFlags(Qt.Drawer)
self.cancel_msg_box = QMessageBox(self)
self.cancel_msg_box.setWindowTitle(self.tr("Warning"))
        self.cancel_msg_box.setText(self.tr("Are you sure you want to cancel the tasks?"))
self.cancel_msg_box.addButton(QMessageBox.StandardButton.Yes)
self.cancel_msg_box.addButton(QMessageBox.StandardButton.No)
self.cancel_msg_box.setDefaultButton(QMessageBox.StandardButton.No)
self.cancel_msg_box.setWindowFlags(Qt.Drawer)
self.storage_msg_box = QMessageBox(self)
self.storage_msg_box.setWindowTitle(self.tr("Info"))
        self.storage_msg_box.setText(self.tr("Store the unfinished and failed tasks for the next run?"))
self.storage_msg_box.addButton(QMessageBox.StandardButton.Yes)
self.storage_msg_box.addButton(QMessageBox.StandardButton.No)
self.storage_msg_box.setDefaultButton(QMessageBox.StandardButton.Yes)
self.storage_msg_box.setWindowFlags(Qt.Drawer)
@property
def samples(self):
if self.grain_size_data is None:
return []
start = self.start_sample_combo_box.currentIndex()
end = self.end_sample_combo_box.currentIndex()
interval = self.interval_input.value() + 1
# make sure the order is positive
if start > end:
start, end = end, start
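        # An interval setting of N keeps every (N+1)-th sample between start and end;
        # the default of 0 keeps every sample.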
return self.grain_size_data.samples[start: end+1: interval]
@property
def distribution_type(self):
return self.distribution_type_options[self.distribution_type_combo_box.currentText()]
@property
def component_numbers(self):
min_number = self.minimum_component_number.value()
max_number = self.maximum_component_number.value()
# make sure the order is positive
if min_number > max_number:
            min_number, max_number = max_number, min_number
return list(range(min_number, max_number+1))
def on_data_loaded(self, data: SampleDataset):
if data is None:
return
elif not data.has_data:
return
self.grain_size_data = data
sample_names = [sample.name for sample in data.samples]
self.start_sample_combo_box.clear()
self.start_sample_combo_box.addItems(sample_names)
self.start_sample_combo_box.setCurrentIndex(0)
self.end_sample_combo_box.clear()
self.end_sample_combo_box.addItems(sample_names)
self.end_sample_combo_box.setCurrentIndex(len(sample_names)-1)
def on_generate_task_button_clicked(self):
tasks = []
if self.staging_tasks is not None:
tasks.extend(self.staging_tasks)
self.staging_tasks = None
for sample in self.samples:
for component_number in self.component_numbers:
task = FittingTask(
sample,
component_number=component_number,
distribution_type=self.distribution_type,
algorithm_settings=self.algorithm_setting_widget.algorithm_settings)
tasks.append(task)
self.task_generated_signal.emit(tasks)
# update the ui
self.time_left_display.setText("99:59:59")
new_task_number = len(tasks)
all_task_number = new_task_number + self.progress_bar.maximum()
self.progress_bar.setMaximum(all_task_number)
not_started_number = int(self.not_started_display.text()) + new_task_number
self.not_started_display.setText(str(not_started_number))
self.run_button.setEnabled(True)
self.finish_button.setEnabled(True)
def on_task_state_updated(self, tasks: List[FittingTask],
states: Dict[UUID, ProcessState],
succeeded_results: Dict[UUID, FittingResult]):
assert tasks is not None
assert states is not None
assert succeeded_results is not None
task_number = len(tasks)
succeeded_task_number = len([value for value in states.values() if value == ProcessState.Succeeded])
failed_task_number = len([value for value in states.values() if value == ProcessState.Failed])
not_started_task_number = task_number - succeeded_task_number - failed_task_number
# update the ui
self.not_started_display.setText(str(not_started_task_number))
self.succeeded_display.setText(str(succeeded_task_number))
self.failed_display.setText(str(failed_task_number))
self.progress_bar.setMaximum(task_number)
self.progress_bar.setValue(succeeded_task_number + failed_task_number)
# calculate the spent and left time of tasks
if self.task_start_time is not None:
time_spent = time.time() - self.task_start_time + self.task_accumulative_time
else:
time_spent = self.task_accumulative_time
if not_started_task_number == task_number:
time_left = 359999 # equals to 99:59:59
elif not_started_task_number == 0:
time_left = 0
else:
time_left = time_spent / (succeeded_task_number + failed_task_number) * not_started_task_number
def second_to_hms(seconds: float):
m, s = divmod(int(seconds), 60)
h, m = divmod(m, 60)
return f"{h:d}:{m:02d}:{s:02d}"
        self.time_spent_display.setText(second_to_hms(time_spent))
self.time_left_display.setText(second_to_hms(time_left))
self.tasks = tasks
self.states = states
self.succeeded_results = succeeded_results
if not_started_task_number == 0:
self.running_flag = False
self.task_accumulative_time += time.time() - self.task_start_time
self.task_start_time = None
self.generate_task_button.setEnabled(True)
self.run_button.setText(self.tr("Run"))
self.run_button.setEnabled(False)
self.finish_button.setEnabled(True)
def on_run_button_clicked(self):
if not self.running_flag:
self.running_flag = True
self.task_start_time = time.time()
self.fitting_started_signal.emit()
self.generate_task_button.setEnabled(False)
self.run_button.setEnabled(True)
self.run_button.setText(self.tr("Pause"))
self.finish_button.setEnabled(False)
else:
if self.multiprocessing_resolver is not None:
self.multiprocessing_resolver.pause_task()
self.running_flag = False
self.task_accumulative_time += time.time() - self.task_start_time
self.task_start_time = None
self.generate_task_button.setEnabled(True)
self.run_button.setEnabled(True)
self.run_button.setText(self.tr("Run"))
self.finish_button.setEnabled(True)
def cleanup(self):
if self.multiprocessing_resolver is not None:
self.multiprocessing_resolver.cleanup()
self.tasks = None
self.states = None
self.succeeded_results = None
self.task_start_time = None
self.task_accumulative_time = 0.0
self.not_started_display.setText("0")
self.succeeded_display.setText("0")
self.failed_display.setText("0")
self.progress_bar.setMaximum(0)
self.progress_bar.setValue(0)
        self.time_spent_display.setText("0:00:00")
self.time_left_display.setText("99:59:59")
self.run_button.setText(self.tr("Run"))
self.running_flag = False
self.generate_task_button.setEnabled(True)
self.run_button.setEnabled(False)
self.finish_button.setEnabled(False)
def check_result(self):
# classify the results by samples' id
results_by_sample_id = {}
for task in self.tasks:
if self.states[task.uuid] == ProcessState.Succeeded:
if task.sample.uuid in results_by_sample_id:
results_by_sample_id[task.sample.uuid].append(self.succeeded_results[task.uuid])
else:
results_by_sample_id[task.sample.uuid] = [self.succeeded_results[task.uuid]]
checked_results = []
for sample_id, results in results_by_sample_id.items():
valid_results = {}
mse_values = {}
minimum_mse_values = {}
optional_component_numbers = {}
for result in results:
                # record the mean squared error of each result to judge whether the component number is insufficient for this sample
mse_values[result.component_number] = result.mean_squared_error
# check if there is any needless component
has_needless_component = False
needless_component_number = 0
for component in result.components:
# if the fraction of any component is less than 0.01%
if component.fraction < 1e-4 or component.has_nan:
has_needless_component = True
needless_component_number += 1
# if there is any needless component, the mean squared error is close to the lowest level of this sample
if has_needless_component:
minimum_mse_values[result.component_number] = result.mean_squared_error
optional_component_numbers[result.component_number] = result.component_number - needless_component_number
continue
# ignore invalid result
if result.has_invalid_value:
continue
valid_results[result.component_number] = result
optional_component_number_count = {}
for component_number, optional_component_number in optional_component_numbers.items():
if optional_component_number in optional_component_number_count:
optional_component_number_count[optional_component_number] += 1
else:
optional_component_number_count[optional_component_number] = 1
min_count_component_number = None
min_count = 10000
for optional_component_number, count in optional_component_number_count.items():
if count < min_count:
min_count = count
min_count_component_number = optional_component_number
# for result in results:
# if result.component_number == min_count_component_number:
# checked_results.append(result)
# break
            # calculate the 1/4, 1/2, and 3/4 position values (quartiles) of the mean squared errors to judge which results are invalid
            # 1. the mean squared errors are much higher for results that lack components
            # 2. as the component number increases, the mean squared error decreases and finally reaches its minimum
median = np.median(list(mse_values.values()))
upper_group = [value for value in mse_values.values() if value >= median]
lower_group = [value for value in mse_values.values() if value <= median]
value_1_4 = np.median(lower_group)
value_3_4 = np.median(upper_group)
distance_QR = value_3_4 - value_1_4
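            # distance_QR approximates the interquartile range of the MSE values; results whose MSE
            # lies more than 2.5 * IQR away from the median are treated as outliers below.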
non_outlier_results = []
for component_number, result in valid_results.items():
if np.abs(result.mean_squared_error - median) < distance_QR * 2.5:
non_outlier_results.append(result)
if len(non_outlier_results) >= 1:
least_result = non_outlier_results[0]
for result in non_outlier_results:
if result.component_number < least_result.component_number:
least_result = result
checked_results.append(least_result)
else:
for result in results:
if result.component_number == min_count_component_number:
checked_results.append(result)
break
self.fitting_finished_signal.emit(checked_results)
def on_finish_button_clicked(self):
if self.tasks is None:
assert self.states is None
assert self.succeeded_results is None
else:
# self.check_result()
not_started_number = int(self.not_started_display.text())
self.fitting_finished_signal.emit(list(self.succeeded_results.values()))
if not_started_number != 0:
res = self.storage_msg_box.exec_()
if res == QMessageBox.Yes:
staging_tasks = []
for task in self.tasks:
if self.states[task.uuid] != ProcessState.Succeeded:
staging_tasks.append(task)
self.staging_tasks = staging_tasks
self.cleanup()
def closeEvent(self, e):
if self.running_flag:
res = self.cancel_msg_box.exec_()
if res == QMessageBox.Yes:
self.on_run_button_clicked()
e.accept()
else:
e.ignore()
else:
e.accept()
if __name__ == "__main__":
import sys
from PySide2.QtWidgets import QApplication
app = QApplication(sys.argv)
task_window = TaskWindow()
task_window.show()
sys.exit(app.exec_())
|
the-stack_0_20602 | # -*- coding:utf-8 -*-
__author__ = 'Randolph'
__modify__ = 'Zachary'
import os
import sys
import time
import numpy as np
import tensorflow as tf
from utils import checkpoints
from utils import feed
from sklearn.metrics import precision_score, recall_score, f1_score, \
roc_auc_score, average_precision_score
def test_ann(word2vec_path,
model_number):
# Parameters
# =============================================================================
logger = feed.logger_fn("tflog",
"logs/test-{0}.log".format(time.asctime()))
# MODEL = input("☛ Please input the model file you want to test, "
# "it should be like(1490175368): ")
MODEL = str(model_number)
while not (MODEL.isdigit() and len(MODEL) == 10):
MODEL = input("✘ The format of your input is illegal, "
"it should be like(1490175368), please re-input: ")
logger.info("✔︎ The format of your input is legal, "
"now loading to next step...")
TRAININGSET_DIR = 'models/citability/data/Train.json'
VALIDATIONSET_DIR = 'models/citability/data/Validation.json'
# TEST_DIR = 'data/Test.json'
cwd = os.getcwd()
TEST_DIR = os.path.join(cwd, 'web/test_data.json')
cwd = os.getcwd()
MODEL_DIR = os.path.join(cwd, 'web/runs/' + MODEL + '/checkpoints/')
print(MODEL_DIR)
BEST_MODEL_DIR = 'runs/' + MODEL + '/bestcheckpoints/'
SAVE_DIR = 'results/' + MODEL
# Data Parameters
tf.flags.DEFINE_string("training_data_file",
TRAININGSET_DIR,
"Data source for the training data.")
tf.flags.DEFINE_string("validation_data_file",
VALIDATIONSET_DIR,
"Data source for the validation data")
tf.flags.DEFINE_string("test_data_file",
TEST_DIR,
"Data source for the test data")
tf.flags.DEFINE_string("checkpoint_dir",
MODEL_DIR,
"Checkpoint directory from training run")
tf.flags.DEFINE_string("best_checkpoint_dir",
BEST_MODEL_DIR,
"Best checkpoint directory from training run")
# Model Hyperparameters
tf.flags.DEFINE_integer("pad_seq_len",
35842,
"Recommended padding Sequence length of data "
"(depends on the data)")
tf.flags.DEFINE_integer("embedding_dim",
300,
"Dimensionality of character embedding "
"(default: 128)")
tf.flags.DEFINE_integer("embedding_type",
1,
"The embedding type (default: 1)")
tf.flags.DEFINE_integer("fc_hidden_size",
1024,
"Hidden size for fully connected layer "
"(default: 1024)")
tf.flags.DEFINE_float("dropout_keep_prob",
0.5,
"Dropout keep probability (default: 0.5)")
tf.flags.DEFINE_float("l2_reg_lambda",
0.0,
"L2 regularization lambda (default: 0.0)")
tf.flags.DEFINE_integer("num_classes",
80,
"Number of labels (depends on the task)")
tf.flags.DEFINE_integer("top_num",
80,
"Number of top K prediction classes (default: 5)")
tf.flags.DEFINE_float("threshold",
0.5,
"Threshold for prediction classes (default: 0.5)")
# Test Parameters
tf.flags.DEFINE_integer("batch_size",
1,
"Batch Size (default: 1)")
# Misc Parameters
tf.flags.DEFINE_boolean("allow_soft_placement",
True,
"Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement",
False,
"Log placement of ops on devices")
tf.flags.DEFINE_boolean("gpu_options_allow_growth",
True,
"Allow gpu options growth")
FLAGS = tf.flags.FLAGS
FLAGS(sys.argv)
dilim = '-' * 100
logger.info('\n'.join(
[dilim, *['{0:>50}|{1:<50}'.format(attr.upper(), FLAGS.__getattr__(
attr)) for attr in sorted(FLAGS.__dict__['__wrapped'])], dilim]))
"""Test ANN model."""
# Load data
logger.info("✔︎ Loading data...")
logger.info("Recommended padding Sequence length is: {0}".
format(FLAGS.pad_seq_len))
logger.info("✔︎ Test data processing...")
test_data = feed.load_data_and_labels(FLAGS.test_data_file,
FLAGS.num_classes,
FLAGS.embedding_dim,
data_aug_flag=False,
word2vec_path=word2vec_path)
logger.info("✔︎ Test data padding...")
x_test, y_test = feed.pad_data(test_data, FLAGS.pad_seq_len)
y_test_labels = test_data.labels
# Load ann model
# BEST_OR_LATEST = input("☛ Load Best or Latest Model?(B/L): ")
BEST_OR_LATEST = 'L'
while not (BEST_OR_LATEST.isalpha() and BEST_OR_LATEST.upper()
in ['B', 'L']):
BEST_OR_LATEST = \
input("✘ The format of your input is illegal, please re-input: ")
if BEST_OR_LATEST.upper() == 'B':
logger.info("✔︎ Loading best model...")
checkpoint_file = checkpoints.get_best_checkpoint(
FLAGS.best_checkpoint_dir, select_maximum_value=True)
else:
logger.info("✔︎ Loading latest model...")
checkpoint_file = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)
logger.info(checkpoint_file)
graph = tf.Graph()
with graph.as_default():
session_conf = tf.ConfigProto(
allow_soft_placement=FLAGS.allow_soft_placement,
log_device_placement=FLAGS.log_device_placement)
session_conf.gpu_options.allow_growth = FLAGS.gpu_options_allow_growth
sess = tf.Session(config=session_conf)
with sess.as_default():
# Load the saved meta graph and restore variables
saver = tf.train.import_meta_graph("{0}.meta".format(
checkpoint_file))
saver.restore(sess, checkpoint_file)
# Get the placeholders from the graph by name
input_x = graph.get_operation_by_name("input_x").outputs[0]
input_y = graph.get_operation_by_name("input_y").outputs[0]
dropout_keep_prob = graph.get_operation_by_name(
"dropout_keep_prob").outputs[0]
is_training = graph.get_operation_by_name("is_training").outputs[0]
# Tensors we want to evaluate
scores = graph.get_operation_by_name("output/scores").outputs[0]
loss = graph.get_operation_by_name("loss/loss").outputs[0]
# Split the output nodes name by '|' if you have several output
# nodes
output_node_names = "output/scores"
# Save the .pb model file
output_graph_def = tf.graph_util.convert_variables_to_constants(
sess, sess.graph_def, output_node_names.split("|"))
tf.train.write_graph(output_graph_def, "graph", "graph-ann-{0}.pb".
format(MODEL), as_text=False)
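        # Sketch of reloading the frozen graph written above (path and tensor
        # name follow the export code here; illustration only, not part of the
        # original script):
        #
        #     with tf.gfile.GFile("graph/graph-ann-{0}.pb".format(MODEL), "rb") as f:
        #         graph_def = tf.GraphDef()
        #         graph_def.ParseFromString(f.read())
        #     with tf.Graph().as_default() as g:
        #         tf.import_graph_def(graph_def, name="")
        #         scores_tensor = g.get_tensor_by_name("output/scores:0")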
# Generate batches for one epoch
batches = feed.batch_iter(list(zip(x_test, y_test, y_test_labels)),
FLAGS.batch_size, 1, shuffle=False)
test_counter, test_loss = 0, 0.0
test_pre_tk = [0.0] * FLAGS.top_num
test_rec_tk = [0.0] * FLAGS.top_num
test_F_tk = [0.0] * FLAGS.top_num
# Collect the predictions here
true_labels = []
predicted_labels = []
predicted_scores = []
# Collect for calculating metrics
true_onehot_labels = []
predicted_onehot_scores = []
predicted_onehot_labels_ts = []
predicted_onehot_labels_tk = [[] for _ in range(FLAGS.top_num)]
for batch_test in batches:
x_batch_test, y_batch_test, y_batch_test_labels = zip(
*batch_test)
print("x_batch_test", x_batch_test)
print("y_batch_test", y_batch_test)
feed_dict = {
input_x: x_batch_test,
input_y: y_batch_test,
dropout_keep_prob: 1.0,
is_training: False
}
batch_scores, cur_loss = sess.run([scores, loss], feed_dict)
# Prepare for calculating metrics
for i in y_batch_test:
true_onehot_labels.append(i)
for j in batch_scores:
predicted_onehot_scores.append(j)
# Get the predicted labels by threshold
batch_predicted_labels_ts, batch_predicted_scores_ts = \
feed.get_label_threshold(scores=batch_scores,
threshold=FLAGS.threshold)
# Add results to collection
for i in y_batch_test_labels:
true_labels.append(i)
for j in batch_predicted_labels_ts:
predicted_labels.append(j)
for k in batch_predicted_scores_ts:
predicted_scores.append(k)
# Get onehot predictions by threshold
batch_predicted_onehot_labels_ts = \
feed.get_onehot_label_threshold(scores=batch_scores,
threshold=FLAGS.threshold)
for i in batch_predicted_onehot_labels_ts:
predicted_onehot_labels_ts.append(i)
# Get onehot predictions by topK
for top_num in range(FLAGS.top_num):
batch_predicted_onehot_labels_tk = feed.\
get_onehot_label_topk(scores=batch_scores,
top_num=top_num + 1)
for i in batch_predicted_onehot_labels_tk:
predicted_onehot_labels_tk[top_num].append(i)
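            # At this point predicted_onehot_labels_tk[k-1] holds this batch's
            # one-hot top-k predictions for every k up to FLAGS.top_num; they
            # feed the precision/recall/F1@k loop after all batches finish.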
test_loss = test_loss + cur_loss
test_counter = test_counter + 1
# Calculate Precision & Recall & F1 (threshold & topK)
test_pre_ts = precision_score(y_true=np.array(true_onehot_labels),
y_pred=np.array(
predicted_onehot_labels_ts),
average='micro')
test_rec_ts = recall_score(y_true=np.array(true_onehot_labels),
y_pred=np.array(
predicted_onehot_labels_ts),
average='micro')
test_F_ts = f1_score(y_true=np.array(true_onehot_labels),
y_pred=np.array(predicted_onehot_labels_ts),
average='micro')
for top_num in range(FLAGS.top_num):
test_pre_tk[top_num] = precision_score(
y_true=np.array(true_onehot_labels),
y_pred=np.array(predicted_onehot_labels_tk[top_num]),
average='micro')
test_rec_tk[top_num] = recall_score(
y_true=np.array(true_onehot_labels),
y_pred=np.array(predicted_onehot_labels_tk[top_num]),
average='micro')
test_F_tk[top_num] = f1_score(
y_true=np.array(true_onehot_labels),
y_pred=np.array(predicted_onehot_labels_tk[top_num]),
average='micro')
# Calculate the average AUC
test_auc = roc_auc_score(
y_true=np.array(true_onehot_labels),
y_score=np.array(predicted_onehot_scores),
average='micro')
# Calculate the average PR
test_prc = average_precision_score(
y_true=np.array(true_onehot_labels),
y_score=np.array(predicted_onehot_scores),
average="micro")
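        # With average='micro', scikit-learn flattens the
        # (num_samples, num_classes) indicator matrix, so the AUC/AUPRC above
        # and the P/R/F1 values are computed over all (sample, label) pairs
        # jointly rather than averaged per class.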
test_loss = float(test_loss / test_counter)
logger.info(
"☛ All Test Dataset: Loss {0:g} | AUC {1:g} | AUPRC {2:g}"
.format(test_loss, test_auc, test_prc))
# Predict by threshold
logger.info(
"☛ Predict by threshold: Precision {0:g}, Recall {1:g}, F1 {2:g}"
.format(test_pre_ts, test_rec_ts, test_F_ts))
# Predict by topK
logger.info("☛ Predict by topK:")
for top_num in range(FLAGS.top_num):
logger.info("Top{0}: Precision {1:g}, Recall {2:g}, F {3:g}"
.format(top_num + 1, test_pre_tk[top_num],
test_rec_tk[top_num],
test_F_tk[top_num]))
# Save the prediction result
if not os.path.exists(SAVE_DIR):
os.makedirs(SAVE_DIR)
feed.create_prediction_file(
output_file=SAVE_DIR + "/predictions.json",
data_id=test_data.testid,
all_labels=true_labels,
all_predict_labels=predicted_labels,
all_predict_scores=predicted_scores)
logger.info("✔︎ Done.")
if __name__ == '__main__':
home = "/home/zachary"
word2vec_path = os.path.join(home, "GoogleNews-vectors-negative300.bin")
model_number = 1553177254
test_ann(word2vec_path,
model_number)
|
the-stack_0_20605 | import argparse
from ldap3.core.exceptions import LDAPInvalidFilterError
from arcutils.colorize import printer
from arcutils.ldap import ldapsearch
from arcutils.settings import init_settings
from django.conf import global_settings, settings
def main(argv=None):
base_settings = {}
init_settings(base_settings, quiet=True)
settings.configure(global_settings, **base_settings)
parser = argparse.ArgumentParser(description='ArcUtils Commands')
subparsers = parser.add_subparsers()
ldap_parser = subparsers.add_parser('ldap')
ldap_parser.set_defaults(command=ldap)
ldap_parser.add_argument('query')
ldap_parser.add_argument('--search-base', default=None)
ldap_parser.add_argument(
'--attributes', default=None, help='LDAP attributes to fetch, separated by commas')
ldap_parser.add_argument('--parse', default=True, action='store_true', dest='parse')
ldap_parser.add_argument('--no-parse', default=True, action='store_false', dest='parse')
ldap_parser.add_argument('--using', default='default')
args = parser.parse_args(argv)
if hasattr(args, 'command'):
status = args.command(args)
parser.exit(status or 0)
else:
parser.print_help()
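# Hypothetical invocation (the command name is a placeholder, not taken from
# the original project's docs):
#
#     arcutils ldap '(uid=jdoe)' --attributes cn,mail --using default
#
# The query must be a well-formed, parenthesized LDAP filter; a malformed
# filter (e.g. missing the surrounding parens) raises LDAPInvalidFilterError,
# which is handled in ldap() below.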
def ldap(args):
q = args.query
using = args.using
search_base = args.search_base
attributes = args.attributes
parse = args.parse
if attributes is not None:
attributes = [a.strip() for a in attributes.split(',')]
try:
results = ldapsearch(
q, using=using, search_base=search_base, attributes=attributes, parse=parse)
except LDAPInvalidFilterError:
printer.error('Invalid LDAP filter: {q}'.format(q=q))
printer.error('Is the query wrapped in parens?')
return 1
if not results:
printer.error('No results found')
return 2
for r in results:
if not parse:
print('dn', '=>', r['dn'], '\n')
r = r['attributes']
for k in sorted(r.keys()):
v = r[k]
print(k, '=>', v)
if __name__ == '__main__':
main()
|
the-stack_0_20606 | import argparse
import torch
from torchvision import utils
from model import Generator
if __name__ == "__main__":
torch.set_grad_enabled(False)
parser = argparse.ArgumentParser(description="Apply closed form factorization")
parser.add_argument(
"-i", "--index", type=int, default=0, help="index of eigenvector"
)
parser.add_argument(
"-d",
"--degree",
type=float,
default=5,
help="scalar factors for moving latent vectors along eigenvector",
)
parser.add_argument(
"--channel_multiplier",
type=int,
default=2,
help='channel multiplier factor. config-f = 2, else = 1',
)
parser.add_argument("--ckpt", type=str, required=True, help="stylegan2 checkpoints")
parser.add_argument(
"--size", type=int, default=256, help="output image size of the generator"
)
parser.add_argument(
"-n", "--n_sample", type=int, default=7, help="number of samples created"
)
parser.add_argument(
"--truncation", type=float, default=0.7, help="truncation factor"
)
parser.add_argument(
"--device", type=str, default="cuda", help="device to run the model"
)
parser.add_argument(
"--out_prefix",
type=str,
default="factor",
help="filename prefix to result samples",
)
parser.add_argument(
"factor",
type=str,
help="name of the closed form factorization result factor file",
)
args = parser.parse_args()
eigvec = torch.load(args.factor)["eigvec"].to(args.device)
ckpt = torch.load(args.ckpt)
g = Generator(args.size, 512, 8, channel_multiplier=args.channel_multiplier).to(args.device)
g.load_state_dict(ckpt["g_ema"], strict=False)
trunc = g.mean_latent(4096)
latent = torch.randn(args.n_sample, 512, device=args.device)
latent = g.get_latent(latent)
direction = args.degree * eigvec[:, args.index].unsqueeze(0)
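    # eigvec[:, index] is one semantic editing direction in latent space; img
    # below comes from the unmodified latents, while img1/img2 come from
    # latents shifted by +direction / -direction along that eigenvector.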
img, _ = g(
[latent],
truncation=args.truncation,
truncation_latent=trunc,
input_is_latent=True,
)
img1, _ = g(
[latent + direction],
truncation=args.truncation,
truncation_latent=trunc,
input_is_latent=True,
)
img2, _ = g(
[latent - direction],
truncation=args.truncation,
truncation_latent=trunc,
input_is_latent=True,
)
grid = utils.save_image(
torch.cat([img1, img, img2], 0),
f"{args.out_prefix}_index-{args.index}_degree-{args.degree}.png",
normalize=True,
range=(-1, 1),
nrow=args.n_sample,
)
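    # Hypothetical invocation (flag names come from the parser above; the
    # checkpoint and factor file names are placeholders):
    #
    #     python apply_factor.py -i 0 -d 5 --ckpt stylegan2-ckpt.pt factor.pt
    #
    # With the defaults this writes factor_index-0_degree-5.0.png, where each
    # column is one sample and the three rows are latent + direction, the
    # original latent, and latent - direction.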
|
the-stack_0_20608 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 24 10:03:44 2021
@author: woojae-macbook13
"""
from gurobipy import*
try :
m = Model("ex4_1_2")
Z = LinExpr()
E = m.addVars(4, vtype = GRB.BINARY, name = 'E')
S = m.addVars(4, vtype = GRB.BINARY, name = 'S')
VAL_E = [4.5, 7.8, 3.6, 2.9]
VAL_S = [4.9, 7.2, 4.3, 3.1]
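    # Model sketch (inferred from the code below, not from the original
    # exercise statement):
    #   minimize  sum_i VAL_E[i]*E[i] + VAL_S[i]*S[i]
    #   s.t.      E[i] + S[i] == 1  for every job i  (each job assigned once)
    #             sum_i E[i] == 2                    (E takes exactly two jobs)
    #             sum_i S[i] == 2                    (S takes exactly two jobs)
    #             E[i], S[i] binary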
for i in range(0, 4):
Z += VAL_E[i]*E[i] + VAL_S[i]*S[i]
    # Constraint 1: each job i is assigned to exactly one of E or S
tempC = 0
for i in range(0, 4):
tempC = E[i] + S[i]
c0 = tempC == 1
m.addConstr(c0, 'c0'+str(i))
    # Constraint 2: E is assigned exactly two jobs
tempE = 0
for i in range(0, 4):
tempE += E[i]
c1 = tempE == 2
    # Constraint 3: S is assigned exactly two jobs
tempS = 0
for i in range(0, 4):
tempS += S[i]
c2 = tempS == 2
m.addConstr(c1, 'c1')
m.addConstr(c2, 'c2')
m.setObjective(Z, GRB.MINIMIZE)
m.optimize()
for v in m.getVars():
print(v.varName, ':', v.x)
print('Z : ', m.objVal)
except GurobiError:
print('Error reported')
|
the-stack_0_20609 | from __future__ import absolute_import
import sys
import os
import distutils.errors
import six
from six.moves import urllib, http_client
import pkg_resources
import setuptools.package_index
from setuptools.tests.server import IndexServer
from .textwrap import DALS
class TestPackageIndex:
def test_regex(self):
hash_url = 'http://other_url?:action=show_md5&'
hash_url += 'digest=0123456789abcdef0123456789abcdef'
doc = """
<a href="http://some_url">Name</a>
(<a title="MD5 hash"
href="{hash_url}">md5</a>)
""".lstrip().format(**locals())
assert setuptools.package_index.PYPI_MD5.match(doc)
def test_bad_url_bad_port(self):
index = setuptools.package_index.PackageIndex()
url = 'http://127.0.0.1:0/nonesuch/test_package_index'
try:
v = index.open_url(url)
except Exception as v:
assert url in str(v)
else:
assert isinstance(v, urllib.error.HTTPError)
def test_bad_url_typo(self):
# issue 16
# easy_install inquant.contentmirror.plone breaks because of a typo
# in its home URL
index = setuptools.package_index.PackageIndex(
hosts=('www.example.com',)
)
url = 'url:%20https://svn.plone.org/svn/collective/inquant.contentmirror.plone/trunk'
try:
v = index.open_url(url)
except Exception as v:
assert url in str(v)
else:
assert isinstance(v, urllib.error.HTTPError)
def test_bad_url_bad_status_line(self):
index = setuptools.package_index.PackageIndex(
hosts=('www.example.com',)
)
def _urlopen(*args):
raise http_client.BadStatusLine('line')
index.opener = _urlopen
url = 'http://example.com'
try:
v = index.open_url(url)
except Exception as v:
assert 'line' in str(v)
else:
raise AssertionError('Should have raise here!')
def test_bad_url_double_scheme(self):
"""
A bad URL with a double scheme should raise a DistutilsError.
"""
index = setuptools.package_index.PackageIndex(
hosts=('www.example.com',)
)
# issue 20
url = 'http://http://svn.pythonpaste.org/Paste/wphp/trunk'
try:
index.open_url(url)
except distutils.errors.DistutilsError as error:
msg = six.text_type(error)
assert 'nonnumeric port' in msg or 'getaddrinfo failed' in msg or 'Name or service not known' in msg
return
raise RuntimeError("Did not raise")
def test_bad_url_screwy_href(self):
index = setuptools.package_index.PackageIndex(
hosts=('www.example.com',)
)
# issue #160
if sys.version_info[0] == 2 and sys.version_info[1] == 7:
# this should not fail
url = 'http://example.com'
page = ('<a href="http://www.famfamfam.com]('
'http://www.famfamfam.com/">')
index.process_index(url, page)
def test_url_ok(self):
index = setuptools.package_index.PackageIndex(
hosts=('www.example.com',)
)
url = 'file:///tmp/test_package_index'
assert index.url_ok(url, True)
def test_links_priority(self):
"""
Download links from the pypi simple index should be used before
external download links.
https://bitbucket.org/tarek/distribute/issue/163
Usecase :
- someone uploads a package on pypi, a md5 is generated
- someone manually copies this link (with the md5 in the url) onto an
external page accessible from the package page.
- someone reuploads the package (with a different md5)
- while easy_installing, an MD5 error occurs because the external link
is used
-> Setuptools should use the link from pypi, not the external one.
"""
if sys.platform.startswith('java'):
# Skip this test on jython because binding to :0 fails
return
# start an index server
server = IndexServer()
server.start()
index_url = server.base_url() + 'test_links_priority/simple/'
# scan a test index
pi = setuptools.package_index.PackageIndex(index_url)
requirement = pkg_resources.Requirement.parse('foobar')
pi.find_packages(requirement)
server.stop()
# the distribution has been found
assert 'foobar' in pi
# we have only one link, because links are compared without md5
assert len(pi['foobar']) == 1
# the link should be from the index
assert 'correct_md5' in pi['foobar'][0].location
def test_parse_bdist_wininst(self):
parse = setuptools.package_index.parse_bdist_wininst
actual = parse('reportlab-2.5.win32-py2.4.exe')
expected = 'reportlab-2.5', '2.4', 'win32'
assert actual == expected
actual = parse('reportlab-2.5.win32.exe')
expected = 'reportlab-2.5', None, 'win32'
assert actual == expected
actual = parse('reportlab-2.5.win-amd64-py2.7.exe')
expected = 'reportlab-2.5', '2.7', 'win-amd64'
assert actual == expected
actual = parse('reportlab-2.5.win-amd64.exe')
expected = 'reportlab-2.5', None, 'win-amd64'
assert actual == expected
def test__vcs_split_rev_from_url(self):
"""
Test the basic usage of _vcs_split_rev_from_url
"""
vsrfu = setuptools.package_index.PackageIndex._vcs_split_rev_from_url
url, rev = vsrfu('https://example.com/bar@2995')
assert url == 'https://example.com/bar'
assert rev == '2995'
def test_local_index(self, tmpdir):
"""
local_open should be able to read an index from the file system.
"""
index_file = tmpdir / 'index.html'
with index_file.open('w') as f:
f.write('<div>content</div>')
url = 'file:' + urllib.request.pathname2url(str(tmpdir)) + '/'
res = setuptools.package_index.local_open(url)
assert 'content' in res.read()
def test_egg_fragment(self):
"""
EGG fragments must comply to PEP 440
"""
epoch = [
'',
'1!',
]
releases = [
'0',
'0.0',
'0.0.0',
]
pre = [
'a0',
'b0',
'rc0',
]
post = [
'.post0'
]
dev = [
'.dev0',
]
local = [
('', ''),
('+ubuntu.0', '+ubuntu.0'),
('+ubuntu-0', '+ubuntu.0'),
('+ubuntu_0', '+ubuntu.0'),
]
versions = [
[''.join([e, r, p, l]) for l in ll]
for e in epoch
for r in releases
for p in sum([pre, post, dev], [''])
for ll in local]
for v, vc in versions:
dists = list(setuptools.package_index.distros_for_url(
'http://example.com/example.zip#egg=example-' + v))
assert dists[0].version == ''
assert dists[1].version == vc
class TestContentCheckers:
def test_md5(self):
checker = setuptools.package_index.HashChecker.from_url(
'http://foo/bar#md5=f12895fdffbd45007040d2e44df98478')
checker.feed('You should probably not be using MD5'.encode('ascii'))
assert checker.hash.hexdigest() == 'f12895fdffbd45007040d2e44df98478'
assert checker.is_valid()
def test_other_fragment(self):
"Content checks should succeed silently if no hash is present"
checker = setuptools.package_index.HashChecker.from_url(
'http://foo/bar#something%20completely%20different')
checker.feed('anything'.encode('ascii'))
assert checker.is_valid()
def test_blank_md5(self):
"Content checks should succeed if a hash is empty"
checker = setuptools.package_index.HashChecker.from_url(
'http://foo/bar#md5=')
checker.feed('anything'.encode('ascii'))
assert checker.is_valid()
def test_get_hash_name_md5(self):
checker = setuptools.package_index.HashChecker.from_url(
'http://foo/bar#md5=f12895fdffbd45007040d2e44df98478')
assert checker.hash_name == 'md5'
def test_report(self):
checker = setuptools.package_index.HashChecker.from_url(
'http://foo/bar#md5=f12895fdffbd45007040d2e44df98478')
rep = checker.report(lambda x: x, 'My message about %s')
assert rep == 'My message about md5'
class TestPyPIConfig:
def test_percent_in_password(self, tmpdir, monkeypatch):
monkeypatch.setitem(os.environ, 'HOME', str(tmpdir))
pypirc = tmpdir / '.pypirc'
with pypirc.open('w') as strm:
strm.write(DALS("""
[pypi]
repository=https://pypi.python.org
username=jaraco
password=pity%
"""))
cfg = setuptools.package_index.PyPIConfig()
cred = cfg.creds_by_repository['https://pypi.python.org']
assert cred.username == 'jaraco'
assert cred.password == 'pity%'
|
the-stack_0_20610 | import subprocess
from multiprocessing.queues import Empty
__author__ = 'veselt12'
from multiprocessing import Process, JoinableQueue, Event
import gzip, io
class FileReader(Process):
def __init__(self, filename, buffer_size=1000):
super(FileReader, self).__init__()
self.filename = filename
self.que = JoinableQueue(buffer_size)
self.event = Event()
self.event.set()
self.started = Event()
self.started.clear()
# It's crucial to call task_done on the queue after the item was processed
def get_queue(self):
return self.que
def get_event(self):
return self.event
def is_done(self):
return not self.event.is_set() and self.que.empty()
def run(self):
self.started.set()
self.proc()
self.event.clear()
def proc(self):
with open_gz(self.filename, encoding='utf-8') as file:
for line in file:
self.que.put(line)
def __iter__(self):
self.start()
self.started.wait()
while not self.is_done():
try:
text = self.que.get(timeout=0.1)
yield text
self.que.task_done()
except Empty:
pass
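# Hypothetical usage sketch (the file name is a placeholder, not part of the
# original module):
#
#     reader = FileReader('corpus.txt.gz', buffer_size=1000)
#     for line in reader:      # iteration starts the background process
#         handle(line)
#
# __iter__ calls task_done() after each yielded item, which is the contract
# noted above for consumers that read from get_queue() directly.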
class BlockFileReader(FileReader):
def proc(self):
with open_gz(self.filename, encoding='utf-8') as file:
buffer = []
for line in file:
if line == '\n':
self.que.put("".join(buffer))
buffer = []
else:
buffer.append(line)
def open_gz(file, mode='r', encoding='utf-8'):
if file.endswith('.gz'):
return io.TextIOWrapper(gzip.open(file, mode=mode), encoding=encoding)
return open(file, mode=mode, encoding=encoding) |